---
language:
- en
license: apache-2.0
size_categories:
- 10M<n<100M
task_categories:
- reinforcement-learning
pretty_name: Procgen Benchmark Dataset
configs:
- config_name: bigfish
  data_files:
  - split: train
    path: bigfish/train-*
  - split: test
    path: bigfish/test-*
- config_name: bossfight
  data_files:
  - split: train
    path: bossfight/train-*
  - split: test
    path: bossfight/test-*
- config_name: caveflyer
  data_files:
  - split: train
    path: caveflyer/train-*
  - split: test
    path: caveflyer/test-*
- config_name: chaser
  data_files:
  - split: train
    path: chaser/train-*
  - split: test
    path: chaser/test-*
- config_name: climber
  data_files:
  - split: train
    path: climber/train-*
  - split: test
    path: climber/test-*
- config_name: coinrun
  data_files:
  - split: train
    path: coinrun/train-*
  - split: test
    path: coinrun/test-*
- config_name: dodgeball
  data_files:
  - split: train
    path: dodgeball/train-*
  - split: test
    path: dodgeball/test-*
- config_name: fruitbot
  data_files:
  - split: train
    path: fruitbot/train-*
  - split: test
    path: fruitbot/test-*
- config_name: heist
  data_files:
  - split: train
    path: heist/train-*
  - split: test
    path: heist/test-*
- config_name: jumper
  data_files:
  - split: train
    path: jumper/train-*
  - split: test
    path: jumper/test-*
- config_name: leaper
  data_files:
  - split: train
    path: leaper/train-*
  - split: test
    path: leaper/test-*
- config_name: maze
  data_files:
  - split: train
    path: maze/train-*
  - split: test
    path: maze/test-*
- config_name: miner
  data_files:
  - split: train
    path: miner/train-*
  - split: test
    path: miner/test-*
- config_name: ninja
  data_files:
  - split: train
    path: ninja/train-*
  - split: test
    path: ninja/test-*
- config_name: plunder
  data_files:
  - split: train
    path: plunder/train-*
  - split: test
    path: plunder/test-*
- config_name: starpilot
  data_files:
  - split: train
    path: starpilot/train-*
  - split: test
    path: starpilot/test-*
tags:
- procgen
- bigfish
- benchmark
- openai
- bossfight
- caveflyer
- chaser
- climber
- dodgeball
- fruitbot
- heist
- jumper
- leaper
- maze
- miner
- ninja
- plunder
- starpilot
dataset_info:
- config_name: bigfish
  features:
  - name: observation
    dtype:
      image:
        mode: RGB
  - name: action
    dtype: int32
  - name: reward
    dtype: float32
  - name: terminated
    dtype: bool
  - name: truncated
    dtype: bool
  splits:
  - name: train
    num_bytes: 52167166912.0
    num_examples: 9000000
  - name: test
    num_bytes: 5791205186.0
    num_examples: 1000000
  download_size: 57881994479
  dataset_size: 57958372098.0
- config_name: bossfight
  features:
  - name: observation
    dtype:
      image:
        mode: RGB
  - name: action
    dtype: int32
  - name: reward
    dtype: float32
  - name: terminated
    dtype: bool
  - name: truncated
    dtype: bool
  splits:
  - name: train
    num_bytes: 66990875098.0
    num_examples: 9000000
  - name: test
    num_bytes: 7422341538.0
    num_examples: 1000000
  download_size: 74364918274
  dataset_size: 74413216636.0
- config_name: caveflyer
  features:
  - name: observation
    dtype:
      image:
        mode: RGB
  - name: action
    dtype: int32
  - name: reward
    dtype: float32
  - name: terminated
    dtype: bool
  - name: truncated
    dtype: bool
  splits:
  - name: train
    num_bytes: 43639897909.0
    num_examples: 9000000
  - name: test
    num_bytes: 4873680436.0
    num_examples: 1000000
  download_size: 48700939902
  dataset_size: 48513578345.0
- config_name: chaser
  features:
  - name: observation
    dtype:
      image:
        mode: RGB
  - name: action
    dtype: int32
  - name: reward
    dtype: float32
  - name: terminated
    dtype: bool
  - name: truncated
    dtype: bool
  splits:
  - name: train
    num_bytes: 51447214089.0
    num_examples: 9000000
  - name: test
    num_bytes: 5714598297.0
    num_examples: 1000000
  download_size: 55552919888
  dataset_size: 57161812386.0
- config_name: climber
  features:
  - name: observation
    dtype:
      image:
        mode: RGB
  - name: action
    dtype: int32
  - name: reward
    dtype: float32
  - name: terminated
    dtype: bool
  - name: truncated
    dtype: bool
  splits:
  - name: train
    num_bytes: 24375527245.0
    num_examples: 9000000
  - name: test
    num_bytes: 2690264702.0
    num_examples: 1000000
  download_size: 26574301930
  dataset_size: 27065791947.0
- config_name: coinrun
  features:
  - name: observation
    dtype:
      image:
        mode: RGB
  - name: action
    dtype: int32
  - name: reward
    dtype: float32
  - name: terminated
    dtype: bool
  - name: truncated
    dtype: bool
  splits:
  - name: train
    num_bytes: 16788785439.0
    num_examples: 9000000
  - name: test
    num_bytes: 1875181202.0
    num_examples: 1000000
  download_size: 18088978913
  dataset_size: 18663966641.0
- config_name: dodgeball
  features:
  - name: observation
    dtype:
      image:
        mode: RGB
  - name: action
    dtype: int32
  - name: reward
    dtype: float32
  - name: terminated
    dtype: bool
  - name: truncated
    dtype: bool
  splits:
  - name: train
    num_bytes: 62421409783.0
    num_examples: 9000000
  - name: test
    num_bytes: 6939014458.0
    num_examples: 1000000
  download_size: 69244806537
  dataset_size: 69360424241.0
- config_name: fruitbot
  features:
  - name: observation
    dtype:
      image:
        mode: RGB
  - name: action
    dtype: int32
  - name: reward
    dtype: float32
  - name: terminated
    dtype: bool
  - name: truncated
    dtype: bool
  splits:
  - name: train
    num_bytes: 59648385662.0
    num_examples: 9000000
  - name: test
    num_bytes: 6610963384.0
    num_examples: 1000000
  download_size: 66466587233
  dataset_size: 66259349046.0
- config_name: heist
  features:
  - name: observation
    dtype:
      image:
        mode: RGB
  - name: action
    dtype: int32
  - name: reward
    dtype: float32
  - name: terminated
    dtype: bool
  - name: truncated
    dtype: bool
  splits:
  - name: train
    num_bytes: 55385943063.0
    num_examples: 9000000
  download_size: 45218795172
  dataset_size: 55385943063.0
- config_name: jumper
  features:
  - name: observation
    dtype:
      image:
        mode: RGB
  - name: action
    dtype: int32
  - name: reward
    dtype: float32
  - name: terminated
    dtype: bool
  - name: truncated
    dtype: bool
  splits:
  - name: train
    num_bytes: 23713574993.0
    num_examples: 9000000
  - name: test
    num_bytes: 2596720354.0
    num_examples: 1000000
  download_size: 26160892415
  dataset_size: 26310295347.0
- config_name: leaper
  features:
  - name: observation
    dtype:
      image:
        mode: RGB
  - name: action
    dtype: int32
  - name: reward
    dtype: float32
  - name: terminated
    dtype: bool
  - name: truncated
    dtype: bool
  splits:
  - name: train
    num_bytes: 44825702164.0
    num_examples: 9000000
  - name: test
    num_bytes: 4970059656.0
    num_examples: 1000000
  download_size: 48913285349
  dataset_size: 49795761820.0
- config_name: maze
  features:
  - name: observation
    dtype:
      image:
        mode: RGB
  - name: action
    dtype: int32
  - name: reward
    dtype: float32
  - name: terminated
    dtype: bool
  - name: truncated
    dtype: bool
  splits:
  - name: train
    num_bytes: 49276863929.0
    num_examples: 9000000
  - name: test
    num_bytes: 5482794667.0
    num_examples: 1000000
  download_size: 38922389228
  dataset_size: 54759658596.0
- config_name: miner
  features:
  - name: observation
    dtype:
      image:
        mode: RGB
  - name: action
    dtype: int32
  - name: reward
    dtype: float32
  - name: terminated
    dtype: bool
  - name: truncated
    dtype: bool
  splits:
  - name: train
    num_bytes: 44058761019.0
    num_examples: 9000000
  - name: test
    num_bytes: 4876567632.0
    num_examples: 1000000
  download_size: 17739617811
  dataset_size: 48935328651.0
- config_name: ninja
  features:
  - name: observation
    dtype:
      image:
        mode: RGB
  - name: action
    dtype: int32
  - name: reward
    dtype: float32
  - name: terminated
    dtype: bool
  - name: truncated
    dtype: bool
  splits:
  - name: train
    num_bytes: 28875440305.0
    num_examples: 9000000
  - name: test
    num_bytes: 3207526456.0
    num_examples: 1000000
  download_size: 32127321141
  dataset_size: 32082966761.0
- config_name: plunder
  features:
  - name: observation
    dtype:
      image:
        mode: RGB
  - name: action
    dtype: int32
  - name: reward
    dtype: float32
  - name: terminated
    dtype: bool
  - name: truncated
    dtype: bool
  splits:
  - name: train
    num_bytes: 58061310067.0
    num_examples: 9000000
  - name: test
    num_bytes: 6454993435.0
    num_examples: 1000000
  download_size: 64177454509
  dataset_size: 64516303502.0
- config_name: starpilot
  features:
  - name: observation
    dtype:
      image:
        mode: RGB
  - name: action
    dtype: int32
  - name: reward
    dtype: float32
  - name: terminated
    dtype: bool
  - name: truncated
    dtype: bool
  splits:
  - name: train
    num_bytes: 56365605339.0
    num_examples: 9000000
  - name: test
    num_bytes: 6192523494.0
    num_examples: 1000000
  download_size: 62629456972
  dataset_size: 62558128833.0
---
# Procgen Benchmark
This dataset contains expert trajectories generated by a [PPO](https://arxiv.org/abs/1707.06347) reinforcement learning agent trained on each of the 16 procedurally-generated gym environments from the [Procgen Benchmark](https://openai.com/index/procgen-benchmark/). The environments were created with `distribution_mode=easy` and an unlimited number of levels.

Disclaimer: This is not an official repository from OpenAI.

## Dataset Usage

Regular usage (for environment bigfish):
```python
from datasets import load_dataset
train_dataset = load_dataset("EpicPinkPenguin/procgen", name="bigfish", split="train")
test_dataset = load_dataset("EpicPinkPenguin/procgen", name="bigfish", split="test")
```

Usage with PyTorch (for environment bossfight):
```python
from datasets import load_dataset
train_dataset = load_dataset("EpicPinkPenguin/procgen", name="bossfight", split="train").with_format("torch")
test_dataset = load_dataset("EpicPinkPenguin/procgen", name="bossfight", split="test").with_format("torch")
```
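
The torch-formatted splits are map-style datasets, so they can also be wrapped in a standard `torch.utils.data.DataLoader` for batched training. The snippet below is a minimal sketch under that assumption; the batch size and shuffling are illustrative choices, not recommendations of this dataset.
```python
from datasets import load_dataset
from torch.utils.data import DataLoader

# Load the torch-formatted train split (bossfight as an example).
train_dataset = load_dataset(
    "EpicPinkPenguin/procgen", name="bossfight", split="train"
).with_format("torch")

# Batch size and shuffling are illustrative choices.
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)

for batch in train_loader:
    observations = batch["observation"]  # batched uint8 RGB observations
    actions = batch["action"]            # int32 action indices
    rewards = batch["reward"]            # float32 rewards
    break  # inspect a single batch only
```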

## Agent Performance
The PPO RL agent was trained for 25M steps on each environment and obtained the following final performance metrics on the evaluation environment. These values attain or surpass the performance described in "Easy Difficulty Baseline Results" in Appendix I of the Procgen paper.

| Environment |  Steps (Train)  |  Steps (Test)  | Return | Observation |
|:------------|:----------------|:---------------|:-------|:------------|
| bigfish     | 9,000,000       | 1,000,000      | 33.79  | <video controls autoplay loop src="https://cdn-uploads.huggingface.co/production/uploads/633c1daf31c06121a58f2df9/lHQXBqLdoWicXlt68I9QX.mp4"></video>  |
| bossfight   | 9,000,000       | 1,000,000      | 11.47  | <video controls autoplay loop src="https://cdn-uploads.huggingface.co/production/uploads/633c1daf31c06121a58f2df9/LPoafGi4YBWqqkuFlEN_l.mp4"></video>  |
| caveflyer   | 9,000,000       | 1,000,000      | 09.42  | <video controls autoplay loop src="https://cdn-uploads.huggingface.co/production/uploads/633c1daf31c06121a58f2df9/XVqRwu_9yfX4ECQc4At4G.mp4"></video>  |
| chaser      | 9,000,000       | 1,000,000      | 10.55  | <video controls autoplay loop src="https://cdn-uploads.huggingface.co/production/uploads/633c1daf31c06121a58f2df9/FIKVv48SThqiC1Z2PYQ7U.mp4"></video>  |
| climber     | 9,000,000       | 1,000,000      | 11.30  | <video controls autoplay loop src="https://cdn-uploads.huggingface.co/production/uploads/633c1daf31c06121a58f2df9/XJQlA7IyF9_gwUiw-FkND.mp4"></video>  |
| coinrun     | 9,000,000       | 1,000,000      | 09.02  | <video controls autoplay loop src="https://cdn-uploads.huggingface.co/production/uploads/633c1daf31c06121a58f2df9/Ucv3HZttewMRQzTL8r_Tw.mp4"></video>  |
| dodgeball   | 9,000,000       | 1,000,000      | 13.90  | <video controls autoplay loop src="https://cdn-uploads.huggingface.co/production/uploads/633c1daf31c06121a58f2df9/5HetbKuXBpO-v1jcVyLTU.mp4"></video>  |
| fruitbot    | 9,000,000       | 1,000,000      | 31.58  | <video controls autoplay loop src="https://cdn-uploads.huggingface.co/production/uploads/633c1daf31c06121a58f2df9/zKCyxXvauXjUac-5kEAWz.mp4"></video>  |
| heist       | 9,000,000       | 1,000,000      | 08.32  | <video controls autoplay loop src="https://cdn-uploads.huggingface.co/production/uploads/633c1daf31c06121a58f2df9/AdZ6XNmUN5_00BKd9BN8R.mp4"></video>  |
| jumper      | 9,000,000       | 1,000,000      | 08.10  | <video controls autoplay loop src="https://cdn-uploads.huggingface.co/production/uploads/633c1daf31c06121a58f2df9/s5k31gWK2Vc6Lp6QVzQXA.mp4"></video>  |
| leaper      | 9,000,000       | 1,000,000      | 06.32  | <video controls autoplay loop src="https://cdn-uploads.huggingface.co/production/uploads/633c1daf31c06121a58f2df9/_hDMocxjmzutc0t5FfoTX.mp4"></video>  |
| maze        | 9,000,000       | 1,000,000      | 09.95  | <video controls autoplay loop src="https://cdn-uploads.huggingface.co/production/uploads/633c1daf31c06121a58f2df9/uhNdDPuNhZpxVns91Ba-9.mp4"></video>  |
| miner       | 9,000,000       | 1,000,000      | 12.02  | <video controls autoplay loop src="https://cdn-uploads.huggingface.co/production/uploads/633c1daf31c06121a58f2df9/ElpJ8l2WHJGrprZ3-giHU.mp4"></video>  |
| ninja       | 9,000,000       | 1,000,000      | 09.32  | <video controls autoplay loop src="https://cdn-uploads.huggingface.co/production/uploads/633c1daf31c06121a58f2df9/b9i-fb2Twh8XmBBNf2DRG.mp4"></video>  |
| plunder     | 9,000,000       | 1,000,000      | 24.18  | <video controls autoplay loop src="https://cdn-uploads.huggingface.co/production/uploads/633c1daf31c06121a58f2df9/JPeGNOVzrotuYUjfzZj40.mp4"></video>  |
| starpilot   | 9,000,000       | 1,000,000      | 49.79  | <video controls autoplay loop src="https://cdn-uploads.huggingface.co/production/uploads/633c1daf31c06121a58f2df9/wY9lZgkw5tor19hCWmm6A.mp4"></video>  |


## Dataset Structure
### Data Instances
Each data instance represents a single step as a tuple of the form (observation, action, reward, terminated, truncated) = (o_t, a_t, r_t, terminated_t, truncated_t).

```python
{'action': 1,
 'observation': [[[0, 166, 253],
                  [0, 174, 255],
                  [0, 170, 251],
                  [0, 191, 255],
                  [0, 191, 255],
                  [0, 221, 255],
                  [0, 243, 255],
                  [0, 248, 255],
                  [0, 243, 255],
                  [10, 239, 255],
                  [25, 255, 255],
                  [0, 241, 255],
                  [0, 235, 255],
                  [17, 240, 255],
                  [10, 243, 255],
                  [27, 253, 255],
                  [39, 255, 255],
                  [58, 255, 255],
                  [85, 255, 255],
                  [111, 255, 255],
                  [135, 255, 255],
                  [151, 255, 255],
                  [173, 255, 255],
...
                  [0, 0, 37],
                  [0, 0, 39]]],
 'reward': 0.0,
 'terminated': False,
 'truncated': False}
```

### Data Fields
- `observation`: The current RGB observation from the environment.
- `action`: The action predicted by the agent for the current observation.
- `reward`: The reward received for the current step.
- `terminated`: Whether the episode terminated at the current step.
- `truncated`: Whether the episode was truncated at the current step.

### Data Splits
The dataset is divided into a `train` (90%) and a `test` (10%) split. Each environment dataset contains 10M steps (data points) in total.
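
Since a single environment configuration is tens of gigabytes, it can be convenient to stream it instead of downloading it in full. The sketch below relies only on the generic `streaming=True` option of `load_dataset`; it is one possible approach, not a requirement of this dataset.
```python
from datasets import load_dataset

# Stream the bigfish train split instead of downloading it up front.
train_stream = load_dataset(
    "EpicPinkPenguin/procgen", name="bigfish", split="train", streaming=True
)

# Iterate lazily over the first few steps.
for i, step in enumerate(train_stream):
    print(step["action"], step["reward"], step["terminated"], step["truncated"])
    if i == 4:
        break
```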

## Dataset Creation
The dataset was created by training an RL agent with [PPO](https://arxiv.org/abs/1707.06347) for 25M steps in each environment. The trajectories were generated by sampling from the predicted action distribution at each step (not taking the argmax). The environments were created with `distribution_mode=easy` and an unlimited number of levels.
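
The sketch below illustrates that sampling procedure under stated assumptions: `policy` is a hypothetical stand-in for the trained PPO network mapping an observation to action logits (it is not part of this dataset), `num_levels=0` is the procgen convention for an unlimited number of levels, and the classic 4-tuple gym step API used by procgen is assumed.
```python
import gym
import torch

# Easy difficulty, unlimited levels (num_levels=0 means "unlimited" in procgen).
env = gym.make(
    "procgen:procgen-bigfish-v0", distribution_mode="easy", num_levels=0
)

def policy(observation) -> torch.Tensor:
    # Hypothetical placeholder for the trained PPO network; returns action
    # logits. A uniform stand-in is used here so the sketch runs end to end.
    return torch.zeros(env.action_space.n)

obs = env.reset()
for _ in range(10):
    logits = policy(torch.as_tensor(obs.copy()))
    # Sample from the predicted action distribution instead of taking the argmax.
    dist = torch.distributions.Categorical(logits=logits)
    action = int(dist.sample())
    obs, reward, done, info = env.step(action)
    if done:
        obs = env.reset()
```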

## Procgen Benchmark
The [Procgen Benchmark](https://openai.com/index/procgen-benchmark/), released by OpenAI, consists of 16 procedurally-generated environments designed to measure how quickly reinforcement learning (RL) agents learn generalizable skills. It emphasizes experimental convenience and high diversity within and across environments, making it well suited for evaluating both sample efficiency and generalization. The benchmark allows distinct training and test sets in each environment and has served as a standard research platform for the OpenAI RL team. It aims to address the need for more diverse RL benchmarks than complex environments such as Dota and StarCraft.