jadechoghari committed
Commit ddcfa6c · verified · 1 parent: 9b365a1

add all safetensor models of mar

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +14 -0
  2. checkpoints/050000/pretrained_model/config.json +122 -0
  3. checkpoints/050000/pretrained_model/model.safetensors +3 -0
  4. checkpoints/050000/pretrained_model/train_config.json +267 -0
  5. checkpoints/050000/training_state/optimizer_param_groups.json +174 -0
  6. checkpoints/050000/training_state/optimizer_state.safetensors +3 -0
  7. checkpoints/050000/training_state/rng_state.safetensors +3 -0
  8. checkpoints/050000/training_state/scheduler_state.json +14 -0
  9. checkpoints/050000/training_state/training_step.json +3 -0
  10. checkpoints/100000/pretrained_model/config.json +122 -0
  11. checkpoints/100000/pretrained_model/model.safetensors +3 -0
  12. checkpoints/100000/pretrained_model/train_config.json +267 -0
  13. checkpoints/100000/training_state/optimizer_param_groups.json +174 -0
  14. checkpoints/100000/training_state/optimizer_state.safetensors +3 -0
  15. checkpoints/100000/training_state/rng_state.safetensors +3 -0
  16. checkpoints/100000/training_state/scheduler_state.json +14 -0
  17. checkpoints/100000/training_state/training_step.json +3 -0
  18. eval/videos_step_010000/eval_episode_0.mp4 +0 -0
  19. eval/videos_step_010000/eval_episode_1.mp4 +0 -0
  20. eval/videos_step_010000/eval_episode_2.mp4 +3 -0
  21. eval/videos_step_010000/eval_episode_3.mp4 +0 -0
  22. eval/videos_step_020000/eval_episode_0.mp4 +0 -0
  23. eval/videos_step_020000/eval_episode_1.mp4 +3 -0
  24. eval/videos_step_020000/eval_episode_2.mp4 +3 -0
  25. eval/videos_step_020000/eval_episode_3.mp4 +0 -0
  26. eval/videos_step_030000/eval_episode_0.mp4 +0 -0
  27. eval/videos_step_030000/eval_episode_1.mp4 +0 -0
  28. eval/videos_step_030000/eval_episode_2.mp4 +0 -0
  29. eval/videos_step_030000/eval_episode_3.mp4 +0 -0
  30. eval/videos_step_040000/eval_episode_0.mp4 +0 -0
  31. eval/videos_step_040000/eval_episode_1.mp4 +0 -0
  32. eval/videos_step_040000/eval_episode_2.mp4 +3 -0
  33. eval/videos_step_040000/eval_episode_3.mp4 +0 -0
  34. eval/videos_step_050000/eval_episode_0.mp4 +3 -0
  35. eval/videos_step_050000/eval_episode_1.mp4 +0 -0
  36. eval/videos_step_050000/eval_episode_2.mp4 +0 -0
  37. eval/videos_step_050000/eval_episode_3.mp4 +0 -0
  38. eval/videos_step_060000/eval_episode_0.mp4 +0 -0
  39. eval/videos_step_060000/eval_episode_1.mp4 +0 -0
  40. eval/videos_step_060000/eval_episode_2.mp4 +0 -0
  41. eval/videos_step_060000/eval_episode_3.mp4 +0 -0
  42. eval/videos_step_070000/eval_episode_0.mp4 +0 -0
  43. eval/videos_step_070000/eval_episode_1.mp4 +0 -0
  44. eval/videos_step_070000/eval_episode_2.mp4 +0 -0
  45. eval/videos_step_070000/eval_episode_3.mp4 +3 -0
  46. eval/videos_step_080000/eval_episode_0.mp4 +3 -0
  47. eval/videos_step_080000/eval_episode_1.mp4 +3 -0
  48. eval/videos_step_080000/eval_episode_2.mp4 +0 -0
  49. eval/videos_step_080000/eval_episode_3.mp4 +0 -0
  50. eval/videos_step_090000/eval_episode_0.mp4 +3 -0
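For anyone pulling these artifacts programmatically, the sketch below is one way to fetch and inspect a checkpoint with the huggingface_hub and safetensors libraries. The repo id is a hypothetical placeholder (the diff does not name the repository), so substitute the real one before running.

import json
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

REPO_ID = "jadechoghari/your-repo-name"  # hypothetical placeholder, not named in this diff

# Download one checkpoint's config and weights from the Hub (cached locally).
cfg_path = hf_hub_download(REPO_ID, "checkpoints/100000/pretrained_model/config.json")
weights_path = hf_hub_download(REPO_ID, "checkpoints/100000/pretrained_model/model.safetensors")

with open(cfg_path) as f:
    cfg = json.load(f)
print(cfg["type"], cfg["dim_model"], cfg["n_decoder_layers"])  # dot 128 8

# model.safetensors is a plain mapping of tensor names to tensors.
state_dict = load_file(weights_path)
n_params = sum(t.numel() for t in state_dict.values())
print(f"{len(state_dict)} tensors, {n_params:,} parameters")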
.gitattributes CHANGED
@@ -33,3 +33,17 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+eval/videos_step_010000/eval_episode_2.mp4 filter=lfs diff=lfs merge=lfs -text
+eval/videos_step_020000/eval_episode_1.mp4 filter=lfs diff=lfs merge=lfs -text
+eval/videos_step_020000/eval_episode_2.mp4 filter=lfs diff=lfs merge=lfs -text
+eval/videos_step_040000/eval_episode_2.mp4 filter=lfs diff=lfs merge=lfs -text
+eval/videos_step_050000/eval_episode_0.mp4 filter=lfs diff=lfs merge=lfs -text
+eval/videos_step_070000/eval_episode_3.mp4 filter=lfs diff=lfs merge=lfs -text
+eval/videos_step_080000/eval_episode_0.mp4 filter=lfs diff=lfs merge=lfs -text
+eval/videos_step_080000/eval_episode_1.mp4 filter=lfs diff=lfs merge=lfs -text
+eval/videos_step_090000/eval_episode_0.mp4 filter=lfs diff=lfs merge=lfs -text
+eval/videos_step_090000/eval_episode_2.mp4 filter=lfs diff=lfs merge=lfs -text
+eval/videos_step_100000/eval_episode_1.mp4 filter=lfs diff=lfs merge=lfs -text
+wandb/run-20250218_135755-9fxppz9z/files/media/videos/eval/video_50000_423ee38f17b29c765529.mp4 filter=lfs diff=lfs merge=lfs -text
+wandb/run-20250218_135755-9fxppz9z/files/media/videos/eval/video_80000_3c51cadaf9d526473612.mp4 filter=lfs diff=lfs merge=lfs -text
+wandb/run-20250218_135755-9fxppz9z/files/media/videos/eval/video_90000_718c92a8f4429bdceac9.mp4 filter=lfs diff=lfs merge=lfs -text
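The new .gitattributes entries route the larger evaluation and wandb MP4s through Git LFS, so a clone made without git-lfs will contain small text pointers instead of playable video. A minimal Python sketch for telling the two apart (the path is just an example from this commit):

LFS_MAGIC = b"version https://git-lfs.github.com/spec/v1"

def is_lfs_pointer(path: str) -> bool:
    # True if the file is still a Git LFS pointer stub rather than the real binary.
    with open(path, "rb") as f:
        return f.read(len(LFS_MAGIC)) == LFS_MAGIC

# If this prints True, run: git lfs pull
print(is_lfs_pointer("eval/videos_step_050000/eval_episode_0.mp4"))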
checkpoints/050000/pretrained_model/config.json ADDED
@@ -0,0 +1,122 @@
+{
+  "type": "dot",
+  "n_obs_steps": 3,
+  "normalization_mapping": {
+    "VISUAL": "MEAN_STD",
+    "STATE": "MIN_MAX",
+    "ENV": "MIN_MAX",
+    "ACTION": "MIN_MAX"
+  },
+  "input_features": {
+    "observation.state": {
+      "type": "STATE",
+      "shape": [
+        2
+      ]
+    },
+    "observation.environment_state": {
+      "type": "ENV",
+      "shape": [
+        16
+      ]
+    }
+  },
+  "output_features": {
+    "action": {
+      "type": "ACTION",
+      "shape": [
+        2
+      ]
+    }
+  },
+  "train_horizon": 30,
+  "inference_horizon": 30,
+  "lookback_obs_steps": 10,
+  "lookback_aug": 5,
+  "override_dataset_stats": false,
+  "new_dataset_stats": {
+    "action": {
+      "max": [
+        512.0,
+        512.0
+      ],
+      "min": [
+        0.0,
+        0.0
+      ]
+    },
+    "observation.environment_state": {
+      "max": [
+        512.0,
+        512.0,
+        512.0,
+        512.0,
+        512.0,
+        512.0,
+        512.0,
+        512.0,
+        512.0,
+        512.0,
+        512.0,
+        512.0,
+        512.0,
+        512.0,
+        512.0,
+        512.0
+      ],
+      "min": [
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0
+      ]
+    },
+    "observation.state": {
+      "max": [
+        512.0,
+        512.0
+      ],
+      "min": [
+        0.0,
+        0.0
+      ]
+    }
+  },
+  "vision_backbone": "resnet18",
+  "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
+  "pre_norm": true,
+  "lora_rank": 20,
+  "merge_lora": false,
+  "dim_model": 128,
+  "n_heads": 8,
+  "dim_feedforward": 512,
+  "n_decoder_layers": 8,
+  "rescale_shape": [
+    96,
+    96
+  ],
+  "crop_scale": 0.8,
+  "state_noise": 0.01,
+  "noise_decay": 0.999995,
+  "dropout": 0.1,
+  "alpha": 0.75,
+  "train_alpha": 0.9,
+  "predict_every_n": 1,
+  "return_every_n": 2,
+  "optimizer_lr": 0.0001,
+  "optimizer_min_lr": 0.0001,
+  "optimizer_lr_cycle_steps": 300000,
+  "optimizer_weight_decay": 1e-05
+}
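One detail worth calling out in this config: STATE, ENV, and ACTION features all use MIN_MAX normalization, and new_dataset_stats pins the range to the PushT pixel space [0, 512]. MIN_MAX normalization conventionally rescales values to [-1, 1]; the sketch below illustrates that mapping (treat the [-1, 1] target range as an assumption here, since the config itself only stores min and max).

import numpy as np

ACTION_MIN = np.array([0.0, 0.0])
ACTION_MAX = np.array([512.0, 512.0])

def normalize_min_max(x, lo, hi):
    # Map [lo, hi] to [-1, 1] (assumed MIN_MAX convention).
    return 2.0 * (x - lo) / (hi - lo) - 1.0

def unnormalize_min_max(x, lo, hi):
    # Inverse map, applied to the policy's predicted actions before stepping the env.
    return (x + 1.0) / 2.0 * (hi - lo) + lo

a = np.array([256.0, 128.0])                            # a PushT action in pixel coordinates
a_norm = normalize_min_max(a, ACTION_MIN, ACTION_MAX)   # -> [0.0, -0.5]
print(a_norm, unnormalize_min_max(a_norm, ACTION_MIN, ACTION_MAX))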
checkpoints/050000/pretrained_model/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9de4d5833d9e06e7df463fb28b49a5e66266c792be8eca814ee9509c9cd872b
+size 8534312
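The pointer above puts the policy weights at about 8.5 MB, i.e. roughly 2.1 million float32 parameters if the safetensors header is ignored. To peek at the tensors without loading everything, safetensors' lazy safe_open works; the local path assumes the repository was cloned with LFS enabled.

from safetensors import safe_open

path = "checkpoints/050000/pretrained_model/model.safetensors"  # local clone assumed
with safe_open(path, framework="pt") as f:
    names = list(f.keys())
    print(len(names), "tensors")
    for name in names[:5]:                     # peek at the first few entries
        print(name, tuple(f.get_tensor(name).shape))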
checkpoints/050000/pretrained_model/train_config.json ADDED
@@ -0,0 +1,267 @@
+{
+  "dataset": {
+    "repo_id": "lerobot/pusht_keypoints",
+    "episodes": null,
+    "image_transforms": {
+      "enable": false,
+      "max_num_transforms": 3,
+      "random_order": false,
+      "tfs": {
+        "brightness": {
+          "weight": 1.0,
+          "type": "ColorJitter",
+          "kwargs": {
+            "brightness": [
+              0.8,
+              1.2
+            ]
+          }
+        },
+        "contrast": {
+          "weight": 1.0,
+          "type": "ColorJitter",
+          "kwargs": {
+            "contrast": [
+              0.8,
+              1.2
+            ]
+          }
+        },
+        "saturation": {
+          "weight": 1.0,
+          "type": "ColorJitter",
+          "kwargs": {
+            "saturation": [
+              0.5,
+              1.5
+            ]
+          }
+        },
+        "hue": {
+          "weight": 1.0,
+          "type": "ColorJitter",
+          "kwargs": {
+            "hue": [
+              -0.05,
+              0.05
+            ]
+          }
+        },
+        "sharpness": {
+          "weight": 1.0,
+          "type": "SharpnessJitter",
+          "kwargs": {
+            "sharpness": [
+              0.5,
+              1.5
+            ]
+          }
+        }
+      }
+    },
+    "local_files_only": false,
+    "use_imagenet_stats": true,
+    "video_backend": "pyav"
+  },
+  "env": {
+    "type": "pusht",
+    "task": "PushT-v0",
+    "fps": 10,
+    "features": {
+      "action": {
+        "type": "ACTION",
+        "shape": [
+          2
+        ]
+      },
+      "agent_pos": {
+        "type": "STATE",
+        "shape": [
+          2
+        ]
+      },
+      "environment_state": {
+        "type": "ENV",
+        "shape": [
+          16
+        ]
+      }
+    },
+    "features_map": {
+      "action": "action",
+      "agent_pos": "observation.state",
+      "environment_state": "observation.environment_state",
+      "pixels": "observation.image"
+    },
+    "episode_length": 300,
+    "obs_type": "environment_state_agent_pos",
+    "render_mode": "rgb_array",
+    "visualization_width": 384,
+    "visualization_height": 384
+  },
+  "policy": {
+    "type": "dot",
+    "n_obs_steps": 3,
+    "normalization_mapping": {
+      "VISUAL": "MEAN_STD",
+      "STATE": "MIN_MAX",
+      "ENV": "MIN_MAX",
+      "ACTION": "MIN_MAX"
+    },
+    "input_features": {
+      "observation.state": {
+        "type": "STATE",
+        "shape": [
+          2
+        ]
+      },
+      "observation.environment_state": {
+        "type": "ENV",
+        "shape": [
+          16
+        ]
+      }
+    },
+    "output_features": {
+      "action": {
+        "type": "ACTION",
+        "shape": [
+          2
+        ]
+      }
+    },
+    "train_horizon": 30,
+    "inference_horizon": 30,
+    "lookback_obs_steps": 10,
+    "lookback_aug": 5,
+    "override_dataset_stats": false,
+    "new_dataset_stats": {
+      "action": {
+        "max": [
+          512.0,
+          512.0
+        ],
+        "min": [
+          0.0,
+          0.0
+        ]
+      },
+      "observation.environment_state": {
+        "max": [
+          512.0,
+          512.0,
+          512.0,
+          512.0,
+          512.0,
+          512.0,
+          512.0,
+          512.0,
+          512.0,
+          512.0,
+          512.0,
+          512.0,
+          512.0,
+          512.0,
+          512.0,
+          512.0
+        ],
+        "min": [
+          0.0,
+          0.0,
+          0.0,
+          0.0,
+          0.0,
+          0.0,
+          0.0,
+          0.0,
+          0.0,
+          0.0,
+          0.0,
+          0.0,
+          0.0,
+          0.0,
+          0.0,
+          0.0
+        ]
+      },
+      "observation.state": {
+        "max": [
+          512.0,
+          512.0
+        ],
+        "min": [
+          0.0,
+          0.0
+        ]
+      }
+    },
+    "vision_backbone": "resnet18",
+    "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
+    "pre_norm": true,
+    "lora_rank": 20,
+    "merge_lora": false,
+    "dim_model": 128,
+    "n_heads": 8,
+    "dim_feedforward": 512,
+    "n_decoder_layers": 8,
+    "rescale_shape": [
+      96,
+      96
+    ],
+    "crop_scale": 0.8,
+    "state_noise": 0.01,
+    "noise_decay": 0.999995,
+    "dropout": 0.1,
+    "alpha": 0.75,
+    "train_alpha": 0.9,
+    "predict_every_n": 1,
+    "return_every_n": 2,
+    "optimizer_lr": 0.0001,
+    "optimizer_min_lr": 0.0001,
+    "optimizer_lr_cycle_steps": 300000,
+    "optimizer_weight_decay": 1e-05
+  },
+  "output_dir": "outputs/train/pusht_keyponts",
+  "job_name": "pusht_dot",
+  "resume": false,
+  "device": "cuda",
+  "use_amp": true,
+  "seed": 100000,
+  "num_workers": 24,
+  "batch_size": 24,
+  "steps": 100000,
+  "eval_freq": 10000,
+  "log_freq": 1000,
+  "save_checkpoint": true,
+  "save_freq": 50000,
+  "use_policy_training_preset": true,
+  "optimizer": {
+    "type": "adamw",
+    "lr": 0.0001,
+    "weight_decay": 1e-05,
+    "grad_clip_norm": 10.0,
+    "betas": [
+      0.9,
+      0.999
+    ],
+    "eps": 1e-08
+  },
+  "scheduler": {
+    "type": "cosine_annealing",
+    "num_warmup_steps": 0,
+    "min_lr": 0.0001,
+    "T_max": 300000
+  },
+  "eval": {
+    "n_episodes": 50,
+    "batch_size": 50,
+    "use_async_envs": false
+  },
+  "wandb": {
+    "enable": true,
+    "disable_artifact": false,
+    "project": "lerobot",
+    "entity": null,
+    "notes": null
+  }
+}
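In the train config, the env block exposes raw PushT feature names and features_map translates them into the observation keys the policy consumes (agent_pos becomes observation.state, and so on). A minimal sketch of that renaming step, using only the mapping shown above:

# features_map copied from the train config above.
FEATURES_MAP = {
    "action": "action",
    "agent_pos": "observation.state",
    "environment_state": "observation.environment_state",
    "pixels": "observation.image",
}

def remap_observation(env_obs: dict) -> dict:
    # Rename raw gym observation keys to the policy's expected keys.
    return {FEATURES_MAP[k]: v for k, v in env_obs.items() if k in FEATURES_MAP}

# With obs_type "environment_state_agent_pos" the env returns these two arrays:
obs = {"agent_pos": [256.0, 256.0], "environment_state": [0.0] * 16}
print(sorted(remap_observation(obs)))  # ['observation.environment_state', 'observation.state']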
checkpoints/050000/training_state/optimizer_param_groups.json ADDED
@@ -0,0 +1,174 @@
+[
+  {
+    "lr": 0.0001,
+    "betas": [
+      0.9,
+      0.999
+    ],
+    "eps": 1e-08,
+    "weight_decay": 1e-05,
+    "amsgrad": false,
+    "foreach": null,
+    "maximize": false,
+    "capturable": false,
+    "differentiable": false,
+    "fused": null,
+    "initial_lr": 0.0001,
+    "params": [
+      0,
+      1,
+      2,
+      3,
+      4,
+      5,
+      6,
+      7,
+      8,
+      9,
+      10,
+      11,
+      12,
+      13,
+      14,
+      15,
+      16,
+      17,
+      18,
+      19,
+      20,
+      21,
+      22,
+      23,
+      24,
+      25,
+      26,
+      27,
+      28,
+      29,
+      30,
+      31,
+      32,
+      33,
+      34,
+      35,
+      36,
+      37,
+      38,
+      39,
+      40,
+      41,
+      42,
+      43,
+      44,
+      45,
+      46,
+      47,
+      48,
+      49,
+      50,
+      51,
+      52,
+      53,
+      54,
+      55,
+      56,
+      57,
+      58,
+      59,
+      60,
+      61,
+      62,
+      63,
+      64,
+      65,
+      66,
+      67,
+      68,
+      69,
+      70,
+      71,
+      72,
+      73,
+      74,
+      75,
+      76,
+      77,
+      78,
+      79,
+      80,
+      81,
+      82,
+      83,
+      84,
+      85,
+      86,
+      87,
+      88,
+      89,
+      90,
+      91,
+      92,
+      93,
+      94,
+      95,
+      96,
+      97,
+      98,
+      99,
+      100,
+      101,
+      102,
+      103,
+      104,
+      105,
+      106,
+      107,
+      108,
+      109,
+      110,
+      111,
+      112,
+      113,
+      114,
+      115,
+      116,
+      117,
+      118,
+      119,
+      120,
+      121,
+      122,
+      123,
+      124,
+      125,
+      126,
+      127,
+      128,
+      129,
+      130,
+      131,
+      132,
+      133,
+      134,
+      135,
+      136,
+      137,
+      138,
+      139,
+      140,
+      141,
+      142,
+      143,
+      144,
+      145,
+      146,
+      147,
+      148,
+      149,
+      150,
+      151,
+      152,
+      153
+    ]
+  }
+]
checkpoints/050000/training_state/optimizer_state.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:290700ee502e45ecf80c3fa96437f00fe9dd6211882144a57d77914f99c4b900
+size 17003504
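A rough sanity check on the sizes: AdamW keeps two moment tensors (exp_avg and exp_avg_sq) per trainable parameter, so the optimizer state should land near twice the weight payload, and 17,003,504 bytes is indeed about 2 x 8,534,312 (the remainder is headers and per-parameter step counters). This back-of-the-envelope check assumes everything is stored in float32.

model_bytes = 8_534_312    # pretrained_model/model.safetensors
optim_bytes = 17_003_504   # training_state/optimizer_state.safetensors

# AdamW stores exp_avg and exp_avg_sq for every trainable parameter,
# so ~2x the float32 weight payload is the expected footprint.
print(optim_bytes / model_bytes)   # ~1.99
print(model_bytes / 4)             # ~2.13 million float32 parameters, ignoring the header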
checkpoints/050000/training_state/rng_state.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:987c9a6b3896169cf5d4b1fceddab65ac4421a8a2a67cfdad0feb1b7183eb74c
+size 15708
checkpoints/050000/training_state/scheduler_state.json ADDED
@@ -0,0 +1,14 @@
+{
+  "T_max": 300000,
+  "eta_min": 0.0001,
+  "base_lrs": [
+    0.0001
+  ],
+  "last_epoch": 50000,
+  "verbose": false,
+  "_step_count": 50001,
+  "_get_lr_called_within_step": false,
+  "_last_lr": [
+    0.0001
+  ]
+}
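Note that the scheduler state records eta_min equal to the base learning rate (both 1e-4), and the train config sets num_warmup_steps to 0, so the cosine-annealing schedule is effectively flat. The sketch below evaluates the standard closed-form cosine-annealing curve to make that explicit (this is the usual formula behind PyTorch's CosineAnnealingLR; treat the exact implementation details as an assumption).

import math

def cosine_annealing_lr(step, base_lr=1e-4, eta_min=1e-4, t_max=300_000):
    # Closed form: eta_min + (base_lr - eta_min) * (1 + cos(pi * step / t_max)) / 2
    return eta_min + (base_lr - eta_min) * (1 + math.cos(math.pi * step / t_max)) / 2

for step in (0, 50_000, 100_000, 300_000):
    print(step, cosine_annealing_lr(step))   # always 1e-4 because base_lr == eta_min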
checkpoints/050000/training_state/training_step.json ADDED
@@ -0,0 +1,3 @@
+{
+  "step": 50000
+}
checkpoints/100000/pretrained_model/config.json ADDED
@@ -0,0 +1,122 @@
+{
+  "type": "dot",
+  "n_obs_steps": 3,
+  "normalization_mapping": {
+    "VISUAL": "MEAN_STD",
+    "STATE": "MIN_MAX",
+    "ENV": "MIN_MAX",
+    "ACTION": "MIN_MAX"
+  },
+  "input_features": {
+    "observation.state": {
+      "type": "STATE",
+      "shape": [
+        2
+      ]
+    },
+    "observation.environment_state": {
+      "type": "ENV",
+      "shape": [
+        16
+      ]
+    }
+  },
+  "output_features": {
+    "action": {
+      "type": "ACTION",
+      "shape": [
+        2
+      ]
+    }
+  },
+  "train_horizon": 30,
+  "inference_horizon": 30,
+  "lookback_obs_steps": 10,
+  "lookback_aug": 5,
+  "override_dataset_stats": false,
+  "new_dataset_stats": {
+    "action": {
+      "max": [
+        512.0,
+        512.0
+      ],
+      "min": [
+        0.0,
+        0.0
+      ]
+    },
+    "observation.environment_state": {
+      "max": [
+        512.0,
+        512.0,
+        512.0,
+        512.0,
+        512.0,
+        512.0,
+        512.0,
+        512.0,
+        512.0,
+        512.0,
+        512.0,
+        512.0,
+        512.0,
+        512.0,
+        512.0,
+        512.0
+      ],
+      "min": [
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0,
+        0.0
+      ]
+    },
+    "observation.state": {
+      "max": [
+        512.0,
+        512.0
+      ],
+      "min": [
+        0.0,
+        0.0
+      ]
+    }
+  },
+  "vision_backbone": "resnet18",
+  "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
+  "pre_norm": true,
+  "lora_rank": 20,
+  "merge_lora": false,
+  "dim_model": 128,
+  "n_heads": 8,
+  "dim_feedforward": 512,
+  "n_decoder_layers": 8,
+  "rescale_shape": [
+    96,
+    96
+  ],
+  "crop_scale": 0.8,
+  "state_noise": 0.01,
+  "noise_decay": 0.999995,
+  "dropout": 0.1,
+  "alpha": 0.75,
+  "train_alpha": 0.9,
+  "predict_every_n": 1,
+  "return_every_n": 2,
+  "optimizer_lr": 0.0001,
+  "optimizer_min_lr": 0.0001,
+  "optimizer_lr_cycle_steps": 300000,
+  "optimizer_weight_decay": 1e-05
+}
checkpoints/100000/pretrained_model/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:24b41ab2a27ee34e5f422f7753bc4651b092b25c3aac9b212a4f9ea98f6b87c7
+size 8534312
checkpoints/100000/pretrained_model/train_config.json ADDED
@@ -0,0 +1,267 @@
+{
+  "dataset": {
+    "repo_id": "lerobot/pusht_keypoints",
+    "episodes": null,
+    "image_transforms": {
+      "enable": false,
+      "max_num_transforms": 3,
+      "random_order": false,
+      "tfs": {
+        "brightness": {
+          "weight": 1.0,
+          "type": "ColorJitter",
+          "kwargs": {
+            "brightness": [
+              0.8,
+              1.2
+            ]
+          }
+        },
+        "contrast": {
+          "weight": 1.0,
+          "type": "ColorJitter",
+          "kwargs": {
+            "contrast": [
+              0.8,
+              1.2
+            ]
+          }
+        },
+        "saturation": {
+          "weight": 1.0,
+          "type": "ColorJitter",
+          "kwargs": {
+            "saturation": [
+              0.5,
+              1.5
+            ]
+          }
+        },
+        "hue": {
+          "weight": 1.0,
+          "type": "ColorJitter",
+          "kwargs": {
+            "hue": [
+              -0.05,
+              0.05
+            ]
+          }
+        },
+        "sharpness": {
+          "weight": 1.0,
+          "type": "SharpnessJitter",
+          "kwargs": {
+            "sharpness": [
+              0.5,
+              1.5
+            ]
+          }
+        }
+      }
+    },
+    "local_files_only": false,
+    "use_imagenet_stats": true,
+    "video_backend": "pyav"
+  },
+  "env": {
+    "type": "pusht",
+    "task": "PushT-v0",
+    "fps": 10,
+    "features": {
+      "action": {
+        "type": "ACTION",
+        "shape": [
+          2
+        ]
+      },
+      "agent_pos": {
+        "type": "STATE",
+        "shape": [
+          2
+        ]
+      },
+      "environment_state": {
+        "type": "ENV",
+        "shape": [
+          16
+        ]
+      }
+    },
+    "features_map": {
+      "action": "action",
+      "agent_pos": "observation.state",
+      "environment_state": "observation.environment_state",
+      "pixels": "observation.image"
+    },
+    "episode_length": 300,
+    "obs_type": "environment_state_agent_pos",
+    "render_mode": "rgb_array",
+    "visualization_width": 384,
+    "visualization_height": 384
+  },
+  "policy": {
+    "type": "dot",
+    "n_obs_steps": 3,
+    "normalization_mapping": {
+      "VISUAL": "MEAN_STD",
+      "STATE": "MIN_MAX",
+      "ENV": "MIN_MAX",
+      "ACTION": "MIN_MAX"
+    },
+    "input_features": {
+      "observation.state": {
+        "type": "STATE",
+        "shape": [
+          2
+        ]
+      },
+      "observation.environment_state": {
+        "type": "ENV",
+        "shape": [
+          16
+        ]
+      }
+    },
+    "output_features": {
+      "action": {
+        "type": "ACTION",
+        "shape": [
+          2
+        ]
+      }
+    },
+    "train_horizon": 30,
+    "inference_horizon": 30,
+    "lookback_obs_steps": 10,
+    "lookback_aug": 5,
+    "override_dataset_stats": false,
+    "new_dataset_stats": {
+      "action": {
+        "max": [
+          512.0,
+          512.0
+        ],
+        "min": [
+          0.0,
+          0.0
+        ]
+      },
+      "observation.environment_state": {
+        "max": [
+          512.0,
+          512.0,
+          512.0,
+          512.0,
+          512.0,
+          512.0,
+          512.0,
+          512.0,
+          512.0,
+          512.0,
+          512.0,
+          512.0,
+          512.0,
+          512.0,
+          512.0,
+          512.0
+        ],
+        "min": [
+          0.0,
+          0.0,
+          0.0,
+          0.0,
+          0.0,
+          0.0,
+          0.0,
+          0.0,
+          0.0,
+          0.0,
+          0.0,
+          0.0,
+          0.0,
+          0.0,
+          0.0,
+          0.0
+        ]
+      },
+      "observation.state": {
+        "max": [
+          512.0,
+          512.0
+        ],
+        "min": [
+          0.0,
+          0.0
+        ]
+      }
+    },
+    "vision_backbone": "resnet18",
+    "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
+    "pre_norm": true,
+    "lora_rank": 20,
+    "merge_lora": false,
+    "dim_model": 128,
+    "n_heads": 8,
+    "dim_feedforward": 512,
+    "n_decoder_layers": 8,
+    "rescale_shape": [
+      96,
+      96
+    ],
+    "crop_scale": 0.8,
+    "state_noise": 0.01,
+    "noise_decay": 0.999995,
+    "dropout": 0.1,
+    "alpha": 0.75,
+    "train_alpha": 0.9,
+    "predict_every_n": 1,
+    "return_every_n": 2,
+    "optimizer_lr": 0.0001,
+    "optimizer_min_lr": 0.0001,
+    "optimizer_lr_cycle_steps": 300000,
+    "optimizer_weight_decay": 1e-05
+  },
+  "output_dir": "outputs/train/pusht_keyponts",
+  "job_name": "pusht_dot",
+  "resume": false,
+  "device": "cuda",
+  "use_amp": true,
+  "seed": 100000,
+  "num_workers": 24,
+  "batch_size": 24,
+  "steps": 100000,
+  "eval_freq": 10000,
+  "log_freq": 1000,
+  "save_checkpoint": true,
+  "save_freq": 50000,
+  "use_policy_training_preset": true,
+  "optimizer": {
+    "type": "adamw",
+    "lr": 0.0001,
+    "weight_decay": 1e-05,
+    "grad_clip_norm": 10.0,
+    "betas": [
+      0.9,
+      0.999
+    ],
+    "eps": 1e-08
+  },
+  "scheduler": {
+    "type": "cosine_annealing",
+    "num_warmup_steps": 0,
+    "min_lr": 0.0001,
+    "T_max": 300000
+  },
+  "eval": {
+    "n_episodes": 50,
+    "batch_size": 50,
+    "use_async_envs": false
+  },
+  "wandb": {
+    "enable": true,
+    "disable_artifact": false,
+    "project": "lerobot",
+    "entity": null,
+    "notes": null
+  }
+}
checkpoints/100000/training_state/optimizer_param_groups.json ADDED
@@ -0,0 +1,174 @@
+[
+  {
+    "lr": 0.0001,
+    "betas": [
+      0.9,
+      0.999
+    ],
+    "eps": 1e-08,
+    "weight_decay": 1e-05,
+    "amsgrad": false,
+    "foreach": null,
+    "maximize": false,
+    "capturable": false,
+    "differentiable": false,
+    "fused": null,
+    "initial_lr": 0.0001,
+    "params": [
+      0,
+      1,
+      2,
+      3,
+      4,
+      5,
+      6,
+      7,
+      8,
+      9,
+      10,
+      11,
+      12,
+      13,
+      14,
+      15,
+      16,
+      17,
+      18,
+      19,
+      20,
+      21,
+      22,
+      23,
+      24,
+      25,
+      26,
+      27,
+      28,
+      29,
+      30,
+      31,
+      32,
+      33,
+      34,
+      35,
+      36,
+      37,
+      38,
+      39,
+      40,
+      41,
+      42,
+      43,
+      44,
+      45,
+      46,
+      47,
+      48,
+      49,
+      50,
+      51,
+      52,
+      53,
+      54,
+      55,
+      56,
+      57,
+      58,
+      59,
+      60,
+      61,
+      62,
+      63,
+      64,
+      65,
+      66,
+      67,
+      68,
+      69,
+      70,
+      71,
+      72,
+      73,
+      74,
+      75,
+      76,
+      77,
+      78,
+      79,
+      80,
+      81,
+      82,
+      83,
+      84,
+      85,
+      86,
+      87,
+      88,
+      89,
+      90,
+      91,
+      92,
+      93,
+      94,
+      95,
+      96,
+      97,
+      98,
+      99,
+      100,
+      101,
+      102,
+      103,
+      104,
+      105,
+      106,
+      107,
+      108,
+      109,
+      110,
+      111,
+      112,
+      113,
+      114,
+      115,
+      116,
+      117,
+      118,
+      119,
+      120,
+      121,
+      122,
+      123,
+      124,
+      125,
+      126,
+      127,
+      128,
+      129,
+      130,
+      131,
+      132,
+      133,
+      134,
+      135,
+      136,
+      137,
+      138,
+      139,
+      140,
+      141,
+      142,
+      143,
+      144,
+      145,
+      146,
+      147,
+      148,
+      149,
+      150,
+      151,
+      152,
+      153
+    ]
+  }
+]
checkpoints/100000/training_state/optimizer_state.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f501041c30806fedcc03e5943d40ab43821fafc20a625e5828abefcf5fa90c48
+size 17003504
checkpoints/100000/training_state/rng_state.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:993f9d868f14ec177d63f17f8a013e5d55f3c10e28d08dbcb9cd1f7624d83360
+size 15708
checkpoints/100000/training_state/scheduler_state.json ADDED
@@ -0,0 +1,14 @@
+{
+  "T_max": 300000,
+  "eta_min": 0.0001,
+  "base_lrs": [
+    0.0001
+  ],
+  "last_epoch": 100000,
+  "verbose": false,
+  "_step_count": 100001,
+  "_get_lr_called_within_step": false,
+  "_last_lr": [
+    0.0001
+  ]
+}
checkpoints/100000/training_state/training_step.json ADDED
@@ -0,0 +1,3 @@
+{
+  "step": 100000
+}
eval/videos_step_010000/eval_episode_0.mp4 ADDED
Binary file (46.3 kB).

eval/videos_step_010000/eval_episode_1.mp4 ADDED
Binary file (62.8 kB).

eval/videos_step_010000/eval_episode_2.mp4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f83d92a193e648e224020592d0163287e4d95b734e16ab6e755f5dc9831756f
+size 110520

eval/videos_step_010000/eval_episode_3.mp4 ADDED
Binary file (78 kB).

eval/videos_step_020000/eval_episode_0.mp4 ADDED
Binary file (85.9 kB).

eval/videos_step_020000/eval_episode_1.mp4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d2aad4e084a6fd1f3e3f9fde0899caf01630d9f89465c8bd0bed3961d8baf56b
+size 110656

eval/videos_step_020000/eval_episode_2.mp4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6c8d07849bd6549f81f0d5987ab315319eea9e7a5261ed13bf9b4958580b9a9
+size 107730

eval/videos_step_020000/eval_episode_3.mp4 ADDED
Binary file (50.6 kB).

eval/videos_step_030000/eval_episode_0.mp4 ADDED
Binary file (70.2 kB).

eval/videos_step_030000/eval_episode_1.mp4 ADDED
Binary file (72.7 kB).

eval/videos_step_030000/eval_episode_2.mp4 ADDED
Binary file (54.8 kB).

eval/videos_step_030000/eval_episode_3.mp4 ADDED
Binary file (66.2 kB).

eval/videos_step_040000/eval_episode_0.mp4 ADDED
Binary file (73.4 kB).

eval/videos_step_040000/eval_episode_1.mp4 ADDED
Binary file (43 kB).

eval/videos_step_040000/eval_episode_2.mp4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c0df8d09f71e2929213ce2f9dbc7b7ef2b874c2223ed612e48bb5aff2a2ea1c
+size 122381

eval/videos_step_040000/eval_episode_3.mp4 ADDED
Binary file (46.2 kB).

eval/videos_step_050000/eval_episode_0.mp4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:423ee38f17b29c765529f3c21b8ecc42588ec93a39dc4eb5b20090755a4c80e8
+size 118818

eval/videos_step_050000/eval_episode_1.mp4 ADDED
Binary file (77.2 kB).

eval/videos_step_050000/eval_episode_2.mp4 ADDED
Binary file (40.9 kB).

eval/videos_step_050000/eval_episode_3.mp4 ADDED
Binary file (98.1 kB).

eval/videos_step_060000/eval_episode_0.mp4 ADDED
Binary file (89.4 kB).

eval/videos_step_060000/eval_episode_1.mp4 ADDED
Binary file (44.1 kB).

eval/videos_step_060000/eval_episode_2.mp4 ADDED
Binary file (48.4 kB).

eval/videos_step_060000/eval_episode_3.mp4 ADDED
Binary file (50.3 kB).

eval/videos_step_070000/eval_episode_0.mp4 ADDED
Binary file (48.5 kB).

eval/videos_step_070000/eval_episode_1.mp4 ADDED
Binary file (27.2 kB).

eval/videos_step_070000/eval_episode_2.mp4 ADDED
Binary file (46.2 kB).

eval/videos_step_070000/eval_episode_3.mp4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bad6a0e19f46850306f44a58eef85b2b25ac1740cb150e0addd64d531fc6ad4d
+size 154970

eval/videos_step_080000/eval_episode_0.mp4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c51cadaf9d526473612d78ccd5f77f7e959e296314ed4f6bbed5b7c8a485f67
+size 110152

eval/videos_step_080000/eval_episode_1.mp4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a73fffe4de439c7ab5a3c47f868e0a673f1eddac073ca087cb0f5bc7aa036bb
+size 106163

eval/videos_step_080000/eval_episode_2.mp4 ADDED
Binary file (51.1 kB).

eval/videos_step_080000/eval_episode_3.mp4 ADDED
Binary file (90.1 kB).

eval/videos_step_090000/eval_episode_0.mp4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:718c92a8f4429bdceac9e2942e01377579b7525afab7d39d1cf26f0df58bf4f7
+size 129860