Commit 1159ff7 (verified)
fracapuano (HF Staff) committed · 1 Parent(s): 43388e8

Upload policy weights, train config and readme

Files changed (4)
  1. README.md +62 -0
  2. config.json +137 -0
  3. model.safetensors +3 -0
  4. train_config.json +244 -0
README.md ADDED
@@ -0,0 +1,62 @@
+ ---
+ datasets: aractingi/droid_1.0.1
+ library_name: lerobot
+ license: apache-2.0
+ model_name: act
+ pipeline_tag: robotics
+ tags:
+ - robotics
+ - lerobot
+ - act
+ ---
+
+ # Model Card for act
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+ [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high success rates.
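+
+ As a quick orientation, here is a minimal Python inference sketch (assumptions: the repo id is taken from the training config below, the import path varies across `lerobot` versions, and the zero-filled batch only illustrates the input shapes this policy expects):
+
+ ```python
+ import torch
+ from lerobot.common.policies.act.modeling_act import ACTPolicy  # newer versions: lerobot.policies.act.modeling_act
+
+ policy = ACTPolicy.from_pretrained("fracapuano/streaming_policy")
+ policy.eval()
+
+ # Dummy batch matching this repo's input_features (batch size 1).
+ batch = {
+     "observation.state": torch.zeros(1, 8),
+     "observation.state.gripper_position": torch.zeros(1, 1),
+     "observation.state.cartesian_position": torch.zeros(1, 6),
+     "observation.state.joint_position": torch.zeros(1, 7),
+     "observation.images.wrist_left": torch.zeros(1, 3, 180, 320),
+     "observation.images.exterior_1_left": torch.zeros(1, 3, 180, 320),
+     "observation.images.exterior_2_left": torch.zeros(1, 3, 180, 320),
+ }
+ with torch.no_grad():
+     action = policy.select_action(batch)  # one action from the predicted 100-step chunk
+ ```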
+
+ This policy has been trained and pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot).
+ See the full documentation at [LeRobot Docs](https://huggingface.co/docs/lerobot/index).
+
+ ---
+
+ ## How to Get Started with the Model
+
+ For a complete walkthrough, see the [training guide](https://huggingface.co/docs/lerobot/il_robots#train-a-policy).
+ Below is a short version of how to train a policy and how to run inference/evaluation:
+
+ ### Train from scratch
+
+ ```bash
+ python -m lerobot.scripts.train \
+ --dataset.repo_id=${HF_USER}/<dataset> \
+ --policy.type=act \
+ --output_dir=outputs/train/<desired_policy_repo_id> \
+ --job_name=lerobot_training \
+ --policy.device=cuda \
+ --policy.repo_id=${HF_USER}/<desired_policy_repo_id> \
+ --wandb.enable=true
+ ```
+
+ _Writes checkpoints to `outputs/train/<desired_policy_repo_id>/checkpoints/`._
+
+ ### Evaluate the policy / run inference
+
+ ```bash
+ python -m lerobot.record \
+ --robot.type=so100_follower \
+ --dataset.repo_id=<hf_user>/eval_<dataset> \
+ --policy.path=<hf_user>/<desired_policy_repo_id> \
+ --episodes=10
+ ```
+
+ Prefix the dataset repo id with **eval\_** and point `--policy.path` at a local or Hub checkpoint.
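+
+ ### Resume an interrupted run
+
+ A sketch for resuming from the last checkpoint, per the LeRobot training guide (the checkpoint path is illustrative and depends on your `--output_dir`; flags may differ slightly across versions):
+
+ ```bash
+ python -m lerobot.scripts.train \
+ --config_path=outputs/train/<desired_policy_repo_id>/checkpoints/last/pretrained_model/train_config.json \
+ --resume=true
+ ```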
+
+ ---
+
+ ## Model Details
+
+ - **License:** apache-2.0
config.json ADDED
@@ -0,0 +1,137 @@
+ {
+     "type": "act",
+     "n_obs_steps": 1,
+     "normalization_mapping": {
+         "VISUAL": "MEAN_STD",
+         "STATE": "MEAN_STD",
+         "ACTION": "MEAN_STD"
+     },
+     "input_features": {
+         "observation.state.gripper_position": {
+             "type": "STATE",
+             "shape": [
+                 1
+             ]
+         },
+         "observation.state.cartesian_position": {
+             "type": "STATE",
+             "shape": [
+                 6
+             ]
+         },
+         "observation.state.joint_position": {
+             "type": "STATE",
+             "shape": [
+                 7
+             ]
+         },
+         "observation.state": {
+             "type": "STATE",
+             "shape": [
+                 8
+             ]
+         },
+         "observation.images.wrist_left": {
+             "type": "VISUAL",
+             "shape": [
+                 3,
+                 180,
+                 320
+             ]
+         },
+         "observation.images.exterior_1_left": {
+             "type": "VISUAL",
+             "shape": [
+                 3,
+                 180,
+                 320
+             ]
+         },
+         "observation.images.exterior_2_left": {
+             "type": "VISUAL",
+             "shape": [
+                 3,
+                 180,
+                 320
+             ]
+         }
+     },
+     "output_features": {
+         "action.gripper_position": {
+             "type": "ACTION",
+             "shape": [
+                 1
+             ]
+         },
+         "action.gripper_velocity": {
+             "type": "ACTION",
+             "shape": [
+                 1
+             ]
+         },
+         "action.cartesian_position": {
+             "type": "ACTION",
+             "shape": [
+                 6
+             ]
+         },
+         "action.cartesian_velocity": {
+             "type": "ACTION",
+             "shape": [
+                 6
+             ]
+         },
+         "action.joint_position": {
+             "type": "ACTION",
+             "shape": [
+                 7
+             ]
+         },
+         "action.joint_velocity": {
+             "type": "ACTION",
+             "shape": [
+                 7
+             ]
+         },
+         "action.original": {
+             "type": "ACTION",
+             "shape": [
+                 7
+             ]
+         },
+         "action": {
+             "type": "ACTION",
+             "shape": [
+                 8
+             ]
+         }
+     },
+     "device": "mps",
+     "use_amp": false,
+     "push_to_hub": true,
+     "repo_id": "fracapuano/streaming_policy",
+     "private": null,
+     "tags": null,
+     "license": null,
+     "chunk_size": 100,
+     "n_action_steps": 100,
+     "vision_backbone": "resnet18",
+     "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
+     "replace_final_stride_with_dilation": false,
+     "pre_norm": false,
+     "dim_model": 512,
+     "n_heads": 8,
+     "dim_feedforward": 3200,
+     "feedforward_activation": "relu",
+     "n_encoder_layers": 4,
+     "n_decoder_layers": 1,
+     "use_vae": true,
+     "latent_dim": 32,
+     "n_vae_encoder_layers": 4,
+     "temporal_ensemble_coeff": null,
+     "dropout": 0.1,
+     "kl_weight": 10.0,
+     "optimizer_lr": 1e-05,
+     "optimizer_weight_decay": 0.0001,
+     "optimizer_lr_backbone": 1e-05
+ }
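A note on the chunking fields: with `"chunk_size": 100` and `"n_action_steps": 100` (and `"temporal_ensemble_coeff": null`, i.e. no temporal ensembling), the policy predicts 100 actions per forward pass and executes all of them before predicting again. A toy loop illustrating that schedule; `policy.predict_chunk` and `env` are hypothetical stand-ins, not LeRobot APIs:

```python
CHUNK_SIZE = 100       # "chunk_size": actions predicted per forward pass
N_ACTION_STEPS = 100   # "n_action_steps": actions executed before re-predicting

def rollout(policy, env, max_steps=1000):
    """Chunked control: one policy call every N_ACTION_STEPS environment steps."""
    obs = env.reset()
    for _ in range(0, max_steps, N_ACTION_STEPS):
        chunk = policy.predict_chunk(obs)      # hypothetical; shape (CHUNK_SIZE, action_dim)
        for action in chunk[:N_ACTION_STEPS]:  # here the full chunk, since the two sizes match
            obs = env.step(action)
    return obs
```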
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1447bc129033928a0fe487485d716f96722e97827e490e761252a60a8b65c56e
+ size 206722656
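The diff above is the Git LFS pointer, not the weights themselves; it resolves to a ~207 MB safetensors blob. To inspect tensor names and shapes without loading the full checkpoint, the safetensors header can be read lazily (a small sketch, run from a local clone of the repo):

```python
from safetensors import safe_open

# Only the header is parsed here; tensor data stays on disk.
with safe_open("model.safetensors", framework="pt", device="cpu") as f:
    for name in f.keys():
        print(name, f.get_slice(name).get_shape())
```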
train_config.json ADDED
@@ -0,0 +1,244 @@
+ {
+     "dataset": {
+         "repo_id": "aractingi/droid_1.0.1",
+         "root": null,
+         "episodes": null,
+         "image_transforms": {
+             "enable": false,
+             "max_num_transforms": 3,
+             "random_order": false,
+             "tfs": {
+                 "brightness": {
+                     "weight": 1.0,
+                     "type": "ColorJitter",
+                     "kwargs": {
+                         "brightness": [
+                             0.8,
+                             1.2
+                         ]
+                     }
+                 },
+                 "contrast": {
+                     "weight": 1.0,
+                     "type": "ColorJitter",
+                     "kwargs": {
+                         "contrast": [
+                             0.8,
+                             1.2
+                         ]
+                     }
+                 },
+                 "saturation": {
+                     "weight": 1.0,
+                     "type": "ColorJitter",
+                     "kwargs": {
+                         "saturation": [
+                             0.5,
+                             1.5
+                         ]
+                     }
+                 },
+                 "hue": {
+                     "weight": 1.0,
+                     "type": "ColorJitter",
+                     "kwargs": {
+                         "hue": [
+                             -0.05,
+                             0.05
+                         ]
+                     }
+                 },
+                 "sharpness": {
+                     "weight": 1.0,
+                     "type": "SharpnessJitter",
+                     "kwargs": {
+                         "sharpness": [
+                             0.5,
+                             1.5
+                         ]
+                     }
+                 }
+             }
+         },
+         "revision": null,
+         "use_imagenet_stats": true,
+         "video_backend": "torchcodec",
+         "streaming": true
+     },
+     "env": null,
+     "policy": {
+         "type": "act",
+         "n_obs_steps": 1,
+         "normalization_mapping": {
+             "VISUAL": "MEAN_STD",
+             "STATE": "MEAN_STD",
+             "ACTION": "MEAN_STD"
+         },
+         "input_features": {
+             "observation.state.gripper_position": {
+                 "type": "STATE",
+                 "shape": [
+                     1
+                 ]
+             },
+             "observation.state.cartesian_position": {
+                 "type": "STATE",
+                 "shape": [
+                     6
+                 ]
+             },
+             "observation.state.joint_position": {
+                 "type": "STATE",
+                 "shape": [
+                     7
+                 ]
+             },
+             "observation.state": {
+                 "type": "STATE",
+                 "shape": [
+                     8
+                 ]
+             },
+             "observation.images.wrist_left": {
+                 "type": "VISUAL",
+                 "shape": [
+                     3,
+                     180,
+                     320
+                 ]
+             },
+             "observation.images.exterior_1_left": {
+                 "type": "VISUAL",
+                 "shape": [
+                     3,
+                     180,
+                     320
+                 ]
+             },
+             "observation.images.exterior_2_left": {
+                 "type": "VISUAL",
+                 "shape": [
+                     3,
+                     180,
+                     320
+                 ]
+             }
+         },
+         "output_features": {
+             "action.gripper_position": {
+                 "type": "ACTION",
+                 "shape": [
+                     1
+                 ]
+             },
+             "action.gripper_velocity": {
+                 "type": "ACTION",
+                 "shape": [
+                     1
+                 ]
+             },
+             "action.cartesian_position": {
+                 "type": "ACTION",
+                 "shape": [
+                     6
+                 ]
+             },
+             "action.cartesian_velocity": {
+                 "type": "ACTION",
+                 "shape": [
+                     6
+                 ]
+             },
+             "action.joint_position": {
+                 "type": "ACTION",
+                 "shape": [
+                     7
+                 ]
+             },
+             "action.joint_velocity": {
+                 "type": "ACTION",
+                 "shape": [
+                     7
+                 ]
+             },
+             "action.original": {
+                 "type": "ACTION",
+                 "shape": [
+                     7
+                 ]
+             },
+             "action": {
+                 "type": "ACTION",
+                 "shape": [
+                     8
+                 ]
+             }
+         },
+         "device": "mps",
+         "use_amp": false,
+         "push_to_hub": true,
+         "repo_id": "fracapuano/streaming_policy",
+         "private": null,
+         "tags": null,
+         "license": null,
+         "chunk_size": 100,
+         "n_action_steps": 100,
+         "vision_backbone": "resnet18",
+         "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1",
+         "replace_final_stride_with_dilation": false,
+         "pre_norm": false,
+         "dim_model": 512,
+         "n_heads": 8,
+         "dim_feedforward": 3200,
+         "feedforward_activation": "relu",
+         "n_encoder_layers": 4,
+         "n_decoder_layers": 1,
+         "use_vae": true,
+         "latent_dim": 32,
+         "n_vae_encoder_layers": 4,
+         "temporal_ensemble_coeff": null,
+         "dropout": 0.1,
+         "kl_weight": 10.0,
+         "optimizer_lr": 1e-05,
+         "optimizer_weight_decay": 0.0001,
+         "optimizer_lr_backbone": 1e-05
+     },
+     "output_dir": "/tmp/outputs/train/streaming_test",
+     "job_name": "streaming_test",
+     "resume": false,
+     "seed": 1000,
+     "num_workers": 4,
+     "batch_size": 8,
+     "steps": 10,
+     "eval_freq": 20000,
+     "log_freq": 200,
+     "save_checkpoint": true,
+     "save_freq": 20000,
+     "use_policy_training_preset": true,
+     "optimizer": {
+         "type": "adamw",
+         "lr": 1e-05,
+         "weight_decay": 0.0001,
+         "grad_clip_norm": 10.0,
+         "betas": [
+             0.9,
+             0.999
+         ],
+         "eps": 1e-08
+     },
+     "scheduler": null,
+     "eval": {
+         "n_episodes": 50,
+         "batch_size": 50,
+         "use_async_envs": false
+     },
+     "wandb": {
+         "enable": true,
+         "disable_artifact": false,
+         "project": "lerobot",
+         "entity": null,
+         "notes": null,
+         "run_id": "5ietmknv",
+         "mode": null
+     }
+ }
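For orientation, the `"optimizer"` block maps onto a standard AdamW setup with gradient-norm clipping. A minimal PyTorch equivalent of those settings (the `policy` module here is a stand-in, and this mirrors the values above rather than LeRobot's internal optimizer builder):

```python
import torch

# Stand-in module; in practice this is the ACT policy being trained.
policy = torch.nn.Linear(8, 8)

# AdamW configured with the hyperparameters from the block above.
optimizer = torch.optim.AdamW(
    policy.parameters(),
    lr=1e-05,
    weight_decay=0.0001,
    betas=(0.9, 0.999),
    eps=1e-08,
)

# Run each step after loss.backward(), matching "grad_clip_norm": 10.0.
torch.nn.utils.clip_grad_norm_(policy.parameters(), max_norm=10.0)
```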