Saving weights and logs of step 10000

Files changed:
- events.out.tfevents.1642349949.t1v-n-e1a08808-w-0.1786795.0.v2 (+3 -0)
- flax_model.msgpack (+1 -1)
- run_mlm_flax.py (+6 -1)
- run_step2.sh (+1 -1)
- run_step3.sh (+29 -0)
events.out.tfevents.1642349949.t1v-n-e1a08808-w-0.1786795.0.v2
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4dec4f33794c71cb36e2174526c6bf55d893d81add611f5fa83f296421cacbb8
+size 1470136
flax_model.msgpack
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:b23fc406171b4262000759c2ab5d18fabe36374906b73b564b726f4a8907d3ec
 size 498796983
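Both files above are Git LFS pointers rather than the binary payloads themselves: the repo versions only a three-line stub (spec version, content hash, byte size) while the roughly 0.5 GB of model weights live in LFS storage, which is why a new checkpoint shows up as a one-line oid change. A minimal Python sketch (not part of this repo) of how such a pointer breaks down:

def parse_lfs_pointer(text: str) -> dict:
    """Split a Git LFS pointer file into its key/value fields."""
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:b23fc406171b4262000759c2ab5d18fabe36374906b73b564b726f4a8907d3ec\n"
    "size 498796983\n"
)
print(parse_lfs_pointer(pointer))
# {'version': 'https://git-lfs.github.com/spec/v1', 'oid': 'sha256:b23f...', 'size': '498796983'}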
run_mlm_flax.py
CHANGED
@@ -129,6 +129,10 @@ class DataTrainingArguments:
     static_learning_rate: bool = field(
         default=False, metadata={"help": "Use a non-decaying learning rate"}
     )
+    end_learning_rate: float = field(
+        default=0, metadata={"help": "End learning rate. Will be ignored if static learning rate is set"}
+    )
+
     auth_token: bool = field(
         default=False, metadata={"help": "Use authorisation token"}
     )
@@ -510,6 +514,8 @@ if __name__ == "__main__":

     if data_args.static_learning_rate:
         end_lr_value = training_args.learning_rate
+    elif data_args.end_learning_rate:
+        end_lr_value = data_args.end_learning_rate
     else:
         end_lr_value = 0

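The hunk above picks the decay target: a static run holds the peak rate, the new --end_learning_rate flag sets a custom floor, and the default still decays to zero. A minimal sketch of how end_lr_value plausibly feeds the schedule, assuming the fork keeps the optax-based create_learning_rate_fn of the upstream run_mlm_flax example (the exact wiring and signature are not shown in this diff):

import optax

def create_learning_rate_fn(num_train_steps, num_warmup_steps, learning_rate, end_lr_value):
    """Linear warmup to `learning_rate`, then linear decay to `end_lr_value`.

    With --static_learning_rate, end_lr_value == learning_rate, so the decay
    segment is flat; with --end_learning_rate="2e-4" it bottoms out there.
    """
    warmup_fn = optax.linear_schedule(
        init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps
    )
    decay_fn = optax.linear_schedule(
        init_value=learning_rate,
        end_value=end_lr_value,
        transition_steps=num_train_steps - num_warmup_steps,
    )
    return optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps])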
@@ -622,7 +628,6 @@ if __name__ == "__main__":

     # Generate an epoch by shuffling sampling indices from the train dataset
     num_train_samples = len(tokenized_datasets["train"])
-    print(f'Total number of training samples: {num_train_samples}')
     train_samples_idx = jax.random.permutation(input_rng, jnp.arange(num_train_samples))
     train_batch_idx = generate_batch_splits(train_samples_idx, train_batch_size)

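For reference, a sketch of what a helper like generate_batch_splits typically does in the upstream run_mlm_flax example (this fork's exact implementation is not part of the diff): it chops the shuffled index array into full batches and drops the tail so every step sees a complete batch.

import numpy as np

def generate_batch_splits(samples_idx, batch_size):
    """Split shuffled sample indices into equally sized batches, dropping the remainder."""
    num_samples = len(samples_idx)
    samples_to_remove = num_samples % batch_size
    if samples_to_remove != 0:
        samples_idx = samples_idx[:-samples_to_remove]
    sections_split = num_samples // batch_size
    return np.split(samples_idx, sections_split)

# e.g. 10 shuffled indices with batch_size=4 -> two batches of 4, remainder dropped
print(generate_batch_splits(np.random.permutation(10), 4))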
run_step2.sh
CHANGED
@@ -14,7 +14,7 @@
 --learning_rate="4e-4" \
 --warmup_steps="0" \
 --overwrite_output_dir \
---num_train_epochs="
+--num_train_epochs="1" \
 --adam_beta1="0.9" \
 --adam_beta2="0.98" \
 --adam_epsilon="1e-6" \
run_step3.sh
ADDED
@@ -0,0 +1,29 @@
+./run_mlm_flax.py \
+--output_dir="./" \
+--model_type="roberta" \
+--model_name_or_path="./" \
+--config_name="./" \
+--tokenizer_name="./" \
+--train_file /mnt/disks/flaxdisk/corpus/train_3_4.json \
+--validation_file /mnt/disks/flaxdisk/corpus/validation.json \
+--cache_dir="/mnt/disks/flaxdisk/cache/" \
+--max_seq_length="128" \
+--weight_decay="0.01" \
+--per_device_train_batch_size="192" \
+--per_device_eval_batch_size="192" \
+--learning_rate="4e-4" \
+--end_learning_rate="2e-4" \
+--warmup_steps="0" \
+--overwrite_output_dir \
+--num_train_epochs="1" \
+--adam_beta1="0.9" \
+--adam_beta2="0.98" \
+--adam_epsilon="1e-6" \
+--logging_steps="10000" \
+--save_steps="10000" \
+--eval_steps="10000" \
+--preprocessing_num_workers="64" \
+--auth_token="True" \
+--dtype="bfloat16" \
+--push_to_hub
+
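run_step3.sh resumes from the step-2 checkpoint in place (--model_name_or_path="./") and is the first consumer of the new flag, decaying from 4e-4 down to 2e-4 instead of to zero. A quick check of those endpoints, reusing the create_learning_rate_fn sketch above (the step count here is hypothetical; the real one is derived from the corpus and batch size inside the script):

schedule = create_learning_rate_fn(
    num_train_steps=250_000,  # hypothetical step count for illustration
    num_warmup_steps=0,
    learning_rate=4e-4,
    end_lr_value=2e-4,
)
print(float(schedule(0)), float(schedule(250_000)))  # ~4e-4 at the start, ~2e-4 at the end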