diff --git a/.gitattributes b/.gitattributes
index c7d9f3332a950355d5a77d85000f05e6f45435ea..9465f3dee05702760cf48275b10a710eb23dfee3 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
diff --git a/config.json b/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..faafcee93060917fc6c1428a7e986b02e7496c66
--- /dev/null
+++ b/config.json
@@ -0,0 +1,31 @@
+{
+ "apply_residual_connection_post_layernorm": false,
+ "architectures": [
+ "BloomModel"
+ ],
+ "attention_dropout": 0.0,
+ "attention_softmax_in_fp32": true,
+ "bias_dropout_fusion": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_dropout": 0.0,
+ "initializer_range": 0.02,
+ "layer_norm_epsilon": 1e-05,
+ "masked_softmax_fusion": true,
+ "model_type": "bloom",
+ "n_embed": 4096,
+ "n_inner": null,
+ "n_layer": 30,
+ "num_attention_heads": 32,
+ "offset_alibi": 100,
+ "pad_token_id": 3,
+ "pretraining_tp": 4,
+ "seq_length": 2048,
+ "skip_bias_add": true,
+ "skip_bias_add_qkv": false,
+ "slow_but_exact": false,
+ "transformers_version": "4.21.0.dev0",
+ "unk_token_id": 0,
+ "use_cache": true,
+ "vocab_size": 250880
+}
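
Note on config.json: the values above match the BLOOM-7b1 architecture (30 layers, 32 attention heads, hidden size 4096, vocab_size 250880, ALiBi-based positions). A minimal sketch of loading it with transformers, assuming a local checkout named bloomz-7b1-xp3ru (the model_name_or_path used in the evaluation arguments below):

```python
# Minimal sketch; assumes `transformers` and `torch` are installed and that
# this repository is checked out locally as ./bloomz-7b1-xp3ru.
from transformers import AutoConfig, AutoModel

config = AutoConfig.from_pretrained("./bloomz-7b1-xp3ru")
print(config.model_type, config.n_layer, config.num_attention_heads)  # bloom 30 32

# Loading the weights resolves the sharded checkpoint through
# pytorch_model.bin.index.json, added further down in this diff.
model = AutoModel.from_pretrained("./bloomz-7b1-xp3ru", torch_dtype="auto")
```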
diff --git a/evaluation/Muennighoff_xstory_cloze/ru/Answer_Given_options/results.json b/evaluation/Muennighoff_xstory_cloze/ru/Answer_Given_options/results.json
new file mode 100644
index 0000000000000000000000000000000000000000..b3f523c5a192a842b798ac053530f8ad331b8e76
--- /dev/null
+++ b/evaluation/Muennighoff_xstory_cloze/ru/Answer_Given_options/results.json
@@ -0,0 +1,9 @@
+{
+ "dataset_name": "Muennighoff/xstory_cloze",
+ "dataset_config_name": "ru",
+ "template_name": "Answer Given options",
+ "evaluation": {
+ "accuracy": 0.6340172071475844
+ },
+ "arguments": "Namespace(dataset_name='Muennighoff/xstory_cloze', dataset_config_name='ru', template_config_name='en', template_name='Answer Given options', split='validation', max_length=2048, target_max_length=256, pad_to_max_length=False, model_name_or_path='bloomz-7b1-xp3ru', config_name=None, tokenizer_name=None, use_slow_tokenizer=False, per_device_eval_batch_size=4, output_dir='bloomz-7b1-xp3ru/evaluation', debug=False, prefixlm=False, dtype='float16', nospace=False, scratchpad=False)"
+}
\ No newline at end of file
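
Each results.json added below follows the same shape: dataset name, language config, prompt template, an evaluation block with a single accuracy, and the argparse Namespace the run was launched with. A small sketch, assuming only the directory layout shown in this diff, for collecting them into one overview:

```python
# Hedged sketch: walk evaluation/<dataset>/<lang>/<template>/results.json and
# print one accuracy row per template. Standard library only.
import json
from pathlib import Path

rows = []
for path in sorted(Path("evaluation").glob("*/*/*/results.json")):
    result = json.loads(path.read_text())
    rows.append((result["dataset_name"],
                 result["dataset_config_name"],
                 result["template_name"],
                 result["evaluation"]["accuracy"]))

for dataset, lang, template, acc in rows:
    print(f"{dataset:28s} {lang:3s} {template:32s} {acc:.4f}")
```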
diff --git a/evaluation/Muennighoff_xstory_cloze/ru/Choose_Story_Ending/results.json b/evaluation/Muennighoff_xstory_cloze/ru/Choose_Story_Ending/results.json
new file mode 100644
index 0000000000000000000000000000000000000000..90de87e09f914b7cbe11d88a4fd238bf77acbb05
--- /dev/null
+++ b/evaluation/Muennighoff_xstory_cloze/ru/Choose_Story_Ending/results.json
@@ -0,0 +1,9 @@
+{
+ "dataset_name": "Muennighoff/xstory_cloze",
+ "dataset_config_name": "ru",
+ "template_name": "Choose Story Ending",
+ "evaluation": {
+ "accuracy": 0.7908669755129054
+ },
+ "arguments": "Namespace(dataset_name='Muennighoff/xstory_cloze', dataset_config_name='ru', template_config_name='en', template_name='Choose Story Ending', split='validation', max_length=2048, target_max_length=256, pad_to_max_length=False, model_name_or_path='bloomz-7b1-xp3ru', config_name=None, tokenizer_name=None, use_slow_tokenizer=False, per_device_eval_batch_size=4, output_dir='bloomz-7b1-xp3ru/evaluation', debug=False, prefixlm=False, dtype='float16', nospace=False, scratchpad=False)"
+}
\ No newline at end of file
diff --git a/evaluation/Muennighoff_xstory_cloze/ru/Generate_Ending/results.json b/evaluation/Muennighoff_xstory_cloze/ru/Generate_Ending/results.json
new file mode 100644
index 0000000000000000000000000000000000000000..3c123d5643e50001aa4ad1dc077c0f63e7eae5a1
--- /dev/null
+++ b/evaluation/Muennighoff_xstory_cloze/ru/Generate_Ending/results.json
@@ -0,0 +1,9 @@
+{
+ "dataset_name": "Muennighoff/xstory_cloze",
+ "dataset_config_name": "ru",
+ "template_name": "Generate Ending",
+ "evaluation": {
+ "accuracy": 0.5532759761747187
+ },
+ "arguments": "Namespace(dataset_name='Muennighoff/xstory_cloze', dataset_config_name='ru', template_config_name='en', template_name='Generate Ending', split='validation', max_length=2048, target_max_length=256, pad_to_max_length=False, model_name_or_path='bloomz-7b1-xp3ru', config_name=None, tokenizer_name=None, use_slow_tokenizer=False, per_device_eval_batch_size=4, output_dir='bloomz-7b1-xp3ru/evaluation', debug=False, prefixlm=False, dtype='float16', nospace=False, scratchpad=False)"
+}
\ No newline at end of file
diff --git a/evaluation/Muennighoff_xstory_cloze/ru/Novel_Correct_Ending/results.json b/evaluation/Muennighoff_xstory_cloze/ru/Novel_Correct_Ending/results.json
new file mode 100644
index 0000000000000000000000000000000000000000..5725859dd6d90bbccffbed55c62d2198a1e732d0
--- /dev/null
+++ b/evaluation/Muennighoff_xstory_cloze/ru/Novel_Correct_Ending/results.json
@@ -0,0 +1,9 @@
+{
+ "dataset_name": "Muennighoff/xstory_cloze",
+ "dataset_config_name": "ru",
+ "template_name": "Novel Correct Ending",
+ "evaluation": {
+ "accuracy": 0.7379219060225016
+ },
+ "arguments": "Namespace(dataset_name='Muennighoff/xstory_cloze', dataset_config_name='ru', template_config_name='en', template_name='Novel Correct Ending', split='validation', max_length=2048, target_max_length=256, pad_to_max_length=False, model_name_or_path='bloomz-7b1-xp3ru', config_name=None, tokenizer_name=None, use_slow_tokenizer=False, per_device_eval_batch_size=4, output_dir='bloomz-7b1-xp3ru/evaluation', debug=False, prefixlm=False, dtype='float16', nospace=False, scratchpad=False)"
+}
\ No newline at end of file
diff --git a/evaluation/Muennighoff_xstory_cloze/ru/Story_Continuation_and_Options/results.json b/evaluation/Muennighoff_xstory_cloze/ru/Story_Continuation_and_Options/results.json
new file mode 100644
index 0000000000000000000000000000000000000000..b43a1ff7d636bc7b9e318779c9e53f05a74e3c12
--- /dev/null
+++ b/evaluation/Muennighoff_xstory_cloze/ru/Story_Continuation_and_Options/results.json
@@ -0,0 +1,9 @@
+{
+ "dataset_name": "Muennighoff/xstory_cloze",
+ "dataset_config_name": "ru",
+ "template_name": "Story Continuation and Options",
+ "evaluation": {
+ "accuracy": 0.7531436135009927
+ },
+ "arguments": "Namespace(dataset_name='Muennighoff/xstory_cloze', dataset_config_name='ru', template_config_name='en', template_name='Story Continuation and Options', split='validation', max_length=2048, target_max_length=256, pad_to_max_length=False, model_name_or_path='bloomz-7b1-xp3ru', config_name=None, tokenizer_name=None, use_slow_tokenizer=False, per_device_eval_batch_size=4, output_dir='bloomz-7b1-xp3ru/evaluation', debug=False, prefixlm=False, dtype='float16', nospace=False, scratchpad=False)"
+}
\ No newline at end of file
diff --git a/evaluation/Muennighoff_xwinograd/ru/Replace/results.json b/evaluation/Muennighoff_xwinograd/ru/Replace/results.json
new file mode 100644
index 0000000000000000000000000000000000000000..7e06315cb8657306f0a5df32f65851d874ea0757
--- /dev/null
+++ b/evaluation/Muennighoff_xwinograd/ru/Replace/results.json
@@ -0,0 +1,9 @@
+{
+ "dataset_name": "Muennighoff/xwinograd",
+ "dataset_config_name": "ru",
+ "template_name": "Replace",
+ "evaluation": {
+ "accuracy": 0.5396825396825397
+ },
+ "arguments": "Namespace(dataset_name='Muennighoff/xwinograd', dataset_config_name='ru', template_config_name='en', template_name='Replace', split='test', max_length=2048, target_max_length=256, pad_to_max_length=False, model_name_or_path='bloomz-7b1-xp3ru', config_name=None, tokenizer_name=None, use_slow_tokenizer=False, per_device_eval_batch_size=4, output_dir='bloomz-7b1-xp3ru/evaluation', debug=False, prefixlm=False, dtype='float16', nospace=False, scratchpad=False)"
+}
\ No newline at end of file
diff --git a/evaluation/Muennighoff_xwinograd/ru/True_or_False/results.json b/evaluation/Muennighoff_xwinograd/ru/True_or_False/results.json
new file mode 100644
index 0000000000000000000000000000000000000000..2b26a8196e0bbe5aeac050fd7a2a98e63a1b3485
--- /dev/null
+++ b/evaluation/Muennighoff_xwinograd/ru/True_or_False/results.json
@@ -0,0 +1,9 @@
+{
+ "dataset_name": "Muennighoff/xwinograd",
+ "dataset_config_name": "ru",
+ "template_name": "True or False",
+ "evaluation": {
+ "accuracy": 0.46984126984126984
+ },
+ "arguments": "Namespace(dataset_name='Muennighoff/xwinograd', dataset_config_name='ru', template_config_name='en', template_name='True or False', split='test', max_length=2048, target_max_length=256, pad_to_max_length=False, model_name_or_path='bloomz-7b1-xp3ru', config_name=None, tokenizer_name=None, use_slow_tokenizer=False, per_device_eval_batch_size=4, output_dir='bloomz-7b1-xp3ru/evaluation', debug=False, prefixlm=False, dtype='float16', nospace=False, scratchpad=False)"
+}
\ No newline at end of file
diff --git a/evaluation/Muennighoff_xwinograd/ru/does_underscore_refer_to/results.json b/evaluation/Muennighoff_xwinograd/ru/does_underscore_refer_to/results.json
new file mode 100644
index 0000000000000000000000000000000000000000..0c2e528c25d06cf4deb78b14fcb80f0f8481e64d
--- /dev/null
+++ b/evaluation/Muennighoff_xwinograd/ru/does_underscore_refer_to/results.json
@@ -0,0 +1,9 @@
+{
+ "dataset_name": "Muennighoff/xwinograd",
+ "dataset_config_name": "ru",
+ "template_name": "does underscore refer to",
+ "evaluation": {
+ "accuracy": 0.5079365079365079
+ },
+ "arguments": "Namespace(dataset_name='Muennighoff/xwinograd', dataset_config_name='ru', template_config_name='en', template_name='does underscore refer to', split='test', max_length=2048, target_max_length=256, pad_to_max_length=False, model_name_or_path='bloomz-7b1-xp3ru', config_name=None, tokenizer_name=None, use_slow_tokenizer=False, per_device_eval_batch_size=4, output_dir='bloomz-7b1-xp3ru/evaluation', debug=False, prefixlm=False, dtype='float16', nospace=False, scratchpad=False)"
+}
\ No newline at end of file
diff --git a/evaluation/Muennighoff_xwinograd/ru/stand_for/results.json b/evaluation/Muennighoff_xwinograd/ru/stand_for/results.json
new file mode 100644
index 0000000000000000000000000000000000000000..aecbadd544336f7fee82e03787fc876ffa7c9389
--- /dev/null
+++ b/evaluation/Muennighoff_xwinograd/ru/stand_for/results.json
@@ -0,0 +1,9 @@
+{
+ "dataset_name": "Muennighoff/xwinograd",
+ "dataset_config_name": "ru",
+ "template_name": "stand for",
+ "evaluation": {
+ "accuracy": 0.4888888888888889
+ },
+ "arguments": "Namespace(dataset_name='Muennighoff/xwinograd', dataset_config_name='ru', template_config_name='en', template_name='stand for', split='test', max_length=2048, target_max_length=256, pad_to_max_length=False, model_name_or_path='bloomz-7b1-xp3ru', config_name=None, tokenizer_name=None, use_slow_tokenizer=False, per_device_eval_batch_size=4, output_dir='bloomz-7b1-xp3ru/evaluation', debug=False, prefixlm=False, dtype='float16', nospace=False, scratchpad=False)"
+}
\ No newline at end of file
diff --git a/evaluation/Muennighoff_xwinograd/ru/underscore_refer_to/results.json b/evaluation/Muennighoff_xwinograd/ru/underscore_refer_to/results.json
new file mode 100644
index 0000000000000000000000000000000000000000..8855ce8c3e7291ec561340cbb3f5ee6d98683314
--- /dev/null
+++ b/evaluation/Muennighoff_xwinograd/ru/underscore_refer_to/results.json
@@ -0,0 +1,9 @@
+{
+ "dataset_name": "Muennighoff/xwinograd",
+ "dataset_config_name": "ru",
+ "template_name": "underscore refer to",
+ "evaluation": {
+ "accuracy": 0.5079365079365079
+ },
+ "arguments": "Namespace(dataset_name='Muennighoff/xwinograd', dataset_config_name='ru', template_config_name='en', template_name='underscore refer to', split='test', max_length=2048, target_max_length=256, pad_to_max_length=False, model_name_or_path='bloomz-7b1-xp3ru', config_name=None, tokenizer_name=None, use_slow_tokenizer=False, per_device_eval_batch_size=4, output_dir='bloomz-7b1-xp3ru/evaluation', debug=False, prefixlm=False, dtype='float16', nospace=False, scratchpad=False)"
+}
\ No newline at end of file
diff --git a/evaluation/xnli/ru/GPT-3_style/results.json b/evaluation/xnli/ru/GPT-3_style/results.json
new file mode 100644
index 0000000000000000000000000000000000000000..7c4111605b6fdfd043192fab47a2b300d14748f8
--- /dev/null
+++ b/evaluation/xnli/ru/GPT-3_style/results.json
@@ -0,0 +1,9 @@
+{
+ "dataset_name": "xnli",
+ "dataset_config_name": "ru",
+ "template_name": "GPT-3 style",
+ "evaluation": {
+ "accuracy": 0.5
+ },
+ "arguments": "Namespace(dataset_name='xnli', dataset_config_name='ru', template_config_name='en', template_name='GPT-3 style', split='validation', max_length=2048, target_max_length=256, pad_to_max_length=False, model_name_or_path='bloomz-7b1-xp3ru', config_name=None, tokenizer_name=None, use_slow_tokenizer=False, per_device_eval_batch_size=4, output_dir='bloomz-7b1-xp3ru/evaluation', debug=False, prefixlm=False, dtype='float16', nospace=False, scratchpad=False)"
+}
\ No newline at end of file
diff --git a/evaluation/xnli/ru/MNLI_crowdsource/results.json b/evaluation/xnli/ru/MNLI_crowdsource/results.json
new file mode 100644
index 0000000000000000000000000000000000000000..a9572415e39b548d1bf2f91ceb501d5163a56b60
--- /dev/null
+++ b/evaluation/xnli/ru/MNLI_crowdsource/results.json
@@ -0,0 +1,9 @@
+{
+ "dataset_name": "xnli",
+ "dataset_config_name": "ru",
+ "template_name": "MNLI crowdsource",
+ "evaluation": {
+ "accuracy": 0.38473895582329315
+ },
+ "arguments": "Namespace(dataset_name='xnli', dataset_config_name='ru', template_config_name='en', template_name='MNLI crowdsource', split='validation', max_length=2048, target_max_length=256, pad_to_max_length=False, model_name_or_path='bloomz-7b1-xp3ru', config_name=None, tokenizer_name=None, use_slow_tokenizer=False, per_device_eval_batch_size=4, output_dir='bloomz-7b1-xp3ru/evaluation', debug=False, prefixlm=False, dtype='float16', nospace=False, scratchpad=False)"
+}
\ No newline at end of file
diff --git a/evaluation/xnli/ru/can_we_infer/results.json b/evaluation/xnli/ru/can_we_infer/results.json
new file mode 100644
index 0000000000000000000000000000000000000000..0a8297785a69b5f38d8b59eb41fe47451d6011a6
--- /dev/null
+++ b/evaluation/xnli/ru/can_we_infer/results.json
@@ -0,0 +1,9 @@
+{
+ "dataset_name": "xnli",
+ "dataset_config_name": "ru",
+ "template_name": "can we infer",
+ "evaluation": {
+ "accuracy": 0.4971887550200803
+ },
+ "arguments": "Namespace(dataset_name='xnli', dataset_config_name='ru', template_config_name='en', template_name='can we infer', split='validation', max_length=2048, target_max_length=256, pad_to_max_length=False, model_name_or_path='bloomz-7b1-xp3ru', config_name=None, tokenizer_name=None, use_slow_tokenizer=False, per_device_eval_batch_size=4, output_dir='bloomz-7b1-xp3ru/evaluation', debug=False, prefixlm=False, dtype='float16', nospace=False, scratchpad=False)"
+}
\ No newline at end of file
diff --git a/evaluation/xnli/ru/guaranteed_possible_impossible/results.json b/evaluation/xnli/ru/guaranteed_possible_impossible/results.json
new file mode 100644
index 0000000000000000000000000000000000000000..c62ddf5fc49c8f4227e8d6b2856b92ce8a78caca
--- /dev/null
+++ b/evaluation/xnli/ru/guaranteed_possible_impossible/results.json
@@ -0,0 +1,9 @@
+{
+ "dataset_name": "xnli",
+ "dataset_config_name": "ru",
+ "template_name": "guaranteed/possible/impossible",
+ "evaluation": {
+ "accuracy": 0.36626506024096384
+ },
+ "arguments": "Namespace(dataset_name='xnli', dataset_config_name='ru', template_config_name='en', template_name='guaranteed/possible/impossible', split='validation', max_length=2048, target_max_length=256, pad_to_max_length=False, model_name_or_path='bloomz-7b1-xp3ru', config_name=None, tokenizer_name=None, use_slow_tokenizer=False, per_device_eval_batch_size=4, output_dir='bloomz-7b1-xp3ru/evaluation', debug=False, prefixlm=False, dtype='float16', nospace=False, scratchpad=False)"
+}
\ No newline at end of file
diff --git a/evaluation/xnli/ru/justified_in_saying/results.json b/evaluation/xnli/ru/justified_in_saying/results.json
new file mode 100644
index 0000000000000000000000000000000000000000..d4fdf7a583b24579c90f304a4cc9b07c78a0d76f
--- /dev/null
+++ b/evaluation/xnli/ru/justified_in_saying/results.json
@@ -0,0 +1,9 @@
+{
+ "dataset_name": "xnli",
+ "dataset_config_name": "ru",
+ "template_name": "justified in saying",
+ "evaluation": {
+ "accuracy": 0.46265060240963857
+ },
+ "arguments": "Namespace(dataset_name='xnli', dataset_config_name='ru', template_config_name='en', template_name='justified in saying', split='validation', max_length=2048, target_max_length=256, pad_to_max_length=False, model_name_or_path='bloomz-7b1-xp3ru', config_name=None, tokenizer_name=None, use_slow_tokenizer=False, per_device_eval_batch_size=4, output_dir='bloomz-7b1-xp3ru/evaluation', debug=False, prefixlm=False, dtype='float16', nospace=False, scratchpad=False)"
+}
\ No newline at end of file
diff --git a/pytorch_model.bin.index.json b/pytorch_model.bin.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..28bd6d29ec25afa3dcb95735baa1d7f4cb07407e
--- /dev/null
+++ b/pytorch_model.bin.index.json
@@ -0,0 +1,372 @@
+{
+ "metadata": {
+ "total_size": 14138032128
+ },
+ "weight_map": {
+ "h.0.input_layernorm.bias": "pytorch_model_00002-of-00032.bin",
+ "h.0.input_layernorm.weight": "pytorch_model_00002-of-00032.bin",
+ "h.0.mlp.dense_4h_to_h.bias": "pytorch_model_00002-of-00032.bin",
+ "h.0.mlp.dense_4h_to_h.weight": "pytorch_model_00002-of-00032.bin",
+ "h.0.mlp.dense_h_to_4h.bias": "pytorch_model_00002-of-00032.bin",
+ "h.0.mlp.dense_h_to_4h.weight": "pytorch_model_00002-of-00032.bin",
+ "h.0.post_attention_layernorm.bias": "pytorch_model_00002-of-00032.bin",
+ "h.0.post_attention_layernorm.weight": "pytorch_model_00002-of-00032.bin",
+ "h.0.self_attention.dense.bias": "pytorch_model_00002-of-00032.bin",
+ "h.0.self_attention.dense.weight": "pytorch_model_00002-of-00032.bin",
+ "h.0.self_attention.query_key_value.bias": "pytorch_model_00002-of-00032.bin",
+ "h.0.self_attention.query_key_value.weight": "pytorch_model_00002-of-00032.bin",
+ "h.1.input_layernorm.bias": "pytorch_model_00003-of-00032.bin",
+ "h.1.input_layernorm.weight": "pytorch_model_00003-of-00032.bin",
+ "h.1.mlp.dense_4h_to_h.bias": "pytorch_model_00003-of-00032.bin",
+ "h.1.mlp.dense_4h_to_h.weight": "pytorch_model_00003-of-00032.bin",
+ "h.1.mlp.dense_h_to_4h.bias": "pytorch_model_00003-of-00032.bin",
+ "h.1.mlp.dense_h_to_4h.weight": "pytorch_model_00003-of-00032.bin",
+ "h.1.post_attention_layernorm.bias": "pytorch_model_00003-of-00032.bin",
+ "h.1.post_attention_layernorm.weight": "pytorch_model_00003-of-00032.bin",
+ "h.1.self_attention.dense.bias": "pytorch_model_00003-of-00032.bin",
+ "h.1.self_attention.dense.weight": "pytorch_model_00003-of-00032.bin",
+ "h.1.self_attention.query_key_value.bias": "pytorch_model_00003-of-00032.bin",
+ "h.1.self_attention.query_key_value.weight": "pytorch_model_00003-of-00032.bin",
+ "h.10.input_layernorm.bias": "pytorch_model_00012-of-00032.bin",
+ "h.10.input_layernorm.weight": "pytorch_model_00012-of-00032.bin",
+ "h.10.mlp.dense_4h_to_h.bias": "pytorch_model_00012-of-00032.bin",
+ "h.10.mlp.dense_4h_to_h.weight": "pytorch_model_00012-of-00032.bin",
+ "h.10.mlp.dense_h_to_4h.bias": "pytorch_model_00012-of-00032.bin",
+ "h.10.mlp.dense_h_to_4h.weight": "pytorch_model_00012-of-00032.bin",
+ "h.10.post_attention_layernorm.bias": "pytorch_model_00012-of-00032.bin",
+ "h.10.post_attention_layernorm.weight": "pytorch_model_00012-of-00032.bin",
+ "h.10.self_attention.dense.bias": "pytorch_model_00012-of-00032.bin",
+ "h.10.self_attention.dense.weight": "pytorch_model_00012-of-00032.bin",
+ "h.10.self_attention.query_key_value.bias": "pytorch_model_00012-of-00032.bin",
+ "h.10.self_attention.query_key_value.weight": "pytorch_model_00012-of-00032.bin",
+ "h.11.input_layernorm.bias": "pytorch_model_00013-of-00032.bin",
+ "h.11.input_layernorm.weight": "pytorch_model_00013-of-00032.bin",
+ "h.11.mlp.dense_4h_to_h.bias": "pytorch_model_00013-of-00032.bin",
+ "h.11.mlp.dense_4h_to_h.weight": "pytorch_model_00013-of-00032.bin",
+ "h.11.mlp.dense_h_to_4h.bias": "pytorch_model_00013-of-00032.bin",
+ "h.11.mlp.dense_h_to_4h.weight": "pytorch_model_00013-of-00032.bin",
+ "h.11.post_attention_layernorm.bias": "pytorch_model_00013-of-00032.bin",
+ "h.11.post_attention_layernorm.weight": "pytorch_model_00013-of-00032.bin",
+ "h.11.self_attention.dense.bias": "pytorch_model_00013-of-00032.bin",
+ "h.11.self_attention.dense.weight": "pytorch_model_00013-of-00032.bin",
+ "h.11.self_attention.query_key_value.bias": "pytorch_model_00013-of-00032.bin",
+ "h.11.self_attention.query_key_value.weight": "pytorch_model_00013-of-00032.bin",
+ "h.12.input_layernorm.bias": "pytorch_model_00014-of-00032.bin",
+ "h.12.input_layernorm.weight": "pytorch_model_00014-of-00032.bin",
+ "h.12.mlp.dense_4h_to_h.bias": "pytorch_model_00014-of-00032.bin",
+ "h.12.mlp.dense_4h_to_h.weight": "pytorch_model_00014-of-00032.bin",
+ "h.12.mlp.dense_h_to_4h.bias": "pytorch_model_00014-of-00032.bin",
+ "h.12.mlp.dense_h_to_4h.weight": "pytorch_model_00014-of-00032.bin",
+ "h.12.post_attention_layernorm.bias": "pytorch_model_00014-of-00032.bin",
+ "h.12.post_attention_layernorm.weight": "pytorch_model_00014-of-00032.bin",
+ "h.12.self_attention.dense.bias": "pytorch_model_00014-of-00032.bin",
+ "h.12.self_attention.dense.weight": "pytorch_model_00014-of-00032.bin",
+ "h.12.self_attention.query_key_value.bias": "pytorch_model_00014-of-00032.bin",
+ "h.12.self_attention.query_key_value.weight": "pytorch_model_00014-of-00032.bin",
+ "h.13.input_layernorm.bias": "pytorch_model_00015-of-00032.bin",
+ "h.13.input_layernorm.weight": "pytorch_model_00015-of-00032.bin",
+ "h.13.mlp.dense_4h_to_h.bias": "pytorch_model_00015-of-00032.bin",
+ "h.13.mlp.dense_4h_to_h.weight": "pytorch_model_00015-of-00032.bin",
+ "h.13.mlp.dense_h_to_4h.bias": "pytorch_model_00015-of-00032.bin",
+ "h.13.mlp.dense_h_to_4h.weight": "pytorch_model_00015-of-00032.bin",
+ "h.13.post_attention_layernorm.bias": "pytorch_model_00015-of-00032.bin",
+ "h.13.post_attention_layernorm.weight": "pytorch_model_00015-of-00032.bin",
+ "h.13.self_attention.dense.bias": "pytorch_model_00015-of-00032.bin",
+ "h.13.self_attention.dense.weight": "pytorch_model_00015-of-00032.bin",
+ "h.13.self_attention.query_key_value.bias": "pytorch_model_00015-of-00032.bin",
+ "h.13.self_attention.query_key_value.weight": "pytorch_model_00015-of-00032.bin",
+ "h.14.input_layernorm.bias": "pytorch_model_00016-of-00032.bin",
+ "h.14.input_layernorm.weight": "pytorch_model_00016-of-00032.bin",
+ "h.14.mlp.dense_4h_to_h.bias": "pytorch_model_00016-of-00032.bin",
+ "h.14.mlp.dense_4h_to_h.weight": "pytorch_model_00016-of-00032.bin",
+ "h.14.mlp.dense_h_to_4h.bias": "pytorch_model_00016-of-00032.bin",
+ "h.14.mlp.dense_h_to_4h.weight": "pytorch_model_00016-of-00032.bin",
+ "h.14.post_attention_layernorm.bias": "pytorch_model_00016-of-00032.bin",
+ "h.14.post_attention_layernorm.weight": "pytorch_model_00016-of-00032.bin",
+ "h.14.self_attention.dense.bias": "pytorch_model_00016-of-00032.bin",
+ "h.14.self_attention.dense.weight": "pytorch_model_00016-of-00032.bin",
+ "h.14.self_attention.query_key_value.bias": "pytorch_model_00016-of-00032.bin",
+ "h.14.self_attention.query_key_value.weight": "pytorch_model_00016-of-00032.bin",
+ "h.15.input_layernorm.bias": "pytorch_model_00017-of-00032.bin",
+ "h.15.input_layernorm.weight": "pytorch_model_00017-of-00032.bin",
+ "h.15.mlp.dense_4h_to_h.bias": "pytorch_model_00017-of-00032.bin",
+ "h.15.mlp.dense_4h_to_h.weight": "pytorch_model_00017-of-00032.bin",
+ "h.15.mlp.dense_h_to_4h.bias": "pytorch_model_00017-of-00032.bin",
+ "h.15.mlp.dense_h_to_4h.weight": "pytorch_model_00017-of-00032.bin",
+ "h.15.post_attention_layernorm.bias": "pytorch_model_00017-of-00032.bin",
+ "h.15.post_attention_layernorm.weight": "pytorch_model_00017-of-00032.bin",
+ "h.15.self_attention.dense.bias": "pytorch_model_00017-of-00032.bin",
+ "h.15.self_attention.dense.weight": "pytorch_model_00017-of-00032.bin",
+ "h.15.self_attention.query_key_value.bias": "pytorch_model_00017-of-00032.bin",
+ "h.15.self_attention.query_key_value.weight": "pytorch_model_00017-of-00032.bin",
+ "h.16.input_layernorm.bias": "pytorch_model_00018-of-00032.bin",
+ "h.16.input_layernorm.weight": "pytorch_model_00018-of-00032.bin",
+ "h.16.mlp.dense_4h_to_h.bias": "pytorch_model_00018-of-00032.bin",
+ "h.16.mlp.dense_4h_to_h.weight": "pytorch_model_00018-of-00032.bin",
+ "h.16.mlp.dense_h_to_4h.bias": "pytorch_model_00018-of-00032.bin",
+ "h.16.mlp.dense_h_to_4h.weight": "pytorch_model_00018-of-00032.bin",
+ "h.16.post_attention_layernorm.bias": "pytorch_model_00018-of-00032.bin",
+ "h.16.post_attention_layernorm.weight": "pytorch_model_00018-of-00032.bin",
+ "h.16.self_attention.dense.bias": "pytorch_model_00018-of-00032.bin",
+ "h.16.self_attention.dense.weight": "pytorch_model_00018-of-00032.bin",
+ "h.16.self_attention.query_key_value.bias": "pytorch_model_00018-of-00032.bin",
+ "h.16.self_attention.query_key_value.weight": "pytorch_model_00018-of-00032.bin",
+ "h.17.input_layernorm.bias": "pytorch_model_00019-of-00032.bin",
+ "h.17.input_layernorm.weight": "pytorch_model_00019-of-00032.bin",
+ "h.17.mlp.dense_4h_to_h.bias": "pytorch_model_00019-of-00032.bin",
+ "h.17.mlp.dense_4h_to_h.weight": "pytorch_model_00019-of-00032.bin",
+ "h.17.mlp.dense_h_to_4h.bias": "pytorch_model_00019-of-00032.bin",
+ "h.17.mlp.dense_h_to_4h.weight": "pytorch_model_00019-of-00032.bin",
+ "h.17.post_attention_layernorm.bias": "pytorch_model_00019-of-00032.bin",
+ "h.17.post_attention_layernorm.weight": "pytorch_model_00019-of-00032.bin",
+ "h.17.self_attention.dense.bias": "pytorch_model_00019-of-00032.bin",
+ "h.17.self_attention.dense.weight": "pytorch_model_00019-of-00032.bin",
+ "h.17.self_attention.query_key_value.bias": "pytorch_model_00019-of-00032.bin",
+ "h.17.self_attention.query_key_value.weight": "pytorch_model_00019-of-00032.bin",
+ "h.18.input_layernorm.bias": "pytorch_model_00020-of-00032.bin",
+ "h.18.input_layernorm.weight": "pytorch_model_00020-of-00032.bin",
+ "h.18.mlp.dense_4h_to_h.bias": "pytorch_model_00020-of-00032.bin",
+ "h.18.mlp.dense_4h_to_h.weight": "pytorch_model_00020-of-00032.bin",
+ "h.18.mlp.dense_h_to_4h.bias": "pytorch_model_00020-of-00032.bin",
+ "h.18.mlp.dense_h_to_4h.weight": "pytorch_model_00020-of-00032.bin",
+ "h.18.post_attention_layernorm.bias": "pytorch_model_00020-of-00032.bin",
+ "h.18.post_attention_layernorm.weight": "pytorch_model_00020-of-00032.bin",
+ "h.18.self_attention.dense.bias": "pytorch_model_00020-of-00032.bin",
+ "h.18.self_attention.dense.weight": "pytorch_model_00020-of-00032.bin",
+ "h.18.self_attention.query_key_value.bias": "pytorch_model_00020-of-00032.bin",
+ "h.18.self_attention.query_key_value.weight": "pytorch_model_00020-of-00032.bin",
+ "h.19.input_layernorm.bias": "pytorch_model_00021-of-00032.bin",
+ "h.19.input_layernorm.weight": "pytorch_model_00021-of-00032.bin",
+ "h.19.mlp.dense_4h_to_h.bias": "pytorch_model_00021-of-00032.bin",
+ "h.19.mlp.dense_4h_to_h.weight": "pytorch_model_00021-of-00032.bin",
+ "h.19.mlp.dense_h_to_4h.bias": "pytorch_model_00021-of-00032.bin",
+ "h.19.mlp.dense_h_to_4h.weight": "pytorch_model_00021-of-00032.bin",
+ "h.19.post_attention_layernorm.bias": "pytorch_model_00021-of-00032.bin",
+ "h.19.post_attention_layernorm.weight": "pytorch_model_00021-of-00032.bin",
+ "h.19.self_attention.dense.bias": "pytorch_model_00021-of-00032.bin",
+ "h.19.self_attention.dense.weight": "pytorch_model_00021-of-00032.bin",
+ "h.19.self_attention.query_key_value.bias": "pytorch_model_00021-of-00032.bin",
+ "h.19.self_attention.query_key_value.weight": "pytorch_model_00021-of-00032.bin",
+ "h.2.input_layernorm.bias": "pytorch_model_00004-of-00032.bin",
+ "h.2.input_layernorm.weight": "pytorch_model_00004-of-00032.bin",
+ "h.2.mlp.dense_4h_to_h.bias": "pytorch_model_00004-of-00032.bin",
+ "h.2.mlp.dense_4h_to_h.weight": "pytorch_model_00004-of-00032.bin",
+ "h.2.mlp.dense_h_to_4h.bias": "pytorch_model_00004-of-00032.bin",
+ "h.2.mlp.dense_h_to_4h.weight": "pytorch_model_00004-of-00032.bin",
+ "h.2.post_attention_layernorm.bias": "pytorch_model_00004-of-00032.bin",
+ "h.2.post_attention_layernorm.weight": "pytorch_model_00004-of-00032.bin",
+ "h.2.self_attention.dense.bias": "pytorch_model_00004-of-00032.bin",
+ "h.2.self_attention.dense.weight": "pytorch_model_00004-of-00032.bin",
+ "h.2.self_attention.query_key_value.bias": "pytorch_model_00004-of-00032.bin",
+ "h.2.self_attention.query_key_value.weight": "pytorch_model_00004-of-00032.bin",
+ "h.20.input_layernorm.bias": "pytorch_model_00022-of-00032.bin",
+ "h.20.input_layernorm.weight": "pytorch_model_00022-of-00032.bin",
+ "h.20.mlp.dense_4h_to_h.bias": "pytorch_model_00022-of-00032.bin",
+ "h.20.mlp.dense_4h_to_h.weight": "pytorch_model_00022-of-00032.bin",
+ "h.20.mlp.dense_h_to_4h.bias": "pytorch_model_00022-of-00032.bin",
+ "h.20.mlp.dense_h_to_4h.weight": "pytorch_model_00022-of-00032.bin",
+ "h.20.post_attention_layernorm.bias": "pytorch_model_00022-of-00032.bin",
+ "h.20.post_attention_layernorm.weight": "pytorch_model_00022-of-00032.bin",
+ "h.20.self_attention.dense.bias": "pytorch_model_00022-of-00032.bin",
+ "h.20.self_attention.dense.weight": "pytorch_model_00022-of-00032.bin",
+ "h.20.self_attention.query_key_value.bias": "pytorch_model_00022-of-00032.bin",
+ "h.20.self_attention.query_key_value.weight": "pytorch_model_00022-of-00032.bin",
+ "h.21.input_layernorm.bias": "pytorch_model_00023-of-00032.bin",
+ "h.21.input_layernorm.weight": "pytorch_model_00023-of-00032.bin",
+ "h.21.mlp.dense_4h_to_h.bias": "pytorch_model_00023-of-00032.bin",
+ "h.21.mlp.dense_4h_to_h.weight": "pytorch_model_00023-of-00032.bin",
+ "h.21.mlp.dense_h_to_4h.bias": "pytorch_model_00023-of-00032.bin",
+ "h.21.mlp.dense_h_to_4h.weight": "pytorch_model_00023-of-00032.bin",
+ "h.21.post_attention_layernorm.bias": "pytorch_model_00023-of-00032.bin",
+ "h.21.post_attention_layernorm.weight": "pytorch_model_00023-of-00032.bin",
+ "h.21.self_attention.dense.bias": "pytorch_model_00023-of-00032.bin",
+ "h.21.self_attention.dense.weight": "pytorch_model_00023-of-00032.bin",
+ "h.21.self_attention.query_key_value.bias": "pytorch_model_00023-of-00032.bin",
+ "h.21.self_attention.query_key_value.weight": "pytorch_model_00023-of-00032.bin",
+ "h.22.input_layernorm.bias": "pytorch_model_00024-of-00032.bin",
+ "h.22.input_layernorm.weight": "pytorch_model_00024-of-00032.bin",
+ "h.22.mlp.dense_4h_to_h.bias": "pytorch_model_00024-of-00032.bin",
+ "h.22.mlp.dense_4h_to_h.weight": "pytorch_model_00024-of-00032.bin",
+ "h.22.mlp.dense_h_to_4h.bias": "pytorch_model_00024-of-00032.bin",
+ "h.22.mlp.dense_h_to_4h.weight": "pytorch_model_00024-of-00032.bin",
+ "h.22.post_attention_layernorm.bias": "pytorch_model_00024-of-00032.bin",
+ "h.22.post_attention_layernorm.weight": "pytorch_model_00024-of-00032.bin",
+ "h.22.self_attention.dense.bias": "pytorch_model_00024-of-00032.bin",
+ "h.22.self_attention.dense.weight": "pytorch_model_00024-of-00032.bin",
+ "h.22.self_attention.query_key_value.bias": "pytorch_model_00024-of-00032.bin",
+ "h.22.self_attention.query_key_value.weight": "pytorch_model_00024-of-00032.bin",
+ "h.23.input_layernorm.bias": "pytorch_model_00025-of-00032.bin",
+ "h.23.input_layernorm.weight": "pytorch_model_00025-of-00032.bin",
+ "h.23.mlp.dense_4h_to_h.bias": "pytorch_model_00025-of-00032.bin",
+ "h.23.mlp.dense_4h_to_h.weight": "pytorch_model_00025-of-00032.bin",
+ "h.23.mlp.dense_h_to_4h.bias": "pytorch_model_00025-of-00032.bin",
+ "h.23.mlp.dense_h_to_4h.weight": "pytorch_model_00025-of-00032.bin",
+ "h.23.post_attention_layernorm.bias": "pytorch_model_00025-of-00032.bin",
+ "h.23.post_attention_layernorm.weight": "pytorch_model_00025-of-00032.bin",
+ "h.23.self_attention.dense.bias": "pytorch_model_00025-of-00032.bin",
+ "h.23.self_attention.dense.weight": "pytorch_model_00025-of-00032.bin",
+ "h.23.self_attention.query_key_value.bias": "pytorch_model_00025-of-00032.bin",
+ "h.23.self_attention.query_key_value.weight": "pytorch_model_00025-of-00032.bin",
+ "h.24.input_layernorm.bias": "pytorch_model_00026-of-00032.bin",
+ "h.24.input_layernorm.weight": "pytorch_model_00026-of-00032.bin",
+ "h.24.mlp.dense_4h_to_h.bias": "pytorch_model_00026-of-00032.bin",
+ "h.24.mlp.dense_4h_to_h.weight": "pytorch_model_00026-of-00032.bin",
+ "h.24.mlp.dense_h_to_4h.bias": "pytorch_model_00026-of-00032.bin",
+ "h.24.mlp.dense_h_to_4h.weight": "pytorch_model_00026-of-00032.bin",
+ "h.24.post_attention_layernorm.bias": "pytorch_model_00026-of-00032.bin",
+ "h.24.post_attention_layernorm.weight": "pytorch_model_00026-of-00032.bin",
+ "h.24.self_attention.dense.bias": "pytorch_model_00026-of-00032.bin",
+ "h.24.self_attention.dense.weight": "pytorch_model_00026-of-00032.bin",
+ "h.24.self_attention.query_key_value.bias": "pytorch_model_00026-of-00032.bin",
+ "h.24.self_attention.query_key_value.weight": "pytorch_model_00026-of-00032.bin",
+ "h.25.input_layernorm.bias": "pytorch_model_00027-of-00032.bin",
+ "h.25.input_layernorm.weight": "pytorch_model_00027-of-00032.bin",
+ "h.25.mlp.dense_4h_to_h.bias": "pytorch_model_00027-of-00032.bin",
+ "h.25.mlp.dense_4h_to_h.weight": "pytorch_model_00027-of-00032.bin",
+ "h.25.mlp.dense_h_to_4h.bias": "pytorch_model_00027-of-00032.bin",
+ "h.25.mlp.dense_h_to_4h.weight": "pytorch_model_00027-of-00032.bin",
+ "h.25.post_attention_layernorm.bias": "pytorch_model_00027-of-00032.bin",
+ "h.25.post_attention_layernorm.weight": "pytorch_model_00027-of-00032.bin",
+ "h.25.self_attention.dense.bias": "pytorch_model_00027-of-00032.bin",
+ "h.25.self_attention.dense.weight": "pytorch_model_00027-of-00032.bin",
+ "h.25.self_attention.query_key_value.bias": "pytorch_model_00027-of-00032.bin",
+ "h.25.self_attention.query_key_value.weight": "pytorch_model_00027-of-00032.bin",
+ "h.26.input_layernorm.bias": "pytorch_model_00028-of-00032.bin",
+ "h.26.input_layernorm.weight": "pytorch_model_00028-of-00032.bin",
+ "h.26.mlp.dense_4h_to_h.bias": "pytorch_model_00028-of-00032.bin",
+ "h.26.mlp.dense_4h_to_h.weight": "pytorch_model_00028-of-00032.bin",
+ "h.26.mlp.dense_h_to_4h.bias": "pytorch_model_00028-of-00032.bin",
+ "h.26.mlp.dense_h_to_4h.weight": "pytorch_model_00028-of-00032.bin",
+ "h.26.post_attention_layernorm.bias": "pytorch_model_00028-of-00032.bin",
+ "h.26.post_attention_layernorm.weight": "pytorch_model_00028-of-00032.bin",
+ "h.26.self_attention.dense.bias": "pytorch_model_00028-of-00032.bin",
+ "h.26.self_attention.dense.weight": "pytorch_model_00028-of-00032.bin",
+ "h.26.self_attention.query_key_value.bias": "pytorch_model_00028-of-00032.bin",
+ "h.26.self_attention.query_key_value.weight": "pytorch_model_00028-of-00032.bin",
+ "h.27.input_layernorm.bias": "pytorch_model_00029-of-00032.bin",
+ "h.27.input_layernorm.weight": "pytorch_model_00029-of-00032.bin",
+ "h.27.mlp.dense_4h_to_h.bias": "pytorch_model_00029-of-00032.bin",
+ "h.27.mlp.dense_4h_to_h.weight": "pytorch_model_00029-of-00032.bin",
+ "h.27.mlp.dense_h_to_4h.bias": "pytorch_model_00029-of-00032.bin",
+ "h.27.mlp.dense_h_to_4h.weight": "pytorch_model_00029-of-00032.bin",
+ "h.27.post_attention_layernorm.bias": "pytorch_model_00029-of-00032.bin",
+ "h.27.post_attention_layernorm.weight": "pytorch_model_00029-of-00032.bin",
+ "h.27.self_attention.dense.bias": "pytorch_model_00029-of-00032.bin",
+ "h.27.self_attention.dense.weight": "pytorch_model_00029-of-00032.bin",
+ "h.27.self_attention.query_key_value.bias": "pytorch_model_00029-of-00032.bin",
+ "h.27.self_attention.query_key_value.weight": "pytorch_model_00029-of-00032.bin",
+ "h.28.input_layernorm.bias": "pytorch_model_00030-of-00032.bin",
+ "h.28.input_layernorm.weight": "pytorch_model_00030-of-00032.bin",
+ "h.28.mlp.dense_4h_to_h.bias": "pytorch_model_00030-of-00032.bin",
+ "h.28.mlp.dense_4h_to_h.weight": "pytorch_model_00030-of-00032.bin",
+ "h.28.mlp.dense_h_to_4h.bias": "pytorch_model_00030-of-00032.bin",
+ "h.28.mlp.dense_h_to_4h.weight": "pytorch_model_00030-of-00032.bin",
+ "h.28.post_attention_layernorm.bias": "pytorch_model_00030-of-00032.bin",
+ "h.28.post_attention_layernorm.weight": "pytorch_model_00030-of-00032.bin",
+ "h.28.self_attention.dense.bias": "pytorch_model_00030-of-00032.bin",
+ "h.28.self_attention.dense.weight": "pytorch_model_00030-of-00032.bin",
+ "h.28.self_attention.query_key_value.bias": "pytorch_model_00030-of-00032.bin",
+ "h.28.self_attention.query_key_value.weight": "pytorch_model_00030-of-00032.bin",
+ "h.29.input_layernorm.bias": "pytorch_model_00031-of-00032.bin",
+ "h.29.input_layernorm.weight": "pytorch_model_00031-of-00032.bin",
+ "h.29.mlp.dense_4h_to_h.bias": "pytorch_model_00031-of-00032.bin",
+ "h.29.mlp.dense_4h_to_h.weight": "pytorch_model_00031-of-00032.bin",
+ "h.29.mlp.dense_h_to_4h.bias": "pytorch_model_00031-of-00032.bin",
+ "h.29.mlp.dense_h_to_4h.weight": "pytorch_model_00031-of-00032.bin",
+ "h.29.post_attention_layernorm.bias": "pytorch_model_00031-of-00032.bin",
+ "h.29.post_attention_layernorm.weight": "pytorch_model_00031-of-00032.bin",
+ "h.29.self_attention.dense.bias": "pytorch_model_00031-of-00032.bin",
+ "h.29.self_attention.dense.weight": "pytorch_model_00031-of-00032.bin",
+ "h.29.self_attention.query_key_value.bias": "pytorch_model_00031-of-00032.bin",
+ "h.29.self_attention.query_key_value.weight": "pytorch_model_00031-of-00032.bin",
+ "h.3.input_layernorm.bias": "pytorch_model_00005-of-00032.bin",
+ "h.3.input_layernorm.weight": "pytorch_model_00005-of-00032.bin",
+ "h.3.mlp.dense_4h_to_h.bias": "pytorch_model_00005-of-00032.bin",
+ "h.3.mlp.dense_4h_to_h.weight": "pytorch_model_00005-of-00032.bin",
+ "h.3.mlp.dense_h_to_4h.bias": "pytorch_model_00005-of-00032.bin",
+ "h.3.mlp.dense_h_to_4h.weight": "pytorch_model_00005-of-00032.bin",
+ "h.3.post_attention_layernorm.bias": "pytorch_model_00005-of-00032.bin",
+ "h.3.post_attention_layernorm.weight": "pytorch_model_00005-of-00032.bin",
+ "h.3.self_attention.dense.bias": "pytorch_model_00005-of-00032.bin",
+ "h.3.self_attention.dense.weight": "pytorch_model_00005-of-00032.bin",
+ "h.3.self_attention.query_key_value.bias": "pytorch_model_00005-of-00032.bin",
+ "h.3.self_attention.query_key_value.weight": "pytorch_model_00005-of-00032.bin",
+ "h.4.input_layernorm.bias": "pytorch_model_00006-of-00032.bin",
+ "h.4.input_layernorm.weight": "pytorch_model_00006-of-00032.bin",
+ "h.4.mlp.dense_4h_to_h.bias": "pytorch_model_00006-of-00032.bin",
+ "h.4.mlp.dense_4h_to_h.weight": "pytorch_model_00006-of-00032.bin",
+ "h.4.mlp.dense_h_to_4h.bias": "pytorch_model_00006-of-00032.bin",
+ "h.4.mlp.dense_h_to_4h.weight": "pytorch_model_00006-of-00032.bin",
+ "h.4.post_attention_layernorm.bias": "pytorch_model_00006-of-00032.bin",
+ "h.4.post_attention_layernorm.weight": "pytorch_model_00006-of-00032.bin",
+ "h.4.self_attention.dense.bias": "pytorch_model_00006-of-00032.bin",
+ "h.4.self_attention.dense.weight": "pytorch_model_00006-of-00032.bin",
+ "h.4.self_attention.query_key_value.bias": "pytorch_model_00006-of-00032.bin",
+ "h.4.self_attention.query_key_value.weight": "pytorch_model_00006-of-00032.bin",
+ "h.5.input_layernorm.bias": "pytorch_model_00007-of-00032.bin",
+ "h.5.input_layernorm.weight": "pytorch_model_00007-of-00032.bin",
+ "h.5.mlp.dense_4h_to_h.bias": "pytorch_model_00007-of-00032.bin",
+ "h.5.mlp.dense_4h_to_h.weight": "pytorch_model_00007-of-00032.bin",
+ "h.5.mlp.dense_h_to_4h.bias": "pytorch_model_00007-of-00032.bin",
+ "h.5.mlp.dense_h_to_4h.weight": "pytorch_model_00007-of-00032.bin",
+ "h.5.post_attention_layernorm.bias": "pytorch_model_00007-of-00032.bin",
+ "h.5.post_attention_layernorm.weight": "pytorch_model_00007-of-00032.bin",
+ "h.5.self_attention.dense.bias": "pytorch_model_00007-of-00032.bin",
+ "h.5.self_attention.dense.weight": "pytorch_model_00007-of-00032.bin",
+ "h.5.self_attention.query_key_value.bias": "pytorch_model_00007-of-00032.bin",
+ "h.5.self_attention.query_key_value.weight": "pytorch_model_00007-of-00032.bin",
+ "h.6.input_layernorm.bias": "pytorch_model_00008-of-00032.bin",
+ "h.6.input_layernorm.weight": "pytorch_model_00008-of-00032.bin",
+ "h.6.mlp.dense_4h_to_h.bias": "pytorch_model_00008-of-00032.bin",
+ "h.6.mlp.dense_4h_to_h.weight": "pytorch_model_00008-of-00032.bin",
+ "h.6.mlp.dense_h_to_4h.bias": "pytorch_model_00008-of-00032.bin",
+ "h.6.mlp.dense_h_to_4h.weight": "pytorch_model_00008-of-00032.bin",
+ "h.6.post_attention_layernorm.bias": "pytorch_model_00008-of-00032.bin",
+ "h.6.post_attention_layernorm.weight": "pytorch_model_00008-of-00032.bin",
+ "h.6.self_attention.dense.bias": "pytorch_model_00008-of-00032.bin",
+ "h.6.self_attention.dense.weight": "pytorch_model_00008-of-00032.bin",
+ "h.6.self_attention.query_key_value.bias": "pytorch_model_00008-of-00032.bin",
+ "h.6.self_attention.query_key_value.weight": "pytorch_model_00008-of-00032.bin",
+ "h.7.input_layernorm.bias": "pytorch_model_00009-of-00032.bin",
+ "h.7.input_layernorm.weight": "pytorch_model_00009-of-00032.bin",
+ "h.7.mlp.dense_4h_to_h.bias": "pytorch_model_00009-of-00032.bin",
+ "h.7.mlp.dense_4h_to_h.weight": "pytorch_model_00009-of-00032.bin",
+ "h.7.mlp.dense_h_to_4h.bias": "pytorch_model_00009-of-00032.bin",
+ "h.7.mlp.dense_h_to_4h.weight": "pytorch_model_00009-of-00032.bin",
+ "h.7.post_attention_layernorm.bias": "pytorch_model_00009-of-00032.bin",
+ "h.7.post_attention_layernorm.weight": "pytorch_model_00009-of-00032.bin",
+ "h.7.self_attention.dense.bias": "pytorch_model_00009-of-00032.bin",
+ "h.7.self_attention.dense.weight": "pytorch_model_00009-of-00032.bin",
+ "h.7.self_attention.query_key_value.bias": "pytorch_model_00009-of-00032.bin",
+ "h.7.self_attention.query_key_value.weight": "pytorch_model_00009-of-00032.bin",
+ "h.8.input_layernorm.bias": "pytorch_model_00010-of-00032.bin",
+ "h.8.input_layernorm.weight": "pytorch_model_00010-of-00032.bin",
+ "h.8.mlp.dense_4h_to_h.bias": "pytorch_model_00010-of-00032.bin",
+ "h.8.mlp.dense_4h_to_h.weight": "pytorch_model_00010-of-00032.bin",
+ "h.8.mlp.dense_h_to_4h.bias": "pytorch_model_00010-of-00032.bin",
+ "h.8.mlp.dense_h_to_4h.weight": "pytorch_model_00010-of-00032.bin",
+ "h.8.post_attention_layernorm.bias": "pytorch_model_00010-of-00032.bin",
+ "h.8.post_attention_layernorm.weight": "pytorch_model_00010-of-00032.bin",
+ "h.8.self_attention.dense.bias": "pytorch_model_00010-of-00032.bin",
+ "h.8.self_attention.dense.weight": "pytorch_model_00010-of-00032.bin",
+ "h.8.self_attention.query_key_value.bias": "pytorch_model_00010-of-00032.bin",
+ "h.8.self_attention.query_key_value.weight": "pytorch_model_00010-of-00032.bin",
+ "h.9.input_layernorm.bias": "pytorch_model_00011-of-00032.bin",
+ "h.9.input_layernorm.weight": "pytorch_model_00011-of-00032.bin",
+ "h.9.mlp.dense_4h_to_h.bias": "pytorch_model_00011-of-00032.bin",
+ "h.9.mlp.dense_4h_to_h.weight": "pytorch_model_00011-of-00032.bin",
+ "h.9.mlp.dense_h_to_4h.bias": "pytorch_model_00011-of-00032.bin",
+ "h.9.mlp.dense_h_to_4h.weight": "pytorch_model_00011-of-00032.bin",
+ "h.9.post_attention_layernorm.bias": "pytorch_model_00011-of-00032.bin",
+ "h.9.post_attention_layernorm.weight": "pytorch_model_00011-of-00032.bin",
+ "h.9.self_attention.dense.bias": "pytorch_model_00011-of-00032.bin",
+ "h.9.self_attention.dense.weight": "pytorch_model_00011-of-00032.bin",
+ "h.9.self_attention.query_key_value.bias": "pytorch_model_00011-of-00032.bin",
+ "h.9.self_attention.query_key_value.weight": "pytorch_model_00011-of-00032.bin",
+ "ln_f.bias": "pytorch_model_00032-of-00032.bin",
+ "ln_f.weight": "pytorch_model_00032-of-00032.bin",
+ "word_embeddings.weight": "pytorch_model_00001-of-00032.bin",
+ "word_embeddings_layernorm.bias": "pytorch_model_00001-of-00032.bin",
+ "word_embeddings_layernorm.weight": "pytorch_model_00001-of-00032.bin"
+ }
+}
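
pytorch_model.bin.index.json maps every parameter name to one of the 32 shard files that follow. from_pretrained() consumes this index automatically; the sketch below, assuming the shards have been fetched from LFS, only illustrates what the weight_map encodes:

```python
# Hedged sketch: rebuild a single state dict from the sharded checkpoint by
# following weight_map. Requires torch and locally materialised shard files.
import json
import torch

with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)

state_dict = {}
for shard_file in sorted(set(index["weight_map"].values())):
    # each shard is an ordinary torch-serialised dict of tensors
    state_dict.update(torch.load(shard_file, map_location="cpu"))

assert set(state_dict) == set(index["weight_map"])
print(f"{len(state_dict)} tensors,",
      f"{sum(t.numel() for t in state_dict.values()):,} parameters")
```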
diff --git a/pytorch_model_00001-of-00032.bin b/pytorch_model_00001-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..eac840b6a26f8dc7716b71b5aab149edb5255345
--- /dev/null
+++ b/pytorch_model_00001-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ec9cd558cfdf899889b9c728ea76a9a7bdd165a0bb4bb8afd14c7cbe61541916
+size 2055226892
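
The shard entries above and below are Git LFS pointers rather than the weights themselves: three lines giving the spec version, the sha256 oid of the real blob, and its size in bytes (the first shard, which carries the word embeddings, is about 2 GB; the per-layer shards are about 400 MB each). A hedged, stdlib-only sketch for checking a downloaded blob against its pointer:

```python
# Hedged sketch: verify that a locally downloaded blob matches the Git LFS
# pointer committed here (size and sha256 oid). Standard library only.
import hashlib
from pathlib import Path

def parse_pointer(text: str) -> dict:
    # "oid sha256:ec9c..." -> {"version": ..., "oid": "sha256:ec9c...", "size": ...}
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

def matches_pointer(pointer_text: str, blob_path: str) -> bool:
    pointer = parse_pointer(pointer_text)
    blob = Path(blob_path).read_bytes()  # fine for a few GB; stream if memory-bound
    expected = pointer["oid"].split(":", 1)[1]
    return (len(blob) == int(pointer["size"])
            and hashlib.sha256(blob).hexdigest() == expected)
```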
diff --git a/pytorch_model_00002-of-00032.bin b/pytorch_model_00002-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..b8364628ea54438d521cd65492a51851b3185ff6
--- /dev/null
+++ b/pytorch_model_00002-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:985f515466ed6be3f1e161cf0fa1f8642d5cd644424eb11ff85317205cb5ce76
+size 402763943
diff --git a/pytorch_model_00003-of-00032.bin b/pytorch_model_00003-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..cd4febeb28d154ce41d6c343505af3abd4e05726
--- /dev/null
+++ b/pytorch_model_00003-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:177dd699a09592748d6f790ed41a5853dc9d931564ee23eac9e7c408361464a0
+size 402763943
diff --git a/pytorch_model_00004-of-00032.bin b/pytorch_model_00004-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..873f0342d048bc1b5c5e1a2ee877a9011c606dc2
--- /dev/null
+++ b/pytorch_model_00004-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0947d5ef3b0e651d1334ea915bc64a20fb7ee1433ee71fedd41c360c4836df8f
+size 402763943
diff --git a/pytorch_model_00005-of-00032.bin b/pytorch_model_00005-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..ab0ffe8684aaacb1aa2f9bf321bb07a348d1d7cd
--- /dev/null
+++ b/pytorch_model_00005-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d202b48bf2ce7b34d51b81f8ead81dc6c498088c5a3572f7b3204592fb181ee6
+size 402763943
diff --git a/pytorch_model_00006-of-00032.bin b/pytorch_model_00006-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..7b450fc90901f8ee3fb2f044a9a120af2e9a9980
--- /dev/null
+++ b/pytorch_model_00006-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d688567d74390a97d5dcdcce05ffb5119d3a42213eb62b9aa8f3de84740968b8
+size 402763943
diff --git a/pytorch_model_00007-of-00032.bin b/pytorch_model_00007-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c15b122e70cb7a832d1be842cd99075b6a886eff
--- /dev/null
+++ b/pytorch_model_00007-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d6133a21d7623916bfb19379724e7a46820f22d03606b6cd7acee947031d30f
+size 402763943
diff --git a/pytorch_model_00008-of-00032.bin b/pytorch_model_00008-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..def027e2aded289e9f1bbc236f78cf30e4d252d5
--- /dev/null
+++ b/pytorch_model_00008-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85eda9b92234332f05fa854fd64b583fc86f1d9fa328032fd51c1daf123e76d6
+size 402763943
diff --git a/pytorch_model_00009-of-00032.bin b/pytorch_model_00009-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..7e111c45b7df7c8b911f72b699f6094f22ef388e
--- /dev/null
+++ b/pytorch_model_00009-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d97fee47d223c6bc5ca2ceb2e3279895e8b0fb5f66ac5da5c33b758e73227411
+size 402763943
diff --git a/pytorch_model_00010-of-00032.bin b/pytorch_model_00010-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..30436b85897827347b232a82bd8fb638be585608
--- /dev/null
+++ b/pytorch_model_00010-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d9b2915a9b66bec28fffffd8458a54aa416d18132cd0fe5e02ca7859b1a7e3d
+size 402763943
diff --git a/pytorch_model_00011-of-00032.bin b/pytorch_model_00011-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..f0fa008f9a83f206bf407dc470f386e2c4a42305
--- /dev/null
+++ b/pytorch_model_00011-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27d32ef6fb637dafa5a2dca5686f91ac02b81aa1c83b47d07b3400932d287ac7
+size 402763943
diff --git a/pytorch_model_00012-of-00032.bin b/pytorch_model_00012-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..6599c195a2cf477676f346fb3defb354242a2448
--- /dev/null
+++ b/pytorch_model_00012-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68ed6492916eeb4a076d34faddfd89cdef4ac8cfec26ccbcfb0ec30b36769ae1
+size 402763943
diff --git a/pytorch_model_00013-of-00032.bin b/pytorch_model_00013-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c2344e429ef77deab991474662ff57bf06bb231c
--- /dev/null
+++ b/pytorch_model_00013-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:922904004e745331255d0e4690f14cd53247ae507c1f471a979a14a012d78455
+size 402763943
diff --git a/pytorch_model_00014-of-00032.bin b/pytorch_model_00014-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..24013e34c82509346bf1ec1a8274995c8f3af57a
--- /dev/null
+++ b/pytorch_model_00014-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ba8938549b29abc07699cc972596f2340f8360973fb2645fb209016c0b9837c2
+size 402763943
diff --git a/pytorch_model_00015-of-00032.bin b/pytorch_model_00015-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..74f6b9df0f9dc076f24a3b42372aa9aac2e1a973
--- /dev/null
+++ b/pytorch_model_00015-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:043514372ba5b5b3e8e9ba69d5e37916b1ff72131fc87e732bb061a03b88fad5
+size 402763943
diff --git a/pytorch_model_00016-of-00032.bin b/pytorch_model_00016-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..b3516e3223d1f91756429de96b658641d7347e7a
--- /dev/null
+++ b/pytorch_model_00016-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:62d361be621f84851a9a22c6c10e5ce5a97b01d398f21b81bd80c0884179be3c
+size 402763943
diff --git a/pytorch_model_00017-of-00032.bin b/pytorch_model_00017-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..15cd8fee75bad21ad3222a05f8712fc89783059b
--- /dev/null
+++ b/pytorch_model_00017-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ae3e126aace7ff7b7ab103f2378a913fdbfc47c6222bceb32e018691c358c09
+size 402763943
diff --git a/pytorch_model_00018-of-00032.bin b/pytorch_model_00018-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..637b280a48c624278e3c0a29056f4cbc59993926
--- /dev/null
+++ b/pytorch_model_00018-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef181480d086f98fa9fd809973f22859dbd2e3b19e3b010660acd705dd6f1b0a
+size 402763943
diff --git a/pytorch_model_00019-of-00032.bin b/pytorch_model_00019-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..a8e335cd6780193a4467a0c0ce49d3c5a3ad4295
--- /dev/null
+++ b/pytorch_model_00019-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e789af96eb4c2b181a72e5e4a8c03e85f02516e41abc9dcd6e88e1769bcecbd
+size 402763943
diff --git a/pytorch_model_00020-of-00032.bin b/pytorch_model_00020-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..a0bd281578fefcef3cc5bd86fab52688d50e1c28
--- /dev/null
+++ b/pytorch_model_00020-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c4df2b4390deceb3c830a72a86c7e7ed373b0a90aee0e4e9332f0e1c36a64e01
+size 402763943
diff --git a/pytorch_model_00021-of-00032.bin b/pytorch_model_00021-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..ca5058e142f45b4bb9870341aa7ccff82857516e
--- /dev/null
+++ b/pytorch_model_00021-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0c9e8c8ab937dddd8bcd5c0b3aef2d2f5a4271ed56fdec3b4aced5e3a9737c81
+size 402763943
diff --git a/pytorch_model_00022-of-00032.bin b/pytorch_model_00022-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..8ed64ec5a496d97c1fe29fa132e80de24ffb62ea
--- /dev/null
+++ b/pytorch_model_00022-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c33b29956092113fdc73aa3021cb9558050164e404b34efa88b99075b9dec040
+size 402763943
diff --git a/pytorch_model_00023-of-00032.bin b/pytorch_model_00023-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..9a4e6bf3125ce103e6c178e3149c0df91d7cc230
--- /dev/null
+++ b/pytorch_model_00023-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8645bbbb558de56f57e0dc29a083872cddb6d88eabd765e5dccd633b86c68609
+size 402763943
diff --git a/pytorch_model_00024-of-00032.bin b/pytorch_model_00024-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..8aa61023da6f72a8cac5488dbb615a94042de9cc
--- /dev/null
+++ b/pytorch_model_00024-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb1dbe6b96d8af6919b1e7284134de6580aa1d789d9f83514bdb0fc33221411e
+size 402763943
diff --git a/pytorch_model_00025-of-00032.bin b/pytorch_model_00025-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..2c15f0b07cff3e2ee5c70ffa119d5fa799fceba6
--- /dev/null
+++ b/pytorch_model_00025-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad4c61bcc0649bcaea63a9a081112984df4cafff0ab56896513dca7297687b92
+size 402763943
diff --git a/pytorch_model_00026-of-00032.bin b/pytorch_model_00026-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..6e425e8a6f769f988850b96928b1cb5a66391c78
--- /dev/null
+++ b/pytorch_model_00026-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3cea4e4aac3246b4e58533f71bc3c0d001d804c7af02de79fcd2821cfc27fc77
+size 402763943
diff --git a/pytorch_model_00027-of-00032.bin b/pytorch_model_00027-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..f9fe3cc7fd3daae4ef7a54b6909d8db06f928a97
--- /dev/null
+++ b/pytorch_model_00027-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a9ad539f7bac6ad3301803736069975969d2a10592dc9d96ac3301d3f0185a02
+size 402763943
diff --git a/pytorch_model_00028-of-00032.bin b/pytorch_model_00028-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..bf83258e788cf2e99977a21d69f38fe15861b3e8
--- /dev/null
+++ b/pytorch_model_00028-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad02db2dbd90e8019845028454fc618bb1a07f9a41cd6939accc97593f2911be
+size 402763943
diff --git a/pytorch_model_00029-of-00032.bin b/pytorch_model_00029-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..4279d7a83f2ad61065953bd55ec55a5444b152f3
--- /dev/null
+++ b/pytorch_model_00029-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:755e371add07052069ff71fa0903bbc54336d5fda0ecdb6e9d4d66a764667c64
+size 402763943
diff --git a/pytorch_model_00030-of-00032.bin b/pytorch_model_00030-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..1cc4f2a87b5c3a2fa572be9f998ad936e3dad6b4
--- /dev/null
+++ b/pytorch_model_00030-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60ef8192a600e0426d358c27af44e24e0ff52d54a06fe191086a6055ceb6535b
+size 402763943
diff --git a/pytorch_model_00031-of-00032.bin b/pytorch_model_00031-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..afafbe304b73b03bb5da335cc6639c71e95ab007
--- /dev/null
+++ b/pytorch_model_00031-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:febaa7b6d6b850ccda5a36cb8789369a9e9c4df663a8119bafbadcc9daab6c10
+size 402763943
diff --git a/pytorch_model_00032-of-00032.bin b/pytorch_model_00032-of-00032.bin
new file mode 100644
index 0000000000000000000000000000000000000000..bf742fc77a1f3578040a5d6e4f86160d21fb9306
--- /dev/null
+++ b/pytorch_model_00032-of-00032.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:104b1814a9bc1315c31f71dec692f5578d16b77c8613fd8d4e76a840ff749bcd
+size 17595
diff --git a/special_tokens_map.json b/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..25bc39604f72700b3b8e10bd69bb2f227157edd1
--- /dev/null
+++ b/special_tokens_map.json
@@ -0,0 +1 @@
+{"bos_token": "", "eos_token": "", "unk_token": "", "pad_token": ""}
\ No newline at end of file
diff --git a/tokenizer.json b/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..370bd68e20b4b6574ee05b213a74b244e3f492f3
--- /dev/null
+++ b/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fa39cd4b1500feb205bcce3b9703a4373414cafe4970e0657b413f7ddd2a9d3
+size 14500438
diff --git a/tokenizer_config.json b/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c63345010532cae8bae2df6903ba2a811d5891d8
--- /dev/null
+++ b/tokenizer_config.json
@@ -0,0 +1 @@
+{"unk_token": "", "eos_token": "", "bos_token": "", "pad_token": "", "name_or_path": "bigscience/tokenizer", "special_tokens_map_file": null, "tokenizer_class": "BloomTokenizerFast"}
\ No newline at end of file
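
The tokenizer files close out the diff: tokenizer.json is the LFS-tracked fast-tokenizer blob (hence the .gitattributes change at the top), while special_tokens_map.json and tokenizer_config.json declare BloomTokenizerFast with <unk>/<s>/</s>/<pad>, consistent with unk_token_id=0, bos_token_id=1, eos_token_id=2, pad_token_id=3 in config.json. A minimal sketch, again assuming a local checkout at ./bloomz-7b1-xp3ru:

```python
# Minimal sketch: load the fast tokenizer and check its special-token ids
# against the ids declared in config.json (unk=0, bos=1, eos=2, pad=3).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./bloomz-7b1-xp3ru")
print(type(tokenizer).__name__)  # BloomTokenizerFast
print(tokenizer.unk_token_id, tokenizer.bos_token_id,
      tokenizer.eos_token_id, tokenizer.pad_token_id)
```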