NexVeridian committed
Commit da7082e · verified · 1 Parent(s): 5c5de10

Add files using upload-large-folder tool

README.md CHANGED
@@ -7,14 +7,15 @@ tags:
 - text adventure
 - roleplay
 - mlx
-library_name: transformers
+library_name: mlx
+pipeline_tag: text-generation
 ---

-# mlx-community/Wayfarer-12B-Q8-mlx
+# mlx-community/Wayfarer-12B-8bit

-The Model [mlx-community/Wayfarer-12B-Q8-mlx](https://huggingface.co/mlx-community/Wayfarer-12B-Q8-mlx) was
+This model [mlx-community/Wayfarer-12B-8bit](https://huggingface.co/mlx-community/Wayfarer-12B-8bit) was
 converted to MLX format from [LatitudeGames/Wayfarer-12B](https://huggingface.co/LatitudeGames/Wayfarer-12B)
-using mlx-lm version **0.21.4**.
+using mlx-lm version **0.25.0**.

 ## Use with mlx

@@ -25,7 +26,7 @@ pip install mlx-lm
 ```python
 from mlx_lm import load, generate

-model, tokenizer = load("mlx-community/Wayfarer-12B-Q8-mlx")
+model, tokenizer = load("mlx-community/Wayfarer-12B-8bit")

 prompt = "hello"
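The second README hunk above truncates the usage snippet at `prompt = "hello"`. For context, a minimal sketch of how the section typically continues in mlx-lm-generated READMEs (not part of this commit's diff; the chat-template branch and `verbose=True` are assumptions based on the common mlx-lm pattern):

```python
from mlx_lm import load, generate

# Load the 8-bit MLX conversion referenced in the README diff above.
model, tokenizer = load("mlx-community/Wayfarer-12B-8bit")

prompt = "hello"

# If the repo ships a chat template (see chat_template.jinja below),
# wrap the prompt in ChatML before generating.
if tokenizer.chat_template is not None:
    messages = [{"role": "user", "content": prompt}]
    prompt = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True
    )

response = generate(model, tokenizer, prompt=prompt, verbose=True)
```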
 
chat_template.jinja ADDED
@@ -0,0 +1,4 @@
+{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '
+' + message['content'] + '<|im_end|>' + '
+'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
+' }}{% endif %}
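The added template is plain ChatML. As an illustration of what it renders (a self-contained sketch that applies the template with jinja2 directly; the example messages are invented):

```python
from jinja2 import Template

# Same template as the added chat_template.jinja, inlined for the example.
CHATML = (
    "{% if not add_generation_prompt is defined %}"
    "{% set add_generation_prompt = false %}{% endif %}"
    "{% for message in messages %}"
    "{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}"
    "{% endfor %}"
    "{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
)

messages = [
    {"role": "system", "content": "You are a text-adventure narrator."},
    {"role": "user", "content": "I open the door."},
]

print(Template(CHATML).render(messages=messages, add_generation_prompt=True))
# <|im_start|>system
# You are a text-adventure narrator.<|im_end|>
# <|im_start|>user
# I open the door.<|im_end|>
# <|im_start|>assistant
```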
generation_config.json ADDED
@@ -0,0 +1,7 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "do_sample": true,
+  "eos_token_id": 2,
+  "transformers_version": "4.46.1"
+}
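For reference, the added generation_config.json can be read back with the transformers `GenerationConfig` API (a minimal sketch; the repo id is taken from the README diff above):

```python
from transformers import GenerationConfig

# Loads generation_config.json from the Hub repo.
cfg = GenerationConfig.from_pretrained("mlx-community/Wayfarer-12B-8bit")

print(cfg.do_sample)     # True
print(cfg.bos_token_id)  # 1
print(cfg.eos_token_id)  # 2
```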
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f670e36509c883ba58f7a1c497d5dc8f3152074732d28ef31edbefd6a318f05f
-size 5348118263
+oid sha256:62c749b9324111d202138e73426f0bce6becb3b69b73f8a8ea1ff3a0d86c34c8
+size 5348118521
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d2386c874b9c969f49cc3ec917ffcf0d9ebe4d737dd1998409c1b13231f992e4
-size 5348155267
+oid sha256:b949c6a39bdbfa0660369f10104d6be1d8e9e6ac9b89e1dbb21e58b3221c3e54
+size 5348155565
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:daac7c3a6290e8ff01495b47cac4e7c24c09f3c0b071cecf871353090b610618
-size 2317511261
+oid sha256:f7f67d969ab381062d7511ac0196f8a5b71304e1ef007c5c0c6a6036eedccfd6
+size 2317511350
model.safetensors.index.json CHANGED
@@ -1,6 +1,7 @@
 {
   "metadata": {
-    "total_size": 13013679360
+    "total_size": 13013679360,
+    "total_parameters": 12247802880
   },
   "weight_map": {
     "lm_head.biases": "model-00003-of-00003.safetensors",
tokenizer_config.json CHANGED
@@ -8024,7 +8024,6 @@
     "<|im_start|>"
   ],
   "bos_token": "<s>",
-  "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|im_end|>",
   "extra_special_tokens": {},