Create README.md
---
license: llama2
base_model:
- lmsys/vicuna-13b-v1.5-16k
---

This is a half-precision (float16) GGUF file of lmsys/vicuna-13b-v1.5-16k.
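The file can be loaded by llama.cpp or any GGUF-compatible runtime. As a quick illustration, here is a minimal sketch using the `llama-cpp-python` package (an assumption, not part of this repo), with the prompt in Vicuna v1.5's `USER:`/`ASSISTANT:` format:

```python
# Minimal sketch, assuming `pip install llama-cpp-python` and that the gguf
# file from this repo has been downloaded into the working directory.
from llama_cpp import Llama

llm = Llama(model_path="vicuna-13b-v1.5-16k.gguf", n_ctx=16384)  # 16k context
out = llm("USER: What is the GGUF file format?\nASSISTANT:", max_tokens=128)
print(out["choices"][0]["text"])
```
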
Conversion code:

```python
import subprocess

from huggingface_hub import snapshot_download

# Download the original HF checkpoint into ./vicuna-hf
model_id = "lmsys/vicuna-13b-v1.5-16k"
snapshot_download(repo_id=model_id, local_dir="vicuna-hf",
                  local_dir_use_symlinks=False, revision="main")

# Fetch llama.cpp and the dependencies of its conversion script
subprocess.run(["git", "clone", "https://github.com/ggerganov/llama.cpp.git"])
subprocess.run(["pip", "install", "-r", "llama.cpp/requirements.txt"])

# Convert the checkpoint to a single half-precision gguf file
subprocess.run(["python", "llama.cpp/convert_hf_to_gguf.py", "vicuna-hf",
                "--outfile", "vicuna-13b-v1.5-16k.gguf", "--outtype", "f16"])
```

(modified from https://www.substratus.ai/blog/converting-hf-model-gguf-model)
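
To sanity-check the result, the `gguf` Python package (pulled in by llama.cpp's requirements.txt) can read the file's header back; a rough sketch, with the reader attributes as assumptions:

```python
# Sketch: print basic metadata from the converted file with the gguf package;
# GGUFReader and its fields/tensors attributes are assumptions.
from gguf import GGUFReader

reader = GGUFReader("vicuna-13b-v1.5-16k.gguf")
print(f"{len(reader.tensors)} tensors")
for key in list(reader.fields)[:8]:  # first few metadata keys
    print(key)
```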