Update app.py
app.py CHANGED
@@ -209,19 +209,18 @@ def quantize(model_path, repo_id, quant_method=None):
 
 def create_readme(repo_name, base_model_name, lora_model_name, quant_methods):
     readme_path = os.path.join("output", repo_name, "README.md")
-    readme_template = 
----
+    readme_template = """---
 tags:
 - autotrain
 - text-generation-inference
 - text-generation
-- peft{
+- peft{quantization}
 library_name: transformers
 base_model: {base_model_name}
 widget:
 - messages:
   - role: user
-
+    content: What is your favorite condiment?
 license: other
 datasets:
 - {lora_model_name}
@@ -231,8 +230,16 @@ datasets:
 base_model: {base_model_name}
 lora_model: {lora_model_name}
 quant_methods: {quant_methods}
-created_at: {
-created_by: [Steven10429/apply_lora_and_quantize](https://github.com/Steven10429/apply_lora_and_quantize)
+created_at: {created_at}
+created_by: [Steven10429/apply_lora_and_quantize](https://github.com/Steven10429/apply_lora_and_quantize)
+""".format(
+    quantization="\n- quantization" if len(quant_methods) > 0 else "",
+    base_model_name=base_model_name,
+    lora_model_name=lora_model_name,
+    repo_name=repo_name,
+    quant_methods=quant_methods,
+    created_at=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+)
 
     with open(readme_path, "w") as f:
         f.write(readme_template)
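For context, the rewritten create_readme fills a single triple-quoted template with str.format, so every literal {name} in the YAML front matter needs a matching keyword argument; extra keywords such as repo_name are silently ignored by format, which is why passing it is harmless even if the visible part of the template never uses it. Below is a minimal, self-contained sketch of that pattern; the helper name, the condensed template, and the sample values are illustrative, not code from app.py:

import time

def render_readme_front_matter(base_model_name, lora_model_name, repo_name, quant_methods):
    # Condensed stand-in for the template in create_readme: str.format fills the
    # {placeholders}, and the "quantization" tag is only appended when at least
    # one quant method was requested.
    template = """---
tags:
- autotrain
- text-generation
- peft{quantization}
base_model: {base_model_name}
datasets:
- {lora_model_name}
---
quant_methods: {quant_methods}
created_at: {created_at}
"""
    return template.format(
        quantization="\n- quantization" if len(quant_methods) > 0 else "",
        base_model_name=base_model_name,
        lora_model_name=lora_model_name,
        repo_name=repo_name,  # no matching placeholder here; str.format ignores extras
        quant_methods=quant_methods,
        created_at=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
    )

# Hypothetical values, only to show the rendered front matter.
print(render_readme_front_matter(
    "meta-llama/Llama-2-7b-hf", "user/my-lora-adapter", "user/merged-model", ["q4_k_m"]
))

Note that the "\n- quantization" value turns the single "- peft{quantization}" line into two tags, "- peft" and "- quantization", whenever any quant method is selected.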
@@ -284,9 +291,11 @@ def process_model(base_model_name, lora_model_name, repo_name, quant_methods, hf
             num_workers=os.cpu_count() if os.cpu_count() > 4 else 4,
             print_report_every=10,
         )
+        log("Upload completed.")
 
         # rm -rf model_path
         shutil.rmtree(model_path)
+        log("Removed model from local")
 
         return "\n".join(current_logs)
     except Exception as e:
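The two new log(...) calls add progress messages to whatever process_model later returns via "\n".join(current_logs). The real helper is defined elsewhere in app.py and is not shown in this diff; a minimal sketch of the assumed behavior (append to a shared current_logs list and echo to the console):

current_logs = []  # assumed module-level buffer, matching "\n".join(current_logs) above

def log(msg):
    # Assumed behavior: keep the message for the report returned to the UI
    # and print it so it also shows up in the console/Space logs.
    print(msg)
    current_logs.append(msg)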