chendren committed
Commit 6541baf · verified · 1 Parent(s): 1fc07fa

Upload test_model.py with huggingface_hub
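The commit message indicates the file was pushed programmatically rather than through the web UI. A minimal sketch of that kind of upload with the huggingface_hub client; the target repo_id and token handling are assumptions, not taken from this commit:

# Sketch only: assumes the file is pushed to the chendren/deepseek-dnd-lora model
# repo and that a token is available from `huggingface-cli login`.
from huggingface_hub import HfApi

api = HfApi()  # picks up the cached login token by default
api.upload_file(
    path_or_fileobj="test_model.py",       # local file to upload
    path_in_repo="test_model.py",          # destination path inside the repo
    repo_id="chendren/deepseek-dnd-lora",  # assumed target repo
    commit_message="Upload test_model.py with huggingface_hub",
)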

Files changed (1): test_model.py +8 -2
test_model.py CHANGED
@@ -1,8 +1,14 @@
 #!/usr/bin/env python3
+# Test script for the DeepSeek D&D LoRA model
+# This script uses the Hugging Face Hub to track usage metrics
+
+from huggingface_hub import snapshot_download
 import torch
 from peft import PeftModel, PeftConfig
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
+print("Loading model and tokenizer...")
+
 # Load the base model and tokenizer
 base_model_id = "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"
 model = AutoModelForCausalLM.from_pretrained(
@@ -12,8 +18,8 @@ model = AutoModelForCausalLM.from_pretrained(
 )
 tokenizer = AutoTokenizer.from_pretrained(base_model_id)
 
-# Load the LoRA adapter
-adapter_model_id = "chad/deepseek-dnd-lora"
+# Load the LoRA adapter - this will be tracked by HF Hub
+adapter_model_id = "chendren/deepseek-dnd-lora"
 model = PeftModel.from_pretrained(model, adapter_model_id)
 
 # Test prompts
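The diff truncates at the "# Test prompts" comment, so the actual prompts and generation call are not shown. For context, a self-contained sketch of how such a prompt test typically runs end to end; only the base-model and adapter IDs come from the committed script, while the from_pretrained kwargs, the prompt text, and the sampling settings are illustrative assumptions:

#!/usr/bin/env python3
# Sketch of a prompt test against the LoRA-adapted model from the diff above.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_model_id = "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"  # from the committed script
adapter_model_id = "chendren/deepseek-dnd-lora"            # from the committed script

tokenizer = AutoTokenizer.from_pretrained(base_model_id)
model = AutoModelForCausalLM.from_pretrained(
    base_model_id,
    torch_dtype=torch.float16,  # assumed; the diff hides the from_pretrained kwargs
    device_map="auto",          # assumed; requires the accelerate package
)
model = PeftModel.from_pretrained(model, adapter_model_id)
model.eval()

# Hypothetical test prompt; the committed prompts are not visible in the diff.
prompt = "Describe a tavern encounter for a level 3 D&D party."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=256, do_sample=True, temperature=0.7)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))

Because PeftModel wraps the base model, generate() runs with the LoRA weights applied; no merge step is needed for a quick functional check like this.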