Update app.py
app.py CHANGED
@@ -25,7 +25,7 @@ torch.backends.cudnn.benchmark = True
 torch.set_float32_matmul_precision("high")

 # --- Model and Tokenizer Configuration ---
-model_name = "
+model_name = "InferenceIllusionist/MilkDropLM-32b-v0.3"

 # --- Quantization Configuration (Example: 4-bit) ---
 # This section is included based on our previous discussion.
@@ -93,7 +93,7 @@ if tokenizer.pad_token is None:
 @spaces.GPU(required=True)
 def generate_code(prompt: str) -> str:
     messages = [
-        {"role": "system", "content": "You are a helpful and proficient coding assistant."},
+        {"role": "system", "content": "You are a helpful and proficient Milkdrop HLSL coding assistant."},
         {"role": "user", "content": prompt}
     ]
     try:
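For context, a minimal sketch of how the changed lines might sit in the full app.py. Only model_name, the @spaces.GPU(required=True) decorator, the system/user message list, and the tokenizer.pad_token check come from the diff; the BitsAndBytesConfig values, the chat-template call, and the generation parameters are assumptions filled in for illustration, not the Space's actual code.

import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_name = "InferenceIllusionist/MilkDropLM-32b-v0.3"

# --- Quantization Configuration (Example: 4-bit) ---
# Hypothetical values for the "Example: 4-bit" section referenced in the diff.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="auto",
)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

@spaces.GPU(required=True)
def generate_code(prompt: str) -> str:
    messages = [
        {"role": "system", "content": "You are a helpful and proficient Milkdrop HLSL coding assistant."},
        {"role": "user", "content": prompt},
    ]
    try:
        # Build the chat-formatted prompt and generate (parameters assumed).
        inputs = tokenizer.apply_chat_template(
            messages, add_generation_prompt=True, return_tensors="pt"
        ).to(model.device)
        outputs = model.generate(inputs, max_new_tokens=1024, do_sample=True)
        # Decode only the newly generated tokens, skipping the prompt.
        return tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)
    except Exception as exc:
        return f"Generation failed: {exc}"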