Update app.py

app.py CHANGED
@@ -90,7 +90,7 @@ if tokenizer.pad_token is None:
     print(f"Tokenizer `pad_token` was None, set to `eos_token`: {tokenizer.eos_token}")
 
 
-@spaces.GPU(
+@spaces.GPU(duration=90)
 def generate_code(prompt: str) -> str:
     messages = [
         {"role": "system", "content": "You are a helpful and proficient Milkdrop HLSL coding assistant."},
@@ -119,7 +119,7 @@ def generate_code(prompt: str) -> str:
     with torch.no_grad():
        generated_ids = model.generate(
            **model_inputs, # Pass tokenized inputs
-           max_new_tokens=
+           max_new_tokens=4096,
            min_new_tokens=768,
            do_sample=True,
            temperature=0.7,
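For context, here is a minimal sketch of how the two fixed lines sit inside generate_code, assuming the rest of app.py follows the standard transformers chat pattern. Only the decorator, the message list, and the generate() keyword arguments appear in the diff; the model checkpoint name, the loading code, and the decode step below are illustrative assumptions.

import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "your-org/your-code-model"  # placeholder: the Space's actual checkpoint is not shown in the diff

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype=torch.bfloat16, device_map="auto")

if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
    print(f"Tokenizer `pad_token` was None, set to `eos_token`: {tokenizer.eos_token}")


@spaces.GPU(duration=90)  # request a ZeroGPU slice for up to 90 seconds per call
def generate_code(prompt: str) -> str:
    messages = [
        {"role": "system", "content": "You are a helpful and proficient Milkdrop HLSL coding assistant."},
        {"role": "user", "content": prompt},
    ]
    # Render the chat template to text, then tokenize onto the model's device.
    text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

    with torch.no_grad():
        generated_ids = model.generate(
            **model_inputs,       # Pass tokenized inputs
            max_new_tokens=4096,  # must be a concrete int, and >= min_new_tokens
            min_new_tokens=768,
            do_sample=True,
            temperature=0.7,
        )

    # Assumed decode step: strip the prompt tokens and return only the new text.
    new_tokens = generated_ids[0][model_inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

Both removed lines, `@spaces.GPU(` and `max_new_tokens=`, were syntactically incomplete, so the file could not have parsed before this commit. The new `duration=90` raises the per-call ZeroGPU allocation from the default 60 seconds, leaving headroom for the worst case of 4096 sampled tokens.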