Update app.py
app.py CHANGED
@@ -3,13 +3,31 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
 # Set model ID
-# comment
-
-#
-# model_id = "
-
+# comment/uncomment the model you want to use
+
+# GPT-2 (very small, general-purpose, mainly for testing or learning purposes)
+# model_id = "gpt2"
+
+# DeepSeek Coder 1.3B (base version, no instruction fine-tuning; better for raw code generation tasks)
+# model_id = "deepseek-ai/deepseek-coder-1.3b"
+
+# DeepSeek Coder 1.3B Base (same as above; explicit base naming, safe to use)
+# model_id = "deepseek-ai/deepseek-coder-1.3b-base"
 
+# CodeLlama 7B Instruct (powerful code generation model from Meta, instruction-tuned)
+# model_id = "codellama/CodeLlama-7b-Instruct-hf"
 
+# Meta-Llama 3.1 8B Instruct (very powerful general-purpose model, instruction-following, also decent for code & NLP)
+# model_id = "meta-llama/Llama-3.1-8B-Instruct"
+
+# DeepSeek-R1 + Qwen3 8B (highly capable multi-purpose model; great for reasoning, coding, general Q&A)
+# model_id = "deepseek-ai/DeepSeek-R1-0528-Qwen3-8B"
+
+# Qwen2.5-VL-7B Instruct (multimodal: can handle text + images, instruction-tuned; mostly for vision-language tasks)
+# model_id = "Qwen/Qwen2.5-VL-7B-Instruct"
+
+# DeepSeek Coder 1.3B Instruct (great for both natural language and coding tasks)
+model_id = "deepseek-ai/deepseek-coder-1.3b-instruct"
 # Load tokenizer and model
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(
@@ -30,7 +48,7 @@ def generate_code(prompt):
     with torch.no_grad():
         outputs = model.generate(
             **inputs,
-            max_new_tokens=
+            max_new_tokens=200,
             temperature=0.7
         )
 
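
For context, here is a minimal sketch of the pipeline this commit configures, using the newly selected model and the max_new_tokens value it sets. Only the lines visible in the diff are confirmed; the from_pretrained keyword arguments, the body of generate_code, and the decode step are assumptions filled in so the sketch runs end to end.

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# The model this commit switches to (the other options stay commented out in app.py)
model_id = "deepseek-ai/deepseek-coder-1.3b-instruct"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,  # assumption: the diff truncates these kwargs
    device_map="auto",          # assumption: requires the accelerate package
)

def generate_code(prompt):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=200,  # the value this commit sets
            do_sample=True,      # assumption: enables sampling so temperature applies
            temperature=0.7,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

print(generate_code("# Write a Python function that reverses a string\n"))

One caveat: in transformers, temperature only changes anything when sampling is enabled; under the default do_sample=False the call falls back to greedy decoding and temperature=0.7 is ignored, which is why the sketch passes do_sample=True explicitly.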