nyasukun committed on
Commit
5b17871
·
1 Parent(s): 0bfe434
Files changed (2) hide show
  1. app.py +41 -14
  2. requirements.txt +1 -1
app.py CHANGED
@@ -45,20 +45,47 @@ try:
45
 
46
  except Exception as e:
47
  print(f"Error initializing model {MODEL_NAME}: {str(e)}")
48
- print("Falling back to tiny-gpt2...")
49
- MODEL_NAME = "sshleifer/tiny-gpt2"
50
- tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
51
- text_pipeline = pipeline(
52
- "text-generation",
53
- model=MODEL_NAME,
54
- tokenizer=tokenizer,
55
- torch_dtype=torch.bfloat16,
56
- device_map="auto",
57
- trust_remote_code=True
58
- )
59
- model = text_pipeline.model
60
- tok = text_pipeline.tokenizer
61
- print(f"Fallback model loaded: {MODEL_NAME}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
 
63
  # Log device information
64
  if hasattr(model, 'device'):
 
45
 
46
  except Exception as e:
47
  print(f"Error initializing model {MODEL_NAME}: {str(e)}")
48
+ print("Trying with simplified parameters...")
49
+
50
+ try:
51
+ # Try with simpler parameters
52
+ text_pipeline = pipeline(
53
+ "text-generation",
54
+ model=MODEL_NAME,
55
+ trust_remote_code=True
56
+ )
57
+ model = text_pipeline.model
58
+ tok = text_pipeline.tokenizer
59
+ print(f"Model loaded with simplified parameters: {MODEL_NAME}")
60
+
61
+ except Exception as e2:
62
+ print(f"Second attempt failed: {str(e2)}")
63
+ print("Falling back to distilgpt2 (uses safetensors)...")
64
+
65
+ # Use distilgpt2 which uses safetensors format and is more compatible
66
+ MODEL_NAME = "distilgpt2"
67
+ try:
68
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
69
+ text_pipeline = pipeline(
70
+ "text-generation",
71
+ model=MODEL_NAME,
72
+ tokenizer=tokenizer
73
+ )
74
+ model = text_pipeline.model
75
+ tok = text_pipeline.tokenizer
76
+ print(f"Fallback model loaded: {MODEL_NAME}")
77
+
78
+ except Exception as e3:
79
+ print(f"Fallback also failed: {str(e3)}")
80
+ print("Trying direct model loading as last resort...")
81
+
82
+ # Last resort: direct loading without pipeline
83
+ try:
84
+ tok = AutoTokenizer.from_pretrained(MODEL_NAME)
85
+ model = AutoModelForCausalLM.from_pretrained(MODEL_NAME).eval()
86
+ print(f"Direct loading successful: {MODEL_NAME}")
87
+ except Exception as e4:
88
+ raise RuntimeError(f"All model loading attempts failed. Last error: {str(e4)}")
89
 
90
  # Log device information
91
  if hasattr(model, 'device'):
requirements.txt CHANGED
@@ -1,6 +1,6 @@
1
  # Core dependencies for LLM Threat Association Analysis (ZeroGPU compatible)
2
  gradio>=4.0.0
3
- torch==2.4.0
4
  transformers>=4.30.0
5
  pandas>=2.0.0
6
  accelerate>=0.26.0
 
1
  # Core dependencies for LLM Threat Association Analysis (ZeroGPU compatible)
2
  gradio>=4.0.0
3
+ torch==2.5.1
4
  transformers>=4.30.0
5
  pandas>=2.0.0
6
  accelerate>=0.26.0