nexusbert committed
Commit 3050408 · 1 Parent(s): 859d5b2

space issue fix

Files changed (2)
  1. Dockerfile +11 -25
  2. app.py +3 -3
Dockerfile CHANGED
@@ -23,37 +23,23 @@ RUN pip install --no-cache-dir -r requirements.txt
 # Hugging Face + model tools
 RUN pip install --no-cache-dir huggingface-hub sentencepiece accelerate sacremoses

-# Hugging Face cache environment
-ENV HF_HOME=/models/huggingface \
-    TRANSFORMERS_CACHE=/models/huggingface \
-    HUGGINGFACE_HUB_CACHE=/models/huggingface \
-    HF_HUB_CACHE=/models/huggingface
-
-# Create cache dir with proper permissions
-RUN mkdir -p /models/huggingface && \
-    chmod -R 777 /models/huggingface && \
-    chown -R 1000:1000 /models/huggingface
-
-# Pre-download models at build time (MedBridge AI specific models)
-RUN python -c "from huggingface_hub import snapshot_download; snapshot_download(repo_id='microsoft/BioGPT-Large')" \
-    && python -c "from huggingface_hub import snapshot_download; snapshot_download(repo_id='google/vit-base-patch16-224')" \
-    && find /models/huggingface -name '*.lock' -delete
-
-# Preload tokenizers (avoid runtime delays)
-RUN python -c "from transformers import AutoTokenizer; AutoTokenizer.from_pretrained('microsoft/BioGPT-Large', use_fast=True)" \
-    && python -c "from transformers import ViTFeatureExtractor; ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224')"
+# Hugging Face cache environment (using /tmp for runtime downloads)
+ENV HF_HOME=/tmp/huggingface \
+    TRANSFORMERS_CACHE=/tmp/huggingface \
+    HUGGINGFACE_HUB_CACHE=/tmp/huggingface \
+    HF_HUB_CACHE=/tmp/huggingface
+
+# Create cache dir with proper permissions (using /tmp for Hugging Face Spaces)
+RUN mkdir -p /tmp/huggingface && \
+    chmod -R 777 /tmp/huggingface
+
+# Models will be downloaded at runtime to avoid exceeding storage limit

 # Copy project files
 COPY . .

-# Fix permissions for runtime
-RUN chmod -R 777 /models/huggingface
-
 # Expose agent port
 EXPOSE 7860

-# Run agent as non-root for security
-USER 1000
-
 CMD ["python", "app.py"]
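With the build-time snapshot_download steps removed, the weights now have to be fetched the first time the agent needs them. Below is a minimal sketch of what that runtime download could look like, assuming app.py pulls the same two models (microsoft/BioGPT-Large and google/vit-base-patch16-224) into the /tmp/huggingface cache configured via HF_HOME; the helper name ensure_models_downloaded is illustrative and not taken from the actual app.py.

```python
# Illustrative runtime download helper (not from the actual app.py).
import os

from huggingface_hub import snapshot_download

# HF_HOME is set to /tmp/huggingface in the Dockerfile above.
CACHE_DIR = os.environ.get("HF_HOME", "/tmp/huggingface")

# Same models that were previously pre-downloaded at image build time.
MODEL_REPOS = ("microsoft/BioGPT-Large", "google/vit-base-patch16-224")


def ensure_models_downloaded() -> None:
    """Fetch model weights on first use; already-cached files are reused on later calls."""
    os.makedirs(CACHE_DIR, exist_ok=True)
    for repo_id in MODEL_REPOS:
        snapshot_download(repo_id=repo_id, cache_dir=CACHE_DIR)
```

The trade-off is a slower first request in exchange for a much smaller image, which is what the storage-limit comment in the new Dockerfile is pointing at.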
 
app.py CHANGED
@@ -34,7 +34,7 @@ agent = Agent(
     name=AGENT_NAME,
     seed=AGENT_SEED,
     port=7860,
-    endpoint=["http://localhost:7860/submit"],
+    endpoint=["https://nexusbert-medbridge.hf.space/submit"],
 )

 fund_agent_if_low(agent.wallet.address())
@@ -250,11 +250,11 @@ async def handle_message(ctx: Context, sender: str, msg: ChatMessage):
                 result = analyze_medical_image(response.content)
                 response_message = create_text_chat(result)
                 await ctx.send(sender, response_message)
-            else:
+            else:
                 result = process_medical_query(item.text)
                 response_message = create_text_chat(result)
                 await ctx.send(sender, response_message)
-    except Exception as e:
+    except Exception as e:
         error_msg = create_text_chat(
             f"Error processing your request: {str(e)}"
         )
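The endpoint change is easier to read next to the surrounding agent setup. Here is a minimal sketch of the updated uAgents construction, assuming AGENT_NAME and AGENT_SEED are defined earlier in app.py; the placeholder values below are not the real ones.

```python
# Sketch of the updated uAgents setup; name and seed values are placeholders.
from uagents import Agent
from uagents.setup import fund_agent_if_low

AGENT_NAME = "medbridge-agent"  # placeholder, defined elsewhere in app.py
AGENT_SEED = "example-seed"     # placeholder, defined elsewhere in app.py

agent = Agent(
    name=AGENT_NAME,
    seed=AGENT_SEED,
    port=7860,
    # The agent must advertise an address reachable from outside the container,
    # so the public Space URL replaces the old localhost endpoint.
    endpoint=["https://nexusbert-medbridge.hf.space/submit"],
)

fund_agent_if_low(agent.wallet.address())
```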