Runtime error
Update app.py
app.py CHANGED
@@ -14,14 +14,10 @@ import gradio as gr
 import secrets

 # In-memory store for valid API keys.
-# For production, consider using a persistent storage solution.
 valid_api_keys = set()

 def generate_api_key():
-    """
-    Generate a secure 32-character hexadecimal API key,
-    store it in the valid_api_keys set, and return it.
-    """
+    """Generate a secure 32-character hexadecimal API key and store it."""
     key = secrets.token_hex(16)
     valid_api_keys.add(key)
     return key
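For context on what this hunk leaves in place: generate_api_key() draws 16 random bytes with secrets.token_hex, which yields a 32-character hexadecimal string, and records it in the module-level valid_api_keys set that llama_vision_inference checks later. A minimal usage sketch of that flow, repeating the two definitions from the diff so it runs on its own:

    import secrets

    valid_api_keys = set()

    def generate_api_key():
        """Generate a secure 32-character hexadecimal API key and store it."""
        key = secrets.token_hex(16)   # 16 random bytes -> 32 hex characters
        valid_api_keys.add(key)
        return key

    key = generate_api_key()
    assert len(key) == 32 and key in valid_api_keys
    # Keys live only in process memory, so they are lost whenever the Space restarts.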
@@ -30,20 +26,19 @@ def llama_vision_inference(api_key, image):
     """
     Dummy inference function for Llama Vision model.
     Replace the simulated processing below with actual model loading and inference.
+    Ensure that your model is explicitly loaded on CPU.
     """
-    # Validate the API key.
     if not api_key.strip():
         return "Error: API key is required."
     if api_key not in valid_api_keys:
         return "Error: Invalid API key. Please generate a valid key first."

-    #
-    #
-    #
-    #
-
-    #
-    # For this example, we simulate the output:
+    # Example: Force CPU usage when loading the model.
+    # from llama_vision import LlamaVisionModel
+    # model = LlamaVisionModel.from_pretrained("llama-vision-latest", device="cpu")
+    # result = model.infer(image)
+
+    # Simulated output for demonstration:
     result = (
         "Simulated Model Output:\n"
         "- Detected GUI elements: [button, menu, text field]\n"
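The commented-out import in the new hunk refers to a llama_vision module that is a placeholder rather than a published package. To illustrate the pattern the comment points at, the sketch below forces CPU placement using the Hugging Face transformers API, with a small public captioning model as a stand-in; the checkpoint choice and the prompt-free call are assumptions for illustration, not something this diff ships.

    import torch
    from transformers import BlipProcessor, BlipForConditionalGeneration

    # Stand-in checkpoint (small enough for CPU); swap in your own vision model.
    model_id = "Salesforce/blip-image-captioning-base"

    processor = BlipProcessor.from_pretrained(model_id)
    model = BlipForConditionalGeneration.from_pretrained(model_id).to("cpu")  # explicit CPU placement
    model.eval()

    def describe_image(image):
        inputs = processor(images=image, return_tensors="pt").to("cpu")
        with torch.no_grad():                        # no gradients needed at inference time
            output_ids = model.generate(**inputs, max_new_tokens=30)
        return processor.decode(output_ids[0], skip_special_tokens=True)

In the Space itself, the string returned by such a call would take the place of the simulated result.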
@@ -54,7 +49,7 @@ def llama_vision_inference(api_key, image):

 with gr.Blocks(title="Manus π") as demo:
     gr.Markdown("# Manus π")
-    gr.Markdown("This Gradio Space lets you generate an API key and perform vision inference using the Llama Vision model.")
+    gr.Markdown("This Gradio Space lets you generate an API key and perform vision inference using the Llama Vision model (running in CPU mode).")

     with gr.Tabs():
         with gr.TabItem("API Key Generator"):
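The run_button.click(...) wiring in the final hunk refers to components (api_key_input, image_input, output_text, run_button) whose definitions fall outside this diff. A hedged reconstruction of how such a two-tab layout is typically assembled in Gradio is shown below; the component names, labels, and tab title marked as assumed are guesses, not the file's actual code.

    import gradio as gr

    with gr.Blocks(title="Manus π") as demo:
        gr.Markdown("# Manus π")
        with gr.Tabs():
            with gr.TabItem("API Key Generator"):
                generate_button = gr.Button("Generate API Key")      # name assumed
                key_output = gr.Textbox(label="Your API Key")        # name assumed
                generate_button.click(fn=generate_api_key, inputs=None, outputs=key_output)
            with gr.TabItem("Llama Vision Inference"):                # tab title assumed
                api_key_input = gr.Textbox(label="API Key")
                image_input = gr.Image(type="pil", label="Input Image")
                run_button = gr.Button("Run Inference")
                output_text = gr.Textbox(label="Result")
                run_button.click(fn=llama_vision_inference, inputs=[api_key_input, image_input], outputs=output_text)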
@@ -72,4 +67,5 @@ with gr.Blocks(title="Manus π") as demo:
             run_button.click(fn=llama_vision_inference, inputs=[api_key_input, image_input], outputs=output_text)

 if __name__ == "__main__":
-
+    # Disable SSR to avoid potential GPU-related initialization issues.
+    demo.launch(ssr=False)
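One caveat on the final hunk: in recent Gradio releases the launch() switch for server-side rendering is named ssr_mode (it can also be set through the GRADIO_SSR_MODE environment variable). If the installed version does not accept ssr as a keyword argument, the call above raises a TypeError at startup, which would match the Space's runtime-error status. A minimal sketch of the alternative spelling, assuming Gradio 5.x:

    if __name__ == "__main__":
        # Disable server-side rendering; helpful when SSR startup fails on a CPU-only Space.
        demo.launch(ssr_mode=False)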