manichandrareddy committed
Commit · d3a3bf1 · 1 Parent(s): fc4cb2d

Updated app with Brain Layer + HuggingFace model improvements

Files changed:
- .gradio/certificate.pem +31 -0
- app.py +47 -31
- requirements.txt +1 -1
- trial.py +17 -0
- utils/__pycache__/planner.cpython-312.pyc +0 -0
- utils/planner.py +9 -11
.gradio/certificate.pem
ADDED
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
app.py
CHANGED
@@ -9,52 +9,68 @@ from io import BytesIO
 
 from utils.planner import extract_scene_plan  # 🧠 Brain Layer
 
-# Hugging Face
+# Hugging Face keys
 HF_API_KEY = os.getenv("HF_API_KEY")
-SDXL_MODEL_ID = "
+SDXL_MODEL_ID = "fal-ai/fast-sdxl/image-to-image"  # ✅ Correct model for image-to-image
 SDXL_API_URL = f"https://api-inference.huggingface.co/models/{SDXL_MODEL_ID}"
 HEADERS = {"Authorization": f"Bearer {HF_API_KEY}"}
 
-# Image generation (
+# Image generation (img2img)
 def process_image(prompt, image, num_variations):
+    try:
+        print("🧠 Prompt received:", prompt)
+
+        # Step 1: Brain Layer
+        reasoning_json = extract_scene_plan(prompt)
+        print("🧠 Scene plan extracted:", reasoning_json)
+
+        # Step 2: Encode input image
+        buffered = BytesIO()
+        image.save(buffered, format="JPEG")
+        img_bytes = buffered.getvalue()
+        encoded_image = base64.b64encode(img_bytes).decode("utf-8")
+
+        # Step 3: Send image + prompt to HF API
+        outputs = []
+        for i in range(num_variations):
+            payload = {
                 "image": encoded_image,
+                "prompt": prompt,
+                "negative_prompt": "blurry, deformed, cropped",
+                "num_inference_steps": 25,
+                "guidance_scale": 7.5
+            }
 
+            print(f"Sending request to HF (variation {i+1})")
             response = requests.post(SDXL_API_URL, headers=HEADERS, json=payload)
+
             if response.status_code == 200:
+                try:
+                    result_json = response.json()
+                    if "images" in result_json:
+                        base64_img = result_json["images"][0]
+                        result_image = Image.open(BytesIO(base64.b64decode(base64_img)))
+                        outputs.append(result_image)
+                        print(f"✅ Decoded image variation {i+1} successfully")
+                    else:
+                        print(f"⚠️ No 'images' key found in response")
+                        outputs.append("❌ No image in response.")
+                except Exception as decode_err:
+                    print("❌ Image decode error:", decode_err)
+                    outputs.append("❌ Failed to decode image.")
             else:
+                print(f"❌ HF API error: {response.status_code} - {response.text}")
                 outputs.append(f"Error {response.status_code}: {response.text}")
-    except Exception as e:
-        outputs.append(f"Exception: {e}")
 
+        return outputs, reasoning_json
 
+    except Exception as e:
+        print("❌ General Exception in process_image:", e)
+        return ["Processing error occurred"], {"error": str(e)}
 
 # Gradio UI
 with gr.Blocks() as demo:
-    gr.Markdown("# 🧠 NewCrux AI Demo: Image-to-Image using 
+    gr.Markdown("# 🧠 NewCrux AI Demo: Image-to-Image using Fast SDXL + Brain Layer")
 
     with gr.Row():
         with gr.Column():
@@ -78,5 +94,5 @@ with gr.Blocks() as demo:
     outputs=[output_gallery, json_output]
 )
 
-demo.launch()
+demo.launch(share=True)
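The core of the new process_image() is a base64 round-trip: the uploaded PIL image is JPEG-encoded and base64-encoded into the JSON payload, and the base64 string in a successful response is decoded back into a PIL image. A minimal, self-contained sketch of that round-trip (no API call; a generated placeholder stands in for the uploaded image):

import base64
from io import BytesIO

from PIL import Image

# A solid-colour placeholder stands in for the user's uploaded image.
image = Image.new("RGB", (64, 64), color=(200, 120, 40))

# Outbound: PIL image -> JPEG bytes -> base64 string (what payload["image"] carries).
buffered = BytesIO()
image.save(buffered, format="JPEG")
encoded_image = base64.b64encode(buffered.getvalue()).decode("utf-8")

# Inbound: base64 string -> bytes -> PIL image (what the 200-response branch does).
result_image = Image.open(BytesIO(base64.b64decode(encoded_image)))
print(result_image.size, result_image.mode)  # (64, 64) RGB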
requirements.txt
CHANGED
@@ -2,4 +2,4 @@ gradio
 requests
 Pillow
 openai>=1.0.0,<2.0.0
-
+python-dotenv
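The only requirements change is the new python-dotenv dependency, used by trial.py and the updated utils/planner.py to pull API keys from a local .env file. A minimal sketch of that setup, assuming a .env file in the project root (key names from this repo, values are placeholders):

# Contents of a local .env file (placeholder values; never commit real keys):
#   OPENAI_API_KEY=sk-...
#   HF_API_KEY=hf_...
import os

from dotenv import load_dotenv

load_dotenv()  # copies variables from a nearby .env file into the process environment
print("OPENAI_API_KEY set:", os.getenv("OPENAI_API_KEY") is not None)
print("HF_API_KEY set:", os.getenv("HF_API_KEY") is not None)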
trial.py
ADDED
@@ -0,0 +1,17 @@
+# test_openai.py
+
+import os
+from openai import OpenAI
+
+from dotenv import load_dotenv
+load_dotenv()
+# Set your API key directly or use an environment variable
+client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+
+# Simple test: ask ChatGPT something basic
+response = client.chat.completions.create(
+    model="gpt-3.5-turbo",
+    messages=[{"role": "user", "content": "Say hello"}],
+)
+
+print(response.choices[0].message.content)
utils/__pycache__/planner.cpython-312.pyc
ADDED
Binary file (1.9 kB).
utils/planner.py
CHANGED
@@ -1,9 +1,11 @@
 # utils/planner.py
 
+import openai
 import os
-import json
-import logging
 from openai import OpenAI
+from dotenv import load_dotenv
+import json
+load_dotenv()
 
 client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
 
@@ -21,7 +23,7 @@ Respond ONLY in raw JSON format. Do NOT include explanations.
 def extract_scene_plan(prompt: str) -> dict:
     try:
         response = client.chat.completions.create(
-            model="gpt-
+            model="gpt-4o-mini-2024-07-18",
            messages=[
                {"role": "system", "content": SYSTEM_INSTRUCTIONS},
                {"role": "user", "content": prompt}
@@ -29,16 +31,12 @@ def extract_scene_plan(prompt: str) -> dict:
            temperature=0.3,
            max_tokens=500
        )
-        # Remove common formatting issues
-        content = content.strip("`").replace("json", "")
-        return json.loads(content)
+        json_output = response.choices[0].message.content
+        print("🧠 Scene Plan (Raw):", json_output)
+        return json.loads(json_output)  # Be cautious: Use `json.loads()` if possible
 
     except Exception as e:
+        print("❌ extract_scene_plan() Error:", e)
         return {
             "scene": None,
             "subject": None,