marahmerah committed on
Commit 95c9961 · verified · 1 Parent(s): 20086a8

Update app.py

Files changed (1)
  1. app.py +55 -95
app.py CHANGED
@@ -1,126 +1,85 @@
 import gradio as gr
-import torch
+import requests
+import io
 import random
 import os
-import io
+import time
 from PIL import Image
 from deep_translator import GoogleTranslator
-from diffusers import DiffusionPipeline
-from huggingface_hub import hf_hub_download, login
-
-# Hugging Face authentication
-HF_TOKEN = os.getenv("HF_READ_TOKEN")  # Replace with your token or set the env variable
-login(token=HF_TOKEN)
-
-# Model configuration
-BASE_MODEL = "black-forest-labs/FLUX.1-dev"
-LORA_REPO = "burhansyam/uncen"
-LORA_WEIGHTS_NAME = "uncen.safetensors"  # Change if the file name differs
-torch_dtype = torch.float16  # Use float16 for broader compatibility
 
-# Initialize the pipeline with LoRA
-def init_pipeline():
-    # Load the base model
-    pipe = DiffusionPipeline.from_pretrained(
-        BASE_MODEL,
-        torch_dtype=torch_dtype,
-        use_auth_token=HF_TOKEN
-    )
-
-    # Load the LoRA weights
-    lora_path = hf_hub_download(
-        repo_id=LORA_REPO,
-        filename=LORA_WEIGHTS_NAME,
-        token=HF_TOKEN
-    )
-    pipe.load_lora_weights(lora_path, adapter_name="uncen")
-
-    # GPU optimizations if available
-    if torch.cuda.is_available():
-        pipe.to("cuda")
-        try:
-            pipe.enable_xformers_memory_efficient_attention()
-        except:
-            print("Xformers is not available, continuing without the optimization")
-
-    return pipe
+# Project by Nymbo
 
-# Initialize the pipeline
-try:
-    pipe = init_pipeline()
-except Exception as e:
-    raise gr.Error(f"Failed to load the model: {str(e)}. Make sure the access token is valid and you have permission.")
+API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
+API_TOKEN = os.getenv("HF_READ_TOKEN")
+headers = {"Authorization": f"Bearer {API_TOKEN}"}
+timeout = 100
 
 def convert_to_png(image):
-    """Convert an image to PNG format"""
+    """Convert any image format to true PNG format"""
     png_buffer = io.BytesIO()
     if image.mode == 'RGBA':
+        # If image has alpha channel, save as PNG with transparency
         image.save(png_buffer, format='PNG', optimize=True)
     else:
+        # Convert to RGB first if not in RGB/RGBA mode
         if image.mode != 'RGB':
            image = image.convert('RGB')
        image.save(png_buffer, format='PNG', optimize=True)
     png_buffer.seek(0)
     return Image.open(png_buffer)
 
-def generate_image(
-    prompt,
-    negative_prompt="",
-    steps=35,
-    cfg_scale=7,
-    sampler="DPM++ 2M Karras",
-    seed=-1,
-    width=1024,
-    height=1024
-):
+def query(prompt, is_negative=False, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras",
+          seed=-1, strength=0.7, width=1024, height=1024):
     if not prompt:
         return None
 
-    # Translate the prompt if it is in Indonesian
+    key = random.randint(0, 999)
+    API_TOKEN = random.choice([os.getenv("HF_READ_TOKEN")])
+    headers = {"Authorization": f"Bearer {API_TOKEN}"}
+
+    # Translate prompt
     try:
-        translated_prompt = GoogleTranslator(source='id', target='en').translate(prompt)
-        prompt = f"{translated_prompt} | ultra detail, ultra quality, masterpiece"
-    except:
-        prompt = f"{prompt} | ultra detail, ultra quality, masterpiece"
+        prompt = GoogleTranslator(source='id', target='en').translate(prompt)
+        print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')
+        prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
+    except Exception as e:
+        print(f"Translation error: {e}")
 
-    # Set the generator with a seed
-    generator = torch.Generator(device="cuda" if torch.cuda.is_available() else "cpu")
-    if seed == -1:
-        seed = random.randint(1, 1000000000)
-    generator.manual_seed(seed)
+    print(f'\033[1mGeneration {key}:\033[0m {prompt}')
 
-    # Map sampler names to scheduler settings
-    sampler_config = {
-        "DPM++ 2M Karras": {"use_karras_sigmas": True},
-        "DPM++ SDE Karras": {"use_karras_sigmas": True},
-        "Euler": {},
-        "Euler a": {"use_karras_sigmas": False},
-        "Heun": {},
-        "DDIM": {}
+    payload = {
+        "inputs": prompt,
+        "is_negative": is_negative,
+        "steps": steps,
+        "cfg_scale": cfg_scale,
+        "seed": seed if seed != -1 else random.randint(1, 1000000000),
+        "strength": strength,
+        "parameters": {"width": width, "height": height}
     }
-
+
     try:
-        # Generate the image with LoRA
-        result = pipe(
-            prompt=prompt,
-            negative_prompt=negative_prompt,
-            num_inference_steps=steps,
-            guidance_scale=cfg_scale,
-            width=width,
-            height=height,
-            generator=generator,
-            cross_attention_kwargs={"scale": 1.0},  # Controls the LoRA strength
-            **sampler_config[sampler]
-        )
+        response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
+        response.raise_for_status()
+
+        # Convert directly to PNG without intermediate format
+        img = Image.open(io.BytesIO(response.content))
+        png_img = convert_to_png(img)
 
-        # Convert to PNG
-        png_img = convert_to_png(result.images[0])
+        print(f'\033[1mGeneration {key} completed as PNG!\033[0m')
         return png_img
 
+    except requests.exceptions.RequestException as e:
+        print(f"API Error: {e}")
+        if hasattr(e, 'response') and e.response:
+            if e.response.status_code == 503:
+                raise gr.Error("503: Model is loading, please try again later")
+            raise gr.Error(f"{e.response.status_code}: {e.response.text}")
+        raise gr.Error("Network error occurred")
     except Exception as e:
-        raise gr.Error(f"Image generation failed: {str(e)}")
+        print(f"Image processing error: {e}")
+        raise gr.Error(f"Image processing failed: {str(e)}")
 
-# Gradio interface
+# Light theme CSS
 css = """
 #app-container {
     max-width: 800px;
@@ -152,7 +111,7 @@ h1 {
 """
 
 with gr.Blocks(theme=gr.themes.Default(primary_hue="green"), css=css) as app:
-    gr.HTML("<center><h1>FLUX.1-Dev with Uncensored LoRA</h1></center>")
+    gr.HTML("<center><h1>FLUX.1-Dev (PNG Output)</h1></center>")
 
     with gr.Column(elem_id="app-container"):
         with gr.Row():
@@ -178,6 +137,7 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="green"), css=css) as app:
                 steps = gr.Slider(35, label="Steps", minimum=10, maximum=100, step=1)
                 cfg = gr.Slider(7.0, label="CFG Scale", minimum=1.0, maximum=20.0, step=0.5)
                 with gr.Row():
+                    strength = gr.Slider(0.7, label="Strength", minimum=0.1, maximum=1.0, step=0.01)
                     seed = gr.Number(-1, label="Seed (-1 for random)")
                     method = gr.Radio(
                         ["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"],
@@ -191,13 +151,13 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="green"), css=css) as app:
         output_image = gr.Image(
             type="pil",
             label="Generated PNG Image",
-            format="png",
+            format="png",  # Explicitly set output format
             elem_id="gallery"
         )
 
         generate_btn.click(
-            fn=generate_image,
-            inputs=[text_prompt, negative_prompt, steps, cfg, method, seed, width, height],
+            fn=query,
+            inputs=[text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height],
             outputs=output_image
         )
 
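
For reference, the rewritten app.py sends prompts to the hosted Inference API endpoint above instead of loading the model locally. A minimal standalone sketch of that request pattern follows; the prompt text and output filename are illustrative, and it assumes HF_READ_TOKEN is set in the environment:

import io
import os

import requests
from PIL import Image

API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
headers = {"Authorization": f"Bearer {os.getenv('HF_READ_TOKEN')}"}

# Text-to-image models on the Inference API return raw image bytes.
payload = {"inputs": "a watercolor painting of a red bicycle"}
response = requests.post(API_URL, headers=headers, json=payload, timeout=100)
response.raise_for_status()

# Re-open the bytes with PIL and write them out as PNG, as the app does.
Image.open(io.BytesIO(response.content)).save("output.png", format="PNG")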