import spaces
import gradio as gr
import torch
from diffusers import FluxPipeline, FluxTransformer2DModel, FlowMatchEulerDiscreteScheduler
from huggingface_hub import hf_hub_download
from PIL import Image
import requests
from translatepy import Translator
import numpy as np
import random
import os
from io import BytesIO

hf_token = os.environ.get('HF_TOKEN')

translator = Translator()

# Constants
model = "black-forest-labs/FLUX.1-dev"
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048

# Initialize the transformer, pipeline, and scheduler only when a CUDA device is available
if torch.cuda.is_available():
    # Load the custom single-file transformer checkpoint, then build the FLUX pipeline around it
    transformer = FluxTransformer2DModel.from_single_file(
        "https://huggingface.co/ekt1701/Test_case/blob/main/rayflux_photoplus.safetensors",
        torch_dtype=torch.bfloat16
    )
    pipe = FluxPipeline.from_pretrained(
        model,
        transformer=transformer,
        torch_dtype=torch.bfloat16,
        token=hf_token
    )
    pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(
        pipe.scheduler.config,
        use_beta_sigmas=True
    )
    pipe.to("cuda")


@spaces.GPU()
def infer(prompt, width, height, num_inference_steps, guidance_scale, nums,
          seed=42, randomize_seed=True, progress=gr.Progress(track_tqdm=True)):
    # Pick a fresh seed when requested so repeated runs produce different results
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)
    images = pipe(
        prompt=prompt,
        width=width,
        height=height,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        num_images_per_prompt=nums,
        generator=generator
    ).images
    return images, seed


css = """
#col-container {
    margin: 0 auto;
    max-width: 1024px;
}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.HTML("