ekt1701 committed on
Commit 355b9a2 · verified · 1 Parent(s): 561df63

Update app.py

Files changed (1)
  app.py +3 -4
app.py CHANGED
@@ -4,14 +4,11 @@ import torch
from diffusers import FluxPipeline, FluxTransformer2DModel, FlowMatchEulerDiscreteScheduler
from huggingface_hub import hf_hub_download
from PIL import Image
- import requests
- from translatepy import Translator
import numpy as np
import random
import os

hf_token = os.environ.get('HF_TOKEN')
- translator = Translator()

# Constants
model = "black-forest-labs/FLUX.1-dev"
@@ -19,7 +16,9 @@ MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048

@spaces.GPU()
- def infer(prompt, width, height, num_inference_steps, guidance_scale, nums, seed=42, randomize_seed=True, progress=gr.Progress(track_tqdm=True)):    device = "cuda" if torch.cuda.is_available() else "cpu"
+ def infer(prompt, width, height, num_inference_steps, guidance_scale, nums, seed=42, randomize_seed=True, progress=gr.Progress(track_tqdm=True)):
+     device = "cuda" if torch.cuda.is_available() else "cpu"
+
    # Initialize model inside the GPU-enabled function
    try:
        transformer = FluxTransformer2DModel.from_single_file(
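
For context, a minimal sketch of the pattern this commit restores (not the Space's actual code): on a ZeroGPU Space, CUDA only becomes available while the @spaces.GPU()-decorated function runs, so the device check belongs in the function body rather than fused into the signature. The heavy FLUX pipeline setup shown in the diff is omitted here; the placeholder body just reports the selected device.

import torch
import spaces        # Hugging Face Spaces ZeroGPU helper
import gradio as gr

@spaces.GPU()
def infer(prompt, width, height, num_inference_steps, guidance_scale, nums,
          seed=42, randomize_seed=True, progress=gr.Progress(track_tqdm=True)):
    # CUDA is attached only while this decorated function executes on ZeroGPU,
    # so the device choice must happen here, not at import time.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # ... model loading and generation would follow here (see the diff above);
    # this placeholder simply returns the selected device.
    return device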