LPX55 committed
Commit a554383 · verified · 1 Parent(s): 4a172a5

Update raw.py

Files changed (1)
  1. raw.py +9 -8
raw.py CHANGED
@@ -16,14 +16,14 @@ import gradio as gr
 huggingface_token = os.getenv("HUGGINFACE_TOKEN")
 MAX_SEED = 1000000
 
-# quant_config = TransformersBitsAndBytesConfig(load_in_8bit=True,)
-# text_encoder_2_8bit = T5EncoderModel.from_pretrained(
-#     "LPX55/FLUX.1-merged_uncensored",
-#     subfolder="text_encoder_2",
-#     quantization_config=quant_config,
-#     torch_dtype=torch.bfloat16,
-#     token=huggingface_token
-# )
+quant_config = TransformersBitsAndBytesConfig(load_in_8bit=True,)
+text_encoder_2_8bit = T5EncoderModel.from_pretrained(
+    "LPX55/FLUX.1-merged_uncensored",
+    subfolder="text_encoder_2",
+    quantization_config=quant_config,
+    torch_dtype=torch.bfloat16,
+    token=huggingface_token
+)
 
 # good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=torch.bfloat16, token=huggingface_token).to("cuda")
 
@@ -35,6 +35,7 @@ MAX_SEED = 1000000
 pipe = FluxControlNetPipeline.from_pretrained(
     "LPX55/FLUX.1M-8step_upscaler-cnet",
     torch_dtype=torch.bfloat16,
+    text_encoder_2=text_encoder_2_8bit,
     token=huggingface_token
 )
 # adapter_id = "alimama-creative/FLUX.1-Turbo-Alpha"
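
For context: the commit un-comments the 8-bit (bitsandbytes) loading of the T5 text encoder and passes it into the FLUX ControlNet upscaler pipeline via text_encoder_2=. Below is a minimal standalone sketch of the same setup under stated assumptions: the import aliases, the pipe.to("cuda") placement, and the final generation call (prompt, control image, step count) are illustrative and not shown in this diff, so treat it as a sketch rather than the exact app code.

# Minimal sketch; only the loading code mirrors the committed change,
# the rest is assumed for illustration.
import os
import torch
from PIL import Image
from diffusers import FluxControlNetPipeline
from transformers import T5EncoderModel
from transformers import BitsAndBytesConfig as TransformersBitsAndBytesConfig

# Env var spelled as in the committed file.
huggingface_token = os.getenv("HUGGINFACE_TOKEN")

# Quantize the large T5 text encoder to 8-bit to reduce VRAM usage.
quant_config = TransformersBitsAndBytesConfig(load_in_8bit=True)
text_encoder_2_8bit = T5EncoderModel.from_pretrained(
    "LPX55/FLUX.1-merged_uncensored",
    subfolder="text_encoder_2",
    quantization_config=quant_config,
    torch_dtype=torch.bfloat16,
    token=huggingface_token,
)

# Plug the quantized encoder into the ControlNet upscaler pipeline.
pipe = FluxControlNetPipeline.from_pretrained(
    "LPX55/FLUX.1M-8step_upscaler-cnet",
    torch_dtype=torch.bfloat16,
    text_encoder_2=text_encoder_2_8bit,
    token=huggingface_token,
)
# Moves the non-quantized components to GPU; diffusers leaves
# bitsandbytes-quantized modules where they are.
pipe.to("cuda")

# Hypothetical usage: upscale a low-resolution image used as control input.
control_image = Image.open("input_low_res.png")  # placeholder local file
image = pipe(
    prompt="",
    control_image=control_image,
    controlnet_conditioning_scale=0.6,
    num_inference_steps=8,
    height=control_image.height,
    width=control_image.width,
).images[0]
image.save("upscaled.png")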