johnrachwanpruna committed on
Commit
589847e
1 Parent(s): 9aa139a

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +5 -5
README.md CHANGED
@@ -71,7 +71,7 @@ You can run the smashed model on cards with less than 12 GB of memory with these
71
  ```python
72
 
73
  import torch
74
-
75
  from optimum.quanto import freeze, qfloat8, quantize
76
 
77
  from diffusers import FlowMatchEulerDiscreteScheduler, AutoencoderKL
@@ -104,17 +104,17 @@ You can run the smashed model on cards with less than 12 GB of memory with these
104
  )
105
  pipe.text_encoder_2 = text_encoder_2
106
  pipe.transformer = transformer
107
- pipe.enable_model_cpu_offload()
108
-
 
109
  generator = torch.Generator().manual_seed(12345)
110
- image = pipe(
111
  "a cute apple smiling",
112
  guidance_scale=0.0,
113
  num_inference_steps=4,
114
  max_sequence_length=256,
115
  generator=torch.Generator("cpu").manual_seed(0)
116
  ).images[0]
117
- image.save("flux-schnell.png")
118
  ```
119
 
120
  ## Configurations
 
71
  ```python
72
 
73
  import torch
74
+
75
  from optimum.quanto import freeze, qfloat8, quantize
76
 
77
  from diffusers import FlowMatchEulerDiscreteScheduler, AutoencoderKL
 
104
  )
105
  pipe.text_encoder_2 = text_encoder_2
106
  pipe.transformer = transformer
107
+ # pipe.enable_model_cpu_offload()
108
+ pipe.to('cuda')
109
+ print('done')
110
  generator = torch.Generator().manual_seed(12345)
111
+ pipe(
112
  "a cute apple smiling",
113
  guidance_scale=0.0,
114
  num_inference_steps=4,
115
  max_sequence_length=256,
116
  generator=torch.Generator("cpu").manual_seed(0)
117
  ).images[0]
 
118
  ```
119
 
120
  ## Configurations