Update README.md
README.md CHANGED

````diff
@@ -102,15 +102,13 @@ pipeline = FluxPipeline.from_pretrained(
 #pipeline.enable_model_cpu_offload() #save some VRAM by offloading the model to CPU. Comment out if you have enough GPU VRAM
 
 adapter='leaf-flux.safetensors' #Step 16000, final step
-#adapter='leaf-flux-step-
-#adapter='leaf-flux-step-3500.safetensors' #Step 3500
+#adapter='leaf-flux-step-11000.safetensors' #Step 11000, earlier checkpoint
 
 pipeline.load_lora_weights(repo_id, weight_name=adapter) #You need to use the weight_name parameter since the repo includes multiple checkpoints
 
 pipeline=pipeline.to('cuda')
 ```
 
-
 Image generation - Example #1:
 
 ```python
````

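For context, this hunk edits the LoRA-loading block that begins at `pipeline = FluxPipeline.from_pretrained(` (visible in the hunk header but outside the changed lines). A minimal sketch of how that setup typically looks with `diffusers`, assuming the FLUX.1-dev base model and a placeholder value for `repo_id` (neither is stated in the hunk itself):

```python
import torch
from diffusers import FluxPipeline

# Assumed base model; the hunk only shows that a FluxPipeline is created via from_pretrained.
pipeline = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.bfloat16,
)
#pipeline.enable_model_cpu_offload()  # save some VRAM by offloading to CPU; comment out if you have enough GPU VRAM

repo_id = "lamm-mit/leaf-flux"  # placeholder: the repository that holds the LoRA checkpoints
adapter = 'leaf-flux.safetensors'  # Step 16000, final step
#adapter = 'leaf-flux-step-11000.safetensors'  # Step 11000, earlier checkpoint

# weight_name is required because the repo includes multiple checkpoints
pipeline.load_lora_weights(repo_id, weight_name=adapter)
pipeline = pipeline.to('cuda')
```
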
````diff
@@ -124,7 +122,7 @@ prompt="""Generate a futuristic, eco-friendly architectural concept utilizing a
 
 num_samples =2
 num_rows = 2
-n_steps=
+n_steps=50
 guidance_scale=3.5
 all_images = []
 for _ in range(num_rows):
````

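The body of Example #1's generation loop sits between this hunk and the next, so it is not shown here. Judging from the Example #2 loop in the following hunk, it presumably follows the same pattern; a sketch with the Example #1 settings (the explicit `height`/`width` arguments are an assumption, mirroring the earlier revision of Example #2):

```python
# Sketch of the Example #1 loop, assuming the same structure as Example #2 below.
# `prompt` is defined just above this loop in the README.
for _ in range(num_rows):
    image = pipeline(
        prompt,
        num_inference_steps=n_steps,        # 50 steps in this example
        num_images_per_prompt=num_samples,  # 2 images per call
        guidance_scale=guidance_scale,      # 3.5
        height=1024, width=1024,            # assumption; FLUX defaults to 1024x1024
    ).images
    all_images.extend(image)

grid = image_grid(all_images, num_rows, num_samples, save_individual_files=True)
grid
```
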
````diff
@@ -139,33 +137,29 @@ grid = image_grid(all_images, num_rows, num_samples, save_individual_files=True
 grid
 ```
 
-
-
 
+
 Image generation - Example #2:
 
 ```python
-prompt="
-
-
-num_rows = 2
+prompt = "A sign that says 'PDF to AUDIO' with organic shapes, <bioinspired>"
+num_samples =1
+num_rows =1
 n_steps=25
-guidance_scale=
+guidance_scale=10
 all_images = []
 for _ in range(num_rows):
-
-
-    image = pipeline(prompt,num_inference_steps=n_steps,num_images_per_prompt=num_samples,
-        guidance_scale=guidance_scale,
-        height=1024, width=1024,).images
+    image = pipeline(prompt,num_inference_steps=n_steps,num_images_per_prompt=num_samples,
+        guidance_scale=guidance_scale,).images
 
     all_images.extend(image)
 
-grid = image_grid(all_images, num_rows, num_samples, save_individual_files=True,
+grid = image_grid(all_images, num_rows, num_samples, save_individual_files=True, )
 grid
 ```
 
-
+
+
 
````

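`image_grid` is a helper defined earlier in the README, outside the changed region, so its implementation does not appear in this commit. A minimal sketch of a compatible helper, assuming PIL images and treating `save_individual_files` as an option to additionally write each image to disk:

```python
from PIL import Image

def image_grid(imgs, rows, cols, save_individual_files=False, prefix="image"):
    # Arrange rows*cols PIL images into a single grid image.
    # Sketch only: the README's own helper may differ (e.g., in file naming).
    assert len(imgs) == rows * cols
    w, h = imgs[0].size
    grid = Image.new('RGB', size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        if save_individual_files:
            img.save(f"{prefix}_{i}.png")  # optionally keep each image as its own file
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
```
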
```bibtex
@article{BioinspiredFluxBuehler2024,