Update block.py
block.py (changed)

@@ -1,7 +1,7 @@
 # modular diffusers diff-idff
 
 from diffusers.modular_pipelines import (
-    PipelineBlock,
+    ModularPipelineBlocks,
     SequentialPipelineBlocks,
     PipelineState,
     InputParam,
@@ -31,7 +31,7 @@ from diffusers.modular_pipelines.stable_diffusion_xl.modular_blocks import(
 import torch
 from typing import List, Tuple, Any, Optional
 
-class SDXLDiffDiffPrepareLatentsStep(PipelineBlock):
+class SDXLDiffDiffPrepareLatentsStep(ModularPipelineBlocks):
     model_name = "stable-diffusion-xl"
 
     @property
@@ -73,11 +73,6 @@ class SDXLDiffDiffPrepareLatentsStep(PipelineBlock):
                 type_hint=Optional[float],
                 description="When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be bypassed before it is initiated. The initial part of the denoising process is skipped and it is assumed that the passed `image` is a partly denoised image. Note that when this is specified, strength will be ignored. Useful for 'Mixture of Denoisers' multi-pipeline setups."
             ),
-        ]
-
-    @property
-    def intermediate_inputs(self) -> List[InputParam]:
-        return [
             InputParam("generator"),
             InputParam("timesteps",type_hint=torch.Tensor, description="The timesteps to use for sampling. Can be generated in set_timesteps step."),
             InputParam("image_latents", type_hint=torch.Tensor, description="The latents representing the reference image for image-to-image/inpainting generation. Can be generated in vae_encode step."),
@@ -134,7 +129,7 @@ class SDXLDiffDiffPrepareLatentsStep(PipelineBlock):
         return components, state
 
 
-class SDXLDiffDiffLoopBeforeDenoiser(PipelineBlock):
+class SDXLDiffDiffLoopBeforeDenoiser(ModularPipelineBlocks):
     model_name = "stable-diffusion-xl"
 
     @property
@@ -147,11 +142,6 @@ class SDXLDiffDiffLoopBeforeDenoiser(PipelineBlock):
     def inputs(self) -> List[Tuple[str, Any]]:
         return [
             InputParam("denoising_start"),
-        ]
-
-    @property
-    def intermediate_inputs(self) -> List[str]:
-        return [
             InputParam(
                 "latents",
                 type_hint=torch.Tensor,
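The change in this diff is a migration pattern: each block now subclasses `ModularPipelineBlocks` instead of `PipelineBlock`, and the entries that used to live in a separate `intermediate_inputs` property are folded into the single `inputs` list. Below is a minimal sketch of what a block looks like after that merge. It only illustrates the pattern: the class name, the shortened descriptions, and the pass-through `__call__` body are placeholders, not the actual implementation in block.py; the `__call__(components, state)` signature is inferred from the `return components, state` line visible in the diff.

```python
# Sketch of the merged-inputs pattern used in this diff (not the real block).
from typing import List, Optional

import torch
from diffusers.modular_pipelines import InputParam, ModularPipelineBlocks, PipelineState


class SDXLDiffDiffPrepareLatentsSketch(ModularPipelineBlocks):
    model_name = "stable-diffusion-xl"

    @property
    def inputs(self) -> List[InputParam]:
        # Single flat list: the former `inputs` entries followed by what used to
        # live in the separate `intermediate_inputs` property.
        return [
            InputParam(
                "denoising_start",
                type_hint=Optional[float],
                description="Fraction of the denoising process to skip (shortened here).",
            ),
            InputParam("generator"),
            InputParam(
                "timesteps",
                type_hint=torch.Tensor,
                description="Timesteps produced by the set_timesteps step.",
            ),
            InputParam(
                "image_latents",
                type_hint=torch.Tensor,
                description="Reference-image latents produced by the vae_encode step.",
            ),
        ]

    def __call__(self, components, state: PipelineState):
        # Placeholder body: the real block prepares the diff-diff latents here
        # and hands the updated state to the next block.
        return components, state
```

With this layout there is one place to declare everything a block consumes, whether the value comes from the user or from an earlier step such as `set_timesteps` or `vae_encode`, which matches the direction of this diff.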
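Because the diff-diff blocks follow the standard block interface, they can presumably be swapped into an existing SDXL block preset and composed with `SequentialPipelineBlocks`. The sketch below is an assumption about that workflow, not something shown in this diff: the `IMAGE2IMAGE_BLOCKS` preset, the `"prepare_latents"` key, the `from_blocks_dict` helper, and importing the class from `block.py` are all guesses at the modular-pipelines API and at how this repo wires the blocks together, and may differ in your diffusers version.

```python
# Hypothetical composition of the updated block into a sequential pipeline.
from diffusers.modular_pipelines import SequentialPipelineBlocks
from diffusers.modular_pipelines.stable_diffusion_xl.modular_blocks import IMAGE2IMAGE_BLOCKS

from block import SDXLDiffDiffPrepareLatentsStep  # the class updated in this diff

# Start from a stock SDXL preset and swap in the custom prepare-latents step.
blocks = IMAGE2IMAGE_BLOCKS.copy()                          # preset name is an assumption
blocks["prepare_latents"] = SDXLDiffDiffPrepareLatentsStep  # key name is an assumption

diffdiff_blocks = SequentialPipelineBlocks.from_blocks_dict(blocks)
print(diffdiff_blocks)  # inspect the composed steps and their declared inputs
```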