Update inference_manager.py
inference_manager.py (+2 -1)
@@ -515,7 +515,7 @@ class ModelManager:
 
         average_embedding = torch.mean(torch.stack(faceid_all_embeds, dim=0), dim=0)
         average_embedding = average_embedding.to("cuda")
-
+
         print("start inference...")
         style_selection = ""
         use_negative_prompt = True
@@ -525,6 +525,7 @@ class ModelManager:
         prompt_str = cfg.get("prompt", "{prompt}").replace("{prompt}", p)
         generator = torch.Generator(model.base_model_pipeline.device).manual_seed(seed)
         print(f"generate: p={p}, np={negative_prompt}, steps={steps}, guidance_scale={guidance_scale}, size={width},{height}, seed={seed}")
+        print(f"device: embedding={average_embedding.device}, generator={generator.device}, ip_model={ip_model.device}, pipe={model.base_model_pipeline.device}")
         images = ip_model.generate(
             prompt=prompt_str,
             negative_prompt=negative_prompt,