KarthikAI committed on
Commit
7e63c54
·
verified ·
1 Parent(s): 1a391a8

Upload 4 files

Browse files
Files changed (4) hide show
  1. config.json +5 -0
  2. handler.py +35 -0
  3. model_index.json +22 -0
  4. requirements.txt +6 -0
config.json ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "StableDiffusionImg2ImgPipeline",
3
+ "_diffusers_version": "0.16.1",
4
+ "pipeline_tag": "stable-diffusion-image-to-image"
5
+ }
handler.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+ import io
3
+ from PIL import Image
4
+ import torch
5
+ from diffusers import StableDiffusionImg2ImgPipeline
6
+
7
+ pipe = None
8
+
9
+ class EndpointHandler:
10
+ def __init__(self):
11
+ self.device = "cuda" if torch.cuda.is_available() else "cpu"
12
+
13
+ def init(self):
14
+ global pipe
15
+ pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
16
+ "InstantX/InstantID",
17
+ torch_dtype=torch.float16,
18
+ safety_checker=None
19
+ ).to(self.device)
20
+ pipe.enable_attention_slicing()
21
+
22
+ def inference(self, model_inputs: dict) -> dict:
23
+ img_bytes = base64.b64decode(model_inputs.get("image_base64"))
24
+ init_image = Image.open(io.BytesIO(img_bytes)).convert("RGB")
25
+ output = pipe(
26
+ prompt=model_inputs.get("prompt", ""),
27
+ image=init_image,
28
+ strength=float(model_inputs.get("strength", 0.75)),
29
+ guidance_scale=float(model_inputs.get("guidance_scale", 7.5)),
30
+ num_inference_steps=int(model_inputs.get("num_inference_steps", 50)),
31
+ )
32
+ sticker = output.images[0]
33
+ buf = io.BytesIO()
34
+ sticker.save(buf, format="PNG")
35
+ return {"generated_image_base64": base64.b64encode(buf.getvalue()).decode("utf-8")}
model_index.json ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "StableDiffusionImg2ImgPipeline",
3
+ "_diffusers_version": "0.16.1",
4
+ "components": {
5
+ "unet": {
6
+ "path": "diffusers/unet",
7
+ "type": ["diffusers", "UNet2DConditionModel"]
8
+ },
9
+ "vae": {
10
+ "path": "diffusers/vae",
11
+ "type": ["diffusers", "AutoencoderKL"]
12
+ },
13
+ "scheduler": {
14
+ "path": "diffusers/scheduler",
15
+ "type": ["diffusers", "DDIMScheduler"]
16
+ },
17
+ "tokenizer": {
18
+ "path": "tokenizer",
19
+ "type": ["transformers", "CLIPTokenizer"]
20
+ }
21
+ }
22
+ }
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ torch>=2.0.0
2
+ diffusers>=0.16.1
3
+ transformers>=4.30.0
4
+ huggingface_hub>=0.14.1
5
+ safetensors
6
+ Pillow