Spaces:
Runtime error
Runtime error
it works
Browse files- Dockerfile +28 -0
- rp_handler.py +94 -0
- start.sh +66 -0
- test_input.json +9 -0
Dockerfile
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Base image: PyTorch 2.2.1 with CUDA 12.1 runtime (runtime-only, no compiler toolchain).
FROM pytorch/pytorch:2.2.1-cuda12.1-cudnn8-runtime

# Install git, git-lfs and basic dependencies
# (ffmpeg/libsm6/libxext6/libgl1 are OpenCV runtime libs; xvfb gives a virtual display for headless use)
RUN apt-get update && apt-get install -y git git-lfs curl ffmpeg libsm6 libxext6 libgl1-mesa-glx xvfb

# Install Python dependencies including OpenCV
RUN pip install --no-cache-dir runpod requests opencv-python-headless

# Set up environment variables for HuggingFace and torch cache.
# All caches point into /workspace, which start.sh expects to be a
# persistent RunPod Network Volume so downloads survive restarts.
ENV HUGGINGFACE_HUB_CACHE=/workspace/cache
ENV TORCH_HOME=/workspace/models
ENV TRANSFORMERS_CACHE=/workspace/cache
ENV XDG_CACHE_HOME=/workspace/cache
ENV PYTHONUNBUFFERED=1

# Copy handler and startup script
COPY rp_handler.py /
COPY start.sh /
RUN chmod +x /start.sh

# Create directory stubs for workspace to avoid permission issues
RUN mkdir -p /workspace/app /workspace/cache /workspace/models

# NOTE: working directory is the image root (not /workspace); start.sh
# cd's into the cloned application directory itself before launching.
WORKDIR /

# Start with our script
CMD ["/start.sh"]
|
rp_handler.py
ADDED
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import runpod
|
2 |
+
import io
|
3 |
+
import base64
|
4 |
+
import time
|
5 |
+
import requests
|
6 |
+
import sys
|
7 |
+
import os
|
8 |
+
import traceback
|
9 |
+
from PIL import Image
|
10 |
+
import torch
|
11 |
+
|
12 |
+
def download_image(url):
    """Download an image from a URL and return a PIL Image object.

    Args:
        url: HTTP(S) URL pointing at an image resource.

    Returns:
        PIL.Image.Image decoded from the response body.

    Raises:
        requests.RequestException: on network failure, timeout, or non-2xx status.
        PIL.UnidentifiedImageError: if the body is not a decodable image.
    """
    try:
        # timeout prevents the serverless worker from hanging forever on a
        # stalled connection (the original call had no timeout at all).
        response = requests.get(url, stream=True, timeout=60)
        response.raise_for_status()
        return Image.open(io.BytesIO(response.content))
    except Exception as e:
        print(f"Error downloading image from {url}: {str(e)}")
        raise
|
21 |
+
|
22 |
+
def encode_image_to_base64(image):
    """Serialize a PIL Image to PNG and return it as a base64-encoded string."""
    png_buffer = io.BytesIO()
    image.save(png_buffer, format="PNG")
    return base64.b64encode(png_buffer.getvalue()).decode()
|
28 |
+
|
29 |
+
def handler(event):
    """RunPod serverless entry point for Stable-Makeup inference.

    Expected event shape:
        {"input": {"data": [id_image_url, makeup_image_url, guidance_scale?]}}

    guidance_scale is optional and defaults to 1.6.

    Returns:
        {"status": "completed", "image": <base64 PNG>, "processingTime": float}
        on success, or {"status": "error", "message": str} on any failure.
    """
    try:
        # Imported lazily: inference_utils lives in the cloned repo (see
        # start.sh) and pulls in the heavy diffusion stack.
        print("Handler started, importing inference module...")
        from inference_utils import inference

        start_time = time.time()
        print("Processing request...")

        # Extract input data
        input_data = event["input"]["data"]

        # Only the two image URLs are strictly required; guidance_scale is
        # optional. (The original guard required 3 items, which made the
        # 1.6 default below unreachable dead code.)
        if len(input_data) < 2:
            return {
                "status": "error",
                "message": "Missing required parameters. Expected [id_image_url, makeup_image_url, guidance_scale]"
            }

        id_image_url = input_data[0]
        makeup_image_url = input_data[1]
        guidance_scale = float(input_data[2]) if len(input_data) > 2 else 1.6

        print(f"Downloading images from URLs...")
        id_image = download_image(id_image_url)
        makeup_image = download_image(makeup_image_url)

        print(f"Running inference with guidance scale {guidance_scale}...")
        result_image = inference(id_image, makeup_image, guidance_scale)

        # Calculate processing time
        processing_time = time.time() - start_time
        print(f"Processing completed in {processing_time:.2f} seconds")

        # Return base64 encoded image
        return {
            "status": "completed",
            "image": encode_image_to_base64(result_image),
            "processingTime": processing_time
        }

    except Exception as e:
        # Broad catch is deliberate: this is the top-level serverless
        # boundary, and any exception must become a structured error reply.
        print(f"Error in handler: {str(e)}")
        print(traceback.format_exc())

        # Clean up GPU memory so a failed request doesn't leak VRAM
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

        return {
            "status": "error",
            "message": str(e)
        }
|
81 |
+
|
82 |
+
if __name__ == "__main__":
    # Startup diagnostics: working directory, interpreter, and GPU state.
    print(f"Starting RunPod Serverless handler from {os.getcwd()}")
    print(f"Python version: {sys.version}")
    cuda_ok = torch.cuda.is_available()
    print(f"CUDA available: {cuda_ok}")
    if cuda_ok:
        print(f"CUDA device: {torch.cuda.get_device_name(0)}")

    # Check if all environment variables are set
    print(f"HF cache: {os.environ.get('HUGGINGFACE_HUB_CACHE')}")
    print(f"Torch home: {os.environ.get('TORCH_HOME')}")

    # Start the handler
    runpod.serverless.start({"handler": handler})
|
start.sh
ADDED
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/bin/bash
# Bootstrap for the Stable-Makeup RunPod serverless worker.
# Clones the app into the persistent /workspace volume on first boot,
# ensures model checkpoints exist, then execs the Python handler.
set -e

echo "Starting RunPod Stable-Makeup Serverless"

# Check if workspace volume is properly mounted
if [ ! -d "/workspace" ]; then
    echo "ERROR: /workspace volume is not mounted. Please create a RunPod with a Network Volume mounted at /workspace"
    exit 1
fi

# Check available disk space and display
df -h /workspace
df -h /

# Make sure our directories exist
mkdir -p /workspace/app /workspace/cache /workspace/models /workspace/models/stablemakeup

# Clone the repository with LFS since we have sufficient network storage (40GB)
if [ ! -d "/workspace/app/stable-makeup" ]; then
    echo "Cloning repository with LFS from Hugging Face..."

    # Setup git-lfs
    git lfs install

    # Clone the repository with LFS
    git clone https://huggingface.co/spaces/edgarhnd/Stable-Makeup-unofficial /workspace/app/stable-makeup

    # Copy our handler file
    cp /rp_handler.py /workspace/app/stable-makeup/

    # Modify the inference_utils.py file to use the workspace volume for checkpoints
    sed -i 's|"./checkpoints/stablemakeup"|"/workspace/models/stablemakeup"|g' /workspace/app/stable-makeup/inference_utils.py

    # Install required dependencies
    cd /workspace/app/stable-makeup
    echo "Installing dependencies..."
    pip install --no-cache-dir -r requirements.txt
    pip install --no-cache-dir opencv-python-headless

    # Create checkpoints directory and copy model files if they exist in the repo
    mkdir -p /workspace/models/stablemakeup
    if [ -d "/workspace/app/stable-makeup/checkpoints/stablemakeup" ]; then
        echo "Copying model files from cloned repository to workspace models directory..."
        cp -v /workspace/app/stable-makeup/checkpoints/stablemakeup/* /workspace/models/stablemakeup/
    fi
fi

# If model files are still missing, download them directly.
# curl -f makes a 404/5xx abort the script instead of silently saving an
# HTML error page as a "model" file (which would then satisfy the -f
# existence check above on every later boot).
BASE_URL="https://huggingface.co/spaces/edgarhnd/Stable-Makeup-unofficial/resolve/main/checkpoints/stablemakeup"
if [ ! -f "/workspace/models/stablemakeup/pytorch_model.bin" ]; then
    echo "Model files not found in cloned repository. Downloading directly..."
    for model in pytorch_model.bin pytorch_model_1.bin pytorch_model_2.bin; do
        echo "Downloading ${model}..."
        curl -fL -o "/workspace/models/stablemakeup/${model}" "${BASE_URL}/${model}"
    done
fi

# Start the handler
echo "Starting serverless handler..."
cd /workspace/app/stable-makeup
exec python -u rp_handler.py
|
test_input.json
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"input": {
|
3 |
+
"data": [
|
4 |
+
"https://cool-dodo-849.convex.cloud/api/storage/488bd925-6741-4135-89e7-3ae2bac78e91",
|
5 |
+
"https://cool-dodo-849.convex.cloud/api/storage/2aa3db4c-2cd0-4d5e-b84a-ea0a7b6f3615",
|
6 |
+
1.6
|
7 |
+
]
|
8 |
+
}
|
9 |
+
}
|