# NOTE(review): the lines "Spaces:" / "Runtime error" / "Runtime error" that
# preceded this file look like status text scraped from a Hugging Face Spaces
# page, not program content — preserved here as a comment so the file parses.
import base64
import io
import os
import sys
import time
import traceback

import requests
import runpod
import torch
from PIL import Image
def download_image(url):
    """Download an image from *url* and return it as a PIL Image.

    Args:
        url: HTTP(S) URL of the image to fetch.

    Returns:
        PIL.Image.Image: the decoded image.

    Raises:
        requests.RequestException: on network failure, timeout, or non-2xx status.
        PIL.UnidentifiedImageError: if the response body is not a valid image.
    """
    try:
        # Bug fix: the original call had no timeout, so a stalled connection
        # could hang the serverless worker indefinitely. Also dropped
        # stream=True — the body is consumed via .content immediately anyway,
        # and an unconsumed streamed response would leak the connection.
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        return Image.open(io.BytesIO(response.content))
    except Exception as e:
        # Log with the offending URL for debugging, then propagate so the
        # caller's error handling reports the failure.
        print(f"Error downloading image from {url}: {str(e)}")
        raise
def encode_image_to_base64(image):
    """Serialize *image* as PNG and return the base64-encoded string.

    Args:
        image: a PIL Image (anything exposing ``save(buffer, format=...)``).

    Returns:
        str: base64 text of the PNG bytes.
    """
    with io.BytesIO() as png_buffer:
        image.save(png_buffer, format="PNG")
        raw_png = png_buffer.getvalue()
    return base64.b64encode(raw_png).decode()
def handler(event):
    """RunPod serverless entry point for the makeup-transfer pipeline.

    Expects ``event["input"]["data"]`` to be a list
    ``[id_image_url, makeup_image_url, guidance_scale]`` where
    ``guidance_scale`` is optional and defaults to 1.6.

    Returns:
        dict: ``{"status": "completed", "image": <base64 PNG>,
        "processingTime": <seconds>}`` on success, or
        ``{"status": "error", "message": <str>}`` on any failure.
    """
    try:
        print("Handler started, importing inference module...")
        # Imported lazily so the worker process can start (and report a clean
        # error) even when the heavyweight inference stack fails to load.
        from inference_utils import inference

        start_time = time.time()
        print("Processing request...")

        input_data = event["input"]["data"]
        # Bug fix: the original guard required 3 items, which made the
        # `len(input_data) > 2` default below dead code and contradicted the
        # optional guidance_scale. Only the two image URLs are mandatory.
        if len(input_data) < 2:
            return {
                "status": "error",
                "message": "Missing required parameters. Expected [id_image_url, makeup_image_url, guidance_scale]"
            }
        id_image_url = input_data[0]
        makeup_image_url = input_data[1]
        guidance_scale = float(input_data[2]) if len(input_data) > 2 else 1.6

        print("Downloading images from URLs...")
        id_image = download_image(id_image_url)
        makeup_image = download_image(makeup_image_url)

        print(f"Running inference with guidance scale {guidance_scale}...")
        result_image = inference(id_image, makeup_image, guidance_scale)

        processing_time = time.time() - start_time
        print(f"Processing completed in {processing_time:.2f} seconds")

        # Return base64 encoded image so the result travels as plain JSON.
        return {
            "status": "completed",
            "image": encode_image_to_base64(result_image),
            "processingTime": processing_time
        }
    except Exception as e:
        # Log the full traceback for the worker logs before answering.
        print(f"Error in handler: {str(e)}")
        print(traceback.format_exc())
        # Release cached GPU memory so a failed request doesn't starve the
        # next one on the same worker.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        return {
            "status": "error",
            "message": str(e)
        }
if __name__ == "__main__":
    # Startup diagnostics: working directory, interpreter, and GPU visibility.
    print(f"Starting RunPod Serverless handler from {os.getcwd()}")
    print(f"Python version: {sys.version}")
    cuda_available = torch.cuda.is_available()
    print(f"CUDA available: {cuda_available}")
    if cuda_available:
        print(f"CUDA device: {torch.cuda.get_device_name(0)}")
    # Check if all environment variables are set
    print(f"HF cache: {os.environ.get('HUGGINGFACE_HUB_CACHE')}")
    print(f"Torch home: {os.environ.get('TORCH_HOME')}")
    # Hand control over to the RunPod serverless event loop.
    runpod.serverless.start({"handler": handler})