from inference_sdk import InferenceHTTPClient
from PIL import Image, ImageDraw, ImageFont, ImageEnhance
import os
import gradio as gr
from collections import defaultdict

API_KEY = os.getenv("ROBOFLOW_API_KEY")

# Initialize the Roboflow client
CLIENT = InferenceHTTPClient(
    api_url="https://detect.roboflow.com",
    api_key=API_KEY
)

# Set model details
MODEL_ID = "hvacsym/5"
CONFIDENCE_THRESHOLD = 0.3  # Minimum confidence (0-1) for keeping predictions
GRID_SIZE = (3, 3)  # 3x3 segmentation

# Colors for bounding boxes and labels
RED = (255, 0, 0)
GREEN = (0, 255, 0)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)

# Load font for labeling
try:
    font = ImageFont.truetype("arial.ttf", 14)
except OSError:
    font = ImageFont.load_default()


def enhance_image(image):
    """Enhance image by adjusting brightness and contrast."""
    if image.mode != 'L':
        image = image.convert('L')
    brightness = ImageEnhance.Brightness(image)
    image = brightness.enhance(1.3)
    contrast = ImageEnhance.Contrast(image)
    image = contrast.enhance(1.2)
    return image.convert('RGB')  # Convert back to RGB for colored boxes


def process_image(image_path):
    """Process an image by running inference on each segment and drawing bounding boxes."""
    # Load and enhance the original image
    original_image = Image.open(image_path)
    original_image = enhance_image(original_image)
    width, height = original_image.size
    seg_w, seg_h = width // GRID_SIZE[1], height // GRID_SIZE[0]

    # Create a copy of the full image to draw bounding boxes on
    final_image = original_image.copy()
    draw_final = ImageDraw.Draw(final_image)
    total_counts = defaultdict(int)

    # Process each segment
    for row in range(GRID_SIZE[0]):
        for col in range(GRID_SIZE[1]):
            x1, y1 = col * seg_w, row * seg_h
            x2, y2 = (col + 1) * seg_w, (row + 1) * seg_h
            segment = original_image.crop((x1, y1, x2, y2))
            segment_path = f"segment_{row}_{col}.png"
            segment.save(segment_path)

            # Run inference on the segment
            result = CLIENT.infer(segment_path, model_id=MODEL_ID)

            # Filter predictions by confidence (the API reports confidence in the 0-1 range)
            filtered_predictions = [
                pred for pred in result["predictions"]
                if pred["confidence"] >= CONFIDENCE_THRESHOLD
            ]

            # Draw bounding boxes and count labels
            for obj in filtered_predictions:
                class_name = obj["class"]
                total_counts[class_name] += 1

                # Convert segment-local center coordinates to full-image corner coordinates
                x_min = x1 + obj["x"] - obj["width"] // 2
                y_min = y1 + obj["y"] - obj["height"] // 2
                x_max = x1 + obj["x"] + obj["width"] // 2
                y_max = y1 + obj["y"] + obj["height"] // 2

                # Draw bounding box
                draw_final.rectangle([x_min, y_min, x_max, y_max], outline=GREEN, width=2)

                # Draw the label above the bounding box (or below it if there is no room)
                text_size = draw_final.textbbox((0, 0), class_name, font=font)
                text_width = text_size[2] - text_size[0]
                text_height = text_size[3] - text_size[1]
                text_x = x_min
                text_y = y_min - text_height - 5 if y_min - text_height - 5 > 0 else y_max + 5
                draw_final.rectangle(
                    [text_x, text_y, text_x + text_width + 6, text_y + text_height + 2],
                    fill=BLACK
                )
                draw_final.text((text_x + 3, text_y), class_name, fill=WHITE, font=font)

    # Save the final processed image
    final_image_path = "processed_image.png"
    final_image.save(final_image_path)
    return final_image_path, total_counts


def process_uploaded_image(image_path):
    """Handle an uploaded image and return the annotated image plus symbol counts."""
    final_image_path, total_counts = process_image(image_path)
    count_text = "\n".join(f"{label}: {count}" for label, count in total_counts.items())
    return final_image_path, count_text


# Deploy with Gradio
iface = gr.Interface(
    fn=process_uploaded_image,
    inputs=gr.Image(type="filepath"),
    outputs=[gr.Image(type="filepath"), gr.Text()],
    title="HVAC Symbol Detector",
    description="Upload an HVAC blueprint image. The model will segment it, detect symbols, "
                "and return the final image with bounding boxes along with symbol counts."
)
iface.launch(debug=True, share=True)