// Xylaria-Iris-v3 — Home.tsx (source: Hugging Face Space, commit b1712cf)
/**
* @license
* SPDX-License-Identifier: Apache-2.0
*/
/* tslint:disable */
import {ContentUnion, GoogleGenAI, Modality} from '@google/genai';
import {LoaderCircle, SendHorizontal, Trash2, X, Eraser} from 'lucide-react'; // Added Eraser
import {useEffect, useRef, useState} from 'react';
function Home() {
const [apiKey, setApiKey] = useState<string | null>(null);
const [ai, setAi] = useState<GoogleGenAI | null>(null);
const [keyError, setKeyError] = useState<string | null>(null);
useEffect(() => {
async function fetchKey() {
try {
const response = await fetch('/api/get-api-key'); // Assuming Nginx proxies this
if (!response.ok) {
throw new Error(`Failed to fetch API key: ${response.statusText}`);
}
const data = await response.json();
if (data.apiKey) {
setApiKey(data.apiKey);
setAi(new GoogleGenAI({ apiKey: data.apiKey }));
} else {
throw new Error(data.error || "API Key not returned from backend");
}
} catch (error: any) {
console.error("Error fetching API key:", error);
setKeyError(error.message);
// Show error to user
}
}
fetchKey();
}, []);
/**
 * Extracts a human-readable message from an API error string.
 *
 * Gemini errors often embed a JSON payload of the shape
 * `{"error": {..., "message": "..."}}`; when such a payload is present and
 * carries a `message`, that message is returned. In every other case the
 * raw input string is passed through unchanged.
 */
function parseError(error: string) {
  const match = /{"error":(.*)}/gm.exec(error);
  if (match?.[1]) {
    try {
      const payload = JSON.parse(match[1]);
      if (payload.message) {
        return payload.message;
      }
    } catch {
      // Embedded text was not valid JSON — fall through to the raw string.
    }
  }
  return error;
}
export default function Home() {
const canvasRef = useRef<HTMLCanvasElement>(null);
const backgroundImageRef = useRef<HTMLImageElement | null>(null); // To store the uploaded image object
const fileInputRef = useRef<HTMLInputElement>(null);
const [uploadedImageFile, setUploadedImageFile] = useState<File | null>(null);
// previewUrl is mostly for the initial load into Image object, canvas is the main display
// const [previewUrl, setPreviewUrl] = useState<string | null>(null);
const [isDrawing, setIsDrawing] = useState(false);
const [penColor, setPenColor] = useState('#000000');
const colorInputRef = useRef<HTMLInputElement>(null);
const [prompt, setPrompt] = useState('');
const [generatedImage, setGeneratedImage] = useState<string | null>(null);
const [isLoading, setIsLoading] = useState(false);
const [showErrorModal, setShowErrorModal] = useState(false);
const [errorMessage, setErrorMessage] = useState('');
// Initialize canvas with white background or uploaded image
const initializeCanvas = (clearWhite: boolean = true) => {
if (!canvasRef.current) return;
const canvas = canvasRef.current;
const ctx = canvas.getContext('2d')!;
ctx.fillStyle = '#FFFFFF';
ctx.fillRect(0, 0, canvas.width, canvas.height);
if (!clearWhite && backgroundImageRef.current) {
ctx.drawImage(
backgroundImageRef.current,
0,
0,
canvas.width,
canvas.height,
);
}
};
// Effect for API key check
useEffect(() => {
if (!API_KEY) {
setErrorMessage("Gemini API Key is missing. Please configure it to use the application.");
setShowErrorModal(true);
}
initializeCanvas(true); // Initialize canvas on mount
}, []);
// Load uploaded image onto canvas
useEffect(() => {
if (uploadedImageFile) {
const reader = new FileReader();
reader.onload = (e) => {
const img = new window.Image();
img.onload = () => {
backgroundImageRef.current = img;
initializeCanvas(false); // Clear canvas and draw new image
setGeneratedImage(null); // Clear previous AI generated image
};
img.onerror = () => {
setErrorMessage("Failed to load the uploaded image.");
setShowErrorModal(true);
backgroundImageRef.current = null;
initializeCanvas(true); // Clear to white on error
}
img.src = e.target?.result as string;
};
reader.readAsDataURL(uploadedImageFile);
} else {
// No file uploaded, or file cleared
backgroundImageRef.current = null;
initializeCanvas(true); // Clear to white
}
}, [uploadedImageFile]);
// Drawing functions
const getCoordinates = (e: React.MouseEvent | React.TouchEvent) => {
const canvas = canvasRef.current!;
const rect = canvas.getBoundingClientRect();
const scaleX = canvas.width / rect.width;
const scaleY = canvas.height / rect.height;
let clientX, clientY;
if (e.nativeEvent instanceof MouseEvent) {
clientX = e.nativeEvent.offsetX * scaleX;
clientY = e.nativeEvent.offsetY * scaleY;
} else if (e.nativeEvent instanceof TouchEvent && e.nativeEvent.touches?.[0]) {
clientX = (e.nativeEvent.touches[0].clientX - rect.left) * scaleX;
clientY = (e.nativeEvent.touches[0].clientY - rect.top) * scaleY;
} else { // Fallback for other PointerEvents or if offsetX/Y are not directly available
clientX = (('clientX' in e.nativeEvent ? e.nativeEvent.clientX : (e.nativeEvent as any).layerX) - rect.left) * scaleX;
clientY = (('clientY' in e.nativeEvent ? e.nativeEvent.clientY : (e.nativeEvent as any).layerY) - rect.top) * scaleY;
}
return { x: clientX, y: clientY };
};
const startDrawing = (e: React.MouseEvent | React.TouchEvent) => {
if (!canvasRef.current) return;
const { x, y } = getCoordinates(e);
const ctx = canvasRef.current.getContext('2d')!;
if (e.type === 'touchstart') e.preventDefault();
ctx.beginPath();
ctx.moveTo(x, y);
setIsDrawing(true);
};
const draw = (e: React.MouseEvent | React.TouchEvent) => {
if (!isDrawing || !canvasRef.current) return;
if (e.type === 'touchmove') e.preventDefault();
const { x, y } = getCoordinates(e);
const ctx = canvasRef.current.getContext('2d')!;
ctx.lineWidth = 5;
ctx.lineCap = 'round';
ctx.strokeStyle = penColor;
ctx.lineTo(x, y);
ctx.stroke();
};
const stopDrawing = () => {
if (isDrawing) {
const ctx = canvasRef.current?.getContext('2d');
if (ctx) ctx.closePath(); // Close the path, though stroke() already renders
setIsDrawing(false);
}
};
const handleImageUpload = (event: React.ChangeEvent<HTMLInputElement>) => {
const file = event.target.files?.[0];
if (file) {
setUploadedImageFile(file); // This will trigger the useEffect to draw it
}
};
// Clears only the drawings, keeps the uploaded image
const clearDrawingLayer = () => {
initializeCanvas(false); // Redraws background image, effectively clearing drawings
};
// Clears everything: uploaded image, drawings, generated image
const resetAll = () => {
setUploadedImageFile(null); // This will trigger useEffect to clear canvas to white
backgroundImageRef.current = null;
setGeneratedImage(null);
setPrompt('');
if (fileInputRef.current) {
fileInputRef.current.value = "";
}
initializeCanvas(true); // Ensure it's white
};
const handleColorChange = (e: React.ChangeEvent<HTMLInputElement>) => {
setPenColor(e.target.value);
};
const openColorPicker = () => {
colorInputRef.current?.click();
};
const handleKeyDownForColorPicker = (e: React.KeyboardEvent) => {
if (e.key === 'Enter' || e.key === ' ') {
openColorPicker();
}
};
const handleSubmit = async (e: React.FormEvent<HTMLFormElement>) => {
e.preventDefault();
if (!ai) {
setErrorMessage("Gemini API client is not initialized. Check API Key.");
setShowErrorModal(true);
return;
}
if (!canvasRef.current || !backgroundImageRef.current) { // Ensure an image was uploaded
setErrorMessage('Please upload an image first.');
setShowErrorModal(true);
return;
}
if (!prompt.trim()) {
setErrorMessage('Please enter a prompt.');
setShowErrorModal(true);
return;
}
setIsLoading(true);
setGeneratedImage(null);
try {
// Get the current canvas content (uploaded image + drawings) as base64
const canvas = canvasRef.current;
const drawingAndImageData = canvas.toDataURL('image/png').split(',')[1];
const imageMimeType = 'image/png'; // We are sending PNG from canvas
console.log('Request payload:', {
prompt,
imageMimeType,
imageData: drawingAndImageData ? `${drawingAndImageData.substring(0, 50)}... (truncated)` : null,
});
let contents: ContentUnion[] = [
{
role: 'USER',
parts: [
{ inlineData: { data: drawingAndImageData, mimeType: imageMimeType } },
{ text: `${prompt}. Modify the image based on the prompt and any drawings.` }, // Added context for drawings
],
},
];
const response = await ai.models.generateContent({
model: 'gemini-2.0-flash-preview-image-generation',
contents,
config: {
responseModalities: [Modality.TEXT, Modality.IMAGE],
},
});
const apiResponseData = { success: true, message: '', imageData: null, error: undefined };
for (const part of response.candidates[0].content.parts) {
if (part.text) apiResponseData.message = part.text;
else if (part.inlineData) apiResponseData.imageData = part.inlineData.data;
}
console.log('API Response:', {
...apiResponseData,
imageData: apiResponseData.imageData ? `${apiResponseData.imageData.substring(0, 50)}... (truncated)` : null,
});
if (apiResponseData.success && apiResponseData.imageData) {
const imageUrl = `data:image/png;base64,${apiResponseData.imageData}`;
setGeneratedImage(imageUrl);
} else {
const errorMsg = apiResponseData.error || apiResponseData.message || 'Failed to generate image from API response.';
setErrorMessage(errorMsg);
setShowErrorModal(true);
}
} catch (error: any) {
console.error('Error submitting image and prompt:', error);
setErrorMessage(parseError(error.message || 'An unexpected error occurred.'));
setShowErrorModal(true);
} finally {
setIsLoading(false);
}
};
const closeErrorModal = () => setShowErrorModal(false);
// Add touch event prevention for drawing
useEffect(() => {
const canvas = canvasRef.current;
const preventTouchDefault = (e: TouchEvent) => {
if (isDrawing && canvas && canvas.contains(e.target as Node)) {
e.preventDefault();
}
};
if (canvas) {
canvas.addEventListener('touchstart', preventTouchDefault, { passive: false });
canvas.addEventListener('touchmove', preventTouchDefault, { passive: false });
}
return () => {
if (canvas) {
canvas.removeEventListener('touchstart', preventTouchDefault);
canvas.removeEventListener('touchmove', preventTouchDefault);
}
};
}, [isDrawing]);
return (
<>
<div className="min-h-screen notebook-paper-bg text-gray-900 flex flex-col justify-start items-center">
<main className="container mx-auto px-3 sm:px-6 py-5 sm:py-10 pb-32 max-w-5xl w-full">
<div className="flex flex-col sm:flex-row sm:justify-between sm:items-end mb-2 sm:mb-6 gap-2">
<div>
<h1 className="text-2xl sm:text-3xl font-bold mb-0 leading-tight font-mega">
Gemini Image Editor
</h1>
<p className="text-sm sm:text-base text-gray-500 mt-1">
Upload an image, draw on it, and prompt Gemini to modify!
</p>
<p className="text-sm sm:text-base text-gray-500 mt-1">
Built with{' '}
<a className="underline" href="https://ai.google.dev/gemini-api/docs/image-generation" target="_blank" rel="noopener noreferrer">
Gemini API
</a>
</p>
</div>
<menu className="flex items-center bg-gray-300 rounded-full p-2 shadow-sm self-start sm:self-auto">
<button
type="button"
className="w-10 h-10 rounded-full overflow-hidden mr-2 flex items-center justify-center border-2 border-white shadow-sm transition-transform hover:scale-110 disabled:opacity-50 disabled:cursor-not-allowed"
onClick={openColorPicker}
onKeyDown={handleKeyDownForColorPicker}
aria-label="Open color picker"
style={{ backgroundColor: penColor }}
disabled={!backgroundImageRef.current}
title="Select Pen Color">
<input
ref={colorInputRef}
type="color"
value={penColor}
onChange={handleColorChange}
className="opacity-0 absolute w-px h-px"
aria-label="Select pen color"
/>
</button>
<button
type="button"
onClick={clearDrawingLayer}
className="w-10 h-10 rounded-full flex items-center justify-center bg-white shadow-sm transition-all hover:bg-gray-50 hover:scale-110 mr-2 disabled:opacity-50 disabled:cursor-not-allowed"
aria-label="Clear Drawings"
title="Clear Drawings (keeps uploaded image)"
disabled={!backgroundImageRef.current}>
<Eraser className="w-5 h-5 text-gray-700" />
</button>
<button
type="button"
onClick={resetAll}
className="w-10 h-10 rounded-full flex items-center justify-center bg-white shadow-sm transition-all hover:bg-gray-50 hover:scale-110"
aria-label="Clear All (Image, Drawings, Prompt)"
title="Clear All (Image, Drawings, Prompt)">
<Trash2 className="w-5 h-5 text-gray-700"/>
</button>
</menu>
</div>
{/* Image upload and canvas area */}
<div className="w-full mb-6 p-1 sm:p-2 bg-white/90 border-2 border-gray-300 rounded-lg shadow">
<div className="mb-4 px-2 sm:px-0">
<label htmlFor="imageUpload" className="block text-sm font-medium text-gray-700 mb-1">
Upload your image (max 4MB recommended for Gemini):
</label>
<input
id="imageUpload"
type="file"
accept="image/*"
onChange={handleImageUpload}
ref={fileInputRef}
className="block w-full text-sm text-gray-500 file:mr-4 file:py-2 file:px-4 file:rounded-full file:border-0 file:text-sm file:font-semibold file:bg-gray-200 file:text-gray-700 hover:file:bg-gray-300"
/>
</div>
<canvas
ref={canvasRef}
width={960} // Internal resolution
height={540} // Internal resolution
onMouseDown={startDrawing}
onMouseMove={draw}
onMouseUp={stopDrawing}
onMouseLeave={stopDrawing}
onTouchStart={startDrawing}
onTouchMove={draw}
onTouchEnd={stopDrawing}
className={`border-2 border-black w-full hover:cursor-crosshair sm:h-[60vh] h-[40vh] min-h-[300px] bg-white/90 touch-none ${backgroundImageRef.current ? '' : 'cursor-not-allowed'}`}
style={{ touchAction: backgroundImageRef.current ? 'none' : 'auto' }} // Prevent scroll only when drawing is possible
/>
</div>
{/* AI Generated Image Display */}
{generatedImage && (
<div className="w-full mb-6 p-4 bg-gray-100 border-2 border-gray-400 rounded-lg shadow">
<h3 className="text-center text-lg text-gray-700 mb-2 font-bold">AI Generated Image:</h3>
<img src={generatedImage} alt="Generated by AI" className="max-w-full h-auto max-h-[300px] sm:max-h-[540px] mx-auto rounded border border-gray-300" />
</div>
)}
<form onSubmit={handleSubmit} className="w-full">
<div className="relative">
<input
type="text"
value={prompt}
onChange={(e) => setPrompt(e.target.value)}
placeholder="Describe changes or what to generate from the image & drawings..."
className="w-full p-3 sm:p-4 pr-12 sm:pr-14 text-sm sm:text-base border-2 border-black bg-white text-gray-800 shadow-sm focus:ring-2 focus:ring-gray-200 focus:outline-none transition-all font-mono"
required
disabled={!API_KEY || !backgroundImageRef.current}
/>
<button
type="submit"
disabled={isLoading || !API_KEY || !backgroundImageRef.current || !prompt.trim()}
className="absolute right-3 sm:right-4 top-1/2 -translate-y-1/2 p-1.5 sm:p-2 rounded-none bg-black text-white hover:cursor-pointer hover:bg-gray-800 disabled:bg-gray-400 disabled:cursor-not-allowed transition-colors">
{isLoading ? (
<LoaderCircle className="w-5 sm:w-6 h-5 sm:h-6 animate-spin" aria-label="Loading"/>
) : (
<SendHorizontal className="w-5 sm:w-6 h-5 sm:h-6" aria-label="Submit"/>
)}
</button>
</div>
</form>
</main>
{showErrorModal && (
<div className="fixed inset-0 bg-black/50 flex items-center justify-center z-50 p-4">
<div className="bg-white rounded-lg shadow-xl max-w-md w-full p-6">
<div className="flex justify-between items-start mb-4">
<h3 className="text-xl font-bold text-red-600">Error</h3>
<button onClick={closeErrorModal} className="text-gray-400 hover:text-gray-500">
<X className="w-5 h-5" />
</button>
</div>
<p className="font-medium text-gray-700 whitespace-pre-wrap">
{parseError(errorMessage)}
</p>
</div>
</div>
)}
</div>
</>
);
}