import gradio as gr
import torch
import numpy as np
from PIL import Image

try:
    from spaces import GPU
except ImportError:
    # Define a no-op decorator if running locally (outside Hugging Face Spaces)
    def GPU(func):
        return func
import os
import argparse

from inference import GenerativeInferenceModel, get_inference_configs

# Pick a default port: honor the PORT environment variable on Hugging Face
# Spaces, otherwise use a local development default.
if "SPACE_ID" in os.environ:
    default_port = int(os.environ.get("PORT", 7860))
else:
    default_port = 8861  # local default port

# Parse command line arguments
parser = argparse.ArgumentParser(description='Run Generative Inference Demo')
parser.add_argument('--port', type=int, default=default_port,
                    help='Port to run the server on')
args = parser.parse_args()

# Create model directories if they don't exist
os.makedirs("models", exist_ok=True)
os.makedirs("stimuli", exist_ok=True)
# Initialize model
model = GenerativeInferenceModel()

# Define example images and their parameters, with values from the research
examples = [
    {
        "image": os.path.join("stimuli", "Neon_Color_Circle.jpg"),
        "name": "Neon Color Spreading",
        "wiki": "https://en.wikipedia.org/wiki/Neon_color_spreading",
        "papers": [
            "[Color Assimilation](https://doi.org/10.1016/j.visres.2000.200.1)",
            "[Perceptual Filling-in](https://doi.org/10.1016/j.tics.2003.08.003)"
        ],
        "method": "Prior-Guided Drift Diffusion",
        "reverse_diff": {
            "model": "resnet50_robust",
            "layer": "layer3",
            "initial_noise": 0.8,
            "diffusion_noise": 0.003,
            "step_size": 1.0,
            "iterations": 101,
            "epsilon": 20.0
        }
    },
    {
        "image": os.path.join("stimuli", "Kanizsa_square.jpg"),
        "name": "Kanizsa Square",
        "wiki": "https://en.wikipedia.org/wiki/Kanizsa_triangle",
        "papers": [
            "[Gestalt Psychology](https://en.wikipedia.org/wiki/Gestalt_psychology)",
            "[Neural Mechanisms](https://doi.org/10.1016/j.tics.2003.08.003)"
        ],
        "method": "Prior-Guided Drift Diffusion",
        "reverse_diff": {
            "model": "resnet50_robust",
            "layer": "all",
            "initial_noise": 0.0,
            "diffusion_noise": 0.005,
            "step_size": 0.64,
            "iterations": 100,
            "epsilon": 5.0
        }
    },
    {
        "image": os.path.join("stimuli", "CornsweetBlock.png"),
        "name": "Cornsweet Illusion",
        "wiki": "https://en.wikipedia.org/wiki/Cornsweet_illusion",
        "papers": [
            "[Brightness Perception](https://doi.org/10.1016/j.visres.2000.200.1)",
            "[Edge Effects](https://doi.org/10.1016/j.tics.2003.08.003)"
        ],
| "instructions": "Both blocks are gray in color (the same), use your finger to cover the middle line. Hit 'Load Parameters' and then hit 'Run Generative Inference' to see how the model sees the blocks.", | |
| "method": "Prior-Guided Drift Diffusion", | |
| "reverse_diff": { | |
| "model": "resnet50_robust", | |
| "layer": "layer3", | |
| "initial_noise": 0.5, | |
| "diffusion_noise": 0.005, | |
| "step_size": 0.8, | |
| "iterations": 51, | |
| "epsilon": 20.0 | |
| } | |
| }, | |
| { | |
| "image": os.path.join("stimuli", "face_vase.png"), | |
| "name": "Rubin's Face-Vase (Object Prior)", | |
| "wiki": "https://en.wikipedia.org/wiki/Rubin_vase", | |
| "papers": [ | |
| "[Figure-Ground Perception](https://en.wikipedia.org/wiki/Figure-ground_(perception))", | |
| "[Bistable Perception](https://doi.org/10.1016/j.tics.2003.08.003)" | |
| ], | |
| "method": "Prior-Guided Drift Diffusion", | |
| "reverse_diff": { | |
| "model": "resnet50_robust", | |
| "layer": "avgpool", | |
| "initial_noise": 0.9, | |
| "diffusion_noise": 0.003, | |
| "step_size": 0.58, | |
| "iterations": 100, | |
| "epsilon": 0.81 | |
| } | |
| }, | |
| { | |
| "image": os.path.join("stimuli", "Confetti_illusion.png"), | |
| "name": "Confetti Illusion", | |
| "wiki": "https://www.youtube.com/watch?v=SvEiEi8O7QE", | |
| "papers": [ | |
| "[Color Perception](https://doi.org/10.1016/j.visres.2000.200.1)", | |
| "[Context Effects](https://doi.org/10.1016/j.tics.2003.08.003)" | |
| ], | |
| "method": "Prior-Guided Drift Diffusion", | |
| "reverse_diff": { | |
| "model": "resnet50_robust", | |
| "layer": "layer3", | |
| "initial_noise": 0.1, | |
| "diffusion_noise": 0.003, | |
| "step_size": 0.5, | |
| "iterations": 101, | |
| "epsilon": 20.0 | |
| } | |
| }, | |
| { | |
| "image": os.path.join("stimuli", "EhresteinSingleColor.png"), | |
| "name": "Ehrenstein Illusion", | |
| "wiki": "https://en.wikipedia.org/wiki/Ehrenstein_illusion", | |
| "papers": [ | |
| "[Subjective Contours](https://doi.org/10.1016/j.visres.2000.200.1)", | |
| "[Neural Processing](https://doi.org/10.1016/j.tics.2003.08.003)" | |
| ], | |
| "method": "Prior-Guided Drift Diffusion", | |
| "reverse_diff": { | |
| "model": "resnet50_robust", | |
| "layer": "layer3", | |
| "initial_noise": 0.5, | |
| "diffusion_noise": 0.005, | |
| "step_size": 0.8, | |
| "iterations": 101, | |
| "epsilon": 20.0 | |
| } | |
| }, | |
| { | |
| "image": os.path.join("stimuli", "GroupingByContinuity.png"), | |
| "name": "Grouping by Continuity", | |
| "wiki": "https://en.wikipedia.org/wiki/Principles_of_grouping", | |
| "papers": [ | |
| "[Gestalt Principles](https://en.wikipedia.org/wiki/Gestalt_psychology)", | |
| "[Visual Organization](https://doi.org/10.1016/j.tics.2003.08.003)" | |
| ], | |
| "method": "Prior-Guided Drift Diffusion", | |
| "reverse_diff": { | |
| "model": "resnet50_robust", | |
| "layer": "layer3", | |
| "initial_noise": 0.0, | |
| "diffusion_noise": 0.005, | |
| "step_size": 0.4, | |
| "iterations": 101, | |
| "epsilon": 4.0 | |
| } | |
| }, | |
| { | |
| "image": os.path.join("stimuli", "figure_ground.png"), | |
| "name": "Figure-Ground Illusion", | |
| "wiki": "https://en.wikipedia.org/wiki/Figure-ground_(perception)", | |
| "papers": [ | |
| "[Gestalt Principles](https://en.wikipedia.org/wiki/Gestalt_psychology)", | |
| "[Perceptual Organization](https://doi.org/10.1016/j.tics.2003.08.003)" | |
| ], | |
| "method": "Prior-Guided Drift Diffusion", | |
| "reverse_diff": { | |
| "model": "resnet50_robust", | |
| "layer": "layer3", | |
| "initial_noise": 0.1, | |
| "diffusion_noise": 0.003, | |
| "step_size": 0.5, | |
| "iterations": 101, | |
| "epsilon": 3.0 | |
| } | |
| } | |
| ] | |
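
# Optional startup check (not in the original app): warn early about example
# stimuli missing from the local "stimuli" directory, so a bad path surfaces
# at launch instead of as an empty gr.Image later.
_missing = [ex["image"] for ex in examples if not os.path.exists(ex["image"])]
if _missing:
    print(f"Warning: missing example stimuli: {_missing}")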
# The GPU import above is presumably intended to decorate the heavy inference
# call; it allocates GPU hardware on Hugging Face Spaces (ZeroGPU) and is a
# no-op locally via the fallback defined at the top of the file.
@GPU
def run_inference(image, model_type, inference_type, eps_value, num_iterations,
                  initial_noise=0.05, diffusion_noise=0.3, step_size=0.8, model_layer="layer3"):
    # Check that an image is provided; use a toast warning rather than
    # returning a message string into the Gallery output.
    if image is None:
        gr.Warning("Please upload an image before running inference.")
        return None, None

    # Convert eps to float
    eps = float(eps_value)

    # Load the inference configuration for the selected method
    config = get_inference_configs(inference_type=inference_type, eps=eps, n_itr=int(num_iterations))

    # Handle Prior-Guided Drift Diffusion specific parameters
    if inference_type == "Prior-Guided Drift Diffusion":
        config['initial_inference_noise_ratio'] = float(initial_noise)
        config['diffusion_noise_ratio'] = float(diffusion_noise)
        config['step_size'] = float(step_size)
        config['top_layer'] = model_layer

    # Run generative inference
    result = model.inference(image, model_type, config)

    # Extract results based on return type
    if isinstance(result, tuple):
        # Old format: (output_image, all_steps)
        output_image, all_steps = result
    else:
        # New format: dictionary
        output_image = result['final_image']
        all_steps = result['steps']

    # Convert each step tensor (C, H, W, values in [0, 1]) to a PIL frame;
    # clamp first so values slightly outside [0, 1] don't wrap around in uint8.
    frames = []
    for step_image in all_steps:
        step_pil = Image.fromarray(
            (step_image.clamp(0, 1).permute(1, 2, 0).cpu().numpy() * 255).astype(np.uint8))
        frames.append(step_pil)

    # Convert the final output image to PIL
    final_image = Image.fromarray(
        (output_image.clamp(0, 1).permute(1, 2, 0).cpu().numpy() * 255).astype(np.uint8))

    # Return the final inferred image and the animation frames
    return final_image, frames
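
# Hypothetical helper (not part of the original demo): the per-step frames
# returned by run_inference are plain PIL images, so they can be written out
# as an animated GIF, e.g. for figures. A minimal sketch using Pillow's GIF writer:
def save_steps_as_gif(frames, path, duration_ms=80):
    """Save the inference trajectory as an animated GIF."""
    if frames:
        frames[0].save(path, save_all=True, append_images=frames[1:],
                       duration=duration_ms, loop=0)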
# Helper function to apply an example's parameters to the UI controls.
# The return order must match the `outputs` list wired to each
# "Load Parameters" button below.
def apply_example(example):
    return [
        example["image"],                            # input image
        "resnet50_robust",                           # model type
        example["method"],                           # inference type
        example["reverse_diff"]["epsilon"],          # epsilon value
        example["reverse_diff"]["iterations"],       # number of iterations
        example["reverse_diff"]["initial_noise"],    # initial (drift) noise
        example["reverse_diff"]["diffusion_noise"],  # diffusion noise
        example["reverse_diff"]["step_size"],        # step size
        example["reverse_diff"]["layer"],            # model layer
        gr.Group(visible=True)                       # show parameters section
    ]
# Define the interface
with gr.Blocks(title="Generative Inference Demo", css="""
.purple-button {
    background-color: #8B5CF6 !important;
    color: white !important;
    border: none !important;
}
.purple-button:hover {
    background-color: #7C3AED !important;
}
""") as demo:
    gr.Markdown("# Generative Inference Demo")
    gr.Markdown("This demo showcases how neural networks can perceive visual illusions and develop Gestalt principles of perceptual organization through generative inference.")
    gr.Markdown("📄 **Paper:** [Generative Inference unifies feedback processing for learning and perception in natural and artificial vision](https://www.biorxiv.org/content/10.1101/2025.10.21.683535v1)")
    gr.Markdown("""
**How to use this demo:**
- **Load pre-configured examples**: Click on any visual illusion below and hit "Load Parameters" to automatically set the parameters tuned for that illusion
- **Run the inference**: After loading parameters or setting your own, hit "Run Generative Inference" to start the generative inference process
- **Upload your own images** and experiment with different parameters to see how they affect the generative inference process
""")
    # Main processing interface
    with gr.Row():
        with gr.Column(scale=1):
            # Inputs
            image_input = gr.Image(label="Input Image", type="pil",
                                   value=os.path.join("stimuli", "Neon_Color_Circle.jpg"))
            # Run button right below the image
            run_button = gr.Button("🪄 Run Generative Inference", variant="primary",
                                   elem_classes="purple-button")
            # Parameters toggle button
            params_button = gr.Button("⚙️ Play with the parameters", variant="secondary")
            # Parameters section (initially hidden)
            with gr.Group(visible=False) as params_section:
                with gr.Row():
                    model_choice = gr.Dropdown(
                        choices=["resnet50_robust", "standard_resnet50"],  # "resnet50_robust_face" hidden for deployment
                        value="resnet50_robust",
                        label="Model"
                    )
                    inference_type = gr.Dropdown(
                        choices=["Prior-Guided Drift Diffusion", "IncreaseConfidence"],
                        value="Prior-Guided Drift Diffusion",
                        label="Inference Method"
                    )
                with gr.Row():
                    eps_slider = gr.Slider(minimum=0.0, maximum=40.0, value=20.0, step=0.01,
                                           label="Epsilon (Stimulus Fidelity)")
                    iterations_slider = gr.Slider(minimum=1, maximum=600, value=101, step=1,
                                                  label="Number of Iterations")
                with gr.Row():
                    initial_noise_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.8, step=0.01,
                                                     label="Drift Noise")
                    diffusion_noise_slider = gr.Slider(minimum=0.0, maximum=0.05, value=0.003, step=0.001,
                                                       label="Diffusion Noise")
                with gr.Row():
                    step_size_slider = gr.Slider(minimum=0.01, maximum=2.0, value=1.0, step=0.01,
                                                 label="Update Rate")
                    layer_choice = gr.Dropdown(
                        choices=["all", "conv1", "bn1", "relu", "maxpool", "layer1", "layer2", "layer3", "layer4", "avgpool"],
                        value="layer3",
                        label="Model Layer"
                    )
        with gr.Column(scale=2):
            # Outputs
            output_image = gr.Image(label="Final Inferred Image")
            output_frames = gr.Gallery(label="Inference Steps", columns=5, rows=2)
    # Examples section with integrated explanations
    gr.Markdown("## Visual Illusion Examples")
    gr.Markdown("Select an illusion to load its parameters and see how generative inference reveals perceptual effects.")

    # For each example, create a row with the image and explanation side by side
    for i, ex in enumerate(examples):
        with gr.Row():
            # Left column: the image and its load button
            with gr.Column(scale=1):
                example_img = gr.Image(value=ex["image"], type="filepath", label=ex["name"])
                load_btn = gr.Button("Load Parameters", variant="primary")
                # Wire the load button to apply this example's parameters
                # (the ex=ex default binds the current example in the loop)
                load_btn.click(
                    fn=lambda ex=ex: apply_example(ex),
                    outputs=[
                        image_input, model_choice, inference_type,
                        eps_slider, iterations_slider,
                        initial_noise_slider, diffusion_noise_slider,
                        step_size_slider, layer_choice, params_section
                    ]
                )
            # Right column: the explanation
            with gr.Column(scale=2):
                gr.Markdown(f"### {ex['name']}")
                gr.Markdown(f"[Read more on Wikipedia]({ex['wiki']})")
                # Show instructions if they exist
                if "instructions" in ex:
                    gr.Markdown(f"**Instructions:** {ex['instructions']}")
        if i < len(examples) - 1:  # no separator after the last example
            gr.Markdown("---")
    # Set up the event handler for the main inference
    run_button.click(
        fn=run_inference,
        inputs=[
            image_input, model_choice, inference_type,
            eps_slider, iterations_slider,
            initial_noise_slider, diffusion_noise_slider,
            step_size_slider, layer_choice
        ],
        outputs=[output_image, output_frames]
    )
    # Reveal the parameters section (the button only shows it; it does not hide it again)
    def toggle_params():
        return gr.Group(visible=True)

    params_button.click(
        fn=toggle_params,
        outputs=[params_section]
    )
    # About section
    gr.Markdown("""
## About Generative Inference

Generative inference is a technique that reveals how neural networks perceive visual stimuli. This demo primarily uses the Prior-Guided Drift Diffusion method.

### Prior-Guided Drift Diffusion
Moves away from a noisy representation of the input image.

### IncreaseConfidence
Moves away from the least likely class identified at iteration 0 (fast perception).

### Parameters
- **Drift Noise**: Amount of noise added to the image at the start
- **Diffusion Noise**: Amount of noise added at each optimization step
- **Update Rate**: Learning rate for the optimization process
- **Number of Iterations**: How many optimization steps to perform
- **Model Layer**: Which layer of the ResNet50 model to extract features from
- **Epsilon (Stimulus Fidelity)**: Controls the size of the perturbation during optimization

**Generative Inference was developed by [Tahereh Toosi](https://toosi.github.io).**
""")
# Launch the demo
if __name__ == "__main__":
    print(f"Starting server on port {args.port}")
    demo.launch(
        server_name="0.0.0.0",
        server_port=args.port,
        share=False,
        debug=True
    )
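
# Typical invocations (assumes inference.py and the stimuli/ directory are present):
#   python app.py              # Spaces: PORT env var; local default: port 8861
#   python app.py --port 7860  # override the port explicitly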