import gradio as gr
from huggingface_hub import InferenceClient

# Initialize the Hugging Face Inference Client
client = InferenceClient()

# Function to stream the compliance suggestions as they are generated
def analyze_compliance_stream(code, compliance_standard):
    prompt = f"Analyze the following code for {compliance_standard} compliance and suggest modifications or refactoring to meet the guidelines:\n\n{code}"
    messages = [
        {"role": "user", "content": prompt}
    ]

    # Create a stream to receive generated content
    stream = client.chat.completions.create(
        model="Qwen/Qwen2.5-Coder-32B-Instruct",
        messages=messages,
        temperature=0.5,
        max_tokens=1024,
        top_p=0.7,
        stream=True
    )

    # Stream content as it is generated
    compliance_suggestions = ""
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:  # Skip chunks with no text content (e.g., the final chunk)
            compliance_suggestions += delta
            yield compliance_suggestions  # Yield incremental content to display immediately

# Build the Gradio interface with a two-column layout
with gr.Blocks() as app:
    gr.Markdown("## Code Compliance Advisor")
    gr.Markdown("Analyze your code for legal compliance and security standards (e.g., GDPR, HIPAA) and receive actionable suggestions.")

    with gr.Row():
        # First column for input components
        with gr.Column():
            code_input = gr.Textbox(lines=10, label="Code Snippet", placeholder="Enter your code here", elem_id="full_width")
            compliance_standard = gr.Dropdown(
                choices=["GDPR", "HIPAA", "PCI-DSS", "SOC 2", "ISO 27001"],
                label="Compliance Standard",
                value="GDPR"
            )
            analyze_button = gr.Button("Analyze Compliance")

        # Second column for output
        with gr.Column():
            gr.Markdown("### Compliance Suggestions")  # Acts as the label for the output below
            output_markdown = gr.Markdown()

    # Link the button to the streaming function with its inputs and output
    analyze_button.click(fn=analyze_compliance_stream, inputs=[code_input, compliance_standard], outputs=output_markdown)

# Run the Gradio app
app.launch()