import requests
import streamlit as st

# Get the Hugging Face API key from the user
my_key = st.text_input("Enter your Hugging Face API Key", type="password")

# Set the Inference API URL for the Whisper model
API_URL = "https://api-inference.huggingface.co/models/openai/whisper-large-v3-turbo"
headers = {"Authorization": f"Bearer {my_key}"}

# Send the raw audio bytes to the Inference API and return the JSON response.
# The Inference API expects the audio file contents as the request body,
# not as a multipart form upload.
def query(file_data: bytes) -> dict:
    response = requests.post(API_URL, headers=headers, data=file_data)
    return response.json()

# Streamlit UI elements for file upload
st.title("Whisper API Audio Transcription")
st.markdown("Upload an audio file and get transcription results")

# File uploader widget (allows multiple files)
uploaded_files = st.file_uploader(
    "Choose an audio file",
    type=["mp3", "wav", "flac"],
    accept_multiple_files=True,
)

if uploaded_files:
    results = {}
    for uploaded_file in uploaded_files:
        st.write(f"Processing file: {uploaded_file.name}")
        # Send the file contents to the Hugging Face Inference API
        output = query(uploaded_file.getvalue())
        # Store the result keyed by file name
        results[uploaded_file.name] = output

    # Display the results
    st.write("Results:")
    for file, result in results.items():
        st.write(f"**Results for {file}:**")
        st.json(result)
else:
    st.write("Please upload an audio file.")
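
# The original script imported `pipeline` from transformers without using it,
# which suggests an optional local-inference path instead of the hosted API.
# A minimal sketch of that approach, assuming ffmpeg is installed and the model
# fits in local memory; the helper names `load_asr_pipeline` and
# `transcribe_locally` are hypothetical and not part of the app above.

from transformers import pipeline

@st.cache_resource
def load_asr_pipeline():
    # Downloads and caches the Whisper model on first use;
    # cached across Streamlit reruns by st.cache_resource
    return pipeline(
        "automatic-speech-recognition",
        model="openai/whisper-large-v3-turbo",
    )

def transcribe_locally(file_data: bytes) -> dict:
    asr = load_asr_pipeline()
    # The ASR pipeline accepts raw audio bytes and decodes them via ffmpeg;
    # the result is a dict containing the transcribed "text"
    return asr(file_data)

# Usage (hypothetical): replace `query(uploaded_file.getvalue())` in the loop
# above with `transcribe_locally(uploaded_file.getvalue())` to avoid the
# hosted API and its rate limits, at the cost of local compute.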