import torch
import torch.nn.functional as F
from transformers import AutoConfig, Wav2Vec2FeatureExtractor
from src.models import Wav2Vec2ForSpeechClassification
import gradio as gr
import librosa

# Run on CPU; the checkpoint and feature extractor are downloaded from the Hugging Face Hub.
device = torch.device("cpu")
model_name_or_path = "harshit345/xlsr-wav2vec-speech-emotion-recognition"
config = AutoConfig.from_pretrained(model_name_or_path)
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(model_name_or_path)
sampling_rate = feature_extractor.sampling_rate
model = Wav2Vec2ForSpeechClassification.from_pretrained(model_name_or_path).to(device)
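
# The class names come from config.id2label (assumption: the checkpoint ships an
# id2label mapping, as classification configs on the Hub usually do). Inspecting it
# shows which emotion labels the classification head predicts:
# print(config.id2label)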

def load_data(path):
    # Load the audio at its native sampling rate, keeping all channels.
    speech, sr = librosa.load(path, sr=None, mono=False)
    # Down-mix multichannel recordings to a single channel.
    if speech.ndim > 1:
        speech = speech.mean(axis=0)
    # The model expects 16 kHz audio, so resample if necessary.
    if sr != sampling_rate:
        speech = librosa.resample(speech, orig_sr=sr, target_sr=sampling_rate)
    return speech

def inference(path):
    speech = load_data(path)
    inputs = feature_extractor(speech, sampling_rate=sampling_rate, return_tensors="pt").input_values
    with torch.no_grad():
        logits = model(inputs.to(device)).logits
    # Convert logits to per-class probabilities and map them to their label names.
    scores = F.softmax(logits, dim=1).cpu().numpy()[0]
    return {config.id2label[i]: float(round(score, 2)) for i, score in enumerate(scores)}
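
# A minimal sketch of calling the pipeline directly, assuming 'data/test_audio.wav'
# (one of the example files below) exists; inference() returns a dict mapping each
# emotion label to its rounded probability:
# print(inference('data/test_audio.wav'))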

examples = ['data/test_audio.wav', 'data/test_audio_2.wav']
inputs = gr.inputs.Audio(label="Input Audio", type="filepath", source="microphone")
outputs = gr.outputs.Label(type="confidences", label="Output Scores")
iface = gr.Interface(fn=inference, inputs=inputs, outputs=outputs, examples=examples)
iface.launch(debug=True)
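
# Note: the gr.inputs / gr.outputs namespaces above are the pre-3.x Gradio API.
# On newer Gradio versions the equivalent components are gr.Audio and gr.Label
# (a sketch, untested here; parameter names such as source/sources differ across versions):
#   inputs = gr.Audio(label="Input Audio", type="filepath")
#   outputs = gr.Label(label="Output Scores")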