Spaces: Hugging Face Space page (build status: Runtime error)
import gradio as gr
import torch

from model import ECAPA_gender

# Download and instantiate the pretrained ECAPA gender classifier once at
# startup, then switch it to evaluation mode (disables dropout/batch-norm
# training behavior) since this app only performs inference.
model = ECAPA_gender.from_pretrained('JaesungHuh/ecapa-gender')
model.eval()
def predict_gender(filepath):
    """Classify the speaker's gender from an uploaded audio file.

    Args:
        filepath: Path to the audio file on disk, as supplied by the
            Gradio ``Audio`` component configured with ``type='filepath'``.

    Returns:
        dict: Label-to-probability mapping for ``gr.Label``. Keys are
        ``'Human ' + model.pred2gender[i]`` for each class index; values
        are softmax probabilities as plain floats.
    """
    # NOTE(review): assumes model.pred2gender maps class index -> gender
    # string and that load_audio returns a batched tensor — confirm in model.py.
    audio = model.load_audio(filepath)
    with torch.no_grad():
        # Call the module itself instead of model.forward(...): invoking
        # the module runs any registered hooks and is the documented
        # PyTorch convention; .forward() silently bypasses them.
        output = model(audio)
    # Softmax over the class dimension of the single-item batch.
    probs = torch.softmax(output, dim=1)
    prob_dict = {'Human ' + model.pred2gender[i]: float(prob) for i, prob in enumerate(probs[0])}
    return prob_dict
# Gradio UI wiring: a file-upload audio input, a label output that renders
# the per-class probabilities, and twelve bundled example clips named
# 00001.wav through 00012.wav.
audio_input = gr.Audio(type='filepath', label='Upload your audio file here')
result_label = gr.Label(label='Gender classification result')
example_files = ['%05d.wav' % idx for idx in range(1, 13)]

demo = gr.Interface(
    fn=predict_gender,
    inputs=audio_input,
    outputs=result_label,
    examples=example_files,
)
demo.launch()