Spaces: Runtime error

Update app.py

app.py CHANGED

@@ -1,75 +1,42 @@
-import torch
-from transformers import Wav2Vec2ForCTC, Wav2Vec2Tokenizer
-import sounddevice as sd
-import soundfile as sf
 import numpy as np
-import requests
-import webbrowser
 
-# Load model and tokenizer
-model_name = "facebook/wav2vec2-large-xlsr-53"
 tokenizer = Wav2Vec2Tokenizer.from_pretrained(model_name)
 model = Wav2Vec2ForCTC.from_pretrained(model_name)
 
-# Function to record audio from the microphone
-def record_audio(duration=5, fs=16000):
-    print("Recording...")
-    audio = sd.rec(int(duration * fs), samplerate=fs, channels=1, dtype='float32')
-    sd.wait()  # Wait until recording is finished
-    print("Recording finished.")
-    return audio.flatten()
 
-# Function to recognize emotion (transcription) from the recorded audio
-def recognize_emotion(audio):
-    input_values = tokenizer(audio, return_tensors="pt", padding=True).input_values
-    with torch.no_grad():
-        logits = model(input_values).logits
-
-    # Decode the predicted ids into text
-    predicted_ids = torch.argmax(logits, dim=-1)
-    transcription = tokenizer.decode(predicted_ids[0])
-    return transcription  # Return the detected text
-
-# Function to get Spotify playlist based on mood
-def get_playlist(mood):
-    url = "https://unsa-unofficial-spotify-api.p.rapidapi.com/search"
-    querystring = {"query": mood, "count": "10", "type": "playlists"}
-    headers = {
-        'x-rapidapi-key': "your-api-key",  # Replace with your actual API key
-        'x-rapidapi-host': "unsa-unofficial-spotify-api.p.rapidapi.com"
-    }
-
-    try:
-        response = requests.get(url, headers=headers, params=querystring)
-        response.raise_for_status()  # Raises error for bad responses
-        playlist_id = response.json()["Results"][0]["id"]  # Get the first playlist
-        return playlist_id
-    except requests.exceptions.RequestException as e:
-        print(f"Error fetching playlist data: {e}")
-        return None
-
-# Function to open the Spotify playlist in a web browser
-def open_playlist(playlist_id):
-    webbrowser.open(f'https://open.spotify.com/playlist/{playlist_id}')
-
-# Main function to record audio and recognize mood
-def main():
-    # Record audio
-    audio = record_audio()
 
-    emotion_text = recognize_emotion(audio)
-    print(f"Detected Emotion: {emotion_text}")
 
-    playlist_id = get_playlist(emotion_text)
-    if playlist_id:
-        open_playlist(playlist_id)
 
 if __name__ == "__main__":
-    main()
 import numpy as np
+import soundfile as sf
+import librosa
+from transformers import Wav2Vec2ForCTC, Wav2Vec2Tokenizer
+import torch
+from sklearn.model_selection import train_test_split
+from sklearn.ensemble import RandomForestClassifier
 
+# Load Hugging Face's Wav2Vec2 model and tokenizer
+model_name = "facebook/wav2vec2-large-xlsr-53"
 tokenizer = Wav2Vec2Tokenizer.from_pretrained(model_name)
 model = Wav2Vec2ForCTC.from_pretrained(model_name)
 
+def load_audio(file_path):
+    audio, sample_rate = sf.read(file_path)
+    return audio
 
+def extract_mfcc_features(audio, sample_rate):
+    mfccs = librosa.feature.mfcc(y=audio, sr=sample_rate, n_mfcc=40)
+    mfccs_scaled = np.mean(mfccs.T, axis=0)
+    return mfccs_scaled
 
+def predict_emotion(file_path):
+    audio = load_audio(file_path)
+    mfcc_features = extract_mfcc_features(audio, 16000)  # Adjust sample rate if needed
+
+    # Prepare for prediction (just using random sample for this dummy)
+    encoded_input = tokenizer(audio, sampling_rate=16000, return_tensors="pt", padding=True)
 
+    # Make sure to use the correct model input and outputs for emotion prediction
+    with torch.no_grad():
+        logits = model(**encoded_input).logits
 
+    predicted_ids = torch.argmax(logits, dim=-1)
 
+    return tokenizer.decode(predicted_ids[0])
 
+# Example usage of the model
 if __name__ == "__main__":
+    file_name = "path_to_your_audio_file.wav"  # Replace with your audio file path
+    emotion = predict_emotion(file_name)
+    print(f'Predicted Emotion: {emotion}')
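Note on the updated app.py: sf.read returns audio at the file's native sample rate (possibly stereo), while the tokenizer call assumes 16 kHz mono; the MFCC features and the sklearn imports are computed or imported but never used; and "facebook/wav2vec2-large-xlsr-53" is a pretrained-only checkpoint with no CTC vocabulary, so Wav2Vec2Tokenizer.from_pretrained can fail on it at startup, which would match the Space's "Runtime error" status. Below is a minimal sketch of a working decode path, assuming a CTC-fine-tuned checkpoint; "facebook/wav2vec2-large-960h" is an illustrative substitute, and note it transcribes speech rather than classifying emotion.

import librosa
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

# Assumption: any checkpoint fine-tuned for CTC (i.e., shipping a vocabulary)
# works here; the xlsr-53 base checkpoint alone does not.
model_name = "facebook/wav2vec2-large-960h"
processor = Wav2Vec2Processor.from_pretrained(model_name)
model = Wav2Vec2ForCTC.from_pretrained(model_name)

def transcribe(file_path):
    # Resample to 16 kHz mono, the rate wav2vec2 models were trained on
    audio, sample_rate = librosa.load(file_path, sr=16000, mono=True)
    inputs = processor(audio, sampling_rate=sample_rate, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(**inputs).logits
    # Greedy CTC decode: pick the most likely token at each frame
    predicted_ids = torch.argmax(logits, dim=-1)
    return processor.batch_decode(predicted_ids)[0]

if __name__ == "__main__":
    print(transcribe("path_to_your_audio_file.wav"))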