Upload 3 files

- app.ipynb +0 -0
- app.py +6 -6
- requirements.txt +0 -1
app.ipynb
CHANGED

The diff for this file is too large to render. See raw diff.

app.py
CHANGED

@@ -3,16 +3,16 @@
 # %% auto 0
 __all__ = ['learn', 'categories', 'aud', 'examples', 'intf', 'log_mel_spec_tfm', 'classify_aud']
 
+# %% app.ipynb 1
 from fastai.vision.all import *
-import librosa.display
 import matplotlib.pyplot as plt
+import librosa.display
 import numpy as np
 import pandas as pd
 import librosa
-from scipy.io import wavfile
 import gradio as gr
 
-# %% app.ipynb
+# %% app.ipynb 2
 def log_mel_spec_tfm(fname):
     y, sr = librosa.load(fname, mono=True)
     D = librosa.amplitude_to_db(np.abs(librosa.stft(y)), ref=np.max)
@@ -22,11 +22,11 @@ def log_mel_spec_tfm(fname):
     plt.close()
     return img
 
-# %% app.ipynb
+# %% app.ipynb 3
 learn = load_learner('model.pkl')
 learn.remove_cb(ProgressCallback)
 
-# %% app.ipynb
+# %% app.ipynb 6
 categories = ('Brass', 'Flute', 'Guitar', 'Keyboard', 'Mallet', 'Reed', 'String', 'Vocal')
 
 def classify_aud(aud):
@@ -35,7 +35,7 @@ def classify_aud(aud):
     pred, idx, probs = learn.predict(img_fname)
     return dict(zip(categories, map(float, probs)))
 
-# %% app.ipynb
+# %% app.ipynb 7
 aud = gr.Audio(source="upload", type="numpy")
 examples = [f.name for f in Path('.').iterdir() if '.wav' in f.name]
 
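The hunks above never reach the intf object that __all__ exports. Below is a minimal sketch of how aud, examples, and classify_aud are typically wired together, assuming the Gradio 3.x Interface API that gr.Audio(source="upload", type="numpy") implies; the gr.Label output and the intf.launch() call are assumptions, not lines taken from the file:

# Hypothetical wiring only -- not taken from app.py.
intf = gr.Interface(
    fn=classify_aud,      # returns a {category: probability} dict, per the diff
    inputs=aud,           # gr.Audio(source="upload", type="numpy")
    outputs=gr.Label(),   # renders the probability dict as ranked labels
    examples=examples,    # the .wav files found in the Space's root directory
)
intf.launch()             # assumed entry point; not visible in the hunks

With type="numpy", classify_aud receives a (sample_rate, array) tuple rather than a file path, which is presumably why the function produces a spectrogram image (img_fname) before calling learn.predict.
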
requirements.txt
CHANGED

@@ -2,7 +2,6 @@ fastai
 librosa
 matplotlib
 numpy
-functools
 pandas
 librosa
 scipy
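
The only change here drops functools, which ships with the Python standard library and so has nothing to install from PyPI; keeping it in requirements.txt is at best redundant and may be what broke the Space's build (an assumption, the commit itself does not say). The duplicate librosa entry is left untouched. A quick illustrative check that functools resolves from the standard library rather than site-packages:

# Illustrative check: functools is stdlib, so it never belongs in requirements.txt.
import functools
import sysconfig

print(functools.__file__)  # e.g. .../lib/python3.x/functools.py
print(functools.__file__.startswith(sysconfig.get_paths()["stdlib"]))  # True on a standard CPython install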