from transformers import pipeline  # the Hugging Face framework
# The list of Hugging Face pipelines is available here: https://huggingface.co/docs/transformers/quicktour
# pipeline() downloads the deep learning model into a local cache.
# Alternatives that were tried:
# deepneurones = pipeline("text2text-generation", model="google/flan-t5-small")
# deepneurones = pipeline("automatic-speech-recognition")
deepneurones = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")  # a model must be chosen
# Speaker diarization with pyannote.audio was also tried (requires a Hugging Face auth token):
# from pyannote.audio import Pipeline
# deepneuronesdiarizatin = Pipeline.from_pretrained("pyannote/speaker-diarization", use_auth_token="test")
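# Minimal diarization sketch, kept disabled like the lines above. It assumes pyannote.audio 2.x,
# a valid Hugging Face token, and a local "meeting.wav" file; the token and the path are
# placeholders, not values from this repo.
#
# from pyannote.audio import Pipeline
# diarizer = Pipeline.from_pretrained("pyannote/speaker-diarization", use_auth_token="YOUR_HF_TOKEN")
# diarization = diarizer("meeting.wav")
# for turn, _, speaker in diarization.itertracks(yield_label=True):
#     print(f"{speaker}: {turn.start:.1f}s - {turn.end:.1f}s")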
class AudioAnalyserAnglais:
    @classmethod
    def stt(cls, file_content):
        # Speech-to-text: run the wav2vec2 ASR pipeline on the audio file content
        return deepneurones(file_content)
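# Minimal usage sketch: "sample.wav" is a placeholder path to a local English audio file
# (ffmpeg is assumed to be installed so the pipeline can decode and resample it).
# The ASR pipeline returns a dict with a "text" key.
if __name__ == "__main__":
    result = AudioAnalyserAnglais.stt("sample.wav")
    print(result["text"])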