import torch
# from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
from transformers import pipeline, WhisperProcessor, WhisperForConditionalGeneration
import gradio as gr
import librosa  # used to load and resample the uploaded audio to 16 kHz (needs to be in requirements.txt)
import datetime
| """ | |
| device = "cuda:0" if torch.cuda.is_available() else "cpu" | |
| torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32 | |
| model_id = "distil-whisper/distil-small.en" | |
| model = AutoModelForSpeechSeq2Seq.from_pretrained( | |
| model_id, torch_dtype=torch_dtype, use_safetensors=True | |
| ) | |
| model.to(device) | |
| processor = AutoProcessor.from_pretrained(model_id) | |
| pipe = pipeline( | |
| "automatic-speech-recognition", | |
| model=model, | |
| tokenizer=processor.tokenizer, | |
| feature_extractor=processor.feature_extractor, | |
| max_new_tokens=128, | |
| torch_dtype=torch_dtype, | |
| device=device, | |
| ) | |
| """ | |
# TODO: after identifying the word(s) in the text output, call a text generation model to display the matching audio content
# import torch
# from transformers import pipeline
# from datasets import load_dataset
# from transformers import WhisperProcessor, WhisperForConditionalGeneration

# load the Whisper model and processor
processor = WhisperProcessor.from_pretrained("microsoft/whisper-base-webnn")
model = WhisperForConditionalGeneration.from_pretrained("microsoft/whisper-base-webnn")
model.config.forced_decoder_ids = None

# load dummy dataset and read audio files
# ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
# sample = ds[0]["audio"]
| """ | |
| device = "cuda:0" if torch.cuda.is_available() else "cpu" | |
| pipe = pipeline( | |
| "automatic-speech-recognition", | |
| # model="openai/whisper-base", | |
| model = "microsoft/whisper-base-webnn", | |
| chunk_length_s=30, | |
| device=device, | |
| ) | |
| """ | |
# ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
# sample = ds[0]["audio"]
# prediction = pipe(sample.copy(), batch_size=8)["text"]

# we can also return timestamps for the predictions
# prediction = pipe(sample.copy(), batch_size=8, return_timestamps=True)["chunks"]
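# The commented lines above point at the pipeline-based path with timestamps. The function
# below is a hedged, self-contained sketch of that path (illustrative name, not called
# anywhere in the app): it builds its own ASR pipeline around the same checkpoint and
# returns the "chunks" list, where each entry looks like {"timestamp": (start, end), "text": "..."}.
def transcribe_with_timestamps(audio_path):
    # chunk_length_s=30 mirrors the disabled pipeline block above
    asr = pipeline(
        "automatic-speech-recognition",
        model="microsoft/whisper-base-webnn",
        chunk_length_s=30,
    )
    # return_timestamps=True makes the pipeline emit per-segment (start, end) times
    return asr(audio_path, batch_size=8, return_timestamps=True)["chunks"]
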
def audio2text(audio_file, prompt: str):
    # `prompt` holds the word(s) to search for; it is not used yet (see the sketch below this function)
    # load the uploaded file and resample to the 16 kHz rate Whisper expects
    speech, sampling_rate = librosa.load(audio_file, sr=16000)
    input_features = processor(speech, sampling_rate=sampling_rate, return_tensors="pt").input_features
    # generate token ids
    predicted_ids = model.generate(input_features)
    # decode token ids to text (skip_special_tokens=False would keep Whisper's special tokens in the output)
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
    # prediction = pipe(audio_file, batch_size=8, return_timestamps=True)["chunks"]
    # prediction = pipe(audio_file)
    # batch_decode returns a list with one string per audio input
    return transcription[0]
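
# Hedged sketch, not wired into the app: one possible way to use the currently unused
# "word(s) to search for" textbox. `find_keywords` is an illustrative name, not part of the
# original code; it simply reports which of the requested words occur in a transcript.
def find_keywords(transcription: str, prompt: str):
    # split the user's input on whitespace/commas and check each word case-insensitively
    words = [w.strip().lower() for w in prompt.replace(",", " ").split() if w.strip()]
    text = transcription.lower()
    return {word: (word in text) for word in words}
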
gr.Interface(
    fn=audio2text,
    inputs=[
        gr.Audio(label='upload your audio file', sources='upload', type='filepath'),
        gr.Textbox(label="provide word(s) to search for"),
    ],
    outputs=[gr.Textbox(label="transcription")],
).launch()