import os

import gradio as gr
import torch
import torchaudio
import numpy as np
from denoiser.demucs import Demucs
from pydub import AudioSegment
import soundfile as sf
import librosa

modelpath = './denoiser/master64.th'
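# Assumption: master64.th is presumably the pretrained 64-hidden-unit Demucs denoiser
# checkpoint ("master64") from the facebookresearch/denoiser project, which expects
# 16 kHz mono audio; the file is assumed to be bundled with the Space at this path.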
def transcribe(audio_path):
    # Unified audio preprocessing pipeline
    def preprocess_audio(input_path, output_path):
        # Use pydub for format conversion
        audio = AudioSegment.from_file(input_path)
        # Force mono + 16 kHz sample rate
        if audio.channels > 1:
            audio = audio.set_channels(1)
        if audio.frame_rate != 16000:
            audio = audio.set_frame_rate(16000)
        # Export to a temporary WAV file
        audio.export(output_path, format="wav")
        return output_path

    # Normalize every input (wav/mp3, stereo, any sample rate) to a mono 16 kHz WAV
    temp_wav = "temp_input.wav"
    preprocess_audio(audio_path, temp_wav)
    file = temp_wav

    # Load the model (rebuilt on every call; see the caching note below)
    model = Demucs(hidden=64)
    state_dict = torch.load(modelpath, map_location='cpu')
    model.load_state_dict(state_dict)
    model.eval()

    # Load the audio and force mono
    x, sr = torchaudio.load(file)
    if x.shape[0] > 1:
        x = torch.mean(x, dim=0, keepdim=True)

    # Audio length check
    MAX_AUDIO_SECONDS = 900
    if x.shape[1] / sr > MAX_AUDIO_SECONDS:
        raise ValueError(f"音訊過長!限制:{MAX_AUDIO_SECONDS} 秒,當前:{x.shape[1]/sr:.1f} 秒")

    # Run denoising
    with torch.no_grad():
        out = model(x[None])[0]

    # Post-processing: peak-normalize to avoid clipping
    out = out / max(out.abs().max().item(), 1)
    torchaudio.save('enhanced.wav', out, sr)

    # Convert to MP3 for output
    enhanced_mp3 = 'enhanced.mp3'
    AudioSegment.from_wav('enhanced.wav').export(
        enhanced_mp3,
        format="mp3",
        bitrate="256k"
    )

    # Clean up the temporary file
    if os.path.exists(temp_wav):
        os.remove(temp_wav)

    return enhanced_mp3
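# Note: a minimal caching sketch (an assumption, not part of the original Space):
# building the Demucs model once at module import and reusing it inside transcribe()
# would avoid reloading the checkpoint on every request, e.g.:
#
#     _model = Demucs(hidden=64)
#     _model.load_state_dict(torch.load(modelpath, map_location='cpu'))
#     _model.eval()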
# 👇 Important: work around Gradio's type inference by setting explicit annotations
transcribe.__annotations__ = {
    "audio_path": str,
    "return": str
}
# 🎯 Description content (converted to HTML)
description_html = """
<h1 align='center'><a href='https://www.twman.org/AI/ASR/SpeechEnhancement' target='_blank'>中文語音質檢/噪音去除 (語音增強)</a></h1>
<p align='center'><b>上傳一段音檔 (支援 .mp3, .wav),為了提升語音識別的效果,可以在識別前先進行噪音去除</b></p>
<div align='center'>
<a href='https://deep-learning-101.github.io' target='_blank'>deep-learning-101.github.io</a> |
<a href='https://www.twman.org/AI' target='_blank'> AI </a> |
<a href='https://www.twman.org' target='_blank'>TonTon Huang Ph.D.</a> |
<a href='https://blog.twman.org/p/deeplearning101.html' target='_blank'>手把手帶你一起踩AI坑</a> |
<a href='https://github.com/Deep-Learning-101' target='_blank'>GitHub</a> |
<a href='http://deeplearning101.twman.org' target='_blank'>Deep Learning 101</a> |
<a href='https://www.youtube.com/c/DeepLearning101' target='_blank'>YouTube</a>
</div>
<br>
📘 相關技術文章:
<ul>
<li><a href='https://blog.twman.org/2025/04/AI-Robot.html' target='_blank'>AI 陪伴機器人:2025 趨勢分析技術突破、市場潛力與未來展望</a> | <a href='https://blog.twman.org/2025/04/FinanceGenAI.html' target='_blank'>金融科技新浪潮:生成式 AI (GenAI) 應用場景、效益與導入挑戰</a></li>
<li><a href="https://blog.twman.org/2025/03/AIAgent.html" target="_blank">避開 AI Agent 開發陷阱:常見問題、挑戰與解決方案</a>:<a href="https://deep-learning-101.github.io/agent" target="_blank">探討多種 AI 代理人工具的應用經驗與挑戰,分享實用經驗與工具推薦。</a></li>
<li><a href="https://blog.twman.org/2024/08/LLM.html" target="_blank">白話文手把手帶你科普 GenAI</a>:<a href="https://deep-learning-101.github.io/GenAI" target="_blank">淺顯介紹生成式人工智慧核心概念,強調硬體資源和數據的重要性。</a></li>
<li><a href="https://blog.twman.org/2024/09/LLM.html" target="_blank">大型語言模型直接就打完收工?</a>:<a href="https://deep-learning-101.github.io/1010LLM" target="_blank">回顧 LLM 領域探索歷程,討論硬體升級對 AI 開發的重要性。</a></li>
<li><a href="https://blog.twman.org/2024/07/RAG.html" target="_blank">檢索增強生成(RAG)不是萬靈丹之優化挑戰技巧</a>:<a href="https://deep-learning-101.github.io/RAG" target="_blank">探討 RAG 技術應用與挑戰,提供實用經驗分享和工具建議。</a></li>
<li><a href="https://blog.twman.org/2024/02/LLM.html" target="_blank">大型語言模型 (LLM) 入門完整指南:原理、應用與未來</a>:<a href="https://deep-learning-101.github.io/0204LLM" target="_blank">探討多種 LLM 工具的應用與挑戰,強調硬體資源的重要性。</a></li>
<li><a href="https://blog.twman.org/2023/04/GPT.html" target="_blank">解析探索大型語言模型:模型發展歷史、訓練及微調技術的 VRAM 估算</a>:<a href="https://deep-learning-101.github.io/GPU" target="_blank">探討 LLM 的發展與應用,強調硬體資源在開發中的關鍵作用。</a></li>
<li><a href="https://blog.twman.org/2024/11/diffusion.html" target="_blank">Diffusion Model 完全解析:從原理、應用到實作 (AI 圖像生成)</a>:<a href="https://deep-learning-101.github.io/diffusion" target="_blank">深入探討影像生成與分割技術的應用,強調硬體資源的重要性。</a></li>
<li><a href="https://blog.twman.org/2024/02/asr-tts.html" target="_blank">ASR/TTS 開發避坑指南:語音辨識與合成的常見挑戰與對策</a>:<a href="https://deep-learning-101.github.io/asr-tts" target="_blank">探討 ASR 和 TTS 技術應用中的問題,強調數據質量的重要性。</a></li>
<li><a href="https://blog.twman.org/2021/04/NLP.html" target="_blank">那些 NLP 踩的坑</a>:<a href="https://deep-learning-101.github.io/nlp" target="_blank">分享 NLP 領域的實踐經驗,強調數據質量對模型效果的影響。</a></li>
<li><a href="https://blog.twman.org/2021/04/ASR.html" target="_blank">那些語音處理踩的坑</a>:<a href="https://deep-learning-101.github.io/speech" target="_blank">分享語音處理領域的實務經驗,強調資料品質對模型效果的影響。</a></li>
<li><a href="https://blog.twman.org/2020/05/DeepLearning.html" target="_blank">手把手學深度學習安裝環境</a>:<a href="https://deep-learning-101.github.io/101" target="_blank">詳細介紹在 Ubuntu 上安裝深度學習環境的步驟,分享實際操作經驗。</a></li>
<li><a href='https://blog.twman.org/2023/07/wsl.html' target='_blank'>用PPOCRLabel來幫PaddleOCR做OCR的微調和標註</a></li>
<li><a href='https://blog.twman.org/2023/07/HugIE.html' target='_blank'>基於機器閱讀理解和指令微調的統一信息抽取框架之診斷書醫囑資訊擷取分析</a></li>
</ul>
<br>
"""
demo = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(type="filepath", label="上傳音訊檔案", sources=["upload", "microphone"])
    ],
    outputs=[
        gr.Audio(type="filepath", label="處理後音訊")
    ],
    live=True,
    allow_flagging="never",
    description=description_html
)

demo.launch(debug=True, share=True)
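# A minimal local smoke-test sketch (assumptions: a 'sample.wav' file exists next to
# app.py and the master64.th checkpoint is present at modelpath; ffmpeg must be
# available for pydub's MP3 export, alongside gradio, torch, torchaudio, pydub,
# and the denoiser package):
#
#     if __name__ == "__main__":
#         print(transcribe("sample.wav"))  # writes enhanced.mp3 and returns its path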