Spaces: Running on Zero

Update app.py
app.py CHANGED

@@ -1,8 +1,8 @@
-#
+# ========= MUST BE FIRST: Gradio entry + ZeroGPU probes =========
 import os
 os.environ.setdefault("GRADIO_USE_CDN", "true")
 
-#
+# 'spaces' is present on HF Spaces; make it safe locally too
 try:
     import spaces
 except Exception:

@@ -12,7 +12,7 @@ except Exception:
             return deco
     spaces = _DummySpaces()
 
-#
+# Publicly-named probes so ZeroGPU supervisor can detect them
 @spaces.GPU(duration=10)
 def gpu_probe(a: int = 1, b: int = 1):
     return a + b

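The _DummySpaces class body (file lines 9-11) falls outside this hunk's context, so only `return deco` and `spaces = _DummySpaces()` are visible above. For readers following along locally, a minimal no-op shim consistent with those visible lines could look like the sketch below; it is illustrative, not the Space's actual code:

# Hypothetical fallback: if the `spaces` package is unavailable (e.g. running
# outside HF Spaces), expose a GPU(...) decorator factory that is a no-op.
try:
    import spaces
except Exception:
    class _DummySpaces:
        def GPU(self, *args, **kwargs):
            def deco(fn):
                return fn  # hand the function back unchanged
            return deco
    spaces = _DummySpaces()
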
@@ -21,18 +21,18 @@ def gpu_probe(a: int = 1, b: int = 1):
 def gpu_echo(x: str = "ok"):
     return x
 
-#
+# ================= Standard imports =================
 import sys
 import subprocess
 from pathlib import Path
-from typing import Tuple, Optional, List,
+from typing import Tuple, Optional, List, Any
 
 import gradio as gr
 import numpy as np
 import soundfile as sf
 from huggingface_hub import hf_hub_download
 
-#
+# Runtime hints (safe on CPU)
 USE_ZEROGPU = os.getenv("SPACE_RUNTIME", "").lower() == "zerogpu"
 
 SPACE_ROOT = Path(__file__).parent.resolve()

@@ -43,7 +43,7 @@ WEIGHTS_FILE = "model.safetensors"
 CACHE_DIR = SPACE_ROOT / "weights"
 CACHE_DIR.mkdir(parents=True, exist_ok=True)
 
-#
+# ================ Lazy resources =================
 _weights_path: Optional[Path] = None
 _repo_ready: bool = False
 

@@ -78,9 +78,8 @@ def ensure_repo(progress: Optional[gr.Progress] = None) -> Path:
     _repo_ready = True
     return REPO_DIR
 
-#
+# ================== Helpers ==================
 def save_temp_wav(wav: np.ndarray, sr: int, path: Path):
-    # Ensure shape (samples, channels)
     if wav.ndim == 2 and wav.shape[0] < wav.shape[1]:
         wav = wav.T
     if wav.dtype == np.float64:

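save_temp_wav transposes (channels, samples) arrays before writing because soundfile expects data shaped (frames, channels). A quick self-contained check of that convention; the file name and sample rate below are arbitrary:

import numpy as np
import soundfile as sf

wav = np.zeros((2, 48000), dtype=np.float32)      # stereo buffer in (channels, samples) layout
if wav.ndim == 2 and wav.shape[0] < wav.shape[1]:
    wav = wav.T                                   # soundfile wants (frames, channels)
sf.write("layout_check.wav", wav, 48000)          # writes one second of silent stereo
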
@@ -139,8 +138,8 @@ def run_sonicmaster_cli(
         last_err = f"Unexpected error: {e}\n{traceback.format_exc()}"
     return False, last_err or "All candidate commands failed."
 
-#
-@spaces.GPU(duration=60)
+# ============ GPU path (ZeroGPU) ============
+@spaces.GPU(duration=60)  # 60s is a safe cap for ZeroGPU
 def enhance_on_gpu(input_path: str, prompt: str, output_path: str) -> Tuple[bool, str]:
     try:
         import torch  # noqa: F401

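`@spaces.GPU(duration=60)` is the ZeroGPU pattern: the Space only holds a GPU while the decorated call runs, and `duration` caps that window. The body of enhance_on_gpu sits mostly outside this hunk, so the snippet below is a generic sketch of the shape, not the actual implementation:

import torch
import spaces  # real package on HF Spaces; shimmed to a no-op locally (see the fallback above)

@spaces.GPU(duration=60)  # GPU is attached only for the duration of this call
def gpu_double(values):
    # On ZeroGPU hardware, CUDA is available inside the decorated call.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    t = torch.tensor(values, device=device)
    return (t * 2).cpu().tolist()
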
@@ -156,7 +155,7 @@ def _has_cuda() -> bool:
     except Exception:
         return False
 
-#
+# ================== Examples (lazy) ==================
 PROMPTS_10 = [
     "Increase the clarity of this song by emphasizing treble frequencies.",
     "Make this song sound more boomy by amplifying the low end bass frequencies.",

@@ -177,28 +176,22 @@ def list_example_files(progress: Optional[gr.Progress] = None) -> List[str]:
     files = sorted(p for p in wav_dir.glob("*.wav") if p.is_file())
     return [p.as_posix() for p in files[:10]]
 
-def load_examples(_: Any = None, progress=gr.Progress())
-    """
+def load_examples(_: Any = None, progress=gr.Progress()):
+    """
+    Returns (choices:list[str], paths:list[str], status:str)
+    """
     paths = list_example_files(progress=progress)
     if not paths:
-        return
-
-
-        }
-    labels = [f"{i+1:02d} — {Path(p).name}" for i, p in enumerate(paths)]
-    return {
-        "choices": labels,
-        "paths": paths,
-        "status": f"Loaded {len(paths)} sample audios."
-    }
+        return [], [], "No sample .wav files found in repo/samples/inputs."
+    choices = [f"{i+1:02d} — {Path(p).name}" for i, p in enumerate(paths)]
+    return choices, paths, f"Loaded {len(paths)} sample audios."
 
 def set_example_selection(idx_label: str, paths: List[str]) -> Tuple[str, str]:
     """When user picks an example, set the audio path + a suggested prompt."""
     if not idx_label or not paths:
         return "", ""
     try:
-        #
-        idx = int(idx_label.split()[0]) - 1
+        idx = int(idx_label.split()[0]) - 1  # "01 — file.wav" -> 0
     except Exception:
         idx = 0
     idx = max(0, min(idx, len(paths)-1))

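Returning a plain tuple lets Gradio map each element positionally onto the event's outputs list, which is what makes the dict-plus-.then chaining removed in the wiring hunk further down unnecessary. A minimal standalone illustration of the pattern; the component names, placeholder paths, and the gr.update call are invented for this example:

import gradio as gr

def load_samples():
    paths = ["/tmp/a.wav", "/tmp/b.wav"]                       # placeholder data
    labels = [f"{i+1:02d} - {p}" for i, p in enumerate(paths)]
    # One return value per output component, in order: dropdown, state, status.
    return gr.update(choices=labels, value=None), paths, f"Loaded {len(paths)} samples."

with gr.Blocks() as demo:
    load_btn = gr.Button("Load sample audios")
    dropdown = gr.Dropdown(choices=[], label="Samples")
    paths_state = gr.State([])
    status = gr.Textbox(label="Status")
    load_btn.click(fn=load_samples, inputs=None, outputs=[dropdown, paths_state, status])
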
@@ -206,12 +199,12 @@ def set_example_selection(idx_label: str, paths: List[str]) -> Tuple[str, str]:
     prompt = PROMPTS_10[idx] if idx < len(PROMPTS_10) else PROMPTS_10[-1]
     return audio_path, prompt
 
-#
+# ================== Main callback ==================
 def enhance_audio_ui(
     audio_path: str,
     prompt: str,
     progress=gr.Progress(track_tqdm=True),
-)
+):
     """
     Returns (audio, message). On failure, audio=None and message=error text.
     """

@@ -251,12 +244,13 @@ def enhance_audio_ui(
         import traceback
         return None, f"Unexpected error: {e}\n{traceback.format_exc()}"
 
-#
+# ================== Gradio UI ==================
 with gr.Blocks(title="SonicMaster — Text-Guided Restoration & Mastering", fill_height=True) as _demo:
     gr.Markdown(
         "## 🎧 SonicMaster\n"
         "Upload audio or **load sample audios**, write a prompt, then click **Enhance**.\n"
-        "-
+        "- First run downloads model weights & repo (progress will show).\n"
+        "- On failure, the **Status** box shows the exact error (we won't echo the input audio)."
     )
     with gr.Row():
         with gr.Column(scale=1):

@@ -270,7 +264,6 @@ with gr.Blocks(title="SonicMaster — Text-Guided Restoration & Mastering", fill
             prompt = gr.Textbox(label="Text Prompt", placeholder="e.g., Reduce reverb and brighten vocals.")
             run_btn = gr.Button("🚀 Enhance", variant="primary")
 
-            # Optional quick prompt examples (text-only)
             gr.Examples(
                 examples=[[p] for p in [
                     "Reduce roominess/echo (dereverb).",

@@ -285,26 +278,20 @@ with gr.Blocks(title="SonicMaster — Text-Guided Restoration & Mastering", fill
             out_audio = gr.Audio(label="Enhanced Audio (output)")
             status = gr.Textbox(label="Status / Messages", interactive=False, lines=8)
 
-    #
-
-    load_result = load_btn.click(
+    # Load samples (3 outputs directly; no .then needed)
+    load_btn.click(
         fn=load_examples,
         inputs=None,
-        outputs=
+        outputs=[samples_dropdown, samples_state, status],
     )
-    # Manually map the dict result to components via .then (Gradio v5 API)
-    load_result.then(lambda d: d.get("choices", []), None, samples_dropdown)
-    load_result.then(lambda d: d.get("paths", []), None, samples_state)
-    load_result.then(lambda d: d.get("status", ""), None, status)
 
-    #
+    # When a sample is chosen, set audio path + suggested prompt
    samples_dropdown.change(
         fn=set_example_selection,
         inputs=[samples_dropdown, samples_state],
         outputs=[in_audio, prompt],
     )
 
-    # --- Enhance button ---
     run_btn.click(
         fn=enhance_audio_ui,
         inputs=[in_audio, prompt],

@@ -319,4 +306,4 @@ app = demo
 
 # Local debugging only
 if __name__ == "__main__":
-    demo.launch(server_name="0.0.0.0", server_port=7860)
+    demo.launch(server_name="0.0.0.0", server_port=7860)