Update app.py
app.py CHANGED
@@ -18,6 +18,9 @@ from pedalboard.io import AudioFile
 from pydub import AudioSegment
 import noisereduce as nr
 import numpy as np
+import urllib.request
+import shutil
+import threading
 
 logging.getLogger("infer_rvc_python").setLevel(logging.ERROR)
 
@@ -65,27 +68,27 @@ def find_my_model(a_, b_):
     for base_file in [a_, b_]:
         if base_file is not None and base_file.endswith(".txt"):
             txt_files.append(base_file)
-
+
     directory = os.path.dirname(a_)
-
+
     for txt in txt_files:
         with open(txt, 'r') as file:
             first_line = file.readline()
-
+
         download_manager(
             url=first_line.strip(),
             path=directory,
             extension="",
         )
-
+
     for f in find_files(directory):
         if f.endswith(".zip"):
             unzip_in_folder(f, directory)
-
+
     model = None
     index = None
     end_files = find_files(directory)
-
+
     for ff in end_files:
         if ff.endswith(".pth"):
             model = os.path.join(directory, ff)
@@ -96,13 +99,101 @@ def find_my_model(a_, b_):
 
     if not model:
         gr.Error(f"Model not found in: {end_files}")
-
+
     if not index:
         gr.Warning("Index not found")
-
+
     return model, index
 
 
+def get_file_size(url):
+
+    if "huggingface" not in url:
+        raise ValueError("Only downloads from Hugging Face are allowed")
+
+    try:
+        with urllib.request.urlopen(url) as response:
+            info = response.info()
+            content_length = info.get("Content-Length")
+
+            file_size = int(content_length)
+            if file_size > 500000000:
+                raise ValueError("The file is too large. You can only download files up to 500 MB in size.")
+
+    except Exception as e:
+        raise e
+
+
+def clear_files(directory):
+    time.sleep(15)
+    print(f"Clearing files: {directory}.")
+    shutil.rmtree(directory)
+
+
+def get_my_model(url_data):
+
+    if not url_data:
+        return None, None
+
+    if "," in url_data:
+        a_, b_ = url_data.split()
+        a_, b_ = a_.strip().replace("/blob/", "/resolve/"), b_.strip().replace("/blob/", "/resolve/")
+    else:
+        a_, b_ = url_data.strip().replace("/blob/", "/resolve/"), None
+
+    out_dir = "downloads"
+    folder_download = str(random.randint(1000, 9999))
+    directory = os.path.join(out_dir, folder_download)
+    os.makedirs(directory, exist_ok=True)
+
+    try:
+        get_file_size(a_)
+        if b_:
+            get_file_size(b_)
+
+        valid_url = [a_] if not b_ else [a_, b_]
+        for link in valid_url:
+            download_manager(
+                url=link,
+                path=directory,
+                extension="",
+            )
+
+        for f in find_files(directory):
+            if f.endswith(".zip"):
+                unzip_in_folder(f, directory)
+
+        model = None
+        index = None
+        end_files = find_files(directory)
+
+        for ff in end_files:
+            if ff.endswith(".pth"):
+                model = ff
+                gr.Info(f"Model found: {ff}")
+            if ff.endswith(".index"):
+                index = ff
+                gr.Info(f"Index found: {ff}")
+
+        if not model:
+            raise ValueError(f"Model not found in: {end_files}")
+
+        if not index:
+            gr.Warning("Index not found")
+        else:
+            index = os.path.abspath(index)
+
+        return os.path.abspath(model), index
+
+    except Exception as e:
+        raise e
+    finally:
+        # time.sleep(10)
+        # shutil.rmtree(directory)
+        t = threading.Thread(target=clear_files, args=(directory,))
+        t.start()
+
+
 def add_audio_effects(audio_list):
     print("Audio effects")
 
@@ -110,7 +201,7 @@ def add_audio_effects(audio_list):
     for audio_path in audio_list:
         try:
             output_path = f'{os.path.splitext(audio_path)[0]}_effects.wav'
-
+
             # Initialize audio effects plugins
             board = Pedalboard(
                 [
@@ -119,7 +210,7 @@ def add_audio_effects(audio_list):
                     Reverb(room_size=0.10, dry_level=0.8, wet_level=0.2, damping=0.7)
                 ]
             )
-
+
             with AudioFile(audio_path) as f:
                 with AudioFile(output_path, 'w', f.samplerate, f.num_channels) as o:
                     # Read one second of audio at a time, until the file is empty:
@@ -143,17 +234,17 @@ def apply_noisereduce(audio_list):
     result = []
     for audio_path in audio_list:
         out_path = f'{os.path.splitext(audio_path)[0]}_noisereduce.wav'
-
+
         try:
             # Load audio file
             audio = AudioSegment.from_file(audio_path)
-
+
             # Convert audio to numpy array
             samples = np.array(audio.get_array_of_samples())
-
+
             # Reduce noise
             reduced_noise = nr.reduce_noise(samples, sr=audio.frame_rate, prop_decrease=0.6)
-
+
             # Convert reduced noise signal back to audio
             reduced_audio = AudioSegment(
                 reduced_noise.tobytes(),
@@ -161,11 +252,11 @@ def apply_noisereduce(audio_list):
                 sample_width=audio.sample_width,
                 channels=audio.channels
             )
-
+
             # Save reduced audio to file
             reduced_audio.export(out_path, format="wav")
             result.append(out_path)
-
+
         except Exception as e:
             traceback.print_exc()
             print(f"Error noisereduce: {str(e)}")
@@ -199,7 +290,7 @@ def run(
 ):
     if not audio_files:
         raise ValueError("The audio pls")
-
+
     if isinstance(audio_files, str):
         audio_files = [audio_files]
 
@@ -230,7 +321,7 @@ def run(
 
     if audio_effects:
         result = add_audio_effects(result)
-
+
     return result
 
 
@@ -371,7 +462,7 @@ def tts_button_conf():
         visible=False,
     )
 
-
+
 def tts_play_conf():
     return gr.Checkbox(
         False,
@@ -405,7 +496,7 @@ def denoise_conf():
 def effects_conf():
     return gr.Checkbox(
         False,
-        label="
+        label="Reverb",
         # info="",
         container=False,
         visible=True,
@@ -415,11 +506,11 @@ def effects_conf():
 def infer_tts_audio(tts_voice, tts_text, play_tts):
     out_dir = "output"
     folder_tts = "USER_"+str(random.randint(10000, 99999))
-
+
     os.makedirs(out_dir, exist_ok=True)
     os.makedirs(os.path.join(out_dir, folder_tts), exist_ok=True)
     out_path = os.path.join(out_dir, folder_tts, "tts.mp3")
-
+
     asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save(out_path))
     if play_tts:
         return [out_path], out_path
@@ -437,9 +528,46 @@ def show_components_tts(value_active):
         visible=value_active
     )
 
-
+
+def down_active_conf():
+    return gr.Checkbox(
+        False,
+        label="URL-to-Model",
+        # info="",
+        container=False,
+    )
+
+
+def down_url_conf():
+    return gr.Textbox(
+        value="",
+        placeholder="Write the url here...",
+        label="Enter URL",
+        visible=False,
+        lines=1,
+    )
+
+
+def down_button_conf():
+    return gr.Button(
+        "Process",
+        variant="secondary",
+        visible=False,
+    )
+
+
+def show_components_down(value_active):
+    return gr.update(
+        visible=value_active
+    ), gr.update(
+        visible=value_active
+    ), gr.update(
+        visible=value_active
+    )
+
+
 def get_gui(theme):
-    with gr.Blocks(theme=theme) as app:
+    with gr.Blocks(theme=theme, delete_cache=(3200, 3200)) as app:
         gr.Markdown(title)
         gr.Markdown(description)
 
@@ -464,7 +592,7 @@ def get_gui(theme):
         )
 
         aud = audio_conf()
-        gr.HTML("<hr>")
+        # gr.HTML("<hr>")
 
         tts_button.click(
             fn=infer_tts_audio,
@@ -472,10 +600,34 @@ def get_gui(theme):
             outputs=[aud, tts_play],
         )
 
+        down_active_gui = down_active_conf()
+        down_info = gr.Markdown(
+            "Provide a link to a zip file, like this one: `https://huggingface.co/mrmocciai/Models/resolve/main/Genshin%20Impact/ayaka-v2.zip?download=true`, or separate links for the .pth and .index files, like this: `https://huggingface.co/sail-rvc/ayaka-jp/resolve/main/model.pth?download=true, https://huggingface.co/sail-rvc/ayaka-jp/resolve/main/model.index?download=true`",
+            visible=False
+        )
+        with gr.Row():
+            with gr.Column(scale=3):
+                down_url_gui = down_url_conf()
+            with gr.Column(scale=1):
+                down_button_gui = down_button_conf()
+
         with gr.Column():
             with gr.Row():
                 model = model_conf()
                 indx = index_conf()
+
+            down_active_gui.change(
+                show_components_down,
+                [down_active_gui],
+                [down_info, down_url_gui, down_button_gui]
+            )
+
+            down_button_gui.click(
+                get_my_model,
+                [down_url_gui],
+                [model, indx]
+            )
+
             algo = pitch_algo_conf()
             algo_lvl = pitch_lvl_conf()
             indx_inf = index_inf_conf()
@@ -508,7 +660,6 @@ def get_gui(theme):
             outputs=[output_base],
         )
 
-
         gr.Examples(
             examples=[
                 [
@@ -544,7 +695,7 @@ def get_gui(theme):
                     0.25,
                     0.50,
                 ],
-
+
             ],
             fn=run,
             inputs=[
@@ -569,7 +720,7 @@ if __name__ == "__main__":
 
     tts_voice_list = asyncio.new_event_loop().run_until_complete(edge_tts.list_voices())
     voices = sorted([f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list])
-
+
     app = get_gui(theme)
 
     app.queue(default_concurrency_limit=40)
@@ -580,4 +731,5 @@ if __name__ == "__main__":
         show_error=True,
         quiet=False,
         debug=False,
+        allowed_paths=["./downloads/"],
     )
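
The URL-to-Model flow added above wires `down_button_gui.click` to `get_my_model`, which checks each link with `get_file_size` (Hugging Face URLs only, 500 MB cap), downloads into a per-request `downloads/<id>` folder, unzips any archive, returns the absolute `.pth`/`.index` paths, and then removes the folder about 15 seconds later on a background `clear_files` thread. A minimal usage sketch of calling the helper outside the Gradio UI, reusing the example zip URL from the interface text; this is illustrative only and not part of the commit:

# Hypothetical standalone call (assumes app.py's helpers and imports are loaded).
url = "https://huggingface.co/mrmocciai/Models/resolve/main/Genshin%20Impact/ayaka-v2.zip?download=true"

# Downloads and extracts the zip, then returns absolute paths;
# index_path may be None, in which case the app only emits a warning.
model_path, index_path = get_my_model(url)
print("model:", model_path)
print("index:", index_path)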