Commit 947b8c3 · amitaro-vits
alan committed · 1 parent: b78425c

app.py CHANGED
@@ -54,7 +54,8 @@ AVAILABLE_MODELS = {
     'KOTOBA-SPEECH-BRIA': 'kotoba-speech-bria',
     'KOTOBA-SPEECH-ALEX': 'kotoba-speech-alex',
     'KOTOBA-SPEECH-JACOB': 'kotoba-speech-jacob',
-    'BLANE-TTS': 'blane-tts'
+    'BLANE-TTS': 'blane-tts',
+    'AMITARO-VITS': 'amitaro-vits'
 }
 
 SPACE_ID = os.getenv('SPACE_ID')
@@ -243,7 +244,7 @@ if not os.path.isfile(DB_PATH):
         print("Error while downloading DB:", e)
 
 # Create DB table (if doesn't exist)
-create_db_if_missing()
+# create_db_if_missing()
 
 # Sync local DB with remote repo every 5 minute (only if a change is detected)
 scheduler = CommitScheduler(
@@ -390,7 +391,8 @@ model_names = {
     'kotoba-speech-bria': 'KOTOBA-SPEECH-v0.1-BRIA',
     'kotoba-speech-alex': 'KOTOBA-SPEECH-v0.1-ALEX',
     'kotoba-speech-jacob': 'KOTOBA-SPEECH-v0.1-JACOB',
-    'blane-tts': 'BLANE-TTS'
+    'blane-tts': 'BLANE-TTS',
+    'amitaro-vits': 'AMITARO-VITS'
     # 'styletts2': 'StyleTTS 2',
 }
 model_licenses = {
@@ -445,7 +447,8 @@ model_links = {
     'kotoba-speech-bria': 'https://kotoba-tech-kotoba-speech.hf.space/',
     'kotoba-speech-alex': 'https://kotoba-tech-kotoba-speech.hf.space/',
     'kotoba-speech-jacob': 'https://kotoba-tech-kotoba-speech.hf.space/',
-    'blane-tts': 'https://blane187-blane-tts.hf.space/'
+    'blane-tts': 'https://blane187-blane-tts.hf.space/',
+    'amitaro-vits': 'https://lycoris53-vits-tts-japanese-only-amitaro.hf.space/'
 }
 model_kwargs = {
     'moe-vits': {
@@ -454,6 +457,9 @@ model_kwargs = {
     'bark': {
         'fn_index': 3
     },
+    'amitaro-vits': {
+        'fn_index': 0
+    }
 }
 # def get_random_split(existing_split=None):
 #     choice = random.choice(list(audio_dataset.keys()))
@@ -724,6 +730,9 @@ def synthandreturn(text):
             text,
             f'Speaker {random.choice(range(10))} (ja)',
         ),
+        'amitaro-vits': (
+            text,
+        )
     }
     # result = router.predict(text, AVAILABLE_MODELS[model].lower(), api_name="/synthesize")
     if model in model_kwargs:
@@ -974,5 +983,5 @@ with gr.Blocks(theme=theme, css="footer {visibility: hidden}textbox{resize:none}
     with gr.Accordion("Citation", open=False):
         gr.Markdown(f"If you use this data in your publication, please cite us!\n\nCopy the BibTeX citation to cite this source:\n\n```bibtext\n{CITATION_TEXT}\n```\n\nPlease remember that all generated audio clips should be assumed unsuitable for redistribution or commercial use.")
 
-demo.launch()
-
+# demo.launch()
+demo.queue(api_open=False, default_concurrency_limit=40).launch(show_api=False)