Add support for disabling queue mode
app.py CHANGED
@@ -84,7 +84,11 @@ class WhisperTranscriber:
         print("[Auto parallel] Using GPU devices " + str(self.parallel_device_list) + " and " + str(self.vad_cpu_cores) + " CPU cores for VAD/transcription.")
 
     # Entry function for the simple tab
-    def transcribe_webui_simple(self, modelName, languageName, urlData, multipleFiles, microphoneData, task, vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow,
+    def transcribe_webui_simple(self, modelName, languageName, urlData, multipleFiles, microphoneData, task, vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow):
+        return self.transcribe_webui_simple_progress(modelName, languageName, urlData, multipleFiles, microphoneData, task, vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow)
+
+    # Entry function for the simple tab progress
+    def transcribe_webui_simple_progress(self, modelName, languageName, urlData, multipleFiles, microphoneData, task, vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow,
                                          progress=gr.Progress()):
 
         vadOptions = VadOptions(vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow, self.app_config.vad_initial_prompt_mode)
@@ -93,6 +97,19 @@ class WhisperTranscriber:
 
     # Entry function for the full tab
     def transcribe_webui_full(self, modelName, languageName, urlData, multipleFiles, microphoneData, task,
+                              vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow, vadInitialPromptMode,
+                              initial_prompt: str, temperature: float, best_of: int, beam_size: int, patience: float, length_penalty: float, suppress_tokens: str,
+                              condition_on_previous_text: bool, fp16: bool, temperature_increment_on_fallback: float,
+                              compression_ratio_threshold: float, logprob_threshold: float, no_speech_threshold: float):
+
+        return self.transcribe_webui_full_progress(modelName, languageName, urlData, multipleFiles, microphoneData, task,
+                                                   vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow, vadInitialPromptMode,
+                                                   initial_prompt, temperature, best_of, beam_size, patience, length_penalty, suppress_tokens,
+                                                   condition_on_previous_text, fp16, temperature_increment_on_fallback,
+                                                   compression_ratio_threshold, logprob_threshold, no_speech_threshold)
+
+    # Entry function for the full tab with progress
+    def transcribe_webui_full_progress(self, modelName, languageName, urlData, multipleFiles, microphoneData, task,
                               vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow, vadInitialPromptMode,
                               initial_prompt: str, temperature: float, best_of: int, beam_size: int, patience: float, length_penalty: float, suppress_tokens: str,
                               condition_on_previous_text: bool, fp16: bool, temperature_increment_on_fallback: float,
@@ -457,7 +474,10 @@ def create_ui(app_config: ApplicationConfig):
         gr.Number(label="VAD - Prompt Window (s)", precision=None, value=app_config.vad_prompt_window),
     ]
 
-    simple_transcribe = gr.Interface(fn=ui.transcribe_webui_simple, description=ui_description, article=ui_article, inputs=simple_inputs(), outputs=[
+    is_queue_mode = app_config.queue_concurrency_count is not None and app_config.queue_concurrency_count > 0
+
+    simple_transcribe = gr.Interface(fn=ui.transcribe_webui_simple_progress if is_queue_mode else ui.transcribe_webui_simple,
+                                     description=ui_description, article=ui_article, inputs=simple_inputs(), outputs=[
         gr.File(label="Download"),
         gr.Text(label="Transcription"),
         gr.Text(label="Segments")
@@ -465,7 +485,8 @@ def create_ui(app_config: ApplicationConfig):
 
     full_description = ui_description + "\n\n\n\n" + "Be careful when changing some of the options in the full interface - this can cause the model to crash."
 
-    full_transcribe = gr.Interface(fn=ui.transcribe_webui_full, description=full_description, article=ui_article, inputs=[
+    full_transcribe = gr.Interface(fn=ui.transcribe_webui_full_progress if is_queue_mode else ui.transcribe_webui_full,
+                                   description=full_description, article=ui_article, inputs=[
         *simple_inputs(),
         gr.Dropdown(choices=["prepend_first_segment", "prepend_all_segments"], value=app_config.vad_initial_prompt_mode, label="VAD - Initial Prompt Mode"),
         gr.TextArea(label="Initial Prompt"),
@@ -490,8 +511,11 @@ def create_ui(app_config: ApplicationConfig):
     demo = gr.TabbedInterface([simple_transcribe, full_transcribe], tab_names=["Simple", "Full"])
 
     # Queue up the demo
-    if app_config.queue_concurrency_count is not None and app_config.queue_concurrency_count > 0:
+    if is_queue_mode:
         demo.queue(concurrency_count=app_config.queue_concurrency_count)
+        print("Queue mode enabled (concurrency count: " + str(app_config.queue_concurrency_count) + ")")
+    else:
+        print("Queue mode disabled - progress bars will not be shown.")
 
     demo.launch(share=app_config.share, server_name=app_config.server_name, server_port=app_config.server_port)
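What the new check means in practice: Gradio's queue (and with it the gr.Progress progress bars) is only started when app_config.queue_concurrency_count is a positive number, so setting it to 0 or leaving it unset now disables queue mode and routes the UI to the non-progress entry functions. Below is a minimal sketch of that condition using only names taken from the diff; how queue_concurrency_count is actually populated (config file, CLI flag, etc.) is outside this diff and is assumed rather than shown.

# Illustration only: mirrors the is_queue_mode condition in create_ui,
# not a drop-in replacement for the app's code.
def queue_mode_enabled(queue_concurrency_count) -> bool:
    # Queue mode is on only for a positive concurrency count.
    return queue_concurrency_count is not None and queue_concurrency_count > 0

print(queue_mode_enabled(1))     # True  -> demo.queue() runs, progress bars shown
print(queue_mode_enabled(0))     # False -> queue mode disabled
print(queue_mode_enabled(None))  # False -> unset also disables queue mode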