Commit 20ca8b6 · Akis Giannoukos committed · 1 Parent(s): cb98104

Replace tab design with a settings button.
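In short, the old `gr.Tabs()` layout is replaced by two `gr.Column` containers (`main_view` and `settings_view`) whose visibility is flipped from button click handlers via `gr.update(visible=...)`. A minimal, standalone sketch of that pattern (placeholder content, not the app's actual code) looks like this:

```python
import gradio as gr

with gr.Blocks() as demo:
    # Settings button shown alongside the main view
    settings_btn = gr.Button("⚙️ Settings", size="sm")

    # Main view, visible by default
    with gr.Column(visible=True) as main_view:
        gr.Markdown("Main content goes here.")

    # Settings view, hidden until the settings button is clicked
    with gr.Column(visible=False) as settings_view:
        back_btn = gr.Button("← Back", size="sm")
        gr.Markdown("Settings content goes here.")

    # Each click returns one visibility update per output component,
    # so a single handler swaps which column is shown
    settings_btn.click(
        fn=lambda: (gr.update(visible=False), gr.update(visible=True)),
        inputs=None,
        outputs=[main_view, settings_view],
    )
    back_btn.click(
        fn=lambda: (gr.update(visible=True), gr.update(visible=False)),
        inputs=None,
        outputs=[main_view, settings_view],
    )

if __name__ == "__main__":
    demo.launch()
```

Returning a tuple of `gr.update(visible=...)` values keyed to `outputs=[main_view, settings_view]` is what lets one click toggle both columns at once, which is the same mechanism the diff below wires up for the settings and back buttons.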

Files changed (1)
  1. app.py (+59 −41)
app.py CHANGED
@@ -596,7 +596,7 @@ def process_turn(
                 break
     explicit_flag = detect_explicit_suicidality(last_patient)
     high_risk = bool(explicit_flag or (scores.get("suicidal_thoughts", 0) >= 2))
-
+
     meta = {"Severity": severity, "Total_Score": total, "Confidence": overall_conf}
 
     # Termination conditions
@@ -714,48 +714,54 @@ def create_demo():
         #voice-bubble button { pointer-events: auto; cursor: pointer; }
         /* Hide TTS player UI but keep it in DOM for autoplay */
         #tts-player { width: 0 !important; height: 0 !important; opacity: 0 !important; position: absolute; pointer-events: none; }
+        /* Settings button in top right */
+        #settings-btn { position: absolute; top: 16px; right: 16px; z-index: 10; }
+        #back-btn { position: absolute; top: 16px; right: 16px; z-index: 10; }
         '''
     ) as demo:
-        gr.Markdown(
-            """
-            ### Conversational Assessment for Responsive Engagement (CARE) Notes
-            Tap on 'Record' to start speaking, then tap on 'Stop' to stop recording.
-            """
-        )
-        intro_play_btn = gr.Button("▶️ Play Intro", variant="secondary")
-
-        with gr.Tabs():
-            with gr.TabItem("Main"):
-                with gr.Column():
-                    # Microphone component styled as central bubble (tap to record/stop)
-                    audio_main = gr.Microphone(type="filepath", label=None, elem_id="voice-bubble", show_label=False)
-                    # Hidden text input placeholder for pipeline compatibility
-                    text_main = gr.Textbox(value="", visible=False)
-                    # Autoplay clinician voice output (player hidden with CSS)
-                    tts_audio_main = gr.Audio(label=None, interactive=False, autoplay=True, show_label=False, elem_id="tts-player")
-                    # Final summaries (shown after assessment ends)
-                    main_summary = gr.Markdown(visible=False)
-
-            with gr.TabItem("Advanced"):
-                with gr.Column():
-                    chatbot = gr.Chatbot(height=360, type="tuples", label="Conversation")
-                    with gr.Row():
-                        text_adv = gr.Textbox(placeholder="Type your message and press Enter", scale=4)
-                        send_adv_btn = gr.Button("Send", scale=1)
-                    score_json = gr.JSON(label="PHQ-9 Assessment (live)")
-                    severity_label = gr.Label(label="Severity")
-                    threshold = gr.Slider(0.5, 1.0, value=CONFIDENCE_THRESHOLD_DEFAULT, step=0.05, label="Confidence Threshold (stop when min ≥ τ)")
-                    tts_enable = gr.Checkbox(label="Speak clinician responses (TTS)", value=USE_TTS_DEFAULT)
-                    with gr.Row():
-                        tts_provider_dd = gr.Dropdown(choices=["Coqui", "gTTS"], value="Coqui", label="TTS Provider")
-                        coqui_model_tb = gr.Textbox(value=os.getenv("COQUI_MODEL", "tts_models/en/vctk/vits"), label="Coqui Model")
-                        coqui_speaker_dd = gr.Dropdown(choices=list_coqui_speakers(os.getenv("COQUI_MODEL", "tts_models/en/vctk/vits")), value="p225", label="Coqui Speaker")
-                    tts_audio = gr.Audio(label="Clinician voice", interactive=False, autoplay=False, visible=False)
-                    model_id_tb = gr.Textbox(value=current_model_id, label="Chat Model ID", info="e.g., google/gemma-2-2b-it or google/medgemma-4b-it")
-                    with gr.Row():
-                        apply_model_btn = gr.Button("Apply model (no restart)")
-                        # apply_model_restart_btn = gr.Button("Apply model and restart")
-                    model_status = gr.Markdown(value=f"Current model: `{current_model_id}`")
+        # Settings button (top right)
+        settings_btn = gr.Button("⚙️ Settings", elem_id="settings-btn", size="sm")
+
+        # Main view
+        with gr.Column(visible=True) as main_view:
+            gr.Markdown(
+                """
+                ### Conversational Assessment for Responsive Engagement (CARE) Notes
+                Tap on 'Record' to start speaking, then tap on 'Stop' to stop recording.
+                """
+            )
+            intro_play_btn = gr.Button("▶️ Play Intro", variant="secondary")
+            # Microphone component styled as central bubble (tap to record/stop)
+            audio_main = gr.Microphone(type="filepath", label=None, elem_id="voice-bubble", show_label=False)
+            # Hidden text input placeholder for pipeline compatibility
+            text_main = gr.Textbox(value="", visible=False)
+            # Autoplay clinician voice output (player hidden with CSS)
+            tts_audio_main = gr.Audio(label=None, interactive=False, autoplay=True, show_label=False, elem_id="tts-player")
+            # Final summaries (shown after assessment ends)
+            main_summary = gr.Markdown(visible=False)
+
+        # Settings view (initially hidden)
+        with gr.Column(visible=False) as settings_view:
+            back_btn = gr.Button("← Back", elem_id="back-btn", size="sm")
+            gr.Markdown("## Settings")
+            chatbot = gr.Chatbot(height=360, type="tuples", label="Conversation")
+            with gr.Row():
+                text_adv = gr.Textbox(placeholder="Type your message and press Enter", scale=4)
+                send_adv_btn = gr.Button("Send", scale=1)
+            score_json = gr.JSON(label="PHQ-9 Assessment (live)")
+            severity_label = gr.Label(label="Severity")
+            threshold = gr.Slider(0.5, 1.0, value=CONFIDENCE_THRESHOLD_DEFAULT, step=0.05, label="Confidence Threshold (stop when min ≥ τ)")
+            tts_enable = gr.Checkbox(label="Speak clinician responses (TTS)", value=USE_TTS_DEFAULT)
+            with gr.Row():
+                tts_provider_dd = gr.Dropdown(choices=["Coqui", "gTTS"], value="Coqui", label="TTS Provider")
+                coqui_model_tb = gr.Textbox(value=os.getenv("COQUI_MODEL", "tts_models/en/vctk/vits"), label="Coqui Model")
+                coqui_speaker_dd = gr.Dropdown(choices=list_coqui_speakers(os.getenv("COQUI_MODEL", "tts_models/en/vctk/vits")), value="p225", label="Coqui Speaker")
+            tts_audio = gr.Audio(label="Clinician voice", interactive=False, autoplay=False, visible=False)
+            model_id_tb = gr.Textbox(value=current_model_id, label="Chat Model ID", info="e.g., google/gemma-2-2b-it or google/medgemma-4b-it")
+            with gr.Row():
+                apply_model_btn = gr.Button("Apply model (no restart)")
+                # apply_model_restart_btn = gr.Button("Apply model and restart")
+            model_status = gr.Markdown(value=f"Current model: `{current_model_id}`")
 
         # App state
         chat_state = gr.State()
@@ -767,6 +773,18 @@ def create_demo():
         # Initialize on load (no autoplay due to browser policies)
        demo.load(_on_load_init, inputs=None, outputs=[chatbot, scores_state, meta_state, finished_state, turns_state])
 
+        # View navigation
+        settings_btn.click(
+            fn=lambda: (gr.update(visible=False), gr.update(visible=True)),
+            inputs=None,
+            outputs=[main_view, settings_view]
+        )
+        back_btn.click(
+            fn=lambda: (gr.update(visible=True), gr.update(visible=False)),
+            inputs=None,
+            outputs=[main_view, settings_view]
+        )
+
         # Explicit user gesture to play intro TTS (works across browsers)
         intro_play_btn.click(fn=_play_intro_tts, inputs=[tts_enable], outputs=[tts_audio_main])
 