Update app.py
app.py CHANGED
@@ -1,12 +1,34 @@
 import os
+import sys
+
+# Set environment variables before importing any libraries
+os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
+
+# Import libraries
 import whisper
 import gradio as gr
 import torch
 from transformers import BertTokenizer, BertForSequenceClassification, pipeline
 from app.questions import get_question
 
-#
-
+# Apply monkey patch to prevent the API generation error
+try:
+    # Save original method reference
+    original_method = gr.Blocks.get_api_info
+
+    # Create a safer version of the method that catches the specific error
+    def safe_get_api_info(self):
+        try:
+            return original_method(self)
+        except TypeError as e:
+            print(f"API info generation error suppressed: {str(e)}", file=sys.stderr)
+            return {}  # Return empty API info instead of crashing
+
+    # Apply the patch
+    gr.Blocks.get_api_info = safe_get_api_info
+    print("Applied API info generation patch", file=sys.stderr)
+except Exception as e:
+    print(f"Failed to apply patch: {str(e)}", file=sys.stderr)
 
 # Load models
 whisper_model = whisper.load_model("small")
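The hunk above relies on the usual wrap-and-replace monkey-patch pattern: keep a reference to the original attribute, define a wrapper that delegates to it and absorbs the known failure, then rebind the attribute on the class. A minimal, self-contained sketch of that pattern follows; the `Widget` class and `describe` method are invented stand-ins for illustration, not Gradio APIs.

    import sys

    class Widget:
        """Stand-in for a third-party class whose method sometimes raises."""
        def describe(self):
            raise TypeError("argument of type 'bool' is not iterable")

    # Keep a reference to the unpatched method so the wrapper can delegate to it.
    _original_describe = Widget.describe

    def safe_describe(self):
        try:
            return _original_describe(self)
        except TypeError as e:
            # Log the failure and degrade gracefully instead of propagating it.
            print(f"describe() error suppressed: {e}", file=sys.stderr)
            return {}

    # Rebinding the attribute on the class applies the wrapper everywhere.
    Widget.describe = safe_describe

    print(Widget().describe())  # -> {} (the TypeError is caught and logged to stderr)

Because the rebinding happens at class level, every existing and future instance picks up the safer wrapper, while the saved reference keeps the original behaviour reachable.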
@@ -120,7 +142,7 @@ def transcribe_and_analyze_tech(audio, question):
     except Exception as e:
         return f"Error: {str(e)}", "", ""
 
-# UI layout
+# UI layout
 with gr.Blocks(css="textarea, .gr-box { font-size: 18px !important; }") as demo:
     gr.HTML("<h1 style='text-align: center; font-size: 32px;'>INTERVIEW PREPARATION MODEL</h1>")
 
@@ -160,23 +182,42 @@ with gr.Blocks(css="textarea, .gr-box { font-size: 18px !important; }") as demo:
                   outputs=[question_display_2, audio_input_2, transcribed_text_2,
                            context_analysis_result, confidence_analysis_result])
 
-
-
+# Also patch the client utils function that's failing
+try:
+    import gradio_client.utils
 
-#
-
+    # Original function reference
+    original_json_schema = gradio_client.utils._json_schema_to_python_type
+
+    # Create patched version
+    def patched_json_schema(schema, defs=None):
+        try:
+            if isinstance(schema, bool):
+                return "bool"  # Handle boolean schema case directly
+            return original_json_schema(schema, defs)
+        except Exception as e:
+            print(f"JSON schema conversion error suppressed: {str(e)}", file=sys.stderr)
+            return "any"  # Return a safe fallback type
+
+    # Apply patch
+    gradio_client.utils._json_schema_to_python_type = patched_json_schema
+    print("Applied JSON schema conversion patch", file=sys.stderr)
+except Exception as e:
+    print(f"Failed to apply client utils patch: {str(e)}", file=sys.stderr)
+
+if __name__ == "__main__":
+    # Simple launch with error handling
+    try:
+        demo.launch(show_api=False)
+    except Exception as e:
+        print(f"Launch failed: {str(e)}", file=sys.stderr)
+        # Try minimal launch as fallback
         try:
-
-            demo.launch(share=False, show_api=False)
+            demo.launch()
         except Exception as e:
-            print(f"
-
-
-
-
-
-            # Last resort - minimal launch
-            demo.launch()
-else:
-    # For local development
-    demo.launch(share=True, show_api=False)
+            print(f"Minimal launch also failed: {str(e)}", file=sys.stderr)
+            # Create a minimal error app as last resort
+            with gr.Blocks() as error_app:
+                gr.Markdown("# Error Starting App")
+                gr.Markdown("The application encountered errors during startup. Please check the logs.")
+            error_app.launch()
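For context on the second patch: in JSON Schema, a bare `true` or `false` is itself a valid schema (accept-anything / accept-nothing), so a converter that assumes every schema is a dict can fail with `TypeError: argument of type 'bool' is not iterable`. The toy converter below shows the shape of that guard; it mirrors `patched_json_schema` but is not gradio_client's real implementation, and `schema_to_python_type` is a made-up name.

    # Toy converter; mirrors the boolean-schema guard, not gradio_client's
    # actual _json_schema_to_python_type.
    def schema_to_python_type(schema, defs=None):
        if isinstance(schema, bool):
            # A boolean schema has no keys, so dict-style access such as
            # `"type" in schema` would raise TypeError on it.
            return "bool"
        mapping = {"string": "str", "integer": "int", "number": "float",
                   "boolean": "bool", "array": "list", "object": "dict"}
        return mapping.get(schema.get("type"), "any")

    print(schema_to_python_type(True))                # -> "bool"
    print(schema_to_python_type({"type": "string"}))  # -> "str"
    print(schema_to_python_type({"anyOf": []}))       # -> "any"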