Update app.py
app.py CHANGED
@@ -13,6 +13,7 @@ logger = logging.getLogger(__name__)
 
 # Model configuration
 MODEL_NAME = "wizcodes12/snaxfix-model"
+FALLBACK_MODEL = "google/flan-t5-small"  # Fallback model if main model fails
 SUPPORTED_LANGUAGES = [
     "python", "javascript", "java", "c", "cpp", "csharp", "rust",
     "php", "html", "css", "sql"
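
The new FALLBACK_MODEL is loaded later with AutoModelForSeq2SeqLM, so any substitute fallback has to be an encoder-decoder checkpoint (google/flan-t5-small is). A quick sanity check along those lines, as a standalone sketch; the check_seq2seq helper is illustrative and not part of app.py:

from transformers import AutoConfig

def check_seq2seq(model_name: str) -> bool:
    """Return True if the checkpoint is an encoder-decoder (seq2seq) model."""
    config = AutoConfig.from_pretrained(model_name)
    return bool(config.is_encoder_decoder)

# Both the primary and the fallback should pass before being used with AutoModelForSeq2SeqLM.
for name in ("wizcodes12/snaxfix-model", "google/flan-t5-small"):
    print(name, "->", "seq2seq" if check_seq2seq(name) else "not seq2seq")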
@@ -73,16 +74,29 @@ class SyntaxFixerApp:
         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
         logger.info(f"Using device: {self.device}")
 
-        # Load model and tokenizer
+        # Load model and tokenizer with fallback
+        self.model_name_used = None
         try:
+            logger.info(f"Attempting to load primary model: {MODEL_NAME}")
             self.tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
             self.model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
-            self.model.to(self.device)
-            self.model.eval()
-            logger.info("Model and tokenizer loaded successfully")
+            self.model_name_used = MODEL_NAME
+            logger.info("Primary model and tokenizer loaded successfully")
         except Exception as e:
-            logger.…
-            …
+            logger.warning(f"Failed to load primary model: {e}")
+            logger.info(f"Attempting to load fallback model: {FALLBACK_MODEL}")
+            try:
+                self.tokenizer = AutoTokenizer.from_pretrained(FALLBACK_MODEL)
+                self.model = AutoModelForSeq2SeqLM.from_pretrained(FALLBACK_MODEL)
+                self.model_name_used = FALLBACK_MODEL
+                logger.info("Fallback model and tokenizer loaded successfully")
+            except Exception as fallback_error:
+                logger.error(f"Failed to load fallback model: {fallback_error}")
+                raise Exception(f"Failed to load both primary and fallback models. Primary: {e}, Fallback: {fallback_error}")
+
+        self.model.to(self.device)
+        self.model.eval()
+        logger.info(f"Using model: {self.model_name_used}")
 
         # Initialize history
         self.history = []
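
The loading logic now tries the primary checkpoint and only falls back on failure; self.model.to(self.device) and self.model.eval() run after the try/except, so they apply to whichever model actually loaded. The same pattern as a minimal standalone sketch; load_model_with_fallback is a hypothetical helper, not a function in app.py:

import logging
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

logger = logging.getLogger(__name__)

def load_model_with_fallback(primary: str, fallback: str):
    """Try the primary checkpoint; on any error, load the fallback instead."""
    try:
        tokenizer = AutoTokenizer.from_pretrained(primary)
        model = AutoModelForSeq2SeqLM.from_pretrained(primary)
        return tokenizer, model, primary
    except Exception as primary_error:
        logger.warning("Primary model %s failed to load: %s", primary, primary_error)
        tokenizer = AutoTokenizer.from_pretrained(fallback)
        model = AutoModelForSeq2SeqLM.from_pretrained(fallback)
        return tokenizer, model, fallback

# Usage mirroring the commit: device placement and eval() happen after loading,
# so they apply to whichever checkpoint succeeded.
# tokenizer, model, model_name_used = load_model_with_fallback(MODEL_NAME, FALLBACK_MODEL)
# model.to(device)
# model.eval()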
@@ -96,8 +110,14 @@ class SyntaxFixerApp:
             return f"Error: Language '{language}' is not supported. Choose from: {', '.join(SUPPORTED_LANGUAGES)}"
 
         try:
-            # Prepare input
-            input_text = f"<{language.upper()}> Fix the syntax errors in this {language} code: {broken_code}"
+            # Prepare input - adjust prompt based on model being used
+            if self.model_name_used == FALLBACK_MODEL:
+                # Simplified prompt for fallback model
+                input_text = f"Fix the syntax errors in this {language} code: {broken_code}"
+            else:
+                # Original prompt for specialized model
+                input_text = f"<{language.upper()}> Fix the syntax errors in this {language} code: {broken_code}"
+
             inputs = self.tokenizer(
                 input_text,
                 max_length=MAX_LENGTH,
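
Per the new comments, the <LANGUAGE> prefix is a convention of the specialized model, so the prompt is simplified when the fallback is active. The branch reduces to roughly this; build_prompt is an illustrative helper, not in app.py:

def build_prompt(language: str, broken_code: str, using_fallback: bool) -> str:
    """Build the model input; the <LANGUAGE> tag is only used for the specialized model."""
    if using_fallback:
        return f"Fix the syntax errors in this {language} code: {broken_code}"
    return f"<{language.upper()}> Fix the syntax errors in this {language} code: {broken_code}"

# Example
print(build_prompt("python", "print('hi'", using_fallback=False))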
@@ -163,7 +183,10 @@ def create_gradio_interface():
 
     with gr.Blocks(theme=gr.themes.Soft()) as demo:
         gr.Markdown("# SnaxFix: Advanced Syntax Error Fixer")
-        gr.Markdown("Fix syntax errors in code across multiple programming languages using …
+        gr.Markdown("Fix syntax errors in code across multiple programming languages using AI models.")
+
+        # Show which model is being used
+        gr.Markdown(f"**Currently using model:** `{app.model_name_used}`")
 
         with gr.Row():
             with gr.Column(scale=2):
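
The interface now states which checkpoint is in use. Because the f-string is evaluated once when the Blocks are built, the banner reflects whichever model loaded at startup. A cut-down sketch of the same idea, with a placeholder object standing in for the real SyntaxFixerApp instance:

import gradio as gr

class _PlaceholderApp:
    # Stand-in for SyntaxFixerApp; in app.py this attribute is set during model
    # loading to either MODEL_NAME or FALLBACK_MODEL.
    model_name_used = "google/flan-t5-small"

app = _PlaceholderApp()

with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# SnaxFix: Advanced Syntax Error Fixer")
    gr.Markdown("Fix syntax errors in code across multiple programming languages using AI models.")
    gr.Markdown(f"**Currently using model:** `{app.model_name_used}`")

# demo.launch()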