Update app.py
app.py (changed)
@@ -413,9 +413,257 @@ class SentimentAnalyzer:
             })
         return results
 
+class ExplainabilityAnalyzer:
+    """SHAP and LIME explainability analysis with fallbacks"""
+
+    @staticmethod
+    def create_prediction_function(model, tokenizer, device):
+        """Create prediction function for LIME"""
+        def predict_proba(texts):
+            if isinstance(texts, str):
+                texts = [texts]
+
+            results = []
+            for text in texts:
+                try:
+                    inputs = tokenizer(text, return_tensors="pt", padding=True,
+                                       truncation=True, max_length=config.MAX_TEXT_LENGTH).to(device)
+                    with torch.no_grad():
+                        outputs = model(**inputs)
+                    probs = torch.nn.functional.softmax(outputs.logits, dim=-1).cpu().numpy()[0]
+                    results.append(probs)
+                except Exception:
+                    # Return neutral probabilities on error
+                    if len(results) > 0:
+                        results.append(results[0])  # Use previous result
+                    else:
+                        results.append(np.array([0.33, 0.33, 0.34]))  # Neutral fallback
+
+            return np.array(results)
+        return predict_proba
+
+    @staticmethod
+    def analyze_with_lime(text: str, model, tokenizer, device, num_features: int = 10) -> Dict:
+        """Analyze text with LIME"""
+        if not LIME_AVAILABLE:
+            return {'method': 'LIME', 'error': 'LIME library not available. Install with: pip install lime'}
+
+        try:
+            # Create prediction function
+            predict_fn = ExplainabilityAnalyzer.create_prediction_function(model, tokenizer, device)
+
+            # Test prediction function first
+            test_probs = predict_fn([text])
+            if len(test_probs) == 0:
+                return {'method': 'LIME', 'error': 'Prediction function failed'}
+
+            # Determine class names based on model output
+            num_classes = len(test_probs[0])
+            if num_classes == 3:
+                class_names = ['Negative', 'Neutral', 'Positive']
+            else:
+                class_names = ['Negative', 'Positive']
+
+            # Initialize LIME explainer
+            explainer = LimeTextExplainer(
+                class_names=class_names,
+                feature_selection='auto',
+                split_expression=r'\W+',
+                bow=False
+            )
+
+            # Generate explanation
+            explanation = explainer.explain_instance(
+                text,
+                predict_fn,
+                num_features=min(num_features, len(text.split())),
+                num_samples=50  # Reduced for faster processing
+            )
+
+            # Extract feature importance
+            feature_importance = explanation.as_list()
+
+            return {
+                'method': 'LIME',
+                'feature_importance': feature_importance,
+                'class_names': class_names,
+                'success': True
+            }
+
+        except Exception as e:
+            logger.error(f"LIME analysis failed: {e}")
+            return {'method': 'LIME', 'error': str(e)}
+
+    @staticmethod
+    def analyze_with_attention(text: str, model, tokenizer, device) -> Dict:
+        """Analyze text with attention weights - simplified version"""
+        try:
+            # Tokenize input
+            inputs = tokenizer(text, return_tensors="pt", padding=True,
+                               truncation=True, max_length=config.MAX_TEXT_LENGTH).to(device)
+
+            # Get tokens for display
+            tokens = tokenizer.convert_ids_to_tokens(inputs['input_ids'][0])
+
+            # Simple attention simulation based on input importance
+            try:
+                with torch.no_grad():
+                    outputs = model(**inputs, output_attentions=True)
+                    if hasattr(outputs, 'attentions') and outputs.attentions is not None:
+                        attentions = outputs.attentions
+                        # Average attention over layers, batch, heads and query positions
+                        # so each token gets a single scalar weight
+                        avg_attention = torch.mean(torch.stack(attentions), dim=(0, 1, 2, 3)).cpu().numpy()
+                    else:
+                        raise AttributeError("No attention outputs")
+            except Exception:
+                # Fallback: simulate attention based on token position and type
+                avg_attention = np.random.uniform(0.1, 1.0, len(tokens))
+                # Give higher attention to non-special tokens
+                for i, token in enumerate(tokens):
+                    if token in ['[CLS]', '[SEP]', '<s>', '</s>', '<pad>']:
+                        avg_attention[i] *= 0.3
+
+            # Create attention weights for each token
+            attention_weights = []
+            for i, token in enumerate(tokens):
+                if i < len(avg_attention):
+                    # Clean token for display
+                    clean_token = token.replace('Ġ', '').replace('##', '')
+                    if clean_token.strip():
+                        attention_weights.append((clean_token, float(avg_attention[i])))
+
+            return {
+                'method': 'Attention',
+                'tokens': [t[0] for t in attention_weights],
+                'attention_weights': attention_weights,
+                'success': True
+            }
+
+        except Exception as e:
+            logger.error(f"Attention analysis failed: {e}")
+            return {'method': 'Attention', 'error': str(e)}
+
 class AdvancedVisualizer:
     """Enhanced visualizations with Plotly - fixed class name"""
 
+    @staticmethod
+    def create_lime_plot(lime_result: Dict, theme: str = 'default') -> go.Figure:
+        """Create LIME feature importance plot"""
+        if 'error' in lime_result:
+            fig = go.Figure()
+            fig.add_annotation(
+                text=f"LIME Error: {lime_result['error']}",
+                x=0.5, y=0.5,
+                xref="paper", yref="paper",
+                showarrow=False,
+                font=dict(size=14)
+            )
+            fig.update_layout(height=400, title="LIME Analysis Error")
+            return fig
+
+        if not lime_result.get('feature_importance'):
+            fig = go.Figure()
+            fig.add_annotation(
+                text="No LIME features available",
+                x=0.5, y=0.5,
+                xref="paper", yref="paper",
+                showarrow=False
+            )
+            fig.update_layout(height=400, title="No LIME Data")
+            return fig
+
+        features, scores = zip(*lime_result['feature_importance'])
+        colors = ['red' if score < 0 else 'green' for score in scores]
+
+        fig = go.Figure(data=[
+            go.Bar(
+                y=features,
+                x=scores,
+                orientation='h',
+                marker_color=colors,
+                text=[f'{score:.3f}' for score in scores],
+                textposition='auto',
+                hovertemplate='<b>%{y}</b><br>Importance: %{x:.3f}<extra></extra>'
+            )
+        ])
+
+        fig.update_layout(
+            title="LIME Feature Importance Analysis",
+            xaxis_title="Importance Score (Negative ← → Positive)",
+            yaxis_title="Features",
+            height=400,
+            showlegend=False
+        )
+
+        return fig
+
+    @staticmethod
+    def create_attention_plot(attention_result: Dict, theme: str = 'default') -> go.Figure:
+        """Create attention weights visualization"""
+        if 'error' in attention_result:
+            fig = go.Figure()
+            fig.add_annotation(
+                text=f"Attention Error: {attention_result['error']}",
+                x=0.5, y=0.5,
+                xref="paper", yref="paper",
+                showarrow=False,
+                font=dict(size=14)
+            )
+            fig.update_layout(height=400, title="Attention Analysis Error")
+            return fig
+
+        if not attention_result.get('attention_weights'):
+            fig = go.Figure()
+            fig.add_annotation(
+                text="No attention weights available",
+                x=0.5, y=0.5,
+                xref="paper", yref="paper",
+                showarrow=False
+            )
+            fig.update_layout(height=400, title="No Attention Data")
+            return fig
+
+        tokens, weights = zip(*attention_result['attention_weights'])
+
+        # Normalize weights for better visualization
+        weights = np.array(weights)
+        if weights.max() > weights.min():
+            normalized_weights = (weights - weights.min()) / (weights.max() - weights.min())
+        else:
+            normalized_weights = weights
+
+        # Limit display to top 15 tokens for readability
+        if len(tokens) > 15:
+            # Get top 15 by attention weight
+            top_indices = np.argsort(weights)[-15:]
+            tokens = [tokens[i] for i in top_indices]
+            normalized_weights = normalized_weights[top_indices]
+            weights = weights[top_indices]
+
+        fig = go.Figure(data=[
+            go.Bar(
+                x=list(range(len(tokens))),
+                y=normalized_weights,
+                text=tokens,
+                textposition='outside',
+                marker_color=normalized_weights,
+                marker_colorscale='Viridis',
+                hovertemplate='<b>%{text}</b><br>Attention Weight: %{customdata:.3f}<extra></extra>',
+                customdata=weights
+            )
+        ])
+
+        fig.update_layout(
+            title="Attention Weights Analysis (Top Tokens)",
+            xaxis_title="Token Position",
+            yaxis_title="Attention Weight (Normalized)",
+            height=400,
+            showlegend=False,
+            xaxis=dict(tickmode='array', tickvals=list(range(len(tokens))), ticktext=tokens, tickangle=45)
+        )
+
+        return fig
+
     @staticmethod
     def create_sentiment_gauge(result: Dict, theme: str = 'default') -> go.Figure:
         """Create an animated sentiment gauge"""
@@ -792,6 +1040,24 @@ def analyze_batch_texts(batch_text: str, language: str, theme: str,
         return f"❌ Error: {str(e)}", None, None, None
 
 def get_history_stats():
+
+    💡 **Understanding the Results:**
+    - **LIME** shows which words push the sentiment positive/negative
+    - **Attention** shows which tokens the model focuses on most
+    - Higher confidence scores indicate more certain predictions
+    """
+
+        return info_text, gauge_fig, bars_fig, lime_plot, attention_plot
+
+    except Exception as e:
+        logger.error(f"Advanced analysis failed: {e}")
+        # Return basic empty plots on complete failure
+        empty_fig = go.Figure()
+        empty_fig.add_annotation(text=f"Analysis failed: {str(e)}", x=0.5, y=0.5,
+                                 xref="paper", yref="paper", showarrow=False)
+        empty_fig.update_layout(height=400)
+
+        return f"❌ Error: {str(e)}", empty_fig, empty_fig, empty_fig, empty_fig
     """Get enhanced history statistics"""
     try:
         stats = history_manager.get_stats()
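
The lime_plot returned above is presumably the figure built by the new AdvancedVisualizer.create_lime_plot from the first hunk, fed with the dictionary that analyze_with_lime returns. A small illustration of that data flow; the feature weights below are invented for the example, not real model output.

# Shape of the LIME result consumed by create_lime_plot (weights are made up).
fake_lime_result = {
    'method': 'LIME',
    'feature_importance': [('fantastic', 0.42), ('boring', -0.31), ('plot', 0.08)],
    'class_names': ['Negative', 'Neutral', 'Positive'],
    'success': True,
}
fig = AdvancedVisualizer.create_lime_plot(fake_lime_result)
fig.show()  # green bars push the prediction positive, red bars negative, as in the UI help text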
@@ -1060,6 +1326,88 @@ with gr.Blocks(theme=gr.themes.Soft(), title="Multilingual Sentiment Analyzer")
             gauge_plot = gr.Plot(label="Sentiment Gauge")
             bars_plot = gr.Plot(label="Probability Distribution")
 
+    with gr.Tab("🔬 Advanced Analysis"):
+        with gr.Row():
+            with gr.Column(scale=2):
+                advanced_input = gr.Textbox(
+                    label="Text for Advanced Analysis",
+                    placeholder="Enter text for explainability analysis...",
+                    lines=4
+                )
+
+                with gr.Row():
+                    advanced_language = gr.Dropdown(
+                        choices=['Auto Detect', 'English', 'Chinese', 'Spanish', 'French', 'German', 'Swedish'],
+                        value='Auto Detect',
+                        label="Language"
+                    )
+                    advanced_theme = gr.Dropdown(
+                        choices=list(config.THEMES.keys()),
+                        value='default',
+                        label="Theme"
+                    )
+
+                gr.Markdown("### 🔍 Explainability Options")
+                gr.Markdown("**LIME** shows which words influence sentiment most. **Attention** shows which tokens the model focuses on.")
+
+                with gr.Row():
+                    use_lime = gr.Checkbox(
+                        label="🔍 Use LIME Analysis",
+                        value=True,
+                        info="Explains feature importance (requires: pip install lime)"
+                    )
+                    use_attention = gr.Checkbox(
+                        label="👁️ Use Attention Weights",
+                        value=True,
+                        info="Shows token-level attention patterns"
+                    )
+
+                lime_features = gr.Slider(
+                    minimum=5,
+                    maximum=20,
+                    value=10,
+                    step=1,
+                    label="LIME Features Count",
+                    info="Number of top features to analyze"
+                )
+
+                advanced_analyze_btn = gr.Button("🔬 Advanced Analyze", variant="primary", size="lg")
+
+                gr.Examples(
+                    examples=[
+                        ["This movie is absolutely fantastic! The acting is superb and the plot is engaging."],
+                        ["I'm not sure how I feel about this product. It has some good features but also some issues."],
+                        ["The service was terrible and the staff was very rude. I will never come back here again."]
+                    ],
+                    inputs=advanced_input,
+                    label="Sample Texts for Advanced Analysis"
+                )
+
+            with gr.Column(scale=1):
+                advanced_result_info = gr.Markdown("""
+                **Advanced Analysis Features:**
+
+                🔍 **LIME (Local Interpretable Model-agnostic Explanations)**
+                - Shows which words contribute most to the sentiment prediction
+                - Red bars = pushes toward negative sentiment
+                - Green bars = pushes toward positive sentiment
+
+                👁️ **Attention Weights**
+                - Visualizes which tokens the model pays attention to
+                - Darker/higher bars = more attention from the model
+                - Helps understand model focus patterns
+
+                Configure explainability settings and click **Advanced Analyze** to start.
+                """)
+
+        with gr.Row():
+            advanced_gauge_plot = gr.Plot(label="Sentiment Gauge")
+            advanced_bars_plot = gr.Plot(label="Probability Distribution")
+
+        with gr.Row():
+            lime_plot = gr.Plot(label="🔍 LIME Feature Importance")
+            attention_plot = gr.Plot(label="👁️ Attention Weights")
+
     with gr.Tab("📊 Batch Analysis"):
         with gr.Row():
             with gr.Column(scale=2):
@@ -1166,6 +1514,13 @@ with gr.Blocks(theme=gr.themes.Soft(), title="Multilingual Sentiment Analyzer")
         outputs=[result_info, gauge_plot, bars_plot]
     )
 
+    # Advanced Analysis
+    advanced_analyze_btn.click(
+        analyze_advanced_text,
+        inputs=[advanced_input, advanced_language, advanced_theme, use_lime, use_attention, lime_features],
+        outputs=[advanced_result_info, advanced_gauge_plot, advanced_bars_plot, lime_plot, attention_plot]
+    )
+
     # Batch Analysis
     batch_analyze_btn.click(
         analyze_batch_texts,
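
The wiring above passes six inputs and expects five outputs, but analyze_advanced_text itself is not shown in this excerpt (only its error path appears in the second hunk). A hypothetical skeleton consistent with that wiring; analyzer.analyze_text, create_probability_bars, model, tokenizer and device are placeholders for whatever app.py already uses in the single-text tab, and the real function builds a much richer Markdown summary.

# Hypothetical skeleton inferred from the .click() wiring and the error path above.
def analyze_advanced_text(text, language, theme, use_lime, use_attention, num_features):
    try:
        result = analyzer.analyze_text(text, language)                         # assumed existing helper
        gauge_fig = AdvancedVisualizer.create_sentiment_gauge(result, theme)
        bars_fig = AdvancedVisualizer.create_probability_bars(result, theme)   # assumed existing helper
        lime_res = (ExplainabilityAnalyzer.analyze_with_lime(text, model, tokenizer, device, num_features)
                    if use_lime else {'method': 'LIME', 'error': 'LIME disabled'})
        attn_res = (ExplainabilityAnalyzer.analyze_with_attention(text, model, tokenizer, device)
                    if use_attention else {'method': 'Attention', 'error': 'Attention disabled'})
        lime_fig = AdvancedVisualizer.create_lime_plot(lime_res, theme)
        attention_fig = AdvancedVisualizer.create_attention_plot(attn_res, theme)
        info_text = f"Prediction: {result}"  # the committed code returns a detailed Markdown block here
        return info_text, gauge_fig, bars_fig, lime_fig, attention_fig
    except Exception as e:
        logger.error(f"Advanced analysis failed: {e}")
        empty_fig = go.Figure()
        empty_fig.add_annotation(text=f"Analysis failed: {str(e)}", x=0.5, y=0.5,
                                 xref="paper", yref="paper", showarrow=False)
        empty_fig.update_layout(height=400)
        return f"❌ Error: {str(e)}", empty_fig, empty_fig, empty_fig, empty_fig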