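"""Gradio demo for UPLME: predicts an empathy score with a 95% confidence interval.

The app downloads the UPLME checkpoint from the Hugging Face Hub (rhasan/UPLME),
scores a response (essay) written towards a stimulus (news article), and plots
the prediction together with its uncertainty estimate.
"""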
import os
from pathlib import Path
import time
import gradio as gr
from gradio.themes import Soft
import numpy as np
import matplotlib.pyplot as plt
from huggingface_hub import hf_hub_download
from src.infer import load_model, predict
os.environ.setdefault("HF_HOME", str(Path.home() / ".cache" / "huggingface"))
_model = None

def _warmup():
    """Download the UPLME checkpoint from the Hugging Face Hub and load it once."""
    global _model
    if _model is not None:
        return "Model already loaded."
    t0 = time.time()
    ckpt_path = hf_hub_download(
        repo_id="rhasan/UPLME",
        filename="UPLME_NewsEmp_tuned-lambdas.ckpt",
        repo_type="model"
    )
    # Keep the return value so the guard above actually prevents reloading; this assumes
    # load_model returns the loaded model (the original discarded the return value, so
    # _model was never set and the checkpoint was reloaded on every request).
    _model = load_model(ckpt_path)
    return f"Model loaded in {time.time() - t0:.1f} seconds."

def ci_plot(mean: float, low: float, upp: float):
    """Plot the predicted score as a point with an asymmetric 95% CI error bar."""
    fig, ax = plt.subplots(figsize=(6, 1.4))
    ax.errorbar(
        x=mean, y=0,
        xerr=[[mean - low], [upp - mean]],
        fmt='o', color='blue',
        ecolor='orange',
        elinewidth=5,
        capsize=8,
        capthick=4,
        markersize=10
    )
    ax.set_xlim(0, 100)
    ax.set_yticks([])
    ax.set_xlabel("Empathy Score (0-100) +/- 95% CI")
    fig.tight_layout()
    return fig

def predict_with_ci(article: str, essay: str) -> tuple[float, float, float, plt.Figure]:
    _warmup()
    mean, var = predict(essay, article)  # UPLME's predict expects the (essay, article) order
    # Raw scores are on the original [1, 7] scale; rescale them to [0, 100].
    scale = 100 / 6
    mean = (mean - 1) * scale
    std = np.sqrt(var) * scale
    ci_low = max(0.0, mean - 1.96 * std)
    ci_upp = min(100.0, mean + 1.96 * std)
    fig = ci_plot(mean, ci_low, ci_upp)
    return mean, ci_low, ci_upp, fig
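
# Worked example of the rescaling in predict_with_ci (illustrative numbers, not model output):
# a raw mean of 4.0 on the [1, 7] scale maps to (4.0 - 1) * (100 / 6) = 50.0, and var = 0.09
# gives std = 0.3 * (100 / 6) = 5.0, so the 95% CI is roughly [40.2, 59.8].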
with gr.Blocks(title="UPLME", theme=Soft(primary_hue="blue")) as demo:
gr.Markdown("# Empathy Prediction with Uncertainty Estimation")
with gr.Row():
with gr.Column():
article_input = gr.Textbox(label="Stimulus (E.g., News Article)", lines=6)
essay_input = gr.Textbox(label="Response (E.g., Essay) towards the stimulus", lines=6)
button = gr.Button("Predict")
gr.Examples(
examples=[
["A month after Hurricane Matthew, 800,000 Haitians urgently need food.", "My heart just breaks for the people who are suffering."],
["A month after Hurricane Matthew, 800,000 Haitians urgently need food.", "I see, but this doesn't sound too worrisome to me."],
],
inputs=[article_input, essay_input]
)
with gr.Column():
output_mean = gr.Number(label="Predicted Empathy Score (0-100)", precision=2)
ci_low = gr.Number(label="95% CI Lower Bound", precision=2)
ci_upp = gr.Number(label="95% CI Upper Bound", precision=2)
fig = gr.Plot(show_label=False)
button.click(fn=predict_with_ci, inputs=[article_input, essay_input], outputs=[output_mean, ci_low, ci_upp, fig])
gr.Markdown("## About")
gr.Markdown("""
This application predicts empathy score and uncertainty estimates using the UPLME model proposed in **UPLME: Uncertainty-Aware Probabilistic Language Modelling for Robust Empathy Regression** by **Md Rakibul Hasan, Md Zakir Hossain, Aneesh Krishna, Shafin Rahman and Tom Gedeon**.
- Paper: https://arxiv.org/abs/2508.03520
- Code: https://github.com/hasan-rakibul/UPLME
""")

if __name__ == "__main__":
    demo.launch(share=True)
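
# To run this demo locally (assuming the src/ package and its dependencies,
# e.g. gradio, numpy, matplotlib and huggingface_hub, are installed):
#   python app.py
# With share=True, Gradio prints a temporary public URL in addition to the local one.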