Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -136,7 +136,7 @@ iface = gr.Interface(
|
|
| 136 |
inputs=gr.Image(type="pil", label="Upload an Image"),
|
| 137 |
outputs=[gr.Textbox(label="Emotion"), gr.Textbox(label="Memorability Score"), gr.Textbox(label="IQA Score")],
|
| 138 |
title="PerceptCLIP",
|
| 139 |
-
description="This is an official demo of PerceptCLIP from the paper: [Don’t Judge Before You CLIP: A Unified Approach for Perceptual Tasks](https://navvewas.github.io/PerceptCLIP/)",
|
| 140 |
examples=example_images
|
| 141 |
)
|
| 142 |
|
|
|
|
| 136 |
inputs=gr.Image(type="pil", label="Upload an Image"),
|
| 137 |
outputs=[gr.Textbox(label="Emotion"), gr.Textbox(label="Memorability Score"), gr.Textbox(label="IQA Score")],
|
| 138 |
title="PerceptCLIP",
|
| 139 |
+
description="This is an official demo of PerceptCLIP from the paper: [Don’t Judge Before You CLIP: A Unified Approach for Perceptual Tasks](https://navvewas.github.io/PerceptCLIP/). For each specific task, we fine-tune CLIP with LoRA and an MLP head. Our models achieve state-of-the-art performance. \nThis demo shows results from three models, each corresponding to a different task - visual emotion analysis, memorability prediction, and image quality assessment.",
|
| 140 |
examples=example_images
|
| 141 |
)
|
| 142 |
|