Update app.py
app.py
CHANGED
@@ -44,11 +44,13 @@ def img_to_text(image_path):
     # Task used here : "image-to-text".
     # Model used here: "Salesforce/blip-image-captioning-base".
     # Backup model: "nlpconnect/vit-gpt2-image-captioning".
+    # Backup model: "Salesforce/blip-image-captioning-large"

     image_to_text = pipeline(
         "image-to-text", model="Salesforce/blip-image-captioning-base"
     )
     # image_to_text = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
+    # image_to_text = pipeline("image-to-text", model="Salesforce/blip-image-captioning-large")

     scenario = image_to_text(image_path)[0]["generated_text"]

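The two added lines record a third captioning model as a commented-out backup. A minimal sketch of how those backups could be wired in as an automatic fallback instead of manual comment toggling (the fallback loop and the load_captioner helper are illustrative assumptions, not part of app.py; only the model names and the pipeline call come from the commit):

from transformers import pipeline

# Candidate models, in the order the comments in app.py list them.
CANDIDATE_MODELS = (
    "Salesforce/blip-image-captioning-base",   # primary
    "nlpconnect/vit-gpt2-image-captioning",    # backup
    "Salesforce/blip-image-captioning-large",  # backup added in this commit
)

def load_captioner():
    # Hypothetical helper: try each model in turn, falling through to the
    # next one if loading fails (e.g. download error, missing weights).
    for model_name in CANDIDATE_MODELS:
        try:
            return pipeline("image-to-text", model=model_name)
        except Exception:
            continue
    raise RuntimeError("no image-to-text model could be loaded")

image_to_text = load_captioner()
scenario = image_to_text("example.jpg")[0]["generated_text"]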