Duplicate from SRDdev/Image-Caption
Co-authored-by: Shreyas Dixit <SRDdev@users.noreply.huggingface.co>
- .gitattributes +27 -0
- README.md +13 -0
- app.py +39 -0
- app2.py +50 -0
- example1.jpg +0 -0
- example2.jpg +0 -0
- example3.jpg +0 -0
- example4.jpg +0 -0
- example5.jpg +0 -0
- example6.jpg +0 -0
- requirements.txt +2 -0
.gitattributes
ADDED
@@ -0,0 +1,27 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: Image Caption
+emoji: 🏅
+colorFrom: blue
+colorTo: blue
+sdk: gradio
+sdk_version: 3.0.5
+app_file: app.py
+pinned: false
+duplicated_from: SRDdev/Image-Caption
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
app.py
ADDED
@@ -0,0 +1,39 @@
+import torch
+import re
+import gradio as gr
+from transformers import AutoTokenizer, ViTFeatureExtractor, VisionEncoderDecoderModel
+
+device = 'cpu'
+encoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
+decoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
+model_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
+feature_extractor = ViTFeatureExtractor.from_pretrained(encoder_checkpoint)
+tokenizer = AutoTokenizer.from_pretrained(decoder_checkpoint)
+model = VisionEncoderDecoderModel.from_pretrained(model_checkpoint).to(device)
+
+
+# Caption a PIL image: preprocess, beam-search generate, decode, strip the GPT-2 EOS token
+def predict(image, max_length=64, num_beams=4):
+    image = image.convert('RGB')
+    pixel_values = feature_extractor(image, return_tensors="pt").pixel_values.to(device)
+    clean_text = lambda x: x.replace('<|endoftext|>', '').split('\n')[0]
+    caption_ids = model.generate(pixel_values, max_length=max_length, num_beams=num_beams)[0]
+    caption_text = clean_text(tokenizer.decode(caption_ids))
+    return caption_text
+
+
+
+input = gr.inputs.Image(label="Upload your Image", type='pil', optional=True)
+output = gr.outputs.Textbox(type="auto", label="Captions")
+examples = [f"example{i}.jpg" for i in range(1, 7)]
+
+title = "Image Captioning"
+
+interface = gr.Interface(
+    fn=predict,
+    inputs=input,
+    theme="grass",
+    outputs=output,
+    examples=examples,
+    title=title,
+)
+interface.launch(debug=True)
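
For a quick sanity check outside the Gradio UI, `predict` can be called directly on a PIL image. A minimal sketch, assuming the definitions in app.py above have been run (or imported) and that one of the bundled example images is in the working directory:

```python
from PIL import Image

# example1.jpg ships with this Space; any RGB-convertible image works
image = Image.open("example1.jpg")
print(predict(image, max_length=64, num_beams=4))
```
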
app2.py
ADDED
@@ -0,0 +1,50 @@
+import torch
+import gradio as gr
+import re
+from transformers import AutoTokenizer, ViTFeatureExtractor, VisionEncoderDecoderModel
+
+device = 'cpu'
+encoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
+decoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
+model_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
+feature_extractor = ViTFeatureExtractor.from_pretrained(encoder_checkpoint)
+tokenizer = AutoTokenizer.from_pretrained(decoder_checkpoint)
+model = VisionEncoderDecoderModel.from_pretrained(model_checkpoint).to(device)
+
+def predict(image, max_length=64, num_beams=4):
+    image = image.convert('RGB')
+    pixel_values = feature_extractor(image, return_tensors="pt").pixel_values.to(device)
+    clean_text = lambda x: x.replace('<|endoftext|>', '').split('\n')[0]
+    caption_ids = model.generate(pixel_values, max_length=max_length, num_beams=num_beams)[0]
+    caption_text = clean_text(tokenizer.decode(caption_ids))
+    return caption_text
+
+def set_example_image(example: list) -> dict:
+    return gr.Image.update(value=example[0])
+css = '''
+h1#title {
+  text-align: center;
+}
+h3#header {
+  text-align: center;
+}
+img#overview {
+  max-width: 800px;
+  max-height: 600px;
+}
+img#style-image {
+  max-width: 1000px;
+  max-height: 600px;
+}
+'''
+demo = gr.Blocks(css=css)
+with demo:
+    gr.Markdown('''<h1 id="title">Image Caption 🖼️</h1>''')
+    gr.Markdown('''Made by: Shreyas Dixit''')
+    with gr.Column():
+        input = gr.inputs.Image(label="Upload your Image", type='pil', optional=True)
+        output = gr.outputs.Textbox(type="auto", label="Captions")
+        btn = gr.Button("Generate Caption")
+        btn.click(fn=predict, inputs=input, outputs=output)
+
+demo.launch()
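
app2.py wires the same `predict` into a `gr.Blocks` layout instead of `gr.Interface`, trading the built-in examples gallery for custom CSS and an explicit button. Since the Blocks version drops the gallery, here is a sketch of captioning the six bundled example images in a batch, assuming they sit next to the script and `predict` is defined as above:

```python
from PIL import Image

# Caption every bundled example image without launching the UI
for i in range(1, 7):
    path = f"example{i}.jpg"
    print(f"{path}: {predict(Image.open(path))}")
```
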
example1.jpg
ADDED
example2.jpg
ADDED
example3.jpg
ADDED
example4.jpg
ADDED
example5.jpg
ADDED
example6.jpg
ADDED
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+transformers
+torch
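
Note that requirements.txt pins neither dependency, so the Space resolves whatever `transformers` and `torch` versions are current at build time. A small sketch for recording the versions an environment actually resolved, which could then be pinned:

```python
import torch
import transformers

# Print resolved versions in requirements.txt pin syntax
print(f"torch=={torch.__version__}")
print(f"transformers=={transformers.__version__}")
```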