from PIL import Image
import requests
import gradio as gr
| gr.load("models/Salesforce/blip-image-captioning-large").launch() | |
| from transformers import BlipProcessor, BlipForConditionalGeneration | |
model_id = "Salesforce/blip-image-captioning-large"
model = BlipForConditionalGeneration.from_pretrained(model_id)
processor = BlipProcessor.from_pretrained(model_id)
def launch(input):
    # Download the image from the given URL and normalize it to 3-channel RGB.
    image = Image.open(requests.get(input, stream=True).raw).convert('RGB')
    inputs = processor(image, return_tensors="pt")
    out = model.generate(**inputs)
    return processor.decode(out[0], skip_special_tokens=True)
iface = gr.Interface(launch, inputs="text", outputs="text")
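# Optional smoke test of the captioning function before starting the UI.
# The URL below is a hypothetical placeholder (not part of the original
# snippet); substitute any publicly reachable image URL before uncommenting.
# print(launch("https://example.com/photo.jpg"))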
iface.launch()