from transformers import ViTConfig, ViTForImageClassification
from transformers import ViTFeatureExtractor
from PIL import Image
import requests
import matplotlib.pyplot as plt
import gradio as gr
from gradio.mix import Parallel
from transformers import ImageClassificationPipeline, PerceiverForImageClassificationConvProcessing, PerceiverFeatureExtractor
from transformers import VisionEncoderDecoderModel
from transformers import AutoTokenizer
import torch
from transformers import (
    AutoModelForCausalLM,
    LogitsProcessorList,
    MinLengthLogitsProcessor,
    StoppingCriteriaList,
    MaxLengthCriteria,
)
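# NOTE (assumption): this script targets the library versions that were current when
# it was written -- a gradio release that still ships gradio.mix.Parallel and the
# legacy gr.inputs/gr.outputs API, and a transformers release that still exposes
# ViTFeatureExtractor and model.greedy_search(). Newer releases renamed or removed
# several of these.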
# https://github.com/NielsRogge/Transformers-Tutorials/blob/master/HuggingFace_vision_ecosystem_overview_(June_2022).ipynb
#tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
#model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
# option 1: build a ViT with randomly initialized weights (train from scratch):
#config = ViTConfig(num_hidden_layers=12, hidden_size=768)
#model = ViTForImageClassification(config)
#feature_extractor = ViTFeatureExtractor()
# option 2: load a checkpoint from the hub, e.g. google/vit-base-patch16-224:
#feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224")
# the Perceiver model/feature extractor pair below is what classify_image() actually uses
feature_extractor = PerceiverFeatureExtractor.from_pretrained("deepmind/vision-perceiver-conv")
model = PerceiverForImageClassificationConvProcessing.from_pretrained("deepmind/vision-perceiver-conv")
image_pipe = ImageClassificationPipeline(model=model, feature_extractor=feature_extractor)
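# A minimal usage sketch for the pipeline (assumes cats.jpg exists locally); it
# returns a list of dicts with "label" and "score" keys:
#   example_preds = image_pipe(Image.open("cats.jpg"))
#   # e.g. [{"label": "tabby, tabby cat", "score": 0.89}, ...]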
def create_story(text_seed):
    #tokenizer = AutoTokenizer.from_pretrained("gpt2")
    #model = AutoModelForCausalLM.from_pretrained("gpt2")
    # EleutherAI's GPT-Neo, a GPT-3-style causal language model
    tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125M")
    model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M")
    # set pad_token_id to eos_token_id because GPT-Neo (like GPT-2) has no pad token
    model.config.pad_token_id = model.config.eos_token_id
    #input_prompt = "It might be possible to"
    input_prompt = text_seed
    input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids
    # instantiate logits processors: require at least 10 tokens before EOS can be emitted
    logits_processor = LogitsProcessorList(
        [
            MinLengthLogitsProcessor(10, eos_token_id=model.config.eos_token_id),
        ]
    )
    # stop decoding once the sequence reaches 100 tokens
    stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=100)])
    outputs = model.greedy_search(
        input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria
    )
    # batch_decode returns a list of strings, one per generated sequence
    result_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
    return result_text
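# Hedged usage sketch: create_story() returns a list with one decoded string per
# input sequence, so index or join before display:
#   story_list = create_story("It might be possible to")
#   print(story_list[0])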
def self_caption(image):
    repo_name = "ydshieh/vit-gpt2-coco-en"
    test_image = image
    # alternatively, caption a test image fetched from COCO:
    #url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    #test_image = Image.open(requests.get(url, stream=True).raw)
    #test_image.save("cats.png")
    feature_extractor2 = ViTFeatureExtractor.from_pretrained(repo_name)
    tokenizer = AutoTokenizer.from_pretrained(repo_name)
    model2 = VisionEncoderDecoderModel.from_pretrained(repo_name)
    pixel_values = feature_extractor2(test_image, return_tensors="pt").pixel_values
    print("Pixel Values")
    print(pixel_values)
    # autoregressively generate the caption (beam search decoding)
    generated_ids = model2.generate(pixel_values, max_length=16, num_beams=4, return_dict_in_generate=True)
    # with return_dict_in_generate=True the generated token ids live in .sequences
    preds = tokenizer.batch_decode(generated_ids.sequences, skip_special_tokens=True)
    preds = [pred.strip() for pred in preds]
    print("Predictions")
    print(preds)
    # an unused alternate output format, kept for reference:
    #pred_dictionary = dict(zip(["Prediction"], preds))
    #return pred_dictionary
    # use the caption as the seed for the story generator
    preds = ' '.join(preds)
    story = create_story(preds)
    story = ' '.join(story)
    return story
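# Hedged usage sketch (cats.jpg is one of the example images this Space assumes is
# present in its repo):
#   print(self_caption(Image.open("cats.jpg")))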
def classify_image(image):
    results = image_pipe(image)
    print("RESULTS")
    print(results)
    # convert the pipeline's list of {label, score} dicts to the
    # {label: score} mapping that Gradio's Label output expects
    output = {}
    for prediction in results:
        output[prediction['label']] = prediction['score']
    print("OUTPUT")
    print(output)
    return output
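# Equivalent one-liner for the conversion above, if preferred:
#   output = {p["label"]: p["score"] for p in results}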
image = gr.inputs.Image(type="pil")
label = gr.outputs.Label(num_top_classes=5)
examples = [["cats.jpg"], ["batter.jpg"], ["drinkers.jpg"]]
title = "Generate a Story from an Image"
description = "Demo for classifying images with Perceiver IO. To use it, simply upload an image and click 'Submit'; a story is auto-generated from the caption as well."
article = "<p style='text-align: center'></p>"
img_info1 = gr.Interface(
    fn=classify_image,
    inputs=image,
    outputs=label,
)
img_info2 = gr.Interface(
    fn=self_caption,
    inputs=image,
    outputs=[
        gr.outputs.Textbox(label='Story')
    ],
)
# run both interfaces side by side on the same uploaded image
Parallel(img_info1, img_info2, inputs=image, title=title, description=description, examples=examples, enable_queue=True).launch(debug=True)