import os
import re
import gradio as gr
from transformers import AutoTokenizer, ViTFeatureExtractor, VisionEncoderDecoderModel
import openai

openai.api_key = os.environ['OPENAI_KEY']
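# A slightly more defensive variant of the line above (a sketch, assuming the key
# is stored as a Space secret named OPENAI_KEY); failing fast gives a clearer
# error than a KeyError deep inside a request:
# openai.api_key = os.environ.get('OPENAI_KEY')
# if not openai.api_key:
#     raise RuntimeError("OPENAI_KEY is not set; add it as a secret in the Space settings")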
## Load the pretrained image-captioning model (ViT encoder + GPT-2 decoder)
device = 'cpu'
encoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
decoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
model_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
feature_extractor = ViTFeatureExtractor.from_pretrained(encoder_checkpoint)
tokenizer = AutoTokenizer.from_pretrained(decoder_checkpoint)
model = VisionEncoderDecoderModel.from_pretrained(model_checkpoint).to(device)
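# The model is kept on CPU here. If the Space had a GPU, the usual pattern would be
# (a sketch, not used by this app):
# import torch
# device = 'cuda' if torch.cuda.is_available() else 'cpu'
# model = model.to(device)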
## Pipeline:
##  1. Read the uploaded image and extract ViT features
##  2. Generate a caption that serves as the image context
##  3. Send the selected department plus the caption to the LLM to get the meme text
def predict(department, image, max_length=64, num_beams=4):
    # Preprocess the image into pixel values for the ViT encoder
    image = image.convert('RGB')
    pixel_values = feature_extractor(image, return_tensors="pt").pixel_values.to(device)

    # Generate a caption and strip the GPT-2 end-of-text token / extra lines
    clean_text = lambda x: x.replace('<|endoftext|>', '').split('\n')[0]
    caption_ids = model.generate(pixel_values, max_length=max_length, num_beams=num_beams)[0]
    caption_text = clean_text(tokenizer.decode(caption_ids))
    print(caption_text)

    dept = department
    context = caption_text

    # Few-shot prompt: two department/context/meme examples, then the new pair
    response = openai.Completion.create(
        model="text-davinci-003",
        prompt=f'create non offensive one line meme for given department and context\n\ndepartment- data science\ncontext-a man sitting on a bench with a laptop\nmeme- \"I\'m not a data scientist, but I play one on my laptop.\"\n\ndepartment-startup\ncontext-a young boy is smiling while using a laptop\nmeme-\"When your startup gets funded and you can finally afford a new laptop\"\n\ndepartment- {dept}\ncontext-{context}\nmeme-',
        max_tokens=20,
        temperature=0.8)
    meme_text = response.choices[0].text
    meme_text = meme_text.replace("department", "")

    # Simple feedback record (department | caption | meme), currently unused
    Feedback_SQL = "DEPT" + dept + "CAPT" + caption_text + "MAMAY" + meme_text
    return meme_text
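# Quick local sanity check of the caption-to-meme pipeline (a sketch; assumes the
# bundled example images exist and OPENAI_KEY is set -- uncomment to run):
# from PIL import Image
# print(predict("startup", Image.open("example3.png")))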
output = gr.Textbox(label="Meme")
examples = [f"example{i}.png" for i in range(1, 7)]  # example images bundled with the Space

## GRADIO INTERFACE
description = "Looking for a fun and easy way to generate memes? Look no further than Meme world! Leveraging large language models like GPT-3 / AI21 / Cohere, you can create memes that are sure to be a hit with your friends or network. Created with ♥️ by Arsalan @[Xaheen](https://www.linkedin.com/in/sallu-mandya/). Kindly share your thoughts in the discussion section and use the app responsibly #NO_Offense \n\nBuilt with ❤️ by @[Xhaheen](https://www.linkedin.com/in/sallu-mandya/)"
title = "Meme world 🖼️"
dropdown = ["data science", "product management", "marketing", "startup", "agile", "crypto", "SEO"]
article = "Created by: Xaheen"
theme = gr.themes.Glass(
    primary_hue="cyan",
    neutral_hue="gray",
    font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'],
)
interface = gr.Interface(
    fn=predict,
    inputs=[gr.Dropdown(choices=dropdown, label="Department"),
            gr.Image(label="Upload your Image", type='pil')],
    outputs=output,
    theme=theme,
    examples=[['data science', 'example5.png'],
              ['product management', 'example2.png'],
              ['startup', 'example3.png'],
              ['marketing', 'example4.png'],
              ['agile', 'example1.png'],
              ['crypto', 'example6.png']],
    title=title,
    description=description,
    article=article,
)
interface.launch(debug=True)
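# Suggested requirements.txt for this Space (an assumption; pin versions as needed):
#   gradio
#   transformers
#   torch
#   openai<1.0   # the legacy openai.Completion.create API used above
#   Pillow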