import gradio as gr
import spaces
import torch
import os
from compel import Compel, ReturnedEmbeddingsType
from diffusers import DiffusionPipeline
import requests

# Model setup
model_name = os.environ.get('MODEL_NAME', 'UnfilteredAI/NSFW-gen-v2')
pipe = DiffusionPipeline.from_pretrained(
    model_name,
    torch_dtype=torch.float16
)
pipe.to('cuda')

# Compel handles SDXL's dual text encoders and returns pooled embeddings for the second encoder
compel = Compel(
    tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
    text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
    returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
    requires_pooled=[False, True]
)


# Translation function (remote HTTP call, so it does not need the @spaces.GPU decorator)
def translate_albanian_to_english(text):
    if not text.strip():
        return ""
    for attempt in range(2):
        try:
            response = requests.post(
                "https://hal1993-mdftranslation1234567890abcdef1234567890-fc073a6.hf.space/v1/translate",
                json={"from_language": "sq", "to_language": "en", "input_text": text},
                headers={"accept": "application/json", "Content-Type": "application/json"},
                timeout=5
            )
            response.raise_for_status()
            translated = response.json().get("translate", "")
            return translated
        except Exception as e:
            if attempt == 1:
                raise gr.Error(f"Përkthimi dështoi: {str(e)}")
    raise gr.Error("Përkthimi dështoi. Ju lutem provoni përsëri.")


# Aspect ratio function: maps the selected ratio to (width, height) in pixels
def update_aspect_ratio(ratio):
    if ratio == "1:1":
        return 1024, 1024
    elif ratio == "9:16":
        return 576, 1024   # width = 1024 * 9/16 = 576
    elif ratio == "16:9":
        return 1024, 576   # height = 1024 * 9/16 = 576
    return 1024, 1024


@spaces.GPU(duration=120)
def generate(prompt, negative_prompt, num_inference_steps, guidance_scale,
             width, height, num_samples, progress=gr.Progress(track_tqdm=True)):
    # Translate Albanian prompt to English
    final_prompt = translate_albanian_to_english(prompt.strip()) if prompt.strip() else ""

    # Use Compel for prompt embeddings
    embeds, pooled = compel(final_prompt)
    neg_embeds, neg_pooled = compel(negative_prompt)
    # Pad so positive and negative embeddings share the same sequence length
    [embeds, neg_embeds] = compel.pad_conditioning_tensors_to_same_length([embeds, neg_embeds])

    # Run pipeline
    images = pipe(
        prompt_embeds=embeds,
        pooled_prompt_embeds=pooled,
        negative_prompt_embeds=neg_embeds,
        negative_pooled_prompt_embeds=neg_pooled,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        width=width,
        height=height,
        num_images_per_prompt=num_samples
    ).images

    # Return single image
    return images[0]


# Gradio interface
def create_demo():
    with gr.Blocks() as demo:
        # CSS for layout, 320px gap, and download button scaling
        gr.HTML("""
        """)
        gr.Markdown("# Krijo Imazhe")
        gr.Markdown("Gjenero imazhe të reja nga përshkrimi yt me fuqinë e inteligjencës artificiale.")

        with gr.Column():
            prompt = gr.Textbox(
                label="Përshkrimi",
                placeholder="Shkruani përshkrimin këtu",
                lines=3
            )
            aspect_ratio = gr.Radio(
                label="Raporti i fotos",
                choices=["9:16", "1:1", "16:9"],
                value="1:1"
            )
            generate_button = gr.Button(value="Gjenero")

            # Hidden components for processing
            negative_prompt = gr.Textbox(
                value="(low quality, worst quality:1.2), very displeasing, 3d, watermark, signature, ugly, poorly drawn, (deformed | distorted | disfigured:1.3), bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, mutated hands and fingers:1.4, disconnected limbs, blurry, amputation.",
                visible=False
            )
            num_inference_steps = gr.Slider(
                value=60, minimum=1, maximum=100, step=1, visible=False
            )
            guidance_scale = gr.Slider(
                value=7, minimum=1, maximum=20, step=0.1, visible=False
            )
            width_slider = gr.Slider(
                value=1024, minimum=256, maximum=1536, step=8, visible=False
            )
            height_slider = gr.Slider(
                value=1024, minimum=256, maximum=1536, step=8, visible=False
            )
            num_samples = gr.Slider(
                value=1, minimum=1, maximum=1, step=1, visible=False
            )
        with gr.Row():
            result_image = gr.Image(
                label="Imazhi i Gjeneruar",
                interactive=False
            )

        # Update hidden sliders based on aspect ratio
        aspect_ratio.change(
            fn=update_aspect_ratio,
            inputs=[aspect_ratio],
            outputs=[width_slider, height_slider],
            queue=False
        )

        # Bind the generate button
        inputs = [
            prompt,
            negative_prompt,
            num_inference_steps,
            guidance_scale,
            width_slider,
            height_slider,
            num_samples
        ]
        generate_button.click(
            fn=generate,
            inputs=inputs,
            outputs=[result_image],
            show_progress="full"
        )

    return demo


if __name__ == "__main__":
    print(f"Gradio version: {gr.__version__}")
    app = create_demo()
    app.queue(max_size=12).launch(server_name='0.0.0.0')