xqt committed on
Commit
eb54863
·
1 Parent(s): aa855a1

UPD: Added main app

Browse files
Files changed (4) hide show
  1. README.md +3 -3
  2. main.py +179 -0
  3. prompts.py +38 -0
  4. requirements.txt +4 -0
README.md CHANGED
@@ -4,10 +4,10 @@ emoji: 🏢
4
  colorFrom: red
5
  colorTo: pink
6
  sdk: gradio
7
- sdk_version: 5.20.0
8
- app_file: app.py
9
  pinned: false
10
  license: apache-2.0
11
  ---
12
 
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
4
  colorFrom: red
5
  colorTo: pink
6
  sdk: gradio
7
+ sdk_version: 5.22.0
8
+ app_file: main.py
9
  pinned: false
10
  license: apache-2.0
11
  ---
12
 
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
main.py ADDED
@@ -0,0 +1,179 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio
import prompts
import json
from together import Together
import base64
import numpy  # was `import numpy as numpy` — the self-alias is redundant
from PIL import Image
from io import BytesIO
import uuid
import datetime
import os
from huggingface_hub import HfApi


# Credentials come from the environment; the empty-string default keeps the
# module importable in local dev where the secrets are unset.
# (os.environ.get(key, "") replaces the original double-lookup
#  `os.environ.get(key) if os.environ.get(key) else ""` — same result.)
HF_KEY = os.environ.get("HF_KEY", "")
TOGETHER_KEY = os.environ.get("TOGETHER_KEY", "")
# Colon-separated list of passwords that unlock the server-side Together key.
PASSWORDS = os.environ.get("PASSWORDS", "")

# Client used to archive generated images/prompts into the HF dataset repo.
hf_client = HfApi(
    token = HF_KEY
)

# Default Together client; replaced per-session by process_token().
together_client = Together(
    api_key = TOGETHER_KEY
)
26
+
27
def process_token(secret_token):
    """Install the user's Together API key for this session.

    If the submitted value matches one of the configured passwords, the
    server-side TOGETHER_KEY is substituted instead. The token is returned
    either way so the textbox keeps its value.

    :param secret_token: value typed into the API-key textbox.
    :return: the token actually in effect (or the raw input on failure).
    """
    global together_client
    try:
        # PASSWORDS is a colon-separated list of accepted passwords.
        passwords = PASSWORDS.split(":")

        if secret_token in passwords:
            secret_token = TOGETHER_KEY

        together_client = Together(
            api_key = secret_token
        )
        gradio.Info("API token has been set successfully.", duration = 2)
        return secret_token
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit. On failure, echo the input back.
        return secret_token
43
+
44
+
45
+
46
def assisted_prompt_generation(prompt):
    """Expand the user's rough idea into a full image-generation prompt.

    :param prompt: free-form user input.
    :return: the generated prompt on success, or the sentinel string
        "Failed" on any error (moderation 400, network error, bad JSON).
    """
    gradio.Info("Assisting prompt generation...", duration = 2)
    try:
        response = together_client.chat.completions.create(
            model = "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
            messages = [
                {"role": "system", "content": prompts.assisted_prompt_generator.system_prompt},
                {"role": "user", "content": f"{prompt}"},
                {"role": "assistant", "content": ""}
            ],
            response_format = {"type": "json_object"}
        )
        output = json.loads(response.choices[0].message.content)

        if output["return_code"] == 400:
            # NOTE(review): gradio.Error is an exception class — constructing
            # it without `raise` displays nothing; raise it if a modal is wanted.
            gradio.Error("Prompt generation failed.", duration = 5)
            # Return the sentinel "Failed" (the model emits "Failed." with a
            # period) so generate_image()'s `prompt == "Failed"` guard fires.
            return "Failed"
        else:
            gradio.Info("Prompt generated successfully.", duration = 2)
            return output["prompt"]
    except Exception:
        gradio.Error("Prompt generation failed.", duration = 5)
        return "Failed"
69
+
70
def verify_prompt(prompt):
    """Moderate the prompt before image generation.

    :param prompt: the prompt to check.
    :return: the prompt unchanged when it passes verification, otherwise the
        sentinel string "Failed" which downstream steps check for.
    """
    # Fixed user-facing typo: "Veryfying" -> "Verifying".
    gradio.Info("Verifying prompt...", duration = 2)
    try:
        response = together_client.chat.completions.create(
            model = "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free",
            messages = [
                {"role": "system", "content": prompts.prompt_verification_agent.system_prompt},
                {"role": "user", "content": f"{prompt}"},
                {"role": "assistant", "content": ""}
            ],
            response_format = {"type": "json_object"}
        )
        output = json.loads(response.choices[0].message.content)

        if output["return_code"] == 400:
            # NOTE(review): gradio.Error is never raised here, so no modal is
            # shown; the "Failed" sentinel is what actually signals failure.
            gradio.Error("Prompt verification failed.", duration = 5)
            return "Failed"
        else:
            # Fixed grammar: "Prompt verification successfully".
            gradio.Info("Prompt verified successfully.", duration = 2)
            return prompt
    except Exception:
        gradio.Error("Prompt verification failed.", duration = 5)
        return "Failed"
93
+
94
+
95
def generate_image(prompt):
    """Render *prompt* with FLUX.1-schnell and return the image as a numpy array.

    When the upstream pipeline signalled failure (prompt == "Failed"), a black
    1024x1024 placeholder is returned instead of calling the API.
    """
    if prompt == "Failed":
        gradio.Error("Prompt generation failed.", duration = 5)
        return numpy.zeros((1024, 1024, 3), dtype = numpy.uint8)

    response = together_client.images.generate(
        prompt= prompt,
        model = "black-forest-labs/FLUX.1-schnell-Free",
        width = 1024,
        height = 1024,
        steps = 4,
        n = 1,
        response_format="b64_json",
        stop=[]
    )
    # The API returns the image base64-encoded; decode it, load it through
    # PIL, and hand gradio a numpy array.
    raw_bytes = base64.b64decode(response.data[0].b64_json)
    pil_image = Image.open(BytesIO(raw_bytes))
    return numpy.array(pil_image)
115
+
116
+
117
def save_image(prompt, image):
    """Archive the generated image and its prompt to the HF dataset repo.

    Files land under images/YYYY/MM/DD/HH/MM/<uuid>.{png,json}; the local
    temp files are removed after upload.

    :param prompt: the verified prompt the image was generated from.
    :param image: the generated image as a numpy array.
    """
    temp_id = uuid.uuid4()
    # NOTE(review): naive local time — presumably UTC is intended on the
    # Space; confirm before relying on the folder timestamps.
    now = datetime.datetime.now()
    # Uploads are partitioned by generation time.
    repo_dir = f"images/{now.year}/{now.month}/{now.day}/{now.hour}/{now.minute}"

    Image.fromarray(image).save(f"{temp_id}.png")
    # Use a context manager — the original `json.dump(prompt, open(...))`
    # never closed the file handle.
    with open(f"{temp_id}.json", "w") as fp:
        json.dump({"prompt": prompt}, fp)

    # Upload the image and its prompt side by side with matching paths.
    for suffix, kind in ((".png", "image"), (".json", "prompt")):
        hf_client.upload_file(
            path_or_fileobj = f"{temp_id}{suffix}",
            path_in_repo = f"{repo_dir}/{temp_id}{suffix}",
            repo_type = "dataset",
            repo_id = "xqt/fashion_model_generator",
            commit_message = f"ADD: {kind} {temp_id}{suffix}",
        )

    gradio.Info(f"Image and prompt saved successfully at https://huggingface.co/datasets/xqt/fashion_model_generator/tree/main/{repo_dir}/{temp_id}.png", duration = 5)
    os.remove(f"{temp_id}.png")
    os.remove(f"{temp_id}.json")
    return
150
+
151
+
152
with gradio.Blocks(fill_width = False) as app:
    # Header / description. (Fixed displayed grammar: "fashion model" ->
    # "fashion models".)
    gradio.Markdown("""
    # Fashion Model Generator
    ## This app generates images of fashion models.
    Synthetic Dataset: [xqt/fashion_model_generator](https://huggingface.co/datasets/xqt/fashion_model_generator)
    """)

    api_token_input = gradio.Textbox(label = "Together AI API Key (key is never stored and it uses free models only)", placeholder = "Enter your Together AI API Key here.", type = "password")

    with gradio.Row(equal_height = True):
        with gradio.Column(scale = 3):
            prompt_input = gradio.Textbox(label = "Prompt", placeholder = "Enter your prompt here.")
        with gradio.Column():
            prompt_assist = gradio.Button(value = "Assisted Prompt Generation")

    image_output = gradio.Image(label="Generated Image")

    # Wiring: submitting the key swaps the Together client; the assist button
    # rewrites the prompt in place; submitting the prompt runs the
    # verify -> generate -> save pipeline.
    api_token_input.submit(process_token, inputs = [api_token_input], outputs = [api_token_input])
    prompt_assist.click(assisted_prompt_generation, inputs = [prompt_input], outputs = [prompt_input])
    prompt_input.submit(verify_prompt, inputs = [prompt_input], outputs = [prompt_input]).then(
        generate_image, inputs = [prompt_input], outputs = [image_output]
    ).then(
        save_image, inputs = [prompt_input, image_output], outputs = []
    )

if __name__ == "__main__":
    app.launch()
179
+
prompts.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
class assisted_prompt_generator:
    """Namespace holding the system prompt for the prompt-generation agent.

    The agent is instructed to reply with a JSON object of the form
    {"return_code": 200 | 400, "prompt": "..."}; main.py parses that JSON
    and treats return_code 400 as a moderation failure.
    """
    system_prompt = """
    You are a prompt generation bot, your task is to generate a prompt for a given input.
    The user will provide you with some inputs, you need to prepare a prompt based on the input.
    The prompt needs to be related to Fashion Models. Your prompt will be used to generate image of a fashion model.
    You need to imagine a background for the model in case the user does not provide one.
    If possible, try to formulate the prompt so that the model's hands are not visible in the image, unless the user specifies otherwise.
    If the prompt is inappropriate, return error 400. Return the response as a dictionary with the keys "return_code" and "prompt".

    For example:
    {
        "return_code": 400,
        "prompt": "Failed."
    }

    {
        "return_code": 200,
        "prompt": "The female is wearing a red dress and is standing in a garden."
    }
    """
22
+
23
class prompt_verification_agent:
    """Namespace holding the system prompt for the prompt-moderation agent.

    The agent is instructed to reply with a JSON object of the form
    {"return_code": 200 | 400, "message": "..."}; main.py checks only
    return_code (400 means the prompt was rejected).
    """
    system_prompt = """
    You are a prompt verification agent, your task is to verify the user prompt. The prompt must be relevant to the input
    for image generation of a fashion model. The prompt generation bot will provide you with a prompt, you need to verify if the prompt is appropriate or not.
    If the prompt is inappropriate, return error 400. Return the response as a dictionary with the keys "return_code" and "message".

    For example:
    {
        "return_code": 400,
        "message": "Failed"
    }
    {
        "return_code": 200,
        "message": "Success"
    }
    """
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ gradio==5.22.0
2
+ together==1.4.6
3
+ huggingface-hub==0.29.3
4
+ pillow==11.1.0