RohitGandikota committed on
Commit · 09d57a5 · 1 Parent(s): 991663d
inference clean
app.py
CHANGED
@@ -92,7 +92,7 @@ class Demo:
 
         self.seed_infr = gr.Number(
             label="Seed",
-            value=
+            value=42
         )
 
         self.slider_scale_infr = gr.Slider(
@@ -144,28 +144,28 @@ class Demo:
         self.target_concept = gr.Text(
             placeholder="Enter target concept to make edit on ...",
             label="Prompt of concept on which edit is made",
-            info="Prompt corresponding to concept to edit",
+            info="Prompt corresponding to concept to edit (eg: 'person')",
             value = ''
         )
 
         self.positive_prompt = gr.Text(
             placeholder="Enter the enhance prompt for the edit ...",
             label="Prompt to enhance",
-            info="Prompt corresponding to concept to enhance",
+            info="Prompt corresponding to concept to enhance (eg: 'person, old')",
             value = ''
         )
 
         self.negative_prompt = gr.Text(
             placeholder="Enter the suppress prompt for the edit ...",
             label="Prompt to suppress",
-            info="Prompt corresponding to concept to supress",
+            info="Prompt corresponding to concept to supress (eg: 'person, young')",
             value = ''
         )
 
         self.attributes_input = gr.Text(
             placeholder="Enter the concepts to preserve (comma seperated). Leave empty if not required ...",
             label="Concepts to Preserve",
-            info="Comma seperated concepts to preserve/disentangle",
+            info="Comma seperated concepts to preserve/disentangle (eg: 'male, female')",
             value = ''
         )
         self.is_person = gr.Checkbox(
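
These text boxes only define the UI. As a minimal, self-contained sketch of how such inputs are typically fed to a callback in Gradio (the handler, component variables, and button below are hypothetical, not the Space's actual wiring):

import gradio as gr

def start_training(target, positive, negative, attributes, iterations):
    # Placeholder for the real LoRA-slider training call.
    return f"training '{positive}' vs '{negative}' on '{target}' for {int(iterations)} steps"

with gr.Blocks() as demo:
    target_concept = gr.Text(label="Prompt of concept on which edit is made", value="")
    positive_prompt = gr.Text(label="Prompt to enhance", value="")
    negative_prompt = gr.Text(label="Prompt to suppress", value="")
    attributes_input = gr.Text(label="Concepts to Preserve", value="")
    iterations_input = gr.Number(value=1000, precision=0, label="Iterations")
    status = gr.Text(label="Status")
    train_button = gr.Button("Train")
    # Each UI value is passed positionally to the handler; the result fills the status box.
    train_button.click(
        fn=start_training,
        inputs=[target_concept, positive_prompt, negative_prompt, attributes_input, iterations_input],
        outputs=status,
    )

# demo.launch()  # uncomment to run locally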
@@ -177,7 +177,13 @@ class Demo:
             label="Rank of the Slider",
             info='Slider Rank to train'
         )
-
+        choices = ['xattn', 'noxattn', 'full']
+        self.train_method_input = gr.Dropdown(
+            choices=choices,
+            value='xattn',
+            label='Train Method',
+            info='Method of training. If xattn - loras will be on cross attns only. noxattn - all layers except cross attn (official implementation). full - all layers'
+        )
         self.iterations_input = gr.Number(
             value=1000,
             precision=0,
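
The new Train Method dropdown exposes which parts of the UNet receive LoRA adapters. A minimal sketch of how that choice could translate into module selection, assuming a diffusers-style Stable Diffusion UNet where cross-attention blocks are named "attn2"; the helper name is hypothetical and the Space's actual trainer may pick layers differently:

import torch.nn as nn

def select_lora_targets(unet: nn.Module, train_method: str):
    """Pick submodule names to wrap with LoRA, following the dropdown semantics:
    'xattn' -> cross-attention only, 'noxattn' -> everything except cross-attention,
    'full' -> all eligible layers. The 'attn2' naming is an assumption based on
    diffusers-style UNets."""
    targets = []
    for name, module in unet.named_modules():
        if not isinstance(module, (nn.Linear, nn.Conv2d)):
            continue  # only wrap linear/conv weights with LoRA
        in_xattn = "attn2" in name
        if train_method == "xattn" and not in_xattn:
            continue
        if train_method == "noxattn" and in_xattn:
            continue
        targets.append(name)
    return targets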
@@ -259,7 +265,7 @@ class Demo:
 
     def inference(self, prompt, seed, start_noise, scale, model_name, pbar = gr.Progress(track_tqdm=True)):
 
-        seed = seed or
+        seed = seed or 42
 
         generator = torch.manual_seed(seed)
 
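
One note on the new default: `seed = seed or 42` also replaces an explicit seed of 0, since 0 is falsy in Python. A minimal sketch of an alternative that only falls back when no seed was provided (function and variable names are illustrative):

import torch

def make_generator(seed=None, default_seed=42):
    # Only fall back when the seed is actually missing, so an explicit 0 is honored.
    if seed is None:
        seed = default_seed
    return torch.manual_seed(int(seed))

generator = make_generator(0)  # keeps seed 0 instead of silently using 42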