Added images drop for training blocks
app.py
CHANGED
@@ -1,11 +1,11 @@
 import gradio as gr
 import torch
 import os
+import shutil
 import requests
 import subprocess
 from subprocess import getoutput
-from huggingface_hub import snapshot_download, HfApi
-
+from huggingface_hub import snapshot_download, HfApi, create_repo
 api = HfApi()

 hf_token = os.environ.get("HF_TOKEN_WITH_WRITE_PERMISSION")
@@ -13,6 +13,7 @@ hf_token = os.environ.get("HF_TOKEN_WITH_WRITE_PERMISSION")
 is_shared_ui = True if "fffiloni/train-dreambooth-lora-sdxl" in os.environ['SPACE_ID'] else False

 is_gpu_associated = torch.cuda.is_available()
+
 if is_gpu_associated:
     gpu_info = getoutput('nvidia-smi')
     if("A10G" in gpu_info):
@@ -22,6 +23,47 @@ if is_gpu_associated:
 else:
     which_gpu = "CPU"

+def load_images_to_dataset(images, dataset_name):
+
+    if dataset_name == "":
+        raise gr.Error("You forgot to name your new dataset. ")
+
+    # Create the directory if it doesn't exist
+    my_working_directory = f"my_working_directory_for_{dataset_name}"
+    if not os.path.exists(my_working_directory):
+        os.makedirs(my_working_directory)
+
+    # Assuming 'images' is a list of image file paths
+    for idx, image in enumerate(images):
+        # Get the base file name (without path) from the original location
+        image_name = os.path.basename(image.name)
+
+        # Construct the destination path in the working directory
+        destination_path = os.path.join(my_working_directory, image_name)
+
+        # Copy the image from the original location to the working directory
+        shutil.copy(image.name, destination_path)
+
+        # Print the image name and its corresponding save path
+        print(f"Image {idx + 1}: {image_name} copied to {destination_path}")
+
+    path_to_folder = my_working_directory
+    your_username = api.whoami(token=hf_token)["name"]
+    repo_id = f"{your_username}/{dataset_name}"
+    create_repo(repo_id=repo_id, repo_type="dataset", private=True, token=hf_token)
+
+    api.upload_folder(
+        folder_path=path_to_folder,
+        repo_id=repo_id,
+        repo_type="dataset",
+        token=hf_token
+    )
+
+    #print("pushing dataset to the hub")
+    #dataset.push_to_hub("fffiloni/new_dataset_eugene", private=True, token=hf_token)
+
+    return "Done, your dataset is ready and loaded for the training step!", repo_id
+
 def swap_hardware(hf_token, hardware="cpu-basic"):
     hardware_url = f"https://huggingface.co/spaces/{os.environ['SPACE_ID']}/hardware"
     headers = { "authorization" : f"Bearer {hf_token}"}
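Note: the upload flow added above boils down to create_repo plus upload_folder from huggingface_hub. A minimal standalone sketch of the same idea, where the token value, folder name, and dataset name are illustrative placeholders rather than anything taken from the Space:

from huggingface_hub import HfApi, create_repo

hf_token = "hf_xxx"                  # assumption: a token with write permission
folder_path = "my_training_images"   # assumption: a local folder containing the images

api = HfApi()
username = api.whoami(token=hf_token)["name"]
repo_id = f"{username}/my_awesome_dataset"

# Create a private dataset repo, then push the whole local folder into it
create_repo(repo_id=repo_id, repo_type="dataset", private=True, token=hf_token)
api.upload_folder(folder_path=folder_path, repo_id=repo_id, repo_type="dataset", token=hf_token)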
@@ -194,7 +236,15 @@ with gr.Blocks(css=css) as demo:
         </div>
         ''')
         gr.Markdown("# SD-XL Dreambooth LoRa Training UI 💭")
-        gr.Markdown("
+        gr.Markdown("## Drop your training images (optional)")
+        gr.Markdown("Use this step to upload your training images. If you already have a dataset stored on your HF profile, you can skip this step, and provide your dataset ID in the `Dataset ID` input below.")
+        images = gr.File(file_types=["image"], label="Upload your images", file_count="multiple", interactive=True, visible=True)
+        with gr.Row():
+            new_dataset_name = gr.Textbox(label="Set new dataset name", placeholder="e.g.: my_awesome_dataset")
+            load_btn = gr.Button("Load images to new dataset")
+        dataset_status = gr.Textbox(label="dataset status")
+        gr.Markdown("## Training ")
+        gr.Markdown("You can use an existing image dataset, find a dataset example here: [https://huggingface.co/datasets/diffusers/dog-example](https://huggingface.co/datasets/diffusers/dog-example) ;)")
         with gr.Row():
             dataset_id = gr.Textbox(label="Dataset ID", info="use one of your previously uploaded image datasets on your HF profile", placeholder="diffusers/dog-example")
             instance_prompt = gr.Textbox(label="Concept prompt", info="concept prompt - use a unique, made up word to avoid collisions")
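Note: load_images_to_dataset reads each uploaded file through image.name. With file_count="multiple" (and the Gradio 3.x behavior this diff relies on), the handler receives a list of temporary-file wrappers whose .name attribute is the local path of the uploaded file. A minimal sketch of that hand-off, with illustrative component and function names:

import gradio as gr

def list_paths(files):
    # Each entry is a temp-file wrapper; .name is the uploaded file's local path
    return "\n".join(f.name for f in files)

with gr.Blocks() as demo:
    files = gr.File(file_types=["image"], file_count="multiple", label="Upload your images")
    show_btn = gr.Button("Show local paths")
    paths = gr.Textbox(label="Local paths")
    show_btn.click(fn=list_paths, inputs=[files], outputs=[paths])

demo.launch()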
@@ -207,8 +257,14 @@ with gr.Blocks(css=css) as demo:
         train_button = gr.Button("Train !")


-
-
+        train_status = gr.Textbox(label="Training status")
+
+        load_btn.click(
+            fn = load_images_to_dataset,
+            inputs = [images, new_dataset_name],
+            outputs = [dataset_status, dataset_id]
+        )
+
         train_button.click(
             fn = main,
             inputs = [
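Note: load_images_to_dataset returns two values, and Gradio maps them positionally onto outputs = [dataset_status, dataset_id], so a successful upload both reports its status and pre-fills the Dataset ID textbox used by the training step. A minimal sketch of that return-tuple-to-outputs pattern, with illustrative names:

import gradio as gr

def make_dataset(name):
    # Returns (status message, repo id); the tuple is mapped onto the two outputs in order
    return "Done!", f"your-username/{name}"

with gr.Blocks() as demo:
    name = gr.Textbox(label="Dataset name")
    create_btn = gr.Button("Create dataset")
    status = gr.Textbox(label="Status")
    dataset_id = gr.Textbox(label="Dataset ID")
    create_btn.click(fn=make_dataset, inputs=[name], outputs=[status, dataset_id])

demo.launch()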
@@ -219,7 +275,7 @@ with gr.Blocks(css=css) as demo:
                 checkpoint_steps,
                 remove_gpu
             ],
-            outputs = [
+            outputs = [train_status]
         )

 demo.queue(default_enabled=False).launch(debug=True)