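"""Gradio Space app that converts a CompVis Stable Diffusion checkpoint to the
Diffusers format and opens a pull request on the model repo with the result."""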
import glob
import os
import subprocess

import gradio as gr
import torch
from huggingface_hub import HfApi, upload_folder
from safetensors import safe_open

import hf_utils
import utils

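# Clone the diffusers repo so the CompVis -> Diffusers conversion script it ships
# with can be invoked below via subprocess.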
subprocess.run(["git", "clone", "https://github.com/huggingface/diffusers", "diffs"]) |
|
|
|
|
|
def error_str(error, title="Error"):
    return f"""#### {title}
{error}""" if error else ""


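# Token callback: fetch the user's model names and reveal the model picker.
# hf_utils.get_my_model_names is expected to return (model_names, error_message).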
def on_token_change(token):
    model_names, error = hf_utils.get_my_model_names(token)
    if model_names:
        model_names.append("Other")

    return (
        gr.update(visible=bool(model_names)),
        gr.update(choices=model_names, value=model_names[0] if model_names else None),
        gr.update(visible=bool(model_names)),
        gr.update(value=error_str(error)),
    )


def url_to_model_id(model_id_str):
    return model_id_str.split("/")[-2] + "/" + model_id_str.split("/")[-1] if model_id_str.startswith("https://huggingface.co/") else model_id_str


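# List the checkpoint files in the chosen repo so the user can pick one to convert.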
def get_ckpt_names(token, radio_model_names, input_model):

    model_id = url_to_model_id(input_model) if radio_model_names == "Other" else radio_model_names

    if token == "" or model_id == "":
        return error_str("Please enter both a token and a model name.", title="Invalid input"), gr.update(choices=[]), gr.update(visible=False)

    try:
        api = HfApi(token=token)
        # Also include .safetensors files so the "Extract from safetensors" option below has something to work on.
        ckpt_files = [f for f in api.list_repo_files(repo_id=model_id) if f.endswith(".ckpt") or f.endswith(".safetensors")]

        if not ckpt_files:
            return error_str("No checkpoint files found in the model repo."), gr.update(choices=[]), gr.update(visible=False)

        return None, gr.update(choices=ckpt_files, value=ckpt_files[0], visible=True), gr.update(visible=True)

    except Exception as e:
        return error_str(e), gr.update(choices=[]), None


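# Main pipeline: download the chosen checkpoint, convert it with the diffusers
# script, and open a PR on the model repo with the converted weights.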
def convert_and_push(radio_model_names, input_model, ckpt_name, sd_version, token, path_in_repo, ema, safetensors):
    extract_ema = ema == "ema"

    if sd_version is None:
        return error_str("You must select a Stable Diffusion version.", title="Invalid input")

    model_id = url_to_model_id(input_model) if radio_model_names == "Other" else radio_model_names

    try:
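        # 1. Download the selected checkpoint; hf_utils.download_file also returns
        # the revision the file was downloaded from.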
        ckpt_path, revision = hf_utils.download_file(repo_id=model_id, filename=ckpt_name, token=token)

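        # 2. If the source file is a safetensors checkpoint, load its tensors and
        # re-save them with torch.save so the conversion script (which expects a
        # torch-serialized checkpoint) can read them.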
if safetensors == "yes": |
|
|
tensors = {} |
|
|
with safe_open(ckpt_path, framework="pt", device="cpu") as f: |
|
|
for key in f.keys(): |
|
|
tensors[key] = f.get_tensor(key) |
|
|
|
|
|
new_checkpoint_path = "/".join(ckpt_path.split("/")[:-1] + ["model_safe.ckpt"]) |
|
|
torch.save(tensors, new_checkpoint_path) |
|
|
ckpt_path = new_checkpoint_path |
|
|
print("Converting ckpt_path", ckpt_path) |
|
|
|
|
|
print(ckpt_path) |
|
|
|
|
|
|
|
|
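        # 3. Run the Diffusers conversion script, dumping the converted weights
        # into a local folder named after the model repo.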
        os.makedirs(model_id, exist_ok=True)
        run_command = [
            "python3",
            "./diffs/scripts/convert_original_stable_diffusion_to_diffusers.py",
            "--checkpoint_path",
            ckpt_path,
            "--dump_path",
            model_id,
        ]
        if extract_ema:
            run_command.append("--extract_ema")
        subprocess.run(run_command)

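        # 4. Upload the converted weights back to the model repo as a pull request.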
commit_message="Add Diffusers weights" |
|
|
upload_folder( |
|
|
folder_path=model_id, |
|
|
repo_id=model_id, |
|
|
path_in_repo=path_in_repo, |
|
|
token=token, |
|
|
create_pr=True, |
|
|
commit_message=commit_message, |
|
|
commit_description=f"Add Diffusers weights converted from checkpoint `{ckpt_name}` in revision {revision}", |
|
|
) |
|
|
|
|
|
|
|
|
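        # 5. Clean up: remove the downloaded checkpoint, the local converted folder,
        # and any stray .yaml config files to free disk space.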
        hf_utils.delete_file(revision)
        subprocess.run(["rm", "-rf", model_id.split('/')[0]])
        for f in glob.glob("*.yaml*"):
            subprocess.run(["rm", "-rf", f])

        return f"""Successfully converted the checkpoint and opened a PR to add the weights to the model repo.
You can view and merge the PR [here]({hf_utils.get_pr_url(HfApi(token=token), model_id, commit_message)})."""

    except Exception as e:
        return error_str(e)


DESCRIPTION = """### Convert a stable diffusion checkpoint to Diffusers🧨 |
|
|
With this space, you can easily convert a CompVis stable diffusion checkpoint to Diffusers and automatically create a pull request to the model repo. |
|
|
You can choose to convert a checkpoint from one of your own models, or from any other model on the Hub. |
|
|
You can skip the queue by running the app in the colab: [](https://colab.research.google.com/gist/qunash/f0f3152c5851c0c477b68b7b98d547fe/convert-sd-to-diffusers.ipynb)""" |
|
|
|
|
|
with gr.Blocks() as demo:

    gr.Markdown(DESCRIPTION)
    with gr.Row():

        with gr.Column(scale=11):
            with gr.Column():
                gr.Markdown("## 1. Load model info")
                input_token = gr.Textbox(
                    max_lines=1,
                    type="password",
                    label="Enter your Hugging Face token",
                    placeholder="A token with WRITE permission is required to open the PR",
                )
                gr.Markdown("You can get a token [here](https://huggingface.co/settings/tokens)")
                with gr.Group(visible=False) as group_model:
                    radio_model_names = gr.Radio(label="Choose a model")
                    input_model = gr.Textbox(
                        max_lines=1,
                        label="Model name or URL",
                        placeholder="username/model_name",
                        visible=False,
                    )

            btn_get_ckpts = gr.Button("Load", visible=False)

        with gr.Column(scale=10):
            with gr.Column(visible=False) as group_convert:
                gr.Markdown("## 2. Convert to Diffusers🧨")
                radio_ckpts = gr.Radio(label="Choose the checkpoint to convert", visible=False)
                path_in_repo = gr.Textbox(label="Path where the weights will be saved", placeholder="Leave empty for root folder")
                ema = gr.Radio(label="Extract EMA or non-EMA?", choices=["ema", "non-ema"])
                safetensors = gr.Radio(label="Extract from safetensors", choices=["yes", "no"], value="no")
                radio_sd_version = gr.Radio(label="Choose the model version", choices=["v1", "v2", "v2.1"])
                gr.Markdown("Conversion may take a few minutes.")
                btn_convert = gr.Button("Convert & Push")

    error_output = gr.Markdown(label="Output")

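    # Wire up the UI events.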
    input_token.change(
        fn=on_token_change,
        inputs=input_token,
        outputs=[group_model, radio_model_names, btn_get_ckpts, error_output],
        queue=False,
        scroll_to_output=True,
    )

    radio_model_names.change(
        lambda x: gr.update(visible=x == "Other"),
        inputs=radio_model_names,
        outputs=input_model,
        queue=False,
        scroll_to_output=True,
    )

    btn_get_ckpts.click(
        fn=get_ckpt_names,
        inputs=[input_token, radio_model_names, input_model],
        outputs=[error_output, radio_ckpts, group_convert],
        scroll_to_output=True,
        queue=False,
    )

    btn_convert.click(
        fn=convert_and_push,
        inputs=[radio_model_names, input_model, radio_ckpts, radio_sd_version, input_token, path_in_repo, ema, safetensors],
        outputs=error_output,
        scroll_to_output=True,
    )

gr.HTML(""" |
|
|
<div style="border-top: 1px solid #303030;"> |
|
|
<br> |
|
|
<p>Space by: <a href="https://twitter.com/hahahahohohe"><img src="https://img.shields.io/twitter/follow/hahahahohohe?label=%40anzorq&style=social" alt="Twitter Follow"></a></p><br> |
|
|
<a href="https://www.buymeacoffee.com/anzorq" target="_blank"><img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png" alt="Buy Me A Coffee" style="height: 45px !important;width: 162px !important;" ></a><br><br> |
|
|
<p><img src="https://visitor-badge.glitch.me/badge?page_id=anzorq.sd-to-diffusers" alt="visitors"></p> |
|
|
</div> |
|
|
""") |
|
|
|
|
|
demo.queue()
demo.launch(debug=True, share=utils.is_google_colab())