Spaces: Running on Zero
| import gradio as gr | |
| import numpy as np | |
| import spaces | |
| import torch | |
| import torchvision | |
| from latent_utils import compress | |
| from util.file import generate_binary_file, load_numpy_from_binary_bitwise | |
| from util.img_utils import resize_and_crop | |
def main(img_to_compress, T, K, model_type='512x512', bitstream=None, avail_models=None,
         progress=gr.Progress(track_tqdm=True)):
    """Compress an image into a binary bitstream, or decompress a bitstream back into an image.

    Args:
        img_to_compress: PIL-style image to compress; ignored when `bitstream` is given.
        T: number of diffusion timesteps used by the model/scheduler.
        K: codebook/bit parameter forwarded to `compress` and the bitstream (de)serializers.
        model_type: resolution key into `avail_models`, e.g. '512x512'.
        bitstream: existing binary file to decompress; None selects the compression path.
        avail_models: mapping from `model_type` to a pre-loaded model.
        progress: Gradio progress tracker (tracks tqdm output automatically).

    Returns:
        (image, bitstream_file) when compressing (`bitstream is None`),
        otherwise just the reconstructed image (H, W, C float array in [0, 1]).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Try to recover latent indices from an existing bitstream (decompression path).
    indices = load_numpy_from_binary_bitwise(bitstream, K, T, model_type, T - 1)
    if indices is not None:
        # Decompression: the indices drive generation; no input image is needed.
        indices = indices.to(device)
        img_to_compress = None
    else:
        # Compression: resize/crop to the model resolution and normalize to [-1, 1] NCHW.
        img_to_compress = resize_and_crop(img_to_compress, int(model_type.split('x')[0]))
        img_to_compress = (torchvision.transforms.ToTensor()(img_to_compress) * 2) - 1
        img_to_compress = img_to_compress.unsqueeze(0).to(device)

    # Move the pre-loaded model (and its nested scheduler state) onto the chosen device.
    model = avail_models[model_type].to(device)
    model.device = device
    model.model.to(device=device)
    model.model.scheduler.device = device
    model.set_timesteps(T, device=device)
    model.num_timesteps = T

    with torch.no_grad():
        x, indices = compress(model, img_to_compress, K, indices, device=device)

    # Map model output from [-1, 1] back to an HWC float image in [0, 1].
    x = (x / 2 + 0.5).clamp(0, 1)
    x = x.detach().cpu().squeeze().numpy()
    x = np.transpose(x, (1, 2, 0))
    torch.cuda.empty_cache()

    if bitstream is None:
        # Compression path only: serialize the indices. `.cpu()` is required because
        # CUDA tensors cannot be converted to NumPy arrays directly.
        bitfile = generate_binary_file(indices.cpu().numpy(), K, T, model_type)
        return x, bitfile
    return x