Commit 9c259e8 (parent: 3cf1439): half precision
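Every change below applies the same pattern: each from_pretrained call gains torch_dtype=torch.float16, which stores the weights as 16-bit floats and roughly halves their GPU memory footprint relative to the default float32.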
visual_foundation_models.py (CHANGED):

@@ -108,7 +108,7 @@ class MaskFormer:
     def __init__(self, device):
         self.device = device
         self.processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
-        self.model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined").to(device)
+        self.model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined", torch_dtype=torch.float16).to(device)

     def inference(self, image_path, text):
         threshold = 0.5
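A side effect not visible in this hunk: the CLIPSeg processor still emits float32 tensors, so inference code generally has to cast them to match the fp16 weights. A minimal sketch of that interaction, assuming a CUDA device and an illustrative image path (the explicit .half() cast is an assumption, not part of this commit):

import torch
from PIL import Image
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation

device = "cuda"  # assumption: fp16 inference generally needs a GPU
processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained(
    "CIDAS/clipseg-rd64-refined", torch_dtype=torch.float16
).to(device)

image = Image.open("image.png")  # hypothetical input path
inputs = processor(text=["a dog"], images=[image], return_tensors="pt").to(device)
# The processor produces float32 pixel values; fp16 weights need fp16 inputs.
inputs["pixel_values"] = inputs["pixel_values"].half()
with torch.no_grad():
    logits = model(**inputs).logits
mask = torch.sigmoid(logits) > 0.5  # same threshold as the inference method above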
@@ -137,7 +137,7 @@ class ImageEditing:
         print("Initializing StableDiffusionInpaint to %s" % device)
         self.device = device
         self.mask_former = MaskFormer(device=self.device)
-        self.inpainting = StableDiffusionInpaintPipeline.from_pretrained(
+        self.inpainting = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16).to(device)

     def remove_part_of_image(self, input):
         image_path, to_be_removed_txt = input.split(",")
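The replacement line collapses what was a multi-line from_pretrained call into one call that pins both the checkpoint and the dtype. diffusers pipelines manage intermediate dtypes internally, so the load site is the only change needed; a usage sketch with illustrative inputs:

import torch
from PIL import Image
from diffusers import StableDiffusionInpaintPipeline

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16
).to("cuda")

original = Image.open("photo.png").convert("RGB")  # hypothetical inputs
mask = Image.open("mask.png").convert("RGB")
# The pipeline handles casting internally; callers pass PIL images as usual.
result = pipe(prompt="background", image=original, mask_image=mask).images[0]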
@@ -177,8 +177,8 @@ class T2I:
         self.device = device
         self.pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
         self.text_refine_tokenizer = AutoTokenizer.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion")
-        self.text_refine_model = AutoModelForCausalLM.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion")
-        self.text_refine_gpt2_pipe = pipeline("text-generation", model=self.text_refine_model, tokenizer=self.text_refine_tokenizer, device=self.device)
+        self.text_refine_model = AutoModelForCausalLM.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion", torch_dtype=torch.float16)
+        self.text_refine_gpt2_pipe = pipeline("text-generation", model=self.text_refine_model, tokenizer=self.text_refine_tokenizer, device=self.device, torch_dtype=torch.float16)
         self.pipe.to(device)

     def inference(self, text):
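Two details worth noting in this hunk: pipeline() wraps a model instance that is already fp16, so the added torch_dtype argument is presumably redundant (it matters when pipeline loads a checkpoint by name), and the device string is forwarded so the wrapped model ends up on the GPU. A sketch of the equivalent explicit flow, with an illustrative prompt:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

tokenizer = AutoTokenizer.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion")
model = AutoModelForCausalLM.from_pretrained(
    "Gustavosta/MagicPrompt-Stable-Diffusion", torch_dtype=torch.float16
)
# The instance already carries fp16 weights, so no torch_dtype is repeated here.
refine = pipeline("text-generation", model=model, tokenizer=tokenizer, device="cuda:0")
refined = refine("a cat on a sofa", max_length=77)[0]["generated_text"]  # illustrative prompt and length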
@@ -194,8 +194,8 @@ class ImageCaptioning:
     def __init__(self, device):
         print("Initializing ImageCaptioning to %s" % device)
         self.device = device
-        self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
-        self.model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(self.device)
+        self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base", torch_dtype=torch.float16)
+        self.model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base", torch_dtype=torch.float16).to(self.device)

     def inference(self, image_path):
         inputs = self.processor(Image.open(image_path), return_tensors="pt").to(self.device)
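BlipProcessor holds no model weights, so the torch_dtype passed to it is most likely a no-op; the cast that matters is at inference, where the unchanged line 201 still hands float32 pixel values to what is now an fp16 model. A sketch of the cast fp16 BLIP needs, in the .to(device, dtype) form common in BLIP examples (not part of this commit):

import torch
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

device = "cuda"
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained(
    "Salesforce/blip-image-captioning-base", torch_dtype=torch.float16
).to(device)

# Move tensors to the GPU and cast the floating ones to fp16 in a single call.
inputs = processor(Image.open("photo.png"), return_tensors="pt").to(device, torch.float16)
out = model.generate(**inputs)
caption = processor.decode(out[0], skip_special_tokens=True)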
@@ -225,11 +225,11 @@ class image2canny_new:
 class canny2image_new:
     def __init__(self, device):
         self.controlnet = ControlNetModel.from_pretrained(
-            "fusing/stable-diffusion-v1-5-controlnet-canny"
+            "fusing/stable-diffusion-v1-5-controlnet-canny", torch_dtype=torch.float16
         )

         self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None
+            "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None, torch_dtype=torch.float16
         )

         self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
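For the ControlNet pair, the control network and the surrounding pipeline must agree on dtype, which is exactly what the two added arguments ensure. A usage sketch, assuming a GPU and an illustrative precomputed canny-edge map:

import torch
from PIL import Image
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline, UniPCMultistepScheduler

controlnet = ControlNetModel.from_pretrained(
    "fusing/stable-diffusion-v1-5-controlnet-canny", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    safety_checker=None,
    torch_dtype=torch.float16,
)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda")

edges = Image.open("canny.png")  # hypothetical canny conditioning image
image = pipe("a bird", image=edges, num_inference_steps=20).images[0]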