from skimage import io
import torch, os
from PIL import Image
from briarmbg import BriaRMBG
from utilities import preprocess_image, postprocess_image


def example_inference():
    im_path = f"{os.path.dirname(os.path.abspath(__file__))}/example_input.jpg"

    # load the pretrained model and move it to the GPU when available
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = BriaRMBG.from_pretrained("briaai/RMBG-1.4")
    net.to(device)
    net.eval()
    # prepare input
    model_input_size = [1024, 1024]
    orig_im = io.imread(im_path)
    orig_im_size = orig_im.shape[0:2]
    image = preprocess_image(orig_im, model_input_size).to(device)
    # inference (gradients are not needed)
    with torch.no_grad():
        result = net(image)
    # post process
    result_image = postprocess_image(result[0][0], orig_im_size)
    # save result
    pil_mask_im = Image.fromarray(result_image)
    orig_image = Image.open(im_path)
    no_bg_image = orig_image.copy()
    no_bg_image.putalpha(pil_mask_im)
    no_bg_image.save("example_image_no_bg.png")


if __name__ == "__main__":
    example_inference()
