File size: 2,993 Bytes
00fb916
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98

import cv2
import numpy as np
import torch
import torch.nn.functional as F
from torchvision import models, transforms

import warnings

warnings.filterwarnings("ignore")

# Load the pretrained torchvision segmentation models once at import time
# (all in eval mode; inference below runs under torch.no_grad()).
# NOTE(review): the `pretrained=True` kwarg is deprecated in torchvision >= 0.13
# (replaced by `weights=`) — this file targets an older torchvision; confirm the
# pinned version before migrating.
# NOTE(review): the 'DeepLabv3+' entry actually builds deeplabv3_resnet101,
# which is DeepLabv3 (not the DeepLabv3+ architecture); likewise 'LRR' maps to
# LR-ASPP. The keys are display names only — they are referenced by the UI
# dropdown, so do not rename them.
models_dict = {
    'DeepLabv3': models.segmentation.deeplabv3_resnet50(pretrained=True).eval(),
    'DeepLabv3+': models.segmentation.deeplabv3_resnet101(pretrained=True).eval(),
    'FCN-ResNet50': models.segmentation.fcn_resnet50(pretrained=True).eval(),
    'FCN-ResNet101': models.segmentation.fcn_resnet101(pretrained=True).eval(),
    'LRR': models.segmentation.lraspp_mobilenet_v3_large(pretrained=True).eval(),
}

# Image preprocessing: resize/center-crop to the 224x224 input the models were
# trained on, then normalize with the standard ImageNet statistics.
image_transforms = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]
    )
])

def download_test_img():
    """Download the sample images used by the gradio examples into the CWD."""
    sample_images = {
        'bus.jpg':
            'https://user-images.githubusercontent.com/59380685/266264420-21575a83-4057-41cf-8a4a-b3ea6f332d79.jpg',
        'dogs.jpg':
            'https://user-images.githubusercontent.com/59380685/266264536-82afdf58-6b9a-4568-b9df-551ee72cb6d9.jpg',
        'zidane.jpg':
            'https://user-images.githubusercontent.com/59380685/266264600-9d0c26ca-8ba6-45f2-b53b-4dc98460c43e.jpg',
    }
    for filename, url in sample_images.items():
        torch.hub.download_url_to_file(url, filename)

def predict_segmentation(image, model_name):
    """Run semantic segmentation on a PIL image with the selected model.

    Args:
        image: input PIL.Image (any mode; converted to RGB internally).
        model_name: key into the module-level ``models_dict``.

    Returns:
        (segmentation_image, blend_image): two uint8 RGB numpy arrays at the
        original image resolution — the JET-colormapped class map and a 50/50
        overlay of it on the input photo.
    """
    # Guard against RGBA/grayscale uploads: addWeighted below needs a 3-channel
    # array matching the colormap's shape.
    rgb_image = image.convert('RGB')

    # Preprocess to the 224x224 normalized tensor the models expect.
    image_tensor = image_transforms(rgb_image).unsqueeze(0)

    # Model inference: upsample the per-class logits back to the original
    # resolution, then take the per-pixel argmax. (The original code also
    # computed an argmax at 224x224 that was never used — removed.)
    with torch.no_grad():
        output = models_dict[model_name](image_tensor)['out'][0]
        segmentation = F.interpolate(
            output.float().unsqueeze(0),
            size=rgb_image.size[::-1],  # PIL size is (W, H); interpolate wants (H, W)
            mode='bicubic',
            align_corners=False
        )[0].argmax(0).numpy()

    # Colorize the class-id map. applyColorMap yields BGR; convert to RGB so
    # both returned images share one color space (previously only the blend
    # was converted, so the segmentation map displayed with swapped channels).
    segmentation_image = cv2.applyColorMap(np.uint8(segmentation), cv2.COLORMAP_JET)
    segmentation_image = cv2.cvtColor(segmentation_image, cv2.COLOR_BGR2RGB)

    # Overlay: both operands are now RGB, so no post-blend channel swap needed.
    blend_image = cv2.addWeighted(np.array(rgb_image), 0.5, segmentation_image, 0.5, 0)

    return segmentation_image, blend_image


import gradio as gr

# Example rows shown in the UI: (image path, model name). The files are
# fetched by download_test_img() below.
examples = [
    ['bus.jpg', 'DeepLabv3'],
    ['dogs.jpg', 'DeepLabv3'],
    ['zidane.jpg', 'DeepLabv3']
]
download_test_img()
# Dropdown choices — must match the keys of models_dict.
model_list = ['DeepLabv3', 'DeepLabv3+', 'FCN-ResNet50', 'FCN-ResNet101', 'LRR']
# NOTE(review): gr.inputs / gr.outputs and the capture_session kwarg are the
# legacy gradio 2.x API, removed in gradio 3+ — this script requires an old
# gradio pin; confirm before upgrading the dependency.
inputs = [
    gr.inputs.Image(type='pil', label='εŽŸε§‹ε›Ύεƒ'),
    gr.inputs.Dropdown(model_list, label='ι€‰ζ‹©ζ¨‘εž‹', default='DeepLabv3')
]
outputs = [
    gr.outputs.Image(type='pil',label='εˆ†ε‰²ε›Ύ'),
    gr.outputs.Image(type='pil',label='θžεˆε›Ύ')
]
interface = gr.Interface(
    predict_segmentation,
    inputs,
    outputs,
    examples=examples,
    capture_session=True,
    title='torchvision-segmentation-webui',
    description='torchvision segmentation webui on gradio'
)

interface.launch()