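# Hugging Face Space app: fine-grained classification of aluminium joinery hardware
# (27 classes of fittings, gaskets, handles and locks) using a Swin-L backbone wrapped
# by the PIM plug-in module, served through a Gradio interface that returns the top-5
# predictions together with example images for each predicted class.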
import gdown
import os
import torch
import requests
import numpy as np
import numpy.matlib
import copy
import cv2
from PIL import Image
from typing import List
import timm
import gradio as gr
import torchvision.transforms as transforms
from pim_module import PluginMoodel  # Make sure this file is present
# === Automatic download from Google Drive ===
if not os.path.exists("weights.pt"):
    print("Downloading weights from Google Drive with gdown...")
    file_id = "1Ck9qyjs4_c_fqgaEpZ0eN9jIV5TiqkXp"
    url = f"https://drive.google.com/uc?id={file_id}"
    gdown.download(url, "weights.pt", quiet=False)
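# Optional robustness check (not in the original app): gdown.download returns the
# output path on success and None on failure, so the download could be verified, e.g.:
#   if not os.path.exists("weights.pt"):
#       raise RuntimeError("weights.pt could not be downloaded")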
# === Classes: they must be listed in the order produced by Python's list.sort()
classes_list = [
    "Ferrage_et_accessoires_ANTI_FAUSSE_MANOEUVRE",
    "Ferrage_et_accessoires_Busettes",
    "Ferrage_et_accessoires_Butees",
    "Ferrage_et_accessoires_Chariots",
    "Ferrage_et_accessoires_Charniere",
    "Ferrage_et_accessoires_Compas_limiteur",
    "Ferrage_et_accessoires_Renvois_d'angle",
    "Joints_et_consommables_Equerres_aluminium_moulees",
    "Joints_et_consommables_Joints_EPDM",
    "Joints_et_consommables_Joints_PVC_aluminium",
    "Joints_et_consommables_Joints_a_clipser",
    "Joints_et_consommables_Joints_a_coller",
    "Joints_et_consommables_Joints_a_glisser",
    "Joints_et_consommables_Silicone_pour_vitrage_alu",
    "Joints_et_consommables_Visserie_inox_alu",
    "Poignee_carre_7_mm",
    "Poignee_carre_8_mm",
    "Poignee_cremone",
    "Poignee_cuvette",
    "Poignee_de_tirage",
    "Poignee_pour_Levant_Coulissant",
    "Serrure_Cremone_multipoints",
    "Serrure_Cuvette",
    "Serrure_Gaches",
    "Serrure_Pene_Crochet",
    "Serrure_Tringles",
    "Serrure_pour_Porte",
]
data_size = 384
fpn_size = 1536
num_classes = 27
num_selects = {'layer1': 256, 'layer2': 128, 'layer3': 64, 'layer4': 32}

features, grads, module_id_mapper = {}, {}, {}
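# The dictionaries above are filled by the hooks below: forward_hook stores each
# hooked module's input/output activations, backward_hook stores its gradients.
# In this app they are reset on every prediction but never read back afterwards,
# so they mainly serve as a debugging/visualisation handle.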
def forward_hook(module, inp_hs, out_hs):
    layer_id = len(features) + 1
    module_id_mapper[module] = layer_id
    features[layer_id] = {"in": inp_hs, "out": out_hs}

def backward_hook(module, inp_grad, out_grad):
    layer_id = module_id_mapper[module]
    grads[layer_id] = {"in": inp_grad, "out": out_grad}
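# build_model(): creates the Swin-L (384px, ImageNet-22k) backbone via timm, wraps it
# in PluginMoodel (the PIM plug-in module with FPN, selection and combiner heads),
# loads the fine-tuned checkpoint, and registers the hooks above on the four backbone
# stages and on the FPN projection layers.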
def build_model(path: str):
    backbone = timm.create_model('swin_large_patch4_window12_384_in22k', pretrained=True)
    model = PluginMoodel(
        backbone=backbone,
        return_nodes=None,
        img_size=data_size,
        use_fpn=True,
        fpn_size=fpn_size,
        proj_type="Linear",
        upsample_type="Conv",
        use_selection=True,
        num_classes=num_classes,
        num_selects=num_selects,
        use_combiner=True,
        comb_proj_size=None
    )
    ckpt = torch.load(path, map_location="cpu", weights_only=False)
    model.load_state_dict(ckpt["model"], strict=False)
    model.eval()
    for layer in [0, 1, 2, 3]:
        model.backbone.layers[layer].register_forward_hook(forward_hook)
        model.backbone.layers[layer].register_full_backward_hook(backward_hook)
    for i in range(1, 5):
        getattr(model.fpn_down, f'Proj_layer{i}').register_forward_hook(forward_hook)
        getattr(model.fpn_down, f'Proj_layer{i}').register_full_backward_hook(backward_hook)
        getattr(model.fpn_up, f'Proj_layer{i}').register_forward_hook(forward_hook)
        getattr(model.fpn_up, f'Proj_layer{i}').register_full_backward_hook(backward_hook)
    return model
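# ImgLoader reproduces the evaluation-time preprocessing: resize to 510x510,
# center-crop to the 384x384 model input, convert to a tensor, and normalize with
# ImageNet mean/std. load() accepts either a file path or a PIL image and returns
# a batched (1, 3, 384, 384) tensor.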
class ImgLoader:
    def __init__(self, img_size):
        self.transform = transforms.Compose([
            transforms.Resize((510, 510), Image.BILINEAR),
            transforms.CenterCrop((img_size, img_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

    def load(self, input_img):
        if isinstance(input_img, str):
            ori_img = cv2.imread(input_img)
            img = Image.fromarray(cv2.cvtColor(ori_img, cv2.COLOR_BGR2RGB))
        elif isinstance(input_img, Image.Image):
            img = input_img
        else:
            raise ValueError("Invalid image")
        if img.mode != "RGB":
            img = img.convert("RGB")
        return self.transform(img).unsqueeze(0)
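# cal_backward() aggregates the model's prediction heads: each target output is
# softmaxed (layer outputs are first averaged over their token dimension) and summed,
# the sum is softmaxed again, and the resulting probability vector p is turned into a
# score vector from the null-space right-singular vector of (repmat(p) - I), which is
# then log-transformed and normalised. The five highest-scoring classes are returned
# as a {class_name: score} dict, the format expected by gr.Label.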
def cal_backward(out) -> dict:
    target_layer_names = ['layer1', 'layer2', 'layer3', 'layer4',
                          'FPN1_layer1', 'FPN1_layer2', 'FPN1_layer3', 'FPN1_layer4', 'comb_outs']
    sum_out = None
    for name in target_layer_names:
        tmp_out = out[name].mean(1) if name != "comb_outs" else out[name]
        tmp_out = torch.softmax(tmp_out, dim=-1)
        sum_out = tmp_out if sum_out is None else sum_out + tmp_out
    with torch.no_grad():
        smax = torch.softmax(sum_out, dim=-1)
        probs = smax[0].cpu().numpy()
        A = np.transpose(np.matlib.repmat(probs, num_classes, 1)) - np.eye(num_classes)
        _, _, V = np.linalg.svd(A, full_matrices=True)
        V = V[num_classes - 1, :]
        if V[0] < 0:
            V = -V
        V = np.log(V)
        V = V - min(V)
        V = V / sum(V)
        top5_indices = np.argsort(-V)[:5]
        top5_scores = -np.sort(-V)[:5]
    # Build the dictionary expected by gr.Label
    top5_dict = {classes_list[int(idx)]: float(f"{score:.4f}") for idx, score in zip(top5_indices, top5_scores)}
    return top5_dict
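# predict_image() is the Gradio callback: it clears the hook stores, saves the input
# PIL image to temp.jpg, preprocesses it, runs the model, and returns the top-5 label
# dict plus five example galleries. The galleries assume the repository contains
# imgs/<class_name>/<class_name>_0001.jpg .. _0003.jpg for every class.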
# === Model loading
model = build_model("weights.pt")
img_loader = ImgLoader(data_size)

def predict_image(image: Image.Image):
    global features, grads, module_id_mapper
    features, grads, module_id_mapper = {}, {}, {}
    if image is None:
        # raise ValueError("No image received. Check the input.")
        # Return an empty label and empty galleries so the number of outputs
        # still matches the Gradio output components.
        return {}, [], [], [], [], []
    if image.mode != "RGB":
        image = image.convert("RGB")
    image_path = "temp.jpg"
    image.save(image_path)
    img_tensor = img_loader.load(image_path)
    out = model(img_tensor)
    top5_dict = cal_backward(out)  # {class: score}
    gallery_outputs = []
    for class_name in top5_dict:
        images = [
            (f"imgs/{class_name}/{class_name}_0001.jpg", f"Example {class_name} 1"),
            (f"imgs/{class_name}/{class_name}_0002.jpg", f"Example {class_name} 2"),
            (f"imgs/{class_name}/{class_name}_0003.jpg", f"Example {class_name} 3"),
        ]
        gallery_outputs.append(images)
    return top5_dict, *gallery_outputs
# === Gradio interface: two input tabs (upload and webcam) on the left; on the right,
# a gr.Label with the top-5 predictions followed by five example galleries.
with gr.Blocks(css="""
.gr-image-upload { display: none !important }
.gallery-container .gr-box { height: auto !important; padding: 0 !important; }
""") as demo:
    with gr.Row():
        with gr.Column(scale=1):
            with gr.Tab("Upload"):
                image_input_upload = gr.Image(type="pil", label="Image to classify (upload)", sources=["upload"])
            with gr.Tab("Webcam"):
                image_input_webcam = gr.Image(type="pil", label="Image to classify (webcam)", sources=["webcam"])
        with gr.Column(scale=2):  # scale should be an integer in recent Gradio versions
            label_output = gr.Label(label="Predictions")
            gallery_outputs = [
                gr.Gallery(label="", columns=3, height=300, container=True, elem_classes=["gallery-container"])
                for _ in range(5)
            ]

    image_input_upload.change(fn=predict_image, inputs=image_input_upload, outputs=[label_output] + gallery_outputs)
    image_input_webcam.change(fn=predict_image, inputs=image_input_webcam, outputs=[label_output] + gallery_outputs)

if __name__ == "__main__":
    demo.launch()
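# To run this Space locally (assumed, not stated in the file): install gradio, torch,
# torchvision, timm, opencv-python, numpy, gdown and requests, place pim_module.py
# (the file defining PluginMoodel) next to this script, then run it with Python;
# demo.launch() starts the local Gradio server.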