```python
# analyze_model.py
import torch
from torchvision import models, transforms
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder

# Rebuild the fine-tuned MobileNetV2: ImageNet backbone with an 18-class head.
model = models.mobilenet_v2(weights=models.MobileNet_V2_Weights.IMAGENET1K_V1)
model.classifier[1] = torch.nn.Linear(1280, 18)  # 18 classes
model.load_state_dict(torch.load("custom_image_model.pth", map_location="cpu"))
model.eval()

# Standard ImageNet preprocessing, matching what the backbone expects.
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

dataset = ImageFolder(root="categorized_images", transform=transform)
val_loader = DataLoader(dataset, batch_size=16, shuffle=False)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Run inference over the whole dataset and count correct top-1 predictions.
correct = 0
total = 0
with torch.no_grad():
    for images, labels in val_loader:
        images, labels = images.to(device), labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

accuracy = 100 * correct / total
print(f"Model Accuracy: {accuracy:.2f}% on {total} images")
```