{"cells":[{"cell_type":"code","source":["# Step 1: Mount Google Drive\n","from google.colab import drive\n","drive.mount('/content/drive')"],"metadata":{"id":"AEA3l5bSLSo1"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["##Aesthetic sorting using CLIP"],"metadata":{"id":"5teR85z0-l_S"}},{"cell_type":"code","source":["# --------------------------------------------------------------\n","# 0. INSTALL & MOUNT\n","# --------------------------------------------------------------\n","!pip install -q open_clip_torch torch torchvision pillow tqdm pandas ftfy regex scikit-learn\n","\n","import zipfile, os, shutil, tempfile, requests, numpy as np\n","from tqdm import tqdm\n","import torch, open_clip\n","from PIL import Image\n","import glob, pandas as pd\n","from sklearn.metrics.pairwise import cosine_similarity\n","from google.colab import drive\n","\n","drive.mount('/content/drive')"],"metadata":{"id":"pjMqYPkO-q6q"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# --------------------------------------------------------------\n","# 1. PARAMETERS (Google-Colab Form UI)\n","# --------------------------------------------------------------\n","\n","#@title **Input Settings** { display-mode: \"form\" }\n","\n","ZIP_PATH = \"/content/drive/MyDrive/training_data_31635.zip\" #@param {type:\"string\"}\n","EXTRACT_TO = \"/content/hellsing_aesthetic_sorted\" #@param {type:\"string\"}\n","OUT_DIR = \"/content/drive/MyDrive/hellsing_aesthetic_sorted\" #@param {type:\"string\"}\n","\n","#@title **Deduplication Settings** { display-mode: \"form\" }\n","\n","SIMILARITY_THRESHOLD = 0.99 #@param {type:\"slider\", min:0.9, max:1.0, step:0.005}\n","FILESIZE_TOLERANCE = 0.05 #@param {type:\"slider\", min:0.0, max:0.2, step:0.01}\n","\n","#@title **Packing Settings** { display-mode: \"form\" }\n","\n","MAX_ZIP_BYTES = 300 #@param {type:\"slider\", min:50, max:1000, step:50}\n","MAX_ZIP_BYTES = MAX_ZIP_BYTES * 1024 * 1024 # convert MiB → bytes\n","\n","# -----------------------------------------------------------------\n","# (no changes needed below – the rest of the notebook uses these vars)\n","# -----------------------------------------------------------------\n","os.makedirs(EXTRACT_TO, exist_ok=True)\n","os.makedirs(OUT_DIR, exist_ok=True)\n","\n","print(\"Parameters loaded from the form\")"],"metadata":{"id":"wyfUVs9g_Bxl"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# --------------------------------------------------------------\n","# 2. EXTRACT ONLY IMAGES (skip if already done)\n","# --------------------------------------------------------------\n","if not os.listdir(EXTRACT_TO):\n"," with zipfile.ZipFile(ZIP_PATH, 'r') as z:\n"," members = [m for m in z.namelist()\n"," if m.lower().split('.')[-1] in {'png','jpg','jpeg','bmp','webp'}]\n"," for member in tqdm(members, desc='Extracting images'):\n"," z.extract(member, EXTRACT_TO)\n","else:\n"," print(\"Folder already contains files – skipping extraction.\")"],"metadata":{"id":"p9C4hwLF_O4v"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# --------------------------------------------------------------\n","# 3. 
LOAD CLIP + AESTHETIC HEAD\n","# --------------------------------------------------------------\n","device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n","\n","clip_model, _, preprocess = open_clip.create_model_and_transforms(\n"," model_name=\"ViT-B-32\", pretrained=\"laion400m_e32\"\n",")\n","clip_model.to(device).eval()\n","\n","# ---- aesthetic head ------------------------------------------------\n","AESTHETIC_URL = \"https://github.com/LAION-AI/aesthetic-predictor/raw/main/sa_0_4_vit_b_32_linear.pth\"\n","ckpt_path = \"/content/laion_aesthetic_vit_b_32.pth\"\n","if not os.path.exists(ckpt_path):\n"," print(\"Downloading aesthetic weights …\")\n"," r = requests.get(AESTHETIC_URL); r.raise_for_status()\n"," open(ckpt_path, 'wb').write(r.content)\n","\n","aesthetic_head = torch.nn.Linear(512, 1).to(device)\n","aesthetic_head.load_state_dict(torch.load(ckpt_path, map_location=device))\n","aesthetic_head.eval()\n","print(\"CLIP + aesthetic head ready\")"],"metadata":{"id":"-0QOGVMY_Pwa"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# --------------------------------------------------------------\n","# 4. SCORE ALL IMAGES\n","# --------------------------------------------------------------\n","image_paths = glob.glob(f\"{EXTRACT_TO}/**/*.*\", recursive=True)\n","image_paths = [p for p in image_paths\n"," if p.lower().split('.')[-1] in {'png','jpg','jpeg','bmp','webp'}]\n","\n","results = []\n","with torch.no_grad():\n"," for p in tqdm(image_paths, desc=\"Scoring\"):\n"," try:\n"," img = Image.open(p).convert(\"RGB\")\n"," x = preprocess(img).unsqueeze(0).to(device)\n"," emb = clip_model.encode_image(x) # (1,512)\n","\n"," logit = aesthetic_head(emb)\n"," score = torch.sigmoid(logit).item() * 10.0 # 0-10 scale\n","\n"," results.append({'path': p, 'score': score})\n"," except Exception as e:\n"," print(f\"Skip {p}: {e}\")\n","\n","df = pd.DataFrame(results)\n","df = df.sort_values('score', ascending=False).reset_index(drop=True)\n","print(f\"Scored {len(df)} images | best {df['score'].iloc[0]:.2f} | worst {df['score'].iloc[-1]:.2f}\")"],"metadata":{"id":"O3IiHdvX_QsW"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# --------------------------------------------------------------\n","# 5. 
DEDUPLICATION (file-size + CLIP similarity)\n","# --------------------------------------------------------------\n","@torch.no_grad()\n","def get_clip_embedding(p):\n"," try:\n"," img = Image.open(p).convert(\"RGB\")\n"," x = preprocess(img).unsqueeze(0).to(device)\n"," emb = clip_model.encode_image(x)\n"," emb = emb / emb.norm(dim=-1, keepdim=True)\n"," return emb.cpu().numpy().flatten()\n"," except Exception:\n"," return None\n","\n","# ---- group by approximate size ------------------------------------\n","size_groups = {}\n","for p in df['path']:\n"," sz = os.path.getsize(p)\n"," key = round(sz * (1 / (1 + FILESIZE_TOLERANCE)))\n"," size_groups.setdefault(key, []).append(p)\n","\n","kept_paths = []\n","for key, group in tqdm(size_groups.items(), desc=\"Deduplicate groups\"):\n"," if len(group) == 1:\n"," kept_paths.append(group[0])\n"," continue\n","\n"," # compute embeddings only for this small group\n"," embeddings = []\n"," valid_paths = []\n"," for p in group:\n"," emb = get_clip_embedding(p)\n"," if emb is not None:\n"," embeddings.append(emb)\n"," valid_paths.append(p)\n","\n"," if len(embeddings) <= 1:\n"," kept_paths.extend(valid_paths)\n"," continue\n","\n"," embeddings = np.stack(embeddings)\n"," sim = cosine_similarity(embeddings)\n","\n"," keep = [True] * len(valid_paths)\n"," for i in range(len(valid_paths)):\n"," if not keep[i]: continue\n"," for j in range(i+1, len(valid_paths)):\n"," if sim[i, j] >= SIMILARITY_THRESHOLD:\n"," keep[j] = False\n","\n"," for idx, k in enumerate(keep):\n"," if k:\n"," kept_paths.append(valid_paths[idx])\n","\n","# ---- filter original dataframe ------------------------------------\n","df_clean = df[df['path'].isin(kept_paths)].copy()\n","df_clean = df_clean.sort_values('score', ascending=False).reset_index(drop=True)\n","print(f\"After deduplication: {len(df_clean)} images (removed {len(df)-len(df_clean)} duplicates)\")"],"metadata":{"id":"mAhkH_da_Q8G"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# --------------------------------------------------------------\n","# 6. PACK CLEANED IMAGES INTO ≤300 MiB ZIPs\n","# --------------------------------------------------------------\n","temp_dir = '/content/zip_temp'\n","os.makedirs(temp_dir, exist_ok=True)\n","\n","current_files = [] # (temp_path, arcname)\n","current_size = 0\n","zip_idx = 1\n","\n","def finish_zip():\n"," global zip_idx, current_size, current_files\n"," if not current_files: return\n"," zip_path = f\"{OUT_DIR}/mia_panels_part_{zip_idx:03d}.zip\"\n"," with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as z:\n"," for src, arc in current_files:\n"," z.write(src, arc)\n"," mb = os.path.getsize(zip_path) / 1e6\n"," print(f\"Saved {zip_path} ({len(current_files)} files, {mb:.1f} MB)\")\n"," for src, _ in current_files:\n"," os.remove(src)\n"," current_files = []\n"," current_size = 0\n"," zip_idx += 1\n","\n","for idx, row in enumerate(tqdm(df_clean.itertuples(), total=len(df_clean), desc=\"Packing\")):\n"," src = row.path\n"," rank = idx + 1\n"," ext = os.path.splitext(src)[1].lower()\n"," tmp = f\"{temp_dir}/{rank}{ext}\"\n"," shutil.copyfile(src, tmp)\n","\n"," fsize = os.path.getsize(tmp)\n"," if current_size + fsize > MAX_ZIP_BYTES and current_files:\n"," finish_zip()\n","\n"," current_files.append((tmp, f\"{rank}{ext}\"))\n"," current_size += fsize\n","\n","finish_zip()\n","shutil.rmtree(temp_dir, ignore_errors=True)\n","\n","# --------------------------------------------------------------\n","# 7. 
SAVE SCORE CSV (only the kept images)\n","# --------------------------------------------------------------\n","df_clean.to_csv(f\"{OUT_DIR}/aesthetic_scores_clean.csv\", index=False)\n","print(\"\\nAll done! Cleaned ZIPs + CSV are in:\", OUT_DIR)"],"metadata":{"id":"sWx4fBSe_hSI"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["##Sort images into folders using CLIP image feature extraction"],"metadata":{"id":"haUt4RKmRbjQ"}},{"cell_type":"code","execution_count":null,"metadata":{"id":"59Tf9llpSGoz"},"outputs":[],"source":["#@markdown Unzip training data from drive to /content/ (if required)\n","path = '/content/drive/MyDrive/training_data_66708.zip' #@param {type:'string'}\n","\n","%cd /content/\n","!unzip {path}"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"WncaEzzGiaO2"},"outputs":[],"source":["!pip install ftfy regex tqdm\n","!pip install git+https://github.com/openai/CLIP.git\n","!pip install scikit-learn matplotlib pillow umap-learn # UMAP is optional for 2D visualization"]},{"cell_type":"markdown","metadata":{"id":"EnqyKHcOilVA"},"source":["Load Images and Extract CLIP Embeddings\n","\n","Upload your images the normal way ( `/content/`) prior to running this cell.\n","\n","This code loads all images (supports JPG, PNG, etc.), preprocesses them, and extracts 512-dimensional embeddings using the ViT-B/32 CLIP model."]},{"cell_type":"code","execution_count":null,"metadata":{"id":"XCKB0QeiJmIG"},"outputs":[],"source":["!pip install open_clip_torch\n","\n","import os\n","import numpy as np\n","import torch\n","import open_clip\n","from PIL import Image\n","\n","# Configuration\n","image_dir = '/content/' #@param {type:'string'}\n","device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n","model_name = \"ViT-B-32\" # Available per error message\n","pretrained = \"laion400m_e32\" # Robust pretrained weights\n","\n","# Load OpenCLIP model\n","model, _, preprocess = open_clip.create_model_and_transforms(model_name, pretrained=pretrained)\n","model.to(device)\n","model.eval()\n","\n","# Load images and extract embeddings\n","embeddings = []\n","image_paths = []\n","image_names = []\n","\n","# Recursively find images in subdirectories, excluding /content/drive/\n","for root, _, files in os.walk(image_dir):\n"," if '/drive/' in root: # Explicitly skip any directory containing '/drive/'\n"," continue\n"," for filename in files:\n"," if filename.lower().endswith(('.png', '.jpg', '.jpeg', '.gif', '.bmp')):\n"," img_path = os.path.join(root, filename)\n"," try:\n"," image = preprocess(Image.open(img_path)).unsqueeze(0).to(device)\n"," with torch.no_grad():\n"," embedding = model.encode_image(image)\n"," embeddings.append(embedding.cpu().numpy().flatten())\n"," image_paths.append(img_path)\n"," image_names.append(filename)\n"," #print(f\"Processed: {filename}\")\n"," except Exception as e:\n"," print(f\"Error processing {filename}: {e}\")\n","\n","embeddings = np.array(embeddings)\n","print(f\"Extracted embeddings for {len(embeddings)} images. Shape: {embeddings.shape}\")"]},{"cell_type":"markdown","metadata":{"id":"HQsc2r-ii6cK"},"source":["Perform Clustering\n","We'll use K-Means clustering on the embeddings. You can choose the number of clusters (`n_clusters`) based on your dataset size (e.g., try 5-10). 
We'll also compute the silhouette score to evaluate cluster quality (higher is better).\n","\n","For visualization, we'll optionally reduce dimensions to 2D using UMAP."]},{"cell_type":"code","execution_count":null,"metadata":{"id":"WM9wug70jCtR"},"outputs":[],"source":["from umap import UMAP # For 2D projection (optional)\n","import os\n","import numpy as np\n","import torch\n","import clip\n","from PIL import Image\n","import matplotlib.pyplot as plt\n","from sklearn.cluster import KMeans\n","from sklearn.metrics import silhouette_score\n","import warnings\n","warnings.filterwarnings('ignore')\n","#@markdown Choose number of clusters (experiment with this)\n","n_clusters = 100 # @param {type:'slider' , min:1 , max:200, step:1}\n","\n","# Perform K-Means clustering\n","kmeans = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)\n","cluster_labels = kmeans.fit_predict(embeddings)\n","\n","# Evaluate clustering quality\n","sil_score = silhouette_score(embeddings, cluster_labels)\n","print(f\"Silhouette Score: {sil_score:.3f} (closer to 1 is better)\")\n","\n","# Optional: 2D visualization with UMAP\n","reducer = UMAP(random_state=42, n_components=2)\n","embed_2d = reducer.fit_transform(embeddings)\n","\n","plt.figure(figsize=(10, 8))\n","scatter = plt.scatter(embed_2d[:, 0], embed_2d[:, 1], c=cluster_labels, cmap='tab10', s=50)\n","plt.colorbar(scatter)\n","plt.title(f'2D UMAP Projection of CLIP Embeddings (K={n_clusters} Clusters)')\n","plt.xlabel('UMAP 1')\n","plt.ylabel('UMAP 2')\n","plt.show()"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"1fMT3PmCOSyh"},"outputs":[],"source":["import shutil\n","import os\n","from PIL import Image\n","\n","# Create output directories\n","output_dir = '/content/clusters' # Output base directory\n","os.makedirs(output_dir, exist_ok=True)\n","\n","move_files = False # Set to True to move files, False to copy\n","\n","# Create directories for each cluster\n","for i in range(n_clusters):\n"," cluster_dir = os.path.join(output_dir, f'cluster_{i}')\n"," os.makedirs(cluster_dir, exist_ok=True)\n","\n","# Form inputs using Colab's # @param\n","output_format = \"JPEG\" # @param [\"JPEG\", \"PNG\", \"WEBP\"]\n","quality = 100 # @param {type:\"slider\", min:0, max:100, step:1}\n","\n","# Function to convert and save images\n","for idx, label in enumerate(cluster_labels):\n"," src_path = image_paths[idx] # Use full path\n"," # Create destination filename with selected extension\n"," dst_filename = os.path.splitext(image_names[idx])[0] + f'.{output_format.lower()}'\n"," dst_path = os.path.join(output_dir, f'cluster_{label}', dst_filename)\n","\n"," try:\n"," # Open and convert image\n"," with Image.open(src_path).convert('RGB') as img:\n"," if output_format == 'JPEG':\n"," img.save(dst_path, 'JPEG', quality=quality, optimize=True)\n"," elif output_format == 'PNG':\n"," # PNG compression: 0 (max compression) to 9 (no compression)\n"," # Map quality 0-100 to PNG compression 9-0\n"," png_compression = int(9 - (quality / 100 * 9))\n"," img.save(dst_path, 'PNG', compress_level=png_compression)\n"," elif output_format == 'WEBP':\n"," img.save(dst_path, 'WEBP', quality=quality)\n","\n"," if move_files:\n"," os.remove(src_path) # Delete original if moving\n"," print(f\"Assigned {image_names[idx]} as {dst_filename} to cluster_{label}\")\n"," except Exception as e:\n"," print(f\"Error converting {image_names[idx]} to {output_format}: {e}\")\n","\n","print(f\"Images sorted into {n_clusters} clusters in '{output_dir}' as 
.{output_format.lower()}\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"ooH5mMsDjUYs"},"outputs":[],"source":["from PIL import Image\n","import matplotlib.pyplot as plt\n","import os\n","\n","def display_cluster_samples(cluster_dir, n_samples=3):\n"," # Check if cluster directory exists\n"," if not os.path.exists(cluster_dir):\n"," print(f\"Cluster directory {cluster_dir} does not exist\")\n"," return\n","\n"," # Updated to include .jpg, .png, and .webp files\n"," images = [f for f in os.listdir(cluster_dir) if f.lower().endswith(('.jpg', '.jpeg', '.png', '.webp'))][:n_samples]\n"," if not images:\n"," print(f\"No images in {cluster_dir}\")\n"," return\n","\n"," fig, axs = plt.subplots(1, len(images), figsize=(5 * len(images), 5))\n"," if len(images) == 1:\n"," axs = [axs]\n"," for j, img_file in enumerate(images):\n"," img_path = os.path.join(cluster_dir, img_file)\n"," try:\n"," img = Image.open(img_path).convert('RGB') # Ensure RGB for display\n"," axs[j].imshow(img)\n"," axs[j].set_title(img_file)\n"," axs[j].axis('off')\n"," except Exception as e:\n"," print(f\"Error displaying {img_file}: {e}\")\n"," plt.show()\n","\n","# Create output directories\n","output_dir = '/content/clusters' # Output base directory\n","\n","# Check if output directory exists\n","if not os.path.exists(output_dir):\n"," print(f\"Output directory {output_dir} does not exist\")\n","else:\n"," # Display samples from each cluster\n"," for i in range(n_clusters): # Ensure n_clusters is defined\n"," cluster_dir = os.path.join(output_dir, f'cluster_{i}')\n"," print(f\"\\nSamples from Cluster {i}:\")\n"," display_cluster_samples(cluster_dir)"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"fBcPNLh9jeZ7","colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"status":"ok","timestamp":1761653969025,"user_tz":-60,"elapsed":63,"user":{"displayName":"","userId":""}},"outputId":"93b7fd9b-0e83-4098-e8ff-b0fe3aa1779b"},"outputs":[{"output_type":"stream","name":"stdout","text":["/content\n"]}],"source":["import shutil\n","%cd /content/\n","#@markdown Remove cluster_N. You can set multiple indices at once 1,4,5,9,...\n","nums ='1,5,9,12,16,18,20,22,31,33,34,36,37,38,53,58,60,62,66,71,74,79,85,87,88,92,93,95,98'#@param {type:'string'}\n","\n","for num in nums.split(','):\n"," if num.strip() == '': continue\n"," shutil.rmtree(f'/content/clusters/cluster_{num.strip()}')"]},{"cell_type":"markdown","metadata":{"id":"aWSOgPj5jLLI"},"source":["Sort Images into Clusters\n","This creates subdirectories for each cluster and moves/copies the images there. Set `move_files=True` to move (or False to copy)."]},{"cell_type":"markdown","metadata":{"id":"Tg_q68KnjUb5"},"source":["Visualize Sample Images per Cluster\n","Display a few sample images from each cluster to inspect the results."]},{"cell_type":"code","execution_count":null,"metadata":{"id":"w2Gzortz0NuD"},"outputs":[],"source":["#@markdown Upload to Google Drive as .zip folder (Be mindful of Google Drive Terms of Service)\n","drive_folder_name = 'hellsing_clusters' # @param {type:'string'}\n","\n","%cd /content/\n","!zip -r /content/drive/MyDrive/{drive_folder_name}.zip {output_dir}\n","\n"]},{"cell_type":"markdown","source":["##Comic/Manga Panel Extract from images using Deep Panel trained model"],"metadata":{"id":"zE6pTnHnTux6"}},{"cell_type":"markdown","metadata":{"id":"anu5LZ5rEFTy"},"source":["https://huggingface.co/datasets/codeShare/lora-training-data/tree/main/Made%20In%20Abyss\n","\n","Step 2: Do mangapanel sorting. 
You need to download the keras model off the hf repo for this :"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"lMJtrtME8IVE"},"outputs":[],"source":["# Cell 1: Unzip the new dataset\n","#@markdown Unzip training data from drive to /content/ (if required)\n","path = '/content/drive/MyDrive/hellsing_clusters.zip' #@param {type:'string'}\n","\n","%cd /content/\n","!unzip {path}\n","\n","# Cell 2: Setup and panel extraction\n","# Install required dependencies\n","!pip install tensorflow\n","!pip install opencv-python-headless\n","!pip install numpy\n","!pip install tqdm\n","!pip install datasets # For Hugging Face dataset\n","\n","# Clone the DeepPanel repository (for any required utilities)\n","!git clone https://github.com/pedrovgs/DeepPanel.git\n","%cd DeepPanel\n","\n","# Import necessary libraries\n","import os\n","import cv2\n","import numpy as np\n","import tensorflow as tf\n","from tqdm import tqdm\n","from google.colab import drive\n","from datasets import Dataset\n","import pandas as pd\n","import zipfile\n","\n","# Mount Google Drive\n","drive.mount('/content/drive')\n","\n","# Verify dataset structure\n","!ls /content/content\n","!ls /content/content/clusters\n","# Verify a sample cluster (uncomment if needed for debugging)\n","# !ls /content/content/clusters/cluster_1\n","\n","# Define paths\n","model_path = '/content/drive/MyDrive/deeppanel_model.keras' # Updated to .keras\n","clusters_path = '/content/content/clusters'\n","output_dir = '/content/extracted_panels'\n","zip_output_path = '/content/extracted_panels.zip'\n","\n","# Create output directory\n","os.makedirs(output_dir, exist_ok=True)\n","\n","# Load the trained model\n","model = tf.keras.models.load_model(model_path)\n","\n","# Function to preprocess image for model input\n","def preprocess_image(image_path, target_size=(256, 256)):\n"," image = cv2.imread(image_path)\n"," if image is None:\n"," raise ValueError(f\"Failed to load image: {image_path}\")\n"," image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n"," original_shape = image.shape[:2]\n"," image = cv2.resize(image, target_size)\n"," image = image / 255.0 # Normalize to [0, 1]\n"," return image, original_shape\n","\n","# Function to post-process mask and extract panels\n","def extract_panels(image_path, mask, original_shape, output_dir, cluster_name, image_idx):\n"," # Resize mask to original image size\n"," mask = cv2.resize(mask, (original_shape[1], original_shape[0]), interpolation=cv2.INTER_NEAREST)\n"," mask = (mask > 0.5).astype(np.uint8) * 255 # Threshold to binary mask\n","\n"," # Find contours\n"," contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n","\n"," # Load original image for cropping\n"," original_image = cv2.imread(image_path)\n"," if original_image is None:\n"," raise ValueError(f\"Failed to load original image: {image_path}\")\n","\n"," # Create output directory for this cluster\n"," cluster_output_dir = os.path.join(output_dir, cluster_name)\n"," os.makedirs(cluster_output_dir, exist_ok=True)\n","\n"," # Extract panels\n"," for i, contour in enumerate(contours):\n"," # Get bounding box for each contour\n"," x, y, w, h = cv2.boundingRect(contour)\n"," # Skip small contours (optional, adjust threshold as needed)\n"," if w * h < 1000: # Ignore small regions (e.g., noise)\n"," continue\n"," # Crop panel from original image\n"," panel = original_image[y:y+h, x:x+w]\n"," # Save panel\n"," panel_path = os.path.join(cluster_output_dir, f'panel_{image_idx}_{i}.jpg')\n"," cv2.imwrite(panel_path, 
panel)\n","\n","# Option 1: Process images directly (if dataset is small)\n","def process_images_directly(clusters_path, model, output_dir):\n"," cluster_folders = [f for f in os.listdir(clusters_path) if f.startswith('cluster_')]\n","\n"," for cluster_name in tqdm(cluster_folders, desc=\"Processing clusters\"):\n"," cluster_path = os.path.join(clusters_path, cluster_name)\n"," image_files = [f for f in os.listdir(cluster_path) if f.endswith(('.jpg', '.jpeg'))]\n","\n"," for idx, image_file in enumerate(tqdm(image_files, desc=f\"Processing {cluster_name}\")):\n"," image_path = os.path.join(cluster_path, image_file)\n"," try:\n"," # Preprocess image\n"," image, original_shape = preprocess_image(image_path)\n"," # Predict mask\n"," mask = model.predict(np.expand_dims(image, axis=0), verbose=0)[0]\n"," if mask.shape[-1] > 1: # Handle multi-class masks\n"," mask = np.argmax(mask, axis=-1) # Convert to single-channel\n"," else:\n"," mask = mask[..., 0] # Binary mask\n"," # Extract and save panels\n"," extract_panels(image_path, mask, original_shape, output_dir, cluster_name, idx)\n"," except Exception as e:\n"," print(f\"Error processing {image_path}: {e}\")\n","\n","# Option 2: Create Hugging Face dataset (for large datasets)\n","def create_hf_dataset(clusters_path):\n"," data = []\n"," cluster_folders = [f for f in os.listdir(clusters_path) if f.startswith('cluster_')]\n","\n"," for cluster_name in cluster_folders:\n"," cluster_path = os.path.join(clusters_path, cluster_name)\n"," image_files = [f for f in os.listdir(cluster_path) if f.endswith(('.jpg', '.jpeg'))]\n"," for image_file in image_files:\n"," data.append({\n"," 'image_path': os.path.join(cluster_path, image_file),\n"," 'cluster': cluster_name\n"," })\n","\n"," df = pd.DataFrame(data)\n"," dataset = Dataset.from_pandas(df)\n"," return dataset\n","\n","# Choose processing method\n","use_hf_dataset = False # Set to True for large datasets\n","\n","if use_hf_dataset:\n"," # Process using Hugging Face dataset\n"," dataset = create_hf_dataset(clusters_path)\n"," for example in tqdm(dataset, desc=\"Processing images\"):\n"," image_path = example['image_path']\n"," cluster_name = example['cluster']\n"," try:\n"," image, original_shape = preprocess_image(image_path)\n"," mask = model.predict(np.expand_dims(image, axis=0), verbose=0)[0]\n"," if mask.shape[-1] > 1:\n"," mask = np.argmax(mask, axis=-1)\n"," else:\n"," mask = mask[..., 0]\n"," extract_panels(image_path, mask, original_shape, output_dir, cluster_name, example['index'])\n"," except Exception as e:\n"," print(f\"Error processing {image_path}: {e}\")\n","else:\n"," # Process directly\n"," process_images_directly(clusters_path, model, output_dir)\n","\n","# Cell 3: Zip and save extracted panels to Google Drive\n","def zip_directory(directory, zip_path):\n"," with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:\n"," for root, _, files in os.walk(directory):\n"," for file in files:\n"," file_path = os.path.join(root, file)\n"," arcname = os.path.relpath(file_path, directory)\n"," zipf.write(file_path, os.path.join('extracted_panels', arcname))\n","\n","# Create zip file\n","#zip_directory(output_dir, zip_output_path)\n","#print(f\"Extracted panels saved to {output_dir}\")\n","#print(f\"Zipped panels saved to {zip_output_path}\")\n","\n","# Optional: Debug a sample prediction\n","sample_cluster = [f for f in os.listdir(clusters_path) if f.startswith('cluster_')][0]\n","sample_image = [f for f in os.listdir(os.path.join(clusters_path, sample_cluster)) if 
f.endswith(('.jpg', '.jpeg'))][0]\n","sample_image_path = os.path.join(clusters_path, sample_cluster, sample_image)\n","sample_image, original_shape = preprocess_image(sample_image_path)\n","sample_mask = model.predict(np.expand_dims(sample_image, axis=0), verbose=0)[0]\n","if sample_mask.shape[-1] > 1:\n"," sample_mask = np.argmax(sample_mask, axis=-1)\n","else:\n"," sample_mask = sample_mask[..., 0]\n","cv2.imwrite('/content/sample_pred_mask.png', sample_mask * 255)\n","print(\"Sample predicted mask saved to /content/sample_pred_mask.png\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"MR7C33klYD3E"},"outputs":[],"source":["import os\n","import zipfile\n","import random\n","from pathlib import Path\n","\n","#@markdown Create a numbered set for further clustering\n","\n","# Define the root folder containing subfolders with images\n","root_folder = \"/content/extracted_panels\"\n","#/content/drive/MyDrive/extracted_panels\"\n","\n","# Generate a random 5-digit number\n","random_number = f\"{random.randint(0, 99999):05d}\"\n","\n","# Define the output zip file path\n","zip_file_name = f\"/content/drive/MyDrive/training_data_{random_number}.zip\"\n","\n","# Create a zip file\n","with zipfile.ZipFile(zip_file_name, 'w', zipfile.ZIP_DEFLATED) as zipf:\n"," # Walk through all subfolders in the root folder\n"," for subfolder in Path(root_folder).glob(\"cluster_*\"):\n"," if subfolder.is_dir():\n"," # Iterate through all files in the subfolder\n"," for file_path in subfolder.glob(\"*\"):\n"," if file_path.is_file() and file_path.suffix.lower() in ('.jpg', '.jpeg', '.png', '.bmp', '.gif'):\n"," # Add the image to the zip file, preserving the subfolder structure\n"," zipf.write(file_path, arcname=file_path.relative_to(root_folder))\n","\n","print(f\"Images zipped successfully into {zip_file_name}\")\n"]},{"cell_type":"markdown","metadata":{"id":"Wu5UTloaE2S0"},"source":["Go back to top of notebook and repeat clustering with panels to see results ⬆"]},{"cell_type":"markdown","metadata":{"id":"6FsMtyMkFQBH"},"source":["#Training Deep Panel finetune for specific task"]},{"cell_type":"markdown","metadata":{"id":"UPzIJ402VVMd"},"source":["Train a Deep Panel Model\n","\n","Refer to https://github.com/pedrovgs/DeepPanel in how to build a dataset"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"JFtsOMxP6dJ7"},"outputs":[],"source":["# Install required dependencies\n","!pip install tensorflow\n","!pip install opencv-python-headless\n","!pip install numpy\n","!pip install tqdm\n","\n","# Clone the DeepPanel repository\n","!git clone https://github.com/pedrovgs/DeepPanel.git\n","%cd DeepPanel\n","\n","# Import necessary libraries\n","import os\n","import zipfile\n","from google.colab import drive\n","import tensorflow as tf\n","import cv2\n","import numpy as np\n","from tqdm import tqdm\n","\n","# Mount Google Drive\n","drive.mount('/content/drive')\n","\n","# Unzip the dataset\n","dataset_path = '/content/drive/MyDrive/mia_panel_dataset.zip'\n","extract_path = '/content/mia_panel_dataset'\n","\n","with zipfile.ZipFile(dataset_path, 'r') as zip_ref:\n"," zip_ref.extractall(extract_path)\n","\n","# Remove __MACOSX folder if it exists\n","!rm -rf /content/mia_panel_dataset/__MACOSX\n","\n","# Verify dataset structure\n","!ls /content/mia_panel_dataset\n","#!ls /content/mia_panel_dataset\n","# Verify number of files in training folders\n","!echo \"Training raw images:\"\n","!ls -l /content/mia_panel_dataset/training/raw | wc -l\n","!echo \"Training masks:\"\n","!ls -l 
/content/mia_panel_dataset/training/segmentation_mask | wc -l\n","# Verify subfolder structure (uncomment if needed for debugging)\n","# !ls /content/mia_panel_dataset/mia_panel_dataset/test\n","# !ls /content/mia_panel_dataset/mia_panel_dataset/training\n","\n","# Create necessary directories for model checkpoints\n","os.makedirs('checkpoints', exist_ok=True)\n","\n","# Define dataset paths (updated for nested mia_panel_dataset folder)\n","train_raw_path = '/content/mia_panel_dataset/training/raw'\n","train_mask_path = '/content/mia_panel_dataset/training/segmentation_mask'\n","test_raw_path = '/content/mia_panel_dataset/test/raw'\n","test_mask_path = '/content/mia_panel_dataset/test/segmentation_mask'\n","\n","# Define configuration\n","class Config:\n"," INPUT_SHAPE = (256, 256, 3) # Adjust based on your image size\n"," BATCH_SIZE = 5\n"," EPOCHS = 200\n"," LEARNING_RATE = 1e-4\n"," MODEL_PATH = 'checkpoints/model.keras' # Updated to .keras format\n","\n","# Custom data loader\n","def load_image_and_mask(image_path, mask_path, target_size):\n"," image = tf.io.read_file(image_path)\n"," image = tf.image.decode_png(image, channels=3)\n"," image = tf.image.resize(image, target_size[:2])\n"," image = image / 255.0 # Normalize to [0, 1]\n","\n"," mask = tf.io.read_file(mask_path)\n"," mask = tf.image.decode_png(mask, channels=1)\n"," mask = tf.image.resize(mask, target_size[:2], method='nearest')\n"," mask = tf.cast(mask, tf.float32)\n"," # Normalize mask to [0, 1] for binary segmentation\n"," mask = mask / tf.reduce_max(mask) # Ensure mask values are [0, 1]\n"," mask = tf.where(mask > 0.5, 1.0, 0.0) # Binarize mask\n","\n"," return image, mask\n","\n","def create_dataset(raw_path, mask_path, batch_size, input_shape, is_train=True):\n"," image_files = sorted([os.path.join(raw_path, f) for f in os.listdir(raw_path) if f.endswith(('.png', '.jpg', '.jpeg'))])\n"," mask_files = sorted([os.path.join(mask_path, f) for f in os.listdir(mask_path) if f.endswith(('.png', '.jpg', '.jpeg'))])\n","\n"," # Ensure matching pairs\n"," print(f\"Found {len(image_files)} images and {len(mask_files)} masks\")\n"," assert len(image_files) == len(mask_files), \"Number of images and masks must match\"\n"," assert len(image_files) > 0, \"No images found in dataset\"\n","\n"," dataset = tf.data.Dataset.from_tensor_slices((image_files, mask_files))\n"," dataset = dataset.map(\n"," lambda x, y: load_image_and_mask(x, y, input_shape),\n"," num_parallel_calls=tf.data.AUTOTUNE\n"," )\n","\n"," if is_train:\n"," dataset = dataset.shuffle(buffer_size=1000)\n","\n"," dataset = dataset.batch(batch_size).prefetch(tf.data.AUTOTUNE)\n"," return dataset\n","\n","# Create datasets\n","train_dataset = create_dataset(\n"," train_raw_path, train_mask_path, Config.BATCH_SIZE, Config.INPUT_SHAPE, is_train=True\n",")\n","test_dataset = create_dataset(\n"," test_raw_path, test_mask_path, Config.BATCH_SIZE, Config.INPUT_SHAPE, is_train=False\n",")\n","\n","# Inspect a sample mask to verify format (optional debugging)\n","sample_image, sample_mask = next(iter(train_dataset))\n","print(f\"Sample mask shape: {sample_mask.shape}, min: {tf.reduce_min(sample_mask)}, max: {tf.reduce_max(sample_mask)}\")\n","\n","# Define the model (simplified U-Net inspired by DeepPanel's segmentation goal)\n","def build_model(input_shape):\n"," inputs = tf.keras.Input(shape=input_shape)\n","\n"," # Encoder\n"," c1 = tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu')(inputs)\n"," c1 = tf.keras.layers.Conv2D(64, 3, padding='same', 
activation='relu')(c1)\n"," p1 = tf.keras.layers.MaxPooling2D()(c1)\n","\n"," c2 = tf.keras.layers.Conv2D(128, 3, padding='same', activation='relu')(p1)\n"," c2 = tf.keras.layers.Conv2D(128, 3, padding='same', activation='relu')(c2)\n"," p2 = tf.keras.layers.MaxPooling2D()(c2)\n","\n"," # Bottleneck\n"," b = tf.keras.layers.Conv2D(256, 3, padding='same', activation='relu')(p2)\n"," b = tf.keras.layers.Conv2D(256, 3, padding='same', activation='relu')(b)\n","\n"," # Decoder\n"," u1 = tf.keras.layers.Conv2DTranspose(128, 2, strides=2, padding='same')(b)\n"," u1 = tf.keras.layers.Concatenate()([u1, c2])\n"," c3 = tf.keras.layers.Conv2D(128, 3, padding='same', activation='relu')(u1)\n"," c3 = tf.keras.layers.Conv2D(128, 3, padding='same', activation='relu')(c3)\n","\n"," u2 = tf.keras.layers.Conv2DTranspose(64, 2, strides=2, padding='same')(c3)\n"," u2 = tf.keras.layers.Concatenate()([u2, c1])\n"," c4 = tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu')(u2)\n"," c4 = tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu')(c4)\n","\n"," outputs = tf.keras.layers.Conv2D(1, 1, activation='sigmoid')(c4) # Binary segmentation\n"," model = tf.keras.Model(inputs, outputs)\n"," return model\n","\n","# Build and compile model\n","model = build_model(Config.INPUT_SHAPE)\n","model.compile(\n"," optimizer=tf.keras.optimizers.Adam(learning_rate=Config.LEARNING_RATE),\n"," loss='binary_crossentropy', # Adjust if masks are multi-class\n"," metrics=['accuracy']\n",")\n","\n","# Train the model\n","history = model.fit(\n"," train_dataset,\n"," validation_data=test_dataset,\n"," epochs=Config.EPOCHS,\n"," callbacks=[\n"," tf.keras.callbacks.ModelCheckpoint(\n"," Config.MODEL_PATH, save_best_only=True, monitor='val_loss'\n"," )\n"," ]\n",")\n","\n","# Save the trained model to Google Drive\n","!cp checkpoints/model.keras /content/drive/MyDrive/deeppanel_model.keras"]}],"metadata":{"colab":{"provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Comic_Panel_sorter.ipynb","timestamp":1761657960208},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Comic_Panel_sorter.ipynb","timestamp":1761655398456},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Comic Panel sorter.ipynb","timestamp":1761581042008},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/CLIP_B32_finetune_cluster.ipynb","timestamp":1761516682984},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/CLIP_B32_finetune_cluster.ipynb","timestamp":1761514713164},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/CLIP_B32_finetune_cluster.ipynb","timestamp":1760890109028},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/CLIP_B32_finetune_cluster.ipynb","timestamp":1760880784010},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/CLIP_B32_finetune_cluster.ipynb","timestamp":1760509652530},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/CLIP_B32_finetune_cluster.ipynb","timestamp":1760371508137},{"file_id":"1wufnt5hqKHLuoX9wDdyzarENUjOO_s3N","timestamp":1760363981901},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/CLIP_cluster.ipynb","timestamp":1760363231133}],"gpuType":"T4","collapsed_sections":["5teR85z0-l_S","haUt4RKmRbjQ","zE6pTnHnTux6","6FsMtyMkFQBH"]},"kernelspec":{"display_name":"Python 
3","name":"python3"},"language_info":{"name":"python"},"accelerator":"GPU"},"nbformat":4,"nbformat_minor":0} |