{"cells":[{"cell_type":"markdown","metadata":{"id":"K-eonuB2TeQ4"},"source":["How to use:  \n","1) Fill up raw_backgrounds image on your drive folder with items you wish to process\n","2) Run Stage 0 and 1\n","You now have the created items!\n","//---//\n","3) For finding the best items among the created bunch, use Stage 2 and Stage 3 (up to preference)"]},{"cell_type":"markdown","metadata":{"id":"BpCyL0CM19cF"},"source":["##Stage 0: Process Raw images into square backgrounds"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"9JWcF9vhyDV_"},"outputs":[],"source":["# ─────────────────────────────────────────────────────────────────────────────\n","#  GOOGLE COLAB – FINAL VERSION (macOS junk skipped, radial edge bias)\n","# ─────────────────────────────────────────────────────────────────────────────\n","# @title **Crop & Pad Settings** { run: \"auto\" }\n","padding_percent = 90   # @param {type:\"slider\", min:50, max:250, step:5}\n","edge_bias       = 65   # @param {type:\"slider\", min:0,   max:100, step:5}\n","\n","# ─────────────────────────────────────────────────────────────────────────────\n","from google.colab import drive\n","drive.mount('/content/drive', force_remount=True)\n","\n","import os, random, zipfile, shutil, math\n","from pathlib import Path\n","from PIL import Image\n","\n","# ─────────────────────────────────────────────────────────────────────────────\n","RAW_DRIVE = '/content/drive/MyDrive/backgrounds_raw'\n","OUT_DRIVE = '/content/drive/MyDrive/backgrounds_cropped'\n","ZIP_OUT   = '/content/drive/MyDrive/backgrounds_cropped_squares.zip'\n","\n","FRAME_SIZE = 1024\n","STEP_SIZE  = 512\n","PAD_COLOR  = (24, 24, 24)                     # #181818\n","EXTS       = ('.jpg','.jpeg','.JPG','.JPEG','.webp','.WEBP')\n","\n","# clean previous run\n","os.makedirs(OUT_DRIVE, exist_ok=True)\n","for f in os.listdir(OUT_DRIVE): os.remove(os.path.join(OUT_DRIVE, f))\n","if os.path.exists(ZIP_OUT): os.remove(ZIP_OUT)\n","\n","target_size = int(FRAME_SIZE * (padding_percent / 100.0))\n","print(f\"Padding % = {padding_percent} β†’ scale largest side to {target_size}px\")\n","print(f\"Edge Bias = {edge_bias}% β†’ radial bias in 1024Γ—1024 canvas\")\n","\n","# ─────────────────────────────────────────────────────────────────────────────\n","zip_paths = [os.path.join(RAW_DRIVE, f) for f in os.listdir(RAW_DRIVE)\n","             if f.lower().endswith('.zip')]\n","\n","if not zip_paths:\n","    print(\"No zip files found – nothing to do.\")\n","else:\n","    print(f\"Found {len(zip_paths)} zip file(s):\")\n","    for p in zip_paths: print(\"  β€’\", os.path.basename(p))\n","\n","# ─────────────────────────────────────────────────────────────────────────────\n","def is_macos_junk(path: str) -> bool:\n","    \"\"\"Return True for __MACOSX, .DS_Store, ._AppleDouble files, etc.\"\"\"\n","    name = os.path.basename(path).lower()\n","    return (\n","        '__macosx' in path.lower() or\n","        name.startswith('._') or\n","        name == '.ds_store' or\n","        name.endswith('~')\n","    )\n","\n","def radial_pad(img, target_w, target_h, bias_percent):\n","    w, h = img.size\n","    if w == 0 or h == 0:\n","        return Image.new('RGB', (target_w, target_h), PAD_COLOR)\n","\n","    # uniform random when bias = 0\n","    if bias_percent <= 0:\n","        pad_l = random.randint(0, target_w - w)\n","        pad_t = random.randint(0, target_h - h)\n","    else:\n","        max_dist = math.hypot(target_w / 2, target_h / 2)\n","        attempts 
= 0\n","        while attempts < 1000:\n","            pad_l = random.randint(0, target_w - w)\n","            pad_t = random.randint(0, target_h - h)\n","            cx = pad_l + w / 2\n","            cy = pad_t + h / 2\n","            dist = min(cx, target_w - cx, cy, target_h - cy)\n","            prob = (1 - dist / max_dist) ** (bias_percent / 20.0)\n","            if random.random() < prob:\n","                break\n","            attempts += 1\n","        else:                               # fallback\n","            pad_l = random.randint(0, target_w - w)\n","            pad_t = random.randint(0, target_h - h)\n","\n","    canvas = Image.new('RGB', (target_w, target_h), PAD_COLOR)\n","    canvas.paste(img, (pad_l, pad_t))\n","    return canvas\n","\n","# ─────────────────────────────────────────────────────────────────────────────\n","with zipfile.ZipFile(ZIP_OUT, 'w') as master_zip:\n","    total_frames = 0\n","\n","    for zip_idx, zip_path in enumerate(zip_paths, 1):\n","        temp_dir = f'/content/raw_zip_{zip_idx}'\n","        os.makedirs(temp_dir, exist_ok=True)\n","        print(f\"\\n[{zip_idx}/{len(zip_paths)}] Extracting {os.path.basename(zip_path)}\")\n","        with zipfile.ZipFile(zip_path, 'r') as z:\n","            z.extractall(temp_dir)\n","\n","        for root, _, files in os.walk(temp_dir):\n","            for filename in files:\n","                img_path = os.path.join(root, filename)\n","\n","                # ---- SKIP macOS junk -------------------------------------------------\n","                if is_macos_junk(img_path):\n","                    continue\n","\n","                if not filename.lower().endswith(EXTS):\n","                    continue\n","\n","                try:\n","                    with Image.open(img_path) as im:\n","                        img = im.convert('RGB')\n","                        orig_w, orig_h = img.size\n","\n","                    max_side = max(orig_w, orig_h)\n","                    scale = target_size / max_side\n","                    new_w = int(orig_w * scale)\n","                    new_h = int(orig_h * scale)\n","                    img_resized = img.resize((new_w, new_h), Image.LANCZOS)\n","\n","                    print(f\"  β†’ {filename} ({orig_w}Γ—{orig_h}) β†’ {new_w}Γ—{new_h}\")\n","\n","                    # ---- PAD INTO 1024Γ—1024 ------------------------------------------------\n","                    if new_w <= FRAME_SIZE and new_h <= FRAME_SIZE:\n","                        final_img = radial_pad(img_resized, FRAME_SIZE, FRAME_SIZE, edge_bias)\n","                        name = f\"zip{zip_idx}_{Path(filename).stem}_f0.jpg\"\n","                        out_path = os.path.join(OUT_DRIVE, name)\n","                        final_img.save(out_path, \"JPEG\", quality=100)\n","                        master_zip.write(out_path, arcname=name)\n","                        total_frames += 1\n","                        print(f\"      padded β†’ 1 frame\")\n","\n","                    # ---- CROP SLIDING WINDOWS ------------------------------------------------\n","                    else:\n","                        frame_cnt = 0\n","                        for y in range(0, new_h - FRAME_SIZE + 1, STEP_SIZE):\n","                            for x in range(0, new_w - FRAME_SIZE + 1, STEP_SIZE):\n","                                crop = img_resized.crop((x, y, x + FRAME_SIZE, y + FRAME_SIZE))\n","                                name = f\"zip{zip_idx}_{Path(filename).stem}_f{frame_cnt}.jpg\"\n","            
                    out_path = os.path.join(OUT_DRIVE, name)\n","                                crop.save(out_path, \"JPEG\", quality=100)\n","                                master_zip.write(out_path, arcname=name)\n","                                frame_cnt += 1\n","                                total_frames += 1\n","                        print(f\"      cropped β†’ {frame_cnt} frame(s)\")\n","\n","                except Exception as e:\n","                    print(f\"  [ERROR] {filename}: {e}\")\n","\n","        shutil.rmtree(temp_dir, ignore_errors=True)\n","\n","print(\"\\n=== ALL DONE ===\")\n","print(f\"Total 1024Γ—1024 crops : {total_frames}\")\n","print(f\"Saved in               : {OUT_DRIVE}\")\n","print(f\"ZIP archive            : {ZIP_OUT}\")"]},{"cell_type":"markdown","metadata":{"id":"BEoxbD0jGdcT"},"source":["##Stage 1: Randomly add panels to the newly created backgrounds using rembg"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":17308,"status":"ok","timestamp":1762693356025,"user":{"displayName":"No Name","userId":"10578412414437288386"},"user_tz":-60},"id":"J39ASCBOUwWX","outputId":"da0fa471-635d-43dc-ce37-1f8e5b17151e"},"outputs":[{"name":"stdout","output_type":"stream","text":["Mounted at /content/drive\n"]}],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","colab":{"background_save":true},"id":"1fSHYCUgoycd"},"outputs":[],"source":["# @title 1. Setup & Unzip Manga Images\n","\n","!pip install rembg pillow numpy onnxruntime -q\n","\n","from google.colab import drive\n","drive.mount('/content/drive')\n","\n","import os, random, zipfile, shutil\n","from pathlib import Path\n","from PIL import Image\n","import numpy as np\n","from rembg import remove\n","\n","# === CONFIG ===\n","MANGA_ZIP = '/content/drive/MyDrive/backgrounds_cropped3.zip' #@param {type:'string'}\n","BG_FOLDER = '/content/drive/MyDrive/backgrounds_final_rounded3'  # From previous step\n","OUTPUT_FOLDER = '/content/manga_on_bg'\n","\n","# Generate a random 5-digit number for unique ZIP name\n","random_suffix = random.randint(10000, 99999)\n","ZIP_OUTPUT = f'/content/drive/MyDrive/manga_on_backgrounds_{random_suffix}.zip'\n","\n","# Unzip manga images\n","!unzip -q \"$MANGA_ZIP\" -d /content/\n","\n","# Clean & recreate output folder\n","if os.path.exists(OUTPUT_FOLDER):\n","    shutil.rmtree(OUTPUT_FOLDER)\n","os.makedirs(OUTPUT_FOLDER, exist_ok=True)\n","\n","print(\"Manga images extracted to /content/\")\n","print(f\"Backgrounds folder: {BG_FOLDER}\")\n","print(f\"Output will be saved to: {ZIP_OUTPUT}\")"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"IY13u9UqA2-D"},"outputs":[],"source":["# @title 2. Process: Place **2 Random Manga Panels** on **Random Backgrounds**\n","\n","# @markdown **Number of composite images to generate:**\n","num_images_to_create = 169 #@param {type:\"slider\", min:1, max:1000, step:1}\n","\n","\n","\n","import os, random, math, zipfile\n","import numpy as np\n","from PIL import Image\n","from rembg import remove\n","from pathlib import Path\n","\n","# ------------------------------------------------------------------\n","# 1. 
Load & SHUFFLE background images\n","# ------------------------------------------------------------------\n","all_bgs = [\n","    os.path.join(BG_FOLDER, f)\n","    for f in os.listdir(BG_FOLDER)\n","    if f.lower().endswith(('.png', '.jpg', '.jpeg'))\n","]\n","if not all_bgs:\n","    raise ValueError(f\"No background images found in {BG_FOLDER}\")\n","\n","random.shuffle(all_bgs)                     # different order every run\n","all_bgs = all_bgs[:num_images_to_create]   # respect slider\n","\n","print(f\"Will generate {len(all_bgs)} composite images (slider limit: {num_images_to_create})\")\n","\n","# ------------------------------------------------------------------\n","# 2. Gather *all* manga file paths (no processing yet)\n","# ------------------------------------------------------------------\n","manga_paths = [\n","    os.path.join('/content', f)\n","    for f in os.listdir('/content')\n","    if f.lower().endswith(('.jpg', '.jpeg', '.png'))\n","    and not f.startswith('.')\n","    and os.path.isfile(os.path.join('/content', f))\n","]\n","if len(manga_paths) < 2:\n","    raise ValueError(f\"Need at least 2 manga panels, found {len(manga_paths)}\")\n","\n","print(f\"Found {len(manga_paths)} raw manga panels (backgrounds will be removed on-the-fly)\")\n","\n","# ------------------------------------------------------------------\n","# 3. Helper: remove BG + resize + optional flip\n","# ------------------------------------------------------------------\n","def prepare_panel(panel_path, target_h, bg_w):\n","    \"\"\"Open β†’ remove BG β†’ resize to target height β†’ random flip β†’ return ready image + meta.\"\"\"\n","    with Image.open(panel_path).convert(\"RGBA\") as img:\n","        # ---- remove background ----\n","        img_np = np.array(img)\n","        nobg = remove(img_np)\n","        panel = Image.fromarray(nobg).convert(\"RGBA\")\n","\n","        # ---- resize to background height ----\n","        ratio = target_h / panel.height\n","        new_w = int(panel.width * ratio)\n","        panel = panel.resize((new_w, target_h), Image.LANCZOS)\n","\n","        # ---- random horizontal flip ----\n","        flip = random.random() < 0.5\n","        if flip:\n","            panel = panel.transpose(Image.FLIP_LEFT_RIGHT)\n","        flip_desc = \"flip\" if flip else \"noflip\"\n","\n","        # ---- decide side (left / right) ----\n","        # we will assign later when we know the other panel's width\n","        return panel, new_w, flip_desc, os.path.basename(panel_path)\n","\n","# ------------------------------------------------------------------\n","# 4. 
Create composites\n","# ------------------------------------------------------------------\n","os.makedirs(OUTPUT_FOLDER, exist_ok=True)\n","\n","with zipfile.ZipFile(ZIP_OUTPUT, 'w', zipfile.ZIP_DEFLATED) as zipf:\n","    for bg_idx, bg_path in enumerate(all_bgs, 1):\n","        bg_name = os.path.basename(bg_path)\n","        print(f\"\\nProcessing {bg_idx}/{len(all_bgs)}: {bg_name}\")\n","\n","        # ---- open background ----\n","        with Image.open(bg_path).convert(\"RGBA\") as bg_img:\n","            bg = bg_img.copy()\n","            target_h = bg.height\n","            bg_w = bg.width\n","\n","        # ---- pick 2 *different* manga files ----\n","        panel_a_path, panel_b_path = random.sample(manga_paths, k=2)\n","\n","        # ---- prepare both panels (rembg happens *here*) ----\n","        panel_a_img, a_w, a_flip, a_name = prepare_panel(panel_a_path, target_h, bg_w)\n","        panel_b_img, b_w, b_flip, b_name = prepare_panel(panel_b_path, target_h, bg_w)\n","\n","        # ---- assign sides (left / right) ----\n","        # put narrower panel on the side that gives more overlap-prevention\n","        if a_w < b_w:\n","            left_img, left_w, left_flip, left_name = panel_a_img, a_w, a_flip, a_name\n","            right_img, right_w, right_flip, right_name = panel_b_img, b_w, b_flip, b_name\n","        else:\n","            left_img, left_w, left_flip, left_name = panel_b_img, b_w, b_flip, b_name\n","            right_img, right_w, right_flip, right_name = panel_a_img, a_w, a_flip, a_name\n","\n","        # ---- paste ----\n","        result = bg.copy()\n","        result.paste(left_img,  (0, 0),               left_img)   # left side\n","        result.paste(right_img, (bg_w - right_w, 0), right_img)  # right side\n","\n","        # ---- build filename ----\n","        clean_l = Path(left_name).stem[:20]\n","        clean_r = Path(right_name).stem[:20]\n","        combined_name = f\"bg{bg_idx:03d}_{clean_l}_left_{left_flip}_AND_{clean_r}_right_{right_flip}.png\"\n","        out_path = os.path.join(OUTPUT_FOLDER, combined_name)\n","\n","        result.convert(\"RGB\").save(out_path, \"PNG\")\n","        zipf.write(out_path, combined_name)\n","\n","        print(f\"  [Saved] {combined_name}\")\n","\n","print(f\"\\nAll done! 
Generated {len(all_bgs)} composite images.\")\n","print(f\"ZIP β†’ {ZIP_OUTPUT}\")\n","print(f\"Files β†’ {OUTPUT_FOLDER}\")\n"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"1JlaBNIKODCT"},"outputs":[],"source":["from google.colab import runtime\n","runtime.unassign()\n","\n","\n"]},{"cell_type":"code","source":["# ─────────────────────────────────────────────────────────────────────────────\n","#  GOOGLE COLAB – 3-COLUMN TRIPLETS WITH EXACT CENTER-CROP FILL\n","# ─────────────────────────────────────────────────────────────────────────────\n","# @title **Triplet Generator – Exact Center-Crop Fill** { run: \"auto\" }\n","padding_to_edge         = 20   # @param {type:\"slider\", min:20, max:150, step:10}  \"Horizontal padding (left/right)\"\n","padding_to_top_bottom   = 20   # @param {type:\"slider\", min:20, max:200, step:10}  \"Vertical padding (top/bottom)\"\n","corner_radius           = 40   # @param {type:\"slider\", min:0, max:100, step:5}\n","output_count            = 10   # @param {type:\"slider\", min:1, max:50, step:1}\n","\n","# ─────────────────────────────────────────────────────────────────────────────\n","from google.colab import drive\n","drive.mount('/content/drive', force_remount=True)\n","\n","import os, random, zipfile, shutil\n","from pathlib import Path\n","from PIL import Image, ImageDraw\n","\n","# ─────────────────────────────────────────────────────────────────────────────\n","RAW_DRIVE = '/content/drive/MyDrive/backgrounds_raw'\n","TEMP_DIR  = '/content/temp_images'\n","OUT_DRIVE = '/content/drive/MyDrive/backgrounds_triplets'\n","\n","FRAME_SIZE = 1024\n","PAD_COLOR  = (24, 24, 24)  # #181818\n","EXTS       = ('.jpg','.jpeg','.JPG','.JPEG','.webp','.WEBP')\n","\n","os.makedirs(TEMP_DIR, exist_ok=True)\n","os.makedirs(OUT_DRIVE, exist_ok=True)\n","\n","# Clean previous output\n","for f in os.listdir(OUT_DRIVE):\n","    os.remove(os.path.join(OUT_DRIVE, f))\n","\n","print(f\"Extracting images from {RAW_DRIVE}...\")\n","\n","# ─────────────────────────────────────────────────────────────────────────────\n","def is_macos_junk(p: str) -> bool:\n","    name = os.path.basename(p).lower()\n","    return (\n","        '__macosx' in p.lower() or\n","        name.startswith('._') or\n","        name == '.ds_store' or\n","        name.endswith('~')\n","    )\n","\n","all_image_paths = []\n","\n","for zip_name in os.listdir(RAW_DRIVE):\n","    if not zip_name.lower().endswith('.zip'): continue\n","    zip_path = os.path.join(RAW_DRIVE, zip_name)\n","    with zipfile.ZipFile(zip_path, 'r') as z:\n","        for member in z.namelist():\n","            if any(member.lower().endswith(e) for e in EXTS) and not is_macos_junk(member):\n","                z.extract(member, TEMP_DIR)\n","                all_image_paths.append(os.path.join(TEMP_DIR, member))\n","\n","print(f\"Found {len(all_image_paths)} valid images.\")\n","if len(all_image_paths) < 3:\n","    raise ValueError(\"Need at least 3 images!\")\n","\n","# ─────────────────────────────────────────────────────────────────────────────\n","def add_rounded_corners(img: Image.Image, radius: int) -> Image.Image:\n","    \"\"\"Apply rounded corners using gray background.\"\"\"\n","    rgba = img.convert('RGBA') if img.mode != 'RGBA' else img\n","    mask = Image.new('L', rgba.size, 0)\n","    draw = ImageDraw.Draw(mask)\n","    draw.rounded_rectangle([(0,0), rgba.size], radius, fill=255)\n","    result = Image.new('RGBA', rgba.size)\n","    result.paste(rgba, (0,0))\n","    result.putalpha(mask)\n","    bg = 
Image.new('RGB', result.size, PAD_COLOR)\n","    bg.paste(result, (0,0), result)\n","    return bg\n","\n","# ─────────────────────────────────────────────────────────────────────────────\n","def center_crop_to_size(img: Image.Image, target_w: int, target_h: int) -> Image.Image:\n","    \"\"\"Center-crop image to exactly (target_w, target_h).\"\"\"\n","    w, h = img.size\n","    if w == target_w and h == target_h:\n","        return img\n","\n","    # Scale to cover target\n","    scale = max(target_w / w, target_h / h)\n","    new_w = int(w * scale)\n","    new_h = int(h * scale)\n","    img_resized = img.resize((new_w, new_h), Image.LANCZOS)\n","\n","    # Center crop\n","    left = (new_w - target_w) // 2\n","    top = (new_h - target_h) // 2\n","    right = left + target_w\n","    bottom = top + target_h\n","    return img_resized.crop((left, top, right, bottom))\n","\n","# ─────────────────────────────────────────────────────────────────────────────\n","def create_triplet(img_paths, idx):\n","    canvas = Image.new('RGB', (FRAME_SIZE, FRAME_SIZE), PAD_COLOR)\n","\n","    # === EXACT COLUMN DIMENSIONS ===\n","    col_gap = 20\n","    total_h_gaps = 2 * col_gap\n","    col_width = (FRAME_SIZE - 2 * padding_to_edge - total_h_gaps) // 3\n","    col_height = FRAME_SIZE - 2 * padding_to_top_bottom  # e.g. 1024 - 40 = 984\n","\n","    x_offsets = [\n","        padding_to_edge,\n","        padding_to_edge + col_width + col_gap,\n","        padding_to_edge + 2 * (col_width + col_gap)\n","    ]\n","\n","    for i, path in enumerate(img_paths):\n","        with Image.open(path) as im:\n","            img = im.convert('RGB')\n","\n","        # === CENTER-CROP TO EXACT COLUMN SIZE ===\n","        img_cropped = center_crop_to_size(img, col_width, col_height)\n","\n","        # === APPLY ROUNDED CORNERS ===\n","        img_rounded = add_rounded_corners(img_cropped, corner_radius)\n","\n","        # === PASTE AT EXACT POSITION (no extra offset) ===\n","        x = x_offsets[i]\n","        y = padding_to_top_bottom\n","        canvas.paste(img_rounded, (x, y))\n","\n","    return canvas\n","\n","# ─────────────────────────────────────────────────────────────────────────────\n","print(f\"Generating {output_count} triplet(s)...\")\n","for i in range(output_count):\n","    selected = random.sample(all_image_paths, 3)\n","    triplet = create_triplet(selected, i)\n","    out_path = os.path.join(OUT_DRIVE, f\"triplet_{i:03d}.jpg\")\n","    triplet.save(out_path, \"JPEG\", quality=95)\n","    print(f\"  β†’ {os.path.basename(out_path)}\")\n","\n","# Clean up\n","shutil.rmtree(TEMP_DIR, ignore_errors=True)\n","\n","print(\"\\n=== ALL DONE ===\")\n","print(f\"Generated {output_count} triplet(s)\")\n","print(f\"Column size: {((FRAME_SIZE - 2*padding_to_edge - 40)//3)} Γ— {FRAME_SIZE - 2*padding_to_top_bottom}\")\n","print(f\"Horizontal padding: {padding_to_edge}px\")\n","print(f\"Vertical padding:   {padding_to_top_bottom}px\")\n","print(f\"Column gap:         20px\")\n","print(f\"Corner radius:      {corner_radius}px\")\n","print(f\"Saved to: {OUT_DRIVE}\")\n","\n","# Show first result\n","from IPython.display import Image as IPImage, display\n","display(IPImage(filename=os.path.join(OUT_DRIVE, \"triplet_000.jpg\")))"],"metadata":{"id":"fHiSpVHt1HWv"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"MrcSq6YG1Ax7"},"source":["Copy the saved filename from above cell ⬆️ and paste in cell below for aesthetic sorting.  
\n","\n","Activate T4 for below cells for faster processing (no data is lost since output is saved to the drive in above cells , and below)."]},{"cell_type":"markdown","metadata":{"id":"TstzuP5HKBtZ"},"source":["##T4 Stuff"]},{"cell_type":"code","execution_count":null,"metadata":{"collapsed":true,"id":"ff446645"},"outputs":[],"source":["import os\n","import zipfile\n","import shutil\n","from google.colab import drive\n","\n","# Mount Google Drive (if not already mounted)\n","try:\n","    drive.mount('/content/drive')\n","except:\n","    print(\"Drive already mounted.\")\n","\n","# Define source folder and destination path\n","source_folder = '/content/manga_on_bg'\n","destination_zip = '/content/drive/MyDrive/manga_on_bg.zip'\n","\n","# Create the zip file\n","print(f\"Creating zip file from {source_folder}...\")\n","shutil.make_archive(destination_zip.replace('.zip', ''), 'zip', source_folder)\n","print(f\"Zip file created at {destination_zip}\")"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":8521,"status":"ok","timestamp":1762397559494,"user":{"displayName":"No Name","userId":"10578412414437288386"},"user_tz":-60},"id":"c8f544cc","outputId":"92c68eee-9792-4fa4-fd4f-532969b34799"},"outputs":[{"name":"stdout","output_type":"stream","text":["Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n","Creating zip file from /content/drive/MyDrive/backgrounds_cropped3...\n","Zip file created at /content/drive/MyDrive/backgrounds_cropped3.zip containing 325 images.\n"]}],"source":["import os\n","import zipfile\n","import shutil\n","from google.colab import drive\n","from PIL import Image\n","\n","# Mount Google Drive (if not already mounted)\n","try:\n","    drive.mount('/content/drive')\n","except:\n","    print(\"Drive already mounted.\")\n","\n","# Define source folder and destination zip path\n","source_folder = '/content/drive/MyDrive/backgrounds_cropped3'\n","destination_zip = '/content/drive/MyDrive/backgrounds_cropped3.zip'\n","\n","# Ensure the source folder exists\n","if not os.path.exists(source_folder):\n","    raise FileNotFoundError(f\"Source folder not found: {source_folder}\")\n","\n","# Create the zip file\n","print(f\"Creating zip file from {source_folder}...\")\n","\n","# Get a list of image files in the source folder, sorted by name\n","image_files = sorted([f for f in os.listdir(source_folder) if f.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp', '.webp'))])\n","\n","if not image_files:\n","    print(f\"No image files found in {source_folder}. Zip file will not be created.\")\n","else:\n","    with zipfile.ZipFile(destination_zip, 'w', zipfile.ZIP_DEFLATED) as zipf:\n","        for i, filename in enumerate(image_files):\n","            src_path = os.path.join(source_folder, filename)\n","            # Define the new name inside the zip (1, 2, 3, ...) 
with original extension\n","            new_name = f\"{i + 1}{os.path.splitext(filename)[1].lower()}\"\n","            try:\n","                # Use PIL to ensure the image is valid before adding to zip\n","                with Image.open(src_path) as img:\n","                    img.verify() # Verify it's an image\n","\n","                zipf.write(src_path, arcname=new_name)\n","                # print(f\"Added {filename} as {new_name} to zip\") # Optional: uncomment for detailed progress\n","            except Exception as e:\n","                print(f\"Skipping invalid or corrupted image {filename}: {e}\")\n","\n","    print(f\"Zip file created at {destination_zip} containing {len(image_files)} images.\")"]},{"cell_type":"markdown","metadata":{"id":"5teR85z0-l_S"},"source":["##Aesthetic sorting using CLIP (T4)"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"pjMqYPkO-q6q"},"outputs":[],"source":["# --------------------------------------------------------------\n","# 0. INSTALL & MOUNT\n","# --------------------------------------------------------------\n","!pip install -q open_clip_torch torch torchvision pillow tqdm pandas ftfy regex scikit-learn\n","\n","import zipfile, os, shutil, tempfile, requests, numpy as np\n","from tqdm import tqdm\n","import torch, open_clip\n","from PIL import Image\n","import glob, pandas as pd\n","from sklearn.metrics.pairwise import cosine_similarity\n","from google.colab import drive\n","\n","drive.mount('/content/drive')"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"wyfUVs9g_Bxl"},"outputs":[],"source":["# --------------------------------------------------------------\n","# 1. PARAMETERS  (Google-Colab Form UI)\n","# --------------------------------------------------------------\n","\n","#@title **Input Settings** { display-mode: \"form\" }\n","\n","ZIP_PATH = \"/content/drive/MyDrive/manga_on_backgrounds_10852.zip\" #@param {type:\"string\"}\n","EXTRACT_TO = \"/content/dbg_aesthetic_sorted\" #@param {type:\"string\"}\n","OUT_DIR = \"/content/drive/MyDrive/dbg_aesthetic_sorted\" #@param {type:\"string\"}\n","\n","#@title **Deduplication Settings** { display-mode: \"form\" }\n","\n","SIMILARITY_THRESHOLD = 0.9 #@param {type:\"slider\", min:0.8, max:1.0, step:0.005}\n","FILESIZE_TOLERANCE    = 0.2 #@param {type:\"slider\", min:0.0, max:0.4, step:0.01}\n","\n","#@title **Packing Settings** { display-mode: \"form\" }\n","\n","MAX_ZIP_BYTES = 300 #@param {type:\"slider\", min:50, max:1000, step:50}\n","MAX_ZIP_BYTES = MAX_ZIP_BYTES * 1024 * 1024   # convert MiB β†’ bytes\n","\n","# -----------------------------------------------------------------\n","# (no changes needed below – the rest of the notebook uses these vars)\n","# -----------------------------------------------------------------\n","os.makedirs(EXTRACT_TO, exist_ok=True)\n","os.makedirs(OUT_DIR,    exist_ok=True)\n","\n","print(\"Parameters loaded from the form\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"p9C4hwLF_O4v"},"outputs":[],"source":["# --------------------------------------------------------------\n","# 2. 
EXTRACT ONLY IMAGES (skip if already done)\n","# --------------------------------------------------------------\n","if not os.listdir(EXTRACT_TO):\n","    with zipfile.ZipFile(ZIP_PATH, 'r') as z:\n","        members = [m for m in z.namelist()\n","                   if m.lower().split('.')[-1] in {'png','jpg','jpeg','bmp','webp'}]\n","        for member in tqdm(members, desc='Extracting images'):\n","            z.extract(member, EXTRACT_TO)\n","else:\n","    print(\"Folder already contains files – skipping extraction.\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"-0QOGVMY_Pwa"},"outputs":[],"source":["# --------------------------------------------------------------\n","# 3. LOAD CLIP + AESTHETIC HEAD\n","# --------------------------------------------------------------\n","device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n","\n","clip_model, _, preprocess = open_clip.create_model_and_transforms(\n","    model_name=\"ViT-B-32\", pretrained=\"laion400m_e32\"\n",")\n","clip_model.to(device).eval()\n","\n","# ---- aesthetic head ------------------------------------------------\n","AESTHETIC_URL = \"https://github.com/LAION-AI/aesthetic-predictor/raw/main/sa_0_4_vit_b_32_linear.pth\"\n","ckpt_path = \"/content/laion_aesthetic_vit_b_32.pth\"\n","if not os.path.exists(ckpt_path):\n","    print(\"Downloading aesthetic weights …\")\n","    r = requests.get(AESTHETIC_URL); r.raise_for_status()\n","    open(ckpt_path, 'wb').write(r.content)\n","\n","aesthetic_head = torch.nn.Linear(512, 1).to(device)\n","aesthetic_head.load_state_dict(torch.load(ckpt_path, map_location=device))\n","aesthetic_head.eval()\n","print(\"CLIP + aesthetic head ready\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"O3IiHdvX_QsW"},"outputs":[],"source":["# --------------------------------------------------------------\n","# 4. 
SCORE ALL IMAGES\n","# --------------------------------------------------------------\n","image_paths = glob.glob(f\"{EXTRACT_TO}/**/*.*\", recursive=True)\n","image_paths = [p for p in image_paths\n","               if p.lower().split('.')[-1] in {'png','jpg','jpeg','bmp','webp'}]\n","\n","results = []\n","with torch.no_grad():\n","    for p in tqdm(image_paths, desc=\"Scoring\"):\n","        try:\n","            img = Image.open(p).convert(\"RGB\")\n","            x   = preprocess(img).unsqueeze(0).to(device)\n","            emb = clip_model.encode_image(x)               # (1,512)\n","\n","            logit = aesthetic_head(emb)\n","            score = torch.sigmoid(logit).item() * 10.0     # 0-10 scale\n","\n","            results.append({'path': p, 'score': score})\n","        except Exception as e:\n","            print(f\"Skip {p}: {e}\")\n","\n","df = pd.DataFrame(results)\n","df = df.sort_values('score', ascending=False).reset_index(drop=True)\n","print(f\"Scored {len(df)} images | best {df['score'].iloc[0]:.2f} | worst {df['score'].iloc[-1]:.2f}\")"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":56,"status":"ok","timestamp":1762280539891,"user":{"displayName":"","userId":""},"user_tz":-60},"id":"mAhkH_da_Q8G","outputId":"f189612f-27b8-4f35-feca-76d1f64bbf81"},"outputs":[{"name":"stderr","output_type":"stream","text":["Deduplicate groups: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 234/234 [00:00<00:00, 1106501.84it/s]"]},{"name":"stdout","output_type":"stream","text":["After deduplication: 234 images (removed 0 duplicates)\n"]},{"name":"stderr","output_type":"stream","text":["\n"]}],"source":["# --------------------------------------------------------------\n","# 5. 
DEDUPLICATION (file-size + CLIP similarity)\n","# --------------------------------------------------------------\n","@torch.no_grad()\n","def get_clip_embedding(p):\n","    try:\n","        img = Image.open(p).convert(\"RGB\")\n","        x   = preprocess(img).unsqueeze(0).to(device)\n","        emb = clip_model.encode_image(x)\n","        emb = emb / emb.norm(dim=-1, keepdim=True)\n","        return emb.cpu().numpy().flatten()\n","    except Exception:\n","        return None\n","\n","# ---- group by approximate size ------------------------------------\n","size_groups = {}\n","for p in df['path']:\n","    sz = os.path.getsize(p)\n","    key = round(sz * (1 / (1 + FILESIZE_TOLERANCE)))\n","    size_groups.setdefault(key, []).append(p)\n","\n","kept_paths = []\n","for key, group in tqdm(size_groups.items(), desc=\"Deduplicate groups\"):\n","    if len(group) == 1:\n","        kept_paths.append(group[0])\n","        continue\n","\n","    # compute embeddings only for this small group\n","    embeddings = []\n","    valid_paths = []\n","    for p in group:\n","        emb = get_clip_embedding(p)\n","        if emb is not None:\n","            embeddings.append(emb)\n","            valid_paths.append(p)\n","\n","    if len(embeddings) <= 1:\n","        kept_paths.extend(valid_paths)\n","        continue\n","\n","    embeddings = np.stack(embeddings)\n","    sim = cosine_similarity(embeddings)\n","\n","    keep = [True] * len(valid_paths)\n","    for i in range(len(valid_paths)):\n","        if not keep[i]: continue\n","        for j in range(i+1, len(valid_paths)):\n","            if sim[i, j] >= SIMILARITY_THRESHOLD:\n","                keep[j] = False\n","\n","    for idx, k in enumerate(keep):\n","        if k:\n","            kept_paths.append(valid_paths[idx])\n","\n","# ---- filter original dataframe ------------------------------------\n","df_clean = df[df['path'].isin(kept_paths)].copy()\n","df_clean = df_clean.sort_values('score', ascending=False).reset_index(drop=True)\n","print(f\"After deduplication: {len(df_clean)} images (removed {len(df)-len(df_clean)} duplicates)\")"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":14218,"status":"ok","timestamp":1762280554104,"user":{"displayName":"","userId":""},"user_tz":-60},"id":"sWx4fBSe_hSI","outputId":"6c95a420-b4e9-42e9-813c-ae4a514e2ba2"},"outputs":[{"name":"stderr","output_type":"stream","text":["Packing: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 234/234 [00:00<00:00, 843.77it/s] \n"]},{"name":"stdout","output_type":"stream","text":["Saved /content/drive/MyDrive/dbg_aesthetic_sorted/mia_panels_part_001.zip  (234 files, 226.5 MB)\n","\n","All done! Cleaned ZIPs + CSV are in: /content/drive/MyDrive/dbg_aesthetic_sorted\n"]}],"source":["# --------------------------------------------------------------\n","# 6. 
PACK CLEANED IMAGES INTO ≀300 MiB ZIPs\n","# --------------------------------------------------------------\n","temp_dir = '/content/zip_temp'\n","os.makedirs(temp_dir, exist_ok=True)\n","\n","current_files = []      # (temp_path, arcname)\n","current_size  = 0\n","zip_idx       = 1\n","\n","def finish_zip():\n","    global zip_idx, current_size, current_files\n","    if not current_files: return\n","    zip_path = f\"{OUT_DIR}/mia_panels_part_{zip_idx:03d}.zip\"\n","    with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as z:\n","        for src, arc in current_files:\n","            z.write(src, arc)\n","    mb = os.path.getsize(zip_path) / 1e6\n","    print(f\"Saved {zip_path}  ({len(current_files)} files, {mb:.1f} MB)\")\n","    for src, _ in current_files:\n","        os.remove(src)\n","    current_files = []\n","    current_size = 0\n","    zip_idx += 1\n","\n","for idx, row in enumerate(tqdm(df_clean.itertuples(), total=len(df_clean), desc=\"Packing\")):\n","    src  = row.path\n","    rank = idx + 1\n","    ext  = os.path.splitext(src)[1].lower()\n","    tmp  = f\"{temp_dir}/{rank}{ext}\"\n","    shutil.copyfile(src, tmp)\n","\n","    fsize = os.path.getsize(tmp)\n","    if current_size + fsize > MAX_ZIP_BYTES and current_files:\n","        finish_zip()\n","\n","    current_files.append((tmp, f\"{rank}{ext}\"))\n","    current_size += fsize\n","\n","finish_zip()\n","shutil.rmtree(temp_dir, ignore_errors=True)\n","\n","# --------------------------------------------------------------\n","# 7. SAVE SCORE CSV (only the kept images)\n","# --------------------------------------------------------------\n","df_clean.to_csv(f\"{OUT_DIR}/aesthetic_scores_clean.csv\", index=False)\n","print(\"\\nAll done! Cleaned ZIPs + CSV are in:\", OUT_DIR)"]},{"cell_type":"markdown","metadata":{"id":"haUt4RKmRbjQ"},"source":["##Sort images into folders using CLIP image feature extraction (T4)"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"59Tf9llpSGoz"},"outputs":[],"source":["#@markdown Unzip training data from drive to /content/ (if required)\n","path = '/content/drive/MyDrive/training_data_66708.zip' #@param {type:'string'}\n","\n","%cd /content/\n","!unzip  {path}"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"WncaEzzGiaO2"},"outputs":[],"source":["!pip install ftfy regex tqdm\n","!pip install git+https://github.com/openai/CLIP.git\n","!pip install scikit-learn matplotlib pillow umap-learn  # UMAP is optional for 2D visualization"]},{"cell_type":"markdown","metadata":{"id":"EnqyKHcOilVA"},"source":["Load Images and Extract CLIP Embeddings\n","\n","Upload your images the normal way ( `/content/`) prior to running this cell.\n","\n","This code loads all images (supports JPG, PNG, etc.), preprocesses them, and extracts 512-dimensional embeddings using the ViT-B/32 CLIP model."]},{"cell_type":"code","execution_count":null,"metadata":{"id":"XCKB0QeiJmIG"},"outputs":[],"source":["!pip install open_clip_torch\n","\n","import os\n","import numpy as np\n","import torch\n","import open_clip\n","from PIL import Image\n","\n","# Configuration\n","image_dir = '/content/'  #@param {type:'string'}\n","device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n","model_name = \"ViT-B-32\"  # Available per error message\n","pretrained = \"laion400m_e32\"  # Robust pretrained weights\n","\n","# Load OpenCLIP model\n","model, _, preprocess = open_clip.create_model_and_transforms(model_name, pretrained=pretrained)\n","model.to(device)\n","model.eval()\n","\n","# Load images and 
extract embeddings\n","embeddings = []\n","image_paths = []\n","image_names = []\n","\n","# Recursively find images in subdirectories, excluding /content/drive/\n","for root, _, files in os.walk(image_dir):\n","    if '/drive/' in root:  # Explicitly skip any directory containing '/drive/'\n","        continue\n","    for filename in files:\n","        if filename.lower().endswith(('.png', '.jpg', '.jpeg', '.gif', '.bmp')):\n","            img_path = os.path.join(root, filename)\n","            try:\n","                image = preprocess(Image.open(img_path)).unsqueeze(0).to(device)\n","                with torch.no_grad():\n","                    embedding = model.encode_image(image)\n","                embeddings.append(embedding.cpu().numpy().flatten())\n","                image_paths.append(img_path)\n","                image_names.append(filename)\n","                #print(f\"Processed: {filename}\")\n","            except Exception as e:\n","                print(f\"Error processing {filename}: {e}\")\n","\n","embeddings = np.array(embeddings)\n","print(f\"Extracted embeddings for {len(embeddings)} images. Shape: {embeddings.shape}\")"]},{"cell_type":"markdown","metadata":{"id":"HQsc2r-ii6cK"},"source":["Perform Clustering\n","We'll use K-Means clustering on the embeddings. You can choose the number of clusters (`n_clusters`) based on your dataset size (e.g., try 5-10). We'll also compute the silhouette score to evaluate cluster quality (higher is better).\n","\n","For visualization, we'll optionally reduce dimensions to 2D using UMAP."]},{"cell_type":"code","execution_count":null,"metadata":{"id":"WM9wug70jCtR"},"outputs":[],"source":["from umap import UMAP  # For 2D projection (optional)\n","import os\n","import numpy as np\n","import torch\n","import clip\n","from PIL import Image\n","import matplotlib.pyplot as plt\n","from sklearn.cluster import KMeans\n","from sklearn.metrics import silhouette_score\n","import warnings\n","warnings.filterwarnings('ignore')\n","#@markdown Choose number of clusters (experiment with this)\n","n_clusters = 100  # @param {type:'slider' , min:1 , max:200, step:1}\n","\n","# Perform K-Means clustering\n","kmeans = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)\n","cluster_labels = kmeans.fit_predict(embeddings)\n","\n","# Evaluate clustering quality\n","sil_score = silhouette_score(embeddings, cluster_labels)\n","print(f\"Silhouette Score: {sil_score:.3f} (closer to 1 is better)\")\n","\n","# Optional: 2D visualization with UMAP\n","reducer = UMAP(random_state=42, n_components=2)\n","embed_2d = reducer.fit_transform(embeddings)\n","\n","plt.figure(figsize=(10, 8))\n","scatter = plt.scatter(embed_2d[:, 0], embed_2d[:, 1], c=cluster_labels, cmap='tab10', s=50)\n","plt.colorbar(scatter)\n","plt.title(f'2D UMAP Projection of CLIP Embeddings (K={n_clusters} Clusters)')\n","plt.xlabel('UMAP 1')\n","plt.ylabel('UMAP 2')\n","plt.show()"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"1fMT3PmCOSyh"},"outputs":[],"source":["import shutil\n","import os\n","from PIL import Image\n","\n","# Create output directories\n","output_dir = '/content/clusters'  # Output base directory\n","os.makedirs(output_dir, exist_ok=True)\n","\n","move_files = False  # Set to True to move files, False to copy\n","\n","# Create directories for each cluster\n","for i in range(n_clusters):\n","    cluster_dir = os.path.join(output_dir, f'cluster_{i}')\n","    os.makedirs(cluster_dir, exist_ok=True)\n","\n","# Form inputs using Colab's # 
@param\n","output_format = \"JPEG\"  # @param [\"JPEG\", \"PNG\", \"WEBP\"]\n","quality = 100  # @param {type:\"slider\", min:0, max:100, step:1}\n","\n","# Function to convert and save images\n","for idx, label in enumerate(cluster_labels):\n","    src_path = image_paths[idx]  # Use full path\n","    # Create destination filename with selected extension\n","    dst_filename = os.path.splitext(image_names[idx])[0] + f'.{output_format.lower()}'\n","    dst_path = os.path.join(output_dir, f'cluster_{label}', dst_filename)\n","\n","    try:\n","        # Open and convert image\n","        with Image.open(src_path).convert('RGB') as img:\n","            if output_format == 'JPEG':\n","                img.save(dst_path, 'JPEG', quality=quality, optimize=True)\n","            elif output_format == 'PNG':\n","                # PNG compression: 0 (max compression) to 9 (no compression)\n","                # Map quality 0-100 to PNG compression 9-0\n","                png_compression = int(9 - (quality / 100 * 9))\n","                img.save(dst_path, 'PNG', compress_level=png_compression)\n","            elif output_format == 'WEBP':\n","                img.save(dst_path, 'WEBP', quality=quality)\n","\n","            if move_files:\n","                os.remove(src_path)  # Delete original if moving\n","            print(f\"Assigned {image_names[idx]} as {dst_filename} to cluster_{label}\")\n","    except Exception as e:\n","        print(f\"Error converting {image_names[idx]} to {output_format}: {e}\")\n","\n","print(f\"Images sorted into {n_clusters} clusters in '{output_dir}' as .{output_format.lower()}\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"ooH5mMsDjUYs"},"outputs":[],"source":["from PIL import Image\n","import matplotlib.pyplot as plt\n","import os\n","\n","def display_cluster_samples(cluster_dir, n_samples=3):\n","    # Check if cluster directory exists\n","    if not os.path.exists(cluster_dir):\n","        print(f\"Cluster directory {cluster_dir} does not exist\")\n","        return\n","\n","    # Updated to include .jpg, .png, and .webp files\n","    images = [f for f in os.listdir(cluster_dir) if f.lower().endswith(('.jpg', '.jpeg', '.png', '.webp'))][:n_samples]\n","    if not images:\n","        print(f\"No images in {cluster_dir}\")\n","        return\n","\n","    fig, axs = plt.subplots(1, len(images), figsize=(5 * len(images), 5))\n","    if len(images) == 1:\n","        axs = [axs]\n","    for j, img_file in enumerate(images):\n","        img_path = os.path.join(cluster_dir, img_file)\n","        try:\n","            img = Image.open(img_path).convert('RGB')  # Ensure RGB for display\n","            axs[j].imshow(img)\n","            axs[j].set_title(img_file)\n","            axs[j].axis('off')\n","        except Exception as e:\n","            print(f\"Error displaying {img_file}: {e}\")\n","    plt.show()\n","\n","# Create output directories\n","output_dir = '/content/clusters'  # Output base directory\n","\n","# Check if output directory exists\n","if not os.path.exists(output_dir):\n","    print(f\"Output directory {output_dir} does not exist\")\n","else:\n","    # Display samples from each cluster\n","    for i in range(n_clusters):  # Ensure n_clusters is defined\n","        cluster_dir = os.path.join(output_dir, f'cluster_{i}')\n","        print(f\"\\nSamples from Cluster {i}:\")\n","        
display_cluster_samples(cluster_dir)"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":63,"status":"ok","timestamp":1761653969025,"user":{"displayName":"","userId":""},"user_tz":-60},"id":"fBcPNLh9jeZ7","outputId":"93b7fd9b-0e83-4098-e8ff-b0fe3aa1779b"},"outputs":[{"name":"stdout","output_type":"stream","text":["/content\n"]}],"source":["import shutil\n","%cd /content/\n","#@markdown Remove cluster_N.  You can set multiple indices at once 1,4,5,9,...\n","nums ='1,5,9,12,16,18,20,22,31,33,34,36,37,38,53,58,60,62,66,71,74,79,85,87,88,92,93,95,98'#@param {type:'string'}\n","\n","for num in nums.split(','):\n","  if num.strip() == '': continue\n","  shutil.rmtree(f'/content/clusters/cluster_{num.strip()}')"]},{"cell_type":"markdown","metadata":{"id":"aWSOgPj5jLLI"},"source":["Sort Images into Clusters\n","This creates subdirectories for each cluster and moves/copies the images there. Set `move_files=True` to move (or False to copy)."]},{"cell_type":"markdown","metadata":{"id":"Tg_q68KnjUb5"},"source":["Visualize Sample Images per Cluster\n","Display a few sample images from each cluster to inspect the results."]},{"cell_type":"code","execution_count":null,"metadata":{"id":"w2Gzortz0NuD"},"outputs":[],"source":["#@markdown Upload to Google Drive as .zip folder (Be mindful of Google Drive Terms of Service)\n","drive_folder_name = 'my_clusters' # @param {type:'string'}\n","\n","%cd /content/\n","!zip -r /content/drive/MyDrive/{drive_folder_name}.zip {output_dir}\n","\n"]}],"metadata":{"colab":{"collapsed_sections":["TstzuP5HKBtZ","5teR85z0-l_S","haUt4RKmRbjQ"],"provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/πŸ“¦dataset_builder.ipynb","timestamp":1762321685602},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/πŸ“¦dataset_builder.ipynb","timestamp":1762312437969},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/πŸ“¦dataset_builder.ipynb","timestamp":1762306283935},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/πŸ“¦dataset_builder.ipynb","timestamp":1762280779576},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/πŸ“¦dataset_builder.ipynb","timestamp":1762032430096},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/dataset_builder.ipynb","timestamp":1762002927139},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/dataset_builder.ipynb","timestamp":1761823511544},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1761731354034},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1761124521078},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1760628088876},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1756712618300},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1747490904984},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1740037333374},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1736477078136},{"file_id
":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1725365086834}]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}