codeShare committed on
Commit 0ec9e76 · verified · 1 Parent(s): f6af8e3

Upload CLIP_B32_finetune_cluster.ipynb

Files changed (1)
  1. CLIP_B32_finetune_cluster.ipynb +1 -1
CLIP_B32_finetune_cluster.ipynb CHANGED
@@ -1 +1 @@
- {"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/CLIP_B32_finetune_cluster.ipynb","timestamp":1760371508137},{"file_id":"1wufnt5hqKHLuoX9wDdyzarENUjOO_s3N","timestamp":1760363981901},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/CLIP_cluster.ipynb","timestamp":1760363231133}],"authorship_tag":"ABX9TyNuwOfHJMExZzoVAXYemrr1"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"}},"cells":[{"cell_type":"code","execution_count":null,"metadata":{"id":"G9yAxL_ViF7y"},"outputs":[],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"markdown","source":["Install Required Libraries\n","Run this cell to install the necessary packages. CLIP requires PyTorch, and we'll use scikit-learn for clustering, along with Pillow for image loading and matplotlib for visualization."],"metadata":{"id":"ji2qFha2icZi"}},{"cell_type":"code","source":["#@markdown Unzip training data from drive to /content/ (if required)\n","%cd /content/\n","!unzip drive/MyDrive/training_data.zip"],"metadata":{"id":"59Tf9llpSGoz"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["!pip install ftfy regex tqdm\n","!pip install git+https://github.com/openai/CLIP.git\n","!pip install scikit-learn matplotlib pillow umap-learn # UMAP is optional for 2D visualization"],"metadata":{"id":"WncaEzzGiaO2"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["Load Images and Extract CLIP Embeddings\n","\n","Upload your images the normal way ( `/content/`) prior to running this cell.\n","\n","This code loads all images (supports JPG, PNG, etc.), preprocesses them, and extracts 512-dimensional embeddings using the ViT-B/32 CLIP model."],"metadata":{"id":"EnqyKHcOilVA"}},{"cell_type":"code","source":["!pip install open_clip_torch\n","\n","import os\n","import numpy as np\n","import torch\n","import open_clip\n","from PIL import Image\n","\n","# Configuration\n","image_dir = '/content/' # Update this path\n","device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n","model_name = \"ViT-B-32\" # Available per error message\n","pretrained = \"laion400m_e32\" # Robust pretrained weights\n","\n","# Load OpenCLIP model\n","model, _, preprocess = open_clip.create_model_and_transforms(model_name, pretrained=pretrained)\n","model.to(device)\n","model.eval()\n","\n","# Load images and extract embeddings\n","embeddings = []\n","image_paths = []\n","image_names = []\n","\n","for filename in os.listdir(image_dir):\n"," if filename.lower().endswith(('.png', '.jpg', '.jpeg', '.gif', '.bmp')):\n"," img_path = os.path.join(image_dir, filename)\n"," try:\n"," image = preprocess(Image.open(img_path)).unsqueeze(0).to(device)\n"," with torch.no_grad():\n"," embedding = model.encode_image(image)\n"," embeddings.append(embedding.cpu().numpy().flatten())\n"," image_paths.append(img_path)\n"," image_names.append(filename)\n"," print(f\"Processed: {filename}\")\n"," except Exception as e:\n"," print(f\"Error processing {filename}: {e}\")\n","\n","embeddings = np.array(embeddings)\n","print(f\"Extracted embeddings for {len(embeddings)} images. Shape: {embeddings.shape}\")"],"metadata":{"id":"IcqN15af460q"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["Perform Clustering\n","We'll use K-Means clustering on the embeddings. 
You can choose the number of clusters (`n_clusters`) based on your dataset size (e.g., try 5-10). We'll also compute the silhouette score to evaluate cluster quality (higher is better).\n","\n","For visualization, we'll optionally reduce dimensions to 2D using UMAP."],"metadata":{"id":"HQsc2r-ii6cK"}},{"cell_type":"code","source":["from umap import UMAP # For 2D projection (optional)\n","import os\n","import numpy as np\n","import torch\n","import clip\n","from PIL import Image\n","import matplotlib.pyplot as plt\n","from sklearn.cluster import KMeans\n","from sklearn.metrics import silhouette_score\n","import warnings\n","warnings.filterwarnings('ignore')\n","# Choose number of clusters (experiment with this)\n","n_clusters = 50 # Adjust based on your data\n","\n","# Perform K-Means clustering\n","kmeans = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)\n","cluster_labels = kmeans.fit_predict(embeddings)\n","\n","# Evaluate clustering quality\n","sil_score = silhouette_score(embeddings, cluster_labels)\n","print(f\"Silhouette Score: {sil_score:.3f} (closer to 1 is better)\")\n","\n","# Optional: 2D visualization with UMAP\n","reducer = UMAP(random_state=42, n_components=2)\n","embed_2d = reducer.fit_transform(embeddings)\n","\n","plt.figure(figsize=(10, 8))\n","scatter = plt.scatter(embed_2d[:, 0], embed_2d[:, 1], c=cluster_labels, cmap='tab10', s=50)\n","plt.colorbar(scatter)\n","plt.title(f'2D UMAP Projection of CLIP Embeddings (K={n_clusters} Clusters)')\n","plt.xlabel('UMAP 1')\n","plt.ylabel('UMAP 2')\n","plt.show()"],"metadata":{"id":"WM9wug70jCtR"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["Sort Images into Clusters\n","This creates subdirectories for each cluster and moves/copies the images there. Set `move_files=True` to move (or False to copy)."],"metadata":{"id":"aWSOgPj5jLLI"}},{"cell_type":"code","source":["import shutil\n","import os\n","from PIL import Image\n","\n","# Create cluster directories\n","output_dir = '/content/clusters' # Output base directory\n","os.makedirs(output_dir, exist_ok=True)\n","\n","move_files = False # Set to True to move files, False to copy\n","\n","for i in range(n_clusters):\n"," cluster_dir = os.path.join(output_dir, f'cluster_{i}')\n"," os.makedirs(cluster_dir, exist_ok=True)\n","\n","# Assign and sort images, saving as .webp\n","for idx, label in enumerate(cluster_labels):\n"," src_path = image_paths[idx] # Use full path (corrected from image_names)\n"," # Create destination filename with .webp extension\n"," dst_filename = os.path.splitext(image_names[idx])[0] + '.webp'\n"," dst_path = os.path.join(output_dir, f'cluster_{label}', dst_filename)\n","\n"," try:\n"," # Open and convert image to WebP\n"," with Image.open(src_path).convert('RGB') as img:\n"," img.save(dst_path, 'WEBP', quality=90) # Save as WebP, adjustable quality\n"," if move_files:\n"," os.remove(src_path) # Delete original if moving\n"," print(f\"Assigned {image_names[idx]} as {dst_filename} to cluster_{label}\")\n"," except Exception as e:\n"," print(f\"Error converting {image_names[idx]} to WebP: {e}\")\n","\n","print(f\"Images sorted into {n_clusters} clusters in '{output_dir}' as .webp\")"],"metadata":{"id":"fSOyq0uaNJCQ"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["Visualize Sample Images per Cluster\n","Display a few sample images from each cluster to inspect the results."],"metadata":{"id":"Tg_q68KnjUb5"}},{"cell_type":"code","source":["from PIL import Image\n","import matplotlib.pyplot as plt\n","import 
os\n","\n","def display_cluster_samples(cluster_dir, n_samples=3):\n"," # Updated to include .webp files\n"," images = [f for f in os.listdir(cluster_dir) if f.lower().endswith('.webp')][:n_samples]\n"," if not images:\n"," print(f\"No images in {cluster_dir}\")\n"," return\n","\n"," fig, axs = plt.subplots(1, len(images), figsize=(5 * len(images), 5))\n"," if len(images) == 1:\n"," axs = [axs]\n"," for j, img_file in enumerate(images):\n"," img_path = os.path.join(cluster_dir, img_file)\n"," try:\n"," img = Image.open(img_path).convert('RGB') # Ensure RGB for display\n"," axs[j].imshow(img)\n"," axs[j].set_title(img_file)\n"," axs[j].axis('off')\n"," except Exception as e:\n"," print(f\"Error displaying {img_file}: {e}\")\n"," plt.show()\n","\n","# Display samples from each cluster\n","for i in range(n_clusters):\n"," cluster_dir = os.path.join(output_dir, f'cluster_{i}')\n"," print(f\"\\nSamples from Cluster {i}:\")\n"," display_cluster_samples(cluster_dir)"],"metadata":{"id":"pzy3-9bBT231"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["#@markdown Upload to Google Drive as .zip folder (Be mindful of Google Drive Terms of Service)\n","drive_folder_name = 'clusters' # @param {type:'string'}\n","\n","%cd /content/\n","!zip -r /content/drive/MyDrive/{drive_folder_name}.zip {output_dir}\n","\n"],"metadata":{"id":"w2Gzortz0NuD"},"execution_count":null,"outputs":[]}]}
 
+ {"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/CLIP_B32_finetune_cluster.ipynb","timestamp":1760509652530},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/CLIP_B32_finetune_cluster.ipynb","timestamp":1760371508137},{"file_id":"1wufnt5hqKHLuoX9wDdyzarENUjOO_s3N","timestamp":1760363981901},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/CLIP_cluster.ipynb","timestamp":1760363231133}],"gpuType":"T4","authorship_tag":"ABX9TyPZHt+6HrPEBWIdiDEZWwlw"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"accelerator":"GPU"},"cells":[{"cell_type":"code","execution_count":null,"metadata":{"id":"G9yAxL_ViF7y"},"outputs":[],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"markdown","source":["Install Required Libraries\n","Run this cell to install the necessary packages. CLIP requires PyTorch, and we'll use scikit-learn for clustering, along with Pillow for image loading and matplotlib for visualization."],"metadata":{"id":"ji2qFha2icZi"}},{"cell_type":"code","source":["#@markdown Unzip training data from drive to /content/ (if required)\n","path = '/content/drive/MyDrive/training_data.zip' #@param {type:'string'}\n","\n","%cd /content/\n","!unzip {path}"],"metadata":{"id":"59Tf9llpSGoz"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["!pip install ftfy regex tqdm\n","!pip install git+https://github.com/openai/CLIP.git\n","!pip install scikit-learn matplotlib pillow umap-learn # UMAP is optional for 2D visualization"],"metadata":{"id":"WncaEzzGiaO2"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["Load Images and Extract CLIP Embeddings\n","\n","Upload your images the normal way ( `/content/`) prior to running this cell.\n","\n","This code loads all images (supports JPG, PNG, etc.), preprocesses them, and extracts 512-dimensional embeddings using the ViT-B/32 CLIP model."],"metadata":{"id":"EnqyKHcOilVA"}},{"cell_type":"code","source":["!pip install open_clip_torch\n","\n","import os\n","import numpy as np\n","import torch\n","import open_clip\n","from PIL import Image\n","\n","# Configuration\n","image_dir = '/content/' # Update this path\n","device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n","model_name = \"ViT-B-32\" # Available per error message\n","pretrained = \"laion400m_e32\" # Robust pretrained weights\n","\n","# Load OpenCLIP model\n","model, _, preprocess = open_clip.create_model_and_transforms(model_name, pretrained=pretrained)\n","model.to(device)\n","model.eval()\n","\n","# Load images and extract embeddings\n","embeddings = []\n","image_paths = []\n","image_names = []\n","\n","for filename in os.listdir(image_dir):\n"," if filename.lower().endswith(('.png', '.jpg', '.jpeg', '.gif', '.bmp')):\n"," img_path = os.path.join(image_dir, filename)\n"," try:\n"," image = preprocess(Image.open(img_path)).unsqueeze(0).to(device)\n"," with torch.no_grad():\n"," embedding = model.encode_image(image)\n"," embeddings.append(embedding.cpu().numpy().flatten())\n"," image_paths.append(img_path)\n"," image_names.append(filename)\n"," print(f\"Processed: {filename}\")\n"," except Exception as e:\n"," print(f\"Error processing {filename}: {e}\")\n","\n","embeddings = np.array(embeddings)\n","print(f\"Extracted embeddings for {len(embeddings)} images. 
Shape: {embeddings.shape}\")"],"metadata":{"id":"IcqN15af460q"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["Perform Clustering\n","We'll use K-Means clustering on the embeddings. You can choose the number of clusters (`n_clusters`) based on your dataset size (e.g., try 5-10). We'll also compute the silhouette score to evaluate cluster quality (higher is better).\n","\n","For visualization, we'll optionally reduce dimensions to 2D using UMAP."],"metadata":{"id":"HQsc2r-ii6cK"}},{"cell_type":"code","source":["from umap import UMAP # For 2D projection (optional)\n","import os\n","import numpy as np\n","import torch\n","import clip\n","from PIL import Image\n","import matplotlib.pyplot as plt\n","from sklearn.cluster import KMeans\n","from sklearn.metrics import silhouette_score\n","import warnings\n","warnings.filterwarnings('ignore')\n","#@markdown Choose number of clusters (experiment with this)\n","n_clusters = 50 # @param {type:'slider' , min:1 , max:100, step:1}\n","\n","# Perform K-Means clustering\n","kmeans = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)\n","cluster_labels = kmeans.fit_predict(embeddings)\n","\n","# Evaluate clustering quality\n","sil_score = silhouette_score(embeddings, cluster_labels)\n","print(f\"Silhouette Score: {sil_score:.3f} (closer to 1 is better)\")\n","\n","# Optional: 2D visualization with UMAP\n","reducer = UMAP(random_state=42, n_components=2)\n","embed_2d = reducer.fit_transform(embeddings)\n","\n","plt.figure(figsize=(10, 8))\n","scatter = plt.scatter(embed_2d[:, 0], embed_2d[:, 1], c=cluster_labels, cmap='tab10', s=50)\n","plt.colorbar(scatter)\n","plt.title(f'2D UMAP Projection of CLIP Embeddings (K={n_clusters} Clusters)')\n","plt.xlabel('UMAP 1')\n","plt.ylabel('UMAP 2')\n","plt.show()"],"metadata":{"id":"WM9wug70jCtR"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["Sort Images into Clusters\n","This creates subdirectories for each cluster and moves/copies the images there. 
Set `move_files=True` to move (or False to copy)."],"metadata":{"id":"aWSOgPj5jLLI"}},{"cell_type":"code","source":["import shutil\n","import os\n","from PIL import Image\n","\n","# Create cluster directories\n","output_dir = '/content/clusters' # Output base directory\n","os.makedirs(output_dir, exist_ok=True)\n","\n","move_files = False # Set to True to move files, False to copy\n","\n","for i in range(n_clusters):\n"," cluster_dir = os.path.join(output_dir, f'cluster_{i}')\n"," os.makedirs(cluster_dir, exist_ok=True)\n","\n","# Assign and sort images, saving as .webp\n","for idx, label in enumerate(cluster_labels):\n"," src_path = image_paths[idx] # Use full path (corrected from image_names)\n"," # Create destination filename with .webp extension\n"," dst_filename = os.path.splitext(image_names[idx])[0] + '.webp'\n"," dst_path = os.path.join(output_dir, f'cluster_{label}', dst_filename)\n","\n"," try:\n"," # Open and convert image to WebP\n"," with Image.open(src_path).convert('RGB') as img:\n"," img.save(dst_path, 'WEBP', quality=90) # Save as WebP, adjustable quality\n"," if move_files:\n"," os.remove(src_path) # Delete original if moving\n"," print(f\"Assigned {image_names[idx]} as {dst_filename} to cluster_{label}\")\n"," except Exception as e:\n"," print(f\"Error converting {image_names[idx]} to WebP: {e}\")\n","\n","print(f\"Images sorted into {n_clusters} clusters in '{output_dir}' as .webp\")"],"metadata":{"id":"fSOyq0uaNJCQ"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["Visualize Sample Images per Cluster\n","Display a few sample images from each cluster to inspect the results."],"metadata":{"id":"Tg_q68KnjUb5"}},{"cell_type":"code","source":["from PIL import Image\n","import matplotlib.pyplot as plt\n","import os\n","\n","def display_cluster_samples(cluster_dir, n_samples=3):\n"," # Updated to include .webp files\n"," images = [f for f in os.listdir(cluster_dir) if f.lower().endswith('.webp')][:n_samples]\n"," if not images:\n"," print(f\"No images in {cluster_dir}\")\n"," return\n","\n"," fig, axs = plt.subplots(1, len(images), figsize=(5 * len(images), 5))\n"," if len(images) == 1:\n"," axs = [axs]\n"," for j, img_file in enumerate(images):\n"," img_path = os.path.join(cluster_dir, img_file)\n"," try:\n"," img = Image.open(img_path).convert('RGB') # Ensure RGB for display\n"," axs[j].imshow(img)\n"," axs[j].set_title(img_file)\n"," axs[j].axis('off')\n"," except Exception as e:\n"," print(f\"Error displaying {img_file}: {e}\")\n"," plt.show()\n","\n","# Display samples from each cluster\n","for i in range(n_clusters):\n"," cluster_dir = os.path.join(output_dir, f'cluster_{i}')\n"," print(f\"\\nSamples from Cluster {i}:\")\n"," display_cluster_samples(cluster_dir)"],"metadata":{"id":"pzy3-9bBT231"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["#@markdown Upload to Google Drive as .zip folder (Be mindful of Google Drive Terms of Service)\n","drive_folder_name = 'clusters' # @param {type:'string'}\n","\n","%cd /content/\n","!zip -r /content/drive/MyDrive/{drive_folder_name}.zip {output_dir}\n","\n"],"metadata":{"id":"w2Gzortz0NuD"},"execution_count":null,"outputs":[]}]}
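
The clustering cell in both revisions leaves `n_clusters` as a value to experiment with (the new revision exposes it as a Colab slider form field), and the markdown suggests judging the choice by silhouette score. Below is a minimal sketch, not part of the committed notebook, of turning that suggestion into a small sweep; it assumes an `embeddings` array shaped like the one the extraction cell builds, and the usage example substitutes synthetic vectors for real CLIP features.

# A minimal sketch: sweep a few candidate values of n_clusters and report the
# silhouette score for each, so the slider value in the updated cell can be
# chosen from data rather than guessed. `silhouette_sweep` and the synthetic
# `fake_embeddings` below are illustrative names, not part of the notebook.
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

def silhouette_sweep(embeddings, candidates=(5, 10, 20, 50)):
    """Fit K-Means for each candidate k and return {k: silhouette score}."""
    scores = {}
    for k in candidates:
        labels = KMeans(n_clusters=k, random_state=42, n_init=10).fit_predict(embeddings)
        scores[k] = silhouette_score(embeddings, labels)
    return scores

if __name__ == "__main__":
    # Synthetic 512-dimensional vectors stand in for the real CLIP embeddings.
    rng = np.random.default_rng(0)
    fake_embeddings = rng.normal(size=(200, 512)).astype(np.float32)
    for k, score in silhouette_sweep(fake_embeddings).items():
        print(f"n_clusters={k}: silhouette={score:.3f}")

As the clustering cell itself prints, a silhouette score closer to 1 indicates better-separated clusters, so the candidate with the highest score is the natural choice for the slider.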