"""
awesome_final_repos.py

Build a list of "final" GitHub repositories starting from the Awesome list-of-lists.

"Final" repos are GitHub repositories whose repo name does NOT contain "awesome"
(case-insensitive). We recursively traverse linked Awesome lists to collect end
repositories.

We fetch README markdown via the GitHub API (contents endpoint) instead of parsing
HTML. From the README markdown we parse bullet entries like:

    * [Fuse](https://github.com/owner/repo) - Mobile development tools.

and extract: name (link text), canonical GitHub link, and description (text after " - ").
If there is no " - ", we capture the entire line (with the markdown link removed) as the
description.

Repos whose name contains "awesome" are excluded from the output; instead they are
traversed recursively as further Awesome lists.

Output: awesome-repos.parquet, a Parquet file with columns: name, link, description
(plus source_repo, and optionally stars and readme_preview).

Usage:
    python3 data_collection_utils/awesome_final_repos.py \
        --root https://github.com/sindresorhus/awesome \
        --depth 2
"""

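# Illustrative sketch of an awesome_scrap_config.yaml (the optional config file read by
# main() below). Every key mirrors a cfg.get(...) lookup there and may be omitted, in
# which case the CLI flags and built-in defaults apply. Values shown are examples only:
#
#   root: https://github.com/sindresorhus/awesome
#   depth: 2
#   workers: 4
#   output_dir: .
#   cache_dir: output/awesome_parse_cache
#   fetch_stars: false
#   fetch_readme_preview: false
#   readme_preview_chars: 1000
#   save_every: 500
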
from collections import deque
from pathlib import Path
from typing import List, Set, Tuple, Optional, Dict, Any
from urllib.parse import urlparse
import asyncio
import aiohttp
import json
import os

import argparse
import re
import sys
import yaml
from dotenv import load_dotenv
import pandas as pd
from github_api_utils import (
    github_headers,
    fetch_repo_description,
    fetch_repo_readme_markdown,
)
from tqdm.auto import tqdm

load_dotenv()
assert (
    os.getenv("GH_TOKEN") is not None or os.getenv("GITHUB_TOKEN") is not None
), "Set GH_TOKEN or GITHUB_TOKEN (in the environment or a .env file) for GitHub API access"

DEFAULT_ROOT = "https://github.com/sindresorhus/awesome"

# Descriptions outside [MIN_DESC_LEN, MAX_DESC_LEN] are blanked so the enrichment step
# can refetch them from the GitHub repo metadata (see _clean_description).
MAX_DESC_LEN = 400
MIN_DESC_LEN = 20


def load_cache(cache_file: Path) -> Dict[str, Any]:
    if cache_file.exists():
        with open(cache_file, "r", encoding="utf-8") as f:
            return json.load(f)
    return {}


def save_cache(cache_file: Path, cache: Dict[str, Any]) -> None:
    cache_file.parent.mkdir(parents=True, exist_ok=True)
    with open(cache_file, "w", encoding="utf-8") as f:
        json.dump(cache, f, ensure_ascii=False, indent=2)


async def fetch_readme_with_cache(
    session: aiohttp.ClientSession,
    cache: Dict[str, Any],
    cache_file: Path,
    owner: str,
    repo: str,
) -> Tuple[Optional[str], bool]:
    """
    Return the README markdown and a flag indicating whether it was freshly fetched
    (True) or served from cache (False).

    Note: cache_file is accepted by the signature but persistence is handled
    separately by the caller via save_cache().
    """
    key = f"{owner}/{repo}"
    if key in cache:
        return cache[key], False
    md = await fetch_repo_readme_markdown(session, owner, repo)
    if md is not None:
        cache[key] = md
        return md, True
    return None, False


def canonical_repo_url(url: str) -> Optional[str]:
    try:
        p = urlparse(url)
    except ValueError:
        return None
    if p.netloc != "github.com" or p.scheme not in {"http", "https"}:
        return None
    parts = [part for part in p.path.split("/") if part]
    if len(parts) < 2:
        return None
    owner, repo = parts[0], parts[1]
    blocked_owners = {
        "topics",
        "collections",
        "orgs",
        "marketplace",
        "features",
        "pricing",
        "about",
        "site",
        "blog",
        "events",
        "apps",
        "sponsors",
        "login",
        "join",
        "explore",
        "contact",
        "settings",
        "search",
        "codespaces",
    }
    if owner in blocked_owners:
        return None

    if repo.endswith(".git"):
        repo = repo[:-4]
    return f"https://github.com/{owner}/{repo}"

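# A few input/output examples for canonical_repo_url, derived from the checks above
# (shown as comments for orientation, not as executable tests):
#   "https://github.com/owner/repo.git"            -> "https://github.com/owner/repo"
#   "https://github.com/owner/repo/tree/main/docs" -> "https://github.com/owner/repo"
#   "https://github.com/topics/python"             -> None  (blocked owner)
#   "https://example.com/owner/repo"               -> None  (not github.com)

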
def parse_owner_repo(canonical_url: str) -> Tuple[str, str]:
    p = urlparse(canonical_url)
    parts = [part for part in p.path.split("/") if part]
    assert len(parts) >= 2
    return parts[0], parts[1]


def is_awesome_repo_name(repo: str) -> bool:
    return "awesome" in repo.lower()


def extract_github_links_from_markdown(md: str) -> List[str]:
    urls: Set[str] = set()
    # Markdown links: [text](https://github.com/...)
    for m in re.finditer(r"\]\((https?://github\.com/[^)\s#]+)\)", md):
        urls.add(m.group(1))
    # Bare GitHub URLs anywhere in the text
    for m in re.finditer(r"https?://github\.com/[^)\]\s#]+", md):
        urls.add(m.group(0))
    return sorted(urls)


def _extract_entries_from_markdown_lines(
    md: str, current_owner: str, current_repo: str
) -> List[Dict[str, str]]:
    """
    Extract entries of the form: bullet + [name](url) optionally followed by " - description".
    If there's no " - ", use the entire line as description but remove the [name](url) part.
    Returns a list of dicts with keys: name, url, description (raw url before canonicalization).
    """
    entries: List[Dict[str, str]] = []
    lines = md.splitlines()

    link_re = re.compile(r"\[([^\]]+)\]\((https?://[^)\s]+)\)")
    bullet_re = re.compile(r"^\s*[-*+]\s+")
    for line in lines:
        # Pick the first GitHub repo link on the line that is not a self-link.
        chosen = None
        for m in link_re.finditer(line):
            url_candidate = m.group(2).strip()
            cu = canonical_repo_url(url_candidate)
            if cu is None:
                continue
            o, r = parse_owner_repo(cu)
            # Skip references back to the list currently being parsed.
            if o == current_owner and r == current_repo:
                continue
            chosen = m
            break
        if not chosen:
            continue
        name = chosen.group(1).strip()
        url = chosen.group(2).strip()

        right = line[chosen.end() :]
        desc: str
        # Preferred form: "[name](url) - description"
        m_dash = re.match(r"^\s*-\s+(.*)$", right)
        if m_dash is not None:
            desc = m_dash.group(1).strip()
        else:
            # Fall back to the whole line with the markdown link removed.
            without_link = (line[: chosen.start()] + line[chosen.end() :]).strip()
            if bullet_re.match(without_link):
                # Drop a leading bullet marker if one remains.
                without_link = bullet_re.sub("", without_link, count=1).strip()
            desc = without_link
        entries.append({"name": name, "url": url, "description": desc})
    return entries

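# Worked example for _extract_entries_from_markdown_lines (hypothetical input lines,
# reusing the owner/repo placeholders from the module docstring):
#
#   "* [Fuse](https://github.com/owner/repo) - Mobile development tools."
#       -> {"name": "Fuse", "url": "https://github.com/owner/repo",
#           "description": "Mobile development tools."}
#
#   "* [Tool](https://github.com/owner/tool) CLI helper for X."   (no " - " separator)
#       -> {"name": "Tool", "url": "https://github.com/owner/tool",
#           "description": "CLI helper for X."}

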
async def crawl_awesome_final_entries(
    session: aiohttp.ClientSession,
    cache: Dict[str, Any],
    cache_file: Path,
    root_repo_url: str,
    max_depth: int,
    *,
    save_every: Optional[int] = None,
    out_parquet: Optional[Path] = None,
) -> List[Dict[str, str]]:
    root_cu = canonical_repo_url(root_repo_url)
    assert root_cu is not None, f"Not a canonical GitHub repo URL: {root_repo_url}"
    root_owner, root_repo = parse_owner_repo(root_cu)

    visited_awesome: Set[str] = set()
    queue: deque[Tuple[str, str, int]] = deque()

    visited_awesome.add(root_cu)
    queue.append((root_owner, root_repo, 0))

    # Final repos keyed by canonical URL.
    results: Dict[str, Dict[str, str]] = {}

    # Number of READMEs fetched from the API (cache misses), used for checkpointing.
    newly_fetched_readmes = 0

    while queue:
        owner, repo, depth = queue.popleft()
        print(f"[depth={depth}] awesome: {owner}/{repo}")
        md, is_new_fetch = await fetch_readme_with_cache(
            session, cache, cache_file, owner, repo
        )
        if md is None:
            print(
                f" README not found or temporarily unavailable (rate-limited) for {owner}/{repo}",
                file=sys.stderr,
            )
            continue

        if is_new_fetch:
            newly_fetched_readmes += 1
            if save_every and out_parquet and (newly_fetched_readmes % save_every == 0):
                # Periodically persist partial results and the README cache.
                checkpoint_rows: List[Dict[str, str]] = []
                for v in results.values():
                    if "source_depth" in v:
                        v2 = {k: v[k] for k in v.keys() if k != "source_depth"}
                    else:
                        v2 = v
                    checkpoint_rows.append(v2)
                if checkpoint_rows:
                    persist_rows_to_parquet(checkpoint_rows, out_parquet)
                    print(
                        f"Checkpoint: saved {len(checkpoint_rows)} rows after {newly_fetched_readmes} newly fetched READMEs"
                    )
                save_cache(cache_file, cache)
                print("Checkpoint: README cache saved")
        entries = _extract_entries_from_markdown_lines(md, owner, repo)
        for e in entries:
            cu = canonical_repo_url(e["url"])
            if cu is None:
                continue
            o, r = parse_owner_repo(cu)
            # Skip self-links back to the current awesome list.
            if o == owner and r == repo:
                continue
            if is_awesome_repo_name(r):
                if cu not in visited_awesome and depth < max_depth:
                    visited_awesome.add(cu)
                    queue.append((o, r, depth + 1))
            else:
                # Final (non-awesome) repo; only entries found in lists processed at
                # the maximum depth are collected.
                if depth == max_depth:
                    if cu not in results:
                        results[cu] = {
                            "name": e["name"],
                            "link": cu,
                            "description": _clean_description(e["description"]),
                            "source_repo": f"{repo}",
                            "source_depth": depth,
                        }
                    else:
                        # Keep the deepest source list and fill in a missing description.
                        prev_depth = results[cu].get("source_depth")
                        if prev_depth is None or depth > prev_depth:
                            results[cu]["source_repo"] = f"{repo}"
                            results[cu]["source_depth"] = depth
                        if not results[cu]["description"] and e["description"]:
                            results[cu]["description"] = _clean_description(
                                e["description"]
                            )

    # Drop the bookkeeping field before returning.
    out: List[Dict[str, str]] = []
    for v in results.values():
        if "source_depth" in v:
            v = {k: v[k] for k in v.keys() if k != "source_depth"}
        out.append(v)
    return out

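# Illustrative shape of one row returned by crawl_awesome_final_entries
# (values are placeholders, not real data):
#   {"name": "Fuse", "link": "https://github.com/owner/repo",
#    "description": "Mobile development tools.", "source_repo": "awesome-example"}

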
async def fetch_repo_stars(
    session: aiohttp.ClientSession, owner: str, repo: str
) -> Optional[int]:
    url = f"https://api.github.com/repos/{owner}/{repo}"
    try:
        async with session.get(url, headers=github_headers()) as resp:
            if resp.status == 200:
                data = await resp.json()
                if isinstance(data, dict) and "stargazers_count" in data:
                    return data["stargazers_count"]
    except Exception:
        return None
    return None


async def enrich_with_stars(
    session: aiohttp.ClientSession, rows: List[Dict[str, str]], concurrency: int
) -> None:
    sem = asyncio.Semaphore(concurrency if concurrency and concurrency > 0 else 10)

    async def one(row: Dict[str, str]):
        async with sem:
            owner, repo = parse_owner_repo(row["link"])
            stars = await fetch_repo_stars(session, owner, repo)
            row["stars"] = stars

    await asyncio.gather(*(one(r) for r in rows))


async def enrich_with_readme_preview(
    session: aiohttp.ClientSession,
    rows: List[Dict[str, str]],
    concurrency: int,
    max_chars: int,
) -> None:
    sem = asyncio.Semaphore(concurrency if concurrency and concurrency > 0 else 10)

    async def one(row: Dict[str, str]):
        async with sem:
            owner, repo = parse_owner_repo(row["link"])
            md = await fetch_repo_readme_markdown(session, owner, repo)
            if md is not None:
                row["readme_preview"] = md[:max_chars]

    tasks = [asyncio.create_task(one(r)) for r in rows]
    for fut in tqdm(
        asyncio.as_completed(tasks), total=len(tasks), desc="README enrichment"
    ):
        await fut


def _desc_is_missing(desc: Optional[str]) -> bool:
    if desc is None:
        return True
    s = desc.strip()
    return s == "" or s == "-" or s == "—"


def _clean_description(desc: Optional[str]) -> Optional[str]:
    """
    If the description length is outside [MIN_DESC_LEN, MAX_DESC_LEN], blank it out
    so it can be refetched by the enrichment step. Otherwise return it as-is.
    """
    if desc is None:
        return None
    try:
        length = len(desc)
        return desc if (length >= MIN_DESC_LEN and length <= MAX_DESC_LEN) else ""
    except Exception:
        # Defensive fallback for non-string inputs.
        s = str(desc)
        length = len(s)
        return s if (length >= MIN_DESC_LEN and length <= MAX_DESC_LEN) else ""


def persist_rows_to_parquet(rows: List[Dict[str, Any]], out_parquet: Path) -> None:
    """Persist the current set of rows to a Parquet file with a stable column order."""
    out_parquet.parent.mkdir(parents=True, exist_ok=True)
    df = pd.DataFrame(rows)
    cols = [
        c
        for c in [
            "name",
            "link",
            "description",
            "source_repo",
            "stars",
            "readme_preview",
        ]
        if c in df.columns
    ]
    df = df[cols]
    df.to_parquet(out_parquet, index=False)


async def enrich_missing_descriptions(
    session: aiohttp.ClientSession,
    rows: List[Dict[str, str]],
    concurrency: int,
    save_every: Optional[int] = None,
    out_parquet: Optional[Path] = None,
) -> None:
    sem = asyncio.Semaphore(concurrency if concurrency and concurrency > 0 else 10)
    lock = asyncio.Lock()
    fetch_attempts = 0

    async def one(row: Dict[str, str]):
        if not _desc_is_missing(row.get("description")):
            return
        async with sem:
            owner, repo = parse_owner_repo(row["link"])
            desc = await fetch_repo_description(session, owner, repo)
            if desc:
                row["description"] = _clean_description(desc)

        nonlocal fetch_attempts
        async with lock:
            fetch_attempts += 1
            if save_every and out_parquet and (fetch_attempts % save_every == 0):
                persist_rows_to_parquet(rows, out_parquet)

    tasks = [asyncio.create_task(one(r)) for r in rows]
    for fut in tqdm(
        asyncio.as_completed(tasks), total=len(tasks), desc="Description enrichment"
    ):
        await fut


def main() -> None:
    cfg_dir = Path(__file__).resolve().parent
    cfg_path = cfg_dir / "awesome_scrap_config.yaml"
    cfg = {}
    if cfg_path.exists():
        with open(cfg_path, "r", encoding="utf-8") as f:
            cfg = yaml.safe_load(f) or {}

    ap = argparse.ArgumentParser(
        description="Collect final GitHub repos from Awesome list-of-lists"
    )
    ap.add_argument(
        "--root",
        default=cfg.get("root", DEFAULT_ROOT),
        help="Root Awesome repository URL (https://github.com/<owner>/<repo>)",
    )
    ap.add_argument(
        "--depth",
        type=int,
        default=cfg.get("depth", 2),
        help="Maximum recursion depth for Awesome repos (0 = only root)",
    )
    ap.add_argument(
        "--workers",
        type=int,
        default=cfg.get("workers", 1),
        help="Number of concurrent workers for fetching",
    )
    ap.add_argument(
        "--output-dir",
        default=cfg.get("output_dir", "."),
        help="Output directory for awesome-repos.parquet",
    )
    ap.add_argument(
        "--cache-dir",
        default=cfg.get("cache_dir", "output/awesome_parse_cache"),
        help="Cache directory for README content",
    )
    ap.add_argument(
        "--fetch-readme-preview",
        action="store_true",
        default=cfg.get("fetch_readme_preview", False),
        help="When set, fetch the first N characters of each final repo's README and include them as readme_preview",
    )
    ap.add_argument(
        "--readme-preview-chars",
        type=int,
        default=cfg.get("readme_preview_chars", 1000),
        help="Number of characters from the README to include in readme_preview",
    )
    ap.add_argument(
        "--save-every",
        type=int,
        default=cfg.get("save_every", 500),
        help=(
            "Checkpoint frequency: after this many newly fetched READMEs during the crawl, "
            "persist a partial Parquet file and save the README cache. Also used for description enrichment."
        ),
    )

    # Star fetching is config-only (there is no CLI flag for it).
    fetch_stars_value = bool(cfg.get("fetch_stars", False))
    args = ap.parse_args()

    output_dir = cfg_dir / args.output_dir
    out_parquet = output_dir / "awesome-repos.parquet"
    cache_dir = cfg_dir / args.cache_dir
    cache_file = cache_dir / "readme_cache.json"

    cache: Dict[str, Any] = load_cache(cache_file)

    async def run():
        async with aiohttp.ClientSession() as session:
            rows = await crawl_awesome_final_entries(
                session,
                cache,
                cache_file,
                args.root,
                args.depth,
                save_every=args.save_every,
                out_parquet=out_parquet,
            )
            if fetch_stars_value and rows:
                print(
                    f"Fetching stargazers_count for {len(rows)} repos (concurrency={args.workers})..."
                )
                await enrich_with_stars(session, rows, args.workers)

            if rows:
                print("Filling missing descriptions from GitHub repo metadata...")
                await enrich_missing_descriptions(
                    session,
                    rows,
                    args.workers,
                    save_every=args.save_every,
                    out_parquet=out_parquet,
                )

            persist_rows_to_parquet(rows, out_parquet)

            if getattr(args, "fetch_readme_preview", False) and rows:
                print(
                    f"Fetching README previews (first {args.readme_preview_chars} chars) for {len(rows)} repos..."
                )
                await enrich_with_readme_preview(
                    session, rows, args.workers, args.readme_preview_chars
                )
                persist_rows_to_parquet(rows, out_parquet)

            save_cache(cache_file, cache)
            print(f"Collected {len(rows)} final repositories with descriptions")
            print(f"Wrote to {out_parquet}")

    asyncio.run(run())


if __name__ == "__main__":
    main()