Shami96 committed on
Commit b0a4dc4 · verified · 1 Parent(s): b2d3d7e

Update extract_red_text.py

Files changed (1)
  1. extract_red_text.py +139 -435
extract_red_text.py CHANGED
@@ -1,453 +1,157 @@
  #!/usr/bin/env python3
- """
- extract_red_text.py - Enhanced version with improved red text detection and master key alignment
- """
-
- from __future__ import annotations
- import json
  import re
  import sys
- import logging
- from collections import defaultdict
- from typing import List, Dict, Optional, Any, Tuple
-
- # attempt to import python-docx (document processing)
- try:
-     from docx import Document
-     from docx.oxml.ns import qn
-     from docx.shared import RGBColor
- except Exception as e:
-     raise RuntimeError("python-docx is required. Install with: pip install python-docx") from e
-
- # ------------------------------
- # Import master_key configurations
- # ------------------------------
- try:
-     import master_key as mk
-     GLOBAL_SETTINGS = mk.GLOBAL_SETTINGS
-     EXTRA_HEADER_SYNONYMS = mk.EXTRA_HEADER_SYNONYMS
-     TABLE_SCHEMAS = getattr(mk, "TABLE_SCHEMAS", {})
- except ImportError as e:
-     logging.error("Failed to import master_key.py: %s", e)
-     raise RuntimeError("master_key.py is required for configuration") from e
- except AttributeError as e:
-     logging.error("Missing required configuration in master_key.py: %s", e)
-     raise RuntimeError("master_key.py missing required GLOBAL_SETTINGS or EXTRA_HEADER_SYNONYMS") from e
-
- # ------------------------------
- # Logging
- # ------------------------------
- logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s: %(message)s")
- log = logging.getLogger("extract_red_text")
-
- # ------------------------------
- # Normalization & OCR-repair utilities (aligned to GLOBAL_SETTINGS)
- # ------------------------------
- def _apply_ocr_repair_rules(text: str) -> str:
-     """Apply OCR repair rules from GLOBAL_SETTINGS."""
-     s = text or ""
-     for pat, repl in GLOBAL_SETTINGS.get("ocr_repair_rules", []):
-         try:
-             s = re.sub(pat, repl, s, flags=re.I)
-         except re.error:
-             # skip invalid rule
-             continue
-     return s
-
- def _normalize_text(text: str) -> str:
-     """Normalize text according to GLOBAL_SETTINGS (readable normalized form)."""
-     s = _apply_ocr_repair_rules(text or "")
-     norm_cfg = GLOBAL_SETTINGS.get("normalize", {})
-
-     if norm_cfg.get("replace_smart_dashes", False):
-         s = s.replace("–", "-").replace("—", "-")
-     if norm_cfg.get("lower", False):
-         s = s.lower()
-     if norm_cfg.get("strip_punctuation", False):
-         # keep hyphen, ampersand, parentheses, slash, colon; drop other punctuation
-         s = re.sub(r"[^\w\s\-\&\(\)\/:]", " ", s)
-     if norm_cfg.get("collapse_whitespace", False):
-         s = re.sub(r"\s+", " ", s)
-
-     return s.strip()
-
- def _compact_key(text: str) -> str:
-     """Create compact key (no non-word chars) for deterministic lookup."""
-     if text is None:
-         return ""
-     normalized = _normalize_text(text)
-     return re.sub(r"[^\w]", "", normalized)
-
- def map_header_using_extra_synonyms(header_text: str) -> Optional[str]:
-     """
-     Try deterministic mapping using EXTRA_HEADER_SYNONYMS.
-     Return canonical label if found, else None.
-     """
-     if not header_text:
-         return None
-
-     normalized = _normalize_text(header_text)
-     compact = _compact_key(header_text)
-
-     # try compact key
-     if compact in EXTRA_HEADER_SYNONYMS:
-         return EXTRA_HEADER_SYNONYMS[compact]
-
-     # try normalized key directly
-     if normalized in EXTRA_HEADER_SYNONYMS:
-         return EXTRA_HEADER_SYNONYMS[normalized]
-
-     # also try case-insensitive match on keys
-     for k, v in EXTRA_HEADER_SYNONYMS.items():
-         if k.lower() == normalized.lower() or k.lower() == compact.lower():
-             return v
-
-     return None
-
- # ------------------------------
- # Enhanced red font detection using hf_utils pattern
- # ------------------------------
- def _run_is_red(run) -> bool:
-     """
-     Enhanced red color detection for docx.run objects.
-     Uses multiple methods to detect red text robustly.
-     """
-     try:
-         # Method 1: Check run.font.color.rgb
-         col = getattr(run.font, "color", None)
-         if col is not None and getattr(col, "rgb", None):
-             rgb = col.rgb
-             try:
-                 # rgb may be sequence-like or have attributes
-                 if hasattr(rgb, '__getitem__'):  # sequence-like
-                     r, g, b = rgb[0], rgb[1], rgb[2]
-                 else:  # attribute access
-                     r = getattr(rgb, "r", None) or getattr(rgb, "red", None)
-                     g = getattr(rgb, "g", None) or getattr(rgb, "green", None)
-                     b = getattr(rgb, "b", None) or getattr(rgb, "blue", None)
-
-                 if r is not None and g is not None and b is not None:
-                     # Tolerant heuristic: red must be noticeably higher than green/blue
-                     if r >= 160 and g <= 120 and b <= 120 and (r - g) >= 30 and (r - b) >= 30:
-                         return True
-             except Exception:
-                 pass
-     except Exception:
-         pass
-
-     # Method 2: Check raw XML color code
-     try:
-         rPr = run._element.rPr
-         if rPr is not None:
-             clr = rPr.find('{http://schemas.openxmlformats.org/wordprocessingml/2006/main}color')
-             if clr is not None:
-                 val = clr.get('{http://schemas.openxmlformats.org/wordprocessingml/2006/main}val')
-                 if val and re.fullmatch(r"[0-9A-Fa-f]{6}", val):
-                     rr = int(val[:2], 16)
-                     gg = int(val[2:4], 16)
-                     bb = int(val[4:], 16)
-                     if rr >= 160 and gg <= 120 and bb <= 120 and (rr - gg) >= 30 and (rr - bb) >= 30:
-                         return True
-     except Exception:
-         pass
-
-     # Method 3: Check theme color
-     try:
-         color = run.font.color
-         if color is not None:
-             theme_color = getattr(color, "theme_color", None)
-             if theme_color:
-                 theme_str = str(theme_color).lower()
-                 if "red" in theme_str or "accent_2" in theme_str:  # Common red theme
                      return True
-     except Exception:
-         pass
-
-     # Method 4: String representation fallback
-     try:
-         if hasattr(run.font.color, "rgb") and run.font.color.rgb is not None:
-             s = str(run.font.color.rgb)
-             # Look for patterns like "FF0000" or similar high-red values
-             if re.search(r"[Ff]{2}0{4}|[Ee]{2}0{4}|[Dd]{2}0{4}", s):
-                 return True
-     except Exception:
-         pass
-
      return False

- def _extract_red_text_segments(cell):
-     """Extract red text segments from a table cell."""
-     segments = []
-     for p_idx, paragraph in enumerate(cell.paragraphs):
-         current_text = ""
-         current_runs = []
-
-         for r_idx, run in enumerate(paragraph.runs):
-             if _run_is_red(run) and run.text.strip():
-                 current_text += run.text
-                 current_runs.append((p_idx, r_idx, run))
-             else:
-                 # End of red segment
-                 if current_runs:
-                     segments.append({
-                         'text': current_text.strip(),
-                         'runs': current_runs.copy(),
-                         'paragraph_idx': p_idx
-                     })
-                     current_text = ""
-                     current_runs = []
-
-         # Handle segment at end of paragraph
-         if current_runs:
-             segments.append({
-                 'text': current_text.strip(),
-                 'runs': current_runs.copy(),
-                 'paragraph_idx': p_idx
-             })
-
-     return segments
-
- def _has_red_text(cell) -> bool:
-     """Check if a cell contains any red text."""
-     for paragraph in cell.paragraphs:
-         for run in paragraph.runs:
-             if _run_is_red(run) and run.text.strip():
-                 return True
-     return False


- # ------------------------------
- # Enhanced table processing with schema-aware header mapping
- # ------------------------------
- def _process_table_with_schema_mapping(table, t_index: int) -> Dict[str, Any]:
-     """Process table with enhanced header mapping using master key schemas."""
-     nrows = len(table.rows)
-     ncols = max(len(row.cells) for row in table.rows) if nrows > 0 else 0
-
-     if nrows == 0:
-         return {
-             "table_index": t_index,
-             "nrows": 0,
-             "ncols": 0,
-             "headers": [],
-             "rows": [],
-             "red_cells": [],
-             "mapped_headers": []
-         }
-
-     # Process headers from first row
-     header_row = table.rows[0]
-     headers = []
-     mapped_headers = []
-
-     for c_idx, cell in enumerate(header_row.cells[:ncols]):
-         cell_text = cell.text.strip()
-
-         # Try mapping using EXTRA_HEADER_SYNONYMS first
-         mapped = map_header_using_extra_synonyms(cell_text)
-         if mapped:
-             header_label = mapped
-             log.debug(f"Mapped header '{cell_text}' -> '{mapped}'")
-         else:
-             header_label = cell_text
-
-         headers.append(cell_text)  # Original header
-         mapped_headers.append(header_label)  # Mapped header
-
-     # Process all rows
-     rows_text = []
-     rows_red_cells = []
-     rows_red_metadata = []
-
-     for r_i, row in enumerate(table.rows):
-         row_texts = []
-         row_reds = []
-         row_red_meta = []
-
-         for c_i, cell in enumerate(row.cells[:ncols]):
-             cell_text = cell.text.strip()
-
-             # Extract red text segments with metadata
-             red_segments = _extract_red_text_segments(cell)
-
-             if red_segments:
-                 # Join all red text segments
-                 red_text_parts = [seg['text'] for seg in red_segments if seg['text']]
-                 red_text_joined = " ".join(red_text_parts).strip()
-
-                 # Store metadata about red text location
-                 red_metadata = {
-                     "has_red": True,
-                     "red_text": red_text_joined,
-                     "segments": len(red_segments),
-                     "total_red_runs": sum(len(seg['runs']) for seg in red_segments)
-                 }
-             else:
-                 red_text_joined = None
-                 red_metadata = {"has_red": False}
-
-             row_texts.append(cell_text)
-             row_reds.append(red_text_joined)
-             row_red_meta.append(red_metadata)
-
-         rows_text.append(row_texts)
-         rows_red_cells.append(row_reds)
-         rows_red_metadata.append(row_red_meta)
-
-     return {
-         "table_index": t_index,
-         "nrows": nrows,
-         "ncols": ncols,
-         "headers": headers,  # Original headers
-         "mapped_headers": mapped_headers,  # Mapped headers
-         "rows": rows_text,
-         "red_cells": rows_red_cells,
-         "red_metadata": rows_red_metadata  # Additional red text metadata
-     }

- # ------------------------------
- # Extraction: paragraphs, headings, tables
- # ------------------------------
- def extract_from_docx(path: str) -> Dict[str, Any]:
-     """Extract content from DOCX with enhanced red text detection and schema mapping."""
-     log.info(f"Opening document: {path}")
      doc = Document(path)
-
-     headings: List[str] = []
-     paragraphs_red: List[Dict[str, Any]] = []
-     red_runs: List[Dict[str, Any]] = []
-     tables_out: List[Dict[str, Any]] = []
-
-     # Extract headings and paragraphs with red runs
-     log.info("Processing paragraphs and headings...")
-     for p_index, para in enumerate(doc.paragraphs):
-         text = para.text or ""
-
-         # Identify heading level from style name if available
-         style_name = getattr(para.style, "name", "") if para.style is not None else ""
-         is_heading = bool(re.search(r"Heading\s*\d+|HEADING|TITLE|SUBTITLE", style_name, flags=re.I)) or \
-                      bool(re.search(r"^(MAINTENANCE|MASS|FATIGUE|NHVAS|Vehicle Registration|CORRECTIVE)", text, flags=re.I))
-
-         if is_heading:
-             headings.append(text.strip())
-             log.debug(f"Found heading: {text.strip()}")

-         # Gather red runs in this paragraph
-         paragraph_red_texts = []
-         char_cursor = 0
-
-         for run in para.runs:
-             run_text = run.text or ""
-             run_len = len(run_text)
-
-             if _run_is_red(run) and run_text.strip():
-                 # Store a red run entry
-                 rr = {
-                     "text": run_text,
-                     "paragraph_index": p_index,
-                     "char_index": char_cursor,
-                     "style_name": style_name,
-                     "normalized_text": _normalize_text(run_text)
-                 }
-                 red_runs.append(rr)
-                 paragraph_red_texts.append(run_text)
-                 log.debug(f"Found red text in paragraph {p_index}: '{run_text.strip()}'")
-
-             char_cursor += run_len
-
-         if paragraph_red_texts:
-             paragraphs_red.append({
-                 "paragraph_index": p_index,
-                 "text": text,
-                 "red_texts": paragraph_red_texts,
-                 "style_name": style_name,
-                 "red_text_joined": " ".join(paragraph_red_texts).strip()
-             })

-     # Extract tables with enhanced processing
-     log.info(f"Processing {len(doc.tables)} tables...")
-     for t_index, table in enumerate(doc.tables):
-         table_data = _process_table_with_schema_mapping(table, t_index)
-         tables_out.append(table_data)
-
-         # Log red text findings
-         red_cell_count = sum(1 for row in table_data["red_cells"] for cell in row if cell)
-         if red_cell_count > 0:
-             log.info(f"Table {t_index}: Found {red_cell_count} cells with red text")

-     # Assemble output structure
-     out = {
-         "headings": headings,
-         "paragraphs": paragraphs_red,
-         "tables": tables_out,
-         "red_runs": red_runs,
-         # Enhanced metadata
-         "meta": {
-             "source_file": path,
-             "total_headings": len(headings),
-             "total_red_paragraphs": len(paragraphs_red),
-             "total_tables": len(tables_out),
-             "total_red_runs": len(red_runs),
-             "total_red_cells": sum(
-                 sum(1 for cell in row_red_cells if cell)
-                 for table in tables_out
-                 for row_red_cells in table["red_cells"]
-             ),
-             "global_settings_used": {
-                 "normalization": GLOBAL_SETTINGS.get("normalize", {}),
-                 "ocr_repair_rules_count": len(GLOBAL_SETTINGS.get("ocr_repair_rules", [])),
-                 "synonyms_count": len(EXTRA_HEADER_SYNONYMS) if EXTRA_HEADER_SYNONYMS else 0
-             }
-         }
-     }
-
-     return out

- # ------------------------------
- # Command-line interface
- # ------------------------------
- def main(argv):
-     if len(argv) < 3:
-         print("Usage: python extract_red_text.py input.docx output.json")
-         sys.exit(2)
-
-     input_docx = argv[1]
-     output_json = argv[2]

-     log.info("Starting red text extraction from: %s", input_docx)
-     log.info("Using master_key configuration with %d header synonyms",
-              len(EXTRA_HEADER_SYNONYMS) if EXTRA_HEADER_SYNONYMS else 0)
-
-     try:
-         result = extract_from_docx(input_docx)
-     except Exception as exc:
-         log.exception("Failed to extract from docx: %s", exc)
-         raise

-     # Save JSON pretty-printed for debugging by default
-     try:
-         with open(output_json, "w", encoding="utf-8") as fh:
-             json.dump(result, fh, ensure_ascii=False, indent=2)
-         log.info("Saved extracted data to: %s", output_json)
-     except Exception:
-         log.exception("Failed to write output JSON to %s", output_json)
-         raise

-     # Print comprehensive summary
-     meta = result.get("meta", {})
-     log.info("=== EXTRACTION SUMMARY ===")
-     log.info("Headings found: %d", meta.get("total_headings", 0))
-     log.info("Red paragraphs: %d", meta.get("total_red_paragraphs", 0))
-     log.info("Red runs total: %d", meta.get("total_red_runs", 0))
-     log.info("Tables processed: %d", meta.get("total_tables", 0))
-     log.info("Red cells found: %d", meta.get("total_red_cells", 0))
-     log.info("Header synonyms used: %d", meta.get("global_settings_used", {}).get("synonyms_count", 0))

  if __name__ == "__main__":
-     main(sys.argv)
-     # Print output for verification
-     if len(sys.argv) >= 3:
-         try:
-             with open(sys.argv[2], 'r') as f:
-                 print(f"\n📄 EXTRACT_RED_TEXT OUTPUT:\n{f.read()}")
-         except Exception as e:
-             print(f"\n❌ Could not read output file: {e}")
  #!/usr/bin/env python3
  import re
+ import json
  import sys
+ from docx import Document
+ from docx.oxml.ns import qn
+ from master_key import TABLE_SCHEMAS, HEADING_PATTERNS, PARAGRAPH_PATTERNS
+
+ def is_red_font(run):
+     col = run.font.color
+     if col and col.rgb:
+         r, g, b = col.rgb
+         if r>150 and g<100 and b<100 and (r-g)>30 and (r-b)>30:
+             return True
+     rPr = getattr(run._element, "rPr", None)
+     if rPr is not None:
+         clr = rPr.find(qn('w:color'))
+         if clr is not None:
+             val = clr.get(qn('w:val'))
+             if re.fullmatch(r"[0-9A-Fa-f]{6}", val):
+                 rr, gg, bb = int(val[:2],16), int(val[2:4],16), int(val[4:],16)
+                 if rr>150 and gg<100 and bb<100 and (rr-gg)>30 and (rr-bb)>30:
                      return True
      return False

+ def _prev_para_text(tbl):
+     prev = tbl._tbl.getprevious()
+     while prev is not None and not prev.tag.endswith("}p"):
+         prev = prev.getprevious()
+     if prev is None:
+         return ""
+     return "".join(node.text for node in prev.iter() if node.tag.endswith("}t") and node.text).strip()
+
+ def match_table_schema(tbl):
+     # look for explicit heading constraint
+     heading = _prev_para_text(tbl)
+     headers = [c.text.strip() for c in tbl.rows[0].cells]
+     col0 = [r.cells[0].text.strip() for r in tbl.rows]
+
+     # 1) exact first-cell name
+     first = tbl.rows[0].cells[0].text.strip()
+     if first in TABLE_SCHEMAS:
+         spec = TABLE_SCHEMAS[first]
+         if not spec.get("headings") or any(h["text"]==heading for h in spec.get("headings",[])):
+             return first
+
+     # 2) any other schema with explicit headings
+     for name, spec in TABLE_SCHEMAS.items():
+         if any(h["text"]==heading for h in spec.get("headings",[])):
+             return name
+
+     # 3) by two-column 'columns'
+     for name, spec in TABLE_SCHEMAS.items():
+         cols = spec.get("columns")
+         if cols and all(col in headers for col in cols):
+             return name
+
+     # 4) row1 tables
+     for name, spec in TABLE_SCHEMAS.items():
+         if spec["orientation"]=="row1" and all(lbl in headers for lbl in spec["labels"]):
+             return name
+
+     # 5) left tables
+     for name, spec in TABLE_SCHEMAS.items():
+         if spec["orientation"]=="left" and all(lbl in col0 for lbl in spec["labels"]):
+             return name

+     return None

+ def extract_red_text(path):
      doc = Document(path)
+     out = {}

+     # --- TABLES ---
+     for tbl in doc.tables:
+         schema = match_table_schema(tbl)
+         if not schema:
+             continue
+         spec = TABLE_SCHEMAS[schema]
+
+         # handle the special split_labels (row1 only)
+         if spec.get("split_labels") and spec["orientation"]=="row1":
+             cell_txt = tbl.rows[1].cells[0].text.strip()
+             first_lbl = spec["split_labels"][0]
+             narrative, _, tail = cell_txt.partition(first_lbl)
+             narrative = narrative.strip()
+             if narrative:
+                 out.setdefault(schema, {}).setdefault(spec["labels"][0], []).append(narrative)
+
+             for i, lbl in enumerate(spec["split_labels"]):
+                 nxt = spec["split_labels"][i+1] if i+1<len(spec["split_labels"]) else None
+                 pattern = rf"{re.escape(lbl)}\s*(.+?)(?={re.escape(nxt)})" if nxt else rf"{re.escape(lbl)}\s*(.+)$"
+                 m = re.search(pattern, cell_txt, flags=re.DOTALL)
+                 if m:
+                     val = m.group(1).strip()
+                     out.setdefault(schema, {}).setdefault(lbl, []).append(val)
+             continue

+         # normal tables
+         labels = spec["labels"] + [schema]
+         collected = {lbl: [] for lbl in labels}
+         seen = {lbl: set() for lbl in labels}
+         by_col = (spec["orientation"]=="row1")
+
+         rows = tbl.rows[1:]
+         for ri, row in enumerate(rows):
+             for ci, cell in enumerate(row.cells):
+                 red_txt = "".join(run.text for p in cell.paragraphs for run in p.runs if is_red_font(run)).strip()
+                 if not red_txt:
+                     continue
+
+                 if by_col:
+                     # column header → your defined label
+                     lbl = spec["labels"][ci] if ci < len(spec["labels"]) else schema
+                 else:
+                     # first cell in this row → must be one of your labels
+                     raw = row.cells[0].text.strip()
+                     lbl = raw if raw in spec["labels"] else schema
+
+                 if red_txt not in seen[lbl]:
+                     seen[lbl].add(red_txt)
+                     collected[lbl].append(red_txt)
+
+         # keep only non-empty
+         data = {k:v for k,v in collected.items() if v}
+         if data:
+             out[schema] = data
+
+     # --- PARAGRAPHS ---
+     paras = {}
+     for idx, para in enumerate(doc.paragraphs):
+         red_txt = "".join(r.text for r in para.runs if is_red_font(r)).strip()
+         if not red_txt:
+             continue

+         # find nearest heading above
+         context = None
+         for j in range(idx-1, -1, -1):
+             txt = doc.paragraphs[j].text.strip()
+             if txt and any(re.search(p, txt) for p in HEADING_PATTERNS["main"]+HEADING_PATTERNS["sub"]):
+                 context = txt
+                 break

+         # fallback for date line
+         if not context and re.fullmatch(PARAGRAPH_PATTERNS["date_line"], red_txt):
+             context = "Date"

+         paras.setdefault(context or "(para)", []).append(red_txt)

+     if paras:
+         out["paragraphs"] = paras

+     return out

  if __name__ == "__main__":
+     fn = sys.argv[1] if len(sys.argv)>1 else "test.docx"
+     print(json.dumps(extract_red_text(fn), indent=2, ensure_ascii=False))
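
For reference, a minimal usage sketch of the rewritten script (not part of the commit): it assumes a master_key.py exporting TABLE_SCHEMAS, HEADING_PATTERNS and PARAGRAPH_PATTERNS is importable alongside extract_red_text.py, and that a sample document such as test.docx exists; the file names below are illustrative only.

# Hypothetical driver: call the new extract_red_text() directly and dump its JSON,
# mirroring what the new __main__ block does when run from the command line
# (python extract_red_text.py input.docx > red_text.json).
import json
from extract_red_text import extract_red_text  # module added in this commit

result = extract_red_text("test.docx")  # any .docx containing red-font runs
print(json.dumps(result, indent=2, ensure_ascii=False))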