"""
Context Enrichment Module for Medical RAG

This module enriches retrieved documents with surrounding context (adjacent pages)
to provide comprehensive information for expert medical professionals.
"""

from typing import List, Dict, Optional
from langchain.schema import Document
from .config import logger
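
# NOTE: assumed chunk metadata schema (inferred from the keys this module
# reads; the ingestion pipeline may differ). Each cached chunk is expected
# to carry at least the following fields, e.g.:
#
#     Document(
#         page_content="...",
#         metadata={
#             "source": "guideline.pdf",    # source filename (illustrative)
#             "provider": "ProviderName",   # guideline provider (illustrative)
#             "disease": "DiseaseName",     # disease covered (illustrative)
#             "page_number": 12,            # page index, int or numeric str
#         },
#     )
#
# Chunks missing these keys fall back to 'unknown' / page 1 and may not be
# matched for enrichment.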


class ContextEnricher:
    """
    Enriches retrieved documents with surrounding pages for richer context.
    """
    
    def __init__(self, cache_size: int = 100):
        """
        Initialize context enricher with document cache.
        
        Args:
            cache_size: Maximum number of source documents to cache
        """
        self._document_cache: Dict[str, List[Document]] = {}
        self._cache_size = cache_size
        self._all_chunks_cache: Optional[List[Document]] = None  # Cache all chunks to avoid reloading
    
    def enrich_documents(
        self,
        retrieved_docs: List[Document],
        pages_before: int = 1,
        pages_after: int = 1,
        max_enriched_docs: int = 5
    ) -> List[Document]:
        """
        Enrich retrieved documents by adding separate context pages.
        
        Args:
            retrieved_docs: List of retrieved documents
            pages_before: Number of pages to include before each document
            pages_after: Number of pages to include after each document
            max_enriched_docs: Maximum number of documents to enrich (top results)
        
        Returns:
            List with original documents + separate context page documents
        """
        if not retrieved_docs:
            return []
        
        result_docs = []
        processed_sources = set()
        enriched_count = 0
        
        # Only enrich top documents to avoid overwhelming context
        docs_to_enrich = retrieved_docs[:max_enriched_docs]
        
        for doc in docs_to_enrich:
            try:
                # Get source information
                source = doc.metadata.get('source', 'unknown')
                page_num = doc.metadata.get('page_number', 1)
                
                # Skip if already processed this source-page combination
                source_page_key = f"{source}_{page_num}"
                if source_page_key in processed_sources:
                    continue
                
                processed_sources.add(source_page_key)
                
                # Get surrounding pages
                surrounding_docs = self._get_surrounding_pages(
                    doc, 
                    pages_before, 
                    pages_after
                )
                
                if surrounding_docs:
                    # Add separate documents for each page
                    page_docs = self._create_separate_page_documents(
                        doc, 
                        surrounding_docs,
                        pages_before,
                        pages_after
                    )
                    result_docs.extend(page_docs)
                    enriched_count += 1
                    
                    # Log enrichment details
                    page_numbers = [int(d.metadata.get('page_number', 0)) for d in page_docs]
                    logger.debug(f"Enriched {source} page {page_num} with pages: {page_numbers}")
                else:
                    # No surrounding pages found, add original with empty enrichment metadata
                    original_with_metadata = self._add_empty_enrichment_metadata(doc)
                    result_docs.append(original_with_metadata)
                    
            except Exception as e:
                logger.warning(f"Could not enrich document from {doc.metadata.get('source')}: {e}")
                original_with_metadata = self._add_empty_enrichment_metadata(doc)
                result_docs.append(original_with_metadata)
        
        # Add remaining documents without enrichment
        for doc in retrieved_docs[max_enriched_docs:]:
            original_with_metadata = self._add_empty_enrichment_metadata(doc)
            result_docs.append(original_with_metadata)
        
        logger.info(f"Enriched {enriched_count} documents with surrounding context pages")
        return result_docs
    
    def _get_surrounding_pages(
        self,
        doc: Document,
        pages_before: int,
        pages_after: int
    ) -> List[Document]:
        """
        Get surrounding pages for a document.
        
        Args:
            doc: Original document
            pages_before: Number of pages before
            pages_after: Number of pages after
        
        Returns:
            List of surrounding documents (including original), deduplicated by page number
        """
        source = doc.metadata.get('source', 'unknown')
        page_num = doc.metadata.get('page_number', 1)
        provider = doc.metadata.get('provider', 'unknown')
        disease = doc.metadata.get('disease', 'unknown')
        
        # Try to get full document from cache or load it
        full_doc_pages = self._get_full_document(source, provider, disease)
        
        if not full_doc_pages:
            return []
        
        # Find the target page and surrounding pages
        try:
            target_page = int(page_num)
        except (TypeError, ValueError):
            target_page = 1
        
        # Use a dict to deduplicate by page number (keep first occurrence)
        pages_dict = {}
        
        for page_doc in full_doc_pages:
            doc_page_num = page_doc.metadata.get('page_number', 0)
            if isinstance(doc_page_num, str):
                try:
                    doc_page_num = int(doc_page_num)
                except ValueError:
                    continue
            
            # Include pages within range
            if target_page - pages_before <= doc_page_num <= target_page + pages_after:
                # Only add if not already present (deduplication)
                if doc_page_num not in pages_dict:
                    pages_dict[doc_page_num] = page_doc
        
        # Return sorted by page number
        surrounding = [pages_dict[pn] for pn in sorted(pages_dict.keys())]
        
        return surrounding
    
    def _get_full_document(
        self,
        source: str,
        provider: str,
        disease: str
    ) -> Optional[List[Document]]:
        """
        Get full document pages from chunks cache.
        
        Args:
            source: Source filename
            provider: Provider name
            disease: Disease name
        
        Returns:
            List of all pages in the document, or None if not found
        """
        cache_key = f"{provider}_{disease}_{source}"
        
        # Check cache
        if cache_key in self._document_cache:
            return self._document_cache[cache_key]
        
        # Load from chunks cache instead of trying to reload PDFs
        try:
            from . import utils
            
            # Load all chunks (use cached version to avoid redundant loading)
            if self._all_chunks_cache is None:
                self._all_chunks_cache = utils.load_chunks()
                if self._all_chunks_cache:
                    logger.debug(f"Loaded {len(self._all_chunks_cache)} chunks into enricher cache")
            
            all_chunks = self._all_chunks_cache
            if not all_chunks:
                logger.debug(f"No chunks available for enrichment")
                return None
            
            # Filter chunks for this specific document
            doc_pages = []
            for chunk in all_chunks:
                chunk_source = chunk.metadata.get('source', '')
                chunk_provider = chunk.metadata.get('provider', '')
                chunk_disease = chunk.metadata.get('disease', '')
                
                # Match by source, provider, and disease
                if (chunk_source == source and 
                    chunk_provider == provider and 
                    chunk_disease == disease):
                    doc_pages.append(chunk)
            
            if not doc_pages:
                logger.debug(f"Could not find chunks for document: {source} (Provider: {provider}, Disease: {disease})")
                return None
            
            # Sort by page number
            doc_pages.sort(key=lambda d: int(d.metadata.get('page_number', 0)))
            
            # Cache it (with size limit)
            if len(self._document_cache) >= self._cache_size:
                # Remove oldest entry
                self._document_cache.pop(next(iter(self._document_cache)))
            
            self._document_cache[cache_key] = doc_pages
            logger.debug(f"Loaded {len(doc_pages)} pages for {source} from chunks cache")
            return doc_pages
            
        except Exception as e:
            logger.warning(f"Error loading document from chunks cache {source}: {e}")
            return None
    
    def _create_separate_page_documents(
        self,
        original_doc: Document,
        surrounding_docs: List[Document],
        pages_before: int,
        pages_after: int
    ) -> List[Document]:
        """
        Create separate document objects for original page and context pages.
        
        Args:
            original_doc: Original retrieved document
            surrounding_docs: List of surrounding documents
            pages_before: Number of pages before
            pages_after: Number of pages after
        
        Returns:
            List of separate documents (preceding context pages, the original page, and following context pages)
        """
        # Sort by page number
        sorted_docs = sorted(
            surrounding_docs,
            key=lambda d: int(d.metadata.get('page_number', 0))
        )
        
        original_page = int(original_doc.metadata.get('page_number', 1))
        result_docs = []
        
        for doc in sorted_docs:
            page_num = int(doc.metadata.get('page_number', 0))
            
            # Determine if this is a context page or the original page
            is_context_page = (page_num != original_page)
            
            # Create document with appropriate metadata
            page_doc = Document(
                page_content=doc.page_content,
                metadata={
                    **doc.metadata,
                    'context_enrichment': is_context_page,
                    'enriched': False,
                    'pages_included': [],
                    'primary_page': None,
                    'context_pages_before': None,
                    'context_pages_after': None,
                }
            )
            
            result_docs.append(page_doc)
        
        return result_docs
    
    def _add_empty_enrichment_metadata(self, doc: Document) -> Document:
        """
        Add empty enrichment metadata fields to a document.
        
        Args:
            doc: Original document
        
        Returns:
            Document with enrichment metadata fields set to default values
        """
        return Document(
            page_content=doc.page_content,
            metadata={
                **doc.metadata,
                'enriched': False,
                'pages_included': [],
                'primary_page': None,
                'context_pages_before': None,
                'context_pages_after': None,
            }
        )


# Global enricher instance
_context_enricher = ContextEnricher(cache_size=100)


def enrich_retrieved_documents(
    documents: List[Document],
    pages_before: int = 1,
    pages_after: int = 1,
    max_enriched: int = 5
) -> List[Document]:
    """
    Convenience function to enrich retrieved documents.
    
    Args:
        documents: Retrieved documents
        pages_before: Number of pages to include before each document
        pages_after: Number of pages to include after each document
        max_enriched: Maximum number of documents to enrich
    
    Returns:
        Enriched documents with surrounding context
    """
    return _context_enricher.enrich_documents(
        documents,
        pages_before=pages_before,
        pages_after=pages_after,
        max_enriched_docs=max_enriched
    )


def get_context_enricher() -> ContextEnricher:
    """Get the global context enricher instance."""
    return _context_enricher
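

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): how a caller might enrich retriever
# output. `retriever` and `query` are hypothetical placeholders for whatever
# retrieval step produces the langchain Documents in this project.
#
#     retrieved = retriever.get_relevant_documents(query)
#     enriched = enrich_retrieved_documents(
#         retrieved,
#         pages_before=1,
#         pages_after=1,
#         max_enriched=5,
#     )
#
# `enriched` then contains the top retrieved pages plus separate Documents
# for their adjacent pages, followed by the remaining retrieved documents
# carrying default enrichment metadata.
# ---------------------------------------------------------------------------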