Spaces:
				
			
			
	
			
			
		Sleeping
		
	
	
	
			
			
	
	
	
	
		
		
		Sleeping
		
	Update app.py
Browse files- patched the answer function to showcase only the top match's text.
- `matches` still returns the full top-k results with scores for debugging.
- Cleaner, professional output
    	
        app.py
    CHANGED
    
    | 
         @@ -62,28 +62,25 @@ def answer(q, k, max_context_chars): 
     | 
|
| 62 | 
         
             
                    return {"answer": "Index is empty. Ingest first.", "matches": []}
         
     | 
| 63 | 
         
             
                qv = _normalize(_model.encode([q], convert_to_numpy=True))
         
     | 
| 64 | 
         
             
                D, I = _index.search(qv, int(k))
         
     | 
| 
         | 
|
| 65 | 
         
             
                matches = []
         
     | 
| 66 | 
         
             
                for i, s in zip(I[0].tolist(), D[0].tolist()):
         
     | 
| 67 | 
         
            -
                    if i < 0: 
     | 
| 
         | 
|
| 68 | 
         
             
                    matches.append({
         
     | 
| 69 | 
         
             
                        "id": _ids[i],
         
     | 
| 70 | 
         
             
                        "score": float(s),
         
     | 
| 71 | 
         
             
                        "text": _texts[i],
         
     | 
| 72 | 
         
             
                        "meta": _metas[i]
         
     | 
| 73 | 
         
             
                    })
         
     | 
| 74 | 
         
            -
             
     | 
| 75 | 
         
            -
                 
     | 
| 76 | 
         
            -
                for m in matches:
         
     | 
| 77 | 
         
            -
                    t = m["text"]; cut = min(len(t), max_context_chars - total)
         
     | 
| 78 | 
         
            -
                    if cut <= 0: break
         
     | 
| 79 | 
         
            -
                    blob.append(t[:cut]); total += cut
         
     | 
| 80 | 
         
            -
                    if total >= max_context_chars: break
         
     | 
| 81 | 
         
            -
                if not blob:
         
     | 
| 82 | 
         
             
                    out = "No relevant context."
         
     | 
| 83 | 
         
             
                else:
         
     | 
| 84 | 
         
            -
                     
     | 
| 85 | 
         
            -
                     
     | 
| 86 | 
         
            -
                    out = "Based on retrieved context:\n- " 
     | 
| 
         | 
|
| 87 | 
         
             
                return {"answer": out, "matches": matches}
         
     | 
| 88 | 
         | 
| 89 | 
         
             
            with gr.Blocks(title="RAG-as-a-Service") as demo:
         
     | 
| 
         | 
|
| 62 | 
         
             
                    return {"answer": "Index is empty. Ingest first.", "matches": []}
         
     | 
| 63 | 
         
             
                qv = _normalize(_model.encode([q], convert_to_numpy=True))
         
     | 
| 64 | 
         
             
                D, I = _index.search(qv, int(k))
         
     | 
| 65 | 
         
            +
             
     | 
| 66 | 
         
             
                matches = []
         
     | 
| 67 | 
         
             
                for i, s in zip(I[0].tolist(), D[0].tolist()):
         
     | 
| 68 | 
         
            +
                    if i < 0:
         
     | 
| 69 | 
         
            +
                        continue
         
     | 
| 70 | 
         
             
                    matches.append({
         
     | 
| 71 | 
         
             
                        "id": _ids[i],
         
     | 
| 72 | 
         
             
                        "score": float(s),
         
     | 
| 73 | 
         
             
                        "text": _texts[i],
         
     | 
| 74 | 
         
             
                        "meta": _metas[i]
         
     | 
| 75 | 
         
             
                    })
         
     | 
| 76 | 
         
            +
             
     | 
| 77 | 
         
            +
                if not matches:
         
     | 
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 78 | 
         
             
                    out = "No relevant context."
         
     | 
| 79 | 
         
             
                else:
         
     | 
| 80 | 
         
            +
                    # 👇 only use the top match for the answer
         
     | 
| 81 | 
         
            +
                    top = matches[0]["text"]
         
     | 
| 82 | 
         
            +
                    out = f"Based on retrieved context:\n- {top}"
         
     | 
| 83 | 
         
            +
             
     | 
| 84 | 
         
             
                return {"answer": out, "matches": matches}
         
     | 
| 85 | 
         | 
| 86 | 
         
             
            with gr.Blocks(title="RAG-as-a-Service") as demo:
         
     |