Update kig_core/graph_operations.py
kig_core/graph_operations.py
CHANGED
@@ -47,7 +47,7 @@ def generate_cypher_auto(question: str) -> str:
         | StrOutputParser()
         | extract_cypher
     )
-    return
+    return invoke_llm(chain,question)
 
 def generate_cypher_guided(question: str, plan_step: int) -> str:
     """Generates Cypher using the 'guided' method based on concepts."""
@@ -64,7 +64,7 @@ def generate_cypher_guided(question: str, plan_step: int) -> str:
         | concept_llm
         | StrOutputParser()
     )
-    selected_concept =
+    selected_concept = invoke_llm(concept_chain,{
         "question": question,
         "concepts": "\n".join(concepts)
     }).strip()
@@ -179,7 +179,7 @@ def evaluate_documents(
             formatted_doc = format_doc_for_llm(doc)
             if not formatted_doc.strip(): continue
             try:
-                result =
+                result = invoke_llm(binary_grader,{"question": query, "document": formatted_doc})
                 logger.debug(f"Binary grader result for doc '{doc.get('title', 'N/A')}': {result}")
                 if result and 'yes' in result.lower():
                     valid_docs_with_scores.append((doc, 1.0))  # Score 1.0 for relevant
@@ -193,7 +193,7 @@ def evaluate_documents(
             formatted_doc = format_doc_for_llm(doc)
             if not formatted_doc.strip(): continue
             try:
-                result: GradeDocumentsScore =
+                result: GradeDocumentsScore = invoke_llm(score_grader,{"query": query, "document": formatted_doc})
                 logger.debug(f"Score grader result for doc '{doc.get('title', 'N/A')}': Score={result.score}, Rationale={result.rationale}")
                 if result.score >= settings.eval_threshold:
                     valid_docs_with_scores.append((doc, result.score))
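The pattern across all four hunks is the same: direct invocation of a LangChain runnable (the Cypher-generation chain, the concept-selection chain, and the two document graders) is routed through a shared invoke_llm(runnable, inputs) helper. The helper's definition is not part of this diff; the sketch below is only an assumption about what such a wrapper could look like, built on LangChain's standard Runnable.invoke interface. The retry count, backoff, and logging details are invented for illustration.

    import logging
    import time
    from typing import Any

    logger = logging.getLogger(__name__)

    def invoke_llm(runnable: Any, inputs: Any, max_retries: int = 3, backoff_s: float = 2.0) -> Any:
        """Hypothetical helper: invoke a LangChain runnable with basic retry and logging."""
        last_exc: Exception | None = None
        for attempt in range(1, max_retries + 1):
            try:
                # Runnable.invoke accepts a single input (a string or a dict of
                # prompt variables), matching the call sites shown in the diff.
                return runnable.invoke(inputs)
            except Exception as exc:  # broad on purpose: wraps arbitrary chains/graders
                last_exc = exc
                logger.warning("LLM invocation failed (attempt %d/%d): %s", attempt, max_retries, exc)
                time.sleep(backoff_s * attempt)
        raise last_exc  # re-raise the last failure after exhausting retries

With a wrapper along these lines, the call sites keep their original semantics (the return value is whatever the chain or grader produces) while error handling and retries live in one place.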