import os
import asyncio

import aiohttp
from bs4 import BeautifulSoup
from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException, Query

# Load configuration from a .env file and fail fast if the LLM key is missing.
load_dotenv()
LLM_API_KEY = os.getenv("LLM_API_KEY")

if not LLM_API_KEY:
    raise RuntimeError("LLM_API_KEY must be set in a .env file.")
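
# A sample .env file might look like this; the key value is a placeholder,
# not a real credential:
#
#   LLM_API_KEY=your-inference-api-key
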
# Snapzion search endpoint and the browser-style headers sent with each request.
SNAPZION_API_URL = "https://search.snapzion.com/get-snippets"
SNAPZION_HEADERS = {
    'accept': '*/*',
    'accept-language': 'en-US,en;q=0.9',
    'content-type': 'application/json',
    'origin': 'https://search.snapzion.com',
    'priority': 'u=1, i',
    'referer': 'https://search.snapzion.com/docs',
    'sec-ch-ua': '"Chromium";v="140", "Not=A?Brand";v="24", "Google Chrome";v="140"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-origin',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/140.0.0.0 Safari/537.36',
}

# LLM endpoint and model used to synthesize the final answer.
LLM_API_URL = "https://api.inference.net/v1/chat/completions"
LLM_MODEL = "meta-llama/llama-3.1-8b-instruct/fp-8"

app = FastAPI(
    title="AI Search Snippets API (Snapzion)",
    description="Provides AI-generated summaries from Snapzion search results.",
    version="1.0.1",
)


async def call_snapzion_search(session: aiohttp.ClientSession, query: str) -> list:
    """Calls the Snapzion search API and returns a list of organic results."""
    try:
        async with session.post(
            SNAPZION_API_URL,
            headers=SNAPZION_HEADERS,
            json={"query": query},
            timeout=aiohttp.ClientTimeout(total=15),
        ) as response:
            response.raise_for_status()
            data = await response.json()
            return data.get("organic_results", [])
    except Exception as e:
        # Surface upstream failures as 503 so clients can tell them apart from app errors.
        raise HTTPException(status_code=503, detail=f"Search service (Snapzion) failed: {e}")


async def scrape_url(session: aiohttp.ClientSession, url: str) -> str:
    """Asynchronously scrapes the primary text content from a URL, ignoring PDFs."""
    if url.lower().endswith('.pdf'):
        return "Content is a PDF, which cannot be scraped."
    try:
        async with session.get(url, timeout=aiohttp.ClientTimeout(total=10)) as response:
            if response.status != 200:
                return f"Error: Failed to fetch {url} with status {response.status}"
            html = await response.text()
            soup = BeautifulSoup(html, "html.parser")
            # Strip boilerplate elements so only the page's main text remains.
            for tag in soup(['script', 'style', 'nav', 'footer', 'header', 'aside']):
                tag.decompose()
            return " ".join(soup.stripped_strings)
    except Exception as e:
        return f"Error: Could not scrape {url}. Reason: {e}"


async def get_ai_snippet(query: str, context: str, sources: list) -> str:
    """Generates a synthesized answer using an LLM based on the provided context."""
    headers = {"Authorization": f"Bearer {LLM_API_KEY}", "Content-Type": "application/json"}
    source_list_str = "\n".join(
        f"[{i + 1}] {source['title']}: {source['link']}" for i, source in enumerate(sources)
    )

    prompt = f"""
Based *only* on the provided context from web pages, provide a concise, factual answer to the user's query. Cite every sentence with the corresponding source number(s), like `[1]`, `[2]`, or `[1, 3]`.

Sources:
{source_list_str}

Context:
---
{context}
---

User Query: "{query}"

Answer with citations:
"""
    data = {"model": LLM_MODEL, "messages": [{"role": "user", "content": prompt}], "max_tokens": 500}

    async with aiohttp.ClientSession() as session:
        try:
            async with session.post(
                LLM_API_URL, headers=headers, json=data, timeout=aiohttp.ClientTimeout(total=45)
            ) as response:
                response.raise_for_status()
                result = await response.json()
                return result['choices'][0]['message']['content']
        except Exception as e:
            # A failed LLM call is an upstream error, hence 502 (bad gateway).
            raise HTTPException(status_code=502, detail=f"Failed to get response from LLM: {e}")


@app.get("/search") |
|
|
async def ai_search(q: str = Query(..., min_length=3, description="The search query.")): |
|
|
""" |
|
|
Performs an AI-powered search using Snapzion. It finds relevant web pages, |
|
|
scrapes their content, and generates a synthesized answer with citations. |
|
|
""" |
|
|
async with aiohttp.ClientSession() as session: |
|
|
|
|
|
search_results = await call_snapzion_search(session, q) |
|
|
if not search_results: |
|
|
raise HTTPException(status_code=404, detail="Could not find any relevant sources for the query.") |
|
|
|
|
|
|
|
|
sources = search_results[:4] |
|
|
|
|
|
|
|
|
scrape_tasks = [scrape_url(session, source["link"]) for source in sources] |
|
|
scraped_contents = await asyncio.gather(*scrape_tasks) |
|
|
|
|
|
|
|
|
full_context = "\n\n".join( |
|
|
f"Source [{i+1}] (from {sources[i]['link']}):\nOriginal Snippet: {sources[i]['snippet']}\nScraped Content: {content}" |
|
|
for i, content in enumerate(scraped_contents) if not content.startswith("Error:") |
|
|
) |
|
|
|
|
|
if not full_context.strip(): |
|
|
raise HTTPException(status_code=500, detail="Failed to scrape content from all available sources.") |
|
|
|
|
|
|
|
|
ai_summary = await get_ai_snippet(q, full_context, sources) |
|
|
|
|
|
return {"ai_summary": ai_summary, "sources": sources} |
|
|
|
|
|
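
# Example request, assuming the server is running locally on port 8000
# (the query string is illustrative):
#
#   curl "http://127.0.0.1:8000/search?q=what+is+asyncio+in+python"

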
@app.get("/") |
|
|
def root(): |
|
|
return {"message": "AI Search API is active. Use the /docs endpoint to test."} |