rkihacker committed
Commit 2a0098d · verified · 1 Parent(s): e73b280

Update main.py

Files changed (1)
  1. main.py +121 -66
main.py CHANGED
@@ -1,85 +1,140 @@
  import os
- from fastapi import FastAPI, HTTPException
- import requests
- from bs4 import BeautifulSoup
  import aiohttp

  # --- Configuration ---
- # It's recommended to use environment variables for sensitive data like API keys.
- # Replace with your actual API key and endpoint.
- LLM_API_URL = os.getenv("LLM_API_URL", "https://api.inference.net/v1/chat/completions")
- LLM_API_KEY = os.getenv("LLM_API_KEY", "inference-00050468cc1c4a20bd5ca0997c752329") # Replace with your key
  LLM_MODEL = "meta-llama/llama-3.1-8b-instruct/fp-8"

  app = FastAPI(
-     title="Web Scraper and AI Processor",
-     description="An API to scrape web content and process it with a large language model.",
-     version="1.0.0"
  )

- async def scrape_url(session, url: str):
-     """Asynchronously scrapes the text content from a given URL."""
      try:
-         async with session.get(url, timeout=10) as response:
              response.raise_for_status()
-             html_content = await response.text()
-             soup = BeautifulSoup(html_content, "html.parser")
-             # Remove script and style elements
-             for script_or_style in soup(["script", "style"]):
-                 script_or_style.decompose()
-             # Get text and clean it up
-             text = soup.get_text()
-             lines = (line.strip() for line in text.splitlines())
-             chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
-             return " ".join(chunk for chunk in chunks if chunk)
-     except requests.exceptions.RequestException as e:
-         raise HTTPException(status_code=400, detail=f"Error fetching the URL: {e}")
-
- async def process_with_llm(session, content: str, query: str):
-     """Sends the scraped content and a query to the LLM for processing."""
-     headers = {
-         "Content-Type": "application/json",
-         "Authorization": f"Bearer {LLM_API_KEY}",
-     }
-     data = {
-         "messages": [
-             {
-                 "role": "system",
-                 "content": "You are a helpful assistant that analyzes web content."
-             },
-             {
-                 "role": "user",
-                 "content": f"Based on the following content, please answer this question: '{query}'\n\nContent:\n{content}"
-             }
-         ],
-         "model": LLM_MODEL,
-         "stream": False # Set to False for a single response
-     }
      try:
-         async with session.post(LLM_API_URL, headers=headers, json=data, timeout=30) as response:
-             response.raise_for_status()
-             return await response.json()
-     except aiohttp.ClientError as e:
-         raise HTTPException(status_code=500, detail=f"Error communicating with the LLM API: {e}")

- @app.post("/scrape-and-process/")
- async def scrape_and_process(url: str, query: str):
      """
-     Scrapes a URL, sends the content to a large language model with a query,
-     and returns the model's response.
      """
      async with aiohttp.ClientSession() as session:
-         scraped_content = await scrape_url(session, url)
-         if not scraped_content:
-             raise HTTPException(status_code=404, detail="Could not scrape any content from the URL.")

-         llm_response = await process_with_llm(session, scraped_content, query)
-         return llm_response

- @app.get("/")
- def read_root():
-     return {"message": "Welcome to the Web Scraper and AI Processor API."}

- if __name__ == "__main__":
-     import uvicorn
-     uvicorn.run(app, host="0.0.0.0", port=8000)

  import os
+ import asyncio
+ from fastapi import FastAPI, HTTPException, Query
+ from dotenv import load_dotenv
  import aiohttp
+ from bs4 import BeautifulSoup

  # --- Configuration ---
+ load_dotenv()
+ LLM_API_KEY = os.getenv("LLM_API_KEY")
+
+ if not LLM_API_KEY:
+     raise RuntimeError("LLM_API_KEY must be set in a .env file.")
+
+ # Snapzion Search API Configuration
+ SNAPZION_API_URL = "https://search.snapzion.com/get-snippets"
+ SNAPZION_HEADERS = {
+     'accept': '*/*',
+     'accept-language': 'en-US,en;q=0.9',
+     'content-type': 'application/json',
+     'origin': 'https://search.snapzion.com',
+     'priority': 'u=1, i',
+     'referer': 'https://search.snapzion.com/docs',
+     'sec-ch-ua': '"Chromium";v="140", "Not=A?Brand";v="24", "Google Chrome";v="140"',
+     'sec-ch-ua-mobile': '?0',
+     'sec-ch-ua-platform': '"Windows"',
+     'sec-fetch-dest': 'empty',
+     'sec-fetch-mode': 'cors',
+     'sec-fetch-site': 'same-origin',
+     'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/140.0.0.0 Safari/537.36',
+ }
+
+ # LLM Configuration
+ LLM_API_URL = "https://api.inference.net/v1/chat/completions"
  LLM_MODEL = "meta-llama/llama-3.1-8b-instruct/fp-8"

+ # --- FastAPI App Initialization ---
  app = FastAPI(
+     title="AI Search Snippets API (Snapzion)",
+     description="Provides AI-generated summaries from Snapzion search results.",
+     version="1.0.1"
  )

+ # --- Core Asynchronous Functions ---
+
+ async def call_snapzion_search(session: aiohttp.ClientSession, query: str) -> list:
+     """Calls the Snapzion search API and returns a list of organic results."""
      try:
+         async with session.post(SNAPZION_API_URL, headers=SNAPZION_HEADERS, json={"query": query}, timeout=15) as response:
              response.raise_for_status()
+             data = await response.json()
+             return data.get("organic_results", [])
+     except Exception as e:
+         raise HTTPException(status_code=503, detail=f"Search service (Snapzion) failed: {e}")
+
+ async def scrape_url(session: aiohttp.ClientSession, url: str) -> str:
+     """Asynchronously scrapes the primary text content from a URL, ignoring PDFs."""
+     if url.lower().endswith('.pdf'):
+         return "Content is a PDF, which cannot be scraped."
      try:
+         async with session.get(url, timeout=10) as response:
+             if response.status != 200:
+                 return f"Error: Failed to fetch {url} with status {response.status}"
+             html = await response.text()
+             soup = BeautifulSoup(html, "html.parser")
+             for tag in soup(['script', 'style', 'nav', 'footer', 'header', 'aside']):
+                 tag.decompose()
+             return " ".join(soup.stripped_strings)
+     except Exception as e:
+         return f"Error: Could not scrape {url}. Reason: {e}"
+
+ async def get_ai_snippet(query: str, context: str, sources: list) -> str:
+     """Generates a synthesized answer using an LLM based on the provided context."""
+     headers = {"Authorization": f"Bearer {LLM_API_KEY}", "Content-Type": "application/json"}
+     source_list_str = "\n".join([f"[{i+1}] {source['title']}: {source['link']}" for i, source in enumerate(sources)])
+
+     prompt = f"""
+     Based *only* on the provided context from web pages, provide a concise, factual answer to the user's query. Cite every sentence with the corresponding source number(s), like `[1]`, `[2]`, or `[1, 3]`.
+
+     Sources:
+     {source_list_str}

+     Context:
+     ---
+     {context}
+     ---
+
+     User Query: "{query}"
+
+     Answer with citations:
+     """
+     data = {"model": LLM_MODEL, "messages": [{"role": "user", "content": prompt}], "max_tokens": 500}
+
+     async with aiohttp.ClientSession() as session:
+         try:
+             async with session.post(LLM_API_URL, headers=headers, json=data, timeout=45) as response:
+                 response.raise_for_status()
+                 result = await response.json()
+                 return result['choices'][0]['message']['content']
+         except Exception as e:
+             raise HTTPException(status_code=502, detail=f"Failed to get response from LLM: {e}")
+
+ # --- API Endpoint ---
+
+ @app.get("/search")
+ async def ai_search(q: str = Query(..., min_length=3, description="The search query.")):
      """
+     Performs an AI-powered search using Snapzion. It finds relevant web pages,
+     scrapes their content, and generates a synthesized answer with citations.
      """
      async with aiohttp.ClientSession() as session:
+         # 1. Search for relevant web pages using Snapzion
+         search_results = await call_snapzion_search(session, q)
+         if not search_results:
+             raise HTTPException(status_code=404, detail="Could not find any relevant sources for the query.")
+
+         # Limit to the top 4 results for speed and relevance
+         sources = search_results[:4]

+         # 2. Scrape all pages concurrently for speed
+         scrape_tasks = [scrape_url(session, source["link"]) for source in sources]
+         scraped_contents = await asyncio.gather(*scrape_tasks)

+         # 3. Combine content and snippets for a rich context
+         full_context = "\n\n".join(
+             f"Source [{i+1}] (from {sources[i]['link']}):\nOriginal Snippet: {sources[i]['snippet']}\nScraped Content: {content}"
+             for i, content in enumerate(scraped_contents) if not content.startswith("Error:")
+         )
+
+         if not full_context.strip():
+             raise HTTPException(status_code=500, detail="Failed to scrape content from all available sources.")
+
+         # 4. Generate the final AI snippet
+         ai_summary = await get_ai_snippet(q, full_context, sources)

+         return {"ai_summary": ai_summary, "sources": sources}
+
+ @app.get("/")
+ def root():
+     return {"message": "AI Search API is active. Use the /docs endpoint to test."}