Spaces:
Running
Running
| import gradio as gr | |
| import os | |
| import requests | |
| import json | |
| import time | |
| from dotenv import load_dotenv | |
| # Load .env file (if it exists) | |
| load_dotenv() | |
def create_deepseek_interface():
    """Build and return the Gradio Blocks app: DeepSeek chat with optional web research."""
    # Pull API credentials from the environment (populated from .env when present).
    api_key = os.getenv("FW_API_KEY")
    serphouse_api_key = os.getenv("SERPHOUSE_API_KEY")

    # Surface missing configuration early in the server logs; the UI also
    # reflects key status, but these warnings help headless debugging.
    if not api_key:
        print("Warning: FW_API_KEY environment variable is not set.")
    if not serphouse_api_key:
        print("Warning: SERPHOUSE_API_KEY environment variable is not set.")
| # Keyword extraction function (LLM-based) | |
| def extract_keywords_with_llm(query): | |
| if not api_key: | |
| return "FW_API_KEY not set for LLM keyword extraction.", query | |
| # Extract keywords using LLM (DeepSeek model) | |
| url = "https://api.fireworks.ai/inference/v1/chat/completions" | |
| payload = { | |
| "model": "accounts/fireworks/models/deepseek-r1-0528", | |
| "max_tokens": 200, | |
| "temperature": 0.1, # Low temperature for consistent results | |
| "messages": [ | |
| { | |
| "role": "system", | |
| "content": "Extract key search terms from the user's question that would be effective for web searches. Provide these as a search query with words separated by spaces only, without commas. For example: 'Prime Minister Han Duck-soo impeachment results'" | |
| }, | |
| { | |
| "role": "user", | |
| "content": query | |
| } | |
| ] | |
| } | |
| headers = { | |
| "Accept": "application/json", | |
| "Content-Type": "application/json", | |
| "Authorization": f"Bearer {api_key}" | |
| } | |
| try: | |
| response = requests.post(url, headers=headers, json=payload) | |
| response.raise_for_status() | |
| result = response.json() | |
| # Extract keywords from response | |
| keywords = result["choices"][0]["message"]["content"].strip() | |
| # Use original query if keywords are too long or improperly formatted | |
| if len(keywords) > 100: | |
| return f"Extracted keywords: {keywords}", query | |
| return f"Extracted keywords: {keywords}", keywords | |
| except Exception as e: | |
| print(f"Error during keyword extraction: {str(e)}") | |
| return f"Error during keyword extraction: {str(e)}", query | |
| # Search function using SerpHouse API | |
| def search_with_serphouse(query): | |
| if not serphouse_api_key: | |
| return "SERPHOUSE_API_KEY is not set." | |
| try: | |
| # Extract keywords | |
| extraction_result, search_query = extract_keywords_with_llm(query) | |
| print(f"Original query: {query}") | |
| print(extraction_result) | |
| # Basic GET method seems best after analyzing documentation | |
| url = "https://api.serphouse.com/serp/live" | |
| # Check if query is in Korean | |
| is_korean = any('\uAC00' <= c <= '\uD7A3' for c in search_query) | |
| # Simplified parameters | |
| params = { | |
| "q": search_query, | |
| "domain": "google.com", | |
| "serp_type": "web", # Changed to basic web search | |
| "device": "desktop", | |
| "lang": "ko" if is_korean else "en" | |
| } | |
| headers = { | |
| "Authorization": f"Bearer {serphouse_api_key}" | |
| } | |
| print(f"Calling SerpHouse API with basic GET method...") | |
| print(f"Search term: {search_query}") | |
| print(f"Request URL: {url} - Parameters: {params}") | |
| # Execute GET request | |
| response = requests.get(url, headers=headers, params=params) | |
| response.raise_for_status() | |
| print(f"SerpHouse API response status code: {response.status_code}") | |
| search_results = response.json() | |
| # Check response structure | |
| print(f"Response structure: {list(search_results.keys()) if isinstance(search_results, dict) else 'Not a dictionary'}") | |
| # Parse and format search results (in Markdown) | |
| formatted_results = [] | |
| formatted_results.append(f"## Search term: {search_query}\n\n") | |
| # Handle various possible response structures | |
| organic_results = None | |
| # Possible response structure 1 | |
| if "results" in search_results and "organic" in search_results["results"]: | |
| organic_results = search_results["results"]["organic"] | |
| # Possible response structure 2 | |
| elif "organic" in search_results: | |
| organic_results = search_results["organic"] | |
| # Possible response structure 3 (nested results) | |
| elif "results" in search_results and "results" in search_results["results"]: | |
| if "organic" in search_results["results"]["results"]: | |
| organic_results = search_results["results"]["results"]["organic"] | |
| # Process organic results if available | |
| if organic_results and len(organic_results) > 0: | |
| # Output response structure | |
| print(f"First organic result structure: {organic_results[0].keys() if len(organic_results) > 0 else 'empty'}") | |
| for i, result in enumerate(organic_results[:5], 1): # Show only top 5 results | |
| title = result.get("title", "No title") | |
| snippet = result.get("snippet", "No content") | |
| link = result.get("link", "#") | |
| displayed_link = result.get("displayed_link", link) | |
| # Format in Markdown (including number and link) | |
| formatted_results.append( | |
| f"### {i}. [{title}]({link})\n\n" | |
| f"{snippet}\n\n" | |
| f"**Source**: [{displayed_link}]({link})\n\n" | |
| f"---\n\n" | |
| ) | |
| print(f"Found {len(organic_results)} search results") | |
| return "".join(formatted_results) | |
| # Handle case with no results or unexpected structure | |
| print("No search results or unexpected response structure") | |
| print(f"Detailed response structure: {search_results.keys() if hasattr(search_results, 'keys') else 'Unclear structure'}") | |
| # Find error messages in response | |
| error_msg = "No search results found or response format is different than expected" | |
| if "error" in search_results: | |
| error_msg = search_results["error"] | |
| elif "message" in search_results: | |
| error_msg = search_results["message"] | |
| return f"## Results for '{search_query}'\n\n{error_msg}" | |
| except Exception as e: | |
| error_msg = f"Error during search: {str(e)}" | |
| print(error_msg) | |
| import traceback | |
| print(traceback.format_exc()) | |
| # Add API request details for debugging (in Markdown) | |
| return f"## Error Occurred\n\n" + \ | |
| f"An error occurred during search: **{str(e)}**\n\n" + \ | |
| f"### API Request Details:\n" + \ | |
| f"- **URL**: {url}\n" + \ | |
| f"- **Search Term**: {search_query}\n" + \ | |
| f"- **Parameters**: {params}\n" | |
| # Function to call DeepSeek API with streaming | |
| def query_deepseek_streaming(message, history, use_deep_research): | |
| if not api_key: | |
| yield history, "Environment variable FW_API_KEY is not set. Please check the environment variables on the server." | |
| return | |
| search_context = "" | |
| search_info = "" | |
| if use_deep_research: | |
| try: | |
| # Start search (first message) | |
| yield history + [(message, "🔍 Extracting optimal keywords and searching the web...")], "" | |
| # Execute search - add logs for debugging | |
| print(f"Deep Research activated: Starting search for '{message}'") | |
| search_results = search_with_serphouse(message) | |
| print(f"Search results received: {search_results[:100]}...") # Output first part of results | |
| if not search_results.startswith("Error during search") and not search_results.startswith("SERPHOUSE_API_KEY"): | |
| search_context = f""" | |
| Here are recent search results related to the user's question. Use this information to provide an accurate response with the latest information: | |
| {search_results} | |
| Based on the above search results, answer the user's question. If you cannot find a clear answer in the search results, use your knowledge to provide the best answer. | |
| When citing search results, mention the source, and ensure your answer reflects the latest information. | |
| """ | |
| search_info = f"🔍 Deep Research feature activated: Generating response based on relevant web search results..." | |
| else: | |
| print(f"Search failed or no results: {search_results}") | |
| except Exception as e: | |
| print(f"Exception occurred during Deep Research: {str(e)}") | |
| search_info = f"🔍 Deep Research feature error: {str(e)}" | |
| # Prepare conversation history for API request | |
| messages = [] | |
| for user, assistant in history: | |
| messages.append({"role": "user", "content": user}) | |
| messages.append({"role": "assistant", "content": assistant}) | |
| # Add system message with search context if available | |
| if search_context: | |
| # DeepSeek model supports system messages | |
| messages.insert(0, {"role": "system", "content": search_context}) | |
| # Add new user message | |
| messages.append({"role": "user", "content": message}) | |
| # Prepare API request | |
| url = "https://api.fireworks.ai/inference/v1/chat/completions" | |
| payload = { | |
| "model": "accounts/fireworks/models/deepseek-v3-0324", | |
| "max_tokens": 20480, | |
| "top_p": 1, | |
| "top_k": 40, | |
| "presence_penalty": 0, | |
| "frequency_penalty": 0, | |
| "temperature": 0.6, | |
| "messages": messages, | |
| "stream": True # Enable streaming | |
| } | |
| headers = { | |
| "Accept": "application/json", | |
| "Content-Type": "application/json", | |
| "Authorization": f"Bearer {api_key}" | |
| } | |
| try: | |
| # Request streaming response | |
| response = requests.request("POST", url, headers=headers, data=json.dumps(payload), stream=True) | |
| response.raise_for_status() # Raise exception for HTTP errors | |
| # Add message and start with initial response | |
| new_history = history.copy() | |
| # Include search_info in starting message if available | |
| start_msg = search_info if search_info else "" | |
| new_history.append((message, start_msg)) | |
| # Full response text | |
| full_response = start_msg | |
| # Process streaming response | |
| for line in response.iter_lines(): | |
| if line: | |
| line_text = line.decode('utf-8') | |
| # Remove 'data: ' prefix | |
| if line_text.startswith("data: "): | |
| line_text = line_text[6:] | |
| # Check for stream end message | |
| if line_text == "[DONE]": | |
| break | |
| try: | |
| # Parse JSON | |
| chunk = json.loads(line_text) | |
| chunk_content = chunk.get("choices", [{}])[0].get("delta", {}).get("content", "") | |
| if chunk_content: | |
| full_response += chunk_content | |
| # Update chat history | |
| new_history[-1] = (message, full_response) | |
| yield new_history, "" | |
| except json.JSONDecodeError: | |
| continue | |
| # Return final response | |
| yield new_history, "" | |
| except requests.exceptions.RequestException as e: | |
| error_msg = f"API error: {str(e)}" | |
| if hasattr(e, 'response') and e.response and e.response.status_code == 401: | |
| error_msg = "Authentication failed. Please check your FW_API_KEY environment variable." | |
| yield history, error_msg | |
| # Create Gradio interface | |
| with gr.Blocks(theme="soft", fill_height=True) as demo: | |
| # Header section | |
| gr.Markdown( | |
| """ | |
| # 🤖 DeepSeek V3-0324 + Research | |
| ### DeepSeek V3-0324 Latest Model + Real-time 'Deep Research' Agentic AI System @ https://discord.gg/openfreeai | |
| """ | |
| ) | |
| # Main layout | |
| with gr.Row(): | |
| # Main content area | |
| with gr.Column(): | |
| # Chat interface | |
| chatbot = gr.Chatbot( | |
| height=500, | |
| show_label=False, | |
| container=True | |
| ) | |
| # Add Deep Research toggle and status display | |
| with gr.Row(): | |
| with gr.Column(scale=3): | |
| use_deep_research = gr.Checkbox( | |
| label="Enable Deep Research", | |
| info="Utilize optimal keyword extraction and web search for latest information", | |
| value=False | |
| ) | |
| with gr.Column(scale=1): | |
| api_status = gr.Markdown("API Status: Ready") | |
| # Check and display API key status | |
| if not serphouse_api_key: | |
| api_status.value = "⚠️ SERPHOUSE_API_KEY is not set" | |
| if not api_key: | |
| api_status.value = "⚠️ FW_API_KEY is not set" | |
| if api_key and serphouse_api_key: | |
| api_status.value = "✅ API keys configured" | |
| # Input area | |
| with gr.Row(): | |
| msg = gr.Textbox( | |
| label="Message", | |
| placeholder="Enter your prompt here...", | |
| show_label=False, | |
| scale=9 | |
| ) | |
| submit = gr.Button("Send", variant="primary", scale=1) | |
| # Clear conversation button | |
| with gr.Row(): | |
| clear = gr.ClearButton([msg, chatbot], value="🧹 Clear Conversation") | |
| # Example queries | |
| gr.Examples( | |
| examples=[ | |
| "Explain the difference between Transformers and RNNs in deep learning.", | |
| "Write a Python function to find prime numbers within a specific range.", | |
| "Summarize the key concepts of reinforcement learning." | |
| ], | |
| inputs=msg | |
| ) | |
| # Error message display | |
| error_box = gr.Markdown("") | |
| # Connect buttons to functions | |
| submit.click( | |
| query_deepseek_streaming, | |
| inputs=[msg, chatbot, use_deep_research], | |
| outputs=[chatbot, error_box] | |
| ).then( | |
| lambda: "", | |
| None, | |
| [msg] | |
| ) | |
| # Allow Enter key submission | |
| msg.submit( | |
| query_deepseek_streaming, | |
| inputs=[msg, chatbot, use_deep_research], | |
| outputs=[chatbot, error_box] | |
| ).then( | |
| lambda: "", | |
| None, | |
| [msg] | |
| ) | |
| return demo | |
# Entry point: build the UI and start the server when executed directly
# (not when imported as a module).
if __name__ == "__main__":
    demo = create_deepseek_interface()
    demo.launch(debug=True)