import gradio as gr
import requests
from bs4 import BeautifulSoup
import os
import json
import logging
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from typing import Optional, List, Dict, Any

# ------------------------
# Configuration
# ------------------------
WORDLIFT_API_URL = "https://api.wordlift.io/content-evaluations"
WORDLIFT_API_KEY = os.getenv("WORDLIFT_API_KEY")  # Get API key from environment variable
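
# Optional: when running locally, the key can also come from a .env file via
# python-dotenv (see the startup hints at the bottom of this script). This is a
# minimal sketch and assumes python-dotenv is installed; it is skipped otherwise.
try:
    from dotenv import load_dotenv
    load_dotenv()
    WORDLIFT_API_KEY = WORDLIFT_API_KEY or os.getenv("WORDLIFT_API_KEY")
except ImportError:
    pass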

# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# ------------------------
# Custom CSS & Theme
# ------------------------
css = """
@import url('https://fonts.googleapis.com/css2?family=Open+Sans:wght@300;400;600;700&display=swap');

body {
    font-family: 'Open Sans', sans-serif !important;
}
.primary-btn {
    background-color: #3452db !important;
    color: white !important;
}
.primary-btn:hover {
    background-color: #2a41af !important;
}
.gradio-container {
    max-width: 1200px;  /* Limit width for better readability */
    margin: auto;
}
.plot-container {
    min-height: 400px;  /* Ensure the plot area is visible */
    display: flex;
    justify-content: center;  /* Center the plot horizontally */
    align-items: center;  /* Center vertically if needed */
}
/* Center the plot title to reduce overlap */
.plot-container .gradio-html-title {
    text-align: center;
    width: 100%;
}
"""

theme = gr.themes.Soft(
    primary_hue=gr.themes.colors.Color(
        name="blue",
        c50="#eef1ff",
        c100="#e0e5ff",
        c200="#c3cbff",
        c300="#a5b2ff",
        c400="#8798ff",
        c500="#6a7eff",
        c600="#3452db",
        c700="#2a41af",
        c800="#1f3183",
        c900="#152156",
        c950="#0a102b",
    )
)

# ------------------------
# Content Fetching Logic
# ------------------------
def fetch_content_from_url(url: str, timeout: int = 15) -> Optional[str]:
    """Fetches main text content from a URL. Returns None on failure."""
    logger.info(f"Fetching content from: {url}")
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        # Use stream=True and then process the content in chunks to cap memory usage,
        # though BeautifulSoup will load it all eventually. Timeout is for the connection.
        with requests.get(url, headers=headers, timeout=timeout, stream=True) as response:
            response.raise_for_status()  # Raise an exception for bad status codes

            # Limit the amount of data read to avoid excessive memory usage
            max_bytes_to_read = 2 * 1024 * 1024  # 2MB limit for the initial read
            content_bytes = b''
            for chunk in response.iter_content(chunk_size=8192):
                if not chunk:
                    break
                content_bytes += chunk
                if len(content_bytes) >= max_bytes_to_read:
                    logger.warning(f"Content for {url} exceeded {max_bytes_to_read} bytes, stopped reading.")
                    break

            # Prefer the encoding declared in the response headers, fall back to utf-8
            try:
                encoding = requests.utils.get_encoding_from_headers(response.headers) or 'utf-8'
                content = content_bytes.decode(encoding, errors='replace')
            except Exception as e:
                logger.warning(f"Could not decode content for {url}, falling back to utf-8: {e}")
                content = content_bytes.decode('utf-8', errors='replace')

        soup = BeautifulSoup(content, 'html.parser')

        # Attempt to find the main content block.
        # Prioritize semantic tags, then fall back to common class names.
        main_content = soup.find('article') or soup.find('main') or soup.find(
            class_=lambda x: x and ('content' in x.lower() or 'article' in x.lower()
                                    or 'post' in x.lower() or 'body' in x.lower())
        )
        text_tags = ['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'li', 'blockquote', 'figcaption', 'pre', 'code']
        if main_content:
            # Extract text from common text-containing tags within the main block
            text_elements = main_content.find_all(text_tags)
        else:
            # Fallback: extract text from the body (or the whole document) if no main block is found
            text_elements = (soup.body or soup).find_all(text_tags)
            logger.warning(f"No specific content tags (<article>, <main>, etc.) or common class names found for {url}, extracting from body.")
        text = ' '.join(elem.get_text() for elem in text_elements)

        # Clean up extra whitespace
        text = ' '.join(text.split())

        # Limit text length *after* extraction and cleaning.
        # Adjust based on API limits/cost; WordLift's typical text APIs handle up to ~1M characters.
        max_text_length = 1_000_000  # 1 million characters
        if len(text) > max_text_length:
            logger.warning(f"Extracted text for {url} is too long ({len(text)} chars), truncating to {max_text_length} chars.")
            text = text[:max_text_length]

        return text.strip() if text and text.strip() else None  # Return None if text is empty after processing
    except requests.exceptions.RequestException as e:
        logger.error(f"Failed to fetch content from {url}: {e}")
        return None
    except Exception as e:
        logger.error(f"Error processing content from {url}: {e}")
        return None
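
# Illustrative usage (comment only): the function returns the cleaned page text as a
# single string, or None when fetching/extraction fails, e.g.
#   text = fetch_content_from_url("https://wordlift.io/blog/en/entity/google-knowledge-graph/")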

# ------------------------
# WordLift API Call Logic
# ------------------------
def call_wordlift_api(text: str, keywords: Optional[List[str]] = None) -> Optional[Dict[str, Any]]:
    """Calls the WordLift Content Evaluation API."""
    if not WORDLIFT_API_KEY:
        logger.error("WORDLIFT_API_KEY environment variable not set.")
        return {"error": "API key not configured."}
    if not text or not text.strip():
        return {"error": "No significant content to evaluate."}

    payload = {
        "text": text,
        "keywords": keywords if keywords else []
    }
    headers = {
        'Authorization': f'Key {WORDLIFT_API_KEY}',
        'Content-Type': 'application/json',
        'Accept': 'application/json'
    }
    logger.info(f"Calling WordLift API with text length {len(text)} and {len(keywords or [])} keywords.")
    try:
        response = requests.post(WORDLIFT_API_URL, headers=headers, json=payload, timeout=90)  # Generous timeout for long documents
        response.raise_for_status()  # Raise HTTPError for bad responses (4xx or 5xx)
        return response.json()
    except requests.exceptions.HTTPError as e:
        logger.error(f"WordLift API HTTP error for {e.request.url}: {e.response.status_code} - {e.response.text}")
        try:
            error_detail = e.response.json()
        except json.JSONDecodeError:
            error_detail = e.response.text
        return {"error": f"API returned status code {e.response.status_code}", "details": error_detail}
    except requests.exceptions.Timeout:
        logger.error(f"WordLift API request timed out for {WORDLIFT_API_URL}")
        return {"error": "API request timed out."}
    except requests.exceptions.RequestException as e:
        logger.error(f"WordLift API request error: {e}")
        return {"error": f"API request failed: {e}"}
    except Exception as e:
        logger.error(f"Unexpected error during API call: {e}")
        return {"error": f"An unexpected error occurred: {e}"}

# ------------------------
# Plotting Logic
# ------------------------
def plot_average_radar(average_scores: Optional[Dict[str, Optional[float]]], avg_overall: Optional[float]) -> Any:
    """Return a radar (spider) plot as a Matplotlib figure showing average scores."""
    # Check if we have any valid scores to plot
    if not average_scores or all(v is None or pd.isna(v) for v in average_scores.values()):
        # Return a placeholder figure if no valid data is available
        fig, ax = plt.subplots(figsize=(6, 6))
        ax.text(0.5, 0.5, "No successful evaluations to plot\naverage scores.",
                horizontalalignment='center', verticalalignment='center',
                transform=ax.transAxes, fontsize=12)
        ax.axis('off')  # Hide axes
        plt.title("Average Content Quality Scores", size=16, y=1.05)
        plt.tight_layout()
        return fig

    categories = list(average_scores.keys())
    # Convert None/NaN values to 0 for plotting, but keep the raw values for annotation
    values_raw = [average_scores[cat] for cat in categories]
    values_for_plot = [float(v) if v is not None and pd.notna(v) else 0 for v in values_raw]
    num_vars = len(categories)

    # Calculate angles for the radar chart
    angles = [n / float(num_vars) * 2 * np.pi for n in range(num_vars)]
    angles += angles[:1]  # Complete the circle
    values_for_plot += values_for_plot[:1]  # Complete the circle for values

    fig, ax = plt.subplots(figsize=(6, 6), subplot_kw=dict(projection='polar'))
    line_color = '#3452DB'
    fill_color = '#A1A7AF'
    background_color = '#F6F6F7'
    annotation_color = '#191919'

    # Plot data
    ax.plot(angles, values_for_plot, 'o-', linewidth=2, color=line_color, label='Average Scores')
    ax.fill(angles, values_for_plot, alpha=0.4, color=fill_color)

    # Set tick locations and labels
    ax.set_xticks(angles[:-1])
    ax.set_xticklabels(categories, color=line_color, fontsize=10)

    # Set y-axis limits; the maximum score is 100
    ax.set_ylim(0, 100)
    ax.set_yticks([0, 20, 40, 60, 80, 100])  # Explicitly set y-ticks

    # Draw grid lines and set the background
    ax.grid(True, alpha=0.5, color=fill_color)
    ax.set_facecolor(background_color)

    # Annotate each point with its raw value (where available), slightly outside
    # the point along the radial line
    for angle, value_raw, value_plotted in zip(angles[:-1], values_raw, values_for_plot[:-1]):
        if value_raw is not None and pd.notna(value_raw):
            radius = value_plotted + 5  # Offset outward
            ax.text(angle, radius, f'{value_raw:.1f}', color=annotation_color,
                    horizontalalignment='center', verticalalignment='center', fontsize=9)

    # Add the title - only the "Overall: XX.X/100" part
    overall_title_text = f'Overall: {avg_overall:.1f}/100' if avg_overall is not None and pd.notna(avg_overall) else 'Overall: -'
    plt.title(overall_title_text, size=16, y=1.1, color=annotation_color)  # y=1.1 places it above the plot area
    plt.tight_layout()
    return fig
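
# Illustrative usage (comment only): the returned Figure can be handed straight to gr.Plot.
#   fig = plot_average_radar(
#       {"Purpose": 80.0, "Accuracy": 75.0, "Depth": 70.0, "Readability": 60.0, "SEO": 82.0},
#       avg_overall=73.4,
#   )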

# ------------------------
# Main Evaluation Batch Function
# ------------------------
def evaluate_urls_batch(url_data: pd.DataFrame):
    """
    Evaluates a batch of URLs using the WordLift API.

    Args:
        url_data: A pandas DataFrame with columns ['URL', 'Target Keywords (comma-separated)'].

    Returns:
        A tuple containing:
        - A pandas DataFrame with the summary results.
        - A dictionary containing the full results (including errors) keyed by URL.
        - A Matplotlib figure for the average radar chart.
    """
    # Check whether the DataFrame has any rows (.empty is the correct check)
    if url_data.empty:
        logger.info("Input DataFrame is empty. Returning empty results.")
        # Return an empty summary DF, empty full results, and a placeholder plot
        empty_summary_df = pd.DataFrame(columns=[
            'URL', 'Status', 'Overall Score', 'Content Purpose',
            'Content Accuracy', 'Content Depth', 'Readability Score (API)',
            'Readability Grade Level', 'SEO Score', 'Word Count', 'Error/Details'
        ])
        return empty_summary_df, {}, plot_average_radar(None, None)

    summary_results = []
    full_results = {}

    # Lists to store per-row scores for calculating averages.
    # Skipped or failed rows get NaN so that np.nanmean ignores them.
    purpose_scores = []
    accuracy_scores = []
    depth_scores = []
    readability_scores = []  # Note: the API returns a float such as 2.5
    seo_scores = []
    overall_scores = []

    # Ensure the columns exist and handle potential NaNs from the DataFrame input
    urls = url_data.get('URL', pd.Series(dtype=str)).fillna('')  # Replace NaN URLs with empty strings
    keywords_col = url_data.get('Target Keywords (comma-separated)', pd.Series(dtype=str)).fillna('')  # Replace NaN keywords with empty strings

    for index, url in enumerate(urls):
        url = url.strip()
        keywords_str = keywords_col.iloc[index].strip()
        keywords = [kw.strip() for kw in keywords_str.split(',') if kw.strip()]

        # Generate a unique key for full_results based on the row index and URL (or a placeholder)
        result_key = f"Row_{index}" + (f": {url}" if url else "")

        if not url:
            summary_results.append(["", "Skipped", "-", "-", "-", "-", "-", "-", "-", "-", "Empty URL"])
            full_results[result_key] = {"status": "Skipped", "error": "Empty URL input."}
            logger.warning(f"Skipping evaluation for row {index}: Empty URL")
            # Append NaN to the score lists for skipped rows
            purpose_scores.append(np.nan)
            accuracy_scores.append(np.nan)
            depth_scores.append(np.nan)
            readability_scores.append(np.nan)
            seo_scores.append(np.nan)
            overall_scores.append(np.nan)
            continue  # Move to the next URL

        logger.info(f"Processing URL: {url} (Row {index}) with keywords: {keywords}")

        # 1. Fetch Content
        content = fetch_content_from_url(url)
        if content is None or not content.strip():
            status = "Failed"
            error_msg = "Failed to fetch or extract content."
            summary_results.append([url, status, "-", "-", "-", "-", "-", "-", "-", "-", error_msg])
            full_results[result_key] = {"status": status, "error": error_msg}
            logger.error(f"Processing failed for {url} (Row {index}): {error_msg}")
            # Append NaN to the score lists for failed rows
            purpose_scores.append(np.nan)
            accuracy_scores.append(np.nan)
            depth_scores.append(np.nan)
            readability_scores.append(np.nan)
            seo_scores.append(np.nan)
            overall_scores.append(np.nan)
            continue  # Move to the next URL

        # 2. Call WordLift API
        api_result = call_wordlift_api(content, keywords)

        # 3. Process API Result
        summary_row = [url]
        if api_result and "error" not in api_result:
            status = "Success"
            qs = api_result.get('quality_score', {})
            breakdown = qs.get('breakdown', {})
            content_breakdown = breakdown.get('content', {})
            readability_breakdown = breakdown.get('readability', {})
            seo_breakdown = breakdown.get('seo', {})
            metadata = api_result.get('metadata', {})

            # Append scores for the average calculation (only for successful calls).
            # Use .get() with a None default, then convert to float, allowing NaN.
            purpose_scores.append(float(content_breakdown.get('purpose')) if content_breakdown.get('purpose') is not None else np.nan)
            accuracy_scores.append(float(content_breakdown.get('accuracy')) if content_breakdown.get('accuracy') is not None else np.nan)
            depth_scores.append(float(content_breakdown.get('depth')) if content_breakdown.get('depth') is not None else np.nan)
            readability_scores.append(float(readability_breakdown.get('score')) if readability_breakdown.get('score') is not None else np.nan)
            seo_scores.append(float(seo_breakdown.get('score')) if seo_breakdown.get('score') is not None else np.nan)
            overall_scores.append(float(qs.get('overall')) if qs.get('overall') is not None else np.nan)

            # Append formatted values for the summary table row, using '-' for missing values
            summary_row.extend([
                status,
                f'{qs.get("overall"):.1f}' if qs.get('overall') is not None else "-",
                f'{content_breakdown.get("purpose"):.0f}' if content_breakdown.get('purpose') is not None else "-",
                f'{content_breakdown.get("accuracy"):.0f}' if content_breakdown.get('accuracy') is not None else "-",
                f'{content_breakdown.get("depth"):.0f}' if content_breakdown.get('depth') is not None else "-",
                f'{readability_breakdown.get("score"):.1f}' if readability_breakdown.get('score') is not None else "-",
                f'{readability_breakdown.get("grade_level"):.0f}' if readability_breakdown.get('grade_level') is not None else "-",
                f'{seo_breakdown.get("score"):.1f}' if seo_breakdown.get('score') is not None else "-",
                f'{metadata.get("word_count"):.0f}' if metadata.get('word_count') is not None else "-",
                None  # No error
            ])
            full_results[result_key] = api_result  # Store the full API result
        else:
            status = "Failed"
            error_msg = api_result.get("error", "Unknown API error.") if api_result else "API call failed."
            details = api_result.get("details", "") if api_result else ""
            summary_row.extend([
                status,
                "-", "-", "-", "-", "-", "-", "-", "-",
                f"{error_msg} {details}"
            ])
            full_results[result_key] = {"status": status, "error": error_msg, "details": details}
            logger.error(f"API call failed for {url} (Row {index}): {error_msg} {details}")
            # Append NaN to the score lists for failed rows
            purpose_scores.append(np.nan)
            accuracy_scores.append(np.nan)
            depth_scores.append(np.nan)
            readability_scores.append(np.nan)
            seo_scores.append(np.nan)
            overall_scores.append(np.nan)

        summary_results.append(summary_row)

    # Calculate averages *after* processing all URLs, ignoring NaNs
    avg_purpose = np.nanmean(purpose_scores)
    avg_accuracy = np.nanmean(accuracy_scores)
    avg_depth = np.nanmean(depth_scores)
    avg_readability = np.nanmean(readability_scores)
    avg_seo = np.nanmean(seo_scores)
    avg_overall = np.nanmean(overall_scores)

    # Convert potentially NaN averages to None if there were no valid scores
    avg_purpose = avg_purpose if pd.notna(avg_purpose) else None
    avg_accuracy = avg_accuracy if pd.notna(avg_accuracy) else None
    avg_depth = avg_depth if pd.notna(avg_depth) else None
    avg_readability = avg_readability if pd.notna(avg_readability) else None
    avg_seo = avg_seo if pd.notna(avg_seo) else None
    avg_overall = avg_overall if pd.notna(avg_overall) else None

    # Prepare the scores for the radar plot function
    average_scores_dict = {
        'Purpose': avg_purpose,
        'Accuracy': avg_accuracy,
        'Depth': avg_depth,
        'Readability': avg_readability,
        'SEO': avg_seo
    }

    # Generate the average radar plot
    average_radar_fig = plot_average_radar(average_scores_dict, avg_overall)

    # Create the pandas DataFrame for the summary output.
    # Values were already formatted as strings above (or '-' for missing),
    # so the DataFrame displays them directly.
    summary_df = pd.DataFrame(summary_results, columns=[
        'URL', 'Status', 'Overall Score', 'Content Purpose',
        'Content Accuracy', 'Content Depth', 'Readability Score (API)',
        'Readability Grade Level', 'SEO Score', 'Word Count', 'Error/Details'
    ])

    return summary_df, full_results, average_radar_fig
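
# Standalone use outside Gradio (illustrative values), e.g. from a notebook or script:
#   df = pd.DataFrame({
#       "URL": ["https://wordlift.io/blog/en/"],
#       "Target Keywords (comma-separated)": ["seo, knowledge graph"],
#   })
#   summary_df, raw_results, fig = evaluate_urls_batch(df)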

# ------------------------
# Gradio Blocks Interface Setup
# ------------------------
with gr.Blocks(css=css, theme=theme) as demo:
    gr.Markdown("# WordLift Multi-URL Content Evaluator")
    gr.Markdown(
        "Enter up to 30 URLs in the table below. "
        "Optionally, provide comma-separated target keywords for each URL. "
        "The app will fetch content from each URL and evaluate it using the WordLift API."
    )

    with gr.Row():
        with gr.Column(scale=1):
            url_input_df = gr.Dataframe(
                headers=["URL", "Target Keywords (comma-separated)"],
                datatype=["str", "str"],
                row_count=(1, 30),  # Allow adding rows, up to 30
                col_count=(2, "fixed"),
                value=[
                    ["https://wordlift.io/blog/en/query-fan-out-ai-search/", "query fan out, ai search, google, ai"],
                    ["https://wordlift.io/blog/en/entity/google-knowledge-graph/", "google knowledge graph, entity, semantic web, seo"],
                    ["https://www.example.com/non-existent-page", ""],  # Example of a failing URL
                    ["", ""],  # Extra empty rows for easier input
                    ["", ""],
                    ["", ""],
                    ["", ""],
                ],
                label="URLs and Keywords"
            )
            submit_button = gr.Button("Evaluate All URLs", elem_classes=["primary-btn"])
        with gr.Column(scale=1, elem_classes="plot-container"):
            # Component for the average radar plot
            average_radar_output = gr.Plot(label="Average Content Quality Scores Radar")

    gr.Markdown("## Detailed Results")
    with gr.Column():
        summary_output_df = gr.DataFrame(
            label="Summary Results",
            # All columns are strings because values were formatted with f-strings (or '-') above
            headers=['URL', 'Status', 'Overall Score', 'Content Purpose',
                     'Content Accuracy', 'Content Depth', 'Readability Score (API)',
                     'Readability Grade Level', 'SEO Score', 'Word Count', 'Error/Details'],
            datatype=["str"] * 11,
            wrap=True  # Wrap text in columns
        )
        with gr.Accordion("Full JSON Results", open=False):
            full_results_json = gr.JSON(label="Raw API Results per URL (or Error)")

    submit_button.click(
        fn=evaluate_urls_batch,
        inputs=[url_input_df],
        outputs=[summary_output_df, full_results_json, average_radar_output]
    )

# Launch the app
if __name__ == "__main__":
    if not WORDLIFT_API_KEY:
        logger.error("\n----------------------------------------------------------")
        logger.error("WORDLIFT_API_KEY environment variable is not set.")
        logger.error("Please set it before running the script:")
        logger.error("  export WORDLIFT_API_KEY='YOUR_API_KEY'")
        logger.error("Or, if using a .env file and python-dotenv:")
        logger.error("  pip install python-dotenv")
        logger.error("  # Add WORDLIFT_API_KEY=YOUR_API_KEY to a .env file")
        logger.error("  # and call dotenv.load_dotenv() before reading the key.")
        logger.error("----------------------------------------------------------\n")
        # You might want to exit here if the API key is mandatory:
        # import sys
        # sys.exit(1)

    logger.info("Launching Gradio app...")
    # Consider share=True for easy sharing, but be mindful of security/costs:
    # demo.launch(share=True)
    demo.launch()