Spaces:
				
			
			
	
			
			
		Sleeping
		
	
	
	
			
			
	
	
	
	
		
		
		Sleeping
		
	Commit 
							
							·
						
						164dce0
	
1
								Parent(s):
							
							bbc85bc
								
doc: update readme
Browse files- README.md +15 -4
- app-withshortcut.py +234 -0
    	
        README.md
    CHANGED
    
    | @@ -27,6 +27,7 @@ DuckDuckGo and GoogleSearch have too many rate limit, | |
| 27 | 
             
            This code assumes that a locally running instance of searxng is available at http://localhost:8888
         | 
| 28 |  | 
| 29 | 
             
            On windows run it with:
         | 
|  | |
| 30 | 
             
            $PORT=8888
         | 
| 31 | 
             
            docker run --rm `
         | 
| 32 | 
             
             -p ${PORT}:8080 `
         | 
| @@ -34,13 +35,23 @@ docker run --rm ` | |
| 34 | 
             
             -e "BASE_URL=http://localhost:$PORT/" `
         | 
| 35 | 
             
             -e "INSTANCE_NAME=my-instance" `
         | 
| 36 | 
             
             -d searxng/searxng
         | 
| 37 | 
            -
             | 
| 38 | 
             
             be sure to allow the json format in /etc/searxng/settings.yml
         | 
| 39 |  | 
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
| 40 |  | 
| 41 | 
            -
            For the same reason, for my tool for text to speech I locally run a speech to text docker image running whisper.cpp
         | 
| 42 | 
             
            with ffmpeg installed for mp3 > wav conversion
         | 
| 43 |  | 
|  | |
|  | |
| 44 | 
             
            And if I really have no longer any available token on openai or gemini, I can run a VLLM instance.
         | 
| 45 |  | 
| 46 | 
             
            ## Instrumentation
         | 
| @@ -48,8 +59,8 @@ And if I really have no longer any available token on openai or gemini, I can ru | |
| 48 | 
             
            Instrumentation is enabled, 
         | 
| 49 | 
             
            and done locally with [Arize-ai phoenix](https://github.com/Arize-ai/phoenix)
         | 
| 50 | 
             
            a server is launched with: 
         | 
| 51 | 
            -
             | 
| 52 | 
             
            python -m phoenix.server.main serve
         | 
| 53 | 
            -
             | 
| 54 | 
             
            and can be consulted on: http://127.0.0.1:6006
         | 
| 55 |  | 
|  | |
| 27 | 
             
            This code assumes that a locally running instance of searxng is available at http://localhost:8888
         | 
| 28 |  | 
| 29 | 
             
            On windows run it with:
         | 
| 30 | 
            +
            ```
         | 
| 31 | 
             
            $PORT=8888
         | 
| 32 | 
             
            docker run --rm `
         | 
| 33 | 
             
             -p ${PORT}:8080 `
         | 
|  | |
| 35 | 
             
             -e "BASE_URL=http://localhost:$PORT/" `
         | 
| 36 | 
             
             -e "INSTANCE_NAME=my-instance" `
         | 
| 37 | 
             
             -d searxng/searxng
         | 
| 38 | 
            +
            ```
         | 
| 39 | 
             
             be sure to allow the json format in /etc/searxng/settings.yml
         | 
| 40 |  | 
| 41 | 
            +
             ```
         | 
| 42 | 
            +
              # remove format to deny access, use lower case.
         | 
| 43 | 
            +
              # formats: [html, csv, json, rss]
         | 
| 44 | 
            +
              formats:
         | 
| 45 | 
            +
                - html
         | 
| 46 | 
            +
                - json
         | 
| 47 | 
            +
            ```
         | 
| 48 | 
            +
             | 
| 49 |  | 
| 50 | 
            +
            For the same reason, for my tool for text to speech I locally run a speech to text docker image running whisper.cpp  
         | 
| 51 | 
             
            with ffmpeg installed for mp3 > wav conversion
         | 
| 52 |  | 
| 53 | 
            +
            It is launched by ttools/sst.py
         | 
| 54 | 
            +
             | 
| 55 | 
             
            And if I really have no longer any available token on openai or gemini, I can run a VLLM instance.
         | 
| 56 |  | 
| 57 | 
             
            ## Instrumentation
         | 
|  | |
| 59 | 
             
            Instrumentation is enabled, 
         | 
| 60 | 
             
            and done locally with [Arize-ai phoenix](https://github.com/Arize-ai/phoenix)
         | 
| 61 | 
             
            a server is launched with: 
         | 
| 62 | 
            +
            ```
         | 
| 63 | 
             
            python -m phoenix.server.main serve
         | 
| 64 | 
            +
            ```
         | 
| 65 | 
             
            and can be consulted on: http://127.0.0.1:6006
         | 
| 66 |  | 
    	
        app-withshortcut.py
    ADDED
    
    | @@ -0,0 +1,234 @@ | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | 
|  | |
import os
import gradio as gr
import requests
import inspect
import pandas as pd
from dotenv import load_dotenv

from myagent import BasicAgent  # Import your agent class from myagent.py
from multiagents import MultiAgent

from phoenix.otel import register
from openinference.instrumentation.smolagents import SmolagentsInstrumentor

# SPACE_HOST is only set inside a Hugging Face space; when it is absent we
# are running locally, so hook up local Phoenix instrumentation.
space_host_startup = os.getenv("SPACE_HOST")
if not space_host_startup:
    register()
    SmolagentsInstrumentor().instrument()

# (Keep Constants as is)
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
load_dotenv()

# Cap on how many questions a single run may process.
max_questions = 20

# Answers already solved in earlier runs, reused to avoid computation cost.
known_answers = {
    "f918266a-b3e0-4914-865d-4faa564f1aef": "0",
    "a1e91b78-d3d8-4675-bb8d-62741b4b68a6": "3",
    "2d83110e-a098-4ebb-9987-066c06fa42d0": "right",
    "8e867cd7-cff9-4e6c-867a-ff5ddc2550be": "3",
    "9d191bce-651d-4746-be2d-7ef8ecadb9c2": "extremely",
    # Add more known answers as needed
}
            +
            def run_and_submit_all(nb_questions: int, profile: gr.OAuthProfile | None): 
         | 
| 40 | 
            +
                """
         | 
| 41 | 
            +
                Fetches all questions, runs my Agent on them, submits all answers,
         | 
| 42 | 
            +
                and displays the results.
         | 
| 43 | 
            +
                """
         | 
| 44 | 
            +
                # --- Determine HF Space Runtime URL and Repo URL ---
         | 
| 45 | 
            +
                space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code   
         | 
| 46 | 
            +
             | 
| 47 | 
            +
                if profile:
         | 
| 48 | 
            +
                    username= f"{profile.username}"
         | 
| 49 | 
            +
                    print(f"User logged in: {username}")
         | 
| 50 | 
            +
                else:
         | 
| 51 | 
            +
                    print("User not logged in.")
         | 
| 52 | 
            +
                    return "Please Login to Hugging Face with the button.", None
         | 
| 53 | 
            +
             | 
| 54 | 
            +
                api_url = DEFAULT_API_URL
         | 
| 55 | 
            +
                questions_url = f"{api_url}/questions"
         | 
| 56 | 
            +
                file_url = f"{api_url}/files"    
         | 
| 57 | 
            +
                submit_url = f"{api_url}/submit"
         | 
| 58 | 
            +
             | 
| 59 | 
            +
                # 1. Instantiate Agent 
         | 
| 60 | 
            +
                try:
         | 
| 61 | 
            +
                    # agent = BasicAgent()
         | 
| 62 | 
            +
                    agent = MultiAgent()
         | 
| 63 | 
            +
                except Exception as e:
         | 
| 64 | 
            +
                    print(f"Error instantiating agent: {e}")
         | 
| 65 | 
            +
                    return f"Error initializing agent: {e}", None
         | 
| 66 | 
            +
                # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
         | 
| 67 | 
            +
                agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
         | 
| 68 | 
            +
                print(agent_code)
         | 
| 69 | 
            +
             | 
| 70 | 
            +
                # 2. Fetch Questions
         | 
| 71 | 
            +
                print(f"Fetching questions from: {questions_url}")
         | 
| 72 | 
            +
                try:
         | 
| 73 | 
            +
                    response = requests.get(questions_url, timeout=15)
         | 
| 74 | 
            +
                    response.raise_for_status()
         | 
| 75 | 
            +
                    questions_data = response.json()
         | 
| 76 | 
            +
                    if not questions_data:
         | 
| 77 | 
            +
                         print("Fetched questions list is empty.")
         | 
| 78 | 
            +
                         return "Fetched questions list is empty or invalid format.", None
         | 
| 79 | 
            +
                    print(f"Fetched {len(questions_data)} questions.")
         | 
| 80 | 
            +
                except requests.exceptions.RequestException as e:
         | 
| 81 | 
            +
                    print(f"Error fetching questions: {e}")
         | 
| 82 | 
            +
                    return f"Error fetching questions: {e}", None
         | 
| 83 | 
            +
                except requests.exceptions.JSONDecodeError as e:
         | 
| 84 | 
            +
                     print(f"Error decoding JSON response from questions endpoint: {e}")
         | 
| 85 | 
            +
                     print(f"Response text: {response.text[:500]}")
         | 
| 86 | 
            +
                     return f"Error decoding server response for questions: {e}", None
         | 
| 87 | 
            +
                except Exception as e:
         | 
| 88 | 
            +
                    print(f"An unexpected error occurred fetching questions: {e}")
         | 
| 89 | 
            +
                    return f"An unexpected error occurred fetching questions: {e}", None
         | 
| 90 | 
            +
             | 
| 91 | 
            +
                # 3. Run your Agent
         | 
| 92 | 
            +
                results_log = []
         | 
| 93 | 
            +
                answers_payload = []
         | 
| 94 | 
            +
             | 
| 95 | 
            +
                # for testing keep only some questions
         | 
| 96 | 
            +
                questions_data = questions_data[:nb_questions]
         | 
| 97 | 
            +
             | 
| 98 | 
            +
                print(f"Running agent on {len(questions_data)} questions...")
         | 
| 99 | 
            +
                for item in questions_data:
         | 
| 100 | 
            +
                    task_id = item.get("task_id")
         | 
| 101 | 
            +
                    question_text = item.get("question")
         | 
| 102 | 
            +
                    file_name = item.get("file_name")
         | 
| 103 | 
            +
                    file_question_url = None
         | 
| 104 | 
            +
                    if file_name:
         | 
| 105 | 
            +
                        file_question_url = f"{file_url}/{task_id}"
         | 
| 106 | 
            +
                    if not task_id or question_text is None:
         | 
| 107 | 
            +
                        print(f"Skipping item with missing task_id or question: {item}")
         | 
| 108 | 
            +
                        continue
         | 
| 109 | 
            +
                    try:
         | 
| 110 | 
            +
                        agent_question = question_text
         | 
| 111 | 
            +
                        if file_question_url:
         | 
| 112 | 
            +
                            agent_question += f"\n\nFile URL: {file_question_url}"
         | 
| 113 | 
            +
             | 
| 114 | 
            +
                        shortcut = known_answers.get(task_id)
         | 
| 115 | 
            +
                        if shortcut:
         | 
| 116 | 
            +
                            submitted_answer = shortcut
         | 
| 117 | 
            +
                        else:
         | 
| 118 | 
            +
                            submitted_answer = agent(agent_question)
         | 
| 119 | 
            +
                        answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
         | 
| 120 | 
            +
                        results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
         | 
| 121 | 
            +
                    except Exception as e:
         | 
| 122 | 
            +
                         print(f"Error running agent on task {task_id}: {e}")
         | 
| 123 | 
            +
                         results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
         | 
| 124 | 
            +
             | 
| 125 | 
            +
                if not answers_payload:
         | 
| 126 | 
            +
                    print("Agent did not produce any answers to submit.")
         | 
| 127 | 
            +
                    return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
         | 
| 128 | 
            +
             | 
| 129 | 
            +
                # 4. Prepare Submission 
         | 
| 130 | 
            +
                submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
         | 
| 131 | 
            +
                status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
         | 
| 132 | 
            +
                print(status_update)
         | 
| 133 | 
            +
             | 
| 134 | 
            +
                # 5. Submit
         | 
| 135 | 
            +
                print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
         | 
| 136 | 
            +
                try:
         | 
| 137 | 
            +
                    response = requests.post(submit_url, json=submission_data, timeout=60)
         | 
| 138 | 
            +
                    response.raise_for_status()
         | 
| 139 | 
            +
                    result_data = response.json()
         | 
| 140 | 
            +
                    final_status = (
         | 
| 141 | 
            +
                        f"Submission Successful!\n"
         | 
| 142 | 
            +
                        f"User: {result_data.get('username')}\n"
         | 
| 143 | 
            +
                        f"Overall Score: {result_data.get('score', 'N/A')}% "
         | 
| 144 | 
            +
                        f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
         | 
| 145 | 
            +
                        f"Message: {result_data.get('message', 'No message received.')}"
         | 
| 146 | 
            +
                    )
         | 
| 147 | 
            +
                    print("Submission successful.")
         | 
| 148 | 
            +
                    results_df = pd.DataFrame(results_log)
         | 
| 149 | 
            +
                    return final_status, results_df
         | 
| 150 | 
            +
                except requests.exceptions.HTTPError as e:
         | 
| 151 | 
            +
                    error_detail = f"Server responded with status {e.response.status_code}."
         | 
| 152 | 
            +
                    try:
         | 
| 153 | 
            +
                        error_json = e.response.json()
         | 
| 154 | 
            +
                        error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
         | 
| 155 | 
            +
                    except requests.exceptions.JSONDecodeError:
         | 
| 156 | 
            +
                        error_detail += f" Response: {e.response.text[:500]}"
         | 
| 157 | 
            +
                    status_message = f"Submission Failed: {error_detail}"
         | 
| 158 | 
            +
                    print(status_message)
         | 
| 159 | 
            +
                    results_df = pd.DataFrame(results_log)
         | 
| 160 | 
            +
                    return status_message, results_df
         | 
| 161 | 
            +
                except requests.exceptions.Timeout:
         | 
| 162 | 
            +
                    status_message = "Submission Failed: The request timed out."
         | 
| 163 | 
            +
                    print(status_message)
         | 
| 164 | 
            +
                    results_df = pd.DataFrame(results_log)
         | 
| 165 | 
            +
                    return status_message, results_df
         | 
| 166 | 
            +
                except requests.exceptions.RequestException as e:
         | 
| 167 | 
            +
                    status_message = f"Submission Failed: Network error - {e}"
         | 
| 168 | 
            +
                    print(status_message)
         | 
| 169 | 
            +
                    results_df = pd.DataFrame(results_log)
         | 
| 170 | 
            +
                    return status_message, results_df
         | 
| 171 | 
            +
                except Exception as e:
         | 
| 172 | 
            +
                    status_message = f"An unexpected error occurred during submission: {e}"
         | 
| 173 | 
            +
                    print(status_message)
         | 
| 174 | 
            +
                    results_df = pd.DataFrame(results_log)
         | 
| 175 | 
            +
                    return status_message, results_df
         | 
| 176 | 
            +
             | 
| 177 | 
            +
             | 
# --- Build Gradio Interface using Blocks ---
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown(
        """
        **Instructions:**

        1.  Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
        2.  Log in to your Hugging Face account using the button below. This uses your HF username for submission.
        3.  Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.

        ---
        **Disclaimers:**
        Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
        """
    )

    # OAuth login — Gradio injects the resulting profile into run_and_submit_all.
    gr.LoginButton()

    # How many questions to run the agent on (handy to limit cost while testing).
    nb_questions = gr.Number(value=20)

    run_button = gr.Button("Run Evaluation & Submit All Answers")

    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    # Removed max_rows=10 from DataFrame constructor
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    run_button.click(
        fn=run_and_submit_all,
        inputs=[nb_questions],
        outputs=[status_output, results_table],
    )
if __name__ == "__main__":
    # Startup banner.
    dashes = "-" * 30
    print(f"\n{dashes} App Starting {dashes}")

    # Report SPACE_HOST and SPACE_ID at startup so the logs show whether we
    # are running inside a Hugging Face space or locally.
    host = os.getenv("SPACE_HOST")
    sid = os.getenv("SPACE_ID")

    if host:
        print(f"✅ SPACE_HOST found: {host}")
        print(f"   Runtime URL should be: https://{host}.hf.space")
    else:
        print("ℹ️  SPACE_HOST environment variable not found (running locally?).")

    if sid:
        # Print repo URLs when SPACE_ID is available.
        print(f"✅ SPACE_ID found: {sid}")
        print(f"   Repo URL: https://huggingface.co/spaces/{sid}")
        print(f"   Repo Tree URL: https://huggingface.co/spaces/{sid}/tree/main")
    else:
        print("ℹ️  SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    print("-" * (60 + len(" App Starting ")) + "\n")

    print("Launching Gradio Interface for Basic Agent Evaluation...")
    demo.launch(debug=True, share=False)
