	Upload 2 files
- TYPEGPT/typegpt.py +409 -0
- TYPEGPT/typegpt_normal.py +231 -0
    	
TYPEGPT/typegpt.py
ADDED
@@ -0,0 +1,409 @@
import requests
import json
from typing import Any, Dict, Generator

from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts
from webscout.AIbase import Provider
from webscout import exceptions

class TypeGPT(Provider):
    """
    A class to interact with the TypeGPT.net API. Improved to match webscout standards.
    """
    url = "https://chat.typegpt.net"
    working = True
    supports_message_history = True

    models = [
        # OpenAI Models
        "gpt-3.5-turbo",
        "gpt-3.5-turbo-202201",
        "gpt-4o",
        "gpt-4o-2024-05-13",
        "o1-preview",

        # Claude Models
        "claude",
        "claude-3-5-sonnet",
        "claude-sonnet-3.5",
        "claude-3-5-sonnet-20240620",

        # Meta/LLaMA Models
        "@cf/meta/llama-2-7b-chat-fp16",
        "@cf/meta/llama-2-7b-chat-int8",
        "@cf/meta/llama-3-8b-instruct",
        "@cf/meta/llama-3.1-8b-instruct",
        "@cf/meta-llama/llama-2-7b-chat-hf-lora",
        "llama-3.1-405b",
        "llama-3.1-70b",
        "llama-3.1-8b",
        "meta-llama/Llama-2-7b-chat-hf",
        "meta-llama/Llama-3.1-70B-Instruct",
        "meta-llama/Llama-3.1-8B-Instruct",
        "meta-llama/Llama-3.2-11B-Vision-Instruct",
        "meta-llama/Llama-3.2-1B-Instruct",
        "meta-llama/Llama-3.2-3B-Instruct",
        "meta-llama/Llama-3.2-90B-Vision-Instruct",
        "meta-llama/Llama-Guard-3-8B",
        "meta-llama/Meta-Llama-3-70B-Instruct",
        "meta-llama/Meta-Llama-3-8B-Instruct",
        "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
        "meta-llama/Meta-Llama-3.1-8B-Instruct",
        "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",

        # Mistral Models
        "mistral",
        "mistral-large",
        "@cf/mistral/mistral-7b-instruct-v0.1",
        "@cf/mistral/mistral-7b-instruct-v0.2-lora",
        "@hf/mistralai/mistral-7b-instruct-v0.2",
        "mistralai/Mistral-7B-Instruct-v0.2",
        "mistralai/Mistral-7B-Instruct-v0.3",
        "mistralai/Mixtral-8x22B-Instruct-v0.1",
        "mistralai/Mixtral-8x7B-Instruct-v0.1",

        # Qwen Models
        "@cf/qwen/qwen1.5-0.5b-chat",
        "@cf/qwen/qwen1.5-1.8b-chat",
        "@cf/qwen/qwen1.5-7b-chat-awq",
        "@cf/qwen/qwen1.5-14b-chat-awq",
        "Qwen/Qwen2.5-3B-Instruct",
        "Qwen/Qwen2.5-72B-Instruct",
        "Qwen/Qwen2.5-Coder-32B-Instruct",

        # Google/Gemini Models
        "@cf/google/gemma-2b-it-lora",
        "@cf/google/gemma-7b-it-lora",
        "@hf/google/gemma-7b-it",
        "google/gemma-1.1-2b-it",
        "google/gemma-1.1-7b-it",
        "gemini-pro",
        "gemini-1.5-pro",
        "gemini-1.5-pro-latest",
        "gemini-1.5-flash",

        # Cohere Models
        "c4ai-aya-23-35b",
        "c4ai-aya-23-8b",
        "command",
        "command-light",
        "command-light-nightly",
        "command-nightly",
        "command-r",
        "command-r-08-2024",
        "command-r-plus",
        "command-r-plus-08-2024",
        "rerank-english-v2.0",
        "rerank-english-v3.0",
        "rerank-multilingual-v2.0",
        "rerank-multilingual-v3.0",

        # Microsoft Models
        "@cf/microsoft/phi-2",
        "microsoft/DialoGPT-medium",
        "microsoft/Phi-3-medium-4k-instruct",
        "microsoft/Phi-3-mini-4k-instruct",
        "microsoft/Phi-3.5-mini-instruct",
        "microsoft/WizardLM-2-8x22B",

        # Yi Models
        "01-ai/Yi-1.5-34B-Chat",
        "01-ai/Yi-34B-Chat",

        # Specialized Models and Tools
        "@cf/deepseek-ai/deepseek-math-7b-base",
        "@cf/deepseek-ai/deepseek-math-7b-instruct",
        "@cf/defog/sqlcoder-7b-2",
        "@cf/openchat/openchat-3.5-0106",
        "@cf/thebloke/discolm-german-7b-v1-awq",
        "@cf/tiiuae/falcon-7b-instruct",
        "@cf/tinyllama/tinyllama-1.1b-chat-v1.0",
        "@hf/nexusflow/starling-lm-7b-beta",
        "@hf/nousresearch/hermes-2-pro-mistral-7b",
        "@hf/thebloke/deepseek-coder-6.7b-base-awq",
        "@hf/thebloke/deepseek-coder-6.7b-instruct-awq",
        "@hf/thebloke/llama-2-13b-chat-awq",
        "@hf/thebloke/llamaguard-7b-awq",
        "@hf/thebloke/neural-chat-7b-v3-1-awq",
        "@hf/thebloke/openhermes-2.5-mistral-7b-awq",
        "@hf/thebloke/zephyr-7b-beta-awq",
        "AndroidDeveloper",
        "AngularJSAgent",
        "AzureAgent",
        "BitbucketAgent",
        "DigitalOceanAgent",
        "DockerAgent",
        "ElectronAgent",
        "ErlangAgent",
        "FastAPIAgent",
        "FirebaseAgent",
        "FlaskAgent",
        "FlutterAgent",
        "GitAgent",
        "GitlabAgent",
        "GoAgent",
        "GodotAgent",
        "GoogleCloudAgent",
        "HTMLAgent",
        "HerokuAgent",
        "ImageGeneration",
        "JavaAgent",
        "JavaScriptAgent",
        "MongoDBAgent",
        "Next.jsAgent",
        "PyTorchAgent",
        "PythonAgent",
        "ReactAgent",
        "RepoMap",
        "SwiftDeveloper",
        "XcodeAgent",
        "YoutubeAgent",
        "blackboxai",
        "blackboxai-pro",
        "builderAgent",
        "dify",
        "flux",
        "openchat/openchat-3.6-8b",
        "rtist",
        "searchgpt",
        "sur",
        "sur-mistral",
        "unity"
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 4000,  # Set a reasonable default
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "claude-3-5-sonnet-20240620",
        system_prompt: str = "You are a helpful assistant.",
        temperature: float = 0.5,
        presence_penalty: int = 0,
        frequency_penalty: int = 0,
        top_p: float = 1,
    ):
        """Initializes the TypeGPT API client."""
        if model not in self.models:
            raise ValueError(f"Invalid model: {model}. Choose from: {', '.join(self.models)}")

        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://chat.typegpt.net/api/openai/v1/chat/completions"
        self.timeout = timeout
        self.last_response = {}
        self.last_response_status_code = None  # HTTP status code of the most recent request
        self.model = model
        self.system_prompt = system_prompt
        self.temperature = temperature
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        self.top_p = top_p

        self.headers = {
            "authority": "chat.typegpt.net",
            "accept": "application/json, text/event-stream",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "application/json",
            "origin": "https://chat.typegpt.net",
            "referer": "https://chat.typegpt.net/",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
        }

        # A tuple rather than a one-shot generator, so membership checks
        # in ask() keep working after the first call.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Dict[str, Any] | Generator:
        """Sends a prompt to the TypeGPT.net API and returns the response."""
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise exceptions.FailedToGenerateResponseError(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        payload = {
            "messages": [
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": conversation_prompt}
            ],
            "stream": stream,
            "model": self.model,
            "temperature": self.temperature,
            "presence_penalty": self.presence_penalty,
            "frequency_penalty": self.frequency_penalty,
            "top_p": self.top_p,
            "max_tokens": self.max_tokens_to_sample,
        }

        def for_stream():
            response = self.session.post(
                self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout
            )
            self.last_response_status_code = response.status_code
            if not response.ok:
                raise exceptions.FailedToGenerateResponseError(
                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )
            message_load = ""
            # The endpoint streams OpenAI-style server-sent events: each chunk
            # arrives as a "data: {...}" line, terminated by "data: [DONE]".
            for line in response.iter_lines():
                if line:
                    line = line.decode("utf-8")
                    if line.startswith("data: "):
                        line = line[6:]  # Remove "data: " prefix
                        # Skip [DONE] message
                        if line.strip() == "[DONE]":
                            break
                        try:
                            data = json.loads(line)
                            # Extract and yield only new content
                            if 'choices' in data and len(data['choices']) > 0:
                                delta = data['choices'][0].get('delta', {})
                                if 'content' in delta:
                                    new_content = delta['content']
                                    message_load += new_content
                                    # Yield only the new content
                                    yield new_content if raw else dict(text=new_content)
                                    self.last_response = dict(text=message_load)
                        except json.JSONDecodeError:
                            continue
            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))

        def for_non_stream():
            response = self.session.post(
                self.api_endpoint, headers=self.headers, json=payload, timeout=self.timeout
            )
            self.last_response_status_code = response.status_code
            if not response.ok:
                raise exceptions.FailedToGenerateResponseError(
                    f"Request failed - {response.status_code}: {response.text}"
                )
            # Normalize the OpenAI-style completion JSON to the {"text": ...}
            # shape that get_message() expects.
            data = response.json()
            self.last_response = dict(text=data["choices"][0]["message"]["content"])
            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str | Generator[str, None, None]:
        """Generate response as a `str`, or as a stream of `str` chunks."""

        def for_stream():
            for chunk in self.ask(
                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(chunk)  # Extract text from streamed chunks

        def for_non_stream():
            return self.get_message(
                self.ask(prompt, stream=False, optimizer=optimizer, conversationally=conversationally)
            )

        # A `yield` anywhere in chat() itself would make the whole method a
        # generator and break the non-stream `return`, so the two paths live
        # in separate inner functions.
        return for_stream() if stream else for_non_stream()

    def get_message(self, response: Dict[str, Any]) -> str:
        """Retrieves message from response."""
        if isinstance(response, str):  # Handle raw responses
            return response
        elif isinstance(response, dict):
            return response.get("text", "")  # Extract text from dictionary response
        else:
            raise TypeError("Invalid response type. Expected str or dict.")

if __name__ == "__main__":
    from rich import print
    from rich.progress import Progress, BarColumn, TextColumn, TimeRemainingColumn, SpinnerColumn
    from rich.console import Console
    from rich.table import Table
    import concurrent.futures

    def make_api_call(thread_number, results):
        ai = TypeGPT()
        try:
            ai.ask("Test message", stream=False)
            results[thread_number] = ai.last_response_status_code
        except Exception as e:
            results[thread_number] = str(e)

    results = {}
    total_requests = 100

    console = Console()

    print("[bold magenta]Starting API Load Test with 100 simultaneous requests...[/bold magenta]\n")

    with Progress(
        SpinnerColumn(),
        "[progress.description]{task.description}",
        BarColumn(bar_width=None),
        "[progress.percentage]{task.percentage:>3.0f}%",
        TimeRemainingColumn(),
        console=console,
    ) as progress:
        task = progress.add_task("[cyan]Sending API Requests...", total=total_requests)
        with concurrent.futures.ThreadPoolExecutor(max_workers=total_requests) as executor:
            futures = {
                executor.submit(make_api_call, i, results): i for i in range(total_requests)
            }
            for future in concurrent.futures.as_completed(futures):
                progress.update(task, advance=1)

    # Process and display the results
    successful_calls = sum(1 for status in results.values() if status == 200)
    failed_calls = total_requests - successful_calls

    print("\n[bold magenta]API Load Test Results:[/bold magenta]\n")
    print(f"[bold green]Successful calls: {successful_calls}")
    print(f"[bold red]Failed calls: {failed_calls}\n")

    # Create a table to display detailed results
    table = Table(show_header=True, header_style="bold blue")
    table.add_column("Thread Number", justify="right", style="dim")
    table.add_column("Status", style="bold")

    for thread_number, status in results.items():
        if status == 200:
            table.add_row(f"{thread_number}", "[green]Success[/green]")
        else:
            table.add_row(f"{thread_number}", f"[red]Failed ({status})[/red]")

    print(table)
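
For quick reference, a minimal usage sketch of the provider above. The import path is hypothetical (it assumes this repo layout is importable as `TYPEGPT.typegpt`), and it requires `webscout` to be installed and the chat.typegpt.net endpoint to be reachable:

# Hypothetical usage sketch -- the import path assumes TYPEGPT/ is on sys.path.
from TYPEGPT.typegpt import TypeGPT

ai = TypeGPT(model="gpt-4o", max_tokens=512)

# Non-streaming: chat() returns the full reply as a str.
print(ai.chat("Say hello in one sentence."))

# Streaming: chat(stream=True) yields text chunks as they arrive.
for chunk in ai.chat("Count from 1 to 5.", stream=True):
    print(chunk, end="", flush=True)
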
    	
TYPEGPT/typegpt_normal.py
ADDED
@@ -0,0 +1,231 @@
import requests
import json

# List of available models
models = [
    # OpenAI Models
    "gpt-3.5-turbo",
    "gpt-3.5-turbo-202201",
    "gpt-4o",
    "gpt-4o-2024-05-13",
    "o1-preview",

    # Claude Models
    "claude",
    "claude-3-5-sonnet",
    "claude-sonnet-3.5",
    "claude-3-5-sonnet-20240620",

    # Meta/LLaMA Models
    "@cf/meta/llama-2-7b-chat-fp16",
    "@cf/meta/llama-2-7b-chat-int8",
    "@cf/meta/llama-3-8b-instruct",
    "@cf/meta/llama-3.1-8b-instruct",
    "@cf/meta-llama/llama-2-7b-chat-hf-lora",
    "llama-3.1-405b",
    "llama-3.1-70b",
    "llama-3.1-8b",
    "meta-llama/Llama-2-7b-chat-hf",
    "meta-llama/Llama-3.1-70B-Instruct",
    "meta-llama/Llama-3.1-8B-Instruct",
    "meta-llama/Llama-3.2-11B-Vision-Instruct",
    "meta-llama/Llama-3.2-1B-Instruct",
    "meta-llama/Llama-3.2-3B-Instruct",
    "meta-llama/Llama-3.2-90B-Vision-Instruct",
    "meta-llama/Llama-Guard-3-8B",
    "meta-llama/Meta-Llama-3-70B-Instruct",
    "meta-llama/Meta-Llama-3-8B-Instruct",
    "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
    "meta-llama/Meta-Llama-3.1-8B-Instruct",
    "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",

    # Mistral Models
    "mistral",
    "mistral-large",
    "@cf/mistral/mistral-7b-instruct-v0.1",
    "@cf/mistral/mistral-7b-instruct-v0.2-lora",
    "@hf/mistralai/mistral-7b-instruct-v0.2",
    "mistralai/Mistral-7B-Instruct-v0.2",
    "mistralai/Mistral-7B-Instruct-v0.3",
    "mistralai/Mixtral-8x22B-Instruct-v0.1",
    "mistralai/Mixtral-8x7B-Instruct-v0.1",

    # Qwen Models
    "@cf/qwen/qwen1.5-0.5b-chat",
    "@cf/qwen/qwen1.5-1.8b-chat",
    "@cf/qwen/qwen1.5-7b-chat-awq",
    "@cf/qwen/qwen1.5-14b-chat-awq",
    "Qwen/Qwen2.5-3B-Instruct",
    "Qwen/Qwen2.5-72B-Instruct",
    "Qwen/Qwen2.5-Coder-32B-Instruct",

    # Google/Gemini Models
    "@cf/google/gemma-2b-it-lora",
    "@cf/google/gemma-7b-it-lora",
    "@hf/google/gemma-7b-it",
    "google/gemma-1.1-2b-it",
    "google/gemma-1.1-7b-it",
    "gemini-pro",
    "gemini-1.5-pro",
    "gemini-1.5-pro-latest",
    "gemini-1.5-flash",

    # Cohere Models
    "c4ai-aya-23-35b",
    "c4ai-aya-23-8b",
    "command",
    "command-light",
    "command-light-nightly",
    "command-nightly",
    "command-r",
    "command-r-08-2024",
    "command-r-plus",
    "command-r-plus-08-2024",
    "rerank-english-v2.0",
    "rerank-english-v3.0",
    "rerank-multilingual-v2.0",
    "rerank-multilingual-v3.0",

    # Microsoft Models
    "@cf/microsoft/phi-2",
    "microsoft/DialoGPT-medium",
    "microsoft/Phi-3-medium-4k-instruct",
    "microsoft/Phi-3-mini-4k-instruct",
    "microsoft/Phi-3.5-mini-instruct",
    "microsoft/WizardLM-2-8x22B",

    # Yi Models
    "01-ai/Yi-1.5-34B-Chat",
    "01-ai/Yi-34B-Chat",

    # Specialized Models and Tools
    "@cf/deepseek-ai/deepseek-math-7b-base",
    "@cf/deepseek-ai/deepseek-math-7b-instruct",
    "@cf/defog/sqlcoder-7b-2",
    "@cf/openchat/openchat-3.5-0106",
    "@cf/thebloke/discolm-german-7b-v1-awq",
    "@cf/tiiuae/falcon-7b-instruct",
    "@cf/tinyllama/tinyllama-1.1b-chat-v1.0",
    "@hf/nexusflow/starling-lm-7b-beta",
    "@hf/nousresearch/hermes-2-pro-mistral-7b",
    "@hf/thebloke/deepseek-coder-6.7b-base-awq",
    "@hf/thebloke/deepseek-coder-6.7b-instruct-awq",
    "@hf/thebloke/llama-2-13b-chat-awq",
    "@hf/thebloke/llamaguard-7b-awq",
    "@hf/thebloke/neural-chat-7b-v3-1-awq",
    "@hf/thebloke/openhermes-2.5-mistral-7b-awq",
    "@hf/thebloke/zephyr-7b-beta-awq",
    "AndroidDeveloper",
    "AngularJSAgent",
    "AzureAgent",
    "BitbucketAgent",
    "DigitalOceanAgent",
    "DockerAgent",
    "ElectronAgent",
    "ErlangAgent",
    "FastAPIAgent",
    "FirebaseAgent",
    "FlaskAgent",
    "FlutterAgent",
    "GitAgent",
    "GitlabAgent",
    "GoAgent",
    "GodotAgent",
    "GoogleCloudAgent",
    "HTMLAgent",
    "HerokuAgent",
    "ImageGeneration",
    "JavaAgent",
    "JavaScriptAgent",
    "MongoDBAgent",
    "Next.jsAgent",
    "PyTorchAgent",
    "PythonAgent",
    "ReactAgent",
    "RepoMap",
    "SwiftDeveloper",
    "XcodeAgent",
    "YoutubeAgent",
    "blackboxai",
    "blackboxai-pro",
    "builderAgent",
    "dify",
    "flux",
    "openchat/openchat-3.6-8b",
    "rtist",
    "searchgpt",
    "sur",
    "sur-mistral",
    "unity"
]

# Parameters
is_conversation = True
max_tokens = 4000  # Set a reasonable default
timeout = 30
model = "claude-3-5-sonnet-20240620"
system_prompt = "You are a helpful assistant."
temperature = 0.5
presence_penalty = 0
frequency_penalty = 0
top_p = 1

if model not in models:
    raise ValueError(f"Invalid model: {model}. Choose from: {', '.join(models)}")

session = requests.Session()
api_endpoint = "https://chat.typegpt.net/api/openai/v1/chat/completions"

headers = {
    "authority": "chat.typegpt.net",
    "accept": "application/json, text/event-stream",
    "accept-language": "en-US,en;q=0.9",
    "content-type": "application/json",
    "origin": "https://chat.typegpt.net",
    "referer": "https://chat.typegpt.net/",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
}

# Prompt to send
prompt = "What is the knowledge cut off? Be specific and also specify the month, year and date. If not sure, then provide approximate"

# Payload
payload = {
    "messages": [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": prompt}
    ],
    "stream": True,
    "model": model,
    "temperature": temperature,
    "presence_penalty": presence_penalty,
    "frequency_penalty": frequency_penalty,
    "top_p": top_p,
    "max_tokens": max_tokens,
}

# Make the API request
response = session.post(
    api_endpoint, headers=headers, json=payload, stream=True, timeout=timeout
)

if not response.ok:
    raise Exception(f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}")

# Process the streamed response (OpenAI-style server-sent events)
for line in response.iter_lines():
    if line:
        line = line.decode("utf-8")
        if line.startswith("data: "):
            line = line[6:]  # Remove "data: " prefix
            if line.strip() == "[DONE]":
                break
            try:
                data = json.loads(line)
                if 'choices' in data and len(data['choices']) > 0:
                    delta = data['choices'][0].get('delta', {})
                    if 'content' in delta:
                        new_content = delta['content']
                        print(new_content, end="", flush=True)
            except json.JSONDecodeError:
                continue
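
The script above always streams. If a single blocking response is preferred, the same payload can be reused with streaming turned off. A minimal sketch, under the assumption that the endpoint returns standard OpenAI-style completion JSON when `stream` is false (consistent with the delta chunks it streams above); it reuses `session`, `headers`, `payload`, and `timeout` from the script:

# Assumption: with "stream": False the endpoint returns ordinary OpenAI-style
# completion JSON rather than server-sent events.
payload_non_stream = dict(payload, stream=False)
resp = session.post(api_endpoint, headers=headers, json=payload_non_stream, timeout=timeout)
resp.raise_for_status()
data = resp.json()
print(data["choices"][0]["message"]["content"])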