nazdridoy committed
Commit c40f3d0 · verified · 1 Parent(s): 4ddd600

feat(network): add timeouts and retry logic


- [feat] Apply `INFERENCE_TIMEOUT` and a token inactivity timeout to chat completion (chat_handler.py:L62-76); a sketch of this timeout pattern follows the list below
- [feat] Implement `IMAGE_GENERATION_TIMEOUT` for image creation (image_handler.py:L70-75)
- [feat] Add retry and timeout logic to `get_proxy_token()` and `report_token_status()` requests (hf_token_utils.py:L29-66, L84-110)
- [feat] Enhance network error handling in chat and image handlers (chat_handler.py:L91-131, image_handler.py:L82-117)
- [update] Update `report_token_status()` to handle "timeout" and "connection_error" types (hf_token_utils.py:L70-71)
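
A minimal, self-contained sketch of the timeout pattern these changes apply in both handlers; `slow_call` and the 2-second limit are illustrative stand-ins, not code from this commit:

    import time
    from concurrent.futures import ThreadPoolExecutor, TimeoutError as FutureTimeoutError

    def slow_call():
        time.sleep(5)  # stand-in for a blocking SDK call such as client.chat_completion(...)
        return "result"

    with ThreadPoolExecutor(max_workers=1) as executor:
        future = executor.submit(slow_call)
        try:
            result = future.result(timeout=2)  # raises FutureTimeoutError after 2s
        except FutureTimeoutError:
            future.cancel()  # no-op once the worker thread is already running
            raise TimeoutError("call timed out after 2 seconds")

One property of this pattern worth knowing: `future.result(timeout=...)` stops waiting after the limit, but `cancel()` cannot interrupt a thread that has already started, and the executor's context manager joins the worker on exit, so the `TimeoutError` only propagates once the underlying call actually returns.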

Files changed (3)
  1. chat_handler.py    +85 -21
  2. hf_token_utils.py  +102 -21
  3. image_handler.py   +65 -13
chat_handler.py CHANGED

@@ -4,8 +4,12 @@ Handles chat completion requests with streaming responses.
 """
 
 import os
+import time
+import threading
+from concurrent.futures import ThreadPoolExecutor, TimeoutError as FutureTimeoutError
 from huggingface_hub import InferenceClient
 from huggingface_hub.errors import HfHubHTTPError
+from requests.exceptions import ConnectionError, Timeout, RequestException
 from hf_token_utils import get_proxy_token, report_token_status
 from utils import (
     validate_proxy_key,
@@ -13,6 +17,9 @@ from utils import (
     format_error_message
 )
 
+# Timeout configuration for inference requests
+INFERENCE_TIMEOUT = 120  # 2 minutes max for inference
+
 
 def chat_respond(
     message,
@@ -34,8 +41,9 @@ def chat_respond(
 
     proxy_api_key = os.getenv("PROXY_KEY")
 
+    token_id = None
     try:
-        # Get token from HF-Inferoxy proxy server
+        # Get token from HF-Inferoxy proxy server with timeout handling
         print(f"πŸ”‘ Chat: Requesting token from proxy...")
         token, token_id = get_proxy_token(api_key=proxy_api_key)
         print(f"βœ… Chat: Got token: {token_id}")
@@ -58,7 +66,7 @@
             api_key=token
         )
 
-        print(f"πŸš€ Chat: Client created, starting inference...")
+        print(f"πŸš€ Chat: Client created, starting inference with timeout...")
 
         chat_completion_kwargs = {
             "model": model,
@@ -71,33 +79,89 @@
 
         response = ""
 
-        print(f"πŸ“‘ Chat: Making streaming request...")
-        stream = client.chat_completion(**chat_completion_kwargs)
-        print(f"πŸ”„ Chat: Got stream, starting to iterate...")
+        print(f"πŸ“‘ Chat: Making streaming request with {INFERENCE_TIMEOUT}s timeout...")
+
+        # Create streaming function for timeout handling
+        def create_stream():
+            return client.chat_completion(**chat_completion_kwargs)
+
+        # Execute with timeout using ThreadPoolExecutor
+        with ThreadPoolExecutor(max_workers=1) as executor:
+            future = executor.submit(create_stream)
+
+            try:
+                # Get the stream with timeout
+                stream = future.result(timeout=INFERENCE_TIMEOUT)
+                print(f"πŸ”„ Chat: Got stream, starting to iterate...")
 
-        for message in stream:
-            choices = message.choices
-            token_content = ""
-            if len(choices) and choices[0].delta.content:
-                token_content = choices[0].delta.content
+                # Track streaming time to detect hangs
+                last_token_time = time.time()
+                token_timeout = 30  # 30 seconds between tokens
+
+                for message in stream:
+                    current_time = time.time()
+
+                    # Check if we've been waiting too long for a token
+                    if current_time - last_token_time > token_timeout:
+                        raise TimeoutError(f"No response received for {token_timeout} seconds during streaming")
+
+                    choices = message.choices
+                    token_content = ""
+                    if len(choices) and choices[0].delta.content:
+                        token_content = choices[0].delta.content
+                        last_token_time = current_time  # Reset timer when we get content
 
-            response += token_content
-            yield response
+                    response += token_content
+                    yield response
+
+            except FutureTimeoutError:
+                future.cancel()  # Cancel the running task
+                raise TimeoutError(f"Chat request timed out after {INFERENCE_TIMEOUT} seconds")
 
         # Report successful token usage
-        report_token_status(token_id, "success", api_key=proxy_api_key)
+        if token_id:
+            report_token_status(token_id, "success", api_key=proxy_api_key)
+
+    except ConnectionError as e:
+        # Handle proxy connection errors
+        error_msg = f"Cannot connect to HF-Inferoxy server: {str(e)}"
+        print(f"πŸ”Œ Chat connection error: {error_msg}")
+        if token_id:
+            report_token_status(token_id, "error", error_msg, api_key=proxy_api_key)
+        yield format_error_message("Connection Error", "Unable to connect to the proxy server. Please check if it's running.")
+
+    except TimeoutError as e:
+        # Handle timeout errors
+        error_msg = f"Request timed out: {str(e)}"
+        print(f"⏰ Chat timeout: {error_msg}")
+        if token_id:
+            report_token_status(token_id, "error", error_msg, api_key=proxy_api_key)
+        yield format_error_message("Timeout Error", "The request took too long. The server may be overloaded. Please try again.")
 
     except HfHubHTTPError as e:
-        # Report HF Hub errors
-        if 'token_id' in locals():
-            report_token_status(token_id, "error", str(e), api_key=proxy_api_key)
-        yield format_error_message("HuggingFace API Error", str(e))
+        # Handle HuggingFace API errors
+        error_msg = str(e)
+        print(f"πŸ€— Chat HF error: {error_msg}")
+        if token_id:
+            report_token_status(token_id, "error", error_msg, api_key=proxy_api_key)
+
+        # Provide more user-friendly error messages
+        if "401" in error_msg:
+            yield format_error_message("Authentication Error", "Invalid or expired API token. The proxy will provide a new token on retry.")
+        elif "402" in error_msg:
+            yield format_error_message("Quota Exceeded", "API quota exceeded. The proxy will try alternative providers.")
+        elif "429" in error_msg:
+            yield format_error_message("Rate Limited", "Too many requests. Please wait a moment and try again.")
+        else:
+            yield format_error_message("HuggingFace API Error", error_msg)
 
     except Exception as e:
-        # Report other errors
-        if 'token_id' in locals():
-            report_token_status(token_id, "error", str(e), api_key=proxy_api_key)
-        yield format_error_message("Unexpected Error", str(e))
+        # Handle all other errors
+        error_msg = str(e)
+        print(f"❌ Chat unexpected error: {error_msg}")
+        if token_id:
+            report_token_status(token_id, "error", error_msg, api_key=proxy_api_key)
+        yield format_error_message("Unexpected Error", f"An unexpected error occurred: {error_msg}")
 
 
 def handle_chat_submit(message, history, system_msg, model_name, max_tokens, temperature, top_p):
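
The inter-token inactivity check added above can be viewed in isolation as a generator wrapper; this sketch assumes a generic iterator of text chunks (`watch_stream` and its argument names are illustrative, not part of the diff):

    import time

    def watch_stream(chunks, token_timeout=30):
        """Re-yield chunks, raising if the gap since the last non-empty chunk exceeds token_timeout."""
        last_token_time = time.time()
        for chunk in chunks:
            now = time.time()
            if now - last_token_time > token_timeout:
                raise TimeoutError(f"no output for more than {token_timeout}s")
            if chunk:
                last_token_time = now  # reset only when real content arrives
            yield chunk

As in the diff, the check runs only when the next chunk actually arrives, so it catches streams that slow to a trickle of empty deltas; a stream that stops yielding entirely blocks inside the iterator and is not caught by this check alone.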
hf_token_utils.py CHANGED

@@ -2,11 +2,18 @@
 import os
 import requests
 import json
+import time
 from typing import Dict, Optional, Any, Tuple
+from requests.exceptions import ConnectionError, Timeout, RequestException
+
+# Timeout and retry configuration
+REQUEST_TIMEOUT = 30  # 30 seconds timeout
+RETRY_ATTEMPTS = 2
+RETRY_DELAY = 1  # 1 second delay between retries
 
 def get_proxy_token(proxy_url: str = "http://scw.nazdev.tech:11155", api_key: str = None) -> Tuple[str, str]:
     """
-    Get a valid token from the proxy server.
+    Get a valid token from the proxy server with timeout and retry logic.
 
     Args:
         proxy_url: URL of the HF-Inferoxy server
@@ -16,24 +23,68 @@ def get_proxy_token(proxy_url: str = "http://scw.nazdev.tech:11155", api_key: st
         Tuple of (token, token_id)
 
     Raises:
+        ConnectionError: If unable to connect to proxy server
+        TimeoutError: If request times out
         Exception: If token provisioning fails
     """
     headers = {}
     if api_key:
         headers["Authorization"] = f"Bearer {api_key}"
 
-    response = requests.get(f"{proxy_url}/keys/provision", headers=headers)
-    if response.status_code != 200:
-        raise Exception(f"Failed to provision token: {response.text}")
-
-    data = response.json()
-    token = data["token"]
-    token_id = data["token_id"]
-
-    # For convenience, also set environment variable
-    os.environ["HF_TOKEN"] = token
+    print(f"πŸ”— Connecting to proxy: {proxy_url}")
 
-    return token, token_id
+    for attempt in range(RETRY_ATTEMPTS):
+        try:
+            print(f"πŸ”„ Token provision attempt {attempt + 1}/{RETRY_ATTEMPTS}")
+
+            response = requests.get(
+                f"{proxy_url}/keys/provision",
+                headers=headers,
+                timeout=REQUEST_TIMEOUT
+            )
+
+            if response.status_code == 200:
+                data = response.json()
+                token = data["token"]
+                token_id = data["token_id"]
+
+                # For convenience, also set environment variable
+                os.environ["HF_TOKEN"] = token
+
+                print(f"βœ… Token provisioned successfully: {token_id}")
+                return token, token_id
+            else:
+                error_msg = f"HTTP {response.status_code}: {response.text}"
+                print(f"❌ Provision failed: {error_msg}")
+
+                if attempt == RETRY_ATTEMPTS - 1:  # Last attempt
+                    raise Exception(f"Failed to provision token: {error_msg}")
+
+        except ConnectionError as e:
+            error_msg = f"Connection failed to proxy server: {str(e)}"
+            print(f"πŸ”Œ {error_msg}")
+
+            if attempt == RETRY_ATTEMPTS - 1:  # Last attempt
+                raise ConnectionError(f"Cannot connect to HF-Inferoxy at {proxy_url}. Please check if the server is running.")
+
+        except Timeout as e:
+            error_msg = f"Request timeout after {REQUEST_TIMEOUT}s: {str(e)}"
+            print(f"⏰ {error_msg}")
+
+            if attempt == RETRY_ATTEMPTS - 1:  # Last attempt
+                raise TimeoutError(f"Timeout connecting to HF-Inferoxy. Server may be overloaded.")
+
+        except RequestException as e:
+            error_msg = f"Request error: {str(e)}"
+            print(f"🚫 {error_msg}")
+
+            if attempt == RETRY_ATTEMPTS - 1:  # Last attempt
+                raise Exception(f"Network error connecting to proxy: {str(e)}")
+
+        # Wait before retry
+        if attempt < RETRY_ATTEMPTS - 1:
+            print(f"⏱️ Retrying in {RETRY_DELAY}s...")
+            time.sleep(RETRY_DELAY)
 
 def report_token_status(
     token_id: str,
@@ -43,7 +94,7 @@ def report_token_status(
     api_key: str = None
 ) -> bool:
     """
-    Report token usage status back to the proxy server.
+    Report token usage status back to the proxy server with timeout handling.
 
     Args:
         token_id: ID of the token to report (from get_proxy_token)
@@ -66,6 +117,10 @@ def report_token_status(
         error_type = "invalid_credentials"
     elif "402 Client Error" in error and "exceeded your monthly included credits" in error:
         error_type = "credits_exceeded"
+    elif "timeout" in error.lower() or "timed out" in error.lower():
+        error_type = "timeout"
+    elif "connection" in error.lower():
+        error_type = "connection_error"
 
     if error_type:
         payload["error_type"] = error_type
@@ -73,11 +128,37 @@ def report_token_status(
     headers = {"Content-Type": "application/json"}
     if api_key:
         headers["Authorization"] = f"Bearer {api_key}"
-
-    try:
-        response = requests.post(f"{proxy_url}/keys/report", json=payload, headers=headers)
-        return response.status_code == 200
-    except Exception as e:
-        # Silently fail to avoid breaking the client application
-        # In production, consider logging this error
-        return False
+
+    print(f"πŸ“Š Reporting {status} for token {token_id}")
+
+    for attempt in range(RETRY_ATTEMPTS):
+        try:
+            response = requests.post(
+                f"{proxy_url}/keys/report",
+                json=payload,
+                headers=headers,
+                timeout=REQUEST_TIMEOUT
+            )
+
+            if response.status_code == 200:
+                print(f"βœ… Status reported successfully")
+                return True
+            else:
+                print(f"⚠️ Report failed: HTTP {response.status_code}")
+
+        except ConnectionError as e:
+            print(f"πŸ”Œ Report connection error: {str(e)}")
+
+        except Timeout as e:
+            print(f"⏰ Report timeout: {str(e)}")
+
+        except RequestException as e:
+            print(f"🚫 Report request error: {str(e)}")
+
+        # Don't retry on last attempt
+        if attempt < RETRY_ATTEMPTS - 1:
+            print(f"⏱️ Retrying report in {RETRY_DELAY}s...")
+            time.sleep(RETRY_DELAY)
+
+    print(f"❌ Failed to report status after {RETRY_ATTEMPTS} attempts")
+    return False
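
A hedged usage sketch of the retry-aware helpers above; the inference step is elided, and the PROXY_KEY handling mirrors the chat and image handlers:

    import os
    from requests.exceptions import ConnectionError  # the type get_proxy_token re-raises
    from hf_token_utils import get_proxy_token, report_token_status

    proxy_api_key = os.getenv("PROXY_KEY")
    try:
        # retries internally up to RETRY_ATTEMPTS times, REQUEST_TIMEOUT per attempt
        token, token_id = get_proxy_token(api_key=proxy_api_key)
        # ... run inference with `token` ...
        report_token_status(token_id, "success", api_key=proxy_api_key)
    except (ConnectionError, TimeoutError) as e:
        # raised only after every attempt has failed
        print(f"Proxy unavailable: {e}")

Since requests applies a scalar timeout to the connect and read phases separately, a failing provision call can take on the order of RETRY_ATTEMPTS Γ— REQUEST_TIMEOUT plus RETRY_DELAY before the final exception surfaces.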
image_handler.py CHANGED

@@ -4,8 +4,12 @@ Handles text-to-image generation with multiple providers.
 """
 
 import os
+import time
+import threading
+from concurrent.futures import ThreadPoolExecutor, TimeoutError as FutureTimeoutError
 from huggingface_hub import InferenceClient
 from huggingface_hub.errors import HfHubHTTPError
+from requests.exceptions import ConnectionError, Timeout, RequestException
 from hf_token_utils import get_proxy_token, report_token_status
 from utils import (
     IMAGE_CONFIG,
@@ -14,6 +18,9 @@ from utils import (
     format_success_message
 )
 
+# Timeout configuration for image generation
+IMAGE_GENERATION_TIMEOUT = 300  # 5 minutes max for image generation
+
 
 def validate_dimensions(width, height):
     """Validate that dimensions are divisible by 8 (required by most diffusion models)"""
@@ -43,8 +50,9 @@ def generate_image(
 
     proxy_api_key = os.getenv("PROXY_KEY")
 
+    token_id = None
     try:
-        # Get token from HF-Inferoxy proxy server
+        # Get token from HF-Inferoxy proxy server with timeout handling
        print(f"πŸ”‘ Image: Requesting token from proxy...")
        token, token_id = get_proxy_token(api_key=proxy_api_key)
        print(f"βœ… Image: Got token: {token_id}")
@@ -76,29 +84,73 @@
             generation_params["seed"] = seed
 
         print(f"πŸ“ Image: Dimensions: {width}x{height}, steps: {num_inference_steps}, guidance: {guidance_scale}")
-        print(f"πŸ“‘ Image: Making generation request...")
+        print(f"πŸ“‘ Image: Making generation request with {IMAGE_GENERATION_TIMEOUT}s timeout...")
+
+        # Create generation function for timeout handling
+        def generate_image_task():
+            return client.text_to_image(**generation_params)
 
-        # Generate image
-        image = client.text_to_image(**generation_params)
+        # Execute with timeout using ThreadPoolExecutor
+        with ThreadPoolExecutor(max_workers=1) as executor:
+            future = executor.submit(generate_image_task)
+
+            try:
+                # Generate image with timeout
+                image = future.result(timeout=IMAGE_GENERATION_TIMEOUT)
+            except FutureTimeoutError:
+                future.cancel()  # Cancel the running task
+                raise TimeoutError(f"Image generation timed out after {IMAGE_GENERATION_TIMEOUT} seconds")
 
         print(f"πŸ–ΌοΈ Image: Generation completed! Image type: {type(image)}")
 
         # Report successful token usage
-        report_token_status(token_id, "success", api_key=proxy_api_key)
+        if token_id:
+            report_token_status(token_id, "success", api_key=proxy_api_key)
 
         return image, format_success_message("Image generated", f"using {model_name} on {provider}")
 
+    except ConnectionError as e:
+        # Handle proxy connection errors
+        error_msg = f"Cannot connect to HF-Inferoxy server: {str(e)}"
+        print(f"πŸ”Œ Image connection error: {error_msg}")
+        if token_id:
+            report_token_status(token_id, "error", error_msg, api_key=proxy_api_key)
+        return None, format_error_message("Connection Error", "Unable to connect to the proxy server. Please check if it's running.")
+
+    except TimeoutError as e:
+        # Handle timeout errors
+        error_msg = f"Image generation timed out: {str(e)}"
+        print(f"⏰ Image timeout: {error_msg}")
+        if token_id:
+            report_token_status(token_id, "error", error_msg, api_key=proxy_api_key)
+        return None, format_error_message("Timeout Error", f"Image generation took too long (>{IMAGE_GENERATION_TIMEOUT//60} minutes). Try reducing image size or steps.")
+
     except HfHubHTTPError as e:
-        # Report HF Hub errors
-        if 'token_id' in locals():
-            report_token_status(token_id, "error", str(e), api_key=proxy_api_key)
-        return None, format_error_message("HuggingFace API Error", str(e))
+        # Handle HuggingFace API errors
+        error_msg = str(e)
+        print(f"πŸ€— Image HF error: {error_msg}")
+        if token_id:
+            report_token_status(token_id, "error", error_msg, api_key=proxy_api_key)
+
+        # Provide more user-friendly error messages
+        if "401" in error_msg:
+            return None, format_error_message("Authentication Error", "Invalid or expired API token. The proxy will provide a new token on retry.")
+        elif "402" in error_msg:
+            return None, format_error_message("Quota Exceeded", "API quota exceeded. The proxy will try alternative providers.")
+        elif "429" in error_msg:
+            return None, format_error_message("Rate Limited", "Too many requests. Please wait a moment and try again.")
+        elif "content policy" in error_msg.lower() or "safety" in error_msg.lower():
+            return None, format_error_message("Content Policy", "Image prompt was rejected by content policy. Please try a different prompt.")
+        else:
+            return None, format_error_message("HuggingFace API Error", error_msg)
 
     except Exception as e:
-        # Report other errors
-        if 'token_id' in locals():
-            report_token_status(token_id, "error", str(e), api_key=proxy_api_key)
-        return None, format_error_message("Unexpected Error", str(e))
+        # Handle all other errors
+        error_msg = str(e)
+        print(f"❌ Image unexpected error: {error_msg}")
+        if token_id:
+            report_token_status(token_id, "error", error_msg, api_key=proxy_api_key)
+        return None, format_error_message("Unexpected Error", f"An unexpected error occurred: {error_msg}")
 
 
 def handle_image_generation(prompt_val, model_val, provider_val, negative_prompt_val, width_val, height_val, steps_val, guidance_val, seed_val):
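
For completeness, a hypothetical caller sketch; generate_image's full signature is not visible in this diff, so the keyword names below are assumptions based on the generation parameters shown above:

    from image_handler import generate_image

    image, status = generate_image(
        prompt="a lighthouse at dawn",              # illustrative prompt
        model_name="black-forest-labs/FLUX.1-dev",  # illustrative model id
        provider="hf-inference",                    # illustrative provider
        width=768,
        height=768,
    )
    if image is None:
        print(status)          # formatted error from the handlers above
    else:
        image.save("out.png")  # InferenceClient.text_to_image returns a PIL image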