Spaces:
Running
Running
Zachary Greathouse
committed on
Zg/improve logging (#8)
Browse files
* Reduce character description and text max length to 400 from 1000
* Reduce number of retries for tts calls to a single retry, and a 2 second wait before retrying
* Structure error logs for easier parsing
- src/constants.py +2 -2
- src/frontend.py +8 -7
- src/integrations/elevenlabs_api.py +3 -3
- src/integrations/hume_api.py +3 -3
src/constants.py
CHANGED
|
@@ -24,10 +24,10 @@ HUME_TO_HUME: ComparisonType = "Hume AI - Hume AI"
|
|
| 24 |
HUME_TO_ELEVENLABS: ComparisonType = "Hume AI - ElevenLabs"
|
| 25 |
|
| 26 |
CHARACTER_DESCRIPTION_MIN_LENGTH: int = 20
|
| 27 |
-
CHARACTER_DESCRIPTION_MAX_LENGTH: int = 1000
|
| 28 |
|
| 29 |
TEXT_MIN_LENGTH: int = 100
|
| 30 |
-
TEXT_MAX_LENGTH: int = 1000
|
| 31 |
|
| 32 |
OPTION_A_KEY: OptionKey = "option_a"
|
| 33 |
OPTION_B_KEY: OptionKey = "option_b"
|
|
|
|
| 24 |
HUME_TO_ELEVENLABS: ComparisonType = "Hume AI - ElevenLabs"
|
| 25 |
|
| 26 |
CHARACTER_DESCRIPTION_MIN_LENGTH: int = 20
|
| 27 |
+
CHARACTER_DESCRIPTION_MAX_LENGTH: int = 400
|
| 28 |
|
| 29 |
TEXT_MIN_LENGTH: int = 100
|
| 30 |
+
TEXT_MAX_LENGTH: int = 400
|
| 31 |
|
| 32 |
OPTION_A_KEY: OptionKey = "option_a"
|
| 33 |
OPTION_B_KEY: OptionKey = "option_b"
|
src/frontend.py
CHANGED
|
@@ -73,11 +73,11 @@ class Frontend:
|
|
| 73 |
logger.info(f"Generated text ({len(generated_text)} characters).")
|
| 74 |
return gr.update(value=generated_text), generated_text
|
| 75 |
except AnthropicError as ae:
|
| 76 |
-
logger.error(f"AnthropicError while generating text: {ae!s}")
|
| 77 |
raise gr.Error(f'There was an issue communicating with the Anthropic API: "{ae.message}"')
|
| 78 |
except Exception as e:
|
| 79 |
-
logger.error(f"Unexpected error while generating text: {e}")
|
| 80 |
-
raise gr.Error("Failed to generate text. Please try again.")
|
| 81 |
|
| 82 |
async def _synthesize_speech(
|
| 83 |
self,
|
|
@@ -143,6 +143,7 @@ class Frontend:
|
|
| 143 |
|
| 144 |
# Await both tasks concurrently using asyncio.gather()
|
| 145 |
(generation_id_a, audio_a), (generation_id_b, audio_b) = await asyncio.gather(task_a, task_b)
|
|
|
|
| 146 |
|
| 147 |
option_a = Option(provider=provider_a, audio=audio_a, generation_id=generation_id_a)
|
| 148 |
option_b = Option(provider=provider_b, audio=audio_b, generation_id=generation_id_b)
|
|
@@ -158,14 +159,14 @@ class Frontend:
|
|
| 158 |
True,
|
| 159 |
)
|
| 160 |
except ElevenLabsError as ee:
|
| 161 |
-
logger.error(f"
|
| 162 |
raise gr.Error(f'There was an issue communicating with the Elevenlabs API: "{ee.message}"')
|
| 163 |
except HumeError as he:
|
| 164 |
-
logger.error(f"
|
| 165 |
raise gr.Error(f'There was an issue communicating with the Hume API: "{he.message}"')
|
| 166 |
except Exception as e:
|
| 167 |
-
logger.error(f"
|
| 168 |
-
raise gr.Error("An unexpected error occurred. Please try again.")
|
| 169 |
|
| 170 |
async def _vote(
|
| 171 |
self,
|
|
|
|
| 73 |
logger.info(f"Generated text ({len(generated_text)} characters).")
|
| 74 |
return gr.update(value=generated_text), generated_text
|
| 75 |
except AnthropicError as ae:
|
| 76 |
+
logger.error(f"Text Generation Failed: AnthropicError while generating text: {ae!s}")
|
| 77 |
raise gr.Error(f'There was an issue communicating with the Anthropic API: "{ae.message}"')
|
| 78 |
except Exception as e:
|
| 79 |
+
logger.error(f"Text Generation Failed: Unexpected error while generating text: {e!s}")
|
| 80 |
+
raise gr.Error("Failed to generate text. Please try again shortly.")
|
| 81 |
|
| 82 |
async def _synthesize_speech(
|
| 83 |
self,
|
|
|
|
| 143 |
|
| 144 |
# Await both tasks concurrently using asyncio.gather()
|
| 145 |
(generation_id_a, audio_a), (generation_id_b, audio_b) = await asyncio.gather(task_a, task_b)
|
| 146 |
+
logger.info(f"Synthesis Succeed for providers: {provider_a} and {provider_b}")
|
| 147 |
|
| 148 |
option_a = Option(provider=provider_a, audio=audio_a, generation_id=generation_id_a)
|
| 149 |
option_b = Option(provider=provider_b, audio=audio_b, generation_id=generation_id_b)
|
|
|
|
| 159 |
True,
|
| 160 |
)
|
| 161 |
except ElevenLabsError as ee:
|
| 162 |
+
logger.error(f"Synthesis Failed with ElevenLabsError during TTS generation: {ee!s}")
|
| 163 |
raise gr.Error(f'There was an issue communicating with the Elevenlabs API: "{ee.message}"')
|
| 164 |
except HumeError as he:
|
| 165 |
+
logger.error(f"Synthesis Failed with HumeError during TTS generation: {he!s}")
|
| 166 |
raise gr.Error(f'There was an issue communicating with the Hume API: "{he.message}"')
|
| 167 |
except Exception as e:
|
| 168 |
+
logger.error(f"Synthesis Failed with an unexpected error during TTS generation: {e!s}")
|
| 169 |
+
raise gr.Error("An unexpected error occurred. Please try again shortly.")
|
| 170 |
|
| 171 |
async def _vote(
|
| 172 |
self,
|
src/integrations/elevenlabs_api.py
CHANGED
|
@@ -22,7 +22,7 @@ from typing import Optional, Tuple
|
|
| 22 |
# Third-Party Library Imports
|
| 23 |
from elevenlabs import AsyncElevenLabs, TextToVoiceCreatePreviewsRequestOutputFormat
|
| 24 |
from elevenlabs.core import ApiError
|
| 25 |
-
from tenacity import after_log, before_log, retry, retry_if_exception, stop_after_attempt,
|
| 26 |
|
| 27 |
# Local Application Imports
|
| 28 |
from src.config import Config, logger
|
|
@@ -76,8 +76,8 @@ class UnretryableElevenLabsError(ElevenLabsError):
|
|
| 76 |
|
| 77 |
@retry(
|
| 78 |
retry=retry_if_exception(lambda e: not isinstance(e, UnretryableElevenLabsError)),
|
| 79 |
-
stop=stop_after_attempt(
|
| 80 |
-
wait=
|
| 81 |
before=before_log(logger, logging.DEBUG),
|
| 82 |
after=after_log(logger, logging.DEBUG),
|
| 83 |
reraise=True,
|
|
|
|
| 22 |
# Third-Party Library Imports
|
| 23 |
from elevenlabs import AsyncElevenLabs, TextToVoiceCreatePreviewsRequestOutputFormat
|
| 24 |
from elevenlabs.core import ApiError
|
| 25 |
+
from tenacity import after_log, before_log, retry, retry_if_exception, stop_after_attempt, wait_fixed
|
| 26 |
|
| 27 |
# Local Application Imports
|
| 28 |
from src.config import Config, logger
|
|
|
|
| 76 |
|
| 77 |
@retry(
|
| 78 |
retry=retry_if_exception(lambda e: not isinstance(e, UnretryableElevenLabsError)),
|
| 79 |
+
stop=stop_after_attempt(2),
|
| 80 |
+
wait=wait_fixed(2),
|
| 81 |
before=before_log(logger, logging.DEBUG),
|
| 82 |
after=after_log(logger, logging.DEBUG),
|
| 83 |
reraise=True,
|
src/integrations/hume_api.py
CHANGED
|
@@ -21,7 +21,7 @@ from typing import Tuple, Union
|
|
| 21 |
from hume import AsyncHumeClient
|
| 22 |
from hume.core.api_error import ApiError
|
| 23 |
from hume.tts.types import Format, FormatMp3, PostedUtterance, ReturnTts
|
| 24 |
-
from tenacity import after_log, before_log, retry, retry_if_exception, stop_after_attempt,
|
| 25 |
|
| 26 |
# Local Application Imports
|
| 27 |
from src.config import Config, logger
|
|
@@ -79,8 +79,8 @@ class UnretryableHumeError(HumeError):
|
|
| 79 |
|
| 80 |
@retry(
|
| 81 |
retry=retry_if_exception(lambda e: not isinstance(e, UnretryableHumeError)),
|
| 82 |
-
stop=stop_after_attempt(
|
| 83 |
-
wait=
|
| 84 |
before=before_log(logger, logging.DEBUG),
|
| 85 |
after=after_log(logger, logging.DEBUG),
|
| 86 |
reraise=True,
|
|
|
|
| 21 |
from hume import AsyncHumeClient
|
| 22 |
from hume.core.api_error import ApiError
|
| 23 |
from hume.tts.types import Format, FormatMp3, PostedUtterance, ReturnTts
|
| 24 |
+
from tenacity import after_log, before_log, retry, retry_if_exception, stop_after_attempt, wait_fixed
|
| 25 |
|
| 26 |
# Local Application Imports
|
| 27 |
from src.config import Config, logger
|
|
|
|
| 79 |
|
| 80 |
@retry(
|
| 81 |
retry=retry_if_exception(lambda e: not isinstance(e, UnretryableHumeError)),
|
| 82 |
+
stop=stop_after_attempt(2),
|
| 83 |
+
wait=wait_fixed(2),
|
| 84 |
before=before_log(logger, logging.DEBUG),
|
| 85 |
after=after_log(logger, logging.DEBUG),
|
| 86 |
reraise=True,
|