zach committed
Commit 136ff40 · 1 Parent(s): 49be7fc

Update TTS calls to Hume and ElevenLabs to run in parallel to reduce latency
Files changed: src/app.py (+15 -7)

src/app.py CHANGED
@@ -15,7 +15,8 @@ Functions:
     process_prompt: Handles user input, calls the Anthropic and Hume APIs, and returns generated text and audio.
     build_gradio_interface: Constructs the Gradio Blocks-based interface.
 """
-
+# Standard Library Imports
+from concurrent.futures import ThreadPoolExecutor
 # Third-Party Library Imports
 import gradio as gr
 # Local Application Imports

@@ -50,10 +51,16 @@ def process_prompt(prompt: str) -> str:
         generated_text = generate_text_with_claude(prompt)
         logger.info(f"Generated text (length={len(generated_text)} characters).")

-        …
-        …
+        # Run TTS requests in parallel
+        with ThreadPoolExecutor(max_workers=2) as executor:
+            hume_future = executor.submit(text_to_speech_with_hume, prompt, generated_text)
+            elevenlabs_future = executor.submit(text_to_speech_with_elevenlabs, generated_text)
+
+            # Process TTS results
+            hume_audio = hume_future.result()
+            elevenlabs_audio = elevenlabs_future.result()

-        logger.info("…
+        logger.info(f"TTS audio generated successfully: Hume={len(hume_audio)} bytes, ElevenLabs={len(elevenlabs_audio)} bytes")
         return generated_text, hume_audio, elevenlabs_audio

     except ValueError as ve:

@@ -84,14 +91,15 @@ def build_gradio_interface() -> gr.Blocks:
         sample_prompt_dropdown = gr.Dropdown(
             choices=list(SAMPLE_PROMPTS.keys()),
             label="Choose a Sample Prompt (or enter your own below)",
+            value=None,
             interactive=True
         )

         with gr.Row():
             # Custom prompt input
             prompt_input = gr.Textbox(
-                label="Enter your …
-                placeholder="Or type your own …
+                label="Enter your prompt",
+                placeholder="Or type your own prompt here...",
                 lines=2,
             )

@@ -103,7 +111,7 @@ def build_gradio_interface() -> gr.Blocks:
         output_text = gr.Textbox(
             label="Generated Text",
             interactive=False,
-            lines=…
+            lines=12,
             max_lines=24,
             scale=2,
         )
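
The substantive change is in process_prompt: the Hume and ElevenLabs TTS requests, previously issued one after the other, are now submitted to a two-worker ThreadPoolExecutor so they run concurrently. The following is a minimal, self-contained sketch of that pattern; the two stub functions only simulate the real text_to_speech_with_hume and text_to_speech_with_elevenlabs helpers, and the timings are illustrative.

# Minimal sketch of the parallel-TTS pattern introduced in this commit.
# The stub functions are placeholders, not the real Hume/ElevenLabs helpers.
import time
from concurrent.futures import ThreadPoolExecutor

def fake_hume_tts(prompt: str, text: str) -> bytes:
    time.sleep(2)  # simulate the network latency of the Hume API call
    return b"hume-audio-bytes"

def fake_elevenlabs_tts(text: str) -> bytes:
    time.sleep(2)  # simulate the network latency of the ElevenLabs API call
    return b"elevenlabs-audio-bytes"

def synthesize_in_parallel(prompt: str, generated_text: str) -> tuple[bytes, bytes]:
    # Submit both requests at once; each runs in its own worker thread.
    with ThreadPoolExecutor(max_workers=2) as executor:
        hume_future = executor.submit(fake_hume_tts, prompt, generated_text)
        elevenlabs_future = executor.submit(fake_elevenlabs_tts, generated_text)
        # result() blocks until the corresponding call finishes, so total
        # wall-clock time is roughly max(t_hume, t_elevenlabs) rather than their sum.
        return hume_future.result(), elevenlabs_future.result()

if __name__ == "__main__":
    start = time.perf_counter()
    hume_audio, elevenlabs_audio = synthesize_in_parallel("a prompt", "some generated text")
    print(f"Both responses in {time.perf_counter() - start:.1f}s")  # ~2s instead of ~4s

Because each submitted call blocks only its own worker thread, the wall-clock cost of the TTS step drops from roughly the sum of the two request times to roughly the slower of the two. And since Future.result() re-raises any exception from the worker, the existing except ValueError handling in process_prompt still sees failures from either API.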
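
The remaining hunks are small Gradio UI adjustments: the sample-prompt dropdown now starts with no selection (value=None), the custom-prompt textbox gains an explicit label and placeholder, and the output textbox shows 12 lines by default. A rough stand-alone sketch of that layout follows; the component names match the diff, but the SAMPLE_PROMPTS contents and the surrounding Blocks structure are assumptions, not the app's actual code.

import gradio as gr

# Hypothetical sample prompts; the real SAMPLE_PROMPTS dict lives elsewhere in the app.
SAMPLE_PROMPTS = {
    "Storytelling": "Tell a short story about an unlikely friendship.",
    "Poetry": "Write a poem about the first day of spring.",
}

with gr.Blocks() as demo:
    sample_prompt_dropdown = gr.Dropdown(
        choices=list(SAMPLE_PROMPTS.keys()),
        label="Choose a Sample Prompt (or enter your own below)",
        value=None,  # start with no sample selected
        interactive=True,
    )
    with gr.Row():
        prompt_input = gr.Textbox(
            label="Enter your prompt",
            placeholder="Or type your own prompt here...",
            lines=2,
        )
    with gr.Row():
        output_text = gr.Textbox(
            label="Generated Text",
            interactive=False,
            lines=12,      # taller default text area
            max_lines=24,
            scale=2,
        )

demo.launch()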
|