Add voice narrator (#16)
Files changed:
- .gitignore +3 -1
- LLM/llm_models.py +1 -1
- UI/app_interface.py +131 -37
- agents/narrator_agent/__init__.py +3 -0
- agents/narrator_agent/get_narrator_prompt.py +16 -0
- agents/narrator_agent/narrator_agent.py +249 -0
- agents/orchestrator_agent/get_orchestrator_prompt.py +3 -36
- agents/supabase_agent/get_supabase_agent_prompt.py +2 -1
- agents/supabase_agent/supabase_agent.py +1 -1
- app.py +36 -4
- get_final_text.py +6 -0
- pyproject.toml +2 -0
- third_party_tools/eleven_labs_realtime_tts.py +142 -0
- third_party_tools/text_to_audio_stream.py +120 -0
- uv.lock +152 -59
.gitignore
CHANGED
@@ -98,4 +98,6 @@ execution_plan.json
 
 logs/run_logs
 
-output.mp3
+output.mp3
+
+output.wav
LLM/llm_models.py
CHANGED
@@ -3,7 +3,7 @@ from .models import LLMProviderType, LLMModelType
 from .llm_provider import LLMProvider
 
 
-use_gemini =
+use_gemini = True
 
 base_provider = LLMProviderType.GEMINI if use_gemini else LLMProviderType.HF
 
UI/app_interface.py
CHANGED
@@ -3,8 +3,10 @@ from __future__ import annotations
 import html
 from collections import deque
 from threading import Lock
-from typing import Any, Callable, Deque, Dict, Optional
-
+from typing import Any, Callable, Deque, Dict, Optional, Tuple
+from queue import Queue, LifoQueue
+import numpy as np
+import os
 import gradio as gr  # type: ignore
 
 from third_party_tools.text_to_audio_file import text_to_audio_file
@@ -12,7 +14,8 @@ from third_party_tools.text_to_audio_file import text_to_audio_file
 from .cv_interface import CVInterface
 from .render_plan_html import render_plan_html
 
-
+should_narrate_events = os.getenv("SHOULD_NARRATE_EVENTS", "False").lower() == "true"
+new_events_check_interval_seconds = 3
 class EastSyncInterface:
     """
     EASTSYNC ENTERPRISE INTERFACE
@@ -20,15 +23,20 @@ class EastSyncInterface:
     """
 
     SAMPLE_PROMPT = (
-        "PROJECT:
+        "PROJECT: Data Analytics Dashboard\n"
         "SCOPE: Develop a real-time visualization layer for regional sales data.\n"
-        "TEAM: Data Science Team Alpha (
+        "TEAM: Data Science Team Alpha (2 Juniors, 1 Senior).\n"
         "OBJECTIVE: Analyze current team capabilities and generate a training roadmap to close skill gaps."
     )
 
     def __init__(self):
        self._action_log: Deque[str] = deque(maxlen=200)
        self._action_log_lock = Lock()
+
+        # Queues for live audio narration
+        self.audio_queue: Queue[Tuple[int, np.ndarray]] = Queue()
+        self.event_queue: Queue[str] = LifoQueue()
+
        self.init_message = (
            '<div class="console-line">>> SYSTEM INITIALIZED. WAITING FOR PROJECT INPUT...</div>'
        )
@@ -96,6 +104,8 @@ class EastSyncInterface:
        action_str = str(action)
        if action_str not in self._processing_steps:
            self._processing_steps.append(action_str)
+            # Push to event queue for narrator
+            self.event_queue.put_nowait(f"{action_str} {args if args else ''}")
        self._cached_processing_state = None
 
    def get_action_log_text(self) -> str:
@@ -109,12 +119,9 @@ class EastSyncInterface:
        return self.get_action_log_text()
 
    def render_analysis_result(self, result: Any) -> str:
-
-        audio_path = text_to_audio_file(summary_text)
-        is_audio = audio_path is not None
-        audio_out = gr.update(value=audio_path, visible=is_audio)
+        """Render the analysis result as HTML only. Audio is handled by narrator."""
        html_out = render_plan_html(result)
-        return html_out
+        return html_out
 
    def set_analysis_result(self, result: Any):
        """Store analysis result for async display."""
@@ -189,14 +196,16 @@ class EastSyncInterface:
 
        # Build steps HTML - show only actual steps that have been added
        if current_steps:
-            steps_html = ""
+            steps_html = ""
+            # Current step (in progress) - with animation - ON TOP
+            current_step = current_steps[-1]
+            steps_html += f'<div style="padding: 12px; margin: 8px 0; background: rgba(255,127,0,0.15); border-left: 3px solid var(--arc-orange); color: var(--arc-orange); font-size: 14px; font-weight: 600; animation: pulse 1.5s ease-in-out infinite;">⏳ {html.escape(current_step)}</div>'
+
+            # Previous steps (completed) - WHITE text - REVERSED (Newest first)
+            steps_html += "".join([
                f'<div style="padding: 12px; margin: 8px 0; background: rgba(85,255,0,0.08); border-left: 3px solid var(--arc-green); color: #FFFFFF; font-size: 14px; font-weight: 500;">✓ {html.escape(step)}</div>'
-                for step in current_steps[:-1]
+                for step in reversed(current_steps[:-1])
            ])
-            # Current step (in progress) - with animation
-            if current_steps:
-                current_step = current_steps[-1]
-                steps_html += f'<div style="padding: 12px; margin: 8px 0; background: rgba(255,127,0,0.15); border-left: 3px solid var(--arc-orange); color: var(--arc-orange); font-size: 14px; font-weight: 600; animation: pulse 1.5s ease-in-out infinite;">⏳ {html.escape(current_step)}</div>'
        else:
            steps_html = '<div style="padding: 12px; margin: 8px 0; background: rgba(255,255,255,0.02); border-left: 3px solid var(--arc-cyan); color: var(--text-main); opacity:0.9; font-size: 14px; animation: pulse 1.5s ease-in-out infinite;">⏳ Initializing analysis...</div>'
 
@@ -238,7 +247,7 @@ class EastSyncInterface:
 
            <!-- Progress Bar -->
            <div style="width: 100%; height: 4px; background: rgba(255,255,255,0.1); border-radius: 2px; overflow: hidden; margin-bottom: 32px;">
-                <div style="height: 100%; background: linear-gradient(90deg, var(--arc-red), var(--arc-orange), var(--arc-yellow), var(--arc-green)); animation: progress
+                <div style="height: 100%; background: linear-gradient(90deg, var(--arc-red), var(--arc-orange), var(--arc-yellow), var(--arc-green)); animation: progress {new_events_check_interval_seconds}s ease-in-out infinite;"></div>
            </div>
 
            <!-- Processing Steps (DYNAMIC) -->
@@ -261,7 +270,7 @@ class EastSyncInterface:
            </div>
        </div>
        </div>
-        """
+        """, None
 
    def render_processing_state(self, mode: str = "extract") -> str:
        """Render animated processing state for CV analysis with dynamic steps."""
@@ -272,14 +281,16 @@ class EastSyncInterface:
 
        # Build steps HTML - show only actual steps that have been added
        if current_steps:
-            steps_html = ""
+            steps_html = ""
+            # Current step (in progress) - with animation - ON TOP
+            current_step = current_steps[-1]
+            steps_html += f'<div style="padding: 12px; margin: 8px 0; background: rgba(0,255,255,0.15); border-left: 3px solid var(--arc-cyan); color: var(--arc-cyan); font-size: 14px; font-weight: 600; animation: pulse 1.5s ease-in-out infinite;">⏳ {html.escape(current_step)}</div>'
+
+            # Previous steps (completed) - WHITE text - REVERSED (Newest first)
+            steps_html += "".join([
                f'<div style="padding: 12px; margin: 8px 0; background: rgba(85,255,0,0.08); border-left: 3px solid var(--arc-green); color: #FFFFFF; font-size: 14px; font-weight: 500;">✓ {html.escape(step)}</div>'
-                for step in current_steps[:-1]
+                for step in reversed(current_steps[:-1])
            ])
-            # Current step (in progress) - with animation
-            if current_steps:
-                current_step = current_steps[-1]
-                steps_html += f'<div style="padding: 12px; margin: 8px 0; background: rgba(0,255,255,0.15); border-left: 3px solid var(--arc-cyan); color: var(--arc-cyan); font-size: 14px; font-weight: 600; animation: pulse 1.5s ease-in-out infinite;">⏳ {html.escape(current_step)}</div>'
        else:
            steps_html = '<div style="padding: 12px; margin: 8px 0; background: rgba(255,255,255,0.02); border-left: 3px solid var(--arc-cyan); color: var(--text-main); opacity:0.9; font-size: 14px; animation: pulse 1.5s ease-in-out infinite;">⏳ Initializing...</div>'
 
@@ -321,7 +332,7 @@ class EastSyncInterface:
 
            <!-- Progress Bar -->
            <div style="width: 100%; height: 4px; background: rgba(255,255,255,0.1); border-radius: 2px; overflow: hidden; margin-bottom: 32px;">
-                <div style="height: 100%; background: linear-gradient(90deg, var(--arc-orange), var(--arc-yellow), var(--arc-green)); animation: progress
+                <div style="height: 100%; background: linear-gradient(90deg, var(--arc-orange), var(--arc-yellow), var(--arc-green)); animation: progress {new_events_check_interval_seconds}s ease-in-out infinite;"></div>
            </div>
 
            <!-- Processing Steps (DYNAMIC) -->
@@ -586,6 +597,77 @@ class EastSyncInterface:
        .eta-bar {
            display: none !important;
        }
+        /* LIVE NARRATION STYLING */
+        /* Target the audio component's container */
+        audio {
+            width: 100% !important;
+            background: #1A1D24 !important;
+            border-radius: 4px !important;
+            border: 1px solid rgba(255, 127, 0, 0.4) !important;
+        }
+
+        /* Style the audio player controls */
+        audio::-webkit-media-controls-panel {
+            background: linear-gradient(to bottom, rgba(30, 30, 40, 0.9), rgba(20, 20, 30, 0.95)) !important;
+            border-radius: 4px !important;
+        }
+
+        audio::-webkit-media-controls-play-button,
+        audio::-webkit-media-controls-mute-button {
+            border-radius: 50% !important;
+        }
+
+        audio::-webkit-media-controls-timeline {
+            border-radius: 2px !important;
+            height: 6px !important;
+        }
+
+        audio::-webkit-media-controls-current-time-display,
+        audio::-webkit-media-controls-time-remaining-display {
+            color: #FFA94D !important;
+            font-family: var(--font-mono) !important;
+            font-size: 12px !important;
+            font-weight: 600 !important;
+            text-shadow: 0 0 3px rgba(255, 127, 0, 0.4) !important;
+        }
+
+        /* Add glow effect to live narration container */
+        .live-narration-wrapper {
+            padding: 16px;
+            background: linear-gradient(135deg, rgba(26, 29, 36, 0.8), rgba(20, 23, 30, 0.9));
+            border: 2px solid var(--arc-orange);
+            border-radius: 4px;
+            box-shadow: 0 0 20px rgba(255, 127, 0, 0.3);
+            margin-bottom: 20px;
+        }
+
+        .live-narration-label {
+            color: var(--arc-orange);
+            background: rgb(18, 20, 26);
+            font-size: 12px;
+            font-weight: 700;
+            text-transform: uppercase;
+            letter-spacing: 1.5px;
+            padding-bottom: 8px;
+            display: flex;
+            align-items: center;
+            gap: 8px;
+        }
+
+        .live-narration-label::before {
+            content: "";
+            display: inline-block;
+            width: 8px;
+            height: 8px;
+            background: var(--arc-red);
+            border-radius: 50%;
+            animation: pulse-red 1.5s ease-in-out infinite;
+        }
+
+        @keyframes pulse-red {
+            0%, 100% { opacity: 1; box-shadow: 0 0 8px var(--arc-red); }
+            50% { opacity: 0.5; box-shadow: 0 0 4px var(--arc-red); }
+        }
        """
 
    def _compose_css(self) -> str:
@@ -618,7 +700,7 @@ class EastSyncInterface:
        </div>
        """
 
-    def build_interface(self, analyze_callback: Callable[[str], str], cancel_run_callback: Callable[[], None]) -> gr.Blocks:
+    def build_interface(self, analyze_callback: Callable[[str], str], cancel_run_callback: Callable[[], None], start_audio_stream_callback: Callable[[], Any]) -> gr.Blocks:
        theme = gr.themes.Base(
            primary_hue="orange",
            neutral_hue="slate",
@@ -628,13 +710,24 @@ class EastSyncInterface:
        with gr.Blocks(theme=theme, css=self._app_css, title="EastSync Enterprise", fill_height=True) as demo:
            gr.HTML(self._build_hero())
 
-
-
-
-
-
-
-
+
+            # Live Narration with custom wrapper
+            with gr.Group(elem_classes=["live-narration-wrapper"]) as live_narration_group:
+                gr.HTML('<div class="live-narration-label">🔴 LIVE AI NARRATION</div>')
+                live_audio = gr.Audio(
+                    label="",
+                    streaming=True,
+                    autoplay=True,
+                    buttons=None,
+                    visible=should_narrate_events,
+                    show_label=False,
+                    elem_id="live-narrator-audio"
+                )
+
+            # Hide the wrapper if narration is disabled
+            if not should_narrate_events:
+                live_narration_group.visible = False
+
 
            with gr.Row(equal_height=True, elem_classes=["main-container"]):
 
@@ -717,7 +810,8 @@ class EastSyncInterface:
            def start_project_analysis():
                self.start_processing("project")
                return self.render_project_processing_state()
-
+            # Trigger audio stream independently so it doesn't block analysis
+            btn_run.click(start_audio_stream_callback, outputs=live_audio)
            btn_run.click(
                start_project_analysis,
                outputs=output_display,
@@ -727,7 +821,7 @@ class EastSyncInterface:
            ).then(
                lambda: self.get_action_log_text(), outputs=console
            ).then(
-                analyze_callback, inputs=input_box, outputs=
+                analyze_callback, inputs=input_box, outputs=output_display
            ).then(
                self.get_action_log_text, outputs=console
            )
@@ -809,7 +903,7 @@ class EastSyncInterface:
                log_text = self.get_action_log_text()
                return (log_text, log_text)  # Return same log for both consoles
 
-            gr.Timer(
+            gr.Timer(new_events_check_interval_seconds).tick(update_both_consoles, outputs=[console, console_cv])
 
            # Check for analysis result updates (poll every 1 second)
            def check_analysis_result():
@@ -827,6 +921,6 @@ class EastSyncInterface:
                # Return the result if not None, otherwise skip update
                return result if result is not None else gr.update()
 
-            gr.Timer(
+            gr.Timer(new_events_check_interval_seconds).tick(poll_with_skip, outputs=output_display)
 
            return demo
agents/narrator_agent/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from .narrator_agent import NarratorAgent
+
+__all__ = ["NarratorAgent"]
agents/narrator_agent/get_narrator_prompt.py
ADDED
@@ -0,0 +1,16 @@
+def get_narrator_prompt() -> str:
+    return """
+    You are a futuristic, high-tech AI system narrator (like JARVIS or FRIDAY). \n
+    You receive technical log events from an analysis engine. \n
+    Your job is to briefly and engagingly narrate what is happening for the user. \n
+    Keep it short (1 sentence max). Use a professional, slightly dramatic tone. \n
+    Do not just repeat the log. Interpret it. If there is brief pause of events, try to \n
+    summarize what has happened so far, or maybe insert a dry joke or commentary. Keep it lively. \n
+    Your narration will be converted to audio and will be used to record a video for a hackathon about the app. \n
+    You can joke about that or break the fourth wall, but do it subtly and rarely, don't overdo it \n
+    You can also add some dry commentary about the app or the team, but again, keep it subtle and don't overdo it \n
+    Keep the commentary short and engaging, because the events are going to come fast \n
+    Keep the commentary at 5 second speech max \n
+    Don't start with 'Affirmative' or anything like that, just start with the narration, it will be part of a continuous stream of audio \n
+    YOU MUST KEEP EVERYTHING UNDER 5 SECOND NARRATION, one Sentence max, keep it SHORT
+    """
agents/narrator_agent/narrator_agent.py
ADDED
@@ -0,0 +1,249 @@
+import os
+import threading
+import time
+import re
+from typing import Iterator, Tuple, Any
+from queue import Empty, Queue
+import numpy as np
+
+from third_party_tools.text_to_audio_stream import (
+    text_to_audio_stream,
+    local_file_audio_stream,
+    EFFECTIVE_SAMPLE_RATE,
+)
+
+from google import genai
+from .get_narrator_prompt import get_narrator_prompt
+
+
+time_between_narrator_updates = 6
+
+
+# ----------------------------
+# Sentence Window
+# ----------------------------
+class SentenceWindow:
+    END_RE = re.compile(r"([.!?]+)(\s|$)")
+
+    def __init__(self):
+        self.buffer = ""
+
+    def push(self, text: str):
+        self.buffer += text
+        out = []
+        while True:
+            m = self.END_RE.search(self.buffer)
+            if not m:
+                break
+            end = m.end()
+            sentence = self.buffer[:end].strip()
+            if sentence:
+                out.append(sentence)
+            self.buffer = self.buffer[end:]
+        return out
+
+    def flush(self):
+        leftover = self.buffer.strip()
+        self.buffer = ""
+        return [leftover] if leftover else []
+
+
+# ----------------------------
+# NarratorAgent - Batched Streaming
+# ----------------------------
+class NarratorAgent:
+    def __init__(self):
+        api_key = os.getenv("GEMINI_API_KEY")
+        if not api_key:
+            raise RuntimeError("Missing GEMINI_API_KEY")
+
+        self.client = genai.Client(api_key=api_key)
+        self.model = "gemini-2.5-flash"
+        self.system_prompt = get_narrator_prompt()
+
+        # Queue for complete narration segments (not small chunks)
+        self.narration_queue: Queue[Tuple[int, np.ndarray]] = Queue(maxsize=10)
+
+        # Threading
+        self._worker_thread: threading.Thread | None = None
+        self._running = False
+
+        # Update state
+        self.previous_narrations: list[str] = []
+        self.all_events: list[str] = []
+        self.last_update_time = time.time()
+
+        # Prevent mid-sentence TTS
+        self.sentence_window = SentenceWindow()
+
+    def stream_commentary(self, new_events, previous_narrations, seconds_since_update):
+        if new_events:
+            context = (
+                f"Previous narrations (last 3): {previous_narrations}\n"
+                f"New events since last update ({seconds_since_update:.0f}s ago): {new_events}\n"
+                f"Provide a brief update narrating what's happening."
+            )
+        else:
+            context = (
+                f"Previous narrations: {previous_narrations}\n"
+                f"No new events for {seconds_since_update:.0f} seconds.\n"
+                f"Make a brief, dry joke about the wait or a witty observation."
+            )
+
+        messages = [
+            f"system: {self.system_prompt}",
+            f"user: {context}"
+        ]
+
+        stream = self.client.models.generate_content_stream(
+            model=self.model,
+            contents=messages
+        )
+
+        for chunk in stream:
+            if hasattr(chunk, "text") and chunk.text:
+                yield chunk.text
+
+    # ----------------------------
+    # Worker Thread - Generates COMPLETE narrations every 5 seconds
+    # ----------------------------
+    def _background_worker(self, ui: Any):
+        print("Narrator worker started (batched streaming mode)")
+
+        while self._running:
+            try:
+                # 1. Get all new events
+                new_events = []
+                while True:
+                    try:
+                        ev = ui.event_queue.get_nowait()
+                        new_events.append(ev)
+                        self.all_events.append(ev)
+                    except Empty:
+                        break
+
+                # 2. Time delta
+                now = time.time()
+                elapsed = now - self.last_update_time
+                self.last_update_time = now
+
+                gen_start = time.time()
+
+                # 3. Stream Gemini text
+                narration_text = ""
+                for chunk in self.stream_commentary(
+                    new_events,
+                    self.previous_narrations[-3:],
+                    elapsed
+                ):
+                    narration_text += chunk
+
+                if narration_text.strip():
+                    self.previous_narrations.append(narration_text)
+                    print(f"Generating narration: {narration_text[:80]}...")
+
+                    # 4. Generate COMPLETE audio for this narration
+                    audio_chunks = []
+                    sample_rate = EFFECTIVE_SAMPLE_RATE
+
+                    for sentence in self.sentence_window.push(narration_text):
+                        for sr, chunk in text_to_audio_stream(iter([sentence])):
+                            audio_chunks.append(chunk)
+                            sample_rate = sr
+
+                    # 5. Combine into ONE complete audio segment
+                    if audio_chunks:
+                        combined_audio = np.concatenate(audio_chunks)
+                        duration = len(combined_audio) / sample_rate
+
+                        # Queue the COMPLETE narration (not tiny chunks)
+                        try:
+                            self.narration_queue.put((sample_rate, combined_audio), timeout=1.0)
+                            gen_time = time.time() - gen_start
+                            print(f"✓ Queued narration: {duration:.1f}s audio (generated in {gen_time:.1f}s)")
+                        except:
+                            print("⚠️ Narration queue full, skipping")
+
+                # Wait 5 seconds before next narration
+                time.sleep(time_between_narrator_updates)
+
+            except Exception as e:
+                print(f"Worker error: {e}")
+                import traceback
+                traceback.print_exc()
+                time.sleep(1)
+
+    # ----------------------------
+    # Final summary
+    # ----------------------------
+    def queue_final_summary(self, summary_text: str):
+        audio_chunks = []
+        sample_rate = EFFECTIVE_SAMPLE_RATE
+
+        for sentence in self.sentence_window.push(summary_text):
+            for sr, chunk in text_to_audio_stream(iter([sentence])):
+                audio_chunks.append(chunk)
+                sample_rate = sr
+
+        for sentence in self.sentence_window.flush():
+            for sr, chunk in text_to_audio_stream(iter([sentence])):
+                audio_chunks.append(chunk)
+                sample_rate = sr
+
+        if audio_chunks:
+            combined_audio = np.concatenate(audio_chunks)
+            self.narration_queue.put((sample_rate, combined_audio))
+
+    # ----------------------------
+    # Streaming interface - Yields COMPLETE narrations
+    # ----------------------------
+    def narrate_event_streaming(self, ui: Any) -> Iterator[Tuple[int, np.ndarray]]:
+        """
+        Generator that yields complete narration segments every ~5 seconds.
+        This works much better with Gradio's streaming than tiny chunks.
+        """
+        # Play intro
+        try:
+            for sr, chunk in local_file_audio_stream("intro.wav"):
+                yield (sr, chunk)
+        except Exception as e:
+            print("Intro failed:", e)
+
+        # Start worker if needed
+        if not self._worker_thread or not self._worker_thread.is_alive():
+            self._running = True
+            self._worker_thread = threading.Thread(
+                target=self._background_worker,
+                args=(ui,),
+                daemon=True
+            )
+            self._worker_thread.start()
+
+        # Stream complete narrations as they become available
+        narration_count = 0
+
+        while True:
+            try:
+                # Wait for next complete narration (blocks up to 1 second)
+                sr, complete_narration = self.narration_queue.get(timeout=1.0)
+
+                narration_count += 1
+                duration = len(complete_narration) / sr
+
+                print(f"🔊 Streaming narration #{narration_count} ({duration:.1f}s)")
+
+                # Yield the COMPLETE narration as ONE chunk
+                # Gradio will handle playing it smoothly
+                yield (sr, complete_narration)
+
+            except Empty:
+                # No new narration yet, just wait
+                # Don't yield silence - let Gradio handle gaps naturally
+                pass
+
+    def stop(self):
+        """Stop the narrator worker thread"""
+        self._running = False
+        if self._worker_thread:
+            self._worker_thread.join(timeout=2.0)
+        print("Narrator stopped")
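
For reference, a minimal sketch of how narrate_event_streaming is meant to be consumed; it mirrors the wiring added in app.py and UI/app_interface.py below, and is not itself part of the commit. The generator yields (sample_rate, int16 numpy array) tuples, which is the shape a streaming gr.Audio output consumes from a generator callback.

    # Sketch, assuming `ui` is the EastSyncInterface instance that owns event_queue
    narrator = NarratorAgent()

    def start_audio_stream():
        # Stream narration tuples straight into the gr.Audio component
        yield from narrator.narrate_event_streaming(ui)

    # btn_run.click(start_audio_stream, outputs=live_audio)
    # where live_audio = gr.Audio(streaming=True, autoplay=True)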
agents/orchestrator_agent/get_orchestrator_prompt.py
CHANGED
@@ -19,32 +19,6 @@ You **only delegate** to managed agents.
 
 ---
 
-### MODE SELECTION
-
-#### **1) Information-Only Requests (Default)**
-Trigger when the user **does NOT explicitly ask to analyze or build/update/delete a training plan**.
-
-Examples:
-- “list all edge functions and what they do”
-- “show project details”
-- “who is on the team for project 12?”
-- “what does fn_get_project_skill_overview do?”
-- “list all training plans” (if no analysis requested)
-
-Rules for this mode:
-- Only retrieve and return the requested info.
-- **Do NOT build or return training-plan JSON.**
-- **Do NOT call web_search_agent.**
-- Answer concisely in the most natural data structure (e.g., a list or dict).
-- Use **minimal necessary delegation**.
-
-#### **2) Full Training-Plan Workflow**
-Trigger only if the request **explicitly involves training plans or project skill analysis**, such as:
-- build a new plan
-- update or delete a plan
-- generate a plan for a project
-- analyze skill gaps
-- view a specific plan with training options
 
 Workflow must:
 1) Instruct `supabase_agent` to retrieve project details, team, and skill gaps.
@@ -83,13 +57,14 @@ Workflow must:
 {orchestrator_agent_return_schema}
 
 The corny_summary field is a short, corny summary of the project to be used for a voiced summary of the training plan.
-It must be short and funny, but no offensive jokes.
+It must be short and funny, but no offensive jokes. For example:
 
 <example_corny_summary>
 "In the quest to build the ultimate Data Analytics Dashboard, one brave team stands almost ready. Laura, a fearless fullstack engineer, has everything she needs—except SQL and data pipelines—so she’s heading off for a crash course in data sorcery. Oliver, the battle-hardened data engineer, must finally face his greatest foe: JavaScript. Emma, the SQL specialist, attempts the impossible, aiming to master both data engineering and frontend magic at the same time. And Ethan, the frontend warrior, discovers he cannot escape the call of back-end knowledge.
 With a hefty budget, a looming deadline, and a mountain of courses ahead, these heroes will train, level up, and, if all goes well… finally deliver the dashboard of destiny."
 </example_corny_summary>
 
+Make it a bit shorter that the example.
 ---
 
 ### Tool Usage Requirement
@@ -109,18 +84,10 @@ You must use:
 ---
 
 ### Final Output
-Return the
-
-- Information-only: direct, concise natural structure (list/dict), no plan schema
-- Training-plan workflow: full JSON plan using `AnalysisResult` schema: {orchestrator_agent_return_schema}
+Return the full JSON plan using `AnalysisResult` schema: {orchestrator_agent_return_schema}
 
 ### FINAL OUTPUT REQUIREMENTS (STRICT)
 
-#### If Information-Only Mode:
-- Output **only** the requested information as a natural concise structure (list/dict).
-- **Do NOT** include training-plan JSON.
-- **Do NOT** add text, comments, summary, or message.
-
 #### If Training-Plan Workflow:
 - Output strictly and **only** the JSON that conforms to the `AnalysisResult` schema.
 - **NO** additional words, comments, explanation, status, success messages, formatting, or metadata.
agents/supabase_agent/get_supabase_agent_prompt.py
CHANGED
@@ -71,11 +71,12 @@ def get_supabase_agent_prompt(managed_agent_prompt: dict):
 If ANY existing RPC or Edge Function returns the needed data in ONE call,
 YOU MUST USE IT instead of writing SQL.
 
-
+**IMPORTANT**
 ALWAYS return the data in a JSON object. The JSON object must match the following schema:
 
 {supabase_agent_return_schema}
 
+DO NOT TRY TO RETURN THE DATA IN ANY OTHER FORMAT!
 """
 
 return managed_agent_prompt
agents/supabase_agent/supabase_agent.py
CHANGED
@@ -42,7 +42,7 @@ class SupabaseAgent:
         self._supabase_agent = CodeAgent(
             model=supabase_model,
             tools=self.mcp_client.get_tools(),
-            additional_authorized_imports=['json'],
+            additional_authorized_imports=['json', 'ast'],
             step_callbacks=step_callbacks,
             name="supabase_agent",
             description=(
app.py
CHANGED
@@ -1,15 +1,23 @@
 from __future__ import annotations
 import threading
 from typing import Any
-
+import os
 from dotenv import load_dotenv
 
+from UI import EastSyncInterface
+from get_final_text import get_final_text
+
+
 load_dotenv()
 
+ui = EastSyncInterface()
+
 current_thread: threading.Thread | None = None
 
 is_run_cancelled = False
 
+should_narrate_events = os.getenv("SHOULD_NARRATE_EVENTS", "False").lower() == "true"
+
 try:  # pragma: no cover - fallback when providers misconfigured
     from agents.orchestrator_agent import OrchestratorAgent
 except Exception as exc:  # pylint: disable=broad-except
@@ -18,9 +26,14 @@ except Exception as exc:  # pylint: disable=broad-except
 else:
     ORCHESTRATOR_IMPORT_ERROR = None
 
+try:
+    from agents.narrator_agent import NarratorAgent
+except Exception as exc:
+    NarratorAgent = None
+    print(f"Narrator Agent unavailable: {exc}")
+
+
-
 
-ui = EastSyncInterface()
 
 if OrchestratorAgent is not None:
     try:
@@ -33,6 +46,12 @@ else:
     orchestrator_agent = None
     orchestrator_error = ORCHESTRATOR_IMPORT_ERROR or "Provider unavailable"
 
+if NarratorAgent:
+    narrator_agent = NarratorAgent()
+else:
+    narrator_agent = None
+
+
 
 def cancel_run():
     global is_run_cancelled, current_thread
@@ -68,6 +87,12 @@ def analyze_and_plan_interface(user_prompt: str):
         )
         if result is not None:  # agent didn't get cancelled, keep running
             ui.set_analysis_result(result)
+            # Queue the final corny summary to the narrator stream
+            if should_narrate_events and narrator_agent:
+                corny_summary = result.get('corny_summary', '')
+                final_text = get_final_text(corny_summary)
+                if corny_summary:
+                    narrator_agent.queue_final_summary(final_text)
     except Exception as exc:
         ui.set_analysis_error(str(exc))
     finally:
@@ -83,8 +108,15 @@ def analyze_and_plan_interface(user_prompt: str):
     return ui.render_project_processing_state()
 
 
+def start_audio_stream():
+    """
+    Starts the background narrator thread and returns the audio generator.
+    """
+    if should_narrate_events:
+        yield from narrator_agent.narrate_event_streaming(ui)
+
 def main():
-    demo = ui.build_interface(analyze_and_plan_interface, cancel_run)
+    demo = ui.build_interface(analyze_and_plan_interface, cancel_run, start_audio_stream)
     demo.launch(share=True)
 
 
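
For reference, the narrator path is gated entirely by environment variables read with os.getenv; a minimal .env sketch using the variable names from this commit (the values shown are placeholders, not part of the commit):

    SHOULD_NARRATE_EVENTS=true              # checked in app.py and UI/app_interface.py
    GEMINI_API_KEY=your-gemini-key          # required by NarratorAgent
    ELEVEN_LABS_API_KEY=your-elevenlabs-key # used by third_party_tools/text_to_audio_stream.py
    # optional overrides: ELEVEN_LABS_VOICE_ID, ELEVEN_LABS_MODEL_ID,
    # ELEVEN_REALTIME_VOICE_ID, ELEVEN_REALTIME_MODEL_ID (realtime path only)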
get_final_text.py
ADDED
@@ -0,0 +1,6 @@
+def get_final_text(corny_summary: str) -> str:
+    return f""" And here it is, the final analysis: {corny_summary}
+    We hope you enjoyed this little streaming demonstration! Have a great day and don't forget to stay awesome!
+
+    Cool voice out!
+    """
pyproject.toml
CHANGED
@@ -20,7 +20,9 @@ dependencies = [
     "python-docx>=1.0.0",
     "pdfplumber>=0.11.0",
     "python-multipart>=0.0.6",
+    "google-genai>=1.52.0",
     "elevenlabs>=2.24.0",
+    "websocket-client>=1.9.0",
 ]
 
 [tool.uv]
third_party_tools/eleven_labs_realtime_tts.py
ADDED
@@ -0,0 +1,142 @@
+import json
+import os
+import threading
+import time
+from queue import Queue, Empty
+from typing import Iterator, Tuple
+
+import numpy as np
+import websocket
+
+
+# ========== CONFIG ==========
+ELEVEN_API_KEY = os.getenv("ELEVEN_LABS_API_KEY")
+REALTIME_VOICE_ID = os.getenv("ELEVEN_REALTIME_VOICE_ID")  # MUST be a Realtime voice
+REALTIME_MODEL_ID = os.getenv("ELEVEN_REALTIME_MODEL_ID", "eleven_multilingual_v2")
+REALTIME_SAMPLE_RATE = 44100  # PCM 44.1kHz audio
+
+
+# ========== REALTIME CLIENT ==========
+class ElevenLabsRealtimeTTS:
+    """Smooth low-latency ElevenLabs realtime narration with PCM audio output."""
+
+    def __init__(self):
+        self.ws: websocket.WebSocketApp | None = None
+        self._ws_thread: threading.Thread | None = None
+        self._connected = threading.Event()
+        self._queue: Queue[np.ndarray] = Queue(maxsize=256)
+        self._lock = threading.Lock()
+        self.running = False
+
+    # ---------- WebSocket Connection ----------
+    def connect(self):
+        if not ELEVEN_API_KEY or not REALTIME_VOICE_ID:
+            raise RuntimeError("Missing ELEVEN_LABS_API_KEY or ELEVEN_REALTIME_VOICE_ID")
+
+        url = (
+            f"wss://api.elevenlabs.io/v1/text-to-speech/"
+            f"{REALTIME_VOICE_ID}/stream-input?"
+            f"model_id={REALTIME_MODEL_ID}"
+            f"&output_format=pcm_24000"
+            f"&optimize_streaming_latency=3"
+        )
+
+        # Try passing key in headers (standard)
+        headers = {
+            "xi-api-key": ELEVEN_API_KEY,
+            "Accept": "audio/wav",
+            "Content-Type": "application/json",
+        }
+
+        print(f"Connecting to ElevenLabs Realtime... VoiceID: {REALTIME_VOICE_ID}, Model: {REALTIME_MODEL_ID}")
+
+        self.ws = websocket.WebSocketApp(
+            url,
+            header=headers,
+            on_open=self._on_open,
+            on_message=self._on_message,
+            on_close=self._on_close,
+            on_error=self._on_error,
+        )
+
+        self._ws_thread = threading.Thread(target=self.ws.run_forever, daemon=True)
+        self._ws_thread.start()
+        self.running = True
+
+    # ---------- WebSocket Callbacks ----------
+    def _on_open(self, ws):
+        self._connected.set()
+
+    def _on_close(self, ws, *args):
+        self.running = False
+        self._connected.clear()
+
+    def _on_error(self, ws, error):
+        print("⚠ ElevenLabs realtime error:", error)
+
+    def _on_message(self, ws, message):
+        """Receive PCM bytes → push to queue immediately with no decoding."""
+        if isinstance(message, bytes):
+            pcm = np.frombuffer(message, dtype=np.int16)
+            try:
+                self._queue.put_nowait(pcm)
+            except:
+                pass  # Drop if queue full—we never block here.
+
+    # ---------- Send Text ----------
+    def speak(self, text: str):
+        if not text:
+            return
+
+        with self._lock:
+            if not self.running or not self.ws:
+                self.connect()
+
+        if not self._connected.wait(timeout=5):
+            raise RuntimeError("Failed to open ElevenLabs realtime websocket")
+
+        payload = {
+            "text": text,
+            "voice_settings": {"stability": 0.5, "similarity_boost": 0.8, "use_speaker_boost": False},
+            "generation_config": {
+                "chunk_length_schedule": [120, 160, 250, 290]
+            },
+            "try_trigger_generation": True,
+        }
+
+        # Clear queue of any stale audio from previous runs
+        with self._lock:
+            while not self._queue.empty():
+                try:
+                    self._queue.get_nowait()
+                except Empty:
+                    break
+
+        self.ws.send(json.dumps(payload))
+
+    # ---------- Stream PCM Audio ----------
+    def stream_text(
+        self, text: str, idle_timeout: float = 0.5
+    ) -> Iterator[Tuple[int, np.ndarray]]:
+        """
+        Speak text → yield (sample_rate, pcm_chunk) continuously
+        Stops ONLY after audio finishes (no artificial silence).
+        """
+        self.speak(text)
+        last_received = time.time()
+
+        while True:
+            # Check if connection is still alive
+            if not self.running:
+                break
+
+            try:
+                chunk = self._queue.get(timeout=0.1)
+                last_received = time.time()
+                yield REALTIME_SAMPLE_RATE, chunk
+            except Empty:
+                # If we haven't received anything for a while, end stream
+                if time.time() - last_received > idle_timeout:
+                    break
+                continue  # DO NOT inject silence; just wait
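
For reference, a minimal usage sketch of the realtime client on its own; in this commit it is only reached through text_to_audio_stream when USE_REALTIME_TTS is enabled, and it assumes ELEVEN_LABS_API_KEY and ELEVEN_REALTIME_VOICE_ID are set:

    # Illustrative sketch, not part of the commit
    tts = ElevenLabsRealtimeTTS()
    for sample_rate, pcm_chunk in tts.stream_text("Systems online."):
        # int16 PCM chunks tagged with REALTIME_SAMPLE_RATE; forward to a streaming audio sink
        print(sample_rate, len(pcm_chunk))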
third_party_tools/text_to_audio_stream.py
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import time
|
| 3 |
+
import wave
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
from typing import Iterator, Tuple
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
from elevenlabs.client import ElevenLabs

from elevenlabs import play

from .eleven_labs_realtime_tts import (
    ElevenLabsRealtimeTTS,
    REALTIME_SAMPLE_RATE,
)

SAMPLE_RATE = 24000
DEFAULT_VOICE_ID = "fjnwTZkKtQOJaYzGLa6n"
DEFAULT_MODEL_ID = "eleven_flash_v2"
LOCAL_AUDIO_ENV = "EASTSYNC_AUDIO_FILE"
USE_REALTIME_TTS = False

EFFECTIVE_SAMPLE_RATE = REALTIME_SAMPLE_RATE if USE_REALTIME_TTS else SAMPLE_RATE

api_key = os.getenv("ELEVEN_LABS_API_KEY")
voice_id = os.getenv("ELEVEN_LABS_VOICE_ID", DEFAULT_VOICE_ID)
model_id = os.getenv("ELEVEN_LABS_MODEL_ID", DEFAULT_MODEL_ID)
client = ElevenLabs(api_key=api_key) if api_key else None
realtime_client = ElevenLabsRealtimeTTS() if USE_REALTIME_TTS else None


def resample_chunk(chunk: np.ndarray, orig_sr: int, target_sr: int) -> np.ndarray:
    """Resample audio chunk from orig_sr to target_sr using linear interpolation."""
    if orig_sr == target_sr:
        return chunk

    # Calculate duration and new length
    duration = len(chunk) / orig_sr
    new_length = int(duration * target_sr)

    # Simple linear interpolation
    return np.interp(
        np.linspace(0, len(chunk), new_length),
        np.arange(len(chunk)),
        chunk,
    ).astype(np.int16)


def local_file_audio_stream(path: str, chunk_ms: int = 100) -> Iterator[Tuple[int, np.ndarray]]:
    wav_path = Path(path).expanduser().resolve()

    with wave.open(str(wav_path), "rb") as wf:
        # Relaxed check: only enforce 16-bit for now, as we cast to int16
        if wf.getsampwidth() != 2:
            raise ValueError(f"Audio format mismatch. Expected 16-bit PCM. Got {wf.getsampwidth() * 8}-bit.")

        sr = wf.getframerate()
        channels = wf.getnchannels()

        # chunk_ms is milliseconds, so convert to a frame count per chunk
        frames_per_chunk = int(sr * chunk_ms / 1000)

        while True:
            data = wf.readframes(frames_per_chunk)
            if not data:
                break

            audio_data = np.frombuffer(data, dtype=np.int16)

            # Down-mix to mono by keeping the first channel
            if channels > 1:
                audio_data = audio_data.reshape(-1, channels)[:, 0]

            resampled_chunk = resample_chunk(audio_data, sr, EFFECTIVE_SAMPLE_RATE)

            yield EFFECTIVE_SAMPLE_RATE, resampled_chunk
            time.sleep(chunk_ms / 1000)


def _stream_with_elevenlabs(text: str) -> Iterator[np.ndarray]:
    if not client:
        return

    audio_stream = client.text_to_speech.stream(
        text=text,
        voice_id=voice_id,
        model_id=model_id,
        output_format="pcm_24000",
        optimize_streaming_latency=0,
        request_options={
            "chunk_size": 120_000,
        },
    )
    for chunk in audio_stream:
        if not chunk:
            continue
        yield np.frombuffer(chunk, dtype=np.int16)


def _collect_text(text_stream: Iterator[str]) -> str:
    parts: list[str] = []
    for new_text in text_stream:
        if new_text:
            parts.append(new_text)
    return "".join(parts).strip()


def text_to_audio_stream(text_stream: Iterator[str]) -> Iterator[Tuple[int, np.ndarray]]:
    narration = _collect_text(text_stream)
    if not narration:
        return

    if realtime_client:
        yield from realtime_client.stream_text(narration)
        return

    if not client:
        return

    for chunk in _stream_with_elevenlabs(narration):
        yield EFFECTIVE_SAMPLE_RATE, chunk
uv.lock
CHANGED

@@ -2,6 +2,9 @@ version = 1
revision = 3
requires-python = ">=3.13"

+[options]
+prerelease-mode = "allow"
+
[[package]]
name = "aiofiles"
version = "24.1.0"

@@ -158,19 +161,18 @@ wheels = [

[[package]]
name = "brotlicffi"
-version = "1.
+version = "1.2.0.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "cffi" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/
+sdist = { url = "https://files.pythonhosted.org/packages/84/85/57c314a6b35336efbbdc13e5fc9ae13f6b60a0647cfa7c1221178ac6d8ae/brotlicffi-1.2.0.0.tar.gz", hash = "sha256:34345d8d1f9d534fcac2249e57a4c3c8801a33c9942ff9f8574f67a175e17adb", size = 476682, upload-time = "2025-11-21T18:17:57.334Z" }
wheels = [
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/1c/fa/5408a03c041114ceab628ce21766a4ea882aa6f6f0a800e04ee3a30ec6b9/brotlicffi-1.1.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:994a4f0681bb6c6c3b0925530a1926b7a189d878e6e5e38fae8efa47c5d9c613", size = 366783, upload-time = "2023-09-14T14:22:07.096Z" },
+    { url = "https://files.pythonhosted.org/packages/e4/df/a72b284d8c7bef0ed5756b41c2eb7d0219a1dd6ac6762f1c7bdbc31ef3af/brotlicffi-1.2.0.0-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:9458d08a7ccde8e3c0afedbf2c70a8263227a68dea5ab13590593f4c0a4fd5f4", size = 432340, upload-time = "2025-11-21T18:17:42.277Z" },
+    { url = "https://files.pythonhosted.org/packages/74/2b/cc55a2d1d6fb4f5d458fba44a3d3f91fb4320aa14145799fd3a996af0686/brotlicffi-1.2.0.0-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:84e3d0020cf1bd8b8131f4a07819edee9f283721566fe044a20ec792ca8fd8b7", size = 1534002, upload-time = "2025-11-21T18:17:43.746Z" },
+    { url = "https://files.pythonhosted.org/packages/e4/9c/d51486bf366fc7d6735f0e46b5b96ca58dc005b250263525a1eea3cd5d21/brotlicffi-1.2.0.0-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:33cfb408d0cff64cd50bef268c0fed397c46fbb53944aa37264148614a62e990", size = 1536547, upload-time = "2025-11-21T18:17:45.729Z" },
+    { url = "https://files.pythonhosted.org/packages/1b/37/293a9a0a7caf17e6e657668bebb92dfe730305999fe8c0e2703b8888789c/brotlicffi-1.2.0.0-cp38-abi3-win32.whl", hash = "sha256:23e5c912fdc6fd37143203820230374d24babd078fc054e18070a647118158f6", size = 343085, upload-time = "2025-11-21T18:17:48.887Z" },
+    { url = "https://files.pythonhosted.org/packages/07/6b/6e92009df3b8b7272f85a0992b306b61c34b7ea1c4776643746e61c380ac/brotlicffi-1.2.0.0-cp38-abi3-win_amd64.whl", hash = "sha256:f139a7cdfe4ae7859513067b736eb44d19fae1186f9e99370092f6915216451b", size = 378586, upload-time = "2025-11-21T18:17:50.531Z" },
]

[[package]]

@@ -440,6 +442,7 @@ dependencies = [
    { name = "ddgs" },
    { name = "elevenlabs" },
    { name = "fastmcp" },
+    { name = "google-genai" },
    { name = "gradio" },
    { name = "pdfplumber" },
    { name = "pydantic" },

@@ -450,6 +453,7 @@ dependencies = [
    { name = "smolagents", extra = ["mcp", "openai"] },
    { name = "supabase" },
    { name = "typer" },
+    { name = "websocket-client" },
]

[package.metadata]

@@ -457,6 +461,7 @@ requires-dist = [
    { name = "ddgs", specifier = ">=9.9.1" },
    { name = "elevenlabs", specifier = ">=2.24.0" },
    { name = "fastmcp", specifier = ">=2.13.1" },
+    { name = "google-genai", specifier = ">=1.52.0" },
    { name = "gradio", specifier = "==6.0.0.dev1" },
    { name = "pdfplumber", specifier = ">=0.11.0" },
    { name = "pydantic", specifier = ">=2.0.0" },

@@ -467,6 +472,7 @@ requires-dist = [
    { name = "smolagents", extras = ["mcp", "openai"] },
    { name = "supabase", specifier = ">=2.0.0" },
    { name = "typer", specifier = ">=0.20.0" },
+    { name = "websocket-client", specifier = ">=1.9.0" },
]

[[package]]

@@ -501,11 +507,11 @@ wheels = [

[[package]]
name = "exceptiongroup"
-version = "1.3.
+version = "1.3.1"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/
+sdist = { url = "https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, upload-time = "2025-11-21T23:01:54.787Z" }
wheels = [
-    { url = "https://files.pythonhosted.org/packages/
+    { url = "https://files.pythonhosted.org/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size = 16740, upload-time = "2025-11-21T23:01:53.443Z" },
]

[[package]]

@@ -519,7 +525,7 @@ wheels = [

[[package]]
name = "fastapi"
-version = "0.
+version = "0.122.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "annotated-doc" },

@@ -527,9 +533,9 @@ dependencies = [
    { name = "starlette" },
    { name = "typing-extensions" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/
+sdist = { url = "https://files.pythonhosted.org/packages/b2/de/3ee97a4f6ffef1fb70bf20561e4f88531633bb5045dc6cebc0f8471f764d/fastapi-0.122.0.tar.gz", hash = "sha256:cd9b5352031f93773228af8b4c443eedc2ac2aa74b27780387b853c3726fb94b", size = 346436, upload-time = "2025-11-24T19:17:47.95Z" }
wheels = [
-    { url = "https://files.pythonhosted.org/packages/
+    { url = "https://files.pythonhosted.org/packages/7a/93/aa8072af4ff37b795f6bbf43dcaf61115f40f49935c7dbb180c9afc3f421/fastapi-0.122.0-py3-none-any.whl", hash = "sha256:a456e8915dfc6c8914a50d9651133bd47ec96d331c5b44600baa635538a30d67", size = 110671, upload-time = "2025-11-24T19:17:45.96Z" },
]

[[package]]

@@ -585,6 +591,39 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/eb/02/a6b21098b1d5d6249b7c5ab69dde30108a71e4e819d4a9778f1de1d5b70d/fsspec-2025.10.0-py3-none-any.whl", hash = "sha256:7c7712353ae7d875407f97715f0e1ffcc21e33d5b24556cb1e090ae9409ec61d", size = 200966, upload-time = "2025-10-30T14:58:42.53Z" },
]

+[[package]]
+name = "google-auth"
+version = "2.43.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "cachetools" },
+    { name = "pyasn1-modules" },
+    { name = "rsa" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/ff/ef/66d14cf0e01b08d2d51ffc3c20410c4e134a1548fc246a6081eae585a4fe/google_auth-2.43.0.tar.gz", hash = "sha256:88228eee5fc21b62a1b5fe773ca15e67778cb07dc8363adcb4a8827b52d81483", size = 296359, upload-time = "2025-11-06T00:13:36.587Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/6f/d1/385110a9ae86d91cc14c5282c61fe9f4dc41c0b9f7d423c6ad77038c4448/google_auth-2.43.0-py2.py3-none-any.whl", hash = "sha256:af628ba6fa493f75c7e9dbe9373d148ca9f4399b5ea29976519e0a3848eddd16", size = 223114, upload-time = "2025-11-06T00:13:35.209Z" },
+]
+
+[[package]]
+name = "google-genai"
+version = "1.52.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "anyio" },
+    { name = "google-auth" },
+    { name = "httpx" },
+    { name = "pydantic" },
+    { name = "requests" },
+    { name = "tenacity" },
+    { name = "typing-extensions" },
+    { name = "websockets" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/09/4e/0ad8585d05312074bb69711b2d81cfed69ce0ae441913d57bf169bed20a7/google_genai-1.52.0.tar.gz", hash = "sha256:a74e8a4b3025f23aa98d6a0f84783119012ca6c336fd68f73c5d2b11465d7fc5", size = 258743, upload-time = "2025-11-21T02:18:55.742Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/ec/66/03f663e7bca7abe9ccfebe6cb3fe7da9a118fd723a5abb278d6117e7990e/google_genai-1.52.0-py3-none-any.whl", hash = "sha256:c8352b9f065ae14b9322b949c7debab8562982f03bf71d44130cd2b798c20743", size = 261219, upload-time = "2025-11-21T02:18:54.515Z" },
+]
+
[[package]]
name = "gradio"
version = "6.0.0.dev1"

@@ -673,31 +712,34 @@ wheels = [

[[package]]
name = "hf-xet"
-version = "1.2.
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/
-wheels = [
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
+version = "1.2.1rc0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/9a/48/61907d37a180a1d016cb79396215b1064f075965cf14ac78b4a9682705d7/hf_xet-1.2.1rc0.tar.gz", hash = "sha256:ee6b196855720767283dbbca6d5f3877afdfa6df83e037bbadbed0181ac5972e", size = 518988, upload-time = "2025-11-21T23:26:10.526Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/8c/2b/e9fb76e7dcba1efc0dc881124d0ebbdf0790ad78f90dae9f23a969224c0c/hf_xet-1.2.1rc0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:05acfd78c5b515a0c06103c9471208a71ae52c6a72dba73bbcb5b7f79575c530", size = 2973766, upload-time = "2025-11-21T23:25:50.546Z" },
+    { url = "https://files.pythonhosted.org/packages/95/bf/8365816fb0e2dc0db633bed504fdf70b4e4e052aa86caff62e4b0175e7fa/hf_xet-1.2.1rc0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:2e4bbe0e4195c48aebce7c87438df6ba0748001c15cd088d1f41553b9cbf0aa5", size = 2850724, upload-time = "2025-11-21T23:25:48.95Z" },
+    { url = "https://files.pythonhosted.org/packages/4a/52/72ba543089817fdf0e684032c1664fd249602896d52b76f4278b7c830cc8/hf_xet-1.2.1rc0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:66534e7190bafae92c8e3411011220f189fadcc8cba36ebf4bc261e769fb7e49", size = 3342204, upload-time = "2025-11-21T23:25:31.773Z" },
+    { url = "https://files.pythonhosted.org/packages/85/a0/d0f7b4ffb08bdb25db2dbad8e5d97a266a4ada3c7e8dc4429bfe99c86ed2/hf_xet-1.2.1rc0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9d193015364fb9e95d4d295722538b554e9bfaa7b6a167e09e030148c8b15d0", size = 19434060, upload-time = "2025-11-21T23:25:33.89Z" },
+    { url = "https://files.pythonhosted.org/packages/af/b4/c406e62a1895520da504bb9372f7ed26ef65e32e1b39e397d81b7136b5ab/hf_xet-1.2.1rc0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:dda4a029cd30f10ba205d8a74e232070ec75923e4c262a2d7f5d55eb3a3dd4d1", size = 3249296, upload-time = "2025-11-21T23:25:29.504Z" },
+    { url = "https://files.pythonhosted.org/packages/cf/fb/c40487744c12a038e31af75de661938a6e9c2cfb55a544706d9b9d3cc00c/hf_xet-1.2.1rc0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:fc95e2b7a1a3a613587f407a8292f1240d45febd66a49ee1da0a94414ff3784e", size = 3434401, upload-time = "2025-11-21T23:25:59.747Z" },
+    { url = "https://files.pythonhosted.org/packages/46/37/8b93e82bace53bb650474562487a4fe2aa43e8b8d9ecd01ddffc1b6a63f2/hf_xet-1.2.1rc0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4a4e981ef129bdf1af7be559319b017bed0ae997c8bdd696b6c7e50d888e5a51", size = 3520042, upload-time = "2025-11-21T23:26:01.691Z" },
+    { url = "https://files.pythonhosted.org/packages/9d/64/bc73420f030808359d3c8f184ab563e095dd3f02186e6a1eb168244a733e/hf_xet-1.2.1rc0-cp313-cp313t-win_amd64.whl", hash = "sha256:d3ee934146fa2de521b4ab6ef21a7c15ee6bb33549973244b633db533028ad3b", size = 3041456, upload-time = "2025-11-21T23:26:11.928Z" },
+    { url = "https://files.pythonhosted.org/packages/c7/b7/6ce9f48be8748b2e8599453dec7012d38e4685a5e5587ee3ef4c09fccaf9/hf_xet-1.2.1rc0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:1d57ee9323fcf87c3fc1840856ad2f767c0f8ee14a55d470ddba3a6fdab40dd2", size = 2973781, upload-time = "2025-11-21T23:25:58.073Z" },
+    { url = "https://files.pythonhosted.org/packages/72/dc/6e1d3b653fdb34ce86f7b94c2388270f8bb5bb18da8590425a30ef0af1be/hf_xet-1.2.1rc0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6163f7de633ac0f5f88dc24d369b30df4df0f923dc61ebd9c39a9b022497f47f", size = 2850462, upload-time = "2025-11-21T23:25:56.157Z" },
+    { url = "https://files.pythonhosted.org/packages/8c/6b/6e0daf5811badf6c9d60a49cb3f99fe41cc01f147ecae3911d8621fa69c1/hf_xet-1.2.1rc0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:05b518a2499dafd510e29ff6c14bfb9aae119f66af785fc99eaf9069e0ccda43", size = 3342036, upload-time = "2025-11-21T23:25:44.283Z" },
+    { url = "https://files.pythonhosted.org/packages/b7/21/9dfdf0c66743cbf14f312d196f19367372a89232b2623d733690474008b9/hf_xet-1.2.1rc0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5ee726b80a1c0b2868bc58302ba1a47d0702f8d67f69aeecb94fe7f30ac1c2b", size = 19431002, upload-time = "2025-11-21T23:25:46.621Z" },
+    { url = "https://files.pythonhosted.org/packages/f4/8c/f798608de78b5aa1cabbf9c1e5e8a0172a93a47267fe1733f7c9780802e2/hf_xet-1.2.1rc0-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:bf8f5439c39a5fa41dec1071f9576ac510180522690771d54c211151e08cdf35", size = 3248725, upload-time = "2025-11-21T23:25:42.387Z" },
+    { url = "https://files.pythonhosted.org/packages/75/75/7035ea757b2ef27c21a7d734da18c1537473f8dcff468872eb9b4281dd33/hf_xet-1.2.1rc0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5ca1fae9189095b15c89cd30ce2f6c3a97f2d1cab261e28a73b84690ebc8960a", size = 3433685, upload-time = "2025-11-21T23:26:06.88Z" },
+    { url = "https://files.pythonhosted.org/packages/0e/47/1627f85cb062283edc9f516d61838c88bcdb46828d903b035674b5e0e89c/hf_xet-1.2.1rc0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:99676d52bbffc7747950d2686bc91f520758f3d83b594988058478be68706862", size = 3519636, upload-time = "2025-11-21T23:26:08.512Z" },
+    { url = "https://files.pythonhosted.org/packages/7a/c4/e3467976ab137df73ac2f758147ccc7ca8c890bbf9ff342e410fa6d5d4b2/hf_xet-1.2.1rc0-cp314-cp314t-win_amd64.whl", hash = "sha256:82007060913dfe0ae7b0711838d0283751adaafa9aa52457da89c6ff18131ccd", size = 3041684, upload-time = "2025-11-21T23:26:15.59Z" },
+    { url = "https://files.pythonhosted.org/packages/6e/ce/bfd825a3aa2a22caa78865a6331e3660825b82de24877b08c10d18c45748/hf_xet-1.2.1rc0-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:b6b6455d68f2b4439028c58198e6dc33f3b1b64314ed05b0a5f5f7dace37d711", size = 2977924, upload-time = "2025-11-21T23:25:54.254Z" },
+    { url = "https://files.pythonhosted.org/packages/88/28/d78d7fcf2f3e18177e8dd6bbb4294bb00ef2f6d3addfc2b636a251ec297b/hf_xet-1.2.1rc0-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:3d9894128c63478a3f67d7f0288e8f5780c2b3ae7a09f36fc3949be60dcf7ac8", size = 2853755, upload-time = "2025-11-21T23:25:52.222Z" },
+    { url = "https://files.pythonhosted.org/packages/ae/09/637245509430b3dd9d37f676bbe0b993c723e3671ce0b39fdf42c6f05a02/hf_xet-1.2.1rc0-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f8b937c5e2a4f43720eca9564b14324ecfa108cc053a1b44890c620f51aac01e", size = 3347297, upload-time = "2025-11-21T23:25:37.9Z" },
+    { url = "https://files.pythonhosted.org/packages/29/b5/bbc98a35ee5229d0cd6c9436ae97f86cf2ab63d6bd463cd5a43282e5c1f8/hf_xet-1.2.1rc0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bd4629e923dd7b12fb9d05312e03ed123db230ae25fd98a3fd5caa739f2357e", size = 19457253, upload-time = "2025-11-21T23:25:40.115Z" },
+    { url = "https://files.pythonhosted.org/packages/0f/c6/ab21fc91f23ca54cdd44e86981d80475d67ee4122128f5ef988a119ebe28/hf_xet-1.2.1rc0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:5484ad943ceec043f0c29733cb87e59c86c2c68804c470176f259b1ef339718e", size = 3254771, upload-time = "2025-11-21T23:25:36.213Z" },
+    { url = "https://files.pythonhosted.org/packages/e6/c0/5a2887739722bd5a531769c1e9555e30dd7f470aefaabbe898d939dbba20/hf_xet-1.2.1rc0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2ec943ba2633ed0df48d2c817ce6a13670e96590f9fd4260011c5753afbc5d53", size = 3439600, upload-time = "2025-11-21T23:26:03.318Z" },
+    { url = "https://files.pythonhosted.org/packages/30/c9/c7cd0a64eb2dba1f70fbb78dee33558567404522776328254a7c805ae23e/hf_xet-1.2.1rc0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:87e0bdd71172b7cb1621e706bbf70b75f31df5fa7c359ebc0978567b5c21c2cf", size = 3526094, upload-time = "2025-11-21T23:26:05.018Z" },
+    { url = "https://files.pythonhosted.org/packages/42/1d/e87412cbde68f13c0160366a323497107c699d6c9a42a2ab55dfeed86a89/hf_xet-1.2.1rc0-cp37-abi3-win_amd64.whl", hash = "sha256:916148659d7f6bff92e9a2d59a45e14b29b0d1e41083884b2494abfc3a2f30e5", size = 3047488, upload-time = "2025-11-21T23:26:13.93Z" },
]

[[package]]

@@ -1658,6 +1700,27 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/84/7a/1726ceaa3343874f322dd83c9ec376ad81f533df8422b8b1e1233a59f8ce/py_key_value_shared-0.2.8-py3-none-any.whl", hash = "sha256:aff1bbfd46d065b2d67897d298642e80e5349eae588c6d11b48452b46b8d46ba", size = 14586, upload-time = "2025-10-24T13:31:02.838Z" },
]

+[[package]]
+name = "pyasn1"
+version = "0.6.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322, upload-time = "2024-09-10T22:41:42.55Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135, upload-time = "2024-09-11T16:00:36.122Z" },
+]
+
+[[package]]
+name = "pyasn1-modules"
+version = "0.4.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "pyasn1" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892, upload-time = "2025-03-28T02:41:22.17Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259, upload-time = "2025-03-28T02:41:19.028Z" },
+]
+
[[package]]
name = "pycparser"
version = "2.23"

@@ -1763,31 +1826,31 @@ crypto = [

[[package]]
name = "pypdf"
-version = "6.
+version = "6.4.0"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/
+sdist = { url = "https://files.pythonhosted.org/packages/f3/01/f7510cc6124f494cfbec2e8d3c2e1a20d4f6c18622b0c03a3a70e968bacb/pypdf-6.4.0.tar.gz", hash = "sha256:4769d471f8ddc3341193ecc5d6560fa44cf8cd0abfabf21af4e195cc0c224072", size = 5276661, upload-time = "2025-11-23T14:04:43.185Z" }
wheels = [
-    { url = "https://files.pythonhosted.org/packages/
+    { url = "https://files.pythonhosted.org/packages/cd/f2/9c9429411c91ac1dd5cd66780f22b6df20c64c3646cdd1e6d67cf38579c4/pypdf-6.4.0-py3-none-any.whl", hash = "sha256:55ab9837ed97fd7fcc5c131d52fcc2223bc5c6b8a1488bbf7c0e27f1f0023a79", size = 329497, upload-time = "2025-11-23T14:04:41.448Z" },
]

[[package]]
name = "pypdfium2"
-version = "5.
+version = "5.1.0"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/
+sdist = { url = "https://files.pythonhosted.org/packages/1c/87/56782107fa242137b77ccddc30519bbb33e7a9eed9da9649d9db45db2c64/pypdfium2-5.1.0.tar.gz", hash = "sha256:46335ca30a1584b804a6824da84d2e846b4b954bdfc342d035b7bf15ed9a14e5", size = 270104, upload-time = "2025-11-23T13:36:52.589Z" }
wheels = [
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
+    { url = "https://files.pythonhosted.org/packages/1a/d7/46ce255322cd29f0db3772667a0da3db8ed137e1e9b9aa306ac5691765b3/pypdfium2-5.1.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:f3dde94d320d582d3c20255b600f1e7e03261bfdea139b7064b54126fc3db4e2", size = 2817789, upload-time = "2025-11-23T13:36:31.423Z" },
+    { url = "https://files.pythonhosted.org/packages/19/a5/4ad3c1b336fdc2b7a88d835c56bcd64ce60d4a95d1a9eaafc44f853da582/pypdfium2-5.1.0-py3-none-macosx_11_0_x86_64.whl", hash = "sha256:dee09b7a3ab1860a17decc97c179a5aaba5a74b2780d53c91daa18d742945892", size = 2940861, upload-time = "2025-11-23T13:36:33.519Z" },
+    { url = "https://files.pythonhosted.org/packages/19/93/d13ca66d5e075d7e27736c51c15955cdd3266ac0a8327613c3c520d43693/pypdfium2-5.1.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1757d6470cbf5b8d1c825350df2ccd79fd0bfcf5753ff566fd02153a486014b1", size = 2980933, upload-time = "2025-11-23T13:36:35.283Z" },
+    { url = "https://files.pythonhosted.org/packages/a2/7c/02744ef9e0363af08f9ed47c0e603ef8713e02d4a48492c76d5bf36f65c3/pypdfium2-5.1.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ad18e95497423f88b33f2976cb78c27f0bd6ef4b4bf340c901f5f28a234c4f06", size = 2762960, upload-time = "2025-11-23T13:36:37.033Z" },
+    { url = "https://files.pythonhosted.org/packages/89/26/f0abcfccb99b0a5c4451b70b0e72ccb7c27387931af01eae982870272202/pypdfium2-5.1.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2faee2f4fbd5bd33dd77c07d15ccaa6687562d883a54c4beb8329ebaee615b7d", size = 3060522, upload-time = "2025-11-23T13:36:38.835Z" },
+    { url = "https://files.pythonhosted.org/packages/2f/74/92f508e71178aa85de32454762f84d6f9cef35c468caab3e0f1041dae464/pypdfium2-5.1.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d688372df169a9dad606c1e5ad34b6e0e6b820f1e0d540b4780711600a7bf8dd", size = 2995178, upload-time = "2025-11-23T13:36:40.319Z" },
+    { url = "https://files.pythonhosted.org/packages/94/9f/91ca099ea64b24e19ef05da72e33d0ef0840e104d89cbdcb618da12629b5/pypdfium2-5.1.0-py3-none-musllinux_1_1_aarch64.whl", hash = "sha256:cfecd2b20f1c05027aaa2af6bfbcc2835b4c8f6455155b0dc2800ec6a2051965", size = 6321704, upload-time = "2025-11-23T13:36:42.177Z" },
+    { url = "https://files.pythonhosted.org/packages/e0/4b/5628cfda9f534b3acc1e2cf50f9e9582cd9cfd86cf2ce718da229de6e709/pypdfium2-5.1.0-py3-none-musllinux_1_1_i686.whl", hash = "sha256:5698de8e6d662f1b2cdff5cb62e6f0ee79ffaaa13e282251854cbc64cf712449", size = 6329892, upload-time = "2025-11-23T13:36:43.757Z" },
+    { url = "https://files.pythonhosted.org/packages/c5/25/5d2db765f8f82129d75ea2883ed26af3d1a64d8daaa20a11005ac681e2c3/pypdfium2-5.1.0-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:2cbd73093fbb1710ea1164cdf27583363e1b663b8cc22d555c84af0ee1af50c7", size = 6409889, upload-time = "2025-11-23T13:36:45.387Z" },
+    { url = "https://files.pythonhosted.org/packages/89/d3/135ed8ca46044cd5005cd104ead13bea417777afa65d7af5a710eb68d340/pypdfium2-5.1.0-py3-none-win32.whl", hash = "sha256:11d319cd2e5f71cdc3d68e8a79142b559a0edbcc16fe31d4036fcfc45f0e9ed8", size = 2991546, upload-time = "2025-11-23T13:36:47.373Z" },
+    { url = "https://files.pythonhosted.org/packages/52/8f/884a1b2fd7c747a98e9b4c95097c08b39d042a88837ac72f2945a7f6162c/pypdfium2-5.1.0-py3-none-win_amd64.whl", hash = "sha256:4725f347a8c9ff011a7035d8267ee25912ab1b946034ba0b57f3cca89de8847a", size = 3100176, upload-time = "2025-11-23T13:36:49.234Z" },
+    { url = "https://files.pythonhosted.org/packages/d7/5c/72448636ea0ccd44878f77bb5d59a2c967a54eec806ee2e0d894ef0d2434/pypdfium2-5.1.0-py3-none-win_arm64.whl", hash = "sha256:47c5593f7eb6ae0f1e5a940d712d733ede580f09ca91de6c3f89611848695c0f", size = 2941500, upload-time = "2025-11-23T13:36:50.69Z" },
]

[[package]]

@@ -2051,6 +2114,18 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/c9/7f/1a65ae870bc9d0576aebb0c501ea5dccf1ae2178fe2821042150ebd2e707/rpds_py-0.29.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2023473f444752f0f82a58dfcbee040d0a1b3d1b3c2ec40e884bd25db6d117d2", size = 225919, upload-time = "2025-11-16T14:50:14.734Z" },
]

+[[package]]
+name = "rsa"
+version = "4.9.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "pyasn1" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/da/8a/22b7beea3ee0d44b1916c0c1cb0ee3af23b700b6da9f04991899d0c555d4/rsa-4.9.1.tar.gz", hash = "sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75", size = 29034, upload-time = "2025-04-16T09:51:18.218Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/64/8d/0133e4eb4beed9e425d9a98ed6e081a55d195481b7632472be1af08d2f6b/rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762", size = 34696, upload-time = "2025-04-16T09:51:17.142Z" },
+]
+
[[package]]
name = "safehttpx"
version = "0.1.7"

@@ -2065,15 +2140,15 @@ wheels = [

[[package]]
name = "secretstorage"
-version = "3.
+version = "3.5.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "cryptography" },
    { name = "jeepney" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/
+sdist = { url = "https://files.pythonhosted.org/packages/1c/03/e834bcd866f2f8a49a85eaff47340affa3bfa391ee9912a952a1faa68c7b/secretstorage-3.5.0.tar.gz", hash = "sha256:f04b8e4689cbce351744d5537bf6b1329c6fc68f91fa666f60a380edddcd11be", size = 19884, upload-time = "2025-11-23T19:02:53.191Z" }
wheels = [
-    { url = "https://files.pythonhosted.org/packages/
+    { url = "https://files.pythonhosted.org/packages/b7/46/f5af3402b579fd5e11573ce652019a67074317e18c1935cc0b4ba9b35552/secretstorage-3.5.0-py3-none-any.whl", hash = "sha256:0ce65888c0725fcb2c5bc0fdb8e5438eece02c523557ea40ce0703c266248137", size = 15554, upload-time = "2025-11-23T19:02:51.545Z" },
]

[[package]]

@@ -2240,6 +2315,15 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/b7/da/d49051453766dbfdb5a086eebed7d3cb1814b6ff64ab6a90fd14edc13d46/supabase_functions-2.24.0-py3-none-any.whl", hash = "sha256:b93d79ffc446cb96faf03be550b6991847394064feec3ebf21954d3aff836d11", size = 8471, upload-time = "2025-11-07T17:08:20.943Z" },
]

+[[package]]
+name = "tenacity"
+version = "9.1.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/0a/d4/2b0cd0fe285e14b36db076e78c93766ff1d529d70408bd1d2a5a84f1d929/tenacity-9.1.2.tar.gz", hash = "sha256:1169d376c297e7de388d18b4481760d478b0e99a777cad3a9c86e556f4b697cb", size = 48036, upload-time = "2025-04-02T08:25:09.966Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/e5/30/643397144bfbfec6f6ef821f36f33e57d35946c44a2352d3c9f0ae847619/tenacity-9.1.2-py3-none-any.whl", hash = "sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138", size = 28248, upload-time = "2025-04-02T08:25:07.678Z" },
+]
+
[[package]]
name = "tomlkit"
version = "0.13.3"

@@ -2328,6 +2412,15 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/ee/d9/d88e73ca598f4f6ff671fb5fde8a32925c2e08a637303a1d12883c7305fa/uvicorn-0.38.0-py3-none-any.whl", hash = "sha256:48c0afd214ceb59340075b4a052ea1ee91c16fbc2a9b1469cca0e54566977b02", size = 68109, upload-time = "2025-10-18T13:46:42.958Z" },
]

+[[package]]
+name = "websocket-client"
+version = "1.9.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/2c/41/aa4bf9664e4cda14c3b39865b12251e8e7d239f4cd0e3cc1b6c2ccde25c1/websocket_client-1.9.0.tar.gz", hash = "sha256:9e813624b6eb619999a97dc7958469217c3176312b3a16a4bd1bc7e08a46ec98", size = 70576, upload-time = "2025-10-07T21:16:36.495Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/34/db/b10e48aa8fff7407e67470363eac595018441cf32d5e1001567a7aeba5d2/websocket_client-1.9.0-py3-none-any.whl", hash = "sha256:af248a825037ef591efbf6ed20cc5faa03d3b47b9e5a2230a529eeee1c1fc3ef", size = 82616, upload-time = "2025-10-07T21:16:34.951Z" },
+]
+
[[package]]
name = "websockets"
version = "15.0.1"