Upload folder using huggingface_hub
app.py CHANGED

@@ -13,8 +13,8 @@ from fastapi.responses import HTMLResponse
 from fastrtc import (
     AsyncStreamHandler,
     Stream,
-    WebRTCError,
     get_twilio_turn_credentials,
+    wait_for_item,
 )
 from google import genai
 from google.genai.types import (
@@ -68,13 +68,12 @@ class GeminiHandler(AsyncStreamHandler):
             api_key, voice_name = self.latest_args[1:]
         else:
             api_key, voice_name = None, "Puck"
-
-
-
-
-
-
-            raise WebRTCError(str(e))
+
+        client = genai.Client(
+            api_key=api_key or os.getenv("GEMINI_API_KEY"),
+            http_options={"api_version": "v1alpha"},
+        )
+
         config = LiveConnectConfig(
             response_modalities=["AUDIO"],  # type: ignore
             speech_config=SpeechConfig(
@@ -85,18 +84,15 @@ class GeminiHandler(AsyncStreamHandler):
                 )
             ),
         )
-
-
-
-
-
-
-
-
-
-                    self.output_queue.put_nowait(array)
-        except Exception as e:
-            raise WebRTCError(str(e))
+        async with client.aio.live.connect(
+            model="gemini-2.0-flash-exp", config=config
+        ) as session:
+            async for audio in session.start_stream(
+                stream=self.stream(), mime_type="audio/pcm"
+            ):
+                if audio.data:
+                    array = np.frombuffer(audio.data, dtype=np.int16)
+                    self.output_queue.put_nowait((self.output_sample_rate, array))

     async def stream(self) -> AsyncGenerator[bytes, None]:
         while not self.quit.is_set():
@@ -112,13 +108,11 @@ class GeminiHandler(AsyncStreamHandler):
         audio_message = encode_audio(array)
         self.input_queue.put_nowait(audio_message)

-    async def emit(self) -> tuple[int, np.ndarray]:
-
-        return (self.output_sample_rate, array)
+    async def emit(self) -> tuple[int, np.ndarray] | None:
+        return await wait_for_item(self.output_queue)

     def shutdown(self) -> None:
         self.quit.set()
-        self.args_set.clear()


 stream = Stream(
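Read together, the hunks replace the old try/except WebRTCError flow with a queue-based one: start_up creates the genai.Client and Gemini live session itself, pushes each (sample_rate, samples) tuple onto self.output_queue, and emit drains that queue with fastrtc's wait_for_item helper (returning None when nothing is ready) instead of raising WebRTCError. The sketch below assembles the post-commit handler from the lines shown in this diff; everything else, the __init__ and super().__init__ arguments, the copy and receive methods, the encode_audio body, the condition in start_up, and the Stream(...) keyword arguments, is not part of the diff and is an assumption about a typical fastrtc AsyncStreamHandler setup, not the Space's exact code.

# Consolidated sketch of app.py after this commit; scaffolding outside the diff is assumed.
import asyncio
import os
from collections.abc import AsyncGenerator

import numpy as np
from fastrtc import (
    AsyncStreamHandler,
    Stream,
    get_twilio_turn_credentials,
    wait_for_item,
)
from google import genai
from google.genai.types import (
    LiveConnectConfig,
    PrebuiltVoiceConfig,
    SpeechConfig,
    VoiceConfig,
)


def encode_audio(data: np.ndarray) -> bytes:
    # Assumed helper: the diff only shows it being called, not its body.
    return data.tobytes()


class GeminiHandler(AsyncStreamHandler):
    def __init__(self) -> None:
        # Assumed sample rates/layout; the diff does not show __init__.
        super().__init__(
            expected_layout="mono",
            output_sample_rate=24000,
            input_sample_rate=16000,
        )
        self.input_queue: asyncio.Queue = asyncio.Queue()
        self.output_queue: asyncio.Queue = asyncio.Queue()
        self.quit: asyncio.Event = asyncio.Event()

    def copy(self) -> "GeminiHandler":
        # fastrtc clones the handler per connection.
        return GeminiHandler()

    async def start_up(self) -> None:
        # Assumed condition: the diff shows only the two branch bodies, not the test.
        if isinstance(self.latest_args, (list, tuple)) and len(self.latest_args) > 2:
            api_key, voice_name = self.latest_args[1:]
        else:
            api_key, voice_name = None, "Puck"

        client = genai.Client(
            api_key=api_key or os.getenv("GEMINI_API_KEY"),
            http_options={"api_version": "v1alpha"},
        )
        config = LiveConnectConfig(
            response_modalities=["AUDIO"],  # type: ignore
            speech_config=SpeechConfig(
                voice_config=VoiceConfig(
                    prebuilt_voice_config=PrebuiltVoiceConfig(voice_name=voice_name)
                )
            ),
        )
        async with client.aio.live.connect(
            model="gemini-2.0-flash-exp", config=config
        ) as session:
            async for audio in session.start_stream(
                stream=self.stream(), mime_type="audio/pcm"
            ):
                if audio.data:
                    array = np.frombuffer(audio.data, dtype=np.int16)
                    # Producer side: emit() drains this queue.
                    self.output_queue.put_nowait((self.output_sample_rate, array))

    async def stream(self) -> AsyncGenerator[bytes, None]:
        # Feeds microphone chunks queued by receive() into the live session.
        while not self.quit.is_set():
            audio = await self.input_queue.get()
            yield audio

    async def receive(self, frame: tuple[int, np.ndarray]) -> None:
        _, array = frame
        audio_message = encode_audio(array)
        self.input_queue.put_nowait(audio_message)

    async def emit(self) -> tuple[int, np.ndarray] | None:
        # Returns the next queued chunk, or None if nothing is ready,
        # instead of raising WebRTCError as the pre-commit code did.
        return await wait_for_item(self.output_queue)

    def shutdown(self) -> None:
        self.quit.set()


# The diff only shows "stream = Stream("; these keyword arguments are an
# assumption about a typical send-receive audio setup.
stream = Stream(
    modality="audio",
    mode="send-receive",
    handler=GeminiHandler(),
    rtc_configuration=get_twilio_turn_credentials(),
)

Routing output through an asyncio.Queue decouples the Gemini receive loop from WebRTC's emit cadence, which is why the WebRTCError wrapping and the args_set.clear() call could be dropped in this commit.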