twillo_serve.py
#!/usr/bin/env python3
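"""Bridge a Twilio phone call (Media Streams) to the OpenAI Realtime API, served as a Modal ASGI app."""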
import os
import json
import base64
import asyncio
import websockets
from fastapi import FastAPI, WebSocket, Request
from fastapi.responses import HTMLResponse
from fastapi.websockets import WebSocketDisconnect
from twilio.twiml.voice_response import VoiceResponse, Connect
from modal import App, Image
import modal
modal_app = App("twillio-serve-voice")
modal_app.image = Image.debian_slim().pip_install("fastapi", "websockets", "twilio")
# Configuration
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")  # requires OpenAI Realtime API access
PORT = int(os.getenv("PORT", 5050))
SYSTEM_MESSAGE = (
    "You are a helpful and bubbly AI assistant who loves to chat about "
    "anything the user is interested in and is prepared to offer them facts. "
    "You have a penchant for dad jokes, owl jokes, and rickrolling – subtly. "
    "Always stay positive, but work in a joke when appropriate. "
    "Don't wait for the user to speak. Start the conversation by saying 'Hello Igor'. "
    "Be a good conversationalist and let the user speak after talking."
)
VOICE = "alloy"
LOG_EVENT_TYPES = [
    "response.content.done",
    "rate_limits.updated",
    "response.done",
    "input_audio_buffer.committed",
    "input_audio_buffer.speech_stopped",
    "input_audio_buffer.speech_started",
    "session.created",
]
app = FastAPI()
if not OPENAI_API_KEY:
    raise ValueError("Missing the OpenAI API key. Please set the OPENAI_API_KEY environment variable.")
@app.get("/")
def index_page():
    return {"message": "Twilio Media Stream Server is running!"}
@app.api_route("/incoming-call", methods=["GET", "POST"])
async def handle_incoming_call(request: Request):
    """Handle incoming call and return TwiML response to connect to Media Stream."""
    response = VoiceResponse()
    host = request.url.hostname
    connect = Connect()
    connect.stream(url=f"wss://{host}/media-stream")
    response.append(connect)
    return HTMLResponse(content=str(response), media_type="application/xml")
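# /media-stream bridges the two WebSockets: Twilio's G.711 µ-law audio frames are
# forwarded to the OpenAI Realtime API, and the model's audio deltas are streamed
# back to the caller on the same Twilio stream (identified by streamSid).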
@app.websocket("/media-stream")
async def handle_media_stream(twilio_ws: WebSocket):
    """Handle WebSocket connections between Twilio and OpenAI."""
    print("Client connected")
    await twilio_ws.accept()
    async with websockets.connect(
        "wss://api.openai.com/v1/realtime?model=gpt-4o-realtime-preview-2024-10-01",
        extra_headers={
            "Authorization": f"Bearer {OPENAI_API_KEY}",
            "OpenAI-Beta": "realtime=v1",
        },
    ) as openai_ws:
        await send_session_update(openai_ws)
        stream_sid = None

        async def receive_from_twilio():
            """Receive audio data from Twilio and send it to the OpenAI Realtime API."""
            nonlocal stream_sid
            try:
                async for message in twilio_ws.iter_text():
                    data = json.loads(message)
                    if data["event"] == "media" and openai_ws.open:
                        audio_append = {
                            "type": "input_audio_buffer.append",
                            "audio": data["media"]["payload"],
                        }
                        await openai_ws.send(json.dumps(audio_append))
                    elif data["event"] == "start":
                        stream_sid = data["start"]["streamSid"]
                        print(f"Incoming stream has started {stream_sid}")
            except WebSocketDisconnect:
                print("Client disconnected.")
                if openai_ws.open:
                    await openai_ws.close()

        async def send_to_twilio():
            """Receive events from the OpenAI Realtime API, send audio back to Twilio."""
            nonlocal stream_sid
            try:
                async for openai_message in openai_ws:
                    response = json.loads(openai_message)
                    if response["type"] in LOG_EVENT_TYPES:
                        print(f"Received event: {response['type']}", response)
                    if response["type"] == "session.updated":
                        print("Session updated successfully:", response)
                    if response["type"] == "response.audio.delta" and response.get("delta"):
                        # Audio from OpenAI
                        try:
                            audio_payload = base64.b64encode(
                                base64.b64decode(response["delta"])
                            ).decode("utf-8")
                            audio_delta = {
                                "event": "media",
                                "streamSid": stream_sid,
                                "media": {"payload": audio_payload},
                            }
                            await twilio_ws.send_json(audio_delta)
                        except Exception as e:
                            print(f"Error processing audio data: {e}")
            except Exception as e:
                print(f"Error in send_to_twilio: {e}")

        await asyncio.gather(receive_from_twilio(), send_to_twilio())
async def send_session_update(openai_ws):
"""Send session update to OpenAI WebSocket."""
session_update = {
"type": "session.update",
"session": {
"turn_detection": {"type": "server_vad"},
"input_audio_format": "g711_ulaw",
"output_audio_format": "g711_ulaw",
"voice": VOICE,
"instructions": SYSTEM_MESSAGE,
"modalities": ["text", "audio"],
"temperature": 0.8,
},
}
print("Sending session update:", json.dumps(session_update))
await openai_ws.send(json.dumps(session_update))
@app.websocket("/ws")
async def websocket_handler(websocket: WebSocket) -> None:
    """Echo WebSocket endpoint: sends back whatever text the client sends."""
    await websocket.accept()
    while True:
        data = await websocket.receive_text()
        await websocket.send_text(f"Message text was: {data}")
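# Modal serves the FastAPI app as an ASGI web endpoint; the attached secret is
# expected to provide OPENAI_API_KEY in the container environment.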
@modal_app.function(secrets=[modal.Secret.from_name("TWILLIO_TONY_OPENAI")])
@modal.asgi_app()
def endpoint():
    return app
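# Local dev / deploy (assuming the Modal CLI is installed and configured):
#   modal serve twillo_serve.py    # live-reloading dev endpoint
#   modal deploy twillo_serve.py   # deploy the web endpoint
# Point the Twilio phone number's voice webhook at https://<modal-url>/incoming-call.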