# /home/ubuntu/codegamaai-test/phone_call/app.py

import json
import os

from dotenv import load_dotenv
from fastapi import FastAPI, Request, Response, WebSocket, WebSocketDisconnect, status
from twilio.twiml.voice_response import VoiceResponse
import openai

# Load environment variables from a local .env file (e.g. OPENAI_API_KEY).
load_dotenv()

# Initialize the FastAPI app that serves the Twilio webhooks below.
app = FastAPI()

# OpenAI API key setup — read from the environment; will be None if unset.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
openai.api_key = OPENAI_API_KEY

# Initial message spoken to the caller at the start of a new conversation.
INITIAL_MESSAGE = "Hello, how are you?"

@app.post("/incoming-call")
async def handle_incoming_call(request: Request):
    """Twilio webhook for a new or continuing call.

    Returns TwiML that greets the caller on first contact, gathers speech
    input (POSTed by Twilio to /respond), and persists the running chat
    transcript in a "messages" cookie.
    """
    voice_response = VoiceResponse()
    messages_cookie = request.cookies.get("messages")

    if messages_cookie:
        # Continuing call: restore the transcript from the cookie.
        messages = json.loads(messages_cookie)
    else:
        # First contact: seed the transcript and speak the greeting.
        # BUGFIX: keep `messages` as a list here. The original stored a
        # json.dumps() string and then json.dumps()'d it AGAIN in
        # set_cookie below, double-encoding the cookie so /respond got a
        # str back from json.loads and crashed on .append().
        messages = [
            {"role": "system", "content": "You are a helpful phone assistant."},
            {"role": "assistant", "content": INITIAL_MESSAGE},
        ]
        voice_response.say(INITIAL_MESSAGE)

    # Gather the caller's speech with a short timeout; Twilio POSTs the
    # transcription to /respond when speech is detected.
    gather = voice_response.gather(
        input='speech',
        timeout=5,
        action='/respond',
        enhanced=True,
        speechModel='phone_call',
    )
    gather.say("Please tell me how I can assist you.")

    # Reached only if <Gather> times out with no input: prompt and retry.
    voice_response.say("I did not catch that. Please try again or speak louder.")
    voice_response.redirect('/incoming-call')  # Redirect to handle more input

    # Return the TwiML and persist the transcript (single JSON encoding).
    response = Response(content=str(voice_response), media_type='application/xml')
    response.set_cookie(key="messages", value=json.dumps(messages))
    return response

@app.post("/respond")
async def handle_response(request: Request):
    """Twilio <Gather> action: feed the transcription to OpenAI, speak the reply.

    Appends the user's utterance and the assistant's answer to the
    cookie-stored transcript, then loops back to /incoming-call.
    """
    form_data = await request.form()
    voice_input = form_data.get("SpeechResult")

    voice_response = VoiceResponse()

    # BUGFIX: Twilio can POST here without a SpeechResult (nothing was
    # recognized). The original forwarded None to OpenAI as message
    # content, which errors. Re-prompt the caller instead.
    if not voice_input:
        voice_response.redirect('/incoming-call')
        return Response(content=str(voice_response), media_type='application/xml')

    messages_cookie = request.cookies.get("messages")
    messages = json.loads(messages_cookie) if messages_cookie else []
    messages.append({"role": "user", "content": voice_input})

    # Generate the assistant's reply with OpenAI.
    # NOTE(review): this synchronous call blocks the event loop; consider
    # the async OpenAI client if call volume matters.
    chat_completion = openai.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=messages,
        temperature=0
    )
    assistant_response = chat_completion.choices[0].message.content.strip()
    messages.append({"role": "assistant", "content": assistant_response})

    # Speak the completion, then continue the gather loop.
    voice_response.say(assistant_response)
    voice_response.redirect('/incoming-call')

    # Return the TwiML with the updated transcript cookie.
    response = Response(content=str(voice_response), media_type='application/xml')
    response.set_cookie(key="messages", value=json.dumps(messages))
    return response

@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    """Accept a WebSocket and log incoming text frames until the peer hangs up."""
    await websocket.accept()
    try:
        while True:
            data = await websocket.receive_text()
            # Process your streaming data here
            print("Received via WebSocket:", data)  # Log the data received
    except WebSocketDisconnect:
        # Normal client hang-up: the socket is already closed, nothing to do.
        # BUGFIX: the original `finally: await websocket.close()` also ran on
        # disconnect, raising a secondary error on the already-closed socket.
        pass
    except Exception as e:
        print('WebSocket error:', e)
        # Close only on unexpected errors, while the socket may still be open.
        await websocket.close()

if __name__ == "__main__":
    import uvicorn

    # Run with TLS. Host/port and certificate paths are overridable via the
    # environment; the defaults preserve the original hard-coded values.
    uvicorn.run(
        app,
        host=os.getenv("HOST", "0.0.0.0"),
        port=int(os.getenv("PORT", "3000")),
        ssl_keyfile=os.getenv("SSL_KEYFILE", "privkey.pem"),
        ssl_certfile=os.getenv("SSL_CERTFILE", "fullchain.pem"),
    )