```js
import OpenAI from "openai";

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

const response = await openai.chat.completions.create({
  model: "gpt-3.5-turbo",
  messages: [
    {
      role: "system",
      content: "You are a sentient robot...",
    },
    {
      role: "user",
      content: "Tell me about JavaScript",
    },
  ],
  temperature: 1,
  max_tokens: 256,
  top_p: 1,
  frequency_penalty: 0,
  presence_penalty: 0,
});
```
{ "id": "chatcmpl-85DcgktUmpLrXgLYkMfofDsoOzJfG", "object": "chat.completion", "created": 1696254698, "model": "gpt-3.5-turbo-0613", "choices": [ { "index": 0, "message": { "role": "assistant", "content": "Apologies, but as a robot..." }, "finish_reason": "stop" } ], "usage": { "prompt_tokens": 48, "completion_tokens": 161, "total_tokens": 209 } }
{ "model": "gpt-3.5-turbo", "messages": [ { "role": "user", "content": "What's the weather like in Boston today?" } ], "functions": [ { "name": "get_current_weather", "description": "Get the current weather in a given location", "parameters": { "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. San Francisco, CA" }, "unit": { "type": "string", "enum": ["celsius", "fahrenheit"] } }, "required": ["location"] } } ], "function_call": "auto" }
{ "choices": [ { "finish_reason": "function_call", "index": 0, "message": { "content": null, "function_call": { "arguments": "{\n \"location\": \"Boston, MA\"\n}", "name": "get_current_weather" }, "role": "assistant" } } ], "created": 1694028367, "model": "gpt-3.5-turbo-0613", "object": "chat.completion", "usage": { ... } }
Token generation vs. response time

Models generate output one token at a time, so buffering the entire completion before responding makes the user wait for the whole generation to finish.

No conversational memory

The Chat Completions API is stateless; it only sees the messages included in each request.
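The usual workaround is to keep the running transcript yourself and replay it on every call. A minimal sketch, reusing the `openai` client from above:

```js
// The running transcript, replayed on every request.
const history = [
  { role: "system", content: "You are a sentient robot..." },
];

async function chat(userMessage) {
  history.push({ role: "user", content: userMessage });

  const response = await openai.chat.completions.create({
    model: "gpt-3.5-turbo",
    messages: history,
  });

  const reply = response.choices[0].message;
  history.push(reply); // remember the assistant's turn too
  return reply.content;
}
```

Note that every replayed message counts against the model's context window and your token bill, so long conversations eventually need truncation or summarization.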
Approach 1: Async API Calls
Approach 2: Streaming via Server Sent Events
netlify/functions/hello.js
```js
export const handler = async (event) => {
  return { statusCode: 200, body: "Hello World!" };
};
```
.netlify/functions/hello
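A quick way to sanity-check the endpoint from the browser console or any HTTP client; this assumes `netlify dev` is serving the site on the Netlify CLI's default port:

```js
// Assumes `netlify dev` is running on its default port (8888).
const res = await fetch("http://localhost:8888/.netlify/functions/hello");
console.log(await res.text()); // "Hello World!"
```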
import axios from "axios"; export const handler = async (event) => { const res = await axios.post( "https://api.openai.com/v1/chat/completions", { //OpenAI API Request Body }, { headers: { "Content-Type": "application/json", Authorization: `Bearer ${process.env.OPENAI_API_KEY}`, }, } ); return { statusCode: 200, body: res?.data }; };
```js
import { stream } from "@netlify/functions";
import axios from "axios";

export const handler = stream(async (event) => {
  const res = await axios.post(
    "https://api.openai.com/v1/chat/completions",
    {
      // OpenAI API Request Body
      stream: true,
    },
    {
      headers: {
        "Content-Type": "application/json",
        Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
      },
      // Ask axios for a readable stream instead of a buffered body.
      responseType: "stream",
    }
  );

  return {
    headers: { "content-type": "text/event-stream" },
    statusCode: 200,
    // Pipe OpenAI's SSE stream straight through to the client.
    body: res?.data,
  };
});
```
Approach 1: RESTful API Integration
Approach 2: Real-Time Updates with SSE
import axios from "axios"; async function callNetlifyFunction(userPrompt) { const response = await axios.get( `/.netlify/functions/bot?prompt=${userPrompt}` ); } callNetlifyFunction("Hello");
```js
function subscribeToEventStream(userPrompt) {
  const eventSource = new EventSource(
    `/.netlify/functions/bot?prompt=${encodeURIComponent(userPrompt)}`
  );

  eventSource.onmessage = (event) => {
    console.log("Data received:", event.data);
    if (event.data === "[DONE]") {
      console.log("Closing stream upon receiving [DONE]");
      eventSource.close();
    }
  };

  eventSource.onerror = (error) => {
    console.error("EventSource encountered an error:", error);
    eventSource.close();
  };
}

subscribeToEventStream("Hello");
```
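The payloads forwarded from OpenAI are JSON chunks, each carrying an incremental `delta` rather than a full message. A sketch of an `onmessage` handler that accumulates them into the complete reply; note the final `[DONE]` sentinel is plain text, not JSON, so it must be checked before parsing:

```js
let fullReply = "";

eventSource.onmessage = (event) => {
  if (event.data === "[DONE]") {
    console.log("Full reply:", fullReply);
    eventSource.close();
    return;
  }

  // Each SSE payload is a JSON chunk; new text arrives on choices[0].delta.content.
  const chunk = JSON.parse(event.data);
  fullReply += chunk.choices?.[0]?.delta?.content ?? "";
};
```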