NotesWhat is notes.io?

Notes brand slogan

Notes - notes.io


// TRANSCRIBE

exports.handler = function(context, event, callback) {
// Create a TwiML Voice Response object to build the response
const twiml = new Twilio.twiml.VoiceResponse();

// If no previous conversation is present, or if the conversation is empty, start the conversation
if (!event.request.cookies.convo) {
// Greet the user with a message using AWS Polly Neural voice
twiml.say({
voice: 'Polly.Joanna-Neural',
},
"Hi! This is Tikva security customer support. How may I help you today? "
);
}

// Listen to the user's speech and pass the input to the /respond Function
twiml.gather({
speechTimeout: 'auto', // Automatically determine the end of user speech
speechModel: 'experimental_conversations', // Use the conversation-based speech recognition model
input: 'speech', // Specify speech as the input type
action: '/respond', // Send the collected input to /respond
});

// Create a Twilio Response object
const response = new Twilio.Response();

// Set the response content type to XML (TwiML)
response.appendHeader('Content-Type', 'application/xml');

// Set the response body to the generated TwiML
response.setBody(twiml.toString());

// If no conversation cookie is present, set an empty conversation cookie
if (!event.request.cookies.convo) {
response.setCookie('convo', '', ['Path=/']);
}

// Return the response to Twilio

return callback(null, response);
};









// RESPOND
// Import required modules
//const { Configuration, OpenAIApi } = require("openai");
const OpenAIApi = require("openai");

// Define the main function for handling requests
exports.handler = async function(context, event, callback) {
// Set up the OpenAI API with the API key
//const configuration = new Configuration({ apiKey: context.OPENAI_API_KEY });
//const openai = new OpenAIApi(configuration);
const openai = new OpenAIApi({ apiKey: context.OPENAI_API_KEY });

// Set up the Twilio VoiceResponse object to generate the TwiML
const twiml = new Twilio.twiml.VoiceResponse();

// Initiate the Twilio Response object to handle updating the cookie with the chat history
const response = new Twilio.Response();

// Parse the cookie value if it exists
const cookieValue = event.request.cookies.convo;
const cookieData = cookieValue ?
JSON.parse(decodeURIComponent(cookieValue)) :
null;

// Get the user's voice input from the event
let voiceInput = event.SpeechResult;

// Create a conversation variable to store the dialog and the user's input to the conversation history
const conversation = cookieData?.conversation || [];
conversation.push(`user: ${voiceInput}`);

// Get the AI's response based on the conversation history
const aiResponse = await generateAIResponse(conversation.join(";"));

// For some reason the OpenAI API loves to prepend the name or role in its responses, so let's remove 'assistant:' 'Joanna:', or 'user:' from the AI response if it's the first word
const cleanedAiResponse = aiResponse.replace(/^w+:s*/i, "").trim();

// Add the AI's response to the conversation history
conversation.push(`assistant: ${aiResponse}`);

// Limit the conversation history to the last 10 messages; you can increase this if you want but keeping things short for this demonstration improves performance
while (conversation.length > 10) {
conversation.shift();
}

// Generate some <Say> TwiML using the cleaned up AI response
twiml.say({
voice: "Polly.Joanna-Neural",
},
cleanedAiResponse
);

// Redirect to the Function where the <Gather> is capturing the caller's speech
twiml.redirect({
method: "POST",
},
`/transcribe`
);

// Since we're using the response object to handle cookies we can't just pass the TwiML straight back to the callback, we need to set the appropriate header and return the TwiML in the body of the response
response.appendHeader("Content-Type", "application/xml");
response.setBody(twiml.toString());

// Update the conversation history cookie with the response from the OpenAI API
const newCookieValue = encodeURIComponent(
JSON.stringify({
conversation,
})
);
response.setCookie("convo", newCookieValue, ["Path=/"]);

// Return the response to the handler
return callback(null, response);

// Function to generate the AI response based on the conversation history
async function generateAIResponse(conversation) {
const messages = formatConversation(conversation);
return await createChatCompletion(messages);
}

// Function to create a chat completion using the OpenAI API
async function createChatCompletion(messages) {
try {
//const completion = await openai.createChatCompletion({
const completion = await openai.chat.completions.create({
model: "gpt-3.5-turbo",
messages: messages,
temperature: 0.8, // Controls the randomness of the generated responses. Higher values (e.g., 1.0) make the output more random and creative, while lower values (e.g., 0.2) make it more focused and deterministic. You can adjust the temperature based on your desired level of creativity and exploration.
max_tokens: 100, //You can adjust this number to control the length of the generated responses. Keep in mind that setting max_tokens too low might result in responses that are cut off and don't make sense.
// top_p: 0.9, Set the top_p value to around 0.9 to keep the generated responses focused on the most probable tokens without completely eliminating creativity. Adjust the value based on the desired level of exploration.
// n: 1, Specifies the number of completions you want the model to generate. Generating multiple completions will increase the time it takes to receive the responses.
});
// Check if the response has a status code of 500
if (completion.status === 500) {
console.error("Error: OpenAI API returned a 500 status code."); // Log an error message indicating that the OpenAI API returned a 500 status code
twiml.say({
// Create a TwiML say element to provide an error message to the user
voice: "Polly.Joanna-Neural",
},
"Oops, looks like I got an error from the OpenAI API on that request. Let's try that again."
);
twiml.redirect({
// Create a TwiML redirect element to redirect the user to the /transcribe endpoint
method: "POST",
},
`/transcribe`
);
response.appendHeader("Content-Type", "application/xml"); // Set the Content-Type header of the response to "application/xml"
response.setBody(twiml.toString()); // Set the body of the response to the XML string representation of the TwiML response
return callback(null, response); // Return the response to the callback function
}
//return completion.data.choices[0].message.content;
return completion.choices[0].message.content;
} catch (error) {
// Check if the error is a timeout error
if (error.code === "ETIMEDOUT" || error.code === "ESOCKETTIMEDOUT") {
console.error("Error: OpenAI API request timed out."); // Log an error message indicating that the OpenAI API request timed out
twiml.say({
// Create a TwiML say element to provide an error message to the user
voice: "Polly.Joanna-Neural",
},
"I'm sorry, but it's taking me a little bit too long to respond. Let's try that again, one more time."
);
twiml.redirect({
// Create a TwiML redirect element to redirect the user to the /transcribe endpoint
method: "POST",
},
`/transcribe`
);
response.appendHeader("Content-Type", "application/xml"); // Set the Content-Type header of the response to "application/xml"
response.setBody(twiml.toString()); // Set the body of the response to the XML string representation of the TwiML response
return callback(null, response); // Return the response to the callback function
} else {
console.error("Error during OpenAI API request:", error);
throw error;
}
}
}

// Function to format the conversation history into a format that the OpenAI API can understand
function formatConversation(conversation) {
let isAI = true;
const messages = [{
role: "system",
content: "You are a creative, funny, friendly and amusing AI assistant named Joanna. Please provide engaging but concise responses.",
},
{
role: "user",
content: "We are having a casual conversation over the telephone so please provide engaging but concise responses.",
},
];

// Iterate through the conversation history and alternate between 'assistant' and 'user' roles
for (const message of conversation.split(";")) {
const role = isAI ? "assistant" : "user";
messages.push({
role: role,
content: message,
});
isAI = !isAI;
}
return messages;
}
}
     
 
what is notes.io
 

Notes.io is a web-based application for taking notes. You can take your notes and share them with other people. If you like taking long notes, notes.io is designed for you. To date, over 8,000,000,000 notes created and continuing...

With notes.io;

  • * You can take a note from anywhere and any device with internet connection.
  • * You can share the notes in social platforms (YouTube, Facebook, Twitter, instagram etc.).
  • * You can quickly share your contents without website, blog and e-mail.
  • * You don't need to create any Account to share a note. As you wish you can use quick, easy and best shortened notes with sms, websites, e-mail, or messaging services (WhatsApp, iMessage, Telegram, Signal).
  • * Notes.io has fabulous infrastructure design for a short link and allows you to share the note as an easy and understandable link.

Fast: Notes.io is built for speed and performance. You can take notes quickly and browse your archive.

Easy: Notes.io doesn’t require installation. Just write and share note!

Short: Notes.io’s URL is just 8 characters. You’ll get a shortened link for your note when you want to share it. (Ex: notes.io/q )

Free: Notes.io works for 12 years and has been free since the day it was started.


You immediately create your first note and start sharing with the ones you wish. If you want to contact us, you can use the following communication channels;


Email: [email protected]

Twitter: http://twitter.com/notesio

Instagram: http://instagram.com/notes.io

Facebook: http://facebook.com/notesio



Regards;
Notes.io Team

     
 
Shortened Note Link
 
 
Loading Image
 
     
 
Long File
 
 

Written notes larger than 18KB cannot be shortened.

To be smaller than 18KB, please organize your notes, or sign in.