diff --git a/apps/nextjs/lib/conversation2measurements.ts b/apps/nextjs/lib/conversation2measurements.ts
new file mode 100644
index 000000000..c12ce72a2
--- /dev/null
+++ b/apps/nextjs/lib/conversation2measurements.ts
@@ -0,0 +1,76 @@
+import { Measurement } from "@/types/models/Measurement";
+import {textCompletion} from "@/lib/llm";
+
+// IMPORTANT! Set the runtime to edge
+export const runtime = 'edge';
+
+export function conversation2MeasurementsPrompt(statement: string,
+                                                localDateTime: string | null | undefined,
+                                                previousStatements: string | null | undefined): string {
+
+
+  if(!localDateTime) {
+    const now = new Date();
+    localDateTime = now.toISOString().slice(0, 19);
+  }
+  return `
+You are a robot designed to collect diet, treatment, and symptom data from the user.
+
+Immediately begin asking the user the following questions
+- What did you eat today?
+- What did you drink today?
+- What treatments did you take today?
+- Rate all your symptoms on a scale of 1 to 5.
+
+Convert the responses to the following JSON format
+[
+\t{
+\t\t"combinationOperation" : "SUM",
+\t\t"startAt" : "{ISO_DATETIME_IN_UTC}",
+\t\t"unitName" : "grams",
+\t\t"value" : "5",
+\t\t"variableCategoryName" : "Treatments",
+\t\t"variableName" : "NMN",
+\t\t"note" : "{MAYBE_THE_ORIGINAL_STATEMENT_FOR_REFERENCE}"
+\t}
+]
+
+That would be the result if they said, "I took 5 grams of NMN."
+
+For ratings, use the unit \`/5\`. The \`unitName\` should never be an empty string.
+
+Also, after asking each question and getting a response, check if there's anything else the user want to add to the first question response. For instance, after getting a response to "What did you eat today?", your next question should be, "Did you eat anything else today?". If they respond in the negative, move on to the next question.
+
+Your responses should be in JSON format and have 2 properties called data and message. The message property should contain the message to the user. The data property should contain an array of measurement objects created from the last user response.
+
+
+${previousStatements ? `The following are the previous statements:
+${previousStatements}` : ''}
+
+// Use the current local datetime ${localDateTime} to determine startDateLocal. If specified, also determine startTimeLocal, endDateLocal, and endTimeLocal or just leave them null.\`\`\`
+The following is a user request:
+"""
+${statement}
+"""
+The following is the user request translated into a JSON object with 2 spaces of indentation and no properties with the value undefined:
+`;
+}
+
+export async function conversation2measurements(statement: string,
+                                                localDateTime: string | null | undefined,
+                                                previousStatements: string | null | undefined): Promise<Measurement[]> {
+  let promptText = conversation2MeasurementsPrompt(statement, localDateTime, previousStatements);
+  const maxTokenLength = 1500;
+  if(promptText.length > maxTokenLength) {
+    // truncate to less than 1500 characters
+    promptText = promptText.slice(0, maxTokenLength);
+
+  }
+  const str = await textCompletion(promptText, "json_object");
+  const measurements: Measurement[] = [];
+  let jsonArray = JSON.parse(str);
+  jsonArray.measurements.forEach((measurement: Measurement) => {
+    measurements.push(measurement);
+  });
+  return measurements;
+}
diff --git a/apps/nextjs/lib/llm.ts b/apps/nextjs/lib/llm.ts
new file mode 100644
index 000000000..c9c4a11ec
--- /dev/null
+++ b/apps/nextjs/lib/llm.ts
@@ -0,0 +1,27 @@
+import OpenAI from 'openai';
+// Create an OpenAI API client (that's edge-friendly!)
+const openai = new OpenAI({
+  apiKey: process.env.OPENAI_API_KEY || '',
+});
+
+export async function textCompletion(promptText: string, returnType: "text" | "json_object"): Promise<string> {
+
+  // Ask OpenAI for a streaming chat completion given the prompt
+  const response = await openai.chat.completions.create({
+    model: 'gpt-4-turbo',
+    stream: false,
+    //max_tokens: 150,
+    messages: [
+      {"role": "system", "content": `You are a helpful assistant that translates user requests into JSON objects`},
+      {role: "user", "content": promptText},
+    ],
+    response_format: { type: returnType },
+  });
+
+  if(!response.choices[0].message.content) {
+    throw new Error('No content in response');
+  }
+
+  return response.choices[0].message.content;
+}
+
diff --git a/apps/nextjs/lib/text2measurements.ts b/apps/nextjs/lib/text2measurements.ts
index f1892abd3..f05215fd7 100644
--- a/apps/nextjs/lib/text2measurements.ts
+++ b/apps/nextjs/lib/text2measurements.ts
@@ -1,13 +1,5 @@
-import OpenAI from 'openai';
 import { Measurement } from "@/types/models/Measurement";
-
-// Create an OpenAI API client (that's edge-friendly!)
-const openai = new OpenAI({
-  apiKey: process.env.OPENAI_API_KEY || '',
-});
-
-// IMPORTANT! Set the runtime to edge
-export const runtime = 'edge';
+import {textCompletion} from "@/lib/llm";
 
 export function generateText2MeasurementsPrompt(statement: string, localDateTime: string | null | undefined): string {
@@ -201,28 +193,10 @@ The following is the user request translated into a JSON object with 2 spaces of
 export async function text2measurements(statement: string, localDateTime: string | null | undefined): Promise<Measurement[]> {
   const promptText = generateText2MeasurementsPrompt(statement, localDateTime);
-
-  // Ask OpenAI for a streaming chat completion given the prompt
-  const response = await openai.chat.completions.create({
-    model: 'gpt-4-turbo',
-    stream: false,
-    //max_tokens: 150,
-    messages: [
-      {"role": "system", "content": `You are a helpful assistant that translates user requests into JSON objects`},
-      {role: "user", "content": promptText},
-    ],
-    response_format: { type: "json_object" },
-  });
-
-  // Convert the response into an array of Measurement objects
+  const str = await textCompletion(promptText, "json_object");
+  const json = JSON.parse(str);
   const measurements: Measurement[] = [];
-  //console.log(response.choices[0].message.content);
-  let str = response.choices[0].message.content;
-  if(!str) {
-    throw new Error('No content in response');
-  }
-  let jsonArray = JSON.parse(str);
-  jsonArray.measurements.forEach((measurement: Measurement) => {
+  json.measurements.forEach((measurement: Measurement) => {
     measurements.push(measurement);
   });
   return measurements;
diff --git a/docs/images/dfda-framework-diagram.png b/docs/images/dfda-framework-diagram.png
new file mode 100644
index 000000000..e02e32d0c
Binary files /dev/null and b/docs/images/dfda-framework-diagram.png differ
diff --git a/docs/logo/dfda-wide-text-logo-transparent-4-light-background.png b/docs/logo/dfda-wide-text-logo-transparent-4-light-background.png
new file mode 100644
index 000000000..471764d25
Binary files /dev/null and b/docs/logo/dfda-wide-text-logo-transparent-4-light-background.png differ
diff --git a/docs/logo/dfda-wide-text-logo-transparent-background.png b/docs/logo/dfda-wide-text-logo-transparent-background.png
new file mode 100644
index 000000000..8234b760c
Binary files /dev/null and b/docs/logo/dfda-wide-text-logo-transparent-background.png differ
diff --git a/docs/logo/fdai_qr_with_big_logo.png b/docs/logo/fdai_qr_with_big_logo.png
new file mode 100644
index 000000000..c7bb7a679
Binary files /dev/null and b/docs/logo/fdai_qr_with_big_logo.png differ
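For reference, below is a minimal usage sketch showing how the conversation2measurements helper introduced in this diff might be called from a Next.js route handler. It is not part of the commit: the route location, the request body shape, and the use of NextRequest/NextResponse are assumptions for illustration only.

// Hypothetical route handler (not in this diff); path and body shape are assumed.
import { NextRequest, NextResponse } from "next/server";
import { Measurement } from "@/types/models/Measurement";
import { conversation2measurements } from "@/lib/conversation2measurements";

export async function POST(req: NextRequest) {
  // Assumed request body: { statement, localDateTime?, previousStatements? }
  const { statement, localDateTime, previousStatements } = await req.json();

  // Prompt construction and the OpenAI call are delegated to the extracted helpers.
  const measurements: Measurement[] = await conversation2measurements(
    statement,
    localDateTime ?? null,
    previousStatements ?? null,
  );

  return NextResponse.json({ measurements });
}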