Merge pull request #20 from tpaulshippy/mock-llm
Integration test with mock LLM
steve8708 authored Jun 10, 2024
2 parents 25ba826 + 8eac630 commit a344018
Showing 11 changed files with 266 additions and 5 deletions.
4 changes: 3 additions & 1 deletion .github/workflows/buildOnPR.yml
@@ -22,5 +22,7 @@ jobs:
run: npm run lint
- name: Build
run: npm run build
- name: Test
- name: Unit Test
run: npm test
- name: Integration Test
run: npm run test:integration
4 changes: 3 additions & 1 deletion package.json
@@ -37,7 +37,9 @@
"ma": "./dist/cli.mjs"
},
"scripts": {
"test": "vitest run",
"test": "vitest run --exclude src/tests/integration",
"test:integration": "vitest run src/tests/integration --exclude src/tests/integration/add.test.ts --poolOptions.threads.singleThread",
"test:all": "vitest run",
"start": "jiti ./src/cli.ts",
"lint:fix": "prettier --write . && eslint --fix",
"lint": "prettier --check . && eslint",
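
The unit script now skips the integration folder, which gets its own single-threaded runner (likely because the integration tests mutate shared files on disk and the user's `~/.micro-agent` config, so parallel workers would race), while `test:all` keeps the old run-everything behavior. A rough `vitest.config.ts` equivalent of those CLI flags, offered only as a sketch of what they express (the repo passes flags on the command line instead):

```ts
// Sketch only: a vitest.config.ts expressing what the CLI flags above do.
// (Assumed equivalence; note that overriding `exclude` in config replaces
// vitest's defaults such as node_modules, which the additive flag form avoids.)
import { defineConfig } from 'vitest/config';

export default defineConfig({
  test: {
    // mirrors `--exclude src/tests/integration` in the "test" script
    exclude: ['src/tests/integration/**'],
    poolOptions: {
      // mirrors `--poolOptions.threads.singleThread` in "test:integration"
      threads: { singleThread: true },
    },
  },
});
```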
2 changes: 2 additions & 0 deletions src/helpers/config.test.ts
@@ -46,6 +46,8 @@ describe('getConfig', () => {
MODEL: 'gpt-4o',
OPENAI_API_ENDPOINT: 'https://api.openai.com/v1',
OPENAI_KEY: undefined,
USE_MOCK_LLM: false,
MOCK_LLM_RECORD_FILE: undefined,
};

it('should return an object with defaults and the env if no config is provided', async () => {
6 changes: 6 additions & 0 deletions src/helpers/config.ts
@@ -38,6 +38,12 @@ const configParsers = {
LANGUAGE(language?: string) {
return language || 'en';
},
MOCK_LLM_RECORD_FILE(filename?: string) {
return filename;
},
USE_MOCK_LLM(useMockLlm?: string) {
return useMockLlm === 'true';
},
} as const;

type ConfigKeys = keyof typeof configParsers;
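
The two new keys are deliberately conservative: `USE_MOCK_LLM` only switches on for the exact string `'true'`, and `MOCK_LLM_RECORD_FILE` is passed through as-is. A self-contained sketch of that coercion (the real parsers are invoked through `getConfig`, not called directly like this):

```ts
// Sketch of the coercion the parsers above apply (illustrative only):
const USE_MOCK_LLM = (useMockLlm?: string) => useMockLlm === 'true';
const MOCK_LLM_RECORD_FILE = (filename?: string) => filename;

USE_MOCK_LLM('true');            // true
USE_MOCK_LLM('1');               // false -- only the exact string 'true' enables the mock
USE_MOCK_LLM(undefined);         // false -- mocking is off by default
MOCK_LLM_RECORD_FILE(undefined); // undefined -- no recording unless a path is set
```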
33 changes: 30 additions & 3 deletions src/helpers/llm.ts
@@ -12,6 +12,7 @@ import { removeBackticks } from './remove-backticks';
import ollama from 'ollama';
import dedent from 'dedent';
import { removeInitialSlash } from './remove-initial-slash';
import { captureLlmRecord, mockedLlmCompletion } from './mock-llm';

const defaultModel = 'gpt-4o';
export const USE_ASSISTANT = true;
@@ -124,7 +125,16 @@ export const getSimpleCompletion = async function (options: {
messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[];
onChunk?: (chunk: string) => void;
}) {
const { MODEL: model } = await getConfig();
const {
MODEL: model,
MOCK_LLM_RECORD_FILE: mockLlmRecordFile,
USE_MOCK_LLM: useMockLlm,
} = await getConfig();

if (useMockLlm) {
return mockedLlmCompletion(mockLlmRecordFile, options.messages);
}

if (useOllama(model)) {
const response = await ollama.chat({
model: model,
@@ -140,6 +150,7 @@ export const getSimpleCompletion = async function (options: {
options.onChunk(chunk.message.content);
}
}
captureLlmRecord(options.messages, output, mockLlmRecordFile);

return output;
}
@@ -162,6 +173,8 @@ export const getSimpleCompletion = async function (options: {
}
}

captureLlmRecord(options.messages, output, mockLlmRecordFile);

return output;
};

@@ -170,7 +183,15 @@ export const getCompletion = async function (options: {
options: RunOptions;
useAssistant?: boolean;
}) {
const { MODEL: model } = await getConfig();
const {
MODEL: model,
MOCK_LLM_RECORD_FILE: mockLlmRecordFile,
USE_MOCK_LLM: useMockLlm,
} = await getConfig();
if (useMockLlm) {
return mockedLlmCompletion(mockLlmRecordFile, options.messages);
}

const useModel = model || defaultModel;
const useOllamaChat = useOllama(useModel);

@@ -190,6 +211,8 @@ export const getCompletion = async function (options: {
}
}
process.stdout.write('\n');

captureLlmRecord(options.messages, output, mockLlmRecordFile);
return output;
}
const openai = await getOpenAi();
@@ -245,7 +268,9 @@ export const getCompletion = async function (options: {
})
.on('textDone', () => {
process.stdout.write('\n');
resolve(removeBackticks(result));
const output = removeBackticks(result);
captureLlmRecord(options.messages, output, mockLlmRecordFile);
resolve(output);
});
});
} else {
@@ -264,6 +289,8 @@ export const getCompletion = async function (options: {
}
}
process.stdout.write('\n');
captureLlmRecord(options.messages, output, mockLlmRecordFile);

return output;
}
};
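
Both `getSimpleCompletion` and `getCompletion` now follow the same record/replay pattern: when `USE_MOCK_LLM` is on they return the canned answer before touching any provider, and after every real completion (Ollama streaming, assistant, or plain chat) they hand the result to `captureLlmRecord`. Distilled into one hypothetical helper (`callRealProvider` and `Message` are stand-ins invented for this sketch; the other names come from this diff):

```ts
import type OpenAI from 'openai';
import { getConfig } from './config';
import { captureLlmRecord, mockedLlmCompletion } from './mock-llm';

type Message = OpenAI.Chat.Completions.ChatCompletionMessageParam;

// Hypothetical distillation of the guard added to both helpers above.
async function completionWithMock(
  messages: Message[],
  callRealProvider: (messages: Message[]) => Promise<string> // stand-in for the Ollama/OpenAI paths
): Promise<string> {
  const { USE_MOCK_LLM, MOCK_LLM_RECORD_FILE } = await getConfig();
  if (USE_MOCK_LLM) {
    // Replay: short-circuit to the recorded output; no network call is made.
    return mockedLlmCompletion(MOCK_LLM_RECORD_FILE, messages);
  }
  const output = await callRealProvider(messages);
  // Record: append { inputs, output } whenever a record file is configured.
  await captureLlmRecord(messages, output, MOCK_LLM_RECORD_FILE);
  return output;
}
```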
69 changes: 69 additions & 0 deletions src/helpers/mock-llm.ts
@@ -0,0 +1,69 @@
import { readFile, writeFile } from 'fs/promises';
import { KnownError } from './error';
import { formatMessage } from './test';
import OpenAI from 'openai';

const readMockLlmRecordFile = async (
mockLlmRecordFile: string
): Promise<{ completions: any[] }> => {
const mockLlmRecordFileContents = await readFile(
mockLlmRecordFile,
'utf-8'
).catch(() => '');
let jsonLlmRecording;
try {
jsonLlmRecording = JSON.parse(mockLlmRecordFileContents.toString());
} catch {
jsonLlmRecording = { completions: [] };
}
return jsonLlmRecording;
};

export const captureLlmRecord = async (
messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[],
output: string,
mockLlmRecordFile?: string
) => {
if (mockLlmRecordFile) {
const jsonLlmRecording = await readMockLlmRecordFile(mockLlmRecordFile);

jsonLlmRecording.completions.push({
inputs: messages,
output: output,
});

await writeFile(
mockLlmRecordFile,
JSON.stringify(jsonLlmRecording, null, 2)
);
}
};
export const mockedLlmCompletion = async (
mockLlmRecordFile: string | undefined,
messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[]
) => {
if (!mockLlmRecordFile) {
throw new KnownError(
'You need to set the MOCK_LLM_RECORD_FILE environment variable to use the mock LLM'
);
}
const jsonLlmRecording = await readMockLlmRecordFile(mockLlmRecordFile);
const completion = jsonLlmRecording.completions.find(
(completion: { inputs: any }) => {
// Match on system input only
return (
JSON.stringify(completion.inputs[0]) === JSON.stringify(messages[0])
);
}
);
if (!completion) {
throw new KnownError(
`No completion found for the given system input in the MOCK_LLM_RECORD_FILE: ${JSON.stringify(
messages[0]
)}`
);
}
process.stdout.write(formatMessage('\n'));
process.stderr.write(formatMessage(completion.output));
return completion.output;
};
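
Note the matching rule: `mockedLlmCompletion` compares only `inputs[0]`, the system message, so a recording still replays even when later user messages drift. A hypothetical record file showing the `{ completions: [{ inputs, output }] }` shape these helpers read and write (the messages and output are invented for illustration; this is not the contents of `test/fixtures/add.json`):

```ts
// Hypothetical MOCK_LLM_RECORD_FILE contents, written here as a typed
// object for clarity (on disk it is plain JSON, serialized by captureLlmRecord).
const exampleRecording = {
  completions: [
    {
      inputs: [
        { role: 'system', content: 'You are an expert TypeScript engineer...' },
        { role: 'user', content: 'Make the failing add test pass.' },
      ],
      output:
        'export function add(a: number, b: number) {\n  return a + b;\n}',
    },
  ],
};
```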
Empty file added src/tests/integration/add.ts
48 changes: 48 additions & 0 deletions src/tests/integration/cli.test.ts
@@ -0,0 +1,48 @@
import { execaCommand } from 'execa';
import { readFile, writeFile } from 'fs/promises';
import { afterAll, describe, expect, it } from 'vitest';
import { removeBackticks } from '../../helpers/remove-backticks';

const integrationTestPath = 'src/tests/integration';

describe('cli', () => {
it('should run with mock LLM', async () => {
// Write the test file using the mock LLM record
const mockLlmRecordFile = 'test/fixtures/add.json';
const mockLlmRecordFileContents = await readFile(
mockLlmRecordFile,
'utf-8'
);
const jsonLlmRecording = JSON.parse(mockLlmRecordFileContents.toString());

const testContents = jsonLlmRecording.completions[1].output;
await writeFile(
`${integrationTestPath}/add.test.ts`,
removeBackticks(testContents)
);

// Execute the CLI command
const result = await execaCommand(
`USE_MOCK_LLM=true MOCK_LLM_RECORD_FILE=test/fixtures/add.json jiti ./src/cli.ts ${integrationTestPath}/add.ts -f ${integrationTestPath}/add.test.ts -t "npm run test:all -- add"`,
{
input: '\x03',
shell: process.env.SHELL || true,
}
);

const output = result.stdout;

// Check the output
expect(output).toContain('add is not a function');
expect(output).toContain('Generating code...');
expect(output).toContain('Updated code');
expect(output).toContain('Running tests...');
expect(output).toContain(`6 passed`);
expect(output).toContain('All tests passed!');
});

afterAll(async () => {
await writeFile(`${integrationTestPath}/add.ts`, '');
await writeFile(`${integrationTestPath}/add.test.ts`, '');
});
});
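
Two details make this test work: `completions[1].output` holds the generated test file that the recorded session produced, and `input: '\x03'` pipes ETX (the Ctrl-C byte) to stdin so the interactive CLI exits instead of waiting for more input. The same launch pattern, reduced to a standalone sketch (assumption: the CLI's prompt handling treats the byte as a cancel, which is what these tests rely on):

```ts
import { execaCommand } from 'execa';

// Standalone sketch of the launch pattern used here and in the
// interactive tests below: pipe ETX (0x03, Ctrl-C) to stdin so the CLI
// bails out once it has finished rendering.
const { stdout } = await execaCommand('jiti ./src/cli.ts', {
  input: '\x03',
  shell: process.env.SHELL || true,
});
console.log(stdout.includes('🦾 Micro Agent')); // same intro check as below
```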
62 changes: 62 additions & 0 deletions src/tests/integration/interactive.test.ts
@@ -0,0 +1,62 @@
import { execaCommand } from 'execa';
import { lstat, writeFile } from 'fs/promises';
import { beforeAll, describe, expect, it } from 'vitest';

const checkConfigFileExists = async () => {
return await lstat(`${process.env.HOME}/.micro-agent`)
.then(() => true)
.catch(() => false);
};

describe('interactive cli', () => {
beforeAll(async () => {
const configFileExists = await checkConfigFileExists();
if (!configFileExists) {
await writeFile(
`${process.env.HOME}/.micro-agent`,
'OPENAI_KEY=sk-1234567890abcdef1234567890abcdef'
);
}
});
it('should start interactive mode with an intro', async () => {
const result = await execaCommand('jiti ./src/cli.ts', {
input: '\x03',
shell: process.env.SHELL || true,
});

const output = result.stdout;

expect(output).toContain('🦾 Micro Agent');
});

it('should ask for an OpenAI key if not set', async () => {
// Rename the config file to simulate a fresh install
await execaCommand('mv ~/.micro-agent ~/.micro-agent.bak', {
shell: process.env.SHELL || true,
});
const result = await execaCommand('jiti ./src/cli.ts', {
input: '\x03',
shell: process.env.SHELL || true,
});

const output = result.stdout;

expect(output).toContain('Welcome newcomer! What is your OpenAI key?');

// Restore the config file
await execaCommand('mv ~/.micro-agent.bak ~/.micro-agent', {
shell: process.env.SHELL || true,
});
});

it('should ask for a prompt', async () => {
const result = await execaCommand('jiti ./src/cli.ts', {
input: '\x03',
shell: process.env.SHELL || true,
});

const output = result.stdout;

expect(output).toContain('What would you like to do?');
});
});
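
One design note: the key-prompt test moves `~/.micro-agent` aside with `mv` and moves it back at the end, so a failing assertion in between would strand the backup. A hedged alternative using `fs/promises` with a `finally` block (a sketch, not what this PR does):

```ts
import { rename } from 'fs/promises';

// Sketch: same backup/restore as the `mv` commands above, but the
// restore runs even if an assertion in the middle throws.
const config = `${process.env.HOME}/.micro-agent`;
await rename(config, `${config}.bak`);
try {
  // ...run the CLI and assert on its output...
} finally {
  await rename(`${config}.bak`, config);
}
```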