From e0a55327339111f9321ee7936a4046abc9faa469 Mon Sep 17 00:00:00 2001 From: scosman Date: Sun, 23 Feb 2025 15:17:53 -0500 Subject: [PATCH 1/3] Much better prompt system for evals - Freeze non-frozen prompts into the task_run_config, so the evals are consistent - Expose frozen prompts via prompt UI --- app/desktop/studio_server/eval_api.py | 45 +++++-- app/desktop/studio_server/test_eval_api.py | 96 +++++++++++---- app/web_ui/src/lib/api_schema.d.ts | 50 +++++++- app/web_ui/src/lib/stores.ts | 2 +- .../[task_id]/[eval_id]/+page.svelte | 15 +-- .../[task_id]/saved/[prompt_id]/+page.svelte | 27 ++-- .../(app)/run/prompt_type_selector.svelte | 2 +- libs/core/kiln_ai/adapters/eval/g_eval.py | 18 ++- .../kiln_ai/adapters/eval/test_eval_runner.py | 10 +- .../core/kiln_ai/adapters/eval/test_g_eval.py | 10 +- libs/core/kiln_ai/adapters/prompt_builders.py | 64 +++++----- .../kiln_ai/adapters/test_prompt_builders.py | 116 ++++++------------ libs/core/kiln_ai/datamodel/eval.py | 4 - libs/core/kiln_ai/datamodel/prompt.py | 4 + libs/core/kiln_ai/datamodel/prompt_id.py | 21 +++- libs/core/kiln_ai/datamodel/task.py | 9 +- .../core/kiln_ai/datamodel/test_eval_model.py | 13 -- libs/core/kiln_ai/datamodel/test_prompt_id.py | 27 +++- libs/server/kiln_server/prompt_api.py | 32 ++++- 19 files changed, 348 insertions(+), 217 deletions(-) diff --git a/app/desktop/studio_server/eval_api.py b/app/desktop/studio_server/eval_api.py index 7b4a99fe..e8fc3a68 100644 --- a/app/desktop/studio_server/eval_api.py +++ b/app/desktop/studio_server/eval_api.py @@ -22,6 +22,7 @@ EvalOutputScore, EvalTemplate, ) +from kiln_ai.datamodel.prompt_id import is_frozen_prompt from kiln_ai.datamodel.task import RunConfigProperties, TaskRunConfig from kiln_ai.utils.name_generator import generate_memorable_name from kiln_server.task_api import task_from_id @@ -168,6 +169,33 @@ async def create_task_run_config( ) -> TaskRunConfig: task = task_from_id(project_id, task_id) name = request.name or generate_memorable_name() + + parent_project = task.parent_project() + if parent_project is None: + raise HTTPException( + status_code=400, + detail="Task must have a parent project.", + ) + + froze_prompt = False + prompt: BasePrompt | None = None + if not is_frozen_prompt(request.prompt_id): + # For dynamic prompts, we "freeze" a copy of this prompt into the task run config so we don't accidentally invalidate evals if the user changes something that impacts the prompt (example: changing data for multi-shot, or changing task for basic-prompt) + # We then point the task_run_config.run_config_properties.prompt_id to this new frozen prompt + froze_prompt = True + prompt_builder = prompt_builder_from_id(request.prompt_id, task) + prompt_name = generate_memorable_name() + prompt = BasePrompt( + name=prompt_name, + long_name=prompt_name + + " (frozen prompt from '" + + request.prompt_id + + "')", + generator_id=request.prompt_id, + prompt=prompt_builder.build_base_prompt(), + chain_of_thought_instructions=prompt_builder.chain_of_thought_prompt(), + ) + task_run_config = TaskRunConfig( parent=task, name=name, @@ -177,7 +205,13 @@ async def create_task_run_config( model_provider_name=request.model_provider_name, prompt_id=request.prompt_id, ), + prompt=prompt, ) + if froze_prompt: + # Set after, because the ID isn't known until the TaskRunConfig is created + task_run_config.run_config_properties.prompt_id = ( + f"task_run_config::{parent_project.id}::{task.id}::{task_run_config.id}" + ) task_run_config.save_to_file() return task_run_config @@ -190,19 +224,9 @@ async def
create_eval_config( eval_id: str, request: CreateEvalConfigRequest, ) -> EvalConfig: - task = task_from_id(project_id, task_id) eval = eval_from_id(project_id, task_id, eval_id) name = request.name or generate_memorable_name() - # Create a prompt instance to save to the eval config - prompt_builder = prompt_builder_from_id(request.prompt_id, task) - prompt = BasePrompt( - name=request.prompt_id, - generator_id=request.prompt_id, - prompt=prompt_builder.build_base_prompt(), - chain_of_thought_instructions=prompt_builder.chain_of_thought_prompt(), - ) - eval_config = EvalConfig( name=name, config_type=request.type, @@ -215,7 +239,6 @@ async def create_eval_config( "adapter_name": "kiln_eval", }, ), - prompt=prompt, parent=eval, ) eval_config.save_to_file() diff --git a/app/desktop/studio_server/test_eval_api.py b/app/desktop/studio_server/test_eval_api.py index 009671e5..adbf3690 100644 --- a/app/desktop/studio_server/test_eval_api.py +++ b/app/desktop/studio_server/test_eval_api.py @@ -10,6 +10,7 @@ BasePrompt, DataSource, DataSourceType, + Project, PromptId, Task, ) @@ -47,12 +48,19 @@ def client(app): @pytest.fixture def mock_task(tmp_path): + project = Project( + id="project1", + name="Test Project", + path=tmp_path / "project.kiln", + ) + project.save_to_file() task = Task( id="task1", name="Test Task", description="Test Description", instruction="Test Instructions", path=tmp_path / "task.kiln", + parent=project, ) task.save_to_file() return task @@ -210,16 +218,23 @@ async def test_create_evaluator( async def test_create_task_run_config(client, mock_task_from_id, mock_task): mock_task_from_id.return_value = mock_task - response = client.post( - "/api/projects/project1/tasks/task1/task_run_config", - json={ - "name": "Test Task Run Config", - "description": "Test Description", - "model_name": "gpt-4o", - "model_provider_name": "openai", - "prompt_id": "simple_chain_of_thought_prompt_builder", - }, - ) + with ( + patch( + "app.desktop.studio_server.eval_api.generate_memorable_name" + ) as mock_generate_memorable_name, + ): + mock_generate_memorable_name.return_value = "Custom Name" + + response = client.post( + "/api/projects/project1/tasks/task1/task_run_config", + json={ + "name": "Test Task Run Config", + "description": "Test Description", + "model_name": "gpt-4o", + "model_provider_name": "openai", + "prompt_id": "simple_chain_of_thought_prompt_builder", + }, + ) assert response.status_code == 200 result = response.json() @@ -229,9 +244,13 @@ async def test_create_task_run_config(client, mock_task_from_id, mock_task): assert result["run_config_properties"]["model_provider_name"] == "openai" assert ( result["run_config_properties"]["prompt_id"] - == "simple_chain_of_thought_prompt_builder" + == "task_run_config::project1::task1::" + result["id"] + ) + assert result["prompt"]["name"] == "Custom Name" + assert ( + result["prompt"]["long_name"] + == "Custom Name (frozen prompt from 'simple_chain_of_thought_prompt_builder')" ) - # Fetch it from API fetch_response = client.get("/api/projects/project1/tasks/task1/task_run_configs") assert fetch_response.status_code == 200 @@ -239,6 +258,47 @@ async def test_create_task_run_config(client, mock_task_from_id, mock_task): assert len(configs) == 1 assert configs[0]["id"] == result["id"] assert configs[0]["name"] == result["name"] + assert configs[0]["prompt"]["name"] == "Custom Name" + assert configs[0]["prompt"]["long_name"] == ( + "Custom Name (frozen prompt from 'simple_chain_of_thought_prompt_builder')" + ) + assert 
configs[0]["run_config_properties"]["prompt_id"] == ( + "task_run_config::project1::task1::" + result["id"] + ) + + +@pytest.mark.asyncio +async def test_create_task_run_config_without_freezing( + client, mock_task_from_id, mock_task +): + mock_task_from_id.return_value = mock_task + + with ( + patch( + "app.desktop.studio_server.eval_api.generate_memorable_name" + ) as mock_generate_memorable_name, + ): + mock_generate_memorable_name.return_value = "Custom Name" + + response = client.post( + "/api/projects/project1/tasks/task1/task_run_config", + json={ + "name": "Test Task Run Config", + "description": "Test Description", + "model_name": "gpt-4o", + "model_provider_name": "openai", + "prompt_id": "id::prompt_123", + }, + ) + + assert response.status_code == 200 + result = response.json() + assert result["name"] == "Test Task Run Config" + assert result["description"] == "Test Description" + assert result["run_config_properties"]["model_name"] == "gpt-4o" + assert result["run_config_properties"]["model_provider_name"] == "openai" + assert result["run_config_properties"]["prompt_id"] == "id::prompt_123" + assert result["prompt"] is None @pytest.mark.asyncio @@ -249,15 +309,8 @@ async def test_create_eval_config( with ( patch("app.desktop.studio_server.eval_api.eval_from_id") as mock_eval_from_id, - patch( - "app.desktop.studio_server.eval_api.prompt_builder_from_id" - ) as mock_prompt_builder, ): mock_eval_from_id.return_value = mock_eval - mock_prompt_builder.return_value.build_base_prompt.return_value = "base prompt" - mock_prompt_builder.return_value.chain_of_thought_prompt.return_value = ( - "cot prompt" - ) response = client.post( "/api/projects/project1/tasks/task1/eval/eval1/create_eval_config", @@ -278,8 +331,6 @@ async def test_create_eval_config( result["model"]["properties"]["model_provider"] == valid_eval_config_request.provider ) - assert isinstance(result["prompt"], dict) - # mock_save.assert_called_once() # Fetch disk assert len(mock_eval.configs()) == 1 @@ -291,8 +342,6 @@ async def test_create_eval_config( assert ( config.model.properties["model_provider"] == valid_eval_config_request.provider ) - assert config.prompt.prompt == "base prompt" - assert config.prompt.chain_of_thought_instructions == "cot prompt" assert config.properties["eval_steps"][0] == "step1" assert config.properties["eval_steps"][1] == "step2" @@ -317,7 +366,6 @@ def test_get_eval_configs( assert config["config_type"] == mock_eval_config.config_type assert config["properties"] == mock_eval_config.properties assert config["model"]["type"] == mock_eval_config.model.type - assert isinstance(config["prompt"], dict) mock_eval_from_id.assert_called_once_with("project1", "task1", "eval1") diff --git a/app/web_ui/src/lib/api_schema.d.ts b/app/web_ui/src/lib/api_schema.d.ts index 2e44b7d3..3eb9417b 100644 --- a/app/web_ui/src/lib/api_schema.d.ts +++ b/app/web_ui/src/lib/api_schema.d.ts @@ -814,6 +814,40 @@ export interface paths { export type webhooks = Record; export interface components { schemas: { + /** ApiPrompt */ + ApiPrompt: { + /** + * Name + * @description A name for this entity. + */ + name: string; + /** + * Long Name + * @description A more detailed name for the prompt, usually incorporating the source of the prompt. + */ + long_name?: string | null; + /** + * Generator Id + * @description The id of the generator that created this prompt. + */ + generator_id?: string | null; + /** + * Prompt + * @description The prompt for the task. 
+ */ + prompt: string; + /** + * Chain Of Thought Instructions + * @description Instructions for the model 'thinking' about the requirement prior to answering. Used for chain of thought style prompting. COT will not be used unless this is provided. + */ + chain_of_thought_instructions?: string | null; + /** Id */ + id: string; + /** Created At */ + created_at?: string | null; + /** Created By */ + created_by?: string | null; + }; /** AvailableModels */ AvailableModels: { /** Provider Name */ @@ -835,6 +869,11 @@ export interface components { * @description A name for this entity. */ name: string; + /** + * Long Name + * @description A more detailed name for the prompt, usually incorporating the source of the prompt. + */ + long_name?: string | null; /** * Generator Id * @description The id of the generator that created this prompt. @@ -1256,8 +1295,6 @@ export interface components { * @default {} */ properties: Record; - /** @description The prompt to use for this eval config. Both when running the task to generate outputs to evaluate and when explaining to the eval model what the goal of the task was. This is a frozen prompt, so this eval config is consistent over time (for example, if the user selects multi-shot prompting, this saves that dynamic prompt at the point the eval config is created). Freezing the prompt ensures consistent evals. */ - prompt: components["schemas"]["BasePrompt"]; /** Model Type */ readonly model_type: string; }; @@ -1658,6 +1695,11 @@ export interface components { * @description A name for this entity. */ name: string; + /** + * Long Name + * @description A more detailed name for the prompt, usually incorporating the source of the prompt. + */ + long_name?: string | null; /** * Generator Id * @description The id of the generator that created this prompt. @@ -1726,7 +1768,7 @@ export interface components { /** Generators */ generators: components["schemas"]["PromptGenerator"][]; /** Prompts */ - prompts: components["schemas"]["Prompt"][]; + prompts: components["schemas"]["ApiPrompt"][]; }; /** ProviderModel */ ProviderModel: { @@ -2255,6 +2297,8 @@ export interface components { description?: string | null; /** @description The run config properties to use for this task run. */ run_config_properties: components["schemas"]["RunConfigProperties"]; + /** @description A prompt to use for run config. 
*/ + prompt?: components["schemas"]["BasePrompt"] | null; /** Model Type */ readonly model_type: string; }; diff --git a/app/web_ui/src/lib/stores.ts b/app/web_ui/src/lib/stores.ts index 5aefc889..a86dbe25 100644 --- a/app/web_ui/src/lib/stores.ts +++ b/app/web_ui/src/lib/stores.ts @@ -238,7 +238,7 @@ export function prompt_name_from_id(prompt_id: string): string { } if (!prompt_name) { prompt_name = get(current_task_prompts)?.prompts.find( - (prompt) => "id::" + prompt.id === prompt_id, + (prompt) => prompt.id === prompt_id, )?.name } if (!prompt_name) { diff --git a/app/web_ui/src/routes/(app)/evals/[project_id]/[task_id]/[eval_id]/+page.svelte b/app/web_ui/src/routes/(app)/evals/[project_id]/[task_id]/[eval_id]/+page.svelte index e8ede737..69d8746e 100644 --- a/app/web_ui/src/routes/(app)/evals/[project_id]/[task_id]/[eval_id]/+page.svelte +++ b/app/web_ui/src/routes/(app)/evals/[project_id]/[task_id]/[eval_id]/+page.svelte @@ -237,7 +237,6 @@ parts.push( model_name(eval_config.model.properties["model_name"], model_info), ) - parts.push(prompt_name_from_id(eval_config.prompt.name)) return eval_config.name + " — " + parts.join(", ") } @@ -317,11 +316,6 @@ eval_config.model.properties["model_provider"] + "", ), }) - // TODO remove this once we consolidate prompts - properties.push({ - name: "Prompt", - value: prompt_name_from_id(eval_config.prompt.name + ""), - }) return properties } @@ -658,9 +652,12 @@ )}
- {prompt_name_from_id( - task_run_config?.run_config_properties?.prompt_id, - )} + Prompt: + {task_run_config.prompt?.long_name || + task_run_config.prompt?.name || + prompt_name_from_id( + task_run_config?.run_config_properties?.prompt_id, + )}
{#if percent_complete}
import { page } from "$app/stores" - import { current_task, current_task_prompts } from "$lib/stores" + import { + current_task, + current_task_prompts, + prompt_name_from_id, + } from "$lib/stores" import AppPage from "../../../../../app_page.svelte" import Output from "../../../../../run/output.svelte" import { formatDate } from "$lib/utils/formatters" @@ -11,17 +15,22 @@ $: prompt_model = $current_task_prompts?.prompts.find( (prompt) => prompt.id === prompt_id, ) - let prompt_props = {} + let prompt_props: Record = {} $: { prompt_props = Object.fromEntries( Object.entries({ ID: prompt_model?.id, + Name: prompt_model?.name, + "Long Name": prompt_model?.long_name, "Created By": prompt_model?.created_by, - "Created At": formatDate(prompt_model?.created_at), + "Created At": formatDate(prompt_model?.created_at || undefined), "Chain of Thought": prompt_model?.chain_of_thought_instructions ? "Yes" : "No", - }).filter(([_, value]) => value !== undefined), + "Source Generator": prompt_model?.generator_id + ? prompt_name_from_id(prompt_model?.generator_id) + : undefined, + }).filter(([_, value]) => value !== undefined && value !== null), ) } @@ -29,9 +38,7 @@
{#if !$current_task_prompts}
@@ -55,14 +62,16 @@ {/if}
-
+
Details
{#each Object.entries(prompt_props) as [key, value]}
{key}
-
+
{value}
{/each} diff --git a/app/web_ui/src/routes/(app)/run/prompt_type_selector.svelte b/app/web_ui/src/routes/(app)/run/prompt_type_selector.svelte index 1c222d3f..3b310ccd 100644 --- a/app/web_ui/src/routes/(app)/run/prompt_type_selector.svelte +++ b/app/web_ui/src/routes/(app)/run/prompt_type_selector.svelte @@ -49,7 +49,7 @@ if (prompt.chain_of_thought_instructions && exclude_cot) { continue } - static_prompts.push(["id::" + prompt.id, prompt.name]) + static_prompts.push([prompt.id, prompt.name]) } if (static_prompts.length > 0) { grouped_options.push(["Saved Prompts", static_prompts]) diff --git a/libs/core/kiln_ai/adapters/eval/g_eval.py b/libs/core/kiln_ai/adapters/eval/g_eval.py index f0a12d02..75ffed12 100644 --- a/libs/core/kiln_ai/adapters/eval/g_eval.py +++ b/libs/core/kiln_ai/adapters/eval/g_eval.py @@ -4,7 +4,7 @@ from kiln_ai.adapters.adapter_registry import adapter_for_task from kiln_ai.adapters.eval.base_eval import BaseEval from kiln_ai.adapters.model_adapters.base_adapter import AdapterConfig, RunOutput -from kiln_ai.adapters.prompt_builders import PromptGenerators +from kiln_ai.adapters.prompt_builders import PromptGenerators, prompt_builder_from_id from kiln_ai.datamodel import Project, Task, TaskRun from kiln_ai.datamodel.eval import Eval, EvalConfig, EvalConfigType, EvalScores from kiln_ai.datamodel.task import RunConfig @@ -30,15 +30,25 @@ class GEvalTask(Task, parent_of={}): Note G-Eval implements both G-Eval and LLM as Judge as they are very similar. """ - def __init__(self, eval_config: EvalConfig): + def __init__(self, eval_config: EvalConfig, run_config: RunConfig): tmp_project = Project(name="GEval") + eval = eval_config.parent_eval() + if not eval: + raise ValueError("Eval config must have a parent eval") + task = eval.parent_task() + if not task: + raise ValueError("Eval must have a parent task") + + prompt_builder = prompt_builder_from_id(run_config.prompt_id, task) + base_prompt = prompt_builder.build_base_prompt() + system_instruction = f""" Your job to evaluate a model's performance on a task. Blocks will be marked with tags. 
The task the model was given is as follows: -{eval_config.prompt.prompt} +{base_prompt} """ @@ -88,7 +98,7 @@ def __init__(self, eval_config: EvalConfig, run_config: RunConfig): super().__init__(eval_config, run_config) - self.geval_task = GEvalTask(eval_config) + self.geval_task = GEvalTask(eval_config, run_config) async def run_eval(self, task_run: TaskRun) -> EvalScores: """ diff --git a/libs/core/kiln_ai/adapters/eval/test_eval_runner.py b/libs/core/kiln_ai/adapters/eval/test_eval_runner.py index 8aa47ec2..62dc57a2 100644 --- a/libs/core/kiln_ai/adapters/eval/test_eval_runner.py +++ b/libs/core/kiln_ai/adapters/eval/test_eval_runner.py @@ -89,7 +89,10 @@ def mock_run_config( run_config_properties=RunConfigProperties( model_name="gpt-4", model_provider_name="openai", - prompt_id="simple_prompt_builder", + prompt=BasePrompt( + name="test", + prompt="test", + ), ), parent=mock_task, ) @@ -234,7 +237,10 @@ def test_collect_tasks_multiple_run_configs( run_config_properties=RunConfigProperties( model_name="gpt-3.5", model_provider_name="openai", - prompt_id="simple_prompt_builder", + prompt=BasePrompt( + name="test", + prompt="test", + ), ), parent=mock_task, ) diff --git a/libs/core/kiln_ai/adapters/eval/test_g_eval.py b/libs/core/kiln_ai/adapters/eval/test_g_eval.py index e24fcb8b..815e9457 100644 --- a/libs/core/kiln_ai/adapters/eval/test_g_eval.py +++ b/libs/core/kiln_ai/adapters/eval/test_g_eval.py @@ -82,11 +82,6 @@ def test_eval_config(test_task): "adapter_name": "openai_compatible", }, ), - prompt=BasePrompt( - # TODO ensure it's called with the frozen prompt - name="Joke Generator Frozen Prompt", - prompt=test_task.instruction, - ), properties={ "eval_steps": [ "Is the joke funny?", @@ -106,8 +101,11 @@ def test_run_config(test_task): return RunConfig( model_name="llama_3_1_8b", model_provider_name="groq", - prompt_id="simple_prompt_builder", task=test_task, + prompt=BasePrompt( + name="test", + prompt="test", + ), ) diff --git a/libs/core/kiln_ai/adapters/prompt_builders.py b/libs/core/kiln_ai/adapters/prompt_builders.py index 68f58c94..b54d4832 100644 --- a/libs/core/kiln_ai/adapters/prompt_builders.py +++ b/libs/core/kiln_ai/adapters/prompt_builders.py @@ -1,9 +1,6 @@ import json from abc import ABCMeta, abstractmethod -from enum import Enum -from typing import Annotated, Dict - -from pydantic import AfterValidator +from typing import Dict from kiln_ai.datamodel import PromptGenerators, PromptId, Task, TaskRun from kiln_ai.utils.exhaustive_error import raise_exhaustive_enum_error @@ -292,48 +289,44 @@ def chain_of_thought_prompt(self) -> str | None: return self.prompt_model.chain_of_thought_instructions -class EvalPromptBuilder(BasePromptBuilder): - """A prompt builder that looks up a static prompt in an eval config.""" +class TaskRunConfigPromptBuilder(BasePromptBuilder): + """A prompt builder that looks up a static prompt in a task run config.""" - def __init__(self, task: Task, eval_config_prompt_id: str): - parts = eval_config_prompt_id.split("::") - if len(parts) != 5: + def __init__(self, task: Task, run_config_prompt_id: str): + parts = run_config_prompt_id.split("::") + if len(parts) != 4: raise ValueError( - f"Invalid eval prompt ID: {eval_config_prompt_id}. Expected format: 'eval_prompt::[project_id]::[task_id]::[eval_id]::[eval_config_id]'." + f"Invalid task run config prompt ID: {run_config_prompt_id}. Expected format: 'task_run_config::[project_id]::[task_id]::[run_config_id]'." 
) task_id = parts[2] if task_id != task.id: raise ValueError( - f"Eval prompt ID: {eval_config_prompt_id}. Task ID mismatch. Expected: {task.id}, got: {task_id}." - ) - - eval_id = parts[3] - eval = next( - (eval for eval in task.evals(readonly=True) if eval.id == eval_id), - None, - ) - if not eval: - raise ValueError( - f"Eval ID not found: {eval_id} for prompt id {eval_config_prompt_id}" + f"Task run config prompt ID: {run_config_prompt_id}. Task ID mismatch. Expected: {task.id}, got: {task_id}." ) - eval_config_id = parts[4] - eval_config = next( + run_config_id = parts[3] + run_config = next( ( - eval_config - for eval_config in eval.configs(readonly=True) - if eval_config.id == eval_config_id + run_config + for run_config in task.run_configs(readonly=True) + if run_config.id == run_config_id ), None, ) - if not eval_config: + if not run_config: + raise ValueError( + f"Task run config ID not found: {run_config_id} for prompt id {run_config_prompt_id}" + ) + if run_config.prompt is None: raise ValueError( - f"Eval config ID not found: {eval_config_id} for prompt id {eval_config_prompt_id}" + f"Task run config ID {run_config_id} does not have a stored prompt. Used as prompt id {run_config_prompt_id}" ) - self.prompt_model = eval_config.prompt - self.id = eval_config_prompt_id + # Load the prompt from the model + self.prompt = run_config.prompt.prompt + self.cot_prompt = run_config.prompt.chain_of_thought_instructions + self.id = run_config_prompt_id super().__init__(task) @@ -341,10 +334,10 @@ def prompt_id(self) -> str | None: return self.id def build_base_prompt(self) -> str: - return self.prompt_model.prompt + return self.prompt def chain_of_thought_prompt(self) -> str | None: - return self.prompt_model.chain_of_thought_instructions + return self.cot_prompt class FineTunePromptBuilder(BasePromptBuilder): @@ -403,9 +396,10 @@ def prompt_builder_from_id(prompt_id: PromptId, task: Task) -> BasePromptBuilder prompt_id = prompt_id[4:] return SavedPromptBuilder(task, prompt_id) - # Eval prompts are prefixed with "eval_prompt::" - if prompt_id.startswith("eval_prompt::"): - return EvalPromptBuilder(task, prompt_id) + # Task run config prompts are prefixed with "task_run_config::" + # task_run_config::[project_id]::[task_id]::[run_config_id] + if prompt_id.startswith("task_run_config::"): + return TaskRunConfigPromptBuilder(task, prompt_id) # Fine-tune prompts are prefixed with "fine_tune_prompt::" if prompt_id.startswith("fine_tune_prompt::"): diff --git a/libs/core/kiln_ai/adapters/test_prompt_builders.py b/libs/core/kiln_ai/adapters/test_prompt_builders.py index bad1d1e4..43674375 100644 --- a/libs/core/kiln_ai/adapters/test_prompt_builders.py +++ b/libs/core/kiln_ai/adapters/test_prompt_builders.py @@ -1,14 +1,12 @@ import json import pytest -from pydantic import BaseModel, ValidationError from kiln_ai.adapters.model_adapters.base_adapter import BaseAdapter from kiln_ai.adapters.model_adapters.test_structured_output import ( build_structured_output_test_task, ) from kiln_ai.adapters.prompt_builders import ( - EvalPromptBuilder, FewShotChainOfThoughtPromptBuilder, FewShotPromptBuilder, FineTunePromptBuilder, @@ -18,6 +16,7 @@ SavedPromptBuilder, SimpleChainOfThoughtPromptBuilder, SimplePromptBuilder, + TaskRunConfigPromptBuilder, chain_of_thought_prompt, prompt_builder_from_id, ) @@ -36,7 +35,7 @@ TaskOutputRating, TaskRun, ) -from kiln_ai.datamodel.eval import Eval, EvalConfig, EvalConfigType, EvalOutputScore +from kiln_ai.datamodel.task import RunConfigProperties, Task, TaskRunConfig 
def test_simple_prompt_builder(tmp_path): @@ -589,107 +588,62 @@ def test_build_prompt_with_json_instructions(tmp_path): assert requirement.instruction in prompt_with_json -@pytest.fixture -def valid_eval_config_datasource(): - return DataSource( - type=DataSourceType.synthetic, - properties={ - "model_name": "gpt-4", - "model_provider": "openai", - "adapter_name": "openai_compatible", - }, - ) - - -def test_eval_prompt_builder(tmp_path, valid_eval_config_datasource): +def test_task_run_config_prompt_builder(tmp_path): task = build_test_task(tmp_path) - # Create an eval and eval config - eval = Eval( - name="test_eval", + run_config = TaskRunConfig( + name="test_run_config", parent=task, - eval_set_filter_id="tag::tag1", - eval_configs_filter_id="tag::tag2", - output_scores=[ - EvalOutputScore( - name="accuracy", - type="five_star", - ), - ], - ) - eval.save_to_file() - - eval_config = EvalConfig( - name="test_eval_config", - parent=eval, - config_type=EvalConfigType.g_eval, - model=valid_eval_config_datasource, + run_config_properties=RunConfigProperties( + model_name="gpt-4", + model_provider_name="openai", + prompt_id="simple_prompt_builder", + ), prompt=Prompt( - name="test_prompt", - prompt="test_eval_prompt", - chain_of_thought_instructions="Think carefully", + name="test prompt name", + prompt="test prompt content", + chain_of_thought_instructions="test step by step", ), - properties={"eval_steps": ["step1", "step2"]}, ) - eval_config.save_to_file() + run_config.save_to_file() # Construct the eval prompt ID - eval_prompt_id = ( - f"eval_prompt::{task.parent.id}::{task.id}::{eval.id}::{eval_config.id}" + run_config_prompt_id = ( + f"task_run_config::{task.parent.id}::{task.id}::{run_config.id}" ) - # Test successful creation, constructor and ID creation + # Test successful creation 2 ways: constructor and ID creation builders = [ - EvalPromptBuilder(task=task, eval_config_prompt_id=eval_prompt_id), - prompt_builder_from_id(eval_prompt_id, task), + TaskRunConfigPromptBuilder( + task=task, run_config_prompt_id=run_config_prompt_id + ), + prompt_builder_from_id(run_config_prompt_id, task), ] for builder in builders: assert ( - builder.build_prompt(include_json_instructions=False) == "test_eval_prompt" + builder.build_prompt(include_json_instructions=False) + == "test prompt content" ) - assert builder.chain_of_thought_prompt() == "Think carefully" - assert builder.prompt_id() == eval_prompt_id + assert builder.chain_of_thought_prompt() == "test step by step" + assert builder.prompt_id() == run_config_prompt_id - # test accessor - -def test_eval_prompt_builder_validation_errors(tmp_path): +def test_task_run_config_prompt_builder_validation_errors(tmp_path): task = build_test_task(tmp_path) # Test invalid format - with pytest.raises(ValueError, match="Invalid eval prompt ID"): - EvalPromptBuilder(task=task, eval_config_prompt_id="eval_prompt::wrong::format") + with pytest.raises(ValueError, match="Invalid task run config prompt ID"): + TaskRunConfigPromptBuilder( + task=task, run_config_prompt_id="task_run_config::wrong::format" + ) # Test task ID mismatch - wrong_task_id = f"eval_prompt::{task.parent.id}::wrong_task_id::eval_id::config_id" + wrong_task_id = f"task_run_config::{task.parent.id}::wrong_task_id::config_id" with pytest.raises(ValueError, match="Task ID mismatch"): - EvalPromptBuilder(task=task, eval_config_prompt_id=wrong_task_id) + TaskRunConfigPromptBuilder(task=task, run_config_prompt_id=wrong_task_id) # Test eval not found - nonexistent_eval = ( - 
f"eval_prompt::{task.parent.id}::{task.id}::nonexistent_eval::config_id" - ) - with pytest.raises(ValueError, match="Eval ID not found"): - EvalPromptBuilder(task=task, eval_config_prompt_id=nonexistent_eval) - - # Create eval but test config not found - eval = Eval( - name="test_eval", - parent=task, - eval_set_filter_id="tag::tag1", - eval_configs_filter_id="tag::tag2", - output_scores=[ - EvalOutputScore( - name="accuracy", - type="five_star", - ), - ], - ) - eval.save_to_file() - - nonexistent_config = ( - f"eval_prompt::{task.parent.id}::{task.id}::{eval.id}::nonexistent_config" - ) - with pytest.raises(ValueError, match="Eval config ID not found"): - EvalPromptBuilder(task=task, eval_config_prompt_id=nonexistent_config) + nonexistent_eval = f"task_run_config::{task.parent.id}::{task.id}::nonexistent_id" + with pytest.raises(ValueError, match="Task run config ID not found"): + TaskRunConfigPromptBuilder(task=task, run_config_prompt_id=nonexistent_eval) diff --git a/libs/core/kiln_ai/datamodel/eval.py b/libs/core/kiln_ai/datamodel/eval.py index 0bad43c2..6cfcc612 100644 --- a/libs/core/kiln_ai/datamodel/eval.py +++ b/libs/core/kiln_ai/datamodel/eval.py @@ -14,7 +14,6 @@ from kiln_ai.datamodel.datamodel_enums import TaskOutputRatingType from kiln_ai.datamodel.dataset_filters import DatasetFilterId from kiln_ai.datamodel.json_schema import string_to_json_key -from kiln_ai.datamodel.prompt import BasePrompt from kiln_ai.datamodel.task_output import DataSource, DataSourceType from kiln_ai.utils.exhaustive_error import raise_exhaustive_enum_error @@ -182,9 +181,6 @@ class EvalConfig(KilnParentedModel, KilnParentModel, parent_of={"runs": EvalRun} default={}, description="Properties to be used to execute the eval config. This is config_type specific and should serialize to a json dict.", ) - prompt: BasePrompt = Field( - description="The prompt to use for this eval config. Both when running the task to generate outputs to evaluate and when explaining to the eval model what the goal of the task was. This is a frozen prompt, so this eval config is consistent over time (for example, if the user selects multi-shot prompting, this saves that dynamic prompt at the point the eval config is created). Freezing the prompt ensures consistent evals." 
- ) def parent_eval(self) -> Union["Eval", None]: if self.parent is not None and self.parent.__class__.__name__ != "Eval": diff --git a/libs/core/kiln_ai/datamodel/prompt.py b/libs/core/kiln_ai/datamodel/prompt.py index 650712d9..3bcd44e6 100644 --- a/libs/core/kiln_ai/datamodel/prompt.py +++ b/libs/core/kiln_ai/datamodel/prompt.py @@ -11,6 +11,10 @@ class BasePrompt(BaseModel): """ name: str = NAME_FIELD + long_name: str | None = Field( + default=None, + description="A more detailed name for the prompt, usually incorporating the source of the prompt.", + ) generator_id: str | None = Field( + default=None, description="The id of the generator that created this prompt.", diff --git a/libs/core/kiln_ai/datamodel/prompt_id.py b/libs/core/kiln_ai/datamodel/prompt_id.py index 4285aa00..2d2c5f02 100644 --- a/libs/core/kiln_ai/datamodel/prompt_id.py +++ b/libs/core/kiln_ai/datamodel/prompt_id.py @@ -48,12 +48,12 @@ def _check_prompt_id(id: str) -> str: ) return id - if id.startswith("eval_prompt::"): - # check it had a eval_id after the :: -- 'project_id::task_id::eval_id::eval_config_id' + if id.startswith("task_run_config::"): + # check it has a task_run_config_id after the :: -- 'project_id::task_id::task_run_config_id' parts = id.split("::") - if len(parts) != 5: + if len(parts) != 4: raise ValueError( - f"Invalid eval prompt ID: {id}. Expected format: 'eval_prompt::[project_id]::[task_id]::[eval_id]'." + f"Invalid task run config prompt ID: {id}. Expected format: 'task_run_config::[project_id]::[task_id]::[task_run_config_id]'." ) return id @@ -67,3 +67,16 @@ def _check_prompt_id(id: str) -> str: return id raise ValueError(f"Invalid prompt ID: {id}") + + +def is_frozen_prompt(id: PromptId) -> bool: + """ + Check if the prompt ID is a frozen prompt. + """ + if id.startswith("id::"): + return True + if id.startswith("task_run_config::"): + return True + if id.startswith("fine_tune_prompt::"): + return True + return False diff --git a/libs/core/kiln_ai/datamodel/task.py b/libs/core/kiln_ai/datamodel/task.py index 52368868..af0bfb6d 100644 --- a/libs/core/kiln_ai/datamodel/task.py +++ b/libs/core/kiln_ai/datamodel/task.py @@ -16,7 +16,7 @@ from kiln_ai.datamodel.dataset_split import DatasetSplit from kiln_ai.datamodel.eval import Eval from kiln_ai.datamodel.json_schema import JsonObjectSchema, schema_from_json_str -from kiln_ai.datamodel.prompt import Prompt +from kiln_ai.datamodel.prompt import BasePrompt, Prompt from kiln_ai.datamodel.prompt_id import PromptGenerators, PromptId from kiln_ai.datamodel.task_run import TaskRun @@ -85,6 +85,13 @@ class TaskRunConfig(KilnParentedModel): run_config_properties: RunConfigProperties = Field( description="The run config properties to use for this task run." ) + # We usually want to persist the exact prompt, not just a prompt ID. + # We want the prompt to be perfectly consistent, and some prompt_ids are dynamic. + # The prompt ID in the run_config_properties likely points to this (although it's not required).
+ prompt: BasePrompt | None = Field( + default=None, + description="A prompt to use for run config.", + ) # Workaround to return typed parent without importing Task def parent_task(self) -> Union["Task", None]: diff --git a/libs/core/kiln_ai/datamodel/test_eval_model.py b/libs/core/kiln_ai/datamodel/test_eval_model.py index 44623539..911e9272 100644 --- a/libs/core/kiln_ai/datamodel/test_eval_model.py +++ b/libs/core/kiln_ai/datamodel/test_eval_model.py @@ -44,10 +44,6 @@ def valid_eval_config_data(): "adapter_name": "openai_compatible", }, ), - "prompt": BasePrompt( - name="Test Prompt", - prompt="Test prompt", - ), } @@ -64,15 +60,6 @@ def test_eval_config_valid(valid_eval_config): assert valid_eval_config.model.properties["model_name"] == "gpt-4" assert valid_eval_config.model.properties["model_provider"] == "openai" assert valid_eval_config.model.properties["adapter_name"] == "openai_compatible" - assert valid_eval_config.prompt.name == "Test Prompt" - assert valid_eval_config.prompt.prompt == "Test prompt" - - -def test_eval_config_missing_prompt(valid_eval_config): - with pytest.raises( - ValueError, match="Input should be a valid dictionary or instance of BasePromp" - ): - valid_eval_config.prompt = None def test_eval_config_missing_eval_steps(valid_eval_config): diff --git a/libs/core/kiln_ai/datamodel/test_prompt_id.py b/libs/core/kiln_ai/datamodel/test_prompt_id.py index 23cd1d3a..cf5d2326 100644 --- a/libs/core/kiln_ai/datamodel/test_prompt_id.py +++ b/libs/core/kiln_ai/datamodel/test_prompt_id.py @@ -5,6 +5,7 @@ PromptGenerators, PromptId, ) +from kiln_ai.datamodel.prompt_id import is_frozen_prompt # Test model to validate the PromptId type @@ -90,10 +91,10 @@ def test_prompt_generator_case_sensitivity(): @pytest.mark.parametrize( "valid_id", [ - "eval_prompt::project_123::task_456::eval_789::config_012", # Valid eval prompt ID + "task_run_config::project_123::task_456::config_123", # Valid task run config prompt ID ], ) -def test_valid_eval_prompt_id(valid_id): +def test_valid_task_run_config_prompt_id(valid_id): """Test that valid eval prompt IDs are accepted""" model = ModelTester(prompt_id=valid_id) assert model.prompt_id == valid_id @@ -102,13 +103,27 @@ def test_valid_eval_prompt_id(valid_id): @pytest.mark.parametrize( "invalid_id,expected_error", [ - ("eval_prompt::", "Invalid eval prompt ID"), - ("eval_prompt::p1::t1", "Invalid eval prompt ID"), - ("eval_prompt::p1::t1::e1", "Invalid eval prompt ID"), - ("eval_prompt::p1::t1::e1::c1::extra", "Invalid eval prompt ID"), + ("task_run_config::", "Invalid task run config prompt ID"), + ("task_run_config::p1", "Invalid task run config prompt ID"), + ("task_run_config::p1::t1", "Invalid task run config prompt ID"), + ("task_run_config::p1::t1::c1::extra", "Invalid task run config prompt ID"), ], ) def test_invalid_eval_prompt_id_format(invalid_id, expected_error): """Test that invalid eval prompt ID formats are rejected""" with pytest.raises(ValidationError, match=expected_error): ModelTester(prompt_id=invalid_id) + + +@pytest.mark.parametrize( + "id,should_be_frozen", + [ + ("simple_prompt_builder", False), + ("id::prompt_123", True), + ("task_run_config::p1::t1", True), + ("fine_tune_prompt::ft_123", True), + ], +) +def test_is_frozen_prompt(id, should_be_frozen): + """Test that the is_frozen_prompt function works""" + assert is_frozen_prompt(id) == should_be_frozen diff --git a/libs/server/kiln_server/prompt_api.py b/libs/server/kiln_server/prompt_api.py index 0b17cbb1..40c5a56c 100644 --- 
a/libs/server/kiln_server/prompt_api.py +++ b/libs/server/kiln_server/prompt_api.py @@ -1,10 +1,19 @@ +from datetime import datetime + from fastapi import FastAPI -from kiln_ai.datamodel import Prompt +from kiln_ai.datamodel import BasePrompt, Prompt, PromptId from pydantic import BaseModel from kiln_server.task_api import task_from_id +# This is a wrapper around the Prompt datamodel that adds an id field which represents the PromptID and not the data model ID. +class ApiPrompt(BasePrompt): + id: PromptId + created_at: datetime | None = None + created_by: str | None = None + + class PromptCreateRequest(BaseModel): name: str prompt: str @@ -21,7 +30,7 @@ class PromptGenerator(BaseModel): class PromptResponse(BaseModel): generators: list[PromptGenerator] - prompts: list[Prompt] + prompts: list[ApiPrompt] def connect_prompt_api(app: FastAPI): @@ -43,9 +52,26 @@ async def create_prompt( async def get_prompts(project_id: str, task_id: str) -> PromptResponse: parent_task = task_from_id(project_id, task_id) + prompts: list[ApiPrompt] = [] + for prompt in parent_task.prompts(): + properties = prompt.model_dump(exclude={"id"}) + prompts.append(ApiPrompt(id=f"id::{prompt.id}", **properties)) + + # Add any task run config prompts to the list + task_run_configs = parent_task.run_configs() + for task_run_config in task_run_configs: + if task_run_config.prompt: + properties = task_run_config.prompt.model_dump(exclude={"id"}) + prompts.append( + ApiPrompt( + id=f"task_run_config::{project_id}::{task_id}::{task_run_config.id}", + **properties, + ) + ) + return PromptResponse( generators=_prompt_generators, - prompts=parent_task.prompts(), + prompts=prompts, ) From e3a6a27a96825d2f508b1dd307acfa4838e51e34 Mon Sep 17 00:00:00 2001 From: scosman Date: Sun, 23 Feb 2025 16:00:15 -0500 Subject: [PATCH 2/3] CR feedback --- app/desktop/studio_server/eval_api.py | 10 ++++------ app/desktop/studio_server/test_eval_api.py | 4 +++- .../kiln_ai/adapters/eval/test_eval_runner.py | 14 ++------------ libs/core/kiln_ai/adapters/eval/test_g_eval.py | 5 +---- .../model_adapters/langchain_adapters.py | 4 +--- .../model_adapters/openai_model_adapter.py | 6 ++---- .../model_adapters/test_base_adapter.py | 11 +++++++++-- .../kiln_ai/adapters/test_prompt_builders.py | 4 +--- libs/core/kiln_ai/datamodel/task.py | 1 - libs/core/kiln_ai/datamodel/test_basemodel.py | 1 + libs/core/kiln_ai/datamodel/test_task.py | 18 +++++++++++++++--- 11 files changed, 39 insertions(+), 39 deletions(-) diff --git a/app/desktop/studio_server/eval_api.py b/app/desktop/studio_server/eval_api.py index e8fc3a68..dce33a6f 100644 --- a/app/desktop/studio_server/eval_api.py +++ b/app/desktop/studio_server/eval_api.py @@ -177,15 +177,13 @@ async def create_task_run_config( detail="Task must have a parent project.", ) - froze_prompt = False - prompt: BasePrompt | None = None + frozen_prompt: BasePrompt | None = None if not is_frozen_prompt(request.prompt_id): # For dynamic prompts, we "freeze" a copy of this prompt into the task run config so we don't accidentally invalidate evals if the user changes something that impacts the prompt (example: changing data for multi-shot, or changing task for basic-prompt) # We then point the task_run_config.run_config_properties.prompt_id to this new frozen prompt - froze_prompt = True prompt_builder = prompt_builder_from_id(request.prompt_id, task) prompt_name = generate_memorable_name() - prompt = BasePrompt( + frozen_prompt = BasePrompt( name=prompt_name, long_name=prompt_name + " (frozen prompt from '" + request.prompt_id + "')", generator_id=request.prompt_id, prompt=prompt_builder.build_base_prompt(), chain_of_thought_instructions=prompt_builder.chain_of_thought_prompt(), ) @@ -205,9 +203,9 @@
async def create_task_run_config( model_provider_name=request.model_provider_name, prompt_id=request.prompt_id, ), - prompt=prompt, + prompt=frozen_prompt, ) - if froze_prompt: + if frozen_prompt is not None: # Set after, because the ID isn't known until the TaskRunConfig is created task_run_config.run_config_properties.prompt_id = ( f"task_run_config::{parent_project.id}::{task.id}::{task_run_config.id}" diff --git a/app/desktop/studio_server/test_eval_api.py b/app/desktop/studio_server/test_eval_api.py index adbf3690..d6b53df5 100644 --- a/app/desktop/studio_server/test_eval_api.py +++ b/app/desktop/studio_server/test_eval_api.py @@ -215,7 +215,9 @@ async def test_create_evaluator( @pytest.mark.asyncio -async def test_create_task_run_config(client, mock_task_from_id, mock_task): +async def test_create_task_run_config_with_freezing( + client, mock_task_from_id, mock_task +): mock_task_from_id.return_value = mock_task with ( diff --git a/libs/core/kiln_ai/adapters/eval/test_eval_runner.py b/libs/core/kiln_ai/adapters/eval/test_eval_runner.py index 62dc57a2..8c333f22 100644 --- a/libs/core/kiln_ai/adapters/eval/test_eval_runner.py +++ b/libs/core/kiln_ai/adapters/eval/test_eval_runner.py @@ -67,10 +67,6 @@ def mock_eval_config(mock_eval, data_source): name="test", model=data_source, parent=mock_eval, - prompt=BasePrompt( - name="test", - prompt="test", - ), properties={ "eval_steps": ["step1", "step2", "step3"], }, @@ -89,10 +85,7 @@ def mock_run_config( run_config_properties=RunConfigProperties( model_name="gpt-4", model_provider_name="openai", - prompt=BasePrompt( - name="test", - prompt="test", - ), + prompt_id="simple_prompt_builder", ), parent=mock_task, ) @@ -237,10 +230,7 @@ def test_collect_tasks_multiple_run_configs( run_config_properties=RunConfigProperties( model_name="gpt-3.5", model_provider_name="openai", - prompt=BasePrompt( - name="test", - prompt="test", - ), + prompt_id="simple_prompt_builder", ), parent=mock_task, ) diff --git a/libs/core/kiln_ai/adapters/eval/test_g_eval.py b/libs/core/kiln_ai/adapters/eval/test_g_eval.py index 815e9457..3e21fda4 100644 --- a/libs/core/kiln_ai/adapters/eval/test_g_eval.py +++ b/libs/core/kiln_ai/adapters/eval/test_g_eval.py @@ -102,10 +102,7 @@ def test_run_config(test_task): model_name="llama_3_1_8b", model_provider_name="groq", task=test_task, - prompt=BasePrompt( - name="test", - prompt="test", - ), + prompt_id="simple_prompt_builder", ) diff --git a/libs/core/kiln_ai/adapters/model_adapters/langchain_adapters.py b/libs/core/kiln_ai/adapters/model_adapters/langchain_adapters.py index e9896c69..79d9906e 100644 --- a/libs/core/kiln_ai/adapters/model_adapters/langchain_adapters.py +++ b/libs/core/kiln_ai/adapters/model_adapters/langchain_adapters.py @@ -84,11 +84,9 @@ def __init__( task=kiln_task, model_name=model_name, model_provider_name=provider, + prompt_id=prompt_id or datamodel.PromptGenerators.SIMPLE, ) - if prompt_id is not None: - run_config.prompt_id = prompt_id - super().__init__( run_config=run_config, tags=tags, diff --git a/libs/core/kiln_ai/adapters/model_adapters/openai_model_adapter.py b/libs/core/kiln_ai/adapters/model_adapters/openai_model_adapter.py index d5edcba5..94ec18d5 100644 --- a/libs/core/kiln_ai/adapters/model_adapters/openai_model_adapter.py +++ b/libs/core/kiln_ai/adapters/model_adapters/openai_model_adapter.py @@ -20,7 +20,7 @@ OpenAICompatibleConfig, ) from kiln_ai.adapters.parsers.json_parser import parse_json_string -from kiln_ai.datamodel import PromptId +from kiln_ai.datamodel import PromptGenerators, 
PromptId from kiln_ai.datamodel.task import RunConfig from kiln_ai.utils.exhaustive_error import raise_exhaustive_enum_error @@ -45,11 +45,9 @@ def __init__( task=kiln_task, model_name=config.model_name, model_provider_name=config.provider_name, + prompt_id=prompt_id or PromptGenerators.SIMPLE, ) - if prompt_id is not None: - run_config.prompt_id = prompt_id - super().__init__( run_config=run_config, tags=tags, diff --git a/libs/core/kiln_ai/adapters/model_adapters/test_base_adapter.py b/libs/core/kiln_ai/adapters/model_adapters/test_base_adapter.py index 3628fc72..8160294b 100644 --- a/libs/core/kiln_ai/adapters/model_adapters/test_base_adapter.py +++ b/libs/core/kiln_ai/adapters/model_adapters/test_base_adapter.py @@ -37,6 +37,7 @@ def adapter(base_task): task=base_task, model_name="test_model", model_provider_name="test_provider", + prompt_id="simple_prompt_builder", ), ) @@ -84,7 +85,10 @@ async def test_model_provider_missing_names(base_task): # Test with missing model name adapter = MockAdapter( run_config=RunConfig( - task=base_task, model_name="", model_provider_name="test_provider" + task=base_task, + model_name="", + model_provider_name="", + prompt_id="simple_prompt_builder", ), ) with pytest.raises( @@ -95,7 +99,10 @@ async def test_model_provider_missing_names(base_task): # Test with missing provider name adapter = MockAdapter( run_config=RunConfig( - task=base_task, model_name="test_model", model_provider_name="" + task=base_task, + model_name="test_model", + model_provider_name="", + prompt_id="simple_prompt_builder", ), ) with pytest.raises( diff --git a/libs/core/kiln_ai/adapters/test_prompt_builders.py b/libs/core/kiln_ai/adapters/test_prompt_builders.py index 43674375..d95bc7d8 100644 --- a/libs/core/kiln_ai/adapters/test_prompt_builders.py +++ b/libs/core/kiln_ai/adapters/test_prompt_builders.py @@ -28,14 +28,12 @@ FinetuneDataStrategy, Project, Prompt, - PromptGenerators, - PromptId, Task, TaskOutput, TaskOutputRating, TaskRun, ) -from kiln_ai.datamodel.task import RunConfigProperties, Task, TaskRunConfig +from kiln_ai.datamodel.task import RunConfigProperties, TaskRunConfig def test_simple_prompt_builder(tmp_path): diff --git a/libs/core/kiln_ai/datamodel/task.py b/libs/core/kiln_ai/datamodel/task.py index af0bfb6d..87c63b8c 100644 --- a/libs/core/kiln_ai/datamodel/task.py +++ b/libs/core/kiln_ai/datamodel/task.py @@ -53,7 +53,6 @@ class RunConfigProperties(BaseModel): ) prompt_id: PromptId = Field( description="The prompt to use for this run config. 
Defaults to building a simple prompt from the task if not provided.", - default=PromptGenerators.SIMPLE, ) diff --git a/libs/core/kiln_ai/datamodel/test_basemodel.py b/libs/core/kiln_ai/datamodel/test_basemodel.py index d93de053..de33f2df 100644 --- a/libs/core/kiln_ai/datamodel/test_basemodel.py +++ b/libs/core/kiln_ai/datamodel/test_basemodel.py @@ -501,6 +501,7 @@ def adapter(base_task): task=base_task, model_name="test_model", model_provider_name="test_provider", + prompt_id="simple_prompt_builder", ), ) diff --git a/libs/core/kiln_ai/datamodel/test_task.py b/libs/core/kiln_ai/datamodel/test_task.py index 333ef733..b60bd51e 100644 --- a/libs/core/kiln_ai/datamodel/test_task.py +++ b/libs/core/kiln_ai/datamodel/test_task.py @@ -8,7 +8,12 @@ def test_runconfig_valid_creation(): task = Task(id="task1", name="Test Task", instruction="Do something") - config = RunConfig(task=task, model_name="gpt-4", model_provider_name="openai") + config = RunConfig( + task=task, + model_name="gpt-4", + model_provider_name="openai", + prompt_id=PromptGenerators.SIMPLE, + ) assert config.task == task assert config.model_name == "gpt-4" @@ -21,10 +26,13 @@ def test_runconfig_missing_required_fields(): RunConfig() errors = exc_info.value.errors() - assert len(errors) == 3 # task, model_name, and model_provider_name are required + assert ( + len(errors) == 4 + ) # task, model_name, model_provider_name, and prompt_id are required assert any(error["loc"][0] == "task" for error in errors) assert any(error["loc"][0] == "model_name" for error in errors) assert any(error["loc"][0] == "model_provider_name" for error in errors) + assert any(error["loc"][0] == "prompt_id" for error in errors) def test_runconfig_custom_prompt_id(): @@ -47,7 +55,11 @@ def sample_task(): @pytest.fixture def sample_run_config_props(sample_task): - return RunConfigProperties(model_name="gpt-4", model_provider_name="openai") + return RunConfigProperties( + model_name="gpt-4", + model_provider_name="openai", + prompt_id=PromptGenerators.SIMPLE, + ) def test_task_run_config_valid_creation(sample_task, sample_run_config_props): From a46b94224c957688762a55d15aac13339ec7eedd Mon Sep 17 00:00:00 2001 From: scosman Date: Sun, 23 Feb 2025 16:24:22 -0500 Subject: [PATCH 3/3] improve comment --- libs/core/kiln_ai/datamodel/task.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libs/core/kiln_ai/datamodel/task.py b/libs/core/kiln_ai/datamodel/task.py index 87c63b8c..29f72e4e 100644 --- a/libs/core/kiln_ai/datamodel/task.py +++ b/libs/core/kiln_ai/datamodel/task.py @@ -84,9 +84,9 @@ class TaskRunConfig(KilnParentedModel): run_config_properties: RunConfigProperties = Field( description="The run config properties to use for this task run." ) - # We usually want to persist the exact prompt, not just a prompt ID. - # We want the prompt to be perfectly consistent, and some prompt_ids are dynamic. - # The prompt ID in the run_config_properties likely points to this (although it's not required). + # The prompt_id in the run_config_properties is the prompt ID to use for this task run. + # However, we want the prompt to be perfectly consistent, and some prompt_ids are dynamic. + # If we need to "freeze" a prompt, we can do so here (then point the prompt_id to this frozen prompt). prompt: BasePrompt | None = Field( default=None, description="A prompt to use for run config.",