Commit c2ade0b

Merge pull request #235 from Kiln-AI/main

Update docs
scosman authored Mar 5, 2025
2 parents d649fdc + 2b985d4
Showing 47 changed files with 1,120 additions and 144 deletions.
6 changes: 6 additions & 0 deletions .github/workflows/windows_release_build.yml
@@ -73,6 +73,12 @@ jobs:
- name: Copy Windows Installer
run: cp ./app/desktop/Output/kilnsetup.exe ./app/desktop/build/dist/Kiln.Windows.Installer.exe

- name: Checksums (Windows Signed Installer)
if: runner.os == 'Windows'
run: |
certutil -hashfile ./app/desktop/build/dist/Kiln.Windows.Installer.exe SHA256
certutil -hashfile ./app/desktop/build/dist/Kiln/Kiln.exe SHA256
- name: Upload Build
uses: actions/upload-artifact@v4
with:
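
A side note, not part of the commit: the two certutil steps print SHA256 digests into the CI log so a released installer can be verified after download. A minimal verification sketch using only Python's standard library (the file path argument is whatever you downloaded):

import hashlib
import sys

def sha256_of(path: str) -> str:
    # Hash the file in 1 MiB chunks so a large installer never sits fully in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

if __name__ == "__main__":
    # Compare this output to the digest certutil printed in the build log.
    print(sha256_of(sys.argv[1]))
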
3 changes: 2 additions & 1 deletion README.md
@@ -10,7 +10,8 @@
<p align="center">
<a href="https://docs.getkiln.ai/docs/fine-tuning-guide"><strong>Fine Tuning</strong></a> •
<a href="https://docs.getkiln.ai/docs/synthetic-data-generation"><strong>Synthetic Data Generation</strong></a> •
<a href="https://docs.getkiln.ai/docs/collaboration"><strong>Dataset Collaboration</strong></a> •
<a href="https://docs.getkiln.ai/docs/evaluations"><strong>Evals</strong></a> •
<a href="https://docs.getkiln.ai/docs/collaboration"><strong>Collaboration</strong></a> •
<a href="https://docs.getkiln.ai"><strong>Docs</strong></a>
</p>

20 changes: 20 additions & 0 deletions app/desktop/studio_server/eval_api.py
@@ -162,6 +162,11 @@ class EvalConfigCompareSummary(BaseModel):
not_rated_count: int


class UpdateEvalRequest(BaseModel):
name: str
description: str | None = None


def dataset_ids_in_filter(task: Task, filter_id: DatasetFilterId) -> Set[ID_TYPE]:
    # Fetch all the dataset item IDs in a filter
filter = dataset_filter_from_id(filter_id)
@@ -253,6 +258,21 @@ async def get_task_run_configs(
async def get_eval(project_id: str, task_id: str, eval_id: str) -> Eval:
return eval_from_id(project_id, task_id, eval_id)

@app.patch("/api/projects/{project_id}/tasks/{task_id}/eval/{eval_id}")
async def update_eval(
project_id: str, task_id: str, eval_id: str, request: UpdateEvalRequest
) -> Eval:
eval = eval_from_id(project_id, task_id, eval_id)
eval.name = request.name
eval.description = request.description
eval.save_to_file()
return eval

@app.delete("/api/projects/{project_id}/tasks/{task_id}/eval/{eval_id}")
async def delete_eval(project_id: str, task_id: str, eval_id: str) -> None:
eval = eval_from_id(project_id, task_id, eval_id)
eval.delete()

@app.get("/api/projects/{project_id}/tasks/{task_id}/evals")
async def get_evals(project_id: str, task_id: str) -> list[Eval]:
task = task_from_id(project_id, task_id)
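
Together, the new PATCH and DELETE routes give evals a rename/delete lifecycle alongside the existing GET. A hedged usage sketch with the requests library; the server address and the project/task/eval IDs are placeholders, not values from this commit:

import requests

BASE = "http://localhost:8757"  # illustrative studio server address
eval_url = f"{BASE}/api/projects/project1/tasks/task1/eval/eval1"

# Rename the eval; description is optional and may be null.
resp = requests.patch(
    eval_url, json={"name": "Updated Eval Name", "description": "Updated Description"}
)
resp.raise_for_status()
assert resp.json()["name"] == "Updated Eval Name"

# Delete the eval; a 2xx response with no body indicates success.
requests.delete(eval_url).raise_for_status()
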
39 changes: 32 additions & 7 deletions app/desktop/studio_server/finetune_api.py
@@ -104,6 +104,24 @@ class FinetuneWithStatus(BaseModel):
status: FineTuneStatus


class UpdateFinetuneRequest(BaseModel):
"""Request to update a finetune"""

name: str
description: str | None = None


def finetune_from_id(project_id: str, task_id: str, finetune_id: str) -> Finetune:
task = task_from_id(project_id, task_id)
finetune = Finetune.from_id_and_parent_path(finetune_id, task.path)
if finetune is None:
raise HTTPException(
status_code=404,
detail=f"Finetune with ID '{finetune_id}' not found",
)
return finetune


def connect_fine_tune_api(app: FastAPI):
@app.get("/api/projects/{project_id}/tasks/{task_id}/dataset_splits")
async def dataset_splits(project_id: str, task_id: str) -> list[DatasetSplit]:
@@ -136,13 +154,7 @@ async def finetunes(
async def finetune(
project_id: str, task_id: str, finetune_id: str
) -> FinetuneWithStatus:
task = task_from_id(project_id, task_id)
finetune = Finetune.from_id_and_parent_path(finetune_id, task.path)
if finetune is None:
raise HTTPException(
status_code=404,
detail=f"Finetune with ID '{finetune_id}' not found",
)
finetune = finetune_from_id(project_id, task_id, finetune_id)
if finetune.provider not in finetune_registry:
raise HTTPException(
status_code=400,
@@ -152,6 +164,19 @@ async def finetune(
status = await finetune_adapter(finetune).status()
return FinetuneWithStatus(finetune=finetune, status=status)

@app.patch("/api/projects/{project_id}/tasks/{task_id}/finetunes/{finetune_id}")
async def update_finetune(
project_id: str,
task_id: str,
finetune_id: str,
request: UpdateFinetuneRequest,
) -> Finetune:
finetune = finetune_from_id(project_id, task_id, finetune_id)
finetune.name = request.name
finetune.description = request.description
finetune.save_to_file()
return finetune

@app.get("/api/finetune_providers")
async def finetune_providers() -> list[FinetuneProvider]:
provider_models: dict[ModelProviderName, list[FinetuneProviderModel]] = {}
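
The extracted finetune_from_id helper centralizes the lookup-or-404 logic that the GET handler previously inlined, so the new PATCH route reuses it. A hedged sketch of exercising that route with FastAPI's TestClient — the app wiring is illustrative, and the placeholder IDs would need a real project, task, and finetune on disk:

from fastapi import FastAPI
from fastapi.testclient import TestClient

from app.desktop.studio_server.finetune_api import connect_fine_tune_api

app = FastAPI()
connect_fine_tune_api(app)
client = TestClient(app)

# Only the mutable metadata fields are accepted; description may be null.
response = client.patch(
    "/api/projects/project1/tasks/task1/finetunes/ft1",
    json={"name": "Updated Finetune Name", "description": None},
)
print(response.status_code, response.json())
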
7 changes: 7 additions & 0 deletions app/desktop/studio_server/provider_api.py
@@ -63,6 +63,13 @@ async def connect_ollama(custom_ollama_url: str | None = None) -> OllamaConnection:
detail="Failed to parse Ollama data - unsure which models are installed.",
)

# attempt to get the Ollama version
try:
version_body = requests.get(base_url + "/api/version", timeout=5).json()
ollama_connection.version = version_body.get("version", None)
except Exception:
pass

# Save the custom Ollama URL if used to connect
if custom_ollama_url and custom_ollama_url != Config.shared().ollama_base_url:
Config.shared().save_setting("ollama_base_url", custom_ollama_url)
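
The version probe above is deliberately best-effort: connecting should not fail just because an older Ollama build lacks /api/version. A standalone sketch of the same probe, assuming Ollama's default local port:

import requests

def ollama_version(base_url: str = "http://localhost:11434") -> str | None:
    """Return the Ollama server version, or None if the probe fails."""
    try:
        version_body = requests.get(base_url + "/api/version", timeout=5).json()
        return version_body.get("version", None)
    except Exception:
        return None

print(ollama_version())  # e.g. "0.5.0", or None when Ollama isn't reachable
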
76 changes: 76 additions & 0 deletions app/desktop/studio_server/test_eval_api.py
@@ -1037,3 +1037,79 @@ async def test_set_current_eval_config(
# Verify the change persists by fetching the eval again
eval_from_disk = mock_task.evals()[0]
assert eval_from_disk.current_config_id == "eval_config1"


@pytest.mark.asyncio
async def test_update_eval(client, mock_task_from_id, mock_task, mock_eval):
"""Test updating an evaluation's name and description."""
mock_task_from_id.return_value = mock_task

# Get the eval before updating to verify the change
response = client.get("/api/projects/project1/tasks/task1/eval/eval1")
assert response.status_code == 200
eval_before = response.json()

# Verify initial values
assert eval_before["name"] == "Test Eval"
assert eval_before["description"] == "Test Description"

# Update the eval with new values
update_request = {"name": "Updated Eval Name", "description": "Updated Description"}

with patch("app.desktop.studio_server.eval_api.eval_from_id") as mock_eval_from_id:
mock_eval_from_id.return_value = mock_eval
response = client.patch(
"/api/projects/project1/tasks/task1/eval/eval1", json=update_request
)
assert response.status_code == 200
updated_eval = response.json()

# Verify the name and description were updated
assert updated_eval["name"] == "Updated Eval Name"
assert updated_eval["description"] == "Updated Description"
assert updated_eval["id"] == "eval1"

# Verify the change persists by checking the mock_eval object
assert mock_eval.name == "Updated Eval Name"
assert mock_eval.description == "Updated Description"

# load from disk and verify the change
eval_from_disk = mock_task.evals()[0]
assert eval_from_disk.name == "Updated Eval Name"
assert eval_from_disk.description == "Updated Description"


def test_delete_eval_success(client, mock_task_from_id, mock_eval, mock_task):
assert len(mock_task.evals()) == 1
# Set up the mock eval to be returned by eval_from_id
with patch("app.desktop.studio_server.eval_api.eval_from_id") as mock_eval_from_id:
mock_eval_from_id.return_value = mock_eval

# Make the delete request
response = client.delete("/api/projects/project1/tasks/task1/eval/eval1")

# Verify the response
assert response.status_code == 200

# Verify that eval_from_id was called with the correct parameters
mock_eval_from_id.assert_called_once_with("project1", "task1", "eval1")

# Verify that the eval was deleted
assert len(mock_task.evals()) == 0


def test_delete_eval_not_found(client):
# Set up the patch for eval_from_id to raise an HTTPException
with patch("app.desktop.studio_server.eval_api.eval_from_id") as mock_eval_from_id:
mock_eval_from_id.side_effect = HTTPException(
status_code=404, detail="Eval not found. ID: nonexistent_eval"
)

# Make the delete request
response = client.delete(
"/api/projects/project1/tasks/task1/eval/nonexistent_eval"
)

# Verify the response
assert response.status_code == 404
assert response.json()["detail"] == "Eval not found. ID: nonexistent_eval"
33 changes: 33 additions & 0 deletions app/desktop/studio_server/test_finetune_api.py
@@ -1029,3 +1029,36 @@ def test_thinking_instructions_default(mock_cot):

mock_cot.assert_called_once_with(task)
assert result == "Default COT instructions"


async def test_update_finetune(client, mock_task_from_id_disk_backed, test_task):
"""Test updating a finetune's name and description"""
# Get the original finetune to verify changes later
original_finetune = next(ft for ft in test_task.finetunes() if ft.id == "ft1")
original_name = original_finetune.name

# Prepare update data
update_data = {
"name": "Updated Finetune Name",
"description": "Updated finetune description",
}

# Send PATCH request
response = client.patch(
"/api/projects/project1/tasks/task1/finetunes/ft1", json=update_data
)

# Verify response
assert response.status_code == 200
updated_finetune = response.json()
assert updated_finetune["id"] == "ft1"
assert updated_finetune["name"] == "Updated Finetune Name"
assert updated_finetune["description"] == "Updated finetune description"

mock_task_from_id_disk_backed.assert_called_with("project1", "task1")

# Verify save_to_file was called by checking if the finetune in the task was updated
updated_task_finetune = next(ft for ft in test_task.finetunes() if ft.id == "ft1")
assert updated_task_finetune.name == "Updated Finetune Name"
assert updated_task_finetune.description == "Updated finetune description"
assert updated_task_finetune.name != original_name
12 changes: 10 additions & 2 deletions app/desktop/studio_server/test_provider_api.py
@@ -814,7 +814,10 @@ async def test_connect_ollama_uses_custom_url_when_provided():

await connect_ollama("http://custom-url:11434")

mock_get.assert_called_once_with("http://custom-url:11434/api/tags", timeout=5)
assert mock_get.call_count == 2
assert mock_get.call_args_list[0][0][0] == "http://custom-url:11434/api/tags"
assert mock_get.call_args_list[0][1] == {"timeout": 5}
assert mock_get.call_args_list[1][0][0] == "http://custom-url:11434/api/version"


@pytest.mark.asyncio
@@ -835,7 +838,12 @@ async def test_connect_ollama_uses_default_url_when_no_custom_url():

await connect_ollama(None)

mock_get.assert_called_once_with("http://default-url:11434/api/tags", timeout=5)
assert mock_get.call_count == 2
assert mock_get.call_args_list[0][0][0] == "http://default-url:11434/api/tags"
assert mock_get.call_args_list[0][1] == {"timeout": 5}
assert (
mock_get.call_args_list[1][0][0] == "http://default-url:11434/api/version"
)


@pytest.mark.asyncio
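
The indexing in these assertions follows unittest.mock's convention: call_args_list[i][0] is the positional-argument tuple of the i-th call and call_args_list[i][1] is its keyword-argument dict. A minimal self-contained illustration:

from unittest.mock import MagicMock

m = MagicMock()
m("http://example:11434/api/tags", timeout=5)
m("http://example:11434/api/version", timeout=5)

assert m.call_count == 2
assert m.call_args_list[0][0] == ("http://example:11434/api/tags",)  # positional args
assert m.call_args_list[0][1] == {"timeout": 5}  # keyword args
assert m.call_args_list[1][0][0] == "http://example:11434/api/version"
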
9 changes: 6 additions & 3 deletions app/desktop/studio_server/test_server.py
@@ -41,15 +41,18 @@ def test_strict_mode(client):

def test_connect_ollama_success(client):
with patch("requests.get") as mock_get:
mock_get.return_value.json.return_value = {
"models": [{"model": "phi3.5:latest"}]
}
# Set up mock to return different values on consecutive calls
mock_get.return_value.json.side_effect = [
{"models": [{"model": "phi3.5:latest"}]},
{"version": "0.5.0"},
]
response = client.get("/api/provider/ollama/connect")
assert response.status_code == 200
assert response.json() == {
"message": "Ollama connected",
"supported_models": ["phi3.5:latest"],
"untested_models": [],
"version": "0.5.0",
}

