Validation Outcome (#431)
* start validation outcome changes

* fix gather_reasks for non-structured output

* lint fixes

* more lint fixes

* start test fixes, debug types

* fix tests

* fix types with overloads

* fix tests

* lint fixes

* lint fixes

* fix tests

* lint fixes

* switch to generics for ValidationOutcome

* allow destructuring

* remove None from generic type

* init commit, changes to handle error in guard

* handle error a layer deeper

* update return in text2sql

* remove extra fx in validation outcome

* use error instead of exception

* remove print statements plus lint

* fix type

* fix typing while maintaining type hinting

* fix other type issues

* autoformat

* lint fixes

* test fixes

* autoformat

* type fixes

* lint fix

* unused import

* uncomment test parameters

* merge/type fixes

* guard: Allow calling parse with preconfigured num_reasks (#423)

* Cron nb (#425)

* install deps + pkg for nb runs

* lock nb runner to 3.11.x

* use cohere api key from environ

* ref env vars for cohere + openai

* fix bad merge in code originally from validators.py (#427)

* fix bad merge in code originally from validators.py

* lint fixes

* bump version (#428)

* update notebooks

* Setup passed password (#429)

* use pypi pass from env

* upgrade pip before installing deps

* pass pypi pass explicitly

* use environ competently

* list -> List

* lint and test fixes

* autoformat

* lint and type fix

* fix test

* fix llm_output type

* ' -> "

* fix tests

* lint fixes

* fix notebooks again

* fix docs

* debug

* validated_response -> validated_output

---------

Co-authored-by: Nefertiti  Rogers <[email protected]>
Co-authored-by: Nefertiti  Rogers <[email protected]>
Co-authored-by: rafael <[email protected]>
Co-authored-by: zsimjee <[email protected]>
5 people authored Nov 28, 2023
1 parent d8dfb2a commit 6e7a959
Showing 61 changed files with 1,567 additions and 1,511 deletions.
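The documentation and notebook diffs below all track the same API shift described in the commit message: `guard(...)` now returns a `ValidationOutcome`-style object that can still be destructured like the old tuple (hence the added `*rest`) and that exposes fields such as `validated_output` and `error`. A minimal sketch of the two calling patterns, assuming a guard configured as in the docs (the RAIL path, engine name, and keyword arguments here are illustrative only):

```python
import openai
import guardrails as gd

# A guard built from a RAIL spec, as in the docs; this path is illustrative.
guard = gd.Guard.from_rail("path/to/rail/spec.xml")

# Pattern 1: tuple-style destructuring still works; `*rest` absorbs any
# extra fields the returned outcome carries beyond the first two.
raw_llm_output, validated_output, *rest = guard(
    openai.Completion.create,
    engine="text-davinci-003",
    max_tokens=1024,
)

# Pattern 2: keep the whole outcome object and read its attributes,
# as the updated bug_free_python_code notebook does.
response = guard(
    openai.Completion.create,
    engine="text-davinci-003",
    max_tokens=1024,
)
if response.validated_output is not None:
    print(response.validated_output)
elif response.error is not None:
    print(response.error)
```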
2 changes: 1 addition & 1 deletion README.md
@@ -154,7 +154,7 @@ Call the `Guard` object with the LLM API call as the first argument and add any
import openai

# Wrap the OpenAI API call with the `guard` object
-raw_llm_output, validated_output = guard(
+raw_llm_output, validated_output, *rest = guard(
openai.Completion.create,
engine="text-davinci-003",
max_tokens=1024,
2 changes: 1 addition & 1 deletion docs/concepts/guard.md
@@ -19,7 +19,7 @@ from guardrails import Guard

guard = Guard.from_rail(...)

-raw_output, validated_output = guard(
+raw_output, validated_output, *rest = guard(
openai.Completion.create,
engine="text-davinci-003",
max_tokens=1024,
4 changes: 2 additions & 2 deletions docs/concepts/validators.md
@@ -35,7 +35,7 @@ Sometimes validators need addtional parameters that are only availble during run
```python
guard = Guard.from_rail("my_railspec.rail")

-raw_output, guarded_output = guard(
+raw_output, guarded_output, *rest = guard(
llm_api=openai.ChatCompletion.create,
model="gpt-3.5-turbo",
num_reasks=3,
@@ -134,7 +134,7 @@ ${guardrails.complete_json_suffix}

guard = Guard.from_rail_string(rail_string=rail_str)

-raw_output, guarded_output = guard(
+raw_output, guarded_output, *rest = guard(
llm_api=openai.ChatCompletion.create,
model="gpt-3.5-turbo"
)
6 changes: 3 additions & 3 deletions docs/defining_guards/pydantic.ipynb
@@ -71,7 +71,7 @@
"\"\"\"\n",
"guard = Guard.from_pydantic(output_class=Pet, prompt=prompt)\n",
"\n",
"raw_llm_output, validated_output = guard(\n",
"raw_llm_output, validated_output, *rest = guard(\n",
" llm_api=openai.Completion.create,\n",
" engine=\"text-davinci-003\"\n",
")\n",
@@ -378,7 +378,7 @@
"\n",
"guard = Guard.from_pydantic(output_class=Pet, prompt=prompt)\n",
"\n",
"raw_llm_output, validated_output = guard(\n",
"raw_llm_output, validated_output, *rest = guard(\n",
" llm_api=openai.Completion.create,\n",
" engine=\"text-davinci-003\",\n",
" max_tokens=1024,\n",
@@ -576,7 +576,7 @@
"\"\"\"\n",
"\n",
"guard = Guard.from_pydantic(output_class=Pet, prompt=prompt)\n",
"raw_llm_output, validated_output = guard(\n",
"raw_llm_output, validated_output, *rest = guard(\n",
" llm_api=openai.Completion.create,\n",
" engine=\"text-davinci-003\",\n",
" max_tokens=1024,\n",
2 changes: 1 addition & 1 deletion docs/defining_guards/rail.md
@@ -98,7 +98,7 @@ import guardrails as gd

# Create a Guard object
guard = gd.Guard.from_rail('path/to/rail/spec.xml') # (1)!
-validated_output = guard(
+_, validated_output, *rest = guard(
openai.Completion.create, # (2)!
**prompt_args,
*args,
2 changes: 1 addition & 1 deletion docs/defining_guards/strings.ipynb
@@ -209,7 +209,7 @@
" prompt=\"Generate a puppy name\"\n",
")\n",
"\n",
"raw_llm_output, validated_llm_response = guard(openai.Completion.create)\n",
"raw_llm_output, validated_llm_response, *rest = guard(openai.Completion.create)\n",
"print(validated_llm_response)\n",
"print(guard.state.most_recent_call.tree)"
]
44 changes: 19 additions & 25 deletions docs/examples/bug_free_python_code.ipynb
@@ -38,7 +38,7 @@
},
{
"cell_type": "code",
"execution_count": 39,
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
@@ -75,7 +75,7 @@
},
{
"cell_type": "code",
"execution_count": 40,
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
@@ -113,7 +113,7 @@
},
{
"cell_type": "code",
"execution_count": 41,
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
@@ -131,18 +131,9 @@
},
{
"cell_type": "code",
"execution_count": 42,
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/zaydsimjee/workspace/guardrails/guardrails/validatorsattr.py:285: UserWarning: Validator bug-free-python is not valid for element pythoncode.\n",
" warnings.warn(\n"
]
}
],
"outputs": [],
"source": [
"guard = gd.Guard.from_rail_string(rail_str)"
]
@@ -156,7 +147,7 @@
},
{
"cell_type": "code",
"execution_count": 43,
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
@@ -173,7 +164,7 @@
},
{
"cell_type": "code",
"execution_count": 44,
"execution_count": 6,
"metadata": {},
"outputs": [
{
@@ -254,7 +245,7 @@
},
{
"cell_type": "code",
"execution_count": 45,
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
@@ -264,7 +255,7 @@
"Given a string s, find the longest palindromic substring in s. You may assume that the maximum length of s is 1000.\n",
"\"\"\"\n",
"\n",
"raw_llm_response, validated_response = guard(\n",
"response = guard(\n",
" openai.Completion.create,\n",
" prompt_params={\"leetcode_problem\": leetcode_problem},\n",
" engine=\"text-davinci-003\",\n",
@@ -285,7 +276,7 @@
},
{
"cell_type": "code",
"execution_count": 46,
"execution_count": 8,
"metadata": {},
"outputs": [
{
@@ -313,7 +304,7 @@
}
],
"source": [
"print(validated_response)"
"print(response.validated_output)"
]
},
{
@@ -326,7 +317,7 @@
},
{
"cell_type": "code",
"execution_count": 47,
"execution_count": 9,
"metadata": {},
"outputs": [
{
@@ -358,7 +349,7 @@
}
],
"source": [
"print(validated_response[\"python_code\"])"
"if response.validated_output is not None:\n",
" print(response.validated_output[\"python_code\"])\n",
"elif response.error is not None:\n",
" print(response.error)"
]
},
{
@@ -371,7 +365,7 @@
},
{
"cell_type": "code",
"execution_count": 48,
"execution_count": 10,
"metadata": {},
"outputs": [
{
@@ -390,7 +384,7 @@
],
"source": [
"try:\n",
" exec(validated_response[\"python_code\"])\n",
" exec(response.validated_output[\"python_code\"])\n",
" print(\"Success!\")\n",
"except Exception as e:\n",
" print(\"Failed!\")"
@@ -413,7 +407,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
"version": "3.11.6"
},
"orig_nbformat": 4,
"vscode": {
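One note on the notebook hunk above that calls `exec`: it indexes `response.validated_output` directly inside the `try` block. Under the same assumptions about the outcome object's fields, a slightly more defensive version of that cell might look like this (a sketch, not part of the commit):

```python
try:
    if response.validated_output is not None:
        # Only execute the generated code when validation produced output.
        exec(response.validated_output["python_code"])
        print("Success!")
    else:
        # Otherwise surface whatever error the outcome carries.
        print("Failed!", response.error)
except Exception:
    print("Failed!")
```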
