"""
Before running the example, make sure the OPENAI_API_KEY environment variable is set by executing `echo $OPENAI_API_KEY`.
If it is not already set, it can be set by using `export OPENAI_API_KEY=YOUR_API_KEY` on Unix/Linux/MacOS systems or `set OPENAI_API_KEY=YOUR_API_KEY` on Windows systems.
"""
import os
import sys

from openai import OpenAI

from llm_guard import scan_output, scan_prompt
from llm_guard.input_scanners import Anonymize, PromptInjection, TokenLimit, Toxicity
from llm_guard.output_scanners import Deanonymize, NoRefusal, Relevance, Sensitive
from llm_guard.vault import Vault
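
# Fail fast with a clear error if the key is missing, rather than letting the
# first API call raise. (This check is an editorial addition, not part of the
# upstream llm-guard example.)
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
    raise RuntimeError("OPENAI_API_KEY is not set; see the module docstring.")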
client = OpenAI(api_key=api_key)

# The vault stores the original values that Anonymize redacts from the prompt,
# so that Deanonymize can restore them in the model's response.
vault = Vault()
input_scanners = [Anonymize(vault), Toxicity(), TokenLimit(), PromptInjection()]
output_scanners = [Deanonymize(vault), NoRefusal(), Relevance(), Sensitive()]
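# Scanners run in list order, each receiving the (possibly transformed) text
# produced by the previous one. Most scanners also accept tuning parameters
# such as a detection threshold; see the llm-guard documentation for details.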
prompt = "Make an SQL insert statement to add a new user to our database. Name is John Doe. Email is [email protected] "
"but also possible to contact him with [email protected] email. Phone number is 555-123-4567 and "
"the IP address is 192.168.1.100. And credit card number is 4567-8901-2345-6789. "
"He works in Test LLC."
sanitized_prompt, results_valid, results_score = scan_prompt(input_scanners, prompt)
# Reject the prompt if any scanner marked it invalid.
if not all(results_valid.values()):
    print(f"Prompt {prompt} is not valid, scores: {results_score}")
    sys.exit(1)

print(f"Prompt: {sanitized_prompt}")
response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": sanitized_prompt},
    ],
    temperature=0,
    max_tokens=512,
)
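
# Scan the model's response with the output scanners. The sanitized prompt is
# passed along so Relevance can compare the response against it, and
# Deanonymize restores the original values held in the vault.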
response_text = response.choices[0].message.content
sanitized_response_text, results_valid, results_score = scan_output(
    output_scanners, sanitized_prompt, response_text
)
# Reject the response if any output scanner marked it invalid.
if not all(results_valid.values()):
    print(f"Output {response_text} is not valid, scores: {results_score}")
    sys.exit(1)

print(f"Output: {sanitized_response_text}\n")