Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Adding initial test suite #10

Merged
merged 13 commits into from
Jul 8, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
84 changes: 84 additions & 0 deletions .github/workflows/run-test-suite.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
---
# CI workflow: build the project and run the unit test suite on every
# pull request targeting main (or on demand via workflow_dispatch).
# JUnit XML results are surfaced in the workflow summary.
name: aep
on:
  pull_request:
    branches:
      - 'main'
  workflow_dispatch:

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # Quoted so version numbers are treated as strings, not floats
        # (e.g. an unquoted 3.10 would parse as 3.1).
        python-version: ['3.12']

    env:
      # Azure credentials and endpoints consumed by the test suite, all
      # sourced from repository secrets (TEST_* naming on the secret side).
      AZURE_VAULT_ID: ${{ secrets.TEST_AZURE_VAULT_ID }}
      AZURE_CLIENT_ID: ${{ secrets.TEST_AZURE_CLIENT_ID }}
      AZURE_TENANT_ID: ${{ secrets.TEST_AZURE_TENANT_ID }}
      AZURE_CLIENT_SECRET: ${{ secrets.TEST_AZURE_CLIENT_SECRET }}
      AZURE_OPENAI_ENDPOINT: ${{ secrets.TEST_AZURE_OPENAI_ENDPOINT }}
      AZURE_OPENAI_KEY: ${{ secrets.TEST_AZURE_OPENAI_KEY }}
      # OPENAI_API_KEY intentionally reuses the Azure OpenAI key secret.
      OPENAI_API_KEY: ${{ secrets.TEST_AZURE_OPENAI_KEY }}
      AZURE_CS_ENDPOINT: ${{ secrets.TEST_AZURE_CS_ENDPOINT }}
      AZURE_CS_KEY: ${{ secrets.TEST_AZURE_CS_KEY }}
      SYSTEM_PROMPT_FILE: "system_prompts/prompts.json"
      # NOTE(review): lowercase key is inconsistent with the rest of the
      # env block but is kept as-is because the application reads this
      # exact name — confirm before renaming.
      azure_openai_api_version: "2023-12-01-preview"
      SYSTEM_API_KEY: "system"
      OPENAI_API_TYPE: "azure"

    steps:

      - name: Checkout repository
        # v2 runs on an end-of-life Node runtime; v4 is the supported release.
        uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}

      - name: Run tests
        # The test harness reads configuration from a .env file, so the
        # step materialises the job-level env vars into one before
        # delegating to `make unittest`.
        run: |
          pwd
          echo "AZURE_VAULT_ID=${AZURE_VAULT_ID}" >> .env
          echo "AZURE_CLIENT_ID=${AZURE_CLIENT_ID}" >> .env
          echo "AZURE_TENANT_ID=${AZURE_TENANT_ID}" >> .env
          echo "AZURE_CLIENT_SECRET=${AZURE_CLIENT_SECRET}" >> .env
          echo "AZURE_OPENAI_ENDPOINT=${AZURE_OPENAI_ENDPOINT}" >> .env
          echo "AZURE_OPENAI_KEY=${AZURE_OPENAI_KEY}" >> .env
          echo "OPENAI_API_KEY=${OPENAI_API_KEY}" >> .env
          echo "AZURE_CS_ENDPOINT=${AZURE_CS_ENDPOINT}" >> .env
          echo "AZURE_CS_KEY=${AZURE_CS_KEY}" >> .env
          echo "SYSTEM_PROMPT_FILE=${SYSTEM_PROMPT_FILE}" >> .env
          echo "azure_openai_api_version=${azure_openai_api_version}" >> .env
          echo "SYSTEM_API_KEY=${SYSTEM_API_KEY}" >> .env
          echo "OPENAI_API_TYPE=${OPENAI_API_TYPE}" >> .env
          make unittest

      - name: Surface failing tests
        if: always()
        # NOTE(review): pinned to a mutable branch ref (@main) — consider
        # pinning to a tagged release or commit SHA for supply-chain safety.
        uses: pmeier/pytest-results-action@main
        with:
          # A list of JUnit XML files, directories containing the former, and wildcard
          # patterns to process.
          # See @actions/glob for supported patterns.
          path: src/test-results.xml

          # (Optional) Add a summary of the results at the top of the report
          summary: true

          # (Optional) Select which results should be included in the report.
          # Follows the same syntax as `pytest -r`
          display-options: fEX

          # (Optional) Fail the workflow if no JUnit XML was found.
          fail-on-empty: false

          # (Optional) Title of the test results section in the workflow summary
          title: AEP Test Results

        env:
          CI: true
6 changes: 5 additions & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,10 @@ mkdocs-build:
run:
./buildscripts/run_docker.sh

# Build the local image, then execute the test suite inside it.
# Used by CI (run-test-suite.yml) via `make unittest`.
unittest:
$(MAKE) build-local
$(MAKE) run-tests

# Run the test suite via the helper script only (assumes image exists).
run-tests:
./buildscripts/run_tests.sh

Expand Down Expand Up @@ -56,4 +60,4 @@ inference-build:
./buildscripts/build_inference_service.sh

inference-run:
./buildscripts/run_inference_service.sh
./buildscripts/run_inference_service.sh
24 changes: 15 additions & 9 deletions src/helpers/prompts.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,8 +89,9 @@ def reduce_prompt_tokens(prompt):


def check_for_prompt_inj(prompt):

event_logger.debug(f"Checking for prompt injection")
url = config.azure_cs_endpoint + "/contentsafety/text:shieldPrompt?api-version=2024-02-15-preview"
event_logger.debug(f"CS Config URL: {url}")
headers = {
'Ocp-Apim-Subscription-Key': config.azure_cs_key,
'Content-Type': 'application/json'
Expand All @@ -101,15 +102,20 @@ def check_for_prompt_inj(prompt):
f"{prompt}"
]
}
response = requests.post(url, headers=headers, data=json.dumps(data))
try:
response = requests.post(url, headers=headers, data=json.dumps(data))
event_logger.debug(f"Response from AI ContentSafety: {response.json()}")

# Log the response
response_json = response.json()
# Log the response
response_json = response.json()

# Check if attackDetected is True in either userPromptAnalysis or documentsAnalysis
if response_json['documentsAnalysis'][0]['attackDetected']:
event_logger.info(f"Response from AI ContentSafety: {response.json()}")
event_logger.info(f"Prompt injection Detected in: {prompt}")
return False # Fail if attackDetected is True
# Check if attackDetected is True in either userPromptAnalysis or documentsAnalysis
if response_json['documentsAnalysis'][0]['attackDetected']:
event_logger.info(f"Response from AI ContentSafety: {response.json()}")
event_logger.info(f"Prompt injection Detected in: {prompt}")
return False # Fail if attackDetected is True

except Exception as err:
event_logger.error(f"Failed to perform prompt injection detection: {err}")

return True
1 change: 1 addition & 0 deletions src/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -12,3 +12,4 @@ prometheus_fastapi_instrumentator
pydantic-core
pytest
markdown
coverage
Loading