diff --git a/examples/python/agency-swarm/local_open_source_models.py b/examples/python/agency-swarm/local_open_source_models.py
index 46e2898..bed5b45 100644
--- a/examples/python/agency-swarm/local_open_source_models.py
+++ b/examples/python/agency-swarm/local_open_source_models.py
@@ -6,9 +6,8 @@
 load_dotenv("./.env")
 load_dotenv("../../../.env")
 
-client = patch(OpenAI(default_headers={"LLM-PARAM-base-url": "http://localhost:11434"}))
-# if using docker-compose, pass custom header to point to the ollama container instead of localhost
-# client = patch(OpenAI(default_headers={"LLM-PARAM-base-url": "http://ollama:11434"}))
+# remember to set OLLAMA_API_BASE_URL="http://ollama:11434" and base_url="http://localhost:8000/v1" in your env
+client = patch(OpenAI())
 
 set_openai_client(client)
 
@@ -25,4 +24,4 @@
 print(assistant)
 
 completion = agency.get_completion("What's something interesting about language models?")
-print(completion)
\ No newline at end of file
+print(completion)
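
Usage note (not part of the patch): with the custom LLM-PARAM-base-url header removed, the example relies on the environment to point the client at the local OpenAI-compatible endpoint. Below is a minimal sketch of an equivalent explicit setup; it assumes patch comes from the instructor package and uses only the URLs named in the diff's comment.

    import os

    from dotenv import load_dotenv
    from instructor import patch  # assumed import for the patch() helper used in the example
    from openai import OpenAI

    load_dotenv("./.env")

    # Explicit equivalent of the env-based setup: point the client at the local
    # OpenAI-compatible endpoint (URL taken from the comment in the diff).
    client = patch(
        OpenAI(
            base_url=os.getenv("OPENAI_BASE_URL", "http://localhost:8000/v1"),
            api_key=os.getenv("OPENAI_API_KEY", "not-needed"),
        )
    )

Per the comment in the diff, OLLAMA_API_BASE_URL="http://ollama:11434" would still need to be set so that the intermediate service can reach the Ollama container when running under docker-compose.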