From 4d70374889115eff82f3c4b9a864cf2fab180db5 Mon Sep 17 00:00:00 2001
From: மனோஜ்குமார் பழனிச்சாமி
Date: Sun, 2 Mar 2025 17:59:27 +0530
Subject: [PATCH] Create llm_debugger.py

---
 scripts/llm_debugger.py | 55 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 55 insertions(+)
 create mode 100644 scripts/llm_debugger.py

diff --git a/scripts/llm_debugger.py b/scripts/llm_debugger.py
new file mode 100644
index 000000000000..7a53b53daef7
--- /dev/null
+++ b/scripts/llm_debugger.py
@@ -0,0 +1,55 @@
+import json
+import os
+import tomllib
+import warnings
+
+# litellm can emit deprecation warnings on import; suppress them so the
+# debugger's own output stays readable.
+with warnings.catch_warnings():
+    warnings.simplefilter('ignore')
+    import litellm
+
+from dotenv import load_dotenv
+
+load_dotenv()
+
+# Read the [llm] section of config.toml; if `use_group` names a sub-table,
+# its keys override the top-level defaults.
+with open('config.toml', 'rb') as f:
+    config = tomllib.load(f)['llm']
+group = config.get('use_group')
+if group in config:
+    config.update(config[group])
+
+model = config['model']
+print('Using model:', model)
+api_key = config.get('api_key')
+base_url = config.get('base_url')
+print('Using base_url:', base_url)
+seed = config.get('seed', 42)
+temperature = config.get('temperature', 0)
+print('Using temperature:', temperature, 'and seed:', seed)
+print('-' * 100)
+
+extra_kwargs = {'base_url': base_url} if base_url else {}
+if base_url and model.startswith('ollama'):
+    os.environ['OLLAMA_API_BASE'] = base_url
+
+# The request log stores the positional and keyword arguments of the
+# logged litellm.completion() call as a JSON pair: [args, kwargs].
+with open('logs/llm/request.json') as f:
+    args, kwargs = json.load(f)
+
+# Replay the logged request, letting the values from config.toml override
+# whatever model/key/sampling settings were recorded in the log.
+kwargs.update(
+    model=model,
+    api_key=api_key,
+    seed=seed,
+    temperature=temperature,
+    **extra_kwargs,
+)
+response = litellm.completion(*args, **kwargs)
+
+print(response.choices[0].message.content)
+# print(response)
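
Note: the script expects logs/llm/request.json to hold the positional and
keyword arguments of a previously logged litellm.completion() call as a
two-element JSON array. A minimal sketch of that payload, assuming a purely
keyword-based call (the messages shown are illustrative placeholders, not
recorded data; the actual keys depend on what the caller logged):

    [
        [],
        {
            "messages": [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": "Why did my last run fail?"}
            ]
        }
    ]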