# llm_client.py
import logging
from html.parser import HTMLParser

from langchain_openai import ChatOpenAI
from langchain_community.callbacks import get_openai_callback
from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate
from langchain_core.messages import AIMessage

from config_manager import ConfigManager, FewShotPrompt
from custom_exceptions import LLMException, CustomExceptionData

logger = logging.getLogger("LLMClient")


class ChatOpenAIWrapper:
    def __init__(self):
        """
        Wrapper around Langchain's ChatOpenAI class that adds logging on every call
        """  # noqa
        self.config = ConfigManager()
        self.llm_chat = ChatOpenAI(model="gpt-4o-2024-08-06",
                                   openai_api_key=self.config.openai_api_key,
                                   temperature=0.32)

    def invoke(self, messages):
        logger.info("Calling OpenAI")
        return self.llm_chat.invoke(messages)

    def __call__(self, messages):
        return self.invoke(messages)


class LLMClient:
    def __init__(self):
        self.llm_chat = ChatOpenAIWrapper()
        self.config = ConfigManager()
        self.no_answer_keyword = "CANDIDATE_NO_DATA"
        self.key_tag = "ANSWER"
        self.exception_data = CustomExceptionData()

    @staticmethod
    def __build_prompt(config_manager_prompt: FewShotPrompt, prompt_example_mode: int = 0) -> ChatPromptTemplate:
        """
        Build a prompt from the config
        :param config_manager_prompt: prompt object from config
        :param prompt_example_mode: 0 zero-shot, 1 one-shot, 2 few-shot
        :return: langchain's ChatPromptTemplate
        """  # noqa
        example_prompt = ChatPromptTemplate.from_messages([
            ("user", "{input}"),
            ("ai", "{output}")
        ])
        match prompt_example_mode:
            case 0:
                prompt = ChatPromptTemplate.from_messages([
                    ("system", config_manager_prompt.system_message),
                    ("human", config_manager_prompt.user_message_template)
                ])
            case 1:
                single_shot = [{"input": config_manager_prompt.examples[0].user_message,
                                "output": config_manager_prompt.examples[0].ai_message}]
                single_shot_prompt = FewShotChatMessagePromptTemplate(
                    example_prompt=example_prompt,
                    examples=single_shot
                )
                prompt = ChatPromptTemplate.from_messages([
                    ("system", config_manager_prompt.system_message),
                    single_shot_prompt,
                    ("human", config_manager_prompt.user_message_template)
                ])
            case 2:
                few_shot = [{"input": ex.user_message, "output": ex.ai_message}
                            for ex in config_manager_prompt.examples]
                few_shot_prompt = FewShotChatMessagePromptTemplate(
                    example_prompt=example_prompt,
                    examples=few_shot
                )
                prompt = ChatPromptTemplate.from_messages([
                    ("system", config_manager_prompt.system_message),
                    few_shot_prompt,
                    ("human", config_manager_prompt.user_message_template)
                ])
            case _:
                raise ValueError(f"Unknown prompt_example_mode: {prompt_example_mode}")
        return prompt
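
    # For reference, with prompt_example_mode=1 the rendered message sequence
    # looks schematically like this (actual content comes from the FewShotPrompt
    # object in the config):
    #   system: <system_message>
    #   user:   <examples[0].user_message>
    #   ai:     <examples[0].ai_message>
    #   human:  <user_message_template>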

    def __tag_parser(self, message: AIMessage) -> str:
        """
        Parser for the tagged prompt style, where the AI wraps its answer in HTML tags
        :param message: Langchain's AIMessage object
        :return: Cleaned up answer string
        """  # noqa
        message_string = message.content
        # HTMLParser reports tag names in lowercase, so compare against the
        # lowercased key tag
        key_tag = self.key_tag.lower()

        class TagParser(HTMLParser):
            answer_data = False
            answer = ""

            def handle_starttag(self, tag, attrs):
                if tag == key_tag:
                    self.answer_data = True

            def handle_endtag(self, tag):
                if tag == key_tag:
                    self.answer_data = False

            def handle_data(self, data):
                # Capture only the text between the opening and closing key tags
                if self.answer_data:
                    self.answer = str(data)

        parser = TagParser()
        logger.debug(f"Full LLM answer: \n {message_string}")
        parser.feed(message_string)
        if self.no_answer_keyword in parser.answer:
            self.exception_data.reason = "LLM did not produce an answer!"
            self.exception_data.llm_answer = message_string
            raise LLMException(self.exception_data.reason, self.exception_data)
        # TODO: Return with quick cleanup, expand if needed
        return parser.answer.replace("\n", "").strip()
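
    # Example (sketch): for a message whose content is
    #   "Reasoning about the resume... <ANSWER>5 years</ANSWER>"
    # __tag_parser returns "5 years". If the tagged content contains
    # CANDIDATE_NO_DATA, an LLMException is raised instead.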

    # TODO: These two functions differ only by the options field; can they be combined into one?
    def answer_freely(self, question: str) -> str:
        """
        Answer a question about the resume in free form
        :param question: Question about resume to answer
        :return: The answer string
        :raises LLMException: if the LLM cannot answer the question
        """
        prompt = self.__build_prompt(self.config.prompt_answer_freely)
        logger.debug("Full LLM prompt: \n {}".format(
            prompt.invoke({"resume": str(self.config.user_info),
                           "question": question})))
        chain = prompt | self.llm_chat | self.__tag_parser
        with get_openai_callback() as cb:
            answer = chain.invoke({"resume": str(self.config.user_info),
                                   "question": question})
        logger.warning(f"OpenAI call cost: {cb.total_cost}")
        logger.info(f"The question: {question}\n"
                    f"LLM answer: {answer}")
        return answer

    def answer_with_options(self, question: str, options: list[str]) -> str:
        """
        Answer a question about the resume by choosing from the provided options
        :param question: Question about resume to answer
        :param options: Options to choose from
        :return: The chosen answer string
        :raises LLMException: if the LLM cannot answer the question
        """
        prompt = self.__build_prompt(self.config.prompt_answer_with_options)
        logger.debug("Full LLM prompt: \n {}".format(
            prompt.invoke({"resume": str(self.config.user_info),
                           "question": question,
                           "options": str(options)})))
        chain = prompt | self.llm_chat | self.__tag_parser
        with get_openai_callback() as cb:
            answer = chain.invoke({"resume": str(self.config.user_info),
                                   "question": question,
                                   "options": str(options)})
        logger.warning(f"OpenAI call cost: {cb.total_cost}")
        logger.info(f"The question: {question}\n"
                    f"LLM answer: {answer}")
        return answer
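

# A minimal usage sketch (an assumption: ConfigManager must supply a valid
# OpenAI API key, user_info, and the prompt objects referenced above; the
# question strings below are illustrative only).
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    client = LLMClient()
    # Free-form question about the configured resume
    print(client.answer_freely("How many years of Python experience do you have?"))
    # Multiple-choice question; the answer should be one of the options
    print(client.answer_with_options("What is your seniority level?",
                                     ["Junior", "Middle", "Senior"]))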