# langchain_helper.py
from pathlib import Path
import subprocess
from langchain_core.language_models.chat_models import (
    BaseChatModel,
)
import openai_wrapper
from icecream import ic
from types import FrameType
from typing import Callable, List, Tuple, TypeVar
from datetime import datetime, timedelta
import asyncio


def get_model_name(model: BaseChatModel):
    # Prefer an explicit model_name attribute, then model, then str(model)
    model_name = ""
    if hasattr(model, "model_name") and model.model_name != "":  # type: ignore
        model_name = model.model_name  # type: ignore
    elif hasattr(model, "model") and model.model != "":  # type: ignore
        model_name = model.model  # type: ignore
    else:
        model_name = str(model)
    # Remove "models/" prefix if present
    if model_name.startswith("models/"):
        model_name = model_name[7:]  # Skip "models/"
    return model_name
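
# Usage sketch (the printed id is illustrative, not verified against a live run):
#
#   llm = get_model(claude=True)
#   print(get_model_name(llm))  # e.g. "claude-3-5-sonnet-20241022"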


def get_models(
    openai: bool = False,
    google: bool = False,
    claude: bool = False,
    llama: bool = False,
    google_think: bool = False,
) -> List[BaseChatModel]:
    ret = []
    if google:
        ret.append(get_model(google=True))
    if google_think:
        ret.append(get_model(google_think=True))
    if claude:
        ret.append(get_model(claude=True))
    if llama:
        ret.append(get_model(llama=True))
    if openai:
        ret.append(get_model(openai=True))
    return ret
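
# Usage sketch: build several providers at once for a side-by-side run.
# Assumes API keys for each selected provider are set in the environment.
#
#   llms = get_models(openai=True, claude=True)
#   for llm in llms:
#       print(get_model_name(llm))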


def get_model(
    openai: bool = False,
    google: bool = False,
    claude: bool = False,
    llama: bool = False,
    google_think: bool = False,
) -> BaseChatModel:
    """
    Return a single chat model; at most one provider flag may be set.
    Defaults to OpenAI when no flag is passed.
    """
    # If more than one flag is true, exit and fail
    count_true = sum([openai, google, claude, llama, google_think])
    if count_true > 1:
        print("Only one model can be selected")
        exit(1)
    if count_true == 0:
        # Default to OpenAI
        openai = True

    if google:
        from langchain_google_genai import ChatGoogleGenerativeAI

        model = ChatGoogleGenerativeAI(model="gemini-2.0-flash-exp")
    elif google_think:
        from langchain_google_genai import ChatGoogleGenerativeAI

        model = ChatGoogleGenerativeAI(model="gemini-2.0-flash-thinking-exp-1219")
    elif claude:
        from langchain_anthropic import ChatAnthropic

        model = ChatAnthropic(model_name="claude-3-5-sonnet-20241022")
    elif llama:
        from langchain_groq import ChatGroq

        model = ChatGroq(model_name="llama-3.3-70b-versatile")
    else:
        from langchain_openai.chat_models import ChatOpenAI

        model = ChatOpenAI(model=openai_wrapper.gpt4.name)
    return model
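
# Design note: the provider SDK imports are deliberately local to get_model, so
# the module imports cleanly even when only some provider packages are installed.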


def tracer_project_name():
    import inspect
    from pathlib import Path
    import socket

    # Get the first caller frame that is not in langchain_helper
    def app_frame(stack) -> FrameType:
        for frame in stack:
            if frame.filename != __file__:
                return frame
        # If we can't find anything, use our parent frame
        return stack[1]

    caller_frame = app_frame(inspect.stack())
    caller_function = caller_frame.function  # type:ignore
    caller_filename = Path(inspect.getfile(caller_frame.frame)).name  # type:ignore
    hostname = socket.gethostname()  # Get the hostname
    return f"{caller_filename}:{caller_function}[{hostname}]"


def langsmith_trace_if_requested(trace: bool, the_call):
    # Run the_call under a LangSmith trace when requested, otherwise directly
    if trace:
        return langsmith_trace(the_call)
    the_call()


T = TypeVar("T")


async def async_run_on_llms(
    lcel_func: Callable[[BaseChatModel], T], llms: List[BaseChatModel]
) -> List[Tuple[T, BaseChatModel, timedelta]]:
    async def timed_lcel_task(lcel_func, llm):
        # Time each chain invocation so callers can compare model latency
        start_time = datetime.now()
        result = await (lcel_func(llm)).ainvoke({})
        end_time = datetime.now()
        time_delta = end_time - start_time
        return result, llm, time_delta

    tasks = [timed_lcel_task(lcel_func, llm) for llm in llms]
    return list(await asyncio.gather(*tasks))
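
# Usage sketch, assuming a prompt with no input variables so the empty dict
# passed to ainvoke suffices; the prompt text and model choices are illustrative:
#
#   from langchain_core.prompts import ChatPromptTemplate
#
#   prompt = ChatPromptTemplate.from_messages([("user", "Tell me a joke")])
#   results = asyncio.run(
#       async_run_on_llms(lambda llm: prompt | llm, get_models(openai=True, claude=True))
#   )
#   for answer, llm, elapsed in results:
#       print(get_model_name(llm), elapsed, answer.content)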


def langsmith_trace(the_call):
    from langchain_core.tracers.context import tracing_v2_enabled
    from langchain.callbacks.tracers.langchain import wait_for_all_tracers

    trace_name = tracer_project_name()
    with tracing_v2_enabled(project_name=trace_name) as tracer:
        ic("Using Langsmith:", trace_name)
        the_call()
        ic(tracer.get_run_url())
        wait_for_all_tracers()
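
# Usage sketch (assumes a LANGCHAIN_API_KEY is configured so LangSmith accepts
# the trace); the chain is illustrative:
#
#   chain = prompt | get_model(openai=True)
#   langsmith_trace(lambda: ic(chain.invoke({})))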


def to_gist_multiple(paths: List[Path]):
    # Convert all paths to absolute paths and pass them as arguments
    gist = subprocess.run(
        ["gh", "gist", "create"] + [str(path.absolute()) for path in paths],
        check=True,
        stdout=subprocess.PIPE,
        text=True,
    )
    ic(gist)
    ic(gist.stdout.strip())
    subprocess.run(["open", gist.stdout.strip()])


def to_gist(path: Path):
    gist = subprocess.run(
        ["gh", "gist", "create", str(path.absolute())],
        check=True,
        stdout=subprocess.PIPE,
        text=True,
    )
    ic(gist)
    ic(gist.stdout.strip())
    subprocess.run(["open", gist.stdout.strip()])
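
# Usage sketch (requires an authenticated GitHub CLI; note that `open` is
# macOS-specific, so other platforms would need xdg-open or similar):
#
#   to_gist(Path("notes.md"))
#   to_gist_multiple([Path("a.md"), Path("b.md")])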