Merge pull request #3 from Vokturz/agents
Agents
Vokturz authored Jul 12, 2023
2 parents a675cf1 + f7efc3f commit 7369b07
Showing 9 changed files with 673 additions and 60 deletions.
29 changes: 20 additions & 9 deletions main.py
@@ -4,30 +4,41 @@
 import asyncio
 from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
 handler = StreamingStdOutCallbackHandler()

 # Load environment variables
 load_dotenv()

+# Load custom tools
+import src.custom_tools as custom_tools
+
+tools = [custom_tools.disk_usage, custom_tools.memory_usage,
+         custom_tools.asyncArxivQueryRun(max_workers=4),
+         custom_tools.asyncDuckDuckGoSearchRun(max_workers=4)]
+
+# You can load more tools using load_tools
+# from langchain.agents import load_tools
+# tools.extend(load_tools(['ddg-search', 'arxiv', 'requests_all']))
+
 # Create SlackBot instance
 bot = SlackBot(name='SlackBot', verbose=True,
                max_tokens=500, model_type='openai',
                chunk_size=500, # Chunk size for splitter
                chunk_overlap=50, # Chunk overlap for splitter
                k_similarity=5, # Numbers of chunks to return in retriever
-               log_filename='_slackbot.log'
+               log_filename='_slackbot.log',
+               tools=tools,
+               )

 ## LLM configuration
-model_type = 'openai'
-if model_type == 'llama':
+if bot.model_type == 'llama':
     config = dict(gpu_layers=40, temperature=0.8, batch_size=1024,
-                  context_length=2048, threads=6, stream=True, max_new_tokens=500)
+                  context_length=2048, threads=6, stream=True, max_new_tokens=bot.max_tokens)
 else:
-    config = dict(model_name="gpt-3.5-turbo", temperature=0.8, max_tokens=500)
+    config = dict(model_name="gpt-3.5-turbo-16k", temperature=0.8, max_tokens=bot.max_tokens)

 # Initialize LLM and embeddings
 bot.app.logger.info("Initializing LLM and embeddings...")
-bot.initialize_llm(model_type, max_tokens_threads=1000, config=config, callbacks=[handler])
-bot.initialize_embeddings(model_type)
+bot.initialize_llm(bot.model_type, max_tokens_threads=4000, config=config, callbacks=[handler])
+bot.initialize_embeddings(bot.model_type)

 # Create handlers for commands /ask, /modify_bot, /bot_info and bot mentions
 create_handlers(bot)
@@ -45,8 +56,8 @@ async def start():
 if __name__ == "__main__":
     logger = bot.app.logger
     try:
-        logger.info('App started.')
         asyncio.run(start())
+        logger.info('App started.')
     except KeyboardInterrupt:
         logger.info('App stopped by user.')
     except Exception as e:
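For reference, the `load_tools` route hinted at in the comments above would look roughly like this — a minimal sketch assuming the langchain==0.0.231 API pinned in requirements.txt (the built-in `arxiv` tool additionally requires the `arxiv` package to be installed):

    from langchain.agents import load_tools

    # Append LangChain's built-in tools by name to the custom ones above;
    # 'ddg-search' wraps DuckDuckGoSearchRun, 'arxiv' wraps ArxivQueryRun.
    tools.extend(load_tools(['ddg-search', 'arxiv']))
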
2 changes: 1 addition & 1 deletion requirements.txt
@@ -1,5 +1,5 @@
 chromadb==0.3.25
-langchain==0.0.205
+langchain==0.0.231
 python-dotenv==1.0.0
 slack_bolt==1.18.0
 tiktoken==0.4.0
36 changes: 36 additions & 0 deletions src/custom_tools.py
@@ -0,0 +1,36 @@
+from langchain.tools import tool, DuckDuckGoSearchRun, ArxivQueryRun
+from concurrent.futures import ThreadPoolExecutor
+import asyncio
+import subprocess
+
+@tool
+def disk_usage(query: str) -> str:
+    """useful for when you need to answer questions about the disk usage."""
+    output = subprocess.check_output(['df', '-h'], text=True)
+    output = "This is the output of `df -h`:\n" + output
+    return output
+
+@tool
+def memory_usage(query: str) -> str:
+    """useful for when you need to answer questions about memory usage."""
+    output = subprocess.check_output(['free', '-h'], text=True)
+    output = "This is the output of `free -h`. Mem refers to RAM memory and Swap to swap memory:\n" + output
+    return output
+
+class asyncDuckDuckGoSearchRun(DuckDuckGoSearchRun):
+    # max number of parallel requests
+    max_workers: int = 2
+    async def _arun(self, query: str) -> str:
+        """Use the tool asynchronously."""
+        executor = ThreadPoolExecutor(max_workers=self.max_workers)
+        results = await asyncio.get_running_loop().run_in_executor(executor, self._run, query)
+        return results
+
+class asyncArxivQueryRun(ArxivQueryRun):
+    # max number of parallel requests
+    max_workers: int = 2
+    async def _arun(self, query: str) -> str:
+        """Use the tool asynchronously."""
+        executor = ThreadPoolExecutor(max_workers=self.max_workers)
+        results = await asyncio.get_running_loop().run_in_executor(executor, self._run, query)
+        return results

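The two wrapper classes above give LangChain's synchronous search tools an async entry point: `_arun` hands the blocking `_run` call to a thread pool, so tool calls don't stall the bot's event loop. (Note that each `_arun` call constructs its own `ThreadPoolExecutor`, so `max_workers` bounds threads per call rather than across concurrent requests.) A minimal usage sketch, assuming langchain 0.0.231 where `BaseTool.arun` dispatches to `_arun`:

    import asyncio
    from src.custom_tools import asyncDuckDuckGoSearchRun

    async def main():
        search = asyncDuckDuckGoSearchRun(max_workers=4)
        # arun() is the public async entry point; it invokes _arun internally
        print(await search.arun("langchain agents"))

    asyncio.run(main())
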
95 changes: 84 additions & 11 deletions src/handlers.py
@@ -1,7 +1,7 @@
 from . import prompts
 from .slackbot import SlackBot
 from .utils import (parse_format_body, get_llm_reply,
-                    extract_message_from_thread)
+                    extract_message_from_thread, get_agent_reply)
 from .ingest import process_uploaded_files
 import json
 import os
@@ -47,8 +47,12 @@ async def handle_ask(ack: Ack, respond: Respond, say: Say,
                                               'instructions',
                                               'query'])

+    channel_bot_info = bot.get_channel_llm_info(channel_id)
     # Get the response from the LLM
-    response, initial_ts = await get_llm_reply(bot, prompt, parsed_body)
+    if channel_bot_info['as_agent']:
+        response, initial_ts = await get_agent_reply(bot, parsed_body, None)
+    else:
+        response, initial_ts = await get_llm_reply(bot, prompt, parsed_body)
     # Format the response
     response = f"*<@{user_id}> asked*: {parsed_body['query']}\n*Answer*:\n{response}"

Expand Down Expand Up @@ -93,6 +97,45 @@ async def handle_modify_bot(ack: Ack, body: Dict[str, Any],
view["blocks"][1]["element"]["initial_value"] = channel_bot_info['instructions']
view["blocks"][2]["element"]["initial_value"] = str(channel_bot_info['temperature'])

# OpenAI model, only if model_type is openai
if bot.model_type == 'openai':
if 'openai_model' in channel_bot_info:
initial_text = ("ChatModel: "
if channel_bot_info['openai_model'].startswith('gpt')
else "InstructModel: ")
initial_option = {"text": {
"type": "plain_text",
"text": f"{initial_text}{channel_bot_info['openai_model']}"
},
"value": channel_bot_info['openai_model']
}
view["blocks"][3]["element"]["initial_option"] = initial_option
else:
view["blocks"][3] = { "type": "section",
"text": { "type": "plain_text", "text": " "}}

# Agent or Chain
if channel_bot_info['as_agent']:
view["blocks"][4]["element"]["initial_option"]["value"] = "as_agent"
view["blocks"][4]["element"]["initial_option"]["text"]["text"] = "Use it as an Agent"
else:
view["blocks"][4]["element"]["initial_option"]["value"] = "as_llm_chain"
view["blocks"][4]["element"]["initial_option"]["text"]["text"] = "Use it as a LLM chain"


# Tools
all_options = []
for tool in bot.tool_names:
option = {
"text": {
"type": "plain_text",
"text": tool
},
"value": tool
}
all_options.append(option)
view["blocks"][5]["element"]["options"] = all_options

# Include channel_id in private_metadata
extra_data = {"channel_id": channel_id}
if '!no-notify' in body['text']:
@@ -101,6 +144,20 @@
         extra_data["notify"] = True
     view["private_metadata"] = json.dumps(extra_data)

+    initial_options = []
+    for tool in channel_bot_info['tool_names']:
+        if tool in bot.tool_names:
+            option = {
+                "text": {
+                    "type": "plain_text",
+                    "text": tool
+                },
+                "value": tool
+            }
+            initial_options.append(option)
+    if initial_options:
+        view["blocks"][5]["element"]["initial_options"] = initial_options
+
     # Open view for bot modification
     await bot.app.client.views_open(trigger_id=trigger_id, view=view)

@@ -120,7 +177,6 @@ async def handle_modify_bot_view(ack: Ack, body: Dict[str, Any],
     channel_id = json.loads(view["private_metadata"])["channel_id"]
     notify = json.loads(view["private_metadata"])["notify"]
     user = body['user']['id']
-
     # Iterate through each bot value and update it
     for key in bot_values.keys():
         input_value = values[key][key]['value']
@@ -136,6 +192,15 @@
             return
         bot_values[key] = input_value

+    if 'openai_model' in values:
+        bot_values['openai_model'] = values['openai_model']['openai_model']['selected_option']['value']
+    as_agent = values['use_it_as']['unused_action']['selected_option']['value']
+    bot_values['as_agent'] = True if 'as_agent' == as_agent else False
+
+    selected_tools = values['tool_names']['unused_action']['selected_options']
+    tool_names = [tool['value'] for tool in selected_tools]
+    bot_values['tool_names'] = tool_names
+
     # Update channel's bot info
     bot.define_channel_llm_info(channel_id, bot_values)
     await ack()
@@ -167,9 +232,16 @@ async def handle_bot_info(ack: Ack, respond: Respond,
     # Create a response string with the bot's default prompt and temperature
     prompt = prompts.INITIAL_BOT_PROMPT
     response = "*Default Prompt:*\n`"
+    if not bot_info['tool_names']:
+        bot_info['tool_names'] = ['None']
     response += prompt.format(personality=bot_info["personality"],
                               instructions=bot_info["instructions"])
-    response += f"`\n*Temperature:* {bot_info['temperature']}"
+    response += (f"`\n*Temperature:* {bot_info['temperature']}"
+                 f"\n*is Agent:* _{bot_info['as_agent']}_,"
+                 f" *Tools:* _" + ', '.join(bot_info['tool_names']) + '_')
+
+    if bot.model_type == 'openai' and 'openai_model' in bot_info:
+        response += (f"\n*OpenAI Model:* _{bot_info['openai_model']}_")

     # Send the response to the user
     await respond(text=response)
@@ -325,15 +397,19 @@ async def handle_mention(say: Say,
             extra_context = second_message['text']
             qa_prompt = qa_prompt.partial(extra_context=extra_context)
             if bot.verbose:
-                bot.app.logger.info(f"Asking RetrievalQA. "
+                bot.app.logger.info(f"Asking inside a Thread. "
                                     f" {extra_context}:"
                                     f" {channel_id}/{first_message['ts']}:"
                                     f" {parsed_body['query']}")

     # Get reply and update initial message
-    response, initial_ts = await get_llm_reply(bot, prompt, parsed_body,
-                                               first_ts=first_message['ts'],
-                                               qa_prompt=qa_prompt)
+    channel_bot_info = bot.get_channel_llm_info(channel_id)
+    if channel_bot_info['as_agent']:
+        response, initial_ts = await get_agent_reply(bot, parsed_body, first_message['ts'])
+    else:
+        response, initial_ts = await get_llm_reply(bot, prompt, parsed_body,
+                                                   first_ts=first_message['ts'],
+                                                   qa_prompt=qa_prompt)

     client = bot.app.client
     await client.chat_update(
@@ -476,9 +552,6 @@ async def handle_upload_files_view(ack: Ack,
                           args=(channel_id, msg_timestamp, texts,
                                 file_name_list, extra_context))
     thread.start()
-    # bot.define_thread_retriever_db(channel_id, msg_timestamp, texts)
-    # await say(f"_This is a QA Thread using files `{'` `'.join(file_name_list)}`_",
-    #           thread_ts=msg_timestamp, channel=channel_id)

 @bot.app.action("unused_action")
 async def handle_unused(ack: Ack):
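For context on the `view["blocks"][5]` manipulation in `handle_modify_bot`: the handler assumes a Block Kit view template (defined elsewhere in the repo) whose sixth block is a multi-select for tools. A hypothetical sketch of that block, reconstructed from the fields the code reads and writes (`options`, `initial_options`, the `tool_names` block id, and the shared `unused_action` action id):

    tools_block = {
        "type": "input",
        "block_id": "tool_names",
        "label": {"type": "plain_text", "text": "Tools"},
        "element": {
            "type": "multi_static_select",
            "action_id": "unused_action",
            # filled at runtime: one option per tool in bot.tool_names
            "options": [],
            # "initial_options" is set only for tools already enabled
        },
    }

`handle_modify_bot_view` then reads the selection back from the view submission via `values['tool_names']['unused_action']['selected_options']`.
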
