Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

summarise cooldown #259

Merged
merged 5 commits into from
Aug 11, 2024
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 19 additions & 7 deletions cogs/commands/summarise.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,8 @@
mentions = AllowedMentions(everyone=False, users=False, roles=False, replied_user=True)
model = "gpt-4o-mini"



def clean(msg, *prefixes):
for pre in prefixes:
msg = msg.strip().removeprefix(pre)
Expand All @@ -31,20 +33,30 @@ def __init__(self, bot: Bot):
openai.api_key = CONFIG.OPENAI_API_KEY
self.system_prompt = "People yap too much, I don't want to read all of it. In 200 words or less give me the gist of what is being said. Note that the messages are in reverse chronological order:"

def build_prompt(self, response_word_limit, bullet_points, channel_name):
    """Build the summarisation prompt that is prepended to the scraped messages.

    Args:
        response_word_limit: requested maximum word count for the summary;
            capped at 500 so a user-supplied value cannot inflate the
            completion size (and API cost).
        bullet_points: if truthy, ask the model to format the summary as
            bullet points for readability.
        channel_name: the channel the messages come from, used to give the
            model topical context.

    Returns:
        The prompt string; callers append the channel history after it.
    """
    # Clamp instead of trusting the caller-supplied limit.
    response_word_limit = min(response_word_limit, 500)
    bullet_points = "Put it in bullet points for readability." if bullet_points else ""
    prompt = f"""People yap too much, I don't want to read all of it. The topic is something related to {channel_name}. In {response_word_limit} words or less give me the gist of what is being said. {bullet_points} Note that the messages are in reverse chronological order:

"""
    return prompt

@commands.cooldown(CONFIG.SUMMARISE_LIMIT, CONFIG.SUMMARISE_COOLDOWN * 60, commands.BucketType.channel)
@commands.hybrid_command(help=LONG_HELP_TEXT, brief=SHORT_HELP_TEXT)
async def tldr(
self, ctx: Context, number_of_messages: int = 100):
self, ctx: Context, number_of_messages: int = 100, response_word_limit: int = 100, bullet_point_output: bool = False ):
number_of_messages = 400 if number_of_messages > 400 else number_of_messages



# avoid banned users
if not await is_author_banned_openai(ctx):
await ctx.send("You are banned from OpenAI!")
return

# get the last "number_of_messages" messages from the current channel and build the prompt
curr_channel = ctx.guild.get_channel(ctx.channel.id)
messages = curr_channel.history(limit=number_of_messages)
messages = await self.create_message(messages)
prompt = self.build_prompt(response_word_limit, bullet_point_output, ctx.channel)
messages = ctx.channel.history(limit=number_of_messages)
messages = await self.create_message(messages, prompt)

# send the prompt to the ai overlords to process
async with ctx.typing():
Expand All @@ -68,9 +80,9 @@ async def dispatch_api(self, messages) -> Optional[str]:
reply = clean(reply, "Apollo: ", "apollo: ", name)
return reply

async def create_message(self, message_chain):
async def create_message(self, message_chain, prompt):
# get initial prompt
initial = self.system_prompt + "\n"
initial = prompt + "\n"

# for each message, append it to the prompt as follows --- author : message \n
async for msg in message_chain:
Expand Down
4 changes: 4 additions & 0 deletions config.example.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,10 @@ config:
portainer_api_key: portainer
# Liege Chancellor User ID
liege_chancellor_id: 1234
# Summarise Use Limit
summarise_limit: 3
# Summarise Cooldown Period (minutes)
summarise_cooldown: 10
# whether to load general.txt and markov chains
markov_enabled: False

Expand Down
2 changes: 2 additions & 0 deletions config/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,8 @@ def __init__(self, filepath: str):
self.AI_SYSTEM_PROMPT: str = parsed.get("ai_system_prompt")
self.PORTAINER_API_KEY: str = parsed.get("portainer_api_key")
self.LIEGE_CHANCELLOR_ID: int = parsed.get("liege_chancellor_id")
self.SUMMARISE_LIMIT: int = parsed.get("summarise_limit")
self.SUMMARISE_COOLDOWN: int = parsed.get("summarise_cooldown")
self.MARKOV_ENABLED: bool = parsed.get("markov_enabled")

# Configuration
Expand Down
Loading