Skip to content

Commit

Permalink
Merge pull request #259 from UWCS/summarise
Browse files Browse the repository at this point in the history
summarise cooldown
  • Loading branch information
Mole1424 authored Aug 11, 2024
2 parents fd19057 + bc6acca commit 99fc34e
Show file tree
Hide file tree
Showing 3 changed files with 24 additions and 8 deletions.
26 changes: 18 additions & 8 deletions cogs/commands/summarise.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,8 @@
mentions = AllowedMentions(everyone=False, users=False, roles=False, replied_user=True)
model = "gpt-4o-mini"



def clean(msg, *prefixes):
for pre in prefixes:
msg = msg.strip().removeprefix(pre)
Expand All @@ -29,22 +31,30 @@ class Summarise(commands.Cog):
def __init__(self, bot: Bot):
# Cog setup: keep a reference to the bot and configure the OpenAI client.
self.bot = bot
# Module-level openai client is keyed from config; presumably
# CONFIG.OPENAI_API_KEY is always set — TODO confirm against config loader.
openai.api_key = CONFIG.OPENAI_API_KEY
self.system_prompt = "People yap too much, I don't want to read all of it. In 200 words or less give me the gist of what is being said. Note that the messages are in reverse chronological order:"

def build_prompt(self, bullet_points, channel_name):
    """Build the system prompt sent to the model for summarisation.

    bullet_points: when truthy, ask the model to format output as bullets.
    channel_name: interpolated to give the model topical context.
    Returns the prompt string, terminated with a newline.
    """
    formatting_hint = "Put it in bullet points for readability." if bullet_points else ""
    # NOTE: when formatting_hint is empty a double space remains before
    # "Note" — preserved from the original prompt text.
    return (
        "People yap too much, I don't want to read all of it. "
        f"The topic is something related to {channel_name}. "
        "In 2 sentences or less give me the gist of what is being said. "
        f"{formatting_hint} Note that the messages are in reverse chronological order:\n"
    )

@commands.cooldown(CONFIG.SUMMARISE_LIMIT, CONFIG.SUMMARISE_COOLDOWN * 60, commands.BucketType.channel)
@commands.hybrid_command(help=LONG_HELP_TEXT, brief=SHORT_HELP_TEXT)
async def tldr(
self, ctx: Context, number_of_messages: int = 100):
self, ctx: Context, number_of_messages: int = 100, bullet_point_output: bool = False ):
number_of_messages = 400 if number_of_messages > 400 else number_of_messages



# avoid banned users
if not await is_author_banned_openai(ctx):
await ctx.send("You are banned from OpenAI!")
return

# get the last "number_of_messages" messages from the current channel and build the prompt
curr_channel = ctx.guild.get_channel(ctx.channel.id)
messages = curr_channel.history(limit=number_of_messages)
messages = await self.create_message(messages)
prompt = self.build_prompt(bullet_point_output, ctx.channel)
messages = ctx.channel.history(limit=number_of_messages)
messages = await self.create_message(messages, prompt)

# send the prompt to the ai overlords to process
async with ctx.typing():
Expand All @@ -68,9 +78,9 @@ async def dispatch_api(self, messages) -> Optional[str]:
reply = clean(reply, "Apollo: ", "apollo: ", name)
return reply

async def create_message(self, message_chain):
async def create_message(self, message_chain, prompt):
# get initial prompt
initial = self.system_prompt + "\n"
initial = prompt + "\n"

# for each message, append it to the prompt as follows --- author : message \n
async for msg in message_chain:
Expand Down
4 changes: 4 additions & 0 deletions config.example.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,10 @@ config:
portainer_api_key: portainer
# Liege Chancellor User ID
liege_chancellor_id: 1234
# Maximum number of summarise invocations allowed per cooldown period
summarise_limit: 3
# Summarise Cooldown Period (minutes)
summarise_cooldown: 10
# whether to load general.txt and markov chains
markov_enabled: False

Expand Down
2 changes: 2 additions & 0 deletions config/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,8 @@ def __init__(self, filepath: str):
self.AI_SYSTEM_PROMPT: str = parsed.get("ai_system_prompt")
self.PORTAINER_API_KEY: str = parsed.get("portainer_api_key")
self.LIEGE_CHANCELLOR_ID: int = parsed.get("liege_chancellor_id")
self.SUMMARISE_LIMIT: int = parsed.get("summarise_limit")
self.SUMMARISE_COOLDOWN: int = parsed.get("summarise_cooldown")
self.MARKOV_ENABLED: bool = parsed.get("markov_enabled")

# Configuration
Expand Down

0 comments on commit 99fc34e

Please sign in to comment.