Skip to content

Commit

Permalink
Merge branch 'master' into summarise
Browse files Browse the repository at this point in the history
  • Loading branch information
aaj2005 authored Aug 11, 2024
2 parents 87796af + fd19057 commit f94199e
Show file tree
Hide file tree
Showing 9 changed files with 605 additions and 618 deletions.
4 changes: 0 additions & 4 deletions .github/workflows/ci.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -39,10 +39,6 @@ jobs:
pip install --user pipenv
pipenv sync --dev
- name: Check formatting with black
run: |
pipenv run black --check --diff --exclude __tests__ .
- name: Check code style with ruff
run: |
pipenv run ruff check .
Expand Down
1 change: 0 additions & 1 deletion Pipfile
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,6 @@ markovify = "*"
deep-translator = "*"

[dev-packages]
black = "~=22.12"
pytest = "~=6.2"
pretend = "~=1.0"
pyright = "==1.1.303"
Expand Down
1,054 changes: 506 additions & 548 deletions Pipfile.lock

Large diffs are not rendered by default.

123 changes: 86 additions & 37 deletions cogs/commands/chatgpt.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import logging
import re
from datetime import datetime, timedelta, timezone
from typing import Optional

Expand All @@ -20,7 +21,7 @@
GPT will be given the full chain of replied messages; *it does not look at the latest messages*.
If you want to set a custom initial prompt, use `!prompt <prompt>` then reply to that.
This defaults to GPT3.5, if you need GPT4, use `--gpt4` to switch (will inherit down conversation)
This defaults to GPT-4o mini; if you need full GPT-4o, use `--full` to switch (will inherit down the conversation)
"""

SHORT_HELP_TEXT = "Apollo is smarter than you think..."
Expand All @@ -39,9 +40,7 @@ def clean(msg, *prefixes):
class ChatGPT(commands.Cog):
def __init__(self, bot: Bot):
self.bot = bot

openai.api_key = CONFIG.OPENAI_API_KEY
self.model = "gpt-3.5-turbo"
self.system_prompt = CONFIG.AI_SYSTEM_PROMPT
if CONFIG.AI_INCLUDE_NAMES:
self.system_prompt += "\nYou are in a Discord chat room, each message is prepended by the name of the message's author separated by a colon."
Expand All @@ -55,10 +54,10 @@ async def prompt(self, ctx: Context, *, message: str):
await ctx.message.add_reaction("✅")

@commands.hybrid_command(
help=LONG_HELP_TEXT, brief=SHORT_HELP_TEXT, usage="[--gpt4] <message ...>"
help=LONG_HELP_TEXT, brief=SHORT_HELP_TEXT, usage="[--full] <message ...>"
)
async def chat(self, ctx: Context, *, message: Optional[str] = None):
await self.cmd(ctx)
async def chat(self, ctx: Context, *, message: str):
await self.cmd(ctx, message)

@commands.Cog.listener()
async def on_message(self, message: discord.Message):
Expand All @@ -76,31 +75,34 @@ async def on_message(self, message: discord.Message):
return

ctx = await self.bot.get_context(message)
await self.cmd(ctx)
await self.cmd(ctx, message)

async def cmd(self, ctx: Context):
async def cmd(self, ctx: Context, message: str):
if not await is_author_banned_openai(ctx):
return

# Create history chain
messages, gpt4 = await self.create_history(ctx.message)
messages, full = await self.create_history(message, ctx)
if not messages or await self.in_cooldown(ctx):
return

# If valid, dispatch to OpenAI and reply
async with ctx.typing():
response = await self.dispatch_api(messages, gpt4)
response = await self.dispatch_api(messages, full)
if response:
prev = ctx.message
prev = ctx
for content in split_into_messages(response):
prev = await prev.reply(content, allowed_mentions=mentions)

async def create_history(self, message):
message_chain = await self.get_message_chain(message)
gpt4 = False
async def create_history(self, message, ctx):
# If `message` is a string, the invocation came from a slash command rather than a Discord Message
message_chain = await self.get_message_chain(message, ctx)
full = False

# If a message in the chain triggered a !chat or !prompt or /chat
def is_cmd(m):
if isinstance(m, str):
return True
return (
m.content.startswith(chat_cmd)
or m.content.startswith(prompt_cmd)
Expand All @@ -112,33 +114,66 @@ def is_cmd(m):
return

# If first message starts with !prompt use that for initial
initial_msg = message_chain[0].content
initial_msg = (
message_chain[0]
if isinstance(message_chain[0], str)
else message_chain[0].content
)
if initial_msg.startswith(prompt_cmd):
initial = clean(initial_msg, prompt_cmd)
if initial.startswith("--gpt4"):
gpt4 = True
initial = clean(initial, "--gpt4")
if initial.startswith("--full"):
full = True
initial = clean(initial, "--full")
message_chain = message_chain[1:]
else:
initial = self.system_prompt
messages = [dict(role="system", content=initial)]

# Convert to dict form for request
for msg in message_chain:
role = "assistant" if msg.author == self.bot.user else "user"
role = (
"user"
if isinstance(msg, str) # slash commands will always be users
else "assistant" if msg.author == self.bot.user else "user"
)
content = msg if isinstance(msg, str) else msg.clean_content
# Skip empty messages (if you want to invoke on a pre-existing chain)
if not (content := clean(msg.clean_content, chat_cmd)):
if not (content := clean(content, chat_cmd)):
continue
if content.startswith("--gpt4"):
gpt4 = True
content = clean(content, "--gpt4")
# Add name to start of message for user msgs
if CONFIG.AI_INCLUDE_NAMES and msg.author != self.bot.user:
name, content = get_name_and_content(msg)
content = f"{name}: {clean(content, chat_cmd, '--gpt4')}"

messages.append(dict(role=role, content=content))
return messages, gpt4
if content.startswith("--full"):
full = True
content = clean(content, "--full")

if re.match(
r"https://cdn.discordapp.com/attachments/\d+/\d+/\w+\.\w+", content
):
messages.append(
dict(
role=role,
content=[
{
"type": "image_url",
"image_url": {"url": content, "detail": "low"},
}
],
)
)
else:
# Add name to start of message for user msgs
if CONFIG.AI_INCLUDE_NAMES and (
isinstance(msg, str) or msg.author != self.bot.user
):
name, content = (
get_name_and_content(msg)
if not isinstance(msg, str)
else (ctx.author.display_name, content)
)
content = f"{name}: {clean(content, chat_cmd, '--full')}"
messages.append(
dict(role=role, content=[{"type": "text", "text": content}])
)

return messages, full

async def in_cooldown(self, ctx):
# If is in allowed channel
Expand All @@ -160,11 +195,11 @@ async def in_cooldown(self, ctx):
self.cooldowns[ctx.channel.id] = now
return False

async def dispatch_api(self, messages, gpt4) -> Optional[str]:
logging.info(f"Making OpenAI request: {messages}")
async def dispatch_api(self, messages, full) -> Optional[str]:

# Make request
model = "gpt-4" if gpt4 else self.model
logging.info(f"Making OpenAI request: {messages}")
model = "gpt-4o" if full else "gpt-4o-mini"
response = await openai.ChatCompletion.acreate(model=model, messages=messages)
logging.info(f"OpenAI Response: {response}")

Expand All @@ -177,15 +212,29 @@ async def dispatch_api(self, messages, gpt4) -> Optional[str]:
return reply

async def get_message_chain(
self, message: discord.Message
) -> list[discord.Message]:
self, message: discord.Message | str, ctx: Context
) -> list[discord.Message | str]:
"""
Traverses a chain of replies to get a thread of chat messages between a user and Apollo.
"""
if message is None:
return []
previous = await self.fetch_previous(message)
return (await self.get_message_chain(previous)) + [message]
previous = (
await self.fetch_previous(message)
if isinstance(message, discord.Message)
else None
)
append = [message]

attachments = (
message.attachments
if isinstance(message, discord.Message)
else ctx.message.attachments
)
for attachment in attachments:
if attachment.content_type.startswith("image"):
append.append(attachment.url)
return (await self.get_message_chain(previous, ctx)) + append

async def fetch_previous(
self, message: discord.Message
Expand Down
12 changes: 9 additions & 3 deletions cogs/commands/misc.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,13 +5,16 @@
from discord.ext import commands
from discord.ext.commands import Bot, Context

from config import CONFIG


class Misc(commands.Cog):
def __init__(self, bot: Bot):
self.bot = bot
with open("general.txt") as f:
text = f.read()
self.model = markovify.Text(text)
if CONFIG.MARKOV_ENABLED:
with open("general.txt") as f:
text = f.read()
self.model = markovify.Text(text)

@commands.hybrid_command()
async def zed0(self, ctx: Context):
Expand Down Expand Up @@ -133,6 +136,9 @@ async def babbage(self, ctx: Context):
@commands.hybrid_command()
async def markov(self, ctx: Context):
"""Generate a sentence using a markov chain model"""
if not CONFIG.MARKOV_ENABLED:
await ctx.send("Markov chains are disabled.")
return
await ctx.send(self.model.make_sentence())

translate_help = """
Expand Down
2 changes: 2 additions & 0 deletions config.example.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,8 @@ config:
summarise_limit: 3
# Summarise Cooldown Period (minutes)
summarise_cooldown: 10
# Whether to load general.txt and enable the Markov chain command
markov_enabled: False

# Configuration
# Level of messages logged to file
Expand Down
1 change: 1 addition & 0 deletions config/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ def __init__(self, filepath: str):
self.LIEGE_CHANCELLOR_ID: int = parsed.get("liege_chancellor_id")
self.SUMMARISE_LIMIT: int = parsed.get("summarise_limit")
self.SUMMARISE_COOLDOWN: int = parsed.get("summarise_cooldown")
self.MARKOV_ENABLED: bool = parsed.get("markov_enabled")

# Configuration
self.LOG_LEVEL: str = parsed.get("log_level")
Expand Down
25 changes: 0 additions & 25 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,28 +1,3 @@
[tool.black]
line-length = 88
target_version = ['py310']
include = '\.pyi?$'
exclude = '''
(
/(
\.eggs # exclude a few common directories in the
| \.git # root of the project
| \.hg
| \.mypy_cache
| \.tox
| env
| _build
| buck-out
| build
| dist
| .venv
| venv
)/
)
'''


[tool.pytest.ini_options]
minversion = "6.0"
addopts = "-ra"
Expand Down
1 change: 1 addition & 0 deletions voting/splitutils.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ def __init__(self, preferred):
# Default delimiter order
delimiters = ["\n", ";", ",", " "]


# Split voting choice arguments
def split_args(input: str, dels=None) -> List[str]:
if dels is None:
Expand Down

0 comments on commit f94199e

Please sign in to comment.