Skip to content

Commit

Permalink
Merge pull request #403 from LlmKira/dev
Browse files Browse the repository at this point in the history
Logic Mock In Recursive function snapshot process
  • Loading branch information
sudoskys authored Apr 20, 2024
2 parents ac5fe4a + 522d2b5 commit b049286
Show file tree
Hide file tree
Showing 14 changed files with 305 additions and 183 deletions.
7 changes: 5 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -78,10 +78,10 @@ or [one-api](https://github.com/songquanpeng/one-api) independently.

### 🍔 Login Modes

- `Login via url`: Use `/login token$https://provider.com` to Login. The program posts the token to the interface to
- `Login via url`: Use `/login <a token>$<something like https://provider.com/login>` to Login. The program posts the token to the interface to
retrieve configuration
information, [how to develop this](https://github.com/LlmKira/Openaibot/blob/81eddbff0f136697d5ad6e13ee1a7477b26624ed/app/components/credential.py#L20).
- `Login`: Use `/login https://api.com/v1$key$model` to login
- `Login`: Use `/login https://<api endpoint>/v1$<api key>$<the model>` to login

### 🧀 Plugin Can Do More

Expand Down Expand Up @@ -145,6 +145,9 @@ npm install pm2 -g
pm2 start pm2.json
```

> **Be sure to change the database's default password, or close its exposed ports, to prevent the database from being
scanned and attacked.**

### 🥣 Docker

Build Hub: [sudoskys/llmbot](https://hub.docker.com/repository/docker/sudoskys/llmbot/general)
Expand Down
9 changes: 9 additions & 0 deletions app/components/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
from typing import Optional

from app.components.credential import Credential
from app.components.user_manager import USER_MANAGER


async def read_user_credential(user_id: str) -> Optional[Credential]:
    """Fetch the stored user record and return its credential (may be None)."""
    record = await USER_MANAGER.read(user_id=user_id)
    return record.credential
31 changes: 2 additions & 29 deletions app/components/credential.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
import os
from urllib.parse import urlparse

import requests
from dotenv import load_dotenv
Expand All @@ -15,6 +14,7 @@ class Credential(BaseModel):
api_key: str
api_endpoint: str
api_model: str
api_tool_model: str = "gpt-3.5-turbo"

@classmethod
def from_provider(cls, token, provider_url):
Expand All @@ -36,37 +36,10 @@ def from_provider(cls, token, provider_url):
api_key=user_data["api_key"],
api_endpoint=user_data["api_endpoint"],
api_model=user_data["api_model"],
api_tool_model=user_data.get("api_tool_model", "gpt-3.5-turbo"),
)


def split_setting_string(input_string):
    """Parse a ``$``-separated login string into its segments.

    Two accepted shapes:
      * ``<endpoint url>$<key>$<model>[$...]`` -- returns the first three segments
      * ``<token>$<provider url>``             -- returns both segments

    Returns the list of segments on success, ``None`` for anything else
    (including non-string input).
    """
    if not isinstance(input_string, str):
        return None

    def _looks_like_url(candidate):
        # A usable URL must carry both a scheme and a network location.
        try:
            parsed = urlparse(candidate)
        except ValueError:
            return False
        return bool(parsed.scheme) and bool(parsed.netloc)

    parts = input_string.split("$")
    # Endpoint-first form: url$key$model (any extra segments are ignored).
    if _looks_like_url(parts[0]) and len(parts) >= 3:
        return parts[:3]
    # Token-first form: token$provider_url.
    if len(parts) == 2 and not _looks_like_url(parts[0]) and _looks_like_url(parts[1]):
        return parts
    # Unrecognized shape.
    return None


load_dotenv()

if os.getenv("GLOBAL_OAI_KEY") and os.getenv("GLOBAL_OAI_ENDPOINT"):
Expand Down
85 changes: 67 additions & 18 deletions app/receiver/function.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,12 @@
from aio_pika.abc import AbstractIncomingMessage
from loguru import logger

from app.components import read_user_credential
from app.components.credential import global_credential
from llmkira.kv_manager.env import EnvManager
from llmkira.kv_manager.tool_call import GLOBAL_TOOLCALL_CACHE_HANDLER
from llmkira.logic import LLMLogic
from llmkira.memory import global_message_runtime
from llmkira.openai.cell import ToolCall
from llmkira.sdk.tools.register import ToolRegister
from llmkira.task import Task, TaskHeader
Expand Down Expand Up @@ -235,20 +239,7 @@ async def run_pending_task(task: TaskHeader, pending_task: ToolCall):
return logger.info(
f"[Snapshot Auth] \n--auth-require {pending_task.name} require."
)

# Resign Chain
# 时序实现,防止过度注册
if len(task.task_sign.tool_calls_pending) == 1:
if not has_been_called_recently(userid=task.receiver.uid, n_seconds=5):
logger.debug(
"ToolCall run out, resign a new request to request stop sign."
)
await create_child_snapshot(
task=task,
memory_able=True,
channel=task.receiver.platform,
)
# 运行函数, 传递模型的信息,以及上一条的结果的openai raw信息
# Run Function
run_result = await _tool_obj.load(
task=task,
receiver=task.receiver,
Expand All @@ -257,11 +248,72 @@ async def run_pending_task(task: TaskHeader, pending_task: ToolCall):
pending_task=pending_task,
refer_llm_result=task.task_sign.llm_response,
)
run_status = True
# 更新任务状态
if run_result.get("exception"):
run_status = False
await task.task_sign.complete_task(
tool_calls=pending_task, success_or_not=True, run_result=run_result
)
return run_result
# Resign Chain
# 时序实现,防止过度注册
if len(task.task_sign.tool_calls_pending) == 0:
if not has_been_called_recently(userid=task.receiver.uid, n_seconds=3):
credentials = await read_user_credential(user_id=task.receiver.uid)
if global_credential:
credentials = global_credential
logic = LLMLogic(
api_key=credentials.api_key,
api_endpoint=credentials.api_endpoint,
api_model=credentials.api_tool_model,
)
history = await global_message_runtime.update_session(
session_id=task.receiver.uid,
).read(lines=3)
logger.debug(f"Read History:{history}")
continue_ = await logic.llm_continue(
context=f"History:{history},ToolCallResult:{run_status}",
condition="Would you like to continue a chat?",
default=False,
)
if continue_.continue_it:
logger.debug(
"ToolCall run out, resign a new request to request stop sign."
)
await create_child_snapshot(
task=task,
memory_able=True,
channel=task.receiver.platform,
)
# 运行函数, 传递模型的信息,以及上一条的结果的openai raw信息
await Task.create_and_send(
queue_name=task.receiver.platform,
task=TaskHeader(
sender=task.sender,
receiver=task.receiver,
task_sign=task.task_sign.notify(
plugin_name=__receiver__,
response_snapshot=True,
memory_able=False,
),
message=[
EventMessage(
user_id=task.receiver.user_id,
chat_id=task.receiver.chat_id,
text=continue_.comment_to_user,
)
],
),
)
else:
if continue_.comment_to_user:
await reply_user(
platform=task.receiver.platform,
receiver=task.receiver,
task=task,
text=continue_.comment_to_user,
)
return run_status

async def process_function_call(self, message: AbstractIncomingMessage):
"""
Expand Down Expand Up @@ -307,9 +359,6 @@ async def run_task(self, task, pending_task):
try:
await self.run_pending_task(task=task, pending_task=pending_task)
except Exception as e:
await task.task_sign.complete_task(
tool_calls=pending_task, success_or_not=False, run_result=str(e)
)
logger.error(f"Function Call Error {e}")
raise e
finally:
Expand Down
9 changes: 2 additions & 7 deletions app/receiver/receiver_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,8 @@
from loguru import logger
from telebot import formatting

from app.components.credential import Credential, global_credential
from app.components.user_manager import USER_MANAGER
from app.components import read_user_credential
from app.components.credential import global_credential
from app.middleware.llm_task import OpenaiMiddleware
from llmkira.kv_manager.env import EnvManager
from llmkira.openai import OpenaiError
Expand Down Expand Up @@ -48,11 +48,6 @@ async def get(self, user_id):
user_locks = UserLocks()


async def read_user_credential(user_id: str) -> Optional[Credential]:
user = await USER_MANAGER.read(user_id=user_id)
return user.credential


async def generate_authorization(
secrets: Dict, tool_invocation: ToolCall
) -> Tuple[dict, list, bool]:
Expand Down
26 changes: 17 additions & 9 deletions app/sender/discord/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,13 @@
__sender__ = "discord_hikari"
__default_disable_tool_action__ = False

from ..util_func import auth_reloader, is_command, is_empty_command, uid_make, login
from ..util_func import (
auth_reloader,
is_command,
is_empty_command,
uid_make,
save_credential,
)
from llmkira.openapi.trigger import get_trigger_loop
from ...components.credential import Credential, ProviderError

Expand Down Expand Up @@ -238,7 +244,7 @@ async def listen_login_url_command(
credential = Credential.from_provider(
token=token, provider_url=provider_url
)
await login(
await save_credential(
uid=uid_make(__sender__, ctx.user.id),
credential=credential,
)
Expand All @@ -264,17 +270,19 @@ async def listen_login_url_command(
)
async def listen_endpoint_command(
ctx: crescent.Context,
openai_endpoint: str,
openai_key: str,
openai_model: str,
api_endpoint: str,
api_key: str,
api_model: str,
api_tool_model: str = "gpt-3.5-turbo",
):
try:
credential = Credential(
api_endpoint=openai_endpoint,
api_key=openai_key,
api_model=openai_model,
api_endpoint=api_endpoint,
api_key=api_key,
api_model=api_model,
api_tool_model=api_tool_model,
)
await login(
await save_credential(
uid=uid_make(__sender__, ctx.user.id),
credential=credential,
)
Expand Down
26 changes: 17 additions & 9 deletions app/sender/kook/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,13 @@
__sender__ = "kook"
__default_disable_tool_action__ = False

from ..util_func import auth_reloader, is_command, is_empty_command, uid_make, login
from ..util_func import (
auth_reloader,
is_command,
is_empty_command,
uid_make,
save_credential,
)
from llmkira.openapi.trigger import get_trigger_loop
from ...components.credential import ProviderError, Credential

Expand Down Expand Up @@ -244,7 +250,7 @@ async def listen_login_url_command(
credential = Credential.from_provider(
token=token, provider_url=provider_url
)
await login(
await save_credential(
uid=uid_make(__sender__, msg.author_id),
credential=credential,
)
Expand Down Expand Up @@ -272,17 +278,19 @@ async def listen_login_url_command(
@bot.command(name="login")
async def listen_login_command(
msg: Message,
openai_endpoint: str,
openai_key: str,
openai_model: str,
api_endpoint: str,
api_key: str,
api_model: str = "gpt-3.5-turbo",
api_tool_model: str = "gpt-3.5-turbo",
):
try:
credential = Credential(
api_endpoint=openai_endpoint,
api_key=openai_key,
api_model=openai_model,
api_endpoint=api_endpoint,
api_key=api_key,
api_model=api_model,
api_tool_model=api_tool_model,
)
await login(
await save_credential(
uid=uid_make(__sender__, msg.author_id),
credential=credential,
)
Expand Down
46 changes: 4 additions & 42 deletions app/sender/slack/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,8 +39,6 @@

__sender__ = "slack"

from ...components.credential import split_setting_string, Credential, ProviderError

SlackTask = Task(queue=__sender__)
__default_disable_tool_action__ = False
__join_cache__ = {}
Expand Down Expand Up @@ -232,46 +230,10 @@ async def listen_login_command(ack: AsyncAck, respond: AsyncRespond, command):
if not command.text:
return
_arg = command.text
settings = split_setting_string(_arg)
if not settings:
return await respond(
text=convert(
"🔑 **Incorrect format.**\n"
"You can set it via `https://api.com/v1$key$model` format, "
"or you can log in via URL using `token$https://provider.com`."
),
)
if len(settings) == 2:
try:
credential = Credential.from_provider(
token=settings[0], provider_url=settings[1]
)
except ProviderError as e:
return await respond(text=f"Login failed, website return {e}")
except Exception as e:
logger.error(f"Login failed {e}")
return await respond(text=f"Login failed, because {type(e)}")
else:
await login(
uid=uid_make(__sender__, command.user_id),
credential=credential,
)
return await respond(
text="Login success as provider! Welcome master!"
)
elif len(settings) == 3:
credential = Credential(
api_endpoint=settings[0], api_key=settings[1], api_model=settings[2]
)
await login(
uid=uid_make(__sender__, command.user_id),
credential=credential,
)
return await respond(
text=f"Login success as {settings[2]}! Welcome master! "
)
else:
return logger.trace(f"Login failed {settings}")
reply = await login(
uid=uid_make(__sender__, command.user_id), arg_string=_arg
)
return await respond(text=reply)

@bot.command(command="/env")
async def listen_env_command(ack: AsyncAck, respond: AsyncRespond, command):
Expand Down
Loading

0 comments on commit b049286

Please sign in to comment.