Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Dev new memory #226

Open
wants to merge 34 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
34 commits
Select commit Hold shift + click to select a range
27e4f2e
add_trace_memory
weizjajj Nov 25, 2024
718d207
add_trace_memory
weizjajj Nov 25, 2024
dcd150d
fix knowledge trace
weizjajj Nov 26, 2024
d0832ae
support compress
weizjajj Nov 26, 2024
7448183
rename conversation
weizjajj Nov 26, 2024
807b08d
rename trace_memory to conversation_memory
weizjajj Nov 27, 2024
ae218b1
fix knowledge trace
weizjajj Nov 27, 2024
cda4899
add func support
weizjajj Nov 27, 2024
c6b030c
fix session_id
weizjajj Nov 28, 2024
9cc27af
调整存储结构
weizjajj Nov 28, 2024
d299fdd
change dir
weizjajj Nov 28, 2024
8b73890
add language info
weizjajj Nov 29, 2024
7dd2db0
暂存
weizjajj Dec 7, 2024
123da63
同步到子智能体
weizjajj Dec 10, 2024
e60acf6
融合到智能体使用当中
weizjajj Dec 11, 2024
4e59396
fix memory
weizjajj Dec 12, 2024
444a391
add summarize
weizjajj Dec 12, 2024
10d1184
add memory
weizjajj Dec 12, 2024
480442c
add memory
weizjajj Dec 16, 2024
1eed7f8
fix memory
weizjajj Dec 18, 2024
8338fc0
add max_content_length
weizjajj Dec 18, 2024
c83a5b1
Merge branch 'dev' into dev_conversation_memory
weizjajj Dec 18, 2024
c64eaee
fix
weizjajj Dec 19, 2024
ab7d576
fix query
weizjajj Dec 19, 2024
291fa1d
add additional_args
weizjajj Dec 20, 2024
0cda46b
finish memory
weizjajj Dec 25, 2024
0887032
finish memory
weizjajj Dec 25, 2024
567d991
finish memory
weizjajj Dec 30, 2024
166c1a5
Merge branch 'master' into dev_new_memory
weizjajj Dec 30, 2024
73a9bfa
add memory case
weizjajj Dec 30, 2024
426a1cf
add memory case
weizjajj Jan 2, 2025
599fc16
remove
weizjajj Jan 2, 2025
2e3bcef
fix memory
weizjajj Jan 9, 2025
fa07f36
Merge branch 'master' into dev_new_memory
weizjajj Jan 17, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
142 changes: 122 additions & 20 deletions agentuniverse/agent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,11 +6,13 @@
# @Email : [email protected]
# @FileName: agent.py
import json
import uuid
from abc import abstractmethod, ABC
from datetime import datetime
from threading import Thread
from typing import Optional, Any, List

from langchain_core.runnables import RunnableSerializable
from langchain_core.runnables import RunnableSerializable, RunnableConfig
from langchain_core.utils.json import parse_json_markdown

from agentuniverse.agent.action.knowledge.knowledge import Knowledge
Expand All @@ -26,6 +28,7 @@
from agentuniverse.agent.output_object import OutputObject
from agentuniverse.agent.plan.planner.planner import Planner
from agentuniverse.agent.plan.planner.planner_manager import PlannerManager
from agentuniverse.agent.plan.planner.react_planner.stream_callback import InvokeCallbackHandler
from agentuniverse.base.annotation.trace import trace_agent
from agentuniverse.base.component.component_base import ComponentBase
from agentuniverse.base.component.component_enum import ComponentEnum
Expand All @@ -34,8 +37,9 @@
from agentuniverse.base.config.component_configer.configers.agent_configer \
import AgentConfiger
from agentuniverse.base.util.common_util import stream_output
from agentuniverse.base.context.framework_context_manager import FrameworkContextManager
from agentuniverse.base.util.logging.logging_util import LOGGER
from agentuniverse.base.util.memory_util import generate_messages
from agentuniverse.base.util.memory_util import generate_messages, get_memory_string
from agentuniverse.llm.llm import LLM
from agentuniverse.llm.llm_manager import LLMManager
from agentuniverse.prompt.chat_prompt import ChatPrompt
Expand Down Expand Up @@ -229,14 +233,16 @@ def as_langchain_tool(self):
)

def process_llm(self, **kwargs) -> LLM:
llm_name = kwargs.get('llm_name') or self.agent_model.profile.get('llm_model', {}).get('name')
return LLMManager().get_instance_obj(llm_name)
return LLMManager().get_instance_obj(self.llm_name)

def process_memory(self, agent_input: dict, **kwargs) -> Memory | None:
memory_name = kwargs.get('memory_name') or self.agent_model.memory.get('name')
memory: Memory = MemoryManager().get_instance_obj(memory_name)
if memory is None:
memory: Memory = MemoryManager().get_instance_obj(component_instance_name=self.memory_name)
conversation_memory: Memory = MemoryManager().get_instance_obj(
component_instance_name=self.conversation_memory_name)
if memory is None and conversation_memory is None:
return None
if memory is None:
memory = conversation_memory

chat_history: list = agent_input.get('chat_history')
# generate a list of temporary messages from the given chat history and add them to the memory instance.
Expand All @@ -245,16 +251,16 @@ def process_memory(self, agent_input: dict, **kwargs) -> Memory | None:
memory.add(temporary_messages, **agent_input)

params: dict = dict()
params['agent_llm_name'] = kwargs.get('llm_name') or self.agent_model.profile.get('llm_model', {}).get('name')
params['agent_llm_name'] = self.llm_name
return memory.set_by_agent_model(**params)

def invoke_chain(self, chain: RunnableSerializable[Any, str], agent_input: dict, input_object: InputObject,
**kwargs):
if not input_object.get_data('output_stream'):
res = chain.invoke(input=agent_input)
res = chain.invoke(input=agent_input, config=self.get_run_config())
return res
result = []
for token in chain.stream(input=agent_input):
for token in chain.stream(input=agent_input, config=self.get_run_config()):
stream_output(input_object.get_data('output_stream', None), {
'type': 'token',
'data': {
Expand All @@ -268,10 +274,10 @@ def invoke_chain(self, chain: RunnableSerializable[Any, str], agent_input: dict,
async def async_invoke_chain(self, chain: RunnableSerializable[Any, str], agent_input: dict,
input_object: InputObject, **kwargs):
if not input_object.get_data('output_stream'):
res = await chain.ainvoke(input=agent_input)
res = await chain.ainvoke(input=agent_input, config=self.get_run_config())
return res
result = []
async for token in chain.astream(input=agent_input):
async for token in chain.astream(input=agent_input, config=self.get_run_config()):
stream_output(input_object.get_data('output_stream', None), {
'type': 'token',
'data': {
Expand All @@ -283,13 +289,12 @@ async def async_invoke_chain(self, chain: RunnableSerializable[Any, str], agent_
return "".join(result)

def invoke_tools(self, input_object: InputObject, **kwargs) -> str:
tool_names = kwargs.get('tool_names') or self.agent_model.action.get('tool', [])
if not tool_names:
if not self.tool_names:
return ''

tool_results: list = list()

for tool_name in tool_names:
for tool_name in self.tool_names:
tool: Tool = ToolManager().get_instance_obj(tool_name)
if tool is None:
continue
Expand All @@ -298,13 +303,12 @@ def invoke_tools(self, input_object: InputObject, **kwargs) -> str:
return "\n\n".join(tool_results)

def invoke_knowledge(self, query_str: str, input_object: InputObject, **kwargs) -> str:
knowledge_names = kwargs.get('knowledge_names') or self.agent_model.action.get('knowledge', [])
if not knowledge_names or not query_str:
if not self.knowledge_names or not query_str:
return ''

knowledge_results: list = list()

for knowledge_name in knowledge_names:
for knowledge_name in self.knowledge_names:
knowledge: Knowledge = KnowledgeManager().get_instance_obj(knowledge_name)
if knowledge is None:
continue
Expand All @@ -328,8 +332,7 @@ def process_prompt(self, agent_input: dict, **kwargs) -> ChatPrompt:
instruction=profile_instruction)

# get the prompt by the prompt version
prompt_version = kwargs.get('prompt_version') or self.agent_model.profile.get('prompt_version')
version_prompt: Prompt = PromptManager().get_instance_obj(prompt_version)
version_prompt: Prompt = PromptManager().get_instance_obj(self.prompt_version)

if version_prompt is None and not profile_prompt_model:
raise Exception("Either the `prompt_version` or `introduction & target & instruction`"
Expand All @@ -346,3 +349,102 @@ def process_prompt(self, agent_input: dict, **kwargs) -> ChatPrompt:
if image_urls:
chat_prompt.generate_image_prompt(image_urls)
return chat_prompt

def get_memory_params(self, agent_input: dict) -> dict:
    """Build the keyword arguments used to query this agent's memory.

    The session id is taken from `agent_input`, falling back to the framework
    context; the agent id defaults to this agent's configured name but may be
    overridden by an explicit `agent_id` key in the memory configuration.

    Args:
        agent_input: The agent input dict; `session_id` and `input` are read.

    Returns:
        dict: Parameters (`session_id`, `agent_id`, `prune`, `top_k`, and
        optionally `memory_types`, `input`, `type`) for `Memory.get`.
    """
    memory_conf = self.agent_model.memory
    session_id = agent_input.get('session_id') \
        or FrameworkContextManager().get_context('session_id')
    # An explicit agent_id in the memory config wins over the agent's own name.
    agent_id = memory_conf['agent_id'] if 'agent_id' in memory_conf \
        else self.agent_model.info.get('name')
    params = {
        'session_id': session_id,
        'agent_id': agent_id,
        'prune': memory_conf.get('prune', False),
        'top_k': memory_conf.get('top_k', 20),
    }
    memory_types = memory_conf.get('memory_types')
    if memory_types:
        params['memory_types'] = memory_types
    user_input = agent_input.get('input')
    if user_input:
        params['input'] = user_input
    # When only a conversation memory is configured (no dedicated memory name),
    # restrict retrieval to plain input/output messages.
    if memory_conf.get('conversation_memory') and not memory_conf.get('name'):
        params['type'] = ['input', 'output']
    return params

def get_run_config(self, **kwargs) -> dict:
    """Assemble the LangChain RunnableConfig for this agent's chain calls.

    Attaches a single InvokeCallbackHandler tagged with this agent's name and
    its LLM name so invocations can be traced back to their source.
    """
    handler = InvokeCallbackHandler(
        source=self.agent_model.info.get('name'),
        llm_name=self.llm_name,
    )
    return RunnableConfig(callbacks=[handler])

def collect_current_memory(self, collect_type: str) -> bool:
    """Decide whether messages of *collect_type* should be auto-collected.

    Collection is disabled entirely when the memory config sets
    `auto_trace: false`; otherwise it is allowed unless an explicit
    `collection_types` whitelist exists that excludes *collect_type*.
    """
    memory_conf = self.agent_model.memory
    if not memory_conf.get('auto_trace', True):
        return False
    allowed_types = memory_conf.get('collection_types')
    return not allowed_types or collect_type in allowed_types

def load_memory(self, memory, agent_input: dict):
    """Fetch chat history from *memory* and inject it into *agent_input*.

    The formatted history string is stored under `memory.memory_key` so the
    prompt template can reference it.

    Returns:
        str: The formatted history, or a fixed placeholder when no memory
        instance is configured.
    """
    if not memory:
        return "Up to Now, No Chat History"
    params = self.get_memory_params(agent_input)
    LOGGER.info(f"Load memory with params: {params}")
    messages = memory.get(**params)
    history_str = get_memory_string(messages, agent_input.get('agent_id'))
    agent_input[memory.memory_key] = history_str
    return history_str

def add_memory(self, memory: Memory, content: Any, type: str = 'Q&A',
               agent_input: dict[str, Any] | None = None):
    """Persist one message produced by this agent into *memory*.

    Args:
        memory: Target memory instance; the call is a no-op when falsy.
        content: Message payload; non-string content is JSON-serialized
            with `ensure_ascii=False`.
        type: Message type label, stored on the message and in its metadata.
        agent_input: Optional agent input; only `session_id` is read, with a
            fallback to the framework context. Defaults to None rather than a
            mutable `{}` to avoid the shared-default-argument pitfall.
    """
    if not memory:
        return
    agent_input = agent_input or {}
    session_id = agent_input.get('session_id') \
        or FrameworkContextManager().get_context('session_id')
    agent_id = self.agent_model.info.get('name')
    # Capture a single timestamp so 'timestamp' and 'gmt_created' agree.
    now = datetime.now()
    message = Message(id=uuid.uuid4().hex,
                      source=agent_id,
                      content=content if isinstance(content, str)
                      else json.dumps(content, ensure_ascii=False),
                      type=type,
                      metadata={
                          'agent_id': agent_id,
                          'session_id': session_id,
                          'type': type,
                          'timestamp': now,
                          'gmt_created': now.isoformat()
                      })
    memory.add([message], session_id=session_id, agent_id=agent_id)

def summarize_memory(self, agent_input: dict[str, Any] | None = None,
                     memory: Memory = None):
    """Summarize *memory* in a background thread and store the result.

    The summary is written back into the memory as a message with
    type='summarize'. No-op when *memory* is falsy.

    Args:
        agent_input: Optional agent input used to build memory query params.
            Defaults to None rather than a mutable `{}` to avoid the
            shared-default-argument pitfall.
        memory: The memory instance to summarize.
    """
    if not memory:
        return
    params = self.get_memory_params(agent_input or {})

    def _do_summarize(summary_params: dict):
        # Runs on a worker thread: produce the summary, then persist it.
        content = memory.summarize_memory(**summary_params)
        memory.add([
            Message(
                id=uuid.uuid4().hex,
                source=self.agent_model.info.get('name'),
                content=content,
                type='summarize'
            )
        ], session_id=summary_params['session_id'],
            agent_id=summary_params['agent_id'])

    Thread(target=_do_summarize, args=(params,)).start()

def load_summarize_memory(self, memory: Memory,
                          agent_input: dict[str, Any] | None = None) -> str:
    """Return the most recent 'summarize' message stored in *memory*.

    Args:
        memory: The memory instance to query; falsy means no memory configured.
        agent_input: Optional agent input used to build memory query params.
            Defaults to None rather than a mutable `{}` to avoid the
            shared-default-argument pitfall.

    Returns:
        str: Content of the latest summary message, or a fixed placeholder
        when no memory or no summary exists.
    """
    no_summary = "Up to Now, No Summarize Memory"
    if not memory:
        return no_summary
    params = self.get_memory_params(agent_input or {})
    params['type'] = 'summarize'
    memory_messages = memory.get(**params)
    if not memory_messages:
        return no_summary
    return memory_messages[-1].content
15 changes: 15 additions & 0 deletions agentuniverse/agent/default/rag_agent/memory_summarize_agent.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# Agent definition for the memory-summarization agent.
# NOTE(review): indentation appears flattened in this diff view — confirm
# nesting against the original file.
info:
name: 'memory_summarize_agent'
description: 'memory summarize agent'
profile:
prompt_version: 'memory_summarize_cn_prompt'
llm_model:
name: 'deep_seek_llm'
# model_name: 'qwen2.5-72b-instruct'
temperature: 0.7
memory:
# auto_trace: false keeps this agent's own calls out of automatic memory
# collection (see Agent.collect_current_memory, which reads this flag).
auto_trace: false
metadata:
type: 'AGENT'
module: 'agentuniverse.agent.template.default_summarize_agent_template'
class: 'SummarizeRagAgentTemplate'
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# Chinese prompt ('memory_summarize_cn_prompt') used by memory_summarize_agent
# to condense chat history. Placeholders: {input} = prior chat records,
# {summarize_content} = the result of the previous summarization pass.
# NOTE(review): indentation appears flattened in this diff view; no comments
# are added further down to avoid corrupting the `instruction: |` block scalar.
introduction: 你是一位精通信息分析的ai助手。
target: 请仅基于以下聊天记录的内容,提供一份精简且准确的总结。总结应包含聊天的主要议题和结论,不要包括与总结无关的信息或评论。
instruction: |
请分析以下聊天记录,并基于先前的记忆概要。提取并回答以下问题:

1. 用户的主要意向是什么?
2. 之前聊天的内容摘要是什么?

回答要求:
- 对于每个问题,提供一个简洁明了的答案。
- 在概括聊天内容时,请确保包括关键点和讨论的主题,但不需要逐字重复。
- 总结应聚焦于信息的核心,而不是对话中的每一个细节。
- 确保意向的提取直接反映了用户的意图,而不过度解释或推测。

之前的聊天记录:
{input}

上一次总结的结果:
{summarize_content}

metadata:
type: 'PROMPT'
version: 'memory_summarize_cn_prompt'
Empty file.
Loading