Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: Add multi agent debate #1491

Open
wants to merge 3 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
103 changes: 102 additions & 1 deletion apps/agents/agents.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@
from camel.agents import TaskSpecifyAgent
from camel.logger import get_logger
from camel.messages import BaseMessage
from camel.societies import RolePlaying
from camel.societies import RolePlaying, MultiAgentDebate
from camel.types import TaskType

REPO_ROOT = os.path.realpath(
Expand Down Expand Up @@ -394,6 +394,107 @@ def stop_session(state: State) -> Tuple[State, Dict, Dict]:
return state, gr.update(visible=False), gr.update(interactive=True)


def multi_agent_debate(
    state: State,
    society_name: str,
    agents: List[str],
    original_task: str,
    max_messages: float,
    with_task_specifier: bool,
    word_limit: int,
    language: str,
) -> Union[Dict, Tuple[State, str, Union[str, Dict], ChatBotHistory, Dict]]:
    """Creates a multi-agent debate session.

    Args:
        state (State): Role playing state.
        society_name (str): Name of the society; must be either
            ``"AI Society"`` or ``"Code"``.
        agents (List[str]): List of agent roles. For the ``"Code"`` society
            the first element is interpreted as the programming language and
            the second as the domain.
        original_task (str): Original task field.
        max_messages (float): Maximum number of messages in the session
            (coerced to ``int`` before use).
        with_task_specifier (bool): Enable/Disable task specifier.
        word_limit (int): Limit of words for task specifier.
        language (str): Output language for the agents.

    Returns:
        Union[Dict, Tuple[State, str, Union[str, Dict], ChatBotHistory, Dict]]:
        - Updated state.
        - Generated specified task.
        - Planned task (if any).
        - Chatbot window contents.
        - Progress bar contents.
    """

    # Guard against double submission while a session is already running.
    if state.session is not None:
        logger.info("Double click")
        return {}  # may fail

    if society_name not in {"AI Society", "Code"}:
        logger.error(f"Unrecognized society {society_name}")
        return {}

    meta_dict: Optional[Dict[str, str]]
    extend_sys_msg_meta_dicts: Optional[List[Dict]]
    task_type: TaskType
    if society_name == "AI Society":
        meta_dict = None
        extend_sys_msg_meta_dicts = None
        task_type = TaskType.AI_SOCIETY
    else:  # "Code"
        # For the Code society the two leading agent entries carry the
        # programming language and domain, and both roles become programmers.
        meta_dict = {"language": agents[0], "domain": agents[1]}
        extend_sys_msg_meta_dicts = [meta_dict, meta_dict]
        agents = [f"{agent} Programmer" for agent in agents]
        task_type = TaskType.CODE

    try:
        task_specify_kwargs = (
            dict(word_limit=word_limit) if with_task_specifier else None
        )

        # NOTE(review): the visible MultiAgentDebate.__init__ only accepts
        # (agent_configs, task_prompt, max_turns, output_language); confirm
        # it supports the task-specifier keywords passed here.
        session = MultiAgentDebate(
            agents,
            task_prompt=original_task,
            with_task_specify=with_task_specifier,
            task_specify_agent_kwargs=task_specify_kwargs,
            with_task_planner=False,
            task_type=task_type,
            extend_sys_msg_meta_dicts=extend_sys_msg_meta_dicts,
            extend_task_specify_meta_dict=meta_dict,
            output_language=language,
        )
    except (openai.RateLimitError, RuntimeError) as ex:
        logger.info("OpenAI API exception 0 " + str(ex))
        return (state, str(ex), "", [], gr.update())

    State.construct_inplace(state, session, int(max_messages), [], None)

    specified_task_prompt = (
        session.specified_task_prompt
        if session.specified_task_prompt is not None
        else ""
    )
    planned_task_prompt = (
        session.planned_task_prompt
        if session.planned_task_prompt is not None
        else ""
    )

    # Only show the planned-task widget when a plan was actually produced.
    planned_task_upd = gr.update(
        value=planned_task_prompt,
        visible=session.planned_task_prompt is not None,
    )

    progress_update = gr.update(
        maximum=state.max_messages, value=1, visible=True
    )

    return (
        state,
        specified_task_prompt,
        planned_task_upd,
        state.chat,
        progress_update,
    )


def construct_ui(blocks, api_key: Optional[str] = None) -> None:
"""Build Gradio UI and populate with topics.

Expand Down
114 changes: 114 additions & 0 deletions camel/agents/chat_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -1537,3 +1537,117 @@ def __repr__(self) -> str:
return (
f"ChatAgent({self.role_name}, {self.role_type}, {self.model_type})"
)


class DebateAgent(ChatAgent):
    r"""A CAMEL chat agent specialized for running debates between agents.

    Args:
        system_message (Union[BaseMessage, str], optional): The system message
            for the chat agent.
        model (BaseModelBackend, optional): The model backend to use for
            generating responses. (default: :obj:`ModelPlatformType.DEFAULT`
            with `ModelType.DEFAULT`)
        memory (AgentMemory, optional): The agent memory for managing chat
            messages. If `None`, a :obj:`ChatHistoryMemory` will be used.
            (default: :obj:`None`)
        message_window_size (int, optional): The maximum number of previous
            messages to include in the context window. If `None`, no windowing
            is performed. (default: :obj:`None`)
        token_limit (int, optional): The maximum number of tokens in a context.
            The context will be automatically pruned to fulfill the limitation.
            If `None`, it will be set according to the backend model.
            (default: :obj:`None`)
        output_language (str, optional): The language to be output by the
            agent. (default: :obj:`None`)
        tools (Optional[List[Union[FunctionTool, Callable]]], optional): List
            of available :obj:`FunctionTool` or :obj:`Callable`. (default:
            :obj:`None`)
        external_tools (Optional[List[Union[FunctionTool, Callable]]],
            optional): List of external tools (:obj:`FunctionTool` or
            :obj:`Callable`) bind to one chat agent. When these tools are
            called, the agent will directly return the request instead of
            processing it. (default: :obj:`None`)
        response_terminators (List[ResponseTerminator], optional): List of
            :obj:`ResponseTerminator` bind to one chat agent.
            (default: :obj:`None`)
        scheduling_strategy (str): name of function that defines how to select
            the next model in ModelManager. (default: :str:`round_robin`)
        single_iteration (bool): Whether to let the agent perform only one
            model calling at each step. (default: :obj:`False`)
    """

    # Configuration keys forwarded from each agent config dict to ChatAgent.
    _AGENT_CONFIG_KEYS = (
        "system_message",
        "model",
        "memory",
        "message_window_size",
        "token_limit",
        "output_language",
        "tools",
        "external_tools",
        "response_terminators",
        "scheduling_strategy",
        "single_iteration",
    )

    def __init__(
        self,
        system_message: Optional[Union[BaseMessage, str]] = None,
        model: Optional[
            Union[BaseModelBackend, List[BaseModelBackend]]
        ] = None,
        memory: Optional[AgentMemory] = None,
        message_window_size: Optional[int] = None,
        token_limit: Optional[int] = None,
        output_language: Optional[str] = None,
        tools: Optional[List[Union[FunctionTool, Callable]]] = None,
        external_tools: Optional[List[Union[FunctionTool, Callable]]] = None,
        response_terminators: Optional[List[ResponseTerminator]] = None,
        scheduling_strategy: str = "round_robin",
        single_iteration: bool = False,
    ) -> None:
        super().__init__(
            system_message=system_message,
            model=model,
            memory=memory,
            message_window_size=message_window_size,
            token_limit=token_limit,
            output_language=output_language,
            tools=tools,
            external_tools=external_tools,
            response_terminators=response_terminators,
            scheduling_strategy=scheduling_strategy,
            single_iteration=single_iteration,
        )

    def initialize_agents(self, agent_configs: List[Dict[str, Any]]) -> None:
        r"""Initializes multiple agents for the debate.

        Args:
            agent_configs (List[Dict[str, Any]]): A list of dictionaries
                containing agent configurations.
        """
        # Build one ChatAgent per config; absent keys default to None,
        # matching ChatAgent's own defaults resolution.
        self.agents = [
            ChatAgent(**{key: cfg.get(key) for key in self._AGENT_CONFIG_KEYS})
            for cfg in agent_configs
        ]

    def handle_turns(self) -> List[BaseMessage]:
        r"""Handles turns for multiple agents in the debate.

        Returns:
            List[BaseMessage]: A list of messages from the agents.
        """
        # Each agent takes exactly one turn; collect the first message
        # from every response.
        return [
            agent.step("Your turn to debate.").msgs[0]
            for agent in self.agents
        ]

    def determine_winner(self) -> Optional[ChatAgent]:
        r"""Determines the winner of the debate.

        Returns:
            Optional[ChatAgent]: The winning agent, if any.
        """
        # NOTE(review): winner selection is not implemented yet; this stub
        # always reports no winner.
        return None
58 changes: 58 additions & 0 deletions camel/agents/critic_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -200,3 +200,61 @@ def reduce_step(
terminated=False,
info={},
)

def score_debate_round(self, user_output: str) -> int:
r"""Scores each round of user output during the debate.

Args:
user_output (str): The output from the user in the current round.

Returns:
int: The score for the current round.
"""
# Implement your scoring logic here
score = 0
if "helpful" in user_output:
score += 1
if "task completion" in user_output:
score += 1
return score

def save_score_as_reward(self, score: int, json_file: str) -> None:
r"""Saves the score as a reward in the JSON file.

Args:
score (int): The score to be saved.
json_file (str): The path to the JSON file.
"""
import json

with open(json_file, "r") as file:
data = json.load(file)

if "rewards" not in data:
data["rewards"] = []

data["rewards"].append(score)

with open(json_file, "w") as file:
json.dump(data, file, indent=4)

def generate_output(self, user_question: str, current_content: str) -> str:
r"""Generates output through LLM based on the overall user question and current content.

Args:
user_question (str): The overall user question.
current_content (str): The current content up to now.

Returns:
str: The generated output.
"""
input_message = BaseMessage(
role_name="user",
role_type="user",
meta_dict={},
content=f"Question: {user_question}\nContent: {current_content}",
)
response = self.step(input_message)
if response.msgs is None or len(response.msgs) == 0:
raise RuntimeError("Got None output messages.")
return response.msgs[0].content
72 changes: 72 additions & 0 deletions camel/societies/role_playing.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
CriticAgent,
TaskPlannerAgent,
TaskSpecifyAgent,
DebateAgent,
)
from camel.generators import SystemMessageGenerator
from camel.human import Human
Expand Down Expand Up @@ -549,3 +550,74 @@ def step(
info=user_response.info,
),
)


class MultiAgentDebate:
    r"""Manages a debate session between multiple agents.

    Args:
        agent_configs (List[Dict[str, Any]]): A list of dictionaries
            containing agent configurations.
        task_prompt (str): The task prompt for the debate.
        max_turns (int): The maximum number of turns for the debate.
        output_language (str, optional): The language to be output by the
            agents. (default: :obj:`None`)
    """

    # Configuration keys forwarded from each agent config to DebateAgent.
    _AGENT_CONFIG_KEYS = (
        "system_message",
        "model",
        "memory",
        "message_window_size",
        "token_limit",
        "output_language",
        "tools",
        "external_tools",
        "response_terminators",
        "scheduling_strategy",
        "single_iteration",
    )

    def __init__(
        self,
        agent_configs: List[Dict[str, Any]],
        task_prompt: str,
        max_turns: int,
        output_language: Optional[str] = None,
    ) -> None:
        # Debate settings are stored first; initialize_agents then
        # populates self.agents from the configs.
        self.agents = []
        self.task_prompt = task_prompt
        self.max_turns = max_turns
        self.output_language = output_language
        self.initialize_agents(agent_configs)

    def initialize_agents(self, agent_configs: List[Dict[str, Any]]) -> None:
        r"""Initializes multiple agents for the debate.

        Args:
            agent_configs (List[Dict[str, Any]]): A list of dictionaries
                containing agent configurations.
        """
        # One DebateAgent per config; absent keys default to None, letting
        # DebateAgent resolve its own defaults.
        self.agents = [
            DebateAgent(
                **{key: cfg.get(key) for key in self._AGENT_CONFIG_KEYS}
            )
            for cfg in agent_configs
        ]

    def handle_turns(self) -> List[BaseMessage]:
        r"""Handles turns for multiple agents in the debate.

        Returns:
            List[BaseMessage]: A list of messages from the agents.
        """
        # Each agent takes one turn; collect the first message per response.
        return [
            agent.step("Your turn to debate.").msgs[0]
            for agent in self.agents
        ]

    def determine_winner(self) -> Optional[DebateAgent]:
        r"""Determines the winner of the debate.

        Returns:
            Optional[DebateAgent]: The winning agent, if any.
        """
        # NOTE(review): winner selection is not implemented yet; this stub
        # always reports no winner.
        return None
13 changes: 13 additions & 0 deletions docs/camel.societies.rst
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,19 @@ camel.societies.role\_playing module
:undoc-members:
:show-inheritance:

Classes
-------

.. autoclass:: camel.societies.role_playing.RolePlaying
:members:
:undoc-members:
:show-inheritance:

.. autoclass:: camel.societies.role_playing.MultiAgentDebate
:members:
:undoc-members:
:show-inheritance:

Subpackages
-----------

Expand Down
Loading
Loading