Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: Integrate siliconflow model #1541

Merged
merged 10 commits into from
Feb 5, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .github/workflows/build_package.yml
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,7 @@ jobs:
DISCORD_BOT_TOKEN: "${{ secrets.DISCORD_BOT_TOKEN }}"
INTERNLM_API_KEY: "${{ secrets.INTERNLM_API_KEY }}"
JINA_API_KEY: "${{ secrets.JINA_API_KEY }}"
SILICONFLOW_API_KEY: "${{ secrets.SILICONFLOW_API_KEY }}"
MOONSHOT_API_KEY: "${{ secrets.MOONSHOT_API_KEY }}"
run: |
source venv/bin/activate
Expand Down
2 changes: 2 additions & 0 deletions .github/workflows/pytest_apps.yml
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ jobs:
COHERE_API_KEY: "${{ secrets.COHERE_API_KEY }}"
INTERNLM_API_KEY: "${{ secrets.INTERNLM_API_KEY }}"
MOONSHOT_API_KEY: "${{ secrets.MOONSHOT_API_KEY }}"
SILICONFLOW_API_KEY: "${{ secrets.SILICONFLOW_API_KEY }}"
run: poetry run pytest -v apps/

pytest_examples:
Expand All @@ -51,4 +52,5 @@ jobs:
COHERE_API_KEY: "${{ secrets.COHERE_API_KEY }}"
INTERNLM_API_KEY: "${{ secrets.INTERNLM_API_KEY }}"
MOONSHOT_API_KEY: "${{ secrets.MOONSHOT_API_KEY }}"
SILICONFLOW_API_KEY: "${{ secrets.SILICONFLOW_API_KEY }}"
run: poetry run pytest -v examples/
2 changes: 2 additions & 0 deletions .github/workflows/pytest_package.yml
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,7 @@ jobs:
INTERNLM_API_KEY: "${{ secrets.INTERNLM_API_KEY }}"
JINA_API_KEY: "${{ secrets.JINA_API_KEY }}"
MOONSHOT_API_KEY: "${{ secrets.MOONSHOT_API_KEY }}"
SILICONFLOW_API_KEY: "${{ secrets.SILICONFLOW_API_KEY }}"
run: poetry run pytest --fast-test-mode test/

pytest_package_llm_test:
Expand Down Expand Up @@ -109,6 +110,7 @@ jobs:
INTERNLM_API_KEY: "${{ secrets.INTERNLM_API_KEY }}"
JINA_API_KEY: "${{ secrets.JINA_API_KEY }}"
MOONSHOT_API_KEY: "${{ secrets.MOONSHOT_API_KEY }}"
SILICONFLOW_API_KEY: "${{ secrets.SILICONFLOW_API_KEY }}"
run: poetry run pytest --llm-test-only test/

pytest_package_very_slow_test:
Expand Down
3 changes: 3 additions & 0 deletions camel/configs/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@
SambaVerseAPIConfig,
)
from .sglang_config import SGLANG_API_PARAMS, SGLangConfig
from .siliconflow_config import SILICONFLOW_API_PARAMS, SiliconFlowConfig
from .togetherai_config import TOGETHERAI_API_PARAMS, TogetherAIConfig
from .vllm_config import VLLM_API_PARAMS, VLLMConfig
from .yi_config import YI_API_PARAMS, YiConfig
Expand Down Expand Up @@ -82,4 +83,6 @@
'INTERNLM_API_PARAMS',
'MoonshotConfig',
"MOONSHOT_API_PARAMS",
'SiliconFlowConfig',
'SILICONFLOW_API_PARAMS',
]
91 changes: 91 additions & 0 deletions camel/configs/siliconflow_config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,91 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from __future__ import annotations

from typing import Any, Sequence, Type, Union

from pydantic import BaseModel

from camel.configs.base_config import BaseConfig
from camel.types import NOT_GIVEN, NotGiven


class SiliconFlowConfig(BaseConfig):
    r"""Defines the parameters for generating chat completions using the
    SiliconFlow API.

    Args:
        temperature (float, optional): Determines the degree of randomness
            in the response. (default: :obj:`0.7`)
        top_p (float, optional): The top_p (nucleus) parameter is used to
            dynamically adjust the number of choices for each predicted token
            based on the cumulative probabilities. (default: :obj:`0.7`)
        n (int, optional): Number of generations to return.
            (default: :obj:`1`)
        response_format (object, optional): An object specifying the format
            that the model must output.
        stream (bool, optional): If set, tokens are returned as Server-Sent
            Events as they are made available. (default: :obj:`False`)
        stop (str or list, optional): Up to :obj:`4` sequences where the API
            will stop generating further tokens. (default: :obj:`None`)
        max_tokens (int, optional): The maximum number of tokens to generate.
            (default: :obj:`None`)
        frequency_penalty (float, optional): Number between :obj:`-2.0` and
            :obj:`2.0`. Positive values penalize new tokens based on their
            existing frequency in the text so far, decreasing the model's
            likelihood to repeat the same line verbatim. See more information
            about frequency and presence penalties. (default: :obj:`0.0`)
        tools (list[FunctionTool], optional): A list of tools the model may
            call. Currently, only functions are supported as a tool. Use this
            to provide a list of functions the model may generate JSON inputs
            for. A max of 128 functions are supported.
    """

    temperature: float = 0.7
    top_p: float = 0.7
    n: int = 1
    stream: bool = False
    stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
    max_tokens: Union[int, NotGiven] = NOT_GIVEN
    response_format: Union[Type[BaseModel], dict, NotGiven] = NOT_GIVEN
    frequency_penalty: float = 0.0

    def as_dict(self) -> dict[str, Any]:
        r"""Convert the current configuration to a dictionary.

        This method converts the current configuration object to a dictionary
        representation, which can be used for serialization or other purposes.

        Returns:
            dict[str, Any]: A dictionary representation of the current
                configuration.

        Raises:
            ValueError: If any entry in ``tools`` is not a
                :obj:`FunctionTool` instance.
        """
        config_dict = self.model_dump()
        if self.tools:
            # Imported lazily to avoid a circular import at module load time.
            from camel.toolkits import FunctionTool

            # Validate tool types only; the OpenAI schemas are not built
            # here because they are never placed in the payload below.
            for tool in self.tools:
                if not isinstance(tool, FunctionTool):
                    raise ValueError(
                        f"The tool {tool} should "
                        "be an instance of `FunctionTool`."
                    )
            # NOTE(review): `tools` is reset to NOT_GIVEN so it is omitted
            # from the request payload; tool schemas appear to be supplied
            # through a separate code path — TODO confirm this is intended.
            config_dict["tools"] = NOT_GIVEN
        return config_dict


# Names of every keyword argument accepted by the SiliconFlow chat API,
# derived directly from the fields declared on `SiliconFlowConfig`.
SILICONFLOW_API_PARAMS = set(SiliconFlowConfig.model_fields.keys())
7 changes: 6 additions & 1 deletion camel/embeddings/jina_embedding.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,11 @@ class JinaEmbedding(BaseEmbedding[Union[str, Image.Image]]):
Jina AI. (default: :obj:`None`)
dimensions (Optional[int], optional): The dimension of the output
embeddings. (default: :obj:`None`)
embedding_type (Optional[str], optional): The type of embedding format
to generate. Options: 'int8' (binary encoding with higher storage
and transfer efficiency), 'uint8' (unsigned binary encoding with
higher storage and transfer efficiency), 'base64' (base64 string
encoding with higher transfer efficiency). (default: :obj:`None`)
task (Optional[str], optional): The type of task for text embeddings.
Options: retrieval.query, retrieval.passage, text-matching,
classification, separation. (default: :obj:`None`)
Expand Down Expand Up @@ -120,7 +125,7 @@ def embed_list(
else:
raise ValueError(
f"Input type {type(obj)} is not supported. "
"Must be either str or PIL.Image"
"Must be either str or PIL.Image."
)

data = {
Expand Down
3 changes: 3 additions & 0 deletions camel/models/model_factory.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@
from camel.models.reka_model import RekaModel
from camel.models.samba_model import SambaModel
from camel.models.sglang_model import SGLangModel
from camel.models.siliconflow_model import SiliconFlowModel
from camel.models.stub_model import StubModel
from camel.models.togetherai_model import TogetherAIModel
from camel.models.vllm_model import VLLMModel
Expand Down Expand Up @@ -101,6 +102,8 @@ def create(
model_class = LiteLLMModel
elif model_platform.is_nvidia:
model_class = NvidiaModel
elif model_platform.is_siliconflow:
model_class = SiliconFlowModel

elif model_platform.is_openai and model_type.is_openai:
model_class = OpenAIModel
Expand Down
142 changes: 142 additions & 0 deletions camel/models/siliconflow_model.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,142 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
import os
from typing import Any, Dict, List, Optional, Union

from openai import OpenAI, Stream

from camel.configs import SILICONFLOW_API_PARAMS, SiliconFlowConfig
from camel.messages import OpenAIMessage
from camel.models import BaseModelBackend
from camel.types import (
ChatCompletion,
ChatCompletionChunk,
ModelType,
)
from camel.utils import (
BaseTokenCounter,
OpenAITokenCounter,
api_keys_required,
)


class SiliconFlowModel(BaseModelBackend):
    r"""SiliconFlow API in a unified BaseModelBackend interface.

    Args:
        model_type (Union[ModelType, str]): Model for which a backend is
            created.
        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
            that will be fed into OpenAI client. If :obj:`None`,
            :obj:`SiliconFlowConfig().as_dict()` will be used.
            (default: :obj:`None`)
        api_key (Optional[str], optional): The API key for authenticating with
            the SiliconFlow service. (default: :obj:`None`)
        url (Optional[str], optional): The URL to the SiliconFlow service. If
            not provided, :obj:`https://api.siliconflow.cn/v1/` will be used.
            (default: :obj:`None`)
        token_counter (Optional[BaseTokenCounter], optional): Token counter to
            use for the model. If not provided, :obj:`OpenAITokenCounter(
            ModelType.GPT_4O_MINI)` will be used.
            (default: :obj:`None`)
    """

    @api_keys_required(
        [
            ("api_key", 'SILICONFLOW_API_KEY'),
        ]
    )
    def __init__(
        self,
        model_type: Union[ModelType, str],
        model_config_dict: Optional[Dict[str, Any]] = None,
        api_key: Optional[str] = None,
        url: Optional[str] = None,
        token_counter: Optional[BaseTokenCounter] = None,
    ) -> None:
        # Fall back to the default SiliconFlow configuration when the caller
        # supplies no explicit config.
        config = (
            SiliconFlowConfig().as_dict()
            if model_config_dict is None
            else model_config_dict
        )
        resolved_key = api_key or os.environ.get("SILICONFLOW_API_KEY")
        resolved_url = url or os.environ.get(
            "SILICONFLOW_API_BASE_URL",
            "https://api.siliconflow.cn/v1/",
        )
        super().__init__(
            model_type, config, resolved_key, resolved_url, token_counter
        )
        # SiliconFlow exposes an OpenAI-compatible endpoint, so the stock
        # OpenAI client is reused against the SiliconFlow base URL.
        self._client = OpenAI(
            timeout=180,
            max_retries=3,
            api_key=self._api_key,
            base_url=self._url,
        )

    def run(
        self,
        messages: List[OpenAIMessage],
    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
        r"""Runs inference of SiliconFlow chat completion.

        Args:
            messages (List[OpenAIMessage]): Message list with the chat
                history in OpenAI API format.

        Returns:
            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                `ChatCompletion` in the non-stream mode, or
                `Stream[ChatCompletionChunk]` in the stream mode.
        """
        return self._client.chat.completions.create(
            model=self.model_type,
            messages=messages,
            **self.model_config_dict,
        )

    @property
    def token_counter(self) -> BaseTokenCounter:
        r"""Initialize the token counter for the model backend.

        Returns:
            BaseTokenCounter: The token counter following the model's
                tokenization style.
        """
        # Lazily build a GPT-4o-mini counter the first time it is requested.
        self._token_counter = self._token_counter or OpenAITokenCounter(
            ModelType.GPT_4O_MINI
        )
        return self._token_counter

    def check_model_config(self):
        r"""Check whether the model configuration contains any
        unexpected arguments to SiliconFlow API.

        Raises:
            ValueError: If the model configuration dictionary contains any
                unexpected arguments to SiliconFlow API.
        """
        for name in self.model_config_dict:
            if name in SILICONFLOW_API_PARAMS:
                continue
            raise ValueError(
                f"Unexpected argument `{name}` is "
                "input into SiliconFlow model backend."
            )

    @property
    def stream(self) -> bool:
        r"""Returns whether the model is in stream mode, which sends partial
        results each time.

        Returns:
            bool: Whether the model is in stream mode.
        """
        return self.model_config_dict.get("stream", False)
Loading
Loading