feat: Add DeepSeek AI provider support to Eliza #2067

Merged · 9 commits · Jan 11, 2025

Changes from all commits
61 changes: 34 additions & 27 deletions .env.example
@@ -100,32 +100,32 @@ MEDIUM_HYPERBOLIC_MODEL= # Default: meta-llama/Meta-Llama-3.1-70B-Instruct
LARGE_HYPERBOLIC_MODEL= # Default: meta-llama/Meta-Llama-3.1-405B-Instruct

# Infera Configuration
INFERA_API_KEY= # visit api.infera.org/docs to obtain an API key under /signup_user
INFERA_MODEL= # Default: llama3.2:latest
INFERA_SERVER_URL= # Default: https://api.infera.org/
SMALL_INFERA_MODEL= #Recommended: llama3.2:latest
MEDIUM_INFERA_MODEL= #Recommended: mistral-nemo:latest
LARGE_INFERA_MODEL= #Recommended: mistral-small:latest

# Venice Configuration
VENICE_API_KEY= # generate from venice settings
SMALL_VENICE_MODEL= # Default: llama-3.3-70b
MEDIUM_VENICE_MODEL= # Default: llama-3.3-70b
LARGE_VENICE_MODEL= # Default: llama-3.1-405b
IMAGE_VENICE_MODEL= # Default: fluently-xl

# Nineteen.ai Configuration
NINETEEN_AI_API_KEY= # Get a free api key from https://nineteen.ai/app/api
SMALL_NINETEEN_AI_MODEL= # Default: unsloth/Llama-3.2-3B-Instruct
MEDIUM_NINETEEN_AI_MODEL= # Default: unsloth/Meta-Llama-3.1-8B-Instruct
LARGE_NINETEEN_AI_MODEL= # Default: hugging-quants/Meta-Llama-3.1-70B-Instruct-AWQ-INT4
IMAGE_NINETEEN_AI_MODE= # Default: dataautogpt3/ProteusV0.4-Lightning

# Akash Chat API Configuration docs: https://chatapi.akash.network/documentation
AKASH_CHAT_API_KEY= # Get from https://chatapi.akash.network/
SMALL_AKASH_CHAT_API_MODEL= # Default: Meta-Llama-3-2-3B-Instruct
MEDIUM_AKASH_CHAT_API_MODEL= # Default: Meta-Llama-3-3-70B-Instruct
LARGE_AKASH_CHAT_API_MODEL= # Default: Meta-Llama-3-1-405B-Instruct-FP8

# Livepeer configuration
LIVEPEER_GATEWAY_URL= # Free inference gateways and docs: https://livepeer-eliza.com/
@@ -238,6 +238,13 @@ MEDIUM_VOLENGINE_MODEL= # Default: doubao-pro-128k
LARGE_VOLENGINE_MODEL= # Default: doubao-pro-256k
VOLENGINE_EMBEDDING_MODEL= # Default: doubao-embedding

# DeepSeek Configuration
DEEPSEEK_API_URL= # Default: https://api.deepseek.com
SMALL_DEEPSEEK_MODEL= # Default: deepseek-chat
MEDIUM_DEEPSEEK_MODEL= # Default: deepseek-chat
LARGE_DEEPSEEK_MODEL= # Default: deepseek-chat


# fal.ai Configuration
FAL_API_KEY=
FAL_AI_LORA_PATH=
@@ -545,4 +552,4 @@ AKASH_MANIFEST_VALIDATION_LEVEL=strict

# Quai Network Ecosystem
QUAI_PRIVATE_KEY=
QUAI_RPC_URL=https://rpc.quai.network
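
The DeepSeek block above adds an endpoint override plus per-size model names; the API key itself is supplied through the runtime's existing token handling rather than a variable in this hunk. A minimal sketch of the fallback resolution these variables feed, mirroring packages/core/src/models.ts below, with `settings` stubbed by process.env so the snippet stands alone:

// Fallback resolution for the new DeepSeek variables (sketch).
// In Eliza's core, `settings` wraps the process environment.
const settings = process.env;

const endpoint = settings.DEEPSEEK_API_URL || "https://api.deepseek.com";
const smallModel = settings.SMALL_DEEPSEEK_MODEL || "deepseek-chat";

console.log(`DeepSeek endpoint: ${endpoint}, small model: ${smallModel}`);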
3 changes: 0 additions & 3 deletions agent/src/index.ts
@@ -777,9 +777,6 @@ export async function createAgent(
getSecret(character, "QUAI_PRIVATE_KEY")
? quaiPlugin
: null,
- getSecret(character, "QUAI_PRIVATE_KEY")
-     ? quaiPlugin
-     : null,
].filter(Boolean),
providers: [],
actions: [],
Expand Down
59 changes: 59 additions & 0 deletions packages/core/src/generation.ts
@@ -966,6 +966,37 @@ export async function generateText({
break;
}

case ModelProviderName.DEEPSEEK: {
elizaLogger.debug("Initializing Deepseek model.");
const serverUrl = models[provider].endpoint;
const deepseek = createOpenAI({
apiKey,
baseURL: serverUrl,
fetch: runtime.fetch,
});

const { text: deepseekResponse } = await aiGenerateText({
model: deepseek.languageModel(model),
prompt: context,
temperature: temperature,
system:
runtime.character.system ??
settings.SYSTEM_PROMPT ??
undefined,
tools: tools,
onStepFinish: onStepFinish,
maxSteps: maxSteps,
maxTokens: max_response_length,
frequencyPenalty: frequency_penalty,
presencePenalty: presence_penalty,
experimental_telemetry: experimental_telemetry,
});

response = deepseekResponse;
elizaLogger.debug("Received response from Deepseek model.");
break;
}

default: {
const errorMessage = `Unsupported provider: ${provider}`;
elizaLogger.error(errorMessage);
@@ -1893,6 +1924,8 @@ export async function handleProvider(
return await handleOpenRouter(options);
case ModelProviderName.OLLAMA:
return await handleOllama(options);
case ModelProviderName.DEEPSEEK:
return await handleDeepSeek(options);
default: {
const errorMessage = `Unsupported provider: ${provider}`;
elizaLogger.error(errorMessage);
@@ -2152,6 +2185,32 @@ async function handleOllama({
});
}

/**
* Handles object generation for DeepSeek models.
*
* @param {ProviderOptions} options - Options specific to DeepSeek.
* @returns {Promise<GenerateObjectResult<unknown>>} - A promise that resolves to generated objects.
*/
async function handleDeepSeek({
model,
apiKey,
schema,
schemaName,
schemaDescription,
mode,
modelOptions,
}: ProviderOptions): Promise<GenerateObjectResult<unknown>> {
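// DeepSeek's API is OpenAI-compatible, so the OpenAI client is reused
// with only the base URL swapped to the DeepSeek endpoint.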
const openai = createOpenAI({ apiKey, baseURL: models.deepseek.endpoint });
return await aiGenerateObject({
model: openai.languageModel(model),
schema,
schemaName,
schemaDescription,
mode,
...modelOptions,
});
}

// Add type definition for Together AI response
interface TogetherAIImageResponse {
data: Array<{
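
Both additions rely on the same property: DeepSeek's API is OpenAI-compatible, so the existing createOpenAI client from @ai-sdk/openai needs nothing beyond a baseURL override. A standalone sketch of that pattern outside Eliza's plumbing (the env var name and prompts are illustrative):

import { createOpenAI } from "@ai-sdk/openai";
import { generateText, generateObject } from "ai";
import { z } from "zod";

// OpenAI-compatible client pointed at DeepSeek's endpoint.
const deepseek = createOpenAI({
    apiKey: process.env.DEEPSEEK_API_KEY ?? "",
    baseURL: "https://api.deepseek.com",
});

async function demo() {
    // Plain text generation, as in the new ModelProviderName.DEEPSEEK case.
    const { text } = await generateText({
        model: deepseek.languageModel("deepseek-chat"),
        prompt: "Say hello in one sentence.",
    });

    // Structured output, as in handleDeepSeek.
    const { object } = await generateObject({
        model: deepseek.languageModel("deepseek-chat"),
        schema: z.object({ city: z.string() }),
        mode: "json",
        prompt: "Return the city where the Eiffel Tower stands.",
    });

    console.log(text, object.city);
}

demo().catch(console.error);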
34 changes: 33 additions & 1 deletion packages/core/src/models.ts
@@ -415,7 +415,7 @@
frequency_penalty: 0.4,
presence_penalty: 0.4,
temperature: 0.7,
},
},
},
[ModelProviderName.REDPILL]: {
@@ -966,6 +966,38 @@
},
},
},
[ModelProviderName.DEEPSEEK]: {
endpoint: settings.DEEPSEEK_API_URL || "https://api.deepseek.com",
model: {
[ModelClass.SMALL]: {
name: settings.SMALL_DEEPSEEK_MODEL || "deepseek-chat",
stop: [],
maxInputTokens: 128000,
maxOutputTokens: 8192,
frequency_penalty: 0.0,
presence_penalty: 0.0,
temperature: 0.7,
},
[ModelClass.MEDIUM]: {
name: settings.MEDIUM_DEEPSEEK_MODEL || "deepseek-chat",
stop: [],
maxInputTokens: 128000,
maxOutputTokens: 8192,
frequency_penalty: 0.0,
presence_penalty: 0.0,
temperature: 0.7,
},
[ModelClass.LARGE]: {
name: settings.LARGE_DEEPSEEK_MODEL || "deepseek-chat",
stop: [],
maxInputTokens: 128000,
maxOutputTokens: 8192,
frequency_penalty: 0.0,
presence_penalty: 0.0,
temperature: 0.7,
},
},
},
};

export function getModelSettings(
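
For reference, these defaults surface through the getModelSettings helper whose declaration begins just above. A hedged lookup sketch (the helper's signature is inferred from its use elsewhere in core, and the import path is an assumption):

import { getModelSettings, ModelProviderName, ModelClass } from "@elizaos/core"; // path assumed

const small = getModelSettings(ModelProviderName.DEEPSEEK, ModelClass.SMALL);
// small.name === "deepseek-chat" unless SMALL_DEEPSEEK_MODEL overrides it;
// maxInputTokens (128000) and maxOutputTokens (8192) match the table above.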
5 changes: 3 additions & 2 deletions packages/core/src/types.ts
@@ -227,6 +227,7 @@ export type Models = {
[ModelProviderName.NINETEEN_AI]: Model;
[ModelProviderName.AKASH_CHAT_API]: Model;
[ModelProviderName.LIVEPEER]: Model;
[ModelProviderName.DEEPSEEK]: Model;
[ModelProviderName.INFERA]: Model;
};

@@ -260,8 +261,8 @@
NINETEEN_AI = "nineteen_ai",
AKASH_CHAT_API = "akash_chat_api",
LIVEPEER = "livepeer",
LETZAI = "letzai",
DEEPSEEK = "deepseek",
INFERA = "infera"
}

/**
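
With the enum extended, a character can select the provider by name; since the enum value is the string "deepseek", JSON character files can simply set "modelProvider": "deepseek". A minimal sketch (the Character import path is an assumption; field names follow Eliza's character shape):

import { ModelProviderName, type Character } from "@elizaos/core"; // path assumed

const character: Partial<Character> = {
    name: "ExampleAgent",
    modelProvider: ModelProviderName.DEEPSEEK, // serializes to "deepseek"
};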