feat: Add DeepSeek AI provider support to Eliza #2067

Merged · 9 commits · Jan 11, 2025
Changes from 2 commits
10 changes: 10 additions & 0 deletions .env.example
@@ -16,6 +16,7 @@ LARGE_OPENAI_MODEL= # Default: gpt-4o
EMBEDDING_OPENAI_MODEL= # Default: text-embedding-3-small
IMAGE_OPENAI_MODEL= # Default: dall-e-3


# Eternal AI's Decentralized Inference API
ETERNALAI_URL=
ETERNALAI_MODEL= # Default: "neuralmagic/Meta-Llama-3.1-405B-Instruct-quantized.w4a16"
@@ -37,6 +38,8 @@ SMALL_HYPERBOLIC_MODEL= # Default: meta-llama/Llama-3.2-3B-Instruct
MEDIUM_HYPERBOLIC_MODEL= # Default: meta-llama/Meta-Llama-3.1-70B-Instruct
LARGE_HYPERBOLIC_MODEL= # Default: meta-llama/Meta-Llama-3.1-405-Instruct

DEEPSEEK_API_KEY= # DeepSeek API key

# Livepeer configuration
LIVEPEER_GATEWAY_URL= # Free inference gateways and docs: https://livepeer-eliza.com/
LIVEPEER_IMAGE_MODEL= # Default: ByteDance/SDXL-Lightning
@@ -175,6 +178,13 @@ MEDIUM_VOLENGINE_MODEL= # Default: doubao-pro-128k
LARGE_VOLENGINE_MODEL= # Default: doubao-pro-256k
VOLENGINE_EMBEDDING_MODEL= # Default: doubao-embedding

# DeepSeek Configuration
DEEPSEEK_API_URL= # Default: https://api.deepseek.com
SMALL_DEEPSEEK_MODEL= # Default: deepseek-chat
MEDIUM_DEEPSEEK_MODEL= # Default: deepseek-chat
LARGE_DEEPSEEK_MODEL= # Default: deepseek-chat


# EVM
EVM_PRIVATE_KEY=
EVM_PROVIDER_URL=
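For reference, the new variables can be used either globally through .env or per character. A minimal character sketch selecting the provider might look like the following; the import path and the exact Character fields are assumptions based on how other providers are configured in Eliza, not part of this diff.

    import { ModelProviderName, type Character } from "@elizaos/core";

    // Illustrative character that opts into DeepSeek. The per-character secret
    // takes precedence over the global DEEPSEEK_API_KEY from .env
    // (see getTokenForProvider below).
    export const deepseekCharacter: Partial<Character> = {
        name: "deepseek-agent",
        modelProvider: ModelProviderName.DEEPSEEK,
        settings: {
            secrets: {
                DEEPSEEK_API_KEY: process.env.DEEPSEEK_API_KEY ?? "",
            },
        },
    };
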
5 changes: 5 additions & 0 deletions agent/src/index.ts
@@ -349,6 +349,11 @@ export function getTokenForProvider(
character.settings?.secrets?.GOOGLE_GENERATIVE_AI_API_KEY ||
settings.GOOGLE_GENERATIVE_AI_API_KEY
);
case ModelProviderName.DEEPSEEK:
return (
character.settings?.secrets?.DEEPSEEK_API_KEY ||
settings.DEEPSEEK_API_KEY
);
default:
const errorMessage = `Failed to get token - unsupported model provider: ${provider}`;
elizaLogger.error(errorMessage);
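Usage of the new case is the same as for every other provider: the runtime asks getTokenForProvider for a key and falls back from the character's secrets to the process-wide settings. An illustrative call, with the parameter order assumed from the existing signature:

    // Prefers character.settings.secrets.DEEPSEEK_API_KEY, then settings.DEEPSEEK_API_KEY.
    const token = getTokenForProvider(ModelProviderName.DEEPSEEK, character);
    if (!token) {
        throw new Error("No DeepSeek key found in character secrets or DEEPSEEK_API_KEY");
    }
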
59 changes: 59 additions & 0 deletions packages/core/src/generation.ts
@@ -795,6 +795,37 @@ export async function generateText({
break;
}

case ModelProviderName.DEEPSEEK: {
elizaLogger.debug("Initializing Deepseek model.");
const serverUrl = models[provider].endpoint;
const deepseek = createOpenAI({
apiKey,
baseURL: serverUrl,
fetch: runtime.fetch,
});

const { text: deepseekResponse } = await aiGenerateText({
model: deepseek.languageModel(model),
prompt: context,
temperature: temperature,
system:
runtime.character.system ??
settings.SYSTEM_PROMPT ??
undefined,
tools: tools,
onStepFinish: onStepFinish,
maxSteps: maxSteps,
maxTokens: max_response_length,
frequencyPenalty: frequency_penalty,
presencePenalty: presence_penalty,
experimental_telemetry: experimental_telemetry,
});

response = deepseekResponse;
elizaLogger.debug("Received response from Deepseek model.");
break;
}

default: {
const errorMessage = `Unsupported provider: ${provider}`;
elizaLogger.error(errorMessage);
@@ -1664,6 +1695,8 @@ export async function handleProvider(
return await handleOpenRouter(options);
case ModelProviderName.OLLAMA:
return await handleOllama(options);
case ModelProviderName.DEEPSEEK:
return await handleDeepSeek(options);
default: {
const errorMessage = `Unsupported provider: ${provider}`;
elizaLogger.error(errorMessage);
@@ -1886,6 +1919,32 @@ async function handleOllama({
});
}

/**
* Handles object generation for DeepSeek models.
*
* @param {ProviderOptions} options - Options specific to DeepSeek.
* @returns {Promise<GenerateObjectResult<unknown>>} - A promise that resolves to generated objects.
*/
async function handleDeepSeek({
model,
apiKey,
schema,
schemaName,
schemaDescription,
mode,
modelOptions,
}: ProviderOptions): Promise<GenerateObjectResult<unknown>> {
const openai = createOpenAI({ apiKey, baseURL: models.deepseek.endpoint });
return await aiGenerateObject({
model: openai.languageModel(model),
schema,
schemaName,
schemaDescription,
mode,
...modelOptions,
});
}

// Add type definition for Together AI response
interface TogetherAIImageResponse {
data: Array<{
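Both code paths rely on DeepSeek exposing an OpenAI-compatible HTTP API, so the existing createOpenAI client from the Vercel AI SDK is simply pointed at a different baseURL. A self-contained sketch of the same pattern outside the Eliza runtime; the endpoint and model name mirror the defaults added in models.ts, and error handling is omitted:

    import { createOpenAI } from "@ai-sdk/openai";
    import { generateText } from "ai";

    // DeepSeek speaks the OpenAI wire format, so no dedicated SDK is needed.
    const deepseek = createOpenAI({
        apiKey: process.env.DEEPSEEK_API_KEY,
        baseURL: process.env.DEEPSEEK_API_URL ?? "https://api.deepseek.com",
    });

    const { text } = await generateText({
        model: deepseek.languageModel("deepseek-chat"),
        system: "You are a concise assistant.",
        prompt: "Explain what an OpenAI-compatible endpoint is in one sentence.",
        temperature: 0.7,
        maxTokens: 256,
    });

    console.log(text);
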
16 changes: 16 additions & 0 deletions packages/core/src/models.ts
@@ -514,6 +514,22 @@ export const models: Models = {
[ModelClass.IMAGE]: settings.LIVEPEER_IMAGE_MODEL || "ByteDance/SDXL-Lightning",
},
},
[ModelProviderName.DEEPSEEK]: {
endpoint: settings.DEEPSEEK_API_URL || "https://api.deepseek.com",
settings: {
stop: [],
maxInputTokens: 128000,
maxOutputTokens: 8192,
frequency_penalty: 0.0,
presence_penalty: 0.0,
temperature: 0.7,
},
model: {
[ModelClass.SMALL]: settings.SMALL_DEEPSEEK_MODEL || "deepseek-chat",
[ModelClass.MEDIUM]: settings.MEDIUM_DEEPSEEK_MODEL || "deepseek-chat",
[ModelClass.LARGE]: settings.LARGE_DEEPSEEK_MODEL || "deepseek-chat",
},
},
};

export function getModel(provider: ModelProviderName, type: ModelClass) {
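With this entry in place, model resolution goes through the existing getModel helper: every ModelClass resolves to deepseek-chat unless the corresponding SMALL_/MEDIUM_/LARGE_DEEPSEEK_MODEL variable overrides it. Illustrative usage, assuming getModel is re-exported from the core package entry point:

    import { getModel, ModelClass, ModelProviderName } from "@elizaos/core";

    // Returns settings.SMALL_DEEPSEEK_MODEL if set, otherwise "deepseek-chat".
    const smallModel = getModel(ModelProviderName.DEEPSEEK, ModelClass.SMALL);

    // All three classes currently point at the same default chat model.
    const largeModel = getModel(ModelProviderName.DEEPSEEK, ModelClass.LARGE);
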
2 changes: 2 additions & 0 deletions packages/core/src/types.ts
@@ -213,6 +213,7 @@ export type Models = {
[ModelProviderName.VENICE]: Model;
[ModelProviderName.AKASH_CHAT_API]: Model;
[ModelProviderName.LIVEPEER]: Model;
[ModelProviderName.DEEPSEEK]: Model;
};

/**
@@ -243,6 +244,7 @@ export enum ModelProviderName {
VENICE = "venice",
AKASH_CHAT_API = "akash_chat_api",
LIVEPEER = "livepeer",
DEEPSEEK = "deepseek",
}

/**
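Because the Models type maps every ModelProviderName member to a Model entry, adding DEEPSEEK to the enum without the models.ts entry above would not type-check, which keeps the two files in sync. The enum's string value is what a character file would typically put in its modelProvider field:

    import { ModelProviderName } from "@elizaos/core";

    // "deepseek" is the string used in character configuration and logs.
    const provider: ModelProviderName = ModelProviderName.DEEPSEEK;
    console.log(provider); // "deepseek"
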
84 changes: 11 additions & 73 deletions pnpm-lock.yaml

Some generated files are not rendered by default (pnpm-lock.yaml).