Merge remote-tracking branch 'first/main'
# Conflicts:
#	.env.template
#	app/components/emoji.tsx
#	app/constant.ts
kiritoko1029 committed Feb 12, 2025
2 parents e912365 + 48cd4b1 commit 319e554
Showing 28 changed files with 462 additions and 66 deletions.
6 changes: 6 additions & 0 deletions .env.template
@@ -88,3 +88,9 @@ HEADER_LOGO_URL=

### (optional) Announcement path, supports markdown syntax
ANNOUNCEMENT_PATH=

### siliconflow Api key (optional)
SILICONFLOW_API_KEY=

### siliconflow Api url (optional)
SILICONFLOW_URL=
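
For context, these two optional variables configure the new SiliconFlow provider; a minimal sketch of how they would be read server-side (the variable names come from the diff, but the accessor and fallback URL are assumptions, not confirmed against app/config/server.ts):

// Sketch only — assumed accessor; the real lookup lives in app/config/server.ts.
// Both variables are optional and fall back to provider defaults when unset.
const siliconFlowApiKey = process.env.SILICONFLOW_API_KEY ?? "";
const siliconFlowUrl =
  process.env.SILICONFLOW_URL ?? "https://api.siliconflow.cn"; // assumed default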
2 changes: 1 addition & 1 deletion LICENSE
@@ -1,6 +1,6 @@
MIT License

Copyright (c) 2023-2024 Zhang Yifei
Copyright (c) 2023-2025 NextChat

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
11 changes: 6 additions & 5 deletions app/client/platforms/bytedance.ts
@@ -22,7 +22,7 @@ import {
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";
import { preProcessImageContent } from "@/app/utils/chat";
import { fetch } from "@/app/utils/stream";

export interface OpenAIListModelResponse {
@@ -84,10 +84,11 @@ export class DoubaoApi implements LLMApi {
}

async chat(options: ChatOptions) {
const messages = options.messages.map((v) => ({
role: v.role,
content: getMessageTextContent(v),
}));
const messages: ChatOptions["messages"] = [];
for (const v of options.messages) {
const content = await preProcessImageContent(v.content);
messages.push({ role: v.role, content });
}

const modelConfig = {
...useAppConfig.getState().modelConfig,
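
The synchronous map() above was replaced because preProcessImageContent() is async (it resolves image attachments in the message content before the request is built), so each message must be awaited. An equivalent parallel sketch under the same assumed signature:

// Equivalent sketch: resolve every message's content in parallel.
// Assumes preProcessImageContent(content) returns a Promise of the processed content.
const messages = await Promise.all(
  options.messages.map(async (v) => ({
    role: v.role,
    content: await preProcessImageContent(v.content),
  })),
);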
7 changes: 6 additions & 1 deletion app/client/platforms/deepseek.ts
@@ -5,6 +5,7 @@ import {
DEEPSEEK_BASE_URL,
DeepSeek,
REQUEST_TIMEOUT_MS,
REQUEST_TIMEOUT_MS_FOR_THINKING,
} from "@/app/constant";
import {
useAccessStore,
@@ -116,10 +117,14 @@ export class DeepSeekApi implements LLMApi {

// console.log(chatPayload);

const isR1 =
options.config.model.endsWith("-reasoner") ||
options.config.model.endsWith("-r1");

// make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
REQUEST_TIMEOUT_MS,
isR1 ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
);

if (shouldStream) {
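
Both timeout constants come from app/constant.ts; the values below are assumptions, but the intent of the change is a much longer abort window for reasoning models (deepseek-reasoner, *-r1) than for ordinary chat models:

// Assumed values — check app/constant.ts for the actual numbers.
export const REQUEST_TIMEOUT_MS = 60 * 1000;
export const REQUEST_TIMEOUT_MS_FOR_THINKING = REQUEST_TIMEOUT_MS * 5;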
19 changes: 16 additions & 3 deletions app/client/platforms/google.ts
@@ -1,4 +1,9 @@
import { ApiPath, Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
import {
ApiPath,
Google,
REQUEST_TIMEOUT_MS,
REQUEST_TIMEOUT_MS_FOR_THINKING,
} from "@/app/constant";
import {
ChatOptions,
getHeaders,
@@ -69,9 +74,16 @@ export class GeminiProApi implements LLMApi {
.join("\n\n");
};

let content = "";
if (Array.isArray(res)) {
res.map((item) => {
content += getTextFromParts(item?.candidates?.at(0)?.content?.parts);
});
}

return (
getTextFromParts(res?.candidates?.at(0)?.content?.parts) ||
getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) ||
content || //getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) ||
res?.error?.message ||
""
);
@@ -190,10 +202,11 @@ export class GeminiProApi implements LLMApi {
headers: getHeaders(),
};

const isThinking = options.config.model.includes("-thinking");
// make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
REQUEST_TIMEOUT_MS,
isThinking ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
);

if (shouldStream) {
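
The new branch handles Gemini responses that arrive as an array of streamed chunks: it concatenates the first candidate's parts across every element instead of reading only the first chunk. A condensed sketch of the same extraction:

// Condensed restatement of the array handling added above.
const content = Array.isArray(res)
  ? res
      .map((item) => getTextFromParts(item?.candidates?.at(0)?.content?.parts))
      .join("")
  : "";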
5 changes: 4 additions & 1 deletion app/client/platforms/openai.ts
@@ -8,6 +8,7 @@ import {
Azure,
REQUEST_TIMEOUT_MS,
ServiceProvider,
REQUEST_TIMEOUT_MS_FOR_THINKING,
} from "@/app/constant";
import {
ChatMessageTool,
@@ -361,7 +362,9 @@ export class ChatGPTApi implements LLMApi {
// make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
isDalle3 || isO1OrO3 ? REQUEST_TIMEOUT_MS * 4 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
isDalle3 || isO1OrO3
? REQUEST_TIMEOUT_MS_FOR_THINKING
: REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
);

const res = await fetch(chatPath, chatPayload);
66 changes: 55 additions & 11 deletions app/client/platforms/siliconflow.ts
@@ -4,7 +4,8 @@ import {
ApiPath,
SILICONFLOW_BASE_URL,
SiliconFlow,
REQUEST_TIMEOUT_MS,
REQUEST_TIMEOUT_MS_FOR_THINKING,
DEFAULT_MODELS,
} from "@/app/constant";
import {
useAccessStore,
@@ -13,7 +14,7 @@ import {
ChatMessageTool,
usePluginStore,
} from "@/app/store";
import { streamWithThink } from "@/app/utils/chat";
import { preProcessImageContent, streamWithThink } from "@/app/utils/chat";
import {
ChatOptions,
getHeaders,
@@ -25,12 +26,22 @@ import { getClientConfig } from "@/app/config/client";
import {
getMessageTextContent,
getMessageTextContentWithoutThinking,
isVisionModel,
} from "@/app/utils";
import { RequestPayload } from "./openai";

import { fetch } from "@/app/utils/stream";
export interface SiliconFlowListModelResponse {
object: string;
data: Array<{
id: string;
object: string;
root: string;
}>;
}

export class SiliconflowApi implements LLMApi {
private disableListModels = true;
private disableListModels = false;

path(path: string): string {
const accessStore = useAccessStore.getState();
@@ -71,13 +82,16 @@ export class SiliconflowApi implements LLMApi {
}

async chat(options: ChatOptions) {
const visionModel = isVisionModel(options.config.model);
const messages: ChatOptions["messages"] = [];
for (const v of options.messages) {
if (v.role === "assistant") {
const content = getMessageTextContentWithoutThinking(v);
messages.push({ role: v.role, content });
} else {
const content = getMessageTextContent(v);
const content = visionModel
? await preProcessImageContent(v.content)
: getMessageTextContent(v);
messages.push({ role: v.role, content });
}
}
@@ -120,10 +134,10 @@ export class SiliconflowApi implements LLMApi {

// console.log(chatPayload);

// make a fetch request
// Use extended timeout for thinking models as they typically require more processing time
const requestTimeoutId = setTimeout(
() => controller.abort(),
REQUEST_TIMEOUT_MS,
REQUEST_TIMEOUT_MS_FOR_THINKING,
);

if (shouldStream) {
@@ -174,21 +188,21 @@ export class SiliconflowApi implements LLMApi {

// Skip if both content and reasoning_content are empty or null
if (
(!reasoning || reasoning.trim().length === 0) &&
(!content || content.trim().length === 0)
(!reasoning || reasoning.length === 0) &&
(!content || content.length === 0)
) {
return {
isThinking: false,
content: "",
};
}

if (reasoning && reasoning.trim().length > 0) {
if (reasoning && reasoning.length > 0) {
return {
isThinking: true,
content: reasoning,
};
} else if (content && content.trim().length > 0) {
} else if (content && content.length > 0) {
return {
isThinking: false,
content: content,
@@ -238,6 +252,36 @@ export class SiliconflowApi implements LLMApi {
}

async models(): Promise<LLMModel[]> {
return [];
if (this.disableListModels) {
return DEFAULT_MODELS.slice();
}

const res = await fetch(this.path(SiliconFlow.ListModelPath), {
method: "GET",
headers: {
...getHeaders(),
},
});

const resJson = (await res.json()) as SiliconFlowListModelResponse;
const chatModels = resJson.data;
console.log("[Models]", chatModels);

if (!chatModels) {
return [];
}

let seq = 1000; // keep the same ordering as in Constant.ts
return chatModels.map((m) => ({
name: m.id,
available: true,
sorted: seq++,
provider: {
id: "siliconflow",
providerName: "SiliconFlow",
providerType: "siliconflow",
sorted: 14,
},
}));
}
}
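
With disableListModels now false, models() fetches SiliconFlow's model list and maps each entry to an LLMModel tagged with the siliconflow provider. An illustrative payload in the SiliconFlowListModelResponse shape declared above (the model id is an example, not an exhaustive list):

// Illustrative response — fields per SiliconFlowListModelResponse; ids are examples.
const exampleResponse: SiliconFlowListModelResponse = {
  object: "list",
  data: [
    {
      id: "deepseek-ai/DeepSeek-R1",
      object: "model",
      root: "deepseek-ai/DeepSeek-R1",
    },
  ],
};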
4 changes: 2 additions & 2 deletions app/client/platforms/xai.ts
@@ -17,7 +17,7 @@ import {
SpeechOptions,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";
import { preProcessImageContent } from "@/app/utils/chat";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";

@@ -62,7 +62,7 @@ export class XAIApi implements LLMApi {
async chat(options: ChatOptions) {
const messages: ChatOptions["messages"] = [];
for (const v of options.messages) {
const content = getMessageTextContent(v);
const content = await preProcessImageContent(v.content);
messages.push({ role: v.role, content });
}

71 changes: 61 additions & 10 deletions app/components/emoji.tsx
@@ -6,8 +6,21 @@ import EmojiPicker, {

import { ModelType } from "../store";

import BotIcon from "../icons/bot.svg";
import BlackBotIcon from "../icons/black-bot.svg";
import BotIconDefault from "../icons/llm-icons/default.svg";
import BotIconOpenAI from "../icons/llm-icons/openai.svg";
import BotIconGemini from "../icons/llm-icons/gemini.svg";
import BotIconGemma from "../icons/llm-icons/gemma.svg";
import BotIconClaude from "../icons/llm-icons/claude.svg";
import BotIconMeta from "../icons/llm-icons/meta.svg";
import BotIconMistral from "../icons/llm-icons/mistral.svg";
import BotIconDeepseek from "../icons/llm-icons/deepseek.svg";
import BotIconMoonshot from "../icons/llm-icons/moonshot.svg";
import BotIconQwen from "../icons/llm-icons/qwen.svg";
import BotIconWenxin from "../icons/llm-icons/wenxin.svg";
import BotIconGrok from "../icons/llm-icons/grok.svg";
import BotIconHunyuan from "../icons/llm-icons/hunyuan.svg";
import BotIconDoubao from "../icons/llm-icons/doubao.svg";
import BotIconChatglm from "../icons/llm-icons/chatglm.svg";

export function getEmojiUrl(unified: string, style: EmojiStyle) {
// Whoever owns this Content Delivery Network (CDN), I am using your CDN to serve emojis
@@ -33,17 +46,55 @@ export function AvatarPicker(props: {
}

export function Avatar(props: { model?: ModelType; avatar?: string }) {
let LlmIcon = BotIconDefault;

if (props.model) {
const modelName = props.model.toLowerCase();

if (
modelName.startsWith("gpt") ||
modelName.startsWith("chatgpt") ||
modelName.startsWith("dall-e") ||
modelName.startsWith("dalle") ||
modelName.startsWith("o1") ||
modelName.startsWith("o3")
) {
LlmIcon = BotIconOpenAI;
} else if (modelName.startsWith("gemini")) {
LlmIcon = BotIconGemini;
} else if (modelName.startsWith("gemma")) {
LlmIcon = BotIconGemma;
} else if (modelName.startsWith("claude")) {
LlmIcon = BotIconClaude;
} else if (modelName.toLowerCase().includes("llama")) {
LlmIcon = BotIconMeta;
} else if (modelName.startsWith("mixtral")) {
LlmIcon = BotIconMistral;
} else if (modelName.toLowerCase().includes("deepseek")) {
LlmIcon = BotIconDeepseek;
} else if (modelName.startsWith("moonshot")) {
LlmIcon = BotIconMoonshot;
} else if (modelName.startsWith("qwen")) {
LlmIcon = BotIconQwen;
} else if (modelName.startsWith("ernie")) {
LlmIcon = BotIconWenxin;
} else if (modelName.startsWith("grok")) {
LlmIcon = BotIconGrok;
} else if (modelName.startsWith("hunyuan")) {
LlmIcon = BotIconHunyuan;
} else if (modelName.startsWith("doubao") || modelName.startsWith("ep-")) {
LlmIcon = BotIconDoubao;
} else if (
modelName.toLowerCase().includes("glm") ||
modelName.startsWith("cogview-") ||
modelName.startsWith("cogvideox-")
) {
LlmIcon = BotIconChatglm;
}

return (
<div className="no-dark">
{props.model?.startsWith("gpt-4") ||
props.model?.startsWith("chatgpt-4o") ||
props.model?.startsWith("o1") ||
props.model?.startsWith("o3") ? (
<BlackBotIcon className="user-avatar" />
) : (
<BotIcon className="user-avatar" />
)}
<LlmIcon className="user-avatar" width={30} height={30} />
</div>
);
}
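
The rewritten Avatar resolves a brand icon from the lowercased model name by prefix (or substring) match, falling back to a generic icon instead of the old two-icon gpt-4 check. A few illustrative resolutions (model names are examples):

// <Avatar model="gpt-4o" />            → BotIconOpenAI
// <Avatar model="deepseek-chat" />     → BotIconDeepseek
// <Avatar model="some-future-model" /> → BotIconDefault (fallback)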
… (diff for the remaining changed files not shown)