diff --git a/client/data/config.json b/client/data/config.json
index fa8bfdddb..78af9bff9 100644
--- a/client/data/config.json
+++ b/client/data/config.json
@@ -23,7 +23,7 @@
"model": "gpt-3.5-turbo",
"name": "FastAI-4k",
"contextMaxToken": 4000,
- "systemMaxToken": 2400,
+ "quoteMaxToken": 2000,
"maxTemperature": 1.2,
"price": 1.5
},
@@ -31,7 +31,7 @@
"model": "gpt-3.5-turbo-16k",
"name": "FastAI-16k",
"contextMaxToken": 16000,
- "systemMaxToken": 8000,
+ "quoteMaxToken": 8000,
"maxTemperature": 1.2,
"price": 3
},
@@ -39,7 +39,7 @@
"model": "gpt-4",
"name": "FastAI-Plus",
"contextMaxToken": 8000,
- "systemMaxToken": 4000,
+ "quoteMaxToken": 4000,
"maxTemperature": 1.2,
"price": 45
}
diff --git a/client/src/api/fetch.ts b/client/src/api/fetch.ts
index e5a774a6e..f2ec88713 100644
--- a/client/src/api/fetch.ts
+++ b/client/src/api/fetch.ts
@@ -1,7 +1,7 @@
-import { sseResponseEventEnum } from '@/constants/chat';
+import { sseResponseEventEnum, TaskResponseKeyEnum } from '@/constants/chat';
import { getErrText } from '@/utils/tools';
import { parseStreamChunk } from '@/utils/adapt';
-import { QuoteItemType } from '@/types/chat';
+import type { ChatHistoryItemResType } from '@/types/chat';
interface StreamFetchProps {
url?: string;
@@ -17,8 +17,7 @@ export const streamFetch = ({
}: StreamFetchProps) =>
new Promise<{
responseText: string;
- errMsg: string;
- newChatId: string | null;
+ [TaskResponseKeyEnum.responseData]: ChatHistoryItemResType[];
}>(async (resolve, reject) => {
try {
const response = await window.fetch(url, {
@@ -42,7 +41,7 @@ export const streamFetch = ({
// response data
let responseText = '';
let errMsg = '';
- const newChatId = response.headers.get('newChatId');
+ let responseData: ChatHistoryItemResType[] = [];
const read = async () => {
try {
@@ -51,8 +50,7 @@ export const streamFetch = ({
if (response.status === 200 && !errMsg) {
return resolve({
responseText,
- errMsg,
- newChatId
+ responseData
});
} else {
return reject({
@@ -78,7 +76,7 @@ export const streamFetch = ({
onMessage(answer);
responseText += answer;
} else if (item.event === sseResponseEventEnum.appStreamResponse) {
- console.log(data);
+ responseData = data;
} else if (item.event === sseResponseEventEnum.error) {
errMsg = getErrText(data, '流响应错误');
}
@@ -88,8 +86,7 @@ export const streamFetch = ({
if (err?.message === 'The user aborted a request.') {
return resolve({
responseText,
- errMsg,
- newChatId
+ responseData
});
}
reject(getErrText(err, '请求异常'));
diff --git a/client/src/components/ChatBox/ResponseDetailModal.tsx b/client/src/components/ChatBox/ResponseDetailModal.tsx
new file mode 100644
index 000000000..3756a57b1
--- /dev/null
+++ b/client/src/components/ChatBox/ResponseDetailModal.tsx
@@ -0,0 +1,7 @@
+import React from 'react';
+
+const ResponseDetailModal = () => {
+  return <div>ResponseDetailModal</div>;
+};
+};
+
+export default ResponseDetailModal;
diff --git a/client/src/components/ChatBox/index.tsx b/client/src/components/ChatBox/index.tsx
index ab74ba60e..c7e115ca7 100644
--- a/client/src/components/ChatBox/index.tsx
+++ b/client/src/components/ChatBox/index.tsx
@@ -9,7 +9,12 @@ import React, {
useEffect
} from 'react';
import { throttle } from 'lodash';
-import { ChatItemType, ChatSiteItemType, ExportChatType } from '@/types/chat';
+import {
+ ChatHistoryItemResType,
+ ChatItemType,
+ ChatSiteItemType,
+ ExportChatType
+} from '@/types/chat';
import { useToast } from '@/hooks/useToast';
import {
useCopyData,
@@ -35,6 +40,7 @@ import { useRouter } from 'next/router';
import { useGlobalStore } from '@/store/global';
import { QuoteItemType } from '@/types/chat';
import { FlowModuleTypeEnum } from '@/constants/flow';
+import { TaskResponseKeyEnum } from '@/constants/chat';
import dynamic from 'next/dynamic';
const QuoteModal = dynamic(() => import('./QuoteModal'));
@@ -131,9 +137,10 @@ const ChatBox = (
variableModules?: VariableItemType[];
welcomeText?: string;
    onUpdateVariable?: (e: Record<string, any>) => void;
- onStartChat: (
- e: StartChatFnProps
- ) => Promise<{ responseText?: string; rawSearch?: QuoteItemType[] }>;
+ onStartChat: (e: StartChatFnProps) => Promise<{
+ responseText: string;
+ [TaskResponseKeyEnum.responseData]: ChatHistoryItemResType[];
+ }>;
onDelMessage?: (e: { contentId?: string; index: number }) => void;
},
  ref: ForwardedRef<ComponentRef>
@@ -294,7 +301,7 @@ const ChatBox = (
const messages = adaptChatItem_openAI({ messages: newChatList, reserveId: true });
- const { rawSearch } = await onStartChat({
+ const { responseData } = await onStartChat({
messages,
controller: abortSignal,
generatingMessage,
@@ -308,7 +315,7 @@ const ChatBox = (
return {
...item,
status: 'finish',
- rawSearch
+ responseData
};
})
);
diff --git a/client/src/constants/chat.ts b/client/src/constants/chat.ts
index 695346a04..7f3be2207 100644
--- a/client/src/constants/chat.ts
+++ b/client/src/constants/chat.ts
@@ -51,5 +51,11 @@ export const ChatSourceMap = {
}
};
+export enum ChatModuleEnum {
+ 'AIChat' = 'AI Chat',
+ 'KBSearch' = 'KB Search',
+ 'CQ' = 'Classify Question'
+}
+
export const HUMAN_ICON = `https://fastgpt.run/icon/human.png`;
export const LOGO_ICON = `https://fastgpt.run/icon/logo.png`;
diff --git a/client/src/pages/api/chat/delChatRecordByContentId.ts b/client/src/pages/api/chat/delChatRecordByContentId.ts
index 2b7402f27..29c9c00f4 100644
--- a/client/src/pages/api/chat/delChatRecordByContentId.ts
+++ b/client/src/pages/api/chat/delChatRecordByContentId.ts
@@ -25,7 +25,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
// 删除一条数据库记录
await Chat.updateOne(
{
- _id: chatId,
+ chatId,
userId
},
{ $pull: { content: { _id: contentId } } }
diff --git a/client/src/pages/api/chat/init.ts b/client/src/pages/api/chat/init.ts
index 348c499dd..d1e1cba6b 100644
--- a/client/src/pages/api/chat/init.ts
+++ b/client/src/pages/api/chat/init.ts
@@ -8,6 +8,7 @@ import { authApp } from '@/service/utils/auth';
import mongoose from 'mongoose';
import type { ChatSchema } from '@/types/mongoSchema';
import { getSpecialModule } from '@/components/ChatBox';
+import { TaskResponseKeyEnum } from '@/constants/chat';
/* 初始化我的聊天框,需要身份验证 */
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
@@ -67,7 +68,8 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
$project: {
_id: '$content._id',
obj: '$content.obj',
- value: '$content.value'
+ value: '$content.value',
+ [TaskResponseKeyEnum.responseData]: `$content.${TaskResponseKeyEnum.responseData}`
}
}
])
diff --git a/client/src/pages/api/openapi/text/gptMessagesSlice.ts b/client/src/pages/api/openapi/text/gptMessagesSlice.ts
index 7498d4bc6..69c6e3328 100644
--- a/client/src/pages/api/openapi/text/gptMessagesSlice.ts
+++ b/client/src/pages/api/openapi/text/gptMessagesSlice.ts
@@ -46,7 +46,7 @@ export function gpt_chatItemTokenSlice({
maxToken
}: {
messages: ChatItemType[];
- model?: ModelType;
+ model?: string;
maxToken: number;
}) {
let result: ChatItemType[] = [];
diff --git a/client/src/pages/api/system/getInitData.ts b/client/src/pages/api/system/getInitData.ts
index eba6f298c..2202a9cdd 100644
--- a/client/src/pages/api/system/getInitData.ts
+++ b/client/src/pages/api/system/getInitData.ts
@@ -68,7 +68,7 @@ export function setDefaultData() {
model: 'gpt-3.5-turbo',
name: 'FastAI-4k',
contextMaxToken: 4000,
- systemMaxToken: 2400,
+ quoteMaxToken: 2400,
maxTemperature: 1.2,
price: 1.5
},
@@ -76,7 +76,7 @@ export function setDefaultData() {
model: 'gpt-3.5-turbo-16k',
name: 'FastAI-16k',
contextMaxToken: 16000,
- systemMaxToken: 8000,
+ quoteMaxToken: 8000,
maxTemperature: 1.2,
price: 3
},
@@ -84,7 +84,7 @@ export function setDefaultData() {
model: 'gpt-4',
name: 'FastAI-Plus',
contextMaxToken: 8000,
- systemMaxToken: 4000,
+ quoteMaxToken: 4000,
maxTemperature: 1.2,
price: 45
}
diff --git a/client/src/pages/app/detail/components/BasicEdit/index.tsx b/client/src/pages/app/detail/components/BasicEdit/index.tsx
index 5d3edad49..250fb1516 100644
--- a/client/src/pages/app/detail/components/BasicEdit/index.tsx
+++ b/client/src/pages/app/detail/components/BasicEdit/index.tsx
@@ -454,7 +454,7 @@ const ChatTest = ({ appId }: { appId: string }) => {
const history = messages.slice(-historyMaxLen - 2, -2);
// 流请求,获取数据
- const { responseText } = await streamFetch({
+ const { responseText, responseData } = await streamFetch({
url: '/api/chat/chatTest',
data: {
history,
@@ -468,7 +468,7 @@ const ChatTest = ({ appId }: { appId: string }) => {
abortSignal: controller
});
- return { responseText };
+ return { responseText, responseData };
},
[modules, appId, appDetail.name]
);
diff --git a/client/src/pages/app/detail/components/Edit/components/ChatTest.tsx b/client/src/pages/app/detail/components/Edit/components/ChatTest.tsx
index 09c83ad96..001566c90 100644
--- a/client/src/pages/app/detail/components/Edit/components/ChatTest.tsx
+++ b/client/src/pages/app/detail/components/Edit/components/ChatTest.tsx
@@ -49,7 +49,7 @@ const ChatTest = (
const history = messages.slice(-historyMaxLen - 2, -2);
// 流请求,获取数据
- const { responseText } = await streamFetch({
+ const { responseText, responseData } = await streamFetch({
url: '/api/chat/chatTest',
data: {
history,
@@ -63,7 +63,7 @@ const ChatTest = (
abortSignal: controller
});
- return { responseText };
+ return { responseText, responseData };
},
[app._id, app.name, modules]
);
diff --git a/client/src/pages/chat/index.tsx b/client/src/pages/chat/index.tsx
index 359331450..1adaf3b30 100644
--- a/client/src/pages/chat/index.tsx
+++ b/client/src/pages/chat/index.tsx
@@ -60,7 +60,7 @@ const Chat = ({ appId, chatId }: { appId: string; chatId: string }) => {
const prompts = messages.slice(-2);
const completionChatId = chatId ? chatId : nanoid();
- const { responseText } = await streamFetch({
+ const { responseText, responseData } = await streamFetch({
data: {
messages: prompts,
variables,
@@ -106,7 +106,7 @@ const Chat = ({ appId, chatId }: { appId: string; chatId: string }) => {
history: ChatBoxRef.current?.getChatHistory() || state.history
}));
- return { responseText };
+ return { responseText, responseData };
},
[appId, chatId, history, router, setChatData, updateHistory]
);
diff --git a/client/src/pages/chat/share.tsx b/client/src/pages/chat/share.tsx
index 208dfefee..2bfe73c1c 100644
--- a/client/src/pages/chat/share.tsx
+++ b/client/src/pages/chat/share.tsx
@@ -46,7 +46,7 @@ const ShareChat = ({ shareId, chatId }: { shareId: string; chatId: string }) =>
const prompts = messages.slice(-2);
const completionChatId = chatId ? chatId : nanoid();
- const { responseText } = await streamFetch({
+ const { responseText, responseData } = await streamFetch({
data: {
messages: prompts,
variables,
@@ -91,7 +91,7 @@ const ShareChat = ({ shareId, chatId }: { shareId: string; chatId: string }) =>
'*'
);
- return { responseText };
+ return { responseText, responseData };
},
[chatId, router, saveChatResponse, shareId]
);
diff --git a/client/src/service/models/chat.ts b/client/src/service/models/chat.ts
index 8ff200533..37e039e78 100644
--- a/client/src/service/models/chat.ts
+++ b/client/src/service/models/chat.ts
@@ -68,7 +68,8 @@ const ChatSchema = new Schema({
answer: String,
temperature: Number,
maxToken: Number,
- finishMessages: Array,
+ quoteList: Array,
+ completeMessages: Array,
similarity: Number,
limit: Number,
cqList: Array,
diff --git a/client/src/service/moduleDispatch/agent/classifyQuestion.ts b/client/src/service/moduleDispatch/agent/classifyQuestion.ts
index 628d09b16..1d0298d59 100644
--- a/client/src/service/moduleDispatch/agent/classifyQuestion.ts
+++ b/client/src/service/moduleDispatch/agent/classifyQuestion.ts
@@ -1,7 +1,7 @@
import { adaptChatItem_openAI } from '@/utils/plugin/openai';
import { ChatContextFilter } from '@/service/utils/chat/index';
import type { ChatHistoryItemResType, ChatItemType } from '@/types/chat';
-import { ChatRoleEnum, TaskResponseKeyEnum } from '@/constants/chat';
+import { ChatModuleEnum, ChatRoleEnum, TaskResponseKeyEnum } from '@/constants/chat';
import { getOpenAIApi, axiosConfig } from '@/service/ai/openai';
import type { ClassifyQuestionAgentItemType } from '@/types/app';
import { countModelPrice } from '@/service/events/pushBill';
@@ -17,7 +17,6 @@ export type CQResponse = {
[key: string]: any;
};
-const moduleName = 'Classify Question';
const agentModel = 'gpt-3.5-turbo';
const agentFunName = 'agent_user_question';
const maxTokens = 2000;
@@ -88,7 +87,7 @@ export const dispatchClassifyQuestion = async (props: Record<string, any>): Promise<CQResponse> => {
return {
[result.key]: 1,
[TaskResponseKeyEnum.responseData]: {
- moduleName,
+ moduleName: ChatModuleEnum.CQ,
price: countModelPrice({ model: agentModel, tokens }),
model: agentModel,
tokens,
diff --git a/client/src/service/moduleDispatch/chat/oneapi.ts b/client/src/service/moduleDispatch/chat/oneapi.ts
index d13a3a731..9b1f6fa45 100644
--- a/client/src/service/moduleDispatch/chat/oneapi.ts
+++ b/client/src/service/moduleDispatch/chat/oneapi.ts
@@ -6,12 +6,13 @@ import { modelToolMap } from '@/utils/plugin';
import { ChatContextFilter } from '@/service/utils/chat/index';
import type { ChatItemType, QuoteItemType } from '@/types/chat';
import type { ChatHistoryItemResType } from '@/types/chat';
-import { ChatRoleEnum, sseResponseEventEnum } from '@/constants/chat';
+import { ChatModuleEnum, ChatRoleEnum, sseResponseEventEnum } from '@/constants/chat';
import { parseStreamChunk, textAdaptGptResponse } from '@/utils/adapt';
import { getOpenAIApi, axiosConfig } from '@/service/ai/openai';
import { TaskResponseKeyEnum } from '@/constants/chat';
import { getChatModel } from '@/service/utils/data';
import { countModelPrice } from '@/service/events/pushBill';
+import { ChatModelItemType } from '@/types/model';
export type ChatProps = {
res: NextApiResponse;
@@ -30,8 +31,6 @@ export type ChatResponse = {
[TaskResponseKeyEnum.responseData]: ChatHistoryItemResType;
};
-const moduleName = 'AI Chat';
-
/* request openai chat */
export const dispatchChatCompletion = async (props: Record<string, any>): Promise<ChatResponse> => {
let {
@@ -54,24 +53,153 @@ export const dispatchChatCompletion = async (props: Record): Promis
return Promise.reject('The chat model is undefined, you need to select a chat model.');
}
+ const { filterQuoteQA, quotePrompt } = filterQuote({
+ quoteQA,
+ model: modelConstantsData
+ });
+
+ const { messages, filterMessages } = getChatMessages({
+ model: modelConstantsData,
+ history,
+ quotePrompt,
+ userChatInput,
+ systemPrompt,
+ limitPrompt
+ });
+ const { max_tokens } = getMaxTokens({
+ model: modelConstantsData,
+ maxToken,
+ filterMessages
+ });
+ // console.log(messages);
+
// FastGpt temperature range: 1~10
temperature = +(modelConstantsData.maxTemperature * (temperature / 10)).toFixed(2);
+ const chatAPI = getOpenAIApi();
+ const response = await chatAPI.createChatCompletion(
+ {
+ model,
+ temperature: Number(temperature || 0),
+ max_tokens,
+ messages,
+ // frequency_penalty: 0.5, // 越大,重复内容越少
+ // presence_penalty: -0.5, // 越大,越容易出现新内容
+ stream
+ },
+ {
+ timeout: stream ? 60000 : 480000,
+ responseType: stream ? 'stream' : 'json',
+ ...axiosConfig()
+ }
+ );
+
+ const { answerText, totalTokens, completeMessages } = await (async () => {
+ if (stream) {
+ // sse response
+ const { answer } = await streamResponse({ res, response });
+ // count tokens
+ const completeMessages = filterMessages.concat({
+ obj: ChatRoleEnum.AI,
+ value: answer
+ });
+
+ const totalTokens = countOpenAIToken({
+ messages: completeMessages
+ });
+
+ return {
+ answerText: answer,
+ totalTokens,
+ completeMessages
+ };
+ } else {
+ const answer = stream ? '' : response.data.choices?.[0].message?.content || '';
+ const totalTokens = stream ? 0 : response.data.usage?.total_tokens || 0;
+
+ const completeMessages = filterMessages.concat({
+ obj: ChatRoleEnum.AI,
+ value: answer
+ });
+
+ return {
+ answerText: answer,
+ totalTokens,
+ completeMessages
+ };
+ }
+ })();
+
+ return {
+ [TaskResponseKeyEnum.answerText]: answerText,
+ [TaskResponseKeyEnum.responseData]: {
+ moduleName: ChatModuleEnum.AIChat,
+ price: countModelPrice({ model, tokens: totalTokens }),
+ model: modelConstantsData.name,
+ tokens: totalTokens,
+ question: userChatInput,
+ answer: answerText,
+ maxToken,
+ quoteList: filterQuoteQA,
+ completeMessages
+ }
+ };
+};
+
+function filterQuote({
+ quoteQA = [],
+ model
+}: {
+ quoteQA: ChatProps['quoteQA'];
+ model: ChatModelItemType;
+}) {
+ const sliceResult = modelToolMap.tokenSlice({
+ model: model.model,
+ maxToken: model.quoteMaxToken,
+ messages: quoteQA.map((item, i) => ({
+ obj: ChatRoleEnum.System,
+ value: `${i + 1}. [${item.q}\n${item.a}]`
+ }))
+ });
+
+ // slice filterSearch
+ const filterQuoteQA = quoteQA.slice(0, sliceResult.length);
+
+ const quotePrompt =
+ filterQuoteQA.length > 0
+ ? `下面是知识库内容:
+${filterQuoteQA.map((item, i) => `${i + 1}. [${item.q}\n${item.a}]`).join('\n')}
+`
+ : '';
+
+ return {
+ filterQuoteQA,
+ quotePrompt
+ };
+}
+function getChatMessages({
+ quotePrompt,
+ history = [],
+ systemPrompt,
+ limitPrompt,
+ userChatInput,
+ model
+}: {
+ quotePrompt: string;
+ history: ChatProps['history'];
+ systemPrompt: string;
+ limitPrompt: string;
+ userChatInput: string;
+ model: ChatModelItemType;
+}) {
const limitText = (() => {
if (limitPrompt) return limitPrompt;
- if (quoteQA.length > 0 && !limitPrompt) {
+ if (quotePrompt && !limitPrompt) {
return '根据知识库内容回答问题,仅回复知识库提供的内容,不要对知识库内容做补充说明。';
}
return '';
})();
- const quotePrompt =
- quoteQA.length > 0
- ? `下面是知识库内容:
-${quoteQA.map((item, i) => `${i + 1}. [${item.q}\n${item.a}]`).join('\n')}
-`
- : '';
-
const messages: ChatItemType[] = [
...(quotePrompt
? [
@@ -103,92 +231,41 @@ ${quoteQA.map((item, i) => `${i + 1}. [${item.q}\n${item.a}]`).join('\n')}
value: userChatInput
}
];
- const modelTokenLimit = getChatModel(model)?.contextMaxToken || 4000;
const filterMessages = ChatContextFilter({
- model,
+ model: model.model,
prompts: messages,
- maxTokens: Math.ceil(modelTokenLimit - 300) // filter token. not response maxToken
+ maxTokens: Math.ceil(model.contextMaxToken - 300) // filter token. not response maxToken
});
const adaptMessages = adaptChatItem_openAI({ messages: filterMessages, reserveId: false });
- const chatAPI = getOpenAIApi();
- console.log(adaptMessages);
-
- /* count response max token */
- const promptsToken = modelToolMap.countTokens({
- model,
- messages: filterMessages
- });
- maxToken = maxToken + promptsToken > modelTokenLimit ? modelTokenLimit - promptsToken : maxToken;
-
- const response = await chatAPI.createChatCompletion(
- {
- model,
- temperature: Number(temperature || 0),
- max_tokens: maxToken,
- messages: adaptMessages,
- // frequency_penalty: 0.5, // 越大,重复内容越少
- // presence_penalty: -0.5, // 越大,越容易出现新内容
- stream
- },
- {
- timeout: stream ? 60000 : 480000,
- responseType: stream ? 'stream' : 'json',
- ...axiosConfig()
- }
- );
-
- const { answerText, totalTokens, finishMessages } = await (async () => {
- if (stream) {
- // sse response
- const { answer } = await streamResponse({ res, response });
- // count tokens
- const finishMessages = filterMessages.concat({
- obj: ChatRoleEnum.AI,
- value: answer
- });
-
- const totalTokens = countOpenAIToken({
- messages: finishMessages
- });
-
- return {
- answerText: answer,
- totalTokens,
- finishMessages
- };
- } else {
- const answer = stream ? '' : response.data.choices?.[0].message?.content || '';
- const totalTokens = stream ? 0 : response.data.usage?.total_tokens || 0;
-
- const finishMessages = filterMessages.concat({
- obj: ChatRoleEnum.AI,
- value: answer
- });
-
- return {
- answerText: answer,
- totalTokens,
- finishMessages
- };
- }
- })();
return {
- [TaskResponseKeyEnum.answerText]: answerText,
- [TaskResponseKeyEnum.responseData]: {
- moduleName,
- price: countModelPrice({ model, tokens: totalTokens }),
- model: modelConstantsData.name,
- tokens: totalTokens,
- question: userChatInput,
- answer: answerText,
- maxToken,
- finishMessages
- }
+ messages: adaptMessages,
+ filterMessages
};
-};
+}
+function getMaxTokens({
+ maxToken,
+ model,
+ filterMessages = []
+}: {
+ maxToken: number;
+ model: ChatModelItemType;
+ filterMessages: ChatProps['history'];
+}) {
+ const tokensLimit = model.contextMaxToken;
+ /* count response max token */
+ const promptsToken = modelToolMap.countTokens({
+ model: model.model,
+ messages: filterMessages
+ });
+ maxToken = maxToken + promptsToken > tokensLimit ? tokensLimit - promptsToken : maxToken;
+
+ return {
+ max_tokens: maxToken
+ };
+}
async function streamResponse({ res, response }: { res: NextApiResponse; response: any }) {
let answer = '';
diff --git a/client/src/service/moduleDispatch/kb/search.ts b/client/src/service/moduleDispatch/kb/search.ts
index a69d4bdf6..54e88f3be 100644
--- a/client/src/service/moduleDispatch/kb/search.ts
+++ b/client/src/service/moduleDispatch/kb/search.ts
@@ -1,6 +1,6 @@
import { PgClient } from '@/service/pg';
import type { ChatHistoryItemResType, ChatItemType } from '@/types/chat';
-import { TaskResponseKeyEnum } from '@/constants/chat';
+import { ChatModuleEnum, TaskResponseKeyEnum } from '@/constants/chat';
import { getVector } from '@/pages/api/openapi/plugin/vector';
import { countModelPrice } from '@/service/events/pushBill';
import type { SelectedKbType } from '@/types/plugin';
@@ -20,8 +20,6 @@ export type KBSearchResponse = {
quoteQA: QuoteItemType[];
};
-const moduleName = 'KB Search';
-
export async function dispatchKBSearch(props: Record<string, any>): Promise<KBSearchResponse> {
const {
kbList = [],
@@ -65,7 +63,7 @@ export async function dispatchKBSearch(props: Record<string, any>): Promise<KBSearchResponse> {
     unEmpty: searchRes.length > 0 ? true : undefined,
quoteQA: searchRes,
responseData: {
- moduleName,
+ moduleName: ChatModuleEnum.KBSearch,
price: countModelPrice({ model: vectorModel.model, tokens: tokenLen }),
model: vectorModel.name,
tokens: tokenLen,
diff --git a/client/src/service/utils/chat/index.ts b/client/src/service/utils/chat/index.ts
index 484aa4f67..9dc2617b4 100644
--- a/client/src/service/utils/chat/index.ts
+++ b/client/src/service/utils/chat/index.ts
@@ -1,7 +1,6 @@
import { ChatItemType } from '@/types/chat';
import { modelToolMap } from '@/utils/plugin';
-import { ChatRoleEnum, sseResponseEventEnum } from '@/constants/chat';
-import { sseResponse } from '../tools';
+import { ChatRoleEnum } from '@/constants/chat';
import { OpenAiChatEnum } from '@/constants/model';
import type { NextApiResponse } from 'next';
@@ -18,18 +17,6 @@ export type StreamResponseType = {
model: `${OpenAiChatEnum}`;
[key: string]: any;
};
-export type StreamResponseReturnType = {
- responseContent: string;
- totalTokens: number;
- finishMessages: ChatItemType[];
-};
-
-/* delete invalid symbol */
-const simplifyStr = (str = '') =>
- str
- .replace(/\n+/g, '\n') // 连续空行
- .replace(/[^\S\r\n]+/g, ' ') // 连续空白内容
- .trim();
/* slice chat context by tokens */
export const ChatContextFilter = ({
diff --git a/client/src/types/chat.d.ts b/client/src/types/chat.d.ts
index bc8465369..4204d128a 100644
--- a/client/src/types/chat.d.ts
+++ b/client/src/types/chat.d.ts
@@ -56,7 +56,8 @@ export type ChatHistoryItemResType = {
question?: string;
temperature?: number;
maxToken?: number;
- finishMessages?: ChatItemType[];
+ quoteList?: QuoteItemType[];
+ completeMessages?: ChatItemType[];
// kb search
similarity?: number;
diff --git a/client/src/types/model.d.ts b/client/src/types/model.d.ts
index 715457bf5..0f42b9fdb 100644
--- a/client/src/types/model.d.ts
+++ b/client/src/types/model.d.ts
@@ -2,7 +2,7 @@ export type ChatModelItemType = {
model: string;
name: string;
contextMaxToken: number;
- systemMaxToken: number;
+ quoteMaxToken: number;
maxTemperature: number;
price: number;
};