Commit: 3b776b6639 (parent: dd8f2744bf)
Author: Archer
Date: 2023-10-17 10:00:32 +08:00
Committed by: GitHub
98 changed files with 1525 additions and 983 deletions

@@ -7,7 +7,6 @@ import { textAdaptGptResponse } from '@/utils/adapt';
import { getAIApi } from '@fastgpt/core/ai/config';
import type { ChatCompletion, StreamChatType } from '@fastgpt/core/ai/type';
import { TaskResponseKeyEnum } from '@/constants/chat';
-import { getChatModel } from '@/service/utils/data';
import { countModelPrice } from '@/service/common/bill/push';
import { ChatModelItemType } from '@/types/model';
import { postTextCensor } from '@fastgpt/common/plusApi/censor';
@@ -15,12 +14,13 @@ import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/core/ai/constant'
import { AppModuleItemType } from '@/types/app';
import { countMessagesTokens, sliceMessagesTB } from '@/utils/common/tiktoken';
import { adaptChat2GptMessages } from '@/utils/common/adapt/message';
-import { defaultQuotePrompt, defaultQuoteTemplate } from '@/global/core/prompt/AIChat';
+import { Prompt_QuotePromptList, Prompt_QuoteTemplateList } from '@/global/core/prompt/AIChat';
import type { AIChatProps } from '@/types/core/aiChat';
import { replaceVariable } from '@/utils/common/tools/text';
import { FlowModuleTypeEnum } from '@/constants/flow';
import type { ModuleDispatchProps } from '@/types/core/chat/type';
-import { responseWrite, responseWriteController } from '@/service/common/stream';
+import { responseWrite, responseWriteController } from '@fastgpt/common/tools/stream';
+import { getChatModel, ModelTypeEnum } from '@/service/core/ai/model';
export type ChatProps = ModuleDispatchProps<
AIChatProps & {
@@ -47,12 +47,13 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
user,
outputs,
inputs: {
-model = global.chatModels[0]?.model,
+model,
temperature = 0,
maxToken = 4000,
history = [],
quoteQA = [],
userChatInput,
+isResponseAnswerText = true,
systemPrompt = '',
limitPrompt,
quoteTemplate,
@@ -63,6 +64,8 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
return Promise.reject('Question is empty');
}
+stream = stream && isResponseAnswerText;
// temperature adapt
const modelConstantsData = getChatModel(model);
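Taken together, the two hunks above wire up the new isResponseAnswerText input: it defaults to true so existing flows keep streaming, and stream = stream && isResponseAnswerText turns streaming off whenever a node's answer should not be written back to the client. The model input also loses its global.chatModels[0] fallback, since model lookup now goes through getChatModel from '@/service/core/ai/model'. A minimal sketch of the gate, with an illustrative function name the diff itself inlines rather than defining:

    // Illustrative only: the commit inlines this logic instead of naming it.
    function resolveStream(stream: boolean, isResponseAnswerText: boolean): boolean {
      // If the node is configured to swallow its answer, never stream,
      // regardless of what the request asked for.
      return stream && isResponseAnswerText;
    }

    resolveStream(true, false); // => false: the answer stays internal to the flow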
@@ -110,18 +113,18 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
model,
temperature,
max_tokens,
+stream,
messages: [
-...(modelConstantsData.defaultSystem
+...(modelConstantsData.defaultSystemChatPrompt
? [
{
role: ChatCompletionRequestMessageRoleEnum.System,
-content: modelConstantsData.defaultSystem
+content: modelConstantsData.defaultSystemChatPrompt
}
]
: []),
...messages
-],
-stream
+]
});
const { answerText, totalTokens, completeMessages } = await (async () => {
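This hunk does two things: stream moves up beside the other scalar request options, and the per-model system prompt field is renamed from defaultSystem to defaultSystemChatPrompt. Flattened out of the diff for readability, the resulting payload looks as follows (field names are exactly those shown above; the role constant is the ChatCompletionRequestMessageRoleEnum.System already imported at the top of the file):

    const requestBody = {
      model,
      temperature,
      max_tokens,
      stream,
      messages: [
        // Prepend the model's configured system prompt only when one is set.
        ...(modelConstantsData.defaultSystemChatPrompt
          ? [
              {
                role: ChatCompletionRequestMessageRoleEnum.System,
                content: modelConstantsData.defaultSystemChatPrompt
              }
            ]
          : []),
        ...messages
      ]
    };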
@@ -172,7 +175,9 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
[TaskResponseKeyEnum.responseData]: {
moduleType: FlowModuleTypeEnum.chatNode,
moduleName,
-price: user.openaiAccount?.key ? 0 : countModelPrice({ model, tokens: totalTokens }),
+price: user.openaiAccount?.key
+  ? 0
+  : countModelPrice({ model, tokens: totalTokens, type: ModelTypeEnum.chat }),
model: modelConstantsData.name,
tokens: totalTokens,
question: userChatInput,
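Billing now tags the spend with an explicit ModelTypeEnum.chat, presumably so chat tokens can be priced separately from other model classes. countModelPrice itself lives in '@/service/common/bill/push' and its body is not part of this commit; the sketch below is only an assumed shape of the lookup the new argument implies, with an invented priceTable and placeholder rate:

    // Assumed sketch, not the project's real implementation.
    const priceTable: Record<string, Record<string, number>> = {
      chat: { 'gpt-3.5-turbo': 0.0000015 } // placeholder per-token rate
    };

    function countModelPrice({ model, tokens, type }: { model: string; tokens: number; type: string }) {
      const perToken = priceTable[type]?.[model] ?? 0;
      return perToken * tokens;
    }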
@@ -198,7 +203,7 @@ function filterQuote({
maxTokens: model.quoteMaxToken,
messages: quoteQA.map((item, index) => ({
obj: ChatRoleEnum.System,
-value: replaceVariable(quoteTemplate || defaultQuoteTemplate, {
+value: replaceVariable(quoteTemplate || Prompt_QuoteTemplateList[0].value, {
...item,
index: index + 1
})
@@ -212,7 +217,7 @@ function filterQuote({
filterQuoteQA.length > 0
? `${filterQuoteQA
.map((item, index) =>
-replaceVariable(quoteTemplate || defaultQuoteTemplate, {
+replaceVariable(quoteTemplate || Prompt_QuoteTemplateList[0].value, {
...item,
index: `${index + 1}`
})
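Both quote-rendering sites in filterQuote switch their fallback from the removed defaultQuoteTemplate constant to Prompt_QuoteTemplateList[0].value, i.e. the first entry of the template list introduced in the import hunk. replaceVariable comes from '@/utils/common/tools/text' and is unchanged by this commit; the following is an assumption about its {{variable}} substitution behavior, not the project's actual code:

    // Assumed behavior: replace every {{key}} with the corresponding value.
    function replaceVariable(text: string, obj: Record<string, string | number>) {
      for (const [key, val] of Object.entries(obj)) {
        text = text.replace(new RegExp(`{{${key}}}`, 'g'), String(val));
      }
      return text;
    }

    // With a hypothetical template '{{index}}. {{q}}':
    replaceVariable('{{index}}. {{q}}', { index: 1, q: 'hello' }); // => '1. hello'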
@@ -243,7 +248,7 @@ function getChatMessages({
model: ChatModelItemType;
}) {
const question = quoteText
-? replaceVariable(quotePrompt || defaultQuotePrompt, {
+? replaceVariable(quotePrompt || Prompt_QuotePromptList[0].value, {
quote: quoteText,
question: userChatInput
})
@@ -275,7 +280,7 @@ function getChatMessages({
const filterMessages = ChatContextFilter({
messages,
-maxTokens: Math.ceil(model.contextMaxToken - 300) // filter token. not response maxToken
+maxTokens: Math.ceil(model.maxToken - 300) // filter token. not response maxToken
});
const adaptMessages = adaptChat2GptMessages({ messages: filterMessages, reserveId: false });
@@ -294,7 +299,7 @@ function getMaxTokens({
model: ChatModelItemType;
filterMessages: ChatProps['inputs']['history'];
}) {
-const tokensLimit = model.contextMaxToken;
+const tokensLimit = model.maxToken;
/* count response max token */
const promptsToken = countMessagesTokens({
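The two renames above are the same field change (contextMaxToken becomes maxToken) applied to both consumers of the model's context window: ChatContextFilter trims history to the window minus 300 tokens of headroom, and getMaxTokens caps the response budget by whatever the prompt leaves free. A hedged sketch of that budget arithmetic, with assumed names, since the diff shows only the tokensLimit assignment:

    // Assumed names; only tokensLimit = model.maxToken appears in the diff.
    function getResponseMaxTokens(modelMaxToken: number, promptTokens: number, requestedMaxToken: number) {
      // The reply can use at most what the prompt left of the window,
      // and never more than the caller requested.
      const remaining = Math.max(modelMaxToken - promptTokens, 0);
      return Math.min(requestedMaxToken, remaining);
    }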
@@ -349,7 +354,7 @@ async function streamResponse({
stream.controller?.abort();
break;
}
-const content = part.choices[0]?.delta?.content || '';
+const content = part.choices?.[0]?.delta?.content || '';
answer += content;
responseWrite({
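The final hunk adds optional chaining on choices itself. The old expression part.choices[0]?.delta?.content already tolerated an empty choices array (indexing it just yields undefined), but threw a TypeError on a chunk with no choices field at all, which some OpenAI-compatible gateways emit as keep-alive or usage frames. A sketch of the loop around the changed line, under that assumption:

    // Assumed shape of the surrounding stream loop.
    for await (const part of stream) {
      // Optional chaining tolerates chunks where choices is missing entirely.
      const content = part.choices?.[0]?.delta?.content || '';
      answer += content;
      // responseWrite(...) then forwards the delta to the client.
    }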