perf: chat context truncation; QA prompts

archer
2023-04-21 23:30:26 +08:00
parent 4397a0ad6b
commit 3ea2cf1dcb
10 changed files with 63 additions and 34 deletions

View File

@@ -61,7 +61,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
   }
   // Cap the token count so the prompt stays within the model's context limit
-  // const filterPrompts = openaiChatFilter(prompts, modelConstantsData.contextMaxToken);
+  const filterPrompts = openaiChatFilter(prompts, modelConstantsData.contextMaxToken);
   // Format the messages into the ChatGPT structure
   const map = {
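
The change above switches the handler from passing the raw chat history to trimming it with openaiChatFilter first. That helper's implementation is not part of this diff; a minimal sketch of such a truncation filter, assuming a rough character-based token estimate and the Human/AI/SYSTEM message shape used in this file, could look like:

// Hypothetical truncation helper (openaiChatFilter itself is not shown in this
// diff): keep the newest messages whose estimated token total fits the budget,
// always preserving a leading SYSTEM prompt.
type ChatItemSketch = { obj: 'Human' | 'AI' | 'SYSTEM'; value: string };

function truncateChatContext(prompts: ChatItemSketch[], maxToken: number): ChatItemSketch[] {
  // Rough estimate: ~1 token per CJK character, ~1 per 4 other characters
  const estimateTokens = (text: string) =>
    Math.ceil([...text].reduce((sum, ch) => sum + (ch.charCodeAt(0) > 255 ? 1 : 0.25), 0));

  const system = prompts[0]?.obj === 'SYSTEM' ? [prompts[0]] : [];
  const rest = prompts.slice(system.length);

  let used = system.reduce((sum, item) => sum + estimateTokens(item.value), 0);
  const kept: ChatItemSketch[] = [];
  // Walk from newest to oldest and stop once the token budget is exhausted
  for (let i = rest.length - 1; i >= 0; i--) {
    used += estimateTokens(rest[i].value);
    if (used > maxToken) break;
    kept.unshift(rest[i]);
  }
  return [...system, ...kept];
}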
@@ -69,14 +69,25 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
   AI: ChatCompletionRequestMessageRoleEnum.Assistant,
   SYSTEM: ChatCompletionRequestMessageRoleEnum.System
 };
-const formatPrompts: ChatCompletionRequestMessage[] = prompts.map((item: ChatItemType) => ({
-  role: map[item.obj],
-  content: item.value
-}));
-// console.log(formatPrompts);
+const formatPrompts: ChatCompletionRequestMessage[] = filterPrompts.map(
+  (item: ChatItemType) => ({
+    role: map[item.obj],
+    content: item.value
+  })
+);
 // Compute the temperature
 const temperature = modelConstantsData.maxTemperature * (model.temperature / 10);
+// console.log({
+//   model: model.service.chatModel,
+//   temperature: temperature,
+//   // max_tokens: modelConstantsData.maxToken,
+//   messages: formatPrompts,
+//   frequency_penalty: 0.5, // higher values mean less repetition
+//   presence_penalty: -0.5, // higher values make new content more likely
+//   stream: true,
+//   stop: ['.!?。']
+// });
 // Get the chat API client
 const chatAPI = getOpenAIApi(userApiKey || systemKey);
 // Send the request
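
The commented-out log enumerates what the subsequent request sends: the formatted messages, the scaled temperature, repetition penalties, and streaming enabled. As a hedged sketch of that call with the openai v3 SDK this file imports (the apiKey and model name below are placeholders, not values from the diff):

import { Configuration, OpenAIApi, ChatCompletionRequestMessage } from 'openai';

// Sketch mirroring the fields in the commented log above; not the repo's
// exact request code, which sits below the visible part of this diff.
async function sendChatRequest(
  apiKey: string,
  messages: ChatCompletionRequestMessage[],
  temperature: number
) {
  const chatAPI = new OpenAIApi(new Configuration({ apiKey }));
  return chatAPI.createChatCompletion(
    {
      model: 'gpt-3.5-turbo', // stands in for model.service.chatModel
      temperature,
      messages,
      frequency_penalty: 0.5, // higher values penalize repeated content
      presence_penalty: -0.5, // higher values push toward new content
      stream: true
    },
    { responseType: 'stream' } // receive tokens as a stream
  );
}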

View File

@@ -1,7 +1,7 @@
 import type { NextApiRequest, NextApiResponse } from 'next';
 import { connectToDatabase } from '@/service/mongo';
 import { authChat } from '@/service/utils/chat';
-import { httpsAgent, systemPromptFilter } from '@/service/utils/tools';
+import { httpsAgent, systemPromptFilter, openaiChatFilter } from '@/service/utils/tools';
 import { ChatCompletionRequestMessage, ChatCompletionRequestMessageRoleEnum } from 'openai';
 import { ChatItemType } from '@/types/chat';
 import { jsonRes } from '@/service/response';
@@ -79,7 +79,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
     `vector <=> '[${promptVector}]' < ${similarity}`
   ],
   order: [{ field: 'vector', mode: `<=> '[${promptVector}]'` }],
-  limit: 30
+  limit: 20
 });
 const formatRedisPrompt: string[] = vectorSearch.rows.map((item) => `${item.q}\n${item.a}`);
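
Here `<=>` is pgvector's cosine-distance operator: the search keeps rows whose distance to the question's embedding is under the similarity threshold, orders them by closeness, and now returns at most 20 matches instead of 30, shrinking the QA context. Assuming the helper compiles these options into plain PostgreSQL (a sketch only; the table and column names beyond q, a, and vector are illustrative):

// Hypothetical reconstruction of the SQL the vector search would run.
const promptVector: number[] = [0.12, -0.08, 0.33]; // embedding of the user question
const similarity = 0.2; // maximum cosine distance to accept (illustrative value)

const sql = `
  SELECT q, a
  FROM modelData            -- table name assumed, not shown in this diff
  WHERE vector <=> '[${promptVector}]' < ${similarity}
  ORDER BY vector <=> '[${promptVector}]'
  LIMIT 20
`;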
@@ -116,7 +116,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
   }
   // Cap the token count so the prompt stays within the model's context limit
-  // const filterPrompts = openaiChatFilter(prompts, modelConstantsData.contextMaxToken);
+  const filterPrompts = openaiChatFilter(prompts, modelConstantsData.contextMaxToken);
   // Format the messages into the ChatGPT structure
   const map = {
@@ -124,10 +124,12 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
   AI: ChatCompletionRequestMessageRoleEnum.Assistant,
   SYSTEM: ChatCompletionRequestMessageRoleEnum.System
 };
-const formatPrompts: ChatCompletionRequestMessage[] = prompts.map((item: ChatItemType) => ({
-  role: map[item.obj],
-  content: item.value
-}));
+const formatPrompts: ChatCompletionRequestMessage[] = filterPrompts.map(
+  (item: ChatItemType) => ({
+    role: map[item.obj],
+    content: item.value
+  })
+);
 // console.log(formatPrompts);
 // Compute the temperature
 const temperature = modelConstantsData.maxTemperature * (model.temperature / 10);
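
In both handlers, the temperature line rescales the user-facing 0-10 setting into the model's allowed range. A worked example with illustrative values (maxTemperature comes from per-model config and is not shown in this diff):

// Temperature scaling as used above, with placeholder numbers.
const maxTemperature = 1.2; // hypothetical per-model cap from the model config
const userSetting = 7;      // user's 0-10 slider value (model.temperature)
const temperature = maxTemperature * (userSetting / 10); // => 0.84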