perf: model framework

This commit is contained in:
archer
2023-05-03 10:57:56 +08:00
parent aa74625f96
commit 91decc3683
19 changed files with 71 additions and 104 deletions

View File

@@ -3,7 +3,7 @@ import { getOpenAIApi } from '@/service/utils/auth';
import { axiosConfig } from '@/service/utils/tools';
import { getOpenApiKey } from '../utils/openai';
import type { ChatCompletionRequestMessage } from 'openai';
import { ChatModelEnum } from '@/constants/model';
import { OpenAiChatEnum } from '@/constants/model';
import { pushSplitDataBill } from '@/service/events/pushBill';
import { generateVector } from './generateVector';
import { openaiError2 } from '../errorCode';
@@ -88,7 +88,7 @@ A2:
chatAPI
.createChatCompletion(
{
model: ChatModelEnum.GPT35,
model: OpenAiChatEnum.GPT35,
temperature: 0.8,
n: 1,
messages: [

View File

@@ -1,5 +1,5 @@
import { connectToDatabase, Bill, User } from '../mongo';
import { modelList, ChatModelEnum, embeddingModel } from '@/constants/model';
import { ChatModelMap, OpenAiChatEnum, ChatModelType, embeddingModel } from '@/constants/model';
import { BillTypeEnum } from '@/constants/user';
import { countChatTokens } from '@/utils/tools';
@@ -11,7 +11,7 @@ export const pushChatBill = async ({
messages
}: {
isPay: boolean;
chatModel: `${ChatModelEnum}`;
chatModel: ChatModelType;
userId: string;
chatId?: '' | string;
messages: { role: 'system' | 'user' | 'assistant'; content: string }[];
@@ -30,10 +30,8 @@ export const pushChatBill = async ({
if (isPay) {
await connectToDatabase();
// 获取模型单价格
const modelItem = modelList.find((item) => item.chatModel === chatModel);
// 计算价格
const unitPrice = modelItem?.price || 5;
const unitPrice = ChatModelMap[chatModel]?.price || 5;
const price = unitPrice * tokens;
try {
@@ -88,8 +86,7 @@ export const pushSplitDataBill = async ({
if (isPay) {
try {
// 获取模型单价格, 都是用 gpt35 拆分
const modelItem = modelList.find((item) => item.chatModel === ChatModelEnum.GPT35);
const unitPrice = modelItem?.price || 3;
const unitPrice = ChatModelMap[OpenAiChatEnum.GPT35]?.price || 3;
// 计算价格
const price = unitPrice * tokenLen;
@@ -97,7 +94,7 @@ export const pushSplitDataBill = async ({
const res = await Bill.create({
userId,
type,
modelName: ChatModelEnum.GPT35,
modelName: OpenAiChatEnum.GPT35,
textLen: text.length,
tokenLen,
price

View File

@@ -4,7 +4,7 @@ import {
ModelVectorSearchModeMap,
ModelVectorSearchModeEnum,
ChatModelMap,
ChatModelEnum
OpenAiChatEnum
} from '@/constants/model';
const ModelSchema = new Schema({
@@ -57,7 +57,7 @@ const ModelSchema = new Schema({
// 聊天时使用的模型
type: String,
enum: Object.keys(ChatModelMap),
default: ChatModelEnum.GPT35
default: OpenAiChatEnum.GPT35
}
},
share: {

View File

@@ -1,6 +1,6 @@
import { openaiCreateEmbedding } from '../utils/openai';
import { PgClient } from '@/service/pg';
import { ModelDataStatusEnum, ModelVectorSearchModeEnum } from '@/constants/model';
import { ModelDataStatusEnum, ModelVectorSearchModeEnum, ChatModelMap } from '@/constants/model';
import { ModelSchema } from '@/types/mongoSchema';
import { systemPromptFilter } from '../utils/tools';
@@ -9,9 +9,9 @@ import { systemPromptFilter } from '../utils/tools';
*/
export const searchKb_openai = async ({
apiKey,
isPay,
isPay = true,
text,
similarity,
similarity = 0.2,
model,
userId
}: {
@@ -20,7 +20,7 @@ export const searchKb_openai = async ({
text: string;
model: ModelSchema;
userId: string;
similarity: number;
similarity?: number;
}): Promise<{
code: 200 | 201;
searchPrompt?: {
@@ -28,6 +28,8 @@ export const searchKb_openai = async ({
value: string;
};
}> => {
const modelConstantsData = ChatModelMap[model.chat.chatModel];
// 获取提示词的向量
const { vector: promptVector } = await openaiCreateEmbedding({
isPay,
@@ -78,11 +80,11 @@ export const searchKb_openai = async ({
}
// 有匹配情况下system 添加知识库内容。
// 系统提示词过滤,最多 2500 tokens
// 系统提示词过滤,最多 65% tokens
const filterSystemPrompt = systemPromptFilter({
model: model.chat.chatModel,
prompts: systemPrompts,
maxTokens: 2500
maxTokens: Math.floor(modelConstantsData.contextMaxToken * 0.65)
});
return {

View File

@@ -3,7 +3,7 @@ import jwt from 'jsonwebtoken';
import { ChatItemSimpleType } from '@/types/chat';
import { countChatTokens, sliceTextByToken } from '@/utils/tools';
import { ChatCompletionRequestMessageRoleEnum, ChatCompletionRequestMessage } from 'openai';
import { ChatModelEnum } from '@/constants/model';
import type { ChatModelType } from '@/constants/model';
/* 密码加密 */
export const hashPassword = (psw: string) => {
@@ -44,7 +44,7 @@ export const openaiChatFilter = ({
prompts,
maxTokens
}: {
model: `${ChatModelEnum}`;
model: ChatModelType;
prompts: ChatItemSimpleType[];
maxTokens: number;
}) => {