Compare commits

...

5 Commits

Author SHA1 Message Date
archer
6ff5db7b41 fix: btn位置 2023-04-14 01:37:45 +08:00
archer
56a0b48b97 perf: 文案;feat: 知识库模糊搜索 2023-04-13 21:34:36 +08:00
archer
ff24042df5 feat: chatgpt 对外api 2023-04-12 22:39:30 +08:00
archer
c31d247f07 feat: 知识库openapi 2023-04-12 21:54:57 +08:00
archer
e903eb5b94 perf: lafgpt 2023-04-12 19:03:27 +08:00
13 changed files with 251 additions and 55 deletions

View File

@@ -34,7 +34,7 @@ run: ## Run a dev service from host.
.PHONY: docker-build
docker-build: ## Build docker image with the desktop-frontend.
docker build -t c121914yu/fast-gpt:latest .
docker build -t c121914yu/fast-gpt:latest . --network host --build-arg HTTP_PROXY=http://127.0.0.1:7890 --build-arg HTTPS_PROXY=http://127.0.0.1:7890
##@ Deployment

View File

@@ -49,6 +49,7 @@ export const getModelTrainings = (id: string) =>
type GetModelDataListProps = RequestPaging & {
modelId: string;
searchText: string;
};
/**
* 获取模型的知识库数据

View File

@@ -4,20 +4,20 @@ import { connectToDatabase } from '@/service/mongo';
import { authToken } from '@/service/utils/tools';
import { connectRedis } from '@/service/redis';
import { VecModelDataIdx } from '@/constants/redis';
import { SearchOptions } from 'redis';
export default async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
try {
let {
modelId,
pageNum = 1,
pageSize = 10
pageSize = 10,
searchText = ''
} = req.query as {
modelId: string;
pageNum: string;
pageSize: string;
searchText: string;
};
const { authorization } = req.headers;
pageNum = +pageNum;
@@ -40,7 +40,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse<
// 从 redis 中获取数据
const searchRes = await redis.ft.search(
VecModelDataIdx,
`@modelId:{${modelId}} @userId:{${userId}}`,
`@modelId:{${modelId}} @userId:{${userId}} ${searchText ? `*${searchText}*` : ''}`,
{
RETURN: ['q', 'text', 'status'],
LIMIT: {

View File

@@ -0,0 +1,158 @@
import type { NextApiRequest, NextApiResponse } from 'next';
import { connectToDatabase, Model } from '@/service/mongo';
import { getOpenAIApi } from '@/service/utils/chat';
import { httpsAgent, openaiChatFilter, authOpenApiKey } from '@/service/utils/tools';
import { ChatCompletionRequestMessage, ChatCompletionRequestMessageRoleEnum } from 'openai';
import { ChatItemType } from '@/types/chat';
import { jsonRes } from '@/service/response';
import { PassThrough } from 'stream';
import { modelList } from '@/constants/model';
import { pushChatBill } from '@/service/events/pushBill';
import { gpt35StreamResponse } from '@/service/utils/openai';
/* Send prompts (OpenAPI chat endpoint).
 *
 * Authenticates via an open-API key, loads the caller's model config from
 * MongoDB, forwards the chat history to OpenAI's chat-completion API
 * (streamed or plain JSON depending on `isStream`), and finally records a
 * chat bill for the generated text.
 *
 * Request body: { prompts: ChatItemType[]; modelId: string; isStream?: boolean }
 * Response: streamed tokens when isStream, otherwise jsonRes({ data: string }).
 */
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
  let step = 0; // step=1 means the streamed response has already started
  const stream = new PassThrough();
  stream.on('error', () => {
    console.log('error: ', 'stream error');
    stream.destroy();
  });
  // Tear the pass-through stream down whenever the client connection ends.
  res.on('close', () => {
    stream.destroy();
  });
  res.on('error', () => {
    console.log('error: ', 'request error');
    stream.destroy();
  });
  try {
    const {
      prompts,
      modelId,
      isStream = true
    } = req.body as {
      prompts: ChatItemType[];
      modelId: string;
      isStream: boolean;
    };
    // Validate required parameters and bound the history length (1-30 items).
    if (!prompts || !modelId) {
      throw new Error('缺少参数');
    }
    if (!Array.isArray(prompts)) {
      throw new Error('prompts is not array');
    }
    if (prompts.length > 30 || prompts.length === 0) {
      throw new Error('prompts length range 1-30');
    }
    await connectToDatabase();
    let startTime = Date.now();
    // Resolve the API key and user from the open-API credential on the request.
    const { apiKey, userId } = await authOpenApiKey(req);
    // The model must exist AND belong to this user.
    const model = await Model.findOne({
      _id: modelId,
      userId
    });
    if (!model) {
      throw new Error('无权使用该模型');
    }
    // Static per-model constants (context window, max temperature, ...).
    const modelConstantsData = modelList.find((item) => item.model === model.service.modelName);
    if (!modelConstantsData) {
      throw new Error('模型加载异常');
    }
    // If the model defines a system prompt, insert it automatically at the front.
    if (model.systemPrompt) {
      prompts.unshift({
        obj: 'SYSTEM',
        value: model.systemPrompt
      });
    }
    // Trim the history so it stays within the model's token budget.
    const filterPrompts = openaiChatFilter(prompts, modelConstantsData.contextMaxToken);
    // Map the internal chat roles onto the chatgpt message format.
    const map = {
      Human: ChatCompletionRequestMessageRoleEnum.User,
      AI: ChatCompletionRequestMessageRoleEnum.Assistant,
      SYSTEM: ChatCompletionRequestMessageRoleEnum.System
    };
    const formatPrompts: ChatCompletionRequestMessage[] = filterPrompts.map(
      (item: ChatItemType) => ({
        role: map[item.obj],
        content: item.value
      })
    );
    // console.log(formatPrompts);
    // Compute the effective temperature: model.temperature is a 0-10 scale
    // mapped onto [0, maxTemperature].
    const temperature = modelConstantsData.maxTemperature * (model.temperature / 10);
    // Get the chat API client bound to the caller's key.
    const chatAPI = getOpenAIApi(apiKey);
    // Issue the completion request.
    const chatResponse = await chatAPI.createChatCompletion(
      {
        model: model.service.chatModel,
        temperature: temperature,
        // max_tokens: modelConstantsData.maxToken,
        messages: formatPrompts,
        frequency_penalty: 0.5, // higher -> less repeated content
        presence_penalty: -0.5, // higher -> more likely to introduce new topics
        stream: isStream,
        stop: ['.!?。']
      },
      {
        timeout: 40000,
        responseType: isStream ? 'stream' : 'json',
        httpsAgent: httpsAgent(true)
      }
    );
    console.log('api response time:', `${(Date.now() - startTime) / 1000}s`);
    step = 1; // from here on, errors can no longer be reported as JSON
    let responseContent = '';
    if (isStream) {
      // Pipe OpenAI's SSE stream through to the client and collect the text.
      const streamResponse = await gpt35StreamResponse({
        res,
        stream,
        chatResponse
      });
      responseContent = streamResponse.responseContent;
    } else {
      responseContent = chatResponse.data.choices?.[0]?.message?.content || '';
      jsonRes(res, {
        data: responseContent
      });
    }
    const promptsContent = formatPrompts.map((item) => item.content).join('');
    // Only usage through the platform's key is billed (original author's note).
    pushChatBill({
      isPay: true,
      modelName: model.service.modelName,
      userId,
      text: promptsContent + responseContent
    });
  } catch (err: any) {
    if (step === 1) {
      // The stream already started: headers are sent, so just end the stream.
      console.log('error结束');
      stream.destroy();
    } else {
      res.status(500);
      jsonRes(res, {
        code: 500,
        error: err
      });
    }
  }
}

View File

@@ -83,25 +83,26 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
下面是一些例子:
实现一个手机号发生注册验证码方法.
1. 从 query 中获取 phone.
2. 校验手机号格式是否正确,不正确则返回错误响应,消息为:手机号格式错误.
2. 校验手机号格式是否正确,不正确则返回错误码501,原因为:手机号格式错误.
3. 给 phone 发送一个短信验证码,验证码长度为6位字符串,内容为:你正在注册laf,验证码为:code.
4. 数据库添加数据,表为"codes",内容为 {phone, code}.
实现根据手机号注册账号,需要验证手机验证码.
1. 从 body 中获取 phone 和 code.
2. 校验手机号格式是否正确,不正确返回错误响应,消息为:手机号格式错误.
2. 获取数据库数据,表为"codes",查找是否有符合 phone, code 等于body参数的记录,没有的话错误响应,消息为:验证码不正确.
2. 校验手机号格式是否正确,不正确返回错误码501,原因为:手机号格式错误.
2. 获取数据库数据,表为"codes",查找是否有符合 phone, code 等于body参数的记录,没有的话返回错误码500,原因为:验证码不正确.
4. 添加数据库数据,表为"users" ,内容为{phone, code, createTime}.
5. 删除数据库数据,删除 code 记录.
6. 返回新建用户的Id: return {userId}
更新博客记录。传入blogId,blogText,tags,还需要记录更新的时间.
1. 从 body 中获取 blogId,blogText 和 tags.
2. 校验 blogId 是否为空,为空则错误响应,消息为:博客ID不能为空.
3. 校验 blogText 是否为空,为空则错误响应,消息为:博客内容不能为空.
4. 校验 tags 是否为数组,不是则错误响应,消息为:标签必须为数组.
2. 校验 blogId 是否为空,为空则返回错误码500,原因为:博客ID不能为空.
3. 校验 blogText 是否为空,为空则返回错误码500,原因为:博客内容不能为空.
4. 校验 tags 是否为数组,不是则返回错误码500,原因为:标签必须为数组.
5. 获取当前时间,记录为 updateTime.
6. 更新数据库数据,表为"blogs",更新符合 blogId 的记录的内容为{blogText, tags, updateTime}.
7. 返回结果 {message: "更新博客记录成功"}.`
7. 返回结果 "更新博客记录成功"`
},
{
role: 'user',
@@ -161,8 +162,8 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
}
}
// textArr 筛选,最多 3200 tokens
const systemPrompt = systemPromptFilter(formatRedisPrompt, 3200);
// textArr 筛选,最多 3000 tokens
const systemPrompt = systemPromptFilter(formatRedisPrompt, 3000);
prompts.unshift({
obj: 'SYSTEM',

View File

@@ -10,7 +10,7 @@ import { ChatCompletionRequestMessage, ChatCompletionRequestMessageRoleEnum } fr
import { ChatItemType } from '@/types/chat';
import { jsonRes } from '@/service/response';
import { PassThrough } from 'stream';
import { modelList } from '@/constants/model';
import { modelList, ModelVectorSearchModeMap, ModelVectorSearchModeEnum } from '@/constants/model';
import { pushChatBill } from '@/service/events/pushBill';
import { connectRedis } from '@/service/redis';
import { VecModelDataPrefix } from '@/constants/redis';
@@ -84,11 +84,13 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
text: prompts[prompts.length - 1].value // 取最后一个
});
// 搜索系统提示词, 按相似度从 redis 中搜出相关的 q 和 text
const similarity = ModelVectorSearchModeMap[model.search.mode]?.similarity || 0.22;
// 搜索系统提示词, 按相似度从 redis 中搜出相关的 q 和 text
const redisData: any[] = await redis.sendCommand([
'FT.SEARCH',
`idx:${VecModelDataPrefix}:hash`,
`@modelId:{${modelId}} @vector:[VECTOR_RANGE 0.22 $blob]=>{$YIELD_DISTANCE_AS: score}`,
`@modelId:{${modelId}} @vector:[VECTOR_RANGE ${similarity} $blob]=>{$YIELD_DISTANCE_AS: score}`,
'RETURN',
'1',
'text',
@@ -120,7 +122,24 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
formatRedisPrompt.unshift(prompts.shift()?.value || '');
}
if (formatRedisPrompt.length > 0) {
/* 高相似度+退出,无法匹配时直接退出 */
if (
formatRedisPrompt.length === 0 &&
model.search.mode === ModelVectorSearchModeEnum.hightSimilarity
) {
return res.send('对不起,你的问题不在知识库中。');
}
/* 高相似度+无上下文,不添加额外知识 */
if (
formatRedisPrompt.length === 0 &&
model.search.mode === ModelVectorSearchModeEnum.noContext
) {
prompts.unshift({
obj: 'SYSTEM',
value: model.systemPrompt
});
} else {
// 有匹配或者低匹配度模式情况下,添加知识库内容。
// 系统提示词过滤,最多 2800 tokens
const systemPrompt = systemPromptFilter(formatRedisPrompt, 2800);
@@ -130,8 +149,6 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
'YYYY/MM/DD HH:mm:ss'
)} ${systemPrompt}"`
});
} else {
return res.send('对不起,你的问题不在知识库中。');
}
// 控制在 tokens 数量,防止超出

View File

@@ -15,7 +15,8 @@ import {
Menu,
MenuButton,
MenuList,
MenuItem
MenuItem,
Input
} from '@chakra-ui/react';
import type { ModelSchema } from '@/types/mongoSchema';
import type { RedisModelDataItemType } from '@/types/redis';
@@ -40,9 +41,11 @@ const SelectFileModel = dynamic(() => import('./SelectFileModal'));
const SelectUrlModel = dynamic(() => import('./SelectUrlModal'));
const SelectCsvModal = dynamic(() => import('./SelectCsvModal'));
let lastSearch = '';
const ModelDataCard = ({ model }: { model: ModelSchema }) => {
const { Loading, setIsLoading } = useLoading();
const [searchText, setSearchText] = useState('');
const {
data: modelDataList,
isLoading,
@@ -54,7 +57,8 @@ const ModelDataCard = ({ model }: { model: ModelSchema }) => {
api: getModelDataList,
pageSize: 8,
params: {
modelId: model._id
modelId: model._id,
searchText
}
});
@@ -158,9 +162,33 @@ const ModelDataCard = ({ model }: { model: ModelSchema }) => {
</MenuList>
</Menu>
</Flex>
{!!(splitDataLen && splitDataLen > 0) && (
<Box fontSize={'xs'}>{splitDataLen}...</Box>
)}
<Flex mt={4}>
{/* 拆分数据提示 */}
{!!(splitDataLen && splitDataLen > 0) && (
<Box fontSize={'xs'}>{splitDataLen}...</Box>
)}
<Box flex={1}></Box>
<Input
maxW={'240px'}
size={'sm'}
value={searchText}
placeholder="搜索相关问题和答案,回车确认"
onChange={(e) => setSearchText(e.target.value)}
onBlur={() => {
if (searchText === lastSearch) return;
getData(1);
lastSearch = searchText;
}}
onKeyDown={(e) => {
if (searchText === lastSearch) return;
if (e.key === 'Enter') {
getData(1);
lastSearch = searchText;
}
}}
/>
</Flex>
<Box mt={4}>
<TableContainer minH={'500px'}>
<Table variant={'simple'}>

View File

@@ -54,20 +54,20 @@ const ModelEditForm = ({
})}
></Input>
</Flex>
<Flex alignItems={'center'} mt={4}>
<Flex alignItems={'center'} mt={5}>
<Box flex={'0 0 80px'} w={0}>
modelId:
</Box>
<Box>{getValues('_id')}</Box>
</Flex>
</FormControl>
<Flex alignItems={'center'} mt={4}>
<Flex alignItems={'center'} mt={5}>
<Box flex={'0 0 80px'} w={0}>
:
:
</Box>
<Box>{getValues('service.modelName')}</Box>
<Box>{modelList.find((item) => item.model === getValues('service.modelName'))?.name}</Box>
</Flex>
<Flex alignItems={'center'} mt={4}>
<Flex alignItems={'center'} mt={5}>
<Box flex={'0 0 80px'} w={0}>
:
</Box>
@@ -80,7 +80,7 @@ const ModelEditForm = ({
</Box>
</Flex>
<Flex mt={5} alignItems={'center'}>
<Box flex={'0 0 80px'}>:</Box>
<Box flex={'0 0 150px'}></Box>
<Button
colorScheme={'gray'}
variant={'outline'}

View File

@@ -21,7 +21,6 @@ const ModelDetail = ({ modelId }: { modelId: string }) => {
const { isPc, media } = useScreen();
const { setLoading } = useGlobalStore();
// const SelectFileDom = useRef<HTMLInputElement>(null);
const [model, setModel] = useState<ModelSchema>(defaultModel);
const formHooks = useForm<ModelSchema>({
defaultValues: model
@@ -243,11 +242,6 @@ const ModelDetail = ({ modelId }: { modelId: string }) => {
<Grid mt={5} gridTemplateColumns={media('1fr 1fr', '1fr')} gridGap={5}>
<ModelEditForm formHooks={formHooks} handleDelModel={handleDelModel} canTrain={canTrain} />
{/* {canTrain && (
<Card p={4}>
<Training model={model} />
</Card>
)} */}
{canTrain && model._id && (
<Card
p={4}
@@ -263,11 +257,6 @@ const ModelDetail = ({ modelId }: { modelId: string }) => {
</Card>
)}
</Grid>
{/* 文件选择 */}
{/* <Box position={'absolute'} w={0} h={0} overflow={'hidden'}>
<input ref={SelectFileDom} type="file" accept=".jsonl" onChange={startTraining} />
</Box> */}
</>
);
};

View File

@@ -16,6 +16,7 @@ import { formatModelStatus } from '@/constants/model';
import dayjs from 'dayjs';
import type { ModelSchema } from '@/types/mongoSchema';
import { useRouter } from 'next/router';
import { modelList } from '@/constants/model';
const ModelTable = ({
models = [],
@@ -31,6 +32,15 @@ const ModelTable = ({
key: 'name',
dataIndex: 'name'
},
{
title: '模型类型',
key: 'service',
render: (model: ModelSchema) => (
<Box fontWeight={'bold'} whiteSpace={'pre-wrap'} maxW={'200px'}>
{modelList.find((item) => item.model === model.service.modelName)?.name}
</Box>
)
},
{
title: '最后更新时间',
key: 'updateTime',
@@ -51,15 +61,7 @@ const ModelTable = ({
</Tag>
)
},
{
title: 'AI模型',
key: 'service',
render: (item: ModelSchema) => (
<Box wordBreak={'break-all'} whiteSpace={'pre-wrap'} maxW={'200px'}>
{item.service.modelName}
</Box>
)
},
{
title: '操作',
key: 'control',
@@ -69,7 +71,7 @@ const ModelTable = ({
</Button>
<Button
colorScheme={'gray'}
variant={'outline'}
onClick={() => router.push(`/model/detail?modelId=${item._id}`)}
>

View File

@@ -30,7 +30,7 @@ const BillTable = () => {
<Th></Th>
<Th></Th>
<Th>Tokens </Th>
<Th></Th>
<Th></Th>
</Tr>
</Thead>
<Tbody fontSize={'sm'}>

View File

@@ -77,7 +77,7 @@ const PayRecordTable = () => {
<Th></Th>
<Th></Th>
<Th></Th>
<Th></Th>
<Th></Th>
<Th></Th>
</Tr>
</Thead>

View File

@@ -21,7 +21,7 @@ export const pushChatBill = async ({
try {
// 计算 token 数量
const tokens = Math.floor(encode(text).length * 0.7);
const tokens = Math.floor(encode(text).length * 0.75);
console.log(`chat generate success. text len: ${text.length}. token len: ${tokens}`);