Compare commits

...

28 Commits

Author SHA1 Message Date
archer
569772148f model 2023-07-05 18:12:00 +08:00
archer
63d1657f9b admin withdraw 2023-07-05 11:25:12 +08:00
archer
d00ac152b5 price 2023-07-05 09:54:20 +08:00
archer
e979a55f19 fix: ts 2023-07-04 21:35:09 +08:00
archer
026d87c61e fix: refresh 2023-07-04 21:30:42 +08:00
archer
2a45fe520b perf: code and inform 2023-07-04 21:24:32 +08:00
archer
8635de866f docs 2023-07-04 18:09:09 +08:00
archer
982e36e79d docs 2023-07-04 15:54:18 +08:00
archer
dda7847f77 price 2023-07-04 11:52:32 +08:00
archer
93fc9ee65d perf: ci 2023-07-04 11:52:32 +08:00
archer
a4e2c6510f perf: admin 2023-07-04 11:52:31 +08:00
archer
c411ca4bd4 text 2023-07-04 11:52:31 +08:00
archer
8af10a7c9a perf: animation 2023-07-04 11:52:30 +08:00
archer
65cab349b0 fix: unstream response type 2023-07-04 11:52:29 +08:00
archer
ca814dcaf4 fix: admin chart 2023-07-04 11:52:29 +08:00
archer
1367ba9d32 feat: admin 2023-07-04 11:52:28 +08:00
archer
62489ef12f fix: share chat 2023-07-04 11:52:27 +08:00
archer
3d2043c16f docs 2023-07-04 11:52:27 +08:00
archer
95066262b7 close ssr query 2023-07-04 11:52:26 +08:00
archer
deb9be4160 fix: pay error catch 2023-07-04 11:52:26 +08:00
kssdxw
f382b2194d fix: openai data truncation (#112) 2023-07-04 11:51:56 +08:00
archer
a4744dd78f price 2023-06-25 20:38:49 +08:00
archer
a9d258d992 fix: docs 2023-06-25 16:30:13 +08:00
archer
f56a339ad1 perf: index 2023-06-25 16:05:43 +08:00
archer
68eca25df4 fix: ssr close 2023-06-25 14:16:54 +08:00
archer
9eed321471 perf: docker-compose 2023-06-25 13:41:44 +08:00
archer
426176db47 fix: apikey 2023-06-25 13:20:00 +08:00
archer
cfb31afbd9 fix: select ui;perf: max link and compose 2023-06-25 10:52:58 +08:00
57 changed files with 742 additions and 420 deletions

79
.github/workflows/admin-image.yml vendored Normal file
View File

@@ -0,0 +1,79 @@
# Builds the fastgpt-admin image, pushes it to GHCR, then mirrors the
# image to Docker Hub. Triggered manually, or on pushes to main / v*.*.*
# tags that touch the admin/ directory.
name: Build fastgpt-admin images and copy image to docker hub
on:
  workflow_dispatch:
  push:
    paths:
      - 'admin/**'
    branches:
      - 'main'
    tags:
      - 'v*.*.*'
jobs:
  build-admin-images:
    runs-on: ubuntu-20.04
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 1
      - name: Install Dependencies
        run: |
          sudo apt update && sudo apt install -y nodejs npm
      - name: Set up QEMU (optional)
        uses: docker/setup-qemu-action@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
        with:
          driver-opts: network=host
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GH_PAT }}
      # main branch publishes :latest; any other ref (a v*.*.* tag) publishes
      # an image tagged with that ref name.
      - name: Set DOCKER_REPO_TAGGED based on branch or tag
        run: |
          if [[ "${{ github.ref_name }}" == "main" ]]; then
            echo "DOCKER_REPO_TAGGED=ghcr.io/${{ github.repository_owner }}/fastgpt-admin:latest" >> $GITHUB_ENV
          else
            echo "DOCKER_REPO_TAGGED=ghcr.io/${{ github.repository_owner }}/fastgpt-admin:${{ github.ref_name }}" >> $GITHUB_ENV
          fi
      - name: Build and publish image for main branch or tag push event
        env:
          DOCKER_REPO_TAGGED: ${{ env.DOCKER_REPO_TAGGED }}
        # Build from the admin/ directory: this workflow is scoped to admin/**
        # and produces the fastgpt-admin image (the original `cd client` was a
        # copy-paste from the fastgpt image workflow).
        # The image.source label must be a single URL with no embedded spaces,
        # otherwise GHCR cannot link the package back to the repository.
        run: |
          cd admin && \
          docker buildx build \
          --platform linux/amd64,linux/arm64 \
          --label "org.opencontainers.image.source=https://github.com/${{ github.repository_owner }}/FastGPT" \
          --label "org.opencontainers.image.description=fastgpt-admin image" \
          --label "org.opencontainers.image.licenses=MIT" \
          --push \
          -t ${DOCKER_REPO_TAGGED} \
          -f Dockerfile \
          .
  push-to-docker-hub:
    needs: build-admin-images
    runs-on: ubuntu-20.04
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKER_HUB_NAME }}
          password: ${{ secrets.DOCKER_HUB_PASSWORD }}
      # Mirror the tagging scheme used in the build job.
      - name: Set DOCKER_REPO_TAGGED based on branch or tag
        run: |
          if [[ "${{ github.ref_name }}" == "main" ]]; then
            echo "IMAGE_TAG=latest" >> $GITHUB_ENV
          else
            echo "IMAGE_TAG=${{ github.ref_name }}" >> $GITHUB_ENV
          fi
      - name: Pull image from GitHub Container Registry
        run: docker pull ghcr.io/${{ github.repository_owner }}/fastgpt-admin:${{env.IMAGE_TAG}}
      - name: Tag image with Docker Hub repository name and version tag
        run: docker tag ghcr.io/${{ github.repository_owner }}/fastgpt-admin:${{env.IMAGE_TAG}} ${{ secrets.DOCKER_IMAGE_NAME }}:${{env.IMAGE_TAG}}
      - name: Push image to Docker Hub
        run: docker push ${{ secrets.DOCKER_IMAGE_NAME }}:${{env.IMAGE_TAG}}

View File

@@ -1,13 +1,15 @@
name: Build images and copy image to docker
name: Build fastgpt images and copy image to docker hub
on:
workflow_dispatch:
push:
paths:
- 'client/**'
branches:
- 'main'
tags:
- 'v*.*.*'
jobs:
build-images:
build-fastgpt-images:
runs-on: ubuntu-20.04
steps:
- name: Checkout
@@ -52,7 +54,7 @@ jobs:
-f Dockerfile \
.
push-to-docker-hub:
needs: build-images
needs: build-fastgpt-images
runs-on: ubuntu-20.04
steps:
- name: Checkout code

View File

@@ -23,7 +23,7 @@ Fast GPT 允许你使用自己的 openai API KEY 来快速的调用 openai 接
- [Sealos 部署](https://sealos.io/docs/examples/ai-applications/install-fastgpt-on-desktop) 无需服务器,代理和域名。
- [docker-compose 部署](docs/deploy/docker.md)
- [由社区贡献的宝塔部署和本地运行教程](https://space.bilibili.com/431177525/channel/collectiondetail?sid=1370663)
- [由社区贡献的宝塔部署和本地运行教程](https://www.bilibili.com/video/BV1tV4y1y7Mj/?vd_source=92041a1a395f852f9d89158eaa3f61b4)
## :point_right: RoadMap
@@ -34,14 +34,6 @@ Fast GPT 允许你使用自己的 openai API KEY 来快速的调用 openai 接
添加 wx 进入:
![Demo](https://otnvvf-imgs.oss.laf.run/wx300.jpg)
## 👀 其他
- [FastGpt 常见问题](https://kjqvjse66l.feishu.cn/docx/HtrgdT0pkonP4kxGx8qcu6XDnGh)
- [docker 部署教程](https://www.bilibili.com/video/BV1jo4y147fT/)
- [公众号接入](https://www.bilibili.com/video/BV1xh4y1t7fy/)
- [FastGpt V3.4 更新集合](https://www.bilibili.com/video/BV1Lo4y147Qh/?vd_source=92041a1a395f852f9d89158eaa3f61b4)
- [FastGpt 知识库演示](https://www.bilibili.com/video/BV1Wo4y1p7i1/)
## Powered by
- [TuShan: 5 分钟搭建后台管理系统](https://github.com/msgbyte/tushan)
@@ -49,6 +41,17 @@ Fast GPT 允许你使用自己的 openai API KEY 来快速的调用 openai 接
- [Sealos: 快速部署集群应用](https://github.com/labring/sealos)
- [One API: 令牌管理 & 二次分发,支持 Azure](https://github.com/songquanpeng/one-api)
## 👀 其他
- [FastGpt 常见问题](https://kjqvjse66l.feishu.cn/docx/HtrgdT0pkonP4kxGx8qcu6XDnGh)
- [docker 部署教程视频](https://www.bilibili.com/video/BV1jo4y147fT/)
- [公众号接入视频教程](https://www.bilibili.com/video/BV1xh4y1t7fy/)
- [FastGpt 知识库演示](https://www.bilibili.com/video/BV1Wo4y1p7i1/)
## 第三方生态
- [luolinAI: 企微机器人,开箱即用](https://github.com/luolin-ai/FastGPT-Enterprise-WeChatbot)
## 🌟 Star History
[![Star History Chart](https://api.star-history.com/svg?repos=c121914yu/FastGPT&type=Date)](https://star-history.com/#c121914yu/FastGPT&Date)

View File

@@ -24,7 +24,6 @@ export const useKbRoute = (app) => {
}
: {})
};
console.log(where);
const kbsRaw = await Kb.find(where)
.skip(start)

View File

@@ -8,11 +8,12 @@ const hashPassword = (psw) => {
return crypto.createHash('sha256').update(psw).digest('hex');
};
const day = 60;
export const useUserRoute = (app) => {
// 统计近 30 天注册用户数量
app.get('/users/data', auth(), async (req, res) => {
try {
const day = 60;
let startCount = await User.countDocuments({
createTime: { $lt: new Date(Date.now() - day * 24 * 60 * 60 * 1000) }
});
@@ -92,7 +93,6 @@ export const useUserRoute = (app) => {
res.status(500).json({ error: 'Error fetching users' });
}
});
// 创建用户
app.post('/users', auth(), async (req, res) => {
try {
@@ -134,7 +134,6 @@ export const useUserRoute = (app) => {
res.status(500).json({ error: 'Error updating user' });
}
});
// 新增: 获取 pays 列表
app.get('/pays', auth(), async (req, res) => {
try {
@@ -179,4 +178,58 @@ export const useUserRoute = (app) => {
res.status(500).json({ error: 'Error fetching pays', details: err.message });
}
});
// 获取本月账单
app.get('/pays/data', auth(), async (req, res) => {
try {
let startCount = 0;
const paysRaw = await Pay.aggregate([
{
$match: {
status: 'SUCCESS',
createTime: {
$gte: new Date(Date.now() - day * 24 * 60 * 60 * 1000 + 8 * 60 * 60 * 1000) // 补时差
}
}
},
{
$addFields: {
adjustedCreateTime: { $add: ['$createTime', 8 * 60 * 60 * 1000] }
}
},
{
$group: {
_id: {
year: { $year: '$adjustedCreateTime' },
month: { $month: '$adjustedCreateTime' },
day: { $dayOfMonth: '$adjustedCreateTime' }
},
count: { $sum: '$price' }
}
},
{
$project: {
_id: 0,
date: { $dateFromParts: { year: '$_id.year', month: '$_id.month', day: '$_id.day' } },
count: 1
}
},
{ $sort: { date: 1 } }
]);
const countResult = paysRaw.map((item) => {
startCount += item.count;
return {
date: item.date,
total: startCount,
count: item.count
};
});
res.json(countResult);
} catch (err) {
console.log(`Error fetching users: ${err}`);
res.status(500).json({ error: 'Error fetching users' });
}
});
};

View File

@@ -14,14 +14,31 @@ import {
import dayjs from 'dayjs';
const authStorageKey = 'tushan:auth';
const PRICE_SCALE = 100000;
type UsersChartDataType = { count: number; date: string; increase: number; increaseRate: string };
type fetchChatData = {
count: number;
total?: number;
date: string;
increase?: number;
increaseRate?: string;
};
type chatDataType = {
date: string;
userCount: number;
userIncrease?: number;
userIncreaseRate?: string;
payTotal: number;
payCount: number;
};
export const Dashboard: React.FC = React.memo(() => {
const [userCount, setUserCount] = useState(0); //用户数量
const [kbCount, setkbCount] = useState(0);
const [modelCount, setmodelCount] = useState(0);
const [usersData, setUsersData] = useState<UsersChartDataType[]>([]);
const [chatData, setChatData] = useState<chatDataType[]>([]);
useEffect(() => {
const baseUrl = import.meta.env.VITE_PUBLIC_SERVER_URL;
@@ -56,20 +73,33 @@ export const Dashboard: React.FC = React.memo(() => {
setmodelCount(Number(modelTotalCount));
}
};
const fetchUserData = async () => {
const userResponse: UsersChartDataType[] = await fetch(`${baseUrl}/users/data`, {
headers
}).then((res) => res.json());
setUsersData(
userResponse.map((item) => ({
...item,
date: dayjs(item.date).format('MM/DD')
}))
);
const fetchChatData = async () => {
const [userResponse, payResponse]: fetchChatData[][] = await Promise.all([
fetch(`${baseUrl}/users/data`, {
headers
}).then((res) => res.json()),
fetch(`${baseUrl}/pays/data`, {
headers
}).then((res) => res.json())
]);
const data = userResponse.map((item, i) => {
const pay = payResponse.find((pay) => item.date === pay.date);
return {
date: dayjs(item.date).format('MM/DD'),
userCount: item.count,
userIncrease: item.increase,
userIncreaseRate: item.increaseRate,
payCount: pay ? pay.count / PRICE_SCALE : 0,
payTotal: pay?.total ? pay.total / PRICE_SCALE : 0
};
});
setChatData(data);
};
fetchCounts();
fetchUserData();
fetchChatData();
}, []);
return (
@@ -101,7 +131,13 @@ export const Dashboard: React.FC = React.memo(() => {
</Grid.Row>
<Divider />
<UserChart data={usersData} />
<div>
<strong> & </strong>
<UserChart data={chatData} />
</div>
<Divider />
</Card>
</Space>
</div>
@@ -162,7 +198,7 @@ const DataItem = React.memo((props: { icon: React.ReactElement; title: string; c
DataItem.displayName = 'DataItem';
const CustomTooltip = ({ active, payload }: any) => {
const data = payload?.[0]?.payload as UsersChartDataType;
const data = payload?.[0]?.payload as chatDataType;
if (active && data) {
return (
<div
@@ -174,13 +210,19 @@ const CustomTooltip = ({ active, payload }: any) => {
}}
>
<p className="label">
count: <strong>{data.count}</strong>
: <strong>{data.date}</strong>
</p>
<p className="label">
increase: <strong>{data.increase}</strong>
: <strong>{data.userCount}</strong>
</p>
<p className="label">
increaseRate: <strong>{data.increaseRate}</strong>
: <strong>{data.userIncrease}</strong>
</p>
<p className="label">
: <strong>{data.payCount}</strong>
</p>
<p className="label">
60: <strong>{data.payTotal}</strong>
</p>
</div>
);
@@ -188,7 +230,7 @@ const CustomTooltip = ({ active, payload }: any) => {
return null;
};
const UserChart = ({ data }: { data: UsersChartDataType[] }) => {
const UserChart = ({ data }: { data: chatDataType[] }) => {
return (
<ResponsiveContainer width="100%" height={320}>
<AreaChart
@@ -198,14 +240,14 @@ const UserChart = ({ data }: { data: UsersChartDataType[] }) => {
margin={{ top: 10, right: 30, left: 0, bottom: 0 }}
>
<defs>
<linearGradient id="colorUv" x1="0" y1="0" x2="0" y2="1">
<stop offset="5%" stopColor="#8884d8" stopOpacity={0.8} />
<stop offset="95%" stopColor="#8884d8" stopOpacity={0} />
</linearGradient>
<linearGradient id="colorPv" x1="0" y1="0" x2="0" y2="1">
<linearGradient id="userCount" x1="0" y1="0" x2="0" y2="1">
<stop offset="5%" stopColor="#82ca9d" stopOpacity={0.8} />
<stop offset="95%" stopColor="#82ca9d" stopOpacity={0} />
</linearGradient>
<linearGradient id="payTotal" x1="0" y1="0" x2="0" y2="1">
<stop offset="5%" stopColor="#8884d8" stopOpacity={0.8} />
<stop offset="95%" stopColor="#8884d8" stopOpacity={0} />
</linearGradient>
</defs>
<XAxis dataKey="date" />
<YAxis />
@@ -213,10 +255,17 @@ const UserChart = ({ data }: { data: UsersChartDataType[] }) => {
<Tooltip content={<CustomTooltip />} />
<Area
type="monotone"
dataKey="count"
dataKey="userCount"
stroke="#82ca9d"
fillOpacity={1}
fill="url(#colorPv)"
fill="url(#userCount)"
/>
<Area
type="monotone"
dataKey="payTotal"
stroke="#8884d8"
fillOpacity={1}
fill="url(#payTotal)"
/>
</AreaChart>
</ResponsiveContainer>

View File

@@ -1,7 +1,7 @@
# 运行端口,如果不是 3000 口运行,需要改成其他的。注意:不是改了这个变量就会变成其他端口,而是因为改成其他端口,才用这个变量。
PORT=3000
# database max link
DB_MAX_LINK=15
DB_MAX_LINK=5
# 代理
# AXIOS_PROXY_HOST=127.0.0.1
# AXIOS_PROXY_PORT=7890

View File

@@ -8,9 +8,10 @@
| --- | --- |
| 知识库 - 索引 | 0.001 |
| chatgpt - 对话 | 0.015 |
| chatgpt16K - 对话 | 0.015 |
| gpt4 - 对话 | 0.1 |
| 文件拆分 | 0.015 |
| chatgpt16K - 对话 | 0.03 |
| 窝牛 GPT4 不稳定 - 对话 | 0.015 |
| gpt4 - 对话 | 0.45 |
| 文件拆分 | 0.03 |
**其他问题**
| 交流群 | 小助手 |

View File

@@ -20,9 +20,9 @@ FastGpt 项目完全开源,可随意私有化部署,去除平台风险忧虑
| --- | --- |
| 知识库 - 索引 | 0.001 |
| chatgpt - 对话 | 0.015 |
| chatgpt16K - 对话 | 0.015 |
| gpt4 - 对话 | 0.1 |
| 文件拆分 | 0.015 |
| chatgpt16K - 对话 | 0.03 |
| gpt4 - 对话 | 0.45 |
| 文件拆分 | 0.03 |
### 交流群/问题反馈

View File

@@ -1,7 +1,6 @@
### Fast GPT V3.9
1. 限时优惠活动,更低价的 tokens
2. 新增 - 直接分段训练,可调节段落大小
3. 优化 - tokens 计算性能。
4. 优化 - key 池管理,结合 one-api 项目,实现更方便的 key 池管理,具体参考[docker 部署 FastGpt](https://github.com/c121914yu/FastGPT/blob/main/docs/deploy/docker.md)
5. 新增 - V2 版 OpenAPI可以在任意第三方套壳 ChatGpt 项目中直接使用 FastGpt 的应用,注意!是直接,不需要改任何代码。具体参考[API 文档中《在第三方应用中使用 FastGpt》](https://kjqvjse66l.feishu.cn/docx/DmLedTWtUoNGX8xui9ocdUEjnNh)
1. 新增 - 直接分段训练,可调节段落大小
2. 优化 - tokens 计算性能
3. 优化 - key 池管理,结合 one-api 项目,实现更方便的 key 池管理,具体参考[docker 部署 FastGpt](https://github.com/c121914yu/FastGPT/blob/main/docs/deploy/docker.md)
4. 新增 - V2 版 OpenAPI可以在任意第三方套壳 ChatGpt 项目中直接使用 FastGpt 的应用,注意!是直接,不需要改任何代码。具体参考[API 文档中《在第三方应用中使用 FastGpt》](https://kjqvjse66l.feishu.cn/docx/DmLedTWtUoNGX8xui9ocdUEjnNh)

View File

@@ -51,7 +51,7 @@ const Layout = ({ children }: { children: JSX.Element }) => {
return () => {
window.removeEventListener('resize', resize);
};
}, [setScreenWidth]);
}, []);
const { data: unread = 0 } = useQuery(['getUnreadCount'], getUnreadCount, {
enabled: !!userInfo,
@@ -64,8 +64,8 @@ const Layout = ({ children }: { children: JSX.Element }) => {
h={'100%'}
bgGradient={'linear(to-t,rgba(173, 206, 255, 0.05) 0%, rgba(173, 206, 255, 0.12) 100%)'}
>
{isPc ? (
pcUnShowLayoutRoute[router.pathname] ? (
<Box h={'100%'} display={['none', 'block']}>
{pcUnShowLayoutRoute[router.pathname] ? (
<Auth>{children}</Auth>
) : (
<>
@@ -76,19 +76,22 @@ const Layout = ({ children }: { children: JSX.Element }) => {
<Auth>{children}</Auth>
</Box>
</>
)
) : phoneUnShowLayoutRoute[router.pathname] || isChatPage ? (
<Auth>{children}</Auth>
) : (
<Flex h={'100%'} flexDirection={'column'}>
<Box flex={'1 0 0'} h={0} overflow={'overlay'}>
<Auth>{children}</Auth>
</Box>
<Box h={'50px'} borderTop={'1px solid rgba(0,0,0,0.1)'}>
<NavbarPhone unread={unread} />
</Box>
</Flex>
)}
)}
</Box>
<Box h={'100%'} display={['block', 'none']}>
{phoneUnShowLayoutRoute[router.pathname] || isChatPage ? (
<Auth>{children}</Auth>
) : (
<Flex h={'100%'} flexDirection={'column'}>
<Box flex={'1 0 0'} h={0} overflow={'overlay'}>
<Auth>{children}</Auth>
</Box>
<Box h={'50px'} borderTop={'1px solid rgba(0,0,0,0.1)'}>
<NavbarPhone unread={unread} />
</Box>
</Flex>
)}
</Box>
</Box>
<Loading loading={loading} />
</>

View File

@@ -1,7 +1,7 @@
import React, { useMemo } from 'react';
import { useRouter } from 'next/router';
import MyIcon from '../Icon';
import { Flex } from '@chakra-ui/react';
import { Flex, Box } from '@chakra-ui/react';
import { useChatStore } from '@/store/chat';
import Badge from '../Badge';
@@ -11,24 +11,28 @@ const NavbarPhone = ({ unread }: { unread: number }) => {
const navbarList = useMemo(
() => [
{
label: '聊天',
icon: 'tabbarChat',
link: `/chat?modelId=${lastChatModelId}&chatId=${lastChatId}`,
activeLink: ['/chat'],
unread: 0
},
{
label: '应用',
icon: 'tabbarModel',
link: `/model`,
activeLink: ['/model'],
unread: 0
},
{
label: '工具',
icon: 'tabbarMore',
link: '/tools',
activeLink: ['/tools'],
unread: 0
},
{
label: '我的',
icon: 'tabbarMe',
link: '/number',
activeLink: ['/number'],
@@ -57,7 +61,9 @@ const NavbarPhone = ({ unread }: { unread: number }) => {
textAlign={'center'}
alignItems={'center'}
h={'100%'}
pt={1}
px={3}
transform={'scale(0.9)'}
{...(item.activeLink.includes(router.asPath)
? {
color: '#7089f1'
@@ -89,6 +95,7 @@ const NavbarPhone = ({ unread }: { unread: number }) => {
>
<Badge isDot count={item.unread}>
<MyIcon name={item.icon as any} width={'20px'} height={'20px'} />
<Box fontSize={'12px'}>{item.label}</Box>
</Badge>
</Flex>
))}

View File

@@ -1,4 +1,4 @@
import React from 'react';
import React, { useRef } from 'react';
import { Menu, MenuButton, MenuList, MenuItem, Button, useDisclosure } from '@chakra-ui/react';
import type { ButtonProps } from '@chakra-ui/react';
import { ChevronDownIcon } from '@chakra-ui/icons';
@@ -13,6 +13,7 @@ interface Props extends ButtonProps {
}
const MySelect = ({ placeholder, value, width = 'auto', list, onchange, ...props }: Props) => {
const ref = useRef<HTMLDivElement>(null);
const menuItemStyles = {
borderRadius: 'sm',
py: 2,
@@ -26,8 +27,9 @@ const MySelect = ({ placeholder, value, width = 'auto', list, onchange, ...props
return (
<Menu autoSelect={false} onOpen={onOpen} onClose={onClose}>
<MenuButton style={{ width: '100%' }} as={'span'}>
<MenuButton style={{ width: '100%', position: 'relative' }} as={'span'}>
<Button
ref={ref}
width={width}
px={3}
variant={'base'}
@@ -47,9 +49,15 @@ const MySelect = ({ placeholder, value, width = 'auto', list, onchange, ...props
</Button>
</MenuButton>
<MenuList
minW={
Array.isArray(width) ? width.map((item) => `${item} !important`) : `${width} !important`
}
minW={(() => {
const w = ref.current?.clientWidth;
if (w) {
return `${w}px !important`;
}
return Array.isArray(width)
? width.map((item) => `${item} !important`)
: `${width} !important`;
})()}
p={'6px'}
border={'1px solid #fff'}
boxShadow={'0px 2px 4px rgba(161, 167, 179, 0.25), 0px 0px 1px rgba(121, 141, 159, 0.25);'}
@@ -78,4 +86,4 @@ const MySelect = ({ placeholder, value, width = 'auto', list, onchange, ...props
);
};
export default MySelect;
export default React.memo(MySelect);

View File

@@ -1,4 +1,3 @@
import { getSystemModelList } from '@/api/system';
import type { ShareChatEditType } from '@/types/model';
import type { ModelSchema } from '@/types/mongoSchema';
@@ -10,7 +9,8 @@ export enum OpenAiChatEnum {
'GPT35' = 'gpt-3.5-turbo',
'GPT3516k' = 'gpt-3.5-turbo-16k',
'GPT4' = 'gpt-4',
'GPT432k' = 'gpt-4-32k'
'GPT432k' = 'gpt-4-32k',
'GPT4LOW' = 'gpt-4-0314'
}
export type ChatModelType = `${OpenAiChatEnum}`;
@@ -25,6 +25,14 @@ export type ChatModelItemType = {
};
export const ChatModelMap = {
[OpenAiChatEnum.GPT4LOW]: {
chatModel: OpenAiChatEnum.GPT4LOW,
name: '窝牛Gpt4不稳定',
contextMaxToken: 4000,
systemMaxToken: 2400,
maxTemperature: 1.2,
price: 1.5
},
[OpenAiChatEnum.GPT35]: {
chatModel: OpenAiChatEnum.GPT35,
name: 'Gpt35-4k',
@@ -39,7 +47,7 @@ export const ChatModelMap = {
contextMaxToken: 16000,
systemMaxToken: 8000,
maxTemperature: 1.2,
price: 1.5
price: 3
},
[OpenAiChatEnum.GPT4]: {
chatModel: OpenAiChatEnum.GPT4,
@@ -47,7 +55,7 @@ export const ChatModelMap = {
contextMaxToken: 8000,
systemMaxToken: 4000,
maxTemperature: 1.2,
price: 10
price: 45
},
[OpenAiChatEnum.GPT432k]: {
chatModel: OpenAiChatEnum.GPT432k,
@@ -59,15 +67,12 @@ export const ChatModelMap = {
}
};
let chatModelList: ChatModelItemType[] = [];
export const getChatModelList = async () => {
if (chatModelList.length > 0) {
return chatModelList;
}
const list = await getSystemModelList();
chatModelList = list;
return list;
};
export const chatModelList: ChatModelItemType[] = [
ChatModelMap[OpenAiChatEnum.GPT3516k],
ChatModelMap[OpenAiChatEnum.GPT35],
ChatModelMap[OpenAiChatEnum.GPT4LOW],
ChatModelMap[OpenAiChatEnum.GPT4]
];
export const defaultModel: ModelSchema = {
_id: 'modelId',

View File

@@ -8,7 +8,7 @@ interface Props {
export function useScreen(data?: Props) {
const { defaultIsPc = false } = data || {};
const [isPc] = useMediaQuery('(min-width: 900px)', {
ssr: true,
ssr: false,
fallback: defaultIsPc
});

View File

@@ -49,7 +49,7 @@ function App({ Component, pageProps }: AppProps) {
/>
<link rel="icon" href="/favicon.ico" />
</Head>
<Script src="/js/particles.js" strategy="lazyOnload"></Script>
<Script src="/js/particles.js"></Script>
<Script src="/js/qrcode.min.js" strategy="afterInteractive"></Script>
<Script src="/js/pdf.js" strategy="afterInteractive"></Script>
<Script src="/js/html2pdf.bundle.min.js" strategy="afterInteractive"></Script>
@@ -60,10 +60,6 @@ function App({ Component, pageProps }: AppProps) {
src={`https://www.recaptcha.net/recaptcha/api.js?render=${googleVerKey}`}
strategy="afterInteractive"
></Script>
<Script
src={`https://www.google.com/recaptcha/api.js?render=${googleVerKey}`}
strategy="afterInteractive"
></Script>
</>
)}
<QueryClientProvider client={queryClient}>

View File

@@ -1,37 +0,0 @@
// Next.js API route support: https://nextjs.org/docs/api-routes/introduction
import type { NextApiRequest, NextApiResponse } from 'next';
import { jsonRes } from '@/service/response';
import { authUser } from '@/service/utils/auth';
import { connectToDatabase, TrainingData } from '@/service/mongo';
import { TrainingModeEnum } from '@/constants/plugin';
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
try {
await authUser({ req, authRoot: true });
await connectToDatabase();
// split queue data
const result = await TrainingData.aggregate([
{
$group: {
_id: '$mode',
count: { $sum: 1 }
}
}
]);
jsonRes(res, {
data: {
qaListLen: result.find((item) => item._id === TrainingModeEnum.qa)?.count || 0,
vectorListLen: result.find((item) => item._id === TrainingModeEnum.index)?.count || 0
}
});
} catch (error) {
console.log(error);
jsonRes(res, {
code: 500,
error
});
}
}

View File

@@ -0,0 +1,62 @@
// Next.js API route support: https://nextjs.org/docs/api-routes/introduction
import type { NextApiRequest, NextApiResponse } from 'next';
import { jsonRes } from '@/service/response';
import { authUser } from '@/service/utils/auth';
import { connectToDatabase, TrainingData, User, promotionRecord } from '@/service/mongo';
import { TrainingModeEnum } from '@/constants/plugin';
import mongoose from 'mongoose';
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
try {
await authUser({ req, authRoot: true });
const { amount, userId, type } = req.body as {
amount: number;
userId: number;
type: 'withdraw';
};
await connectToDatabase();
if (!userId || !amount || type !== 'withdraw' || amount <= 0) {
throw new Error('params is error');
}
// check promotion balance
const countResidue: { totalAmount: number }[] = await promotionRecord.aggregate([
{ $match: { userId: new mongoose.Types.ObjectId(userId) } },
{
$group: {
_id: null, // 分组条件,这里使用 null 表示不分组
totalAmount: { $sum: '$amount' } // 计算 amount 字段的总和
}
},
{
$project: {
_id: false, // 排除 _id 字段
totalAmount: true // 只返回 totalAmount 字段
}
}
]);
const balance = countResidue[0].totalAmount;
if (balance < amount) {
throw new Error('可提现余额不足');
}
// add record
await promotionRecord.create({
userId,
type,
amount: -amount
});
jsonRes(res, {
data: balance
});
} catch (error) {
jsonRes(res, {
code: 500,
error
});
}
}

View File

@@ -129,7 +129,8 @@ export default withNextCors(async function handler(req: NextApiRequest, res: Nex
// 发出请求
const { streamResponse, responseMessages, responseText, totalTokens } =
await modelServiceToolMap[model.chat.chatModel].chatCompletion({
await modelServiceToolMap.chatCompletion({
model: model.chat.chatModel,
apiKey,
temperature: +temperature,
messages: completePrompts,

View File

@@ -150,13 +150,15 @@ export async function appKbSearch({
}
];
const fixedSystemTokens = modelToolMap[model.chat.chatModel].countTokens({
const fixedSystemTokens = modelToolMap.countTokens({
model: model.chat.chatModel,
messages: [...userSystemPrompt, ...userLimitPrompt]
});
// filter part quote by maxToken
const sliceResult = modelToolMap[model.chat.chatModel]
const sliceResult = modelToolMap
.tokenSlice({
model: model.chat.chatModel,
maxToken: modelConstantsData.systemMaxToken - fixedSystemTokens,
messages: filterSearch.map((item, i) => ({
obj: ChatRoleEnum.System,

View File

@@ -78,7 +78,8 @@ export async function pushDataToKb({
if (mode === TrainingModeEnum.qa) {
// count token
const token = modelToolMap[OpenAiChatEnum.GPT3516k].countTokens({
const token = modelToolMap.countTokens({
model: OpenAiChatEnum.GPT3516k,
messages: [{ obj: 'System', value: item.q }]
});
if (token > modeMaxToken[TrainingModeEnum.qa]) {

View File

@@ -39,7 +39,12 @@ export async function openaiEmbedding({
input,
mustPay = false
}: { userId: string; mustPay?: boolean } & Props) {
const apiKey = getSystemOpenAiKey();
const { userOpenAiKey, systemAuthKey } = await getApiKey({
model: 'gpt-3.5-turbo',
userId,
mustPay
});
const apiKey = userOpenAiKey || systemAuthKey;
// 获取 chatAPI
const chatAPI = getOpenAIApi(apiKey);
@@ -68,7 +73,7 @@ export async function openaiEmbedding({
});
pushGenerateVectorBill({
isPay: mustPay,
isPay: !userOpenAiKey,
userId,
text: input.join(''),
tokenLen: result.tokenLen

View File

@@ -169,7 +169,7 @@ export default withNextCors(async function handler(req: NextApiRequest, res: Nex
model: model.chat.chatModel,
usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 },
choices: [
{ message: [{ role: 'assistant', content: response }], finish_reason: 'stop', index: 0 }
{ message: { role: 'assistant', content: response }, finish_reason: 'stop', index: 0 }
]
});
}
@@ -196,7 +196,8 @@ export default withNextCors(async function handler(req: NextApiRequest, res: Nex
// start model api. responseText and totalTokens: valid only if stream = false
const { streamResponse, responseMessages, responseText, totalTokens } =
await modelServiceToolMap[model.chat.chatModel].chatCompletion({
await modelServiceToolMap.chatCompletion({
model: model.chat.chatModel,
apiKey: userOpenAiKey || apiKey,
temperature: +temperature,
maxToken: model.chat.maxToken,
@@ -298,7 +299,7 @@ export default withNextCors(async function handler(req: NextApiRequest, res: Nex
model: model.chat.chatModel,
usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: tokens },
choices: [
{ message: [{ role: 'assistant', content: answer }], finish_reason: 'stop', index: 0 }
{ message: { role: 'assistant', content: answer }, finish_reason: 'stop', index: 0 }
]
});
}

View File

@@ -9,6 +9,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
chatModelList.push(ChatModelMap[OpenAiChatEnum.GPT3516k]);
chatModelList.push(ChatModelMap[OpenAiChatEnum.GPT35]);
chatModelList.push(ChatModelMap[OpenAiChatEnum.GPT4LOW]);
chatModelList.push(ChatModelMap[OpenAiChatEnum.GPT4]);
jsonRes(res, {

View File

@@ -2,7 +2,7 @@ import type { NextApiRequest, NextApiResponse } from 'next';
import { jsonRes } from '@/service/response';
import { connectToDatabase, User, Pay, TrainingData } from '@/service/mongo';
import { authUser } from '@/service/utils/auth';
import { PaySchema, UserModelSchema } from '@/types/mongoSchema';
import { PaySchema } from '@/types/mongoSchema';
import dayjs from 'dayjs';
import { getPayResult } from '@/service/utils/wxpay';
import { pushPromotionRecord } from '@/service/utils/promotion';

View File

@@ -4,6 +4,7 @@ import { jsonRes } from '@/service/response';
import { connectToDatabase, Inform, User } from '@/service/mongo';
import { authUser } from '@/service/utils/auth';
import { InformTypeEnum } from '@/constants/user';
import { startSendInform } from '@/service/events/sendInform';
export type Props = {
type: `${InformTypeEnum}`;
@@ -37,25 +38,26 @@ export async function sendInform({ type, title, content, userId }: Props) {
try {
if (userId) {
// skip it if have same inform within 5 minutes
const inform = await Inform.findOne({
type,
title,
content,
userId,
read: false,
time: { $lte: new Date(Date.now() + 5 * 60 * 1000) }
global.sendInformQueue.push(async () => {
// skip it if have same inform within 5 minutes
const inform = await Inform.findOne({
type,
title,
content,
userId,
time: { $gte: new Date(Date.now() - 5 * 60 * 1000) }
});
if (inform) return;
await Inform.create({
type,
title,
content,
userId
});
});
if (inform) return;
await Inform.create({
type,
title,
content,
userId
});
startSendInform();
return;
}

View File

@@ -63,8 +63,9 @@ import { adaptChatItem_openAI } from '@/utils/plugin/openai';
const textareaMinH = '22px';
const Chat = ({ modelId, chatId }: { modelId: string; chatId: string }) => {
const Chat = () => {
const router = useRouter();
const { modelId = '', chatId = '' } = router.query as { modelId: string; chatId: string };
const theme = useTheme();
const ChatBox = useRef<HTMLDivElement>(null);
@@ -950,11 +951,4 @@ const Chat = ({ modelId, chatId }: { modelId: string; chatId: string }) => {
);
};
Chat.getInitialProps = ({ query, req }: any) => {
return {
modelId: query?.modelId || '',
chatId: query?.chatId || ''
};
};
export default Chat;

View File

@@ -60,8 +60,9 @@ import { adaptChatItem_openAI } from '@/utils/plugin/openai';
const textareaMinH = '22px';
const Chat = ({ shareId, historyId }: { shareId: string; historyId: string }) => {
const Chat = () => {
const router = useRouter();
const { shareId = '', historyId } = router.query as { shareId: string; historyId: string };
const theme = useTheme();
const ChatBox = useRef<HTMLDivElement>(null);
@@ -490,14 +491,13 @@ const Chat = ({ shareId, historyId }: { shareId: string; historyId: string }) =>
]);
// 初始化聊天框
useQuery(['init', historyId], () => {
useQuery(['init', shareId, historyId], () => {
if (!shareId) {
return null;
}
if (!historyId) {
router.replace(`/chat/share?shareId=${shareId}&historyId=${new Types.ObjectId()}`);
return null;
return router.replace(`/chat/share?shareId=${shareId}&historyId=${new Types.ObjectId()}`);
}
return loadChatInfo();
@@ -837,11 +837,4 @@ const Chat = ({ shareId, historyId }: { shareId: string; historyId: string }) =>
);
};
Chat.getInitialProps = ({ query, req }: any) => {
return {
shareId: query?.shareId || '',
historyId: query?.historyId || ''
};
};
export default Chat;

View File

@@ -140,8 +140,10 @@ const Home = () => {
useEffect(() => {
(async () => {
const { data: git } = await axios.get('https://api.github.com/repos/c121914yu/FastGPT');
setStar(git.stargazers_count);
try {
const { data: git } = await axios.get('https://api.github.com/repos/c121914yu/FastGPT');
setStar(git.stargazers_count);
} catch (error) {}
})();
}, []);

View File

@@ -154,7 +154,7 @@ const DataCard = ({ kbId }: { kbId: string }) => {
mr={[2, 4]}
size={'sm'}
onClick={() => {
refetchData(pageNum);
getData(pageNum);
getTrainingData({ kbId, init: true });
}}
/>

View File

@@ -7,8 +7,9 @@ import SideBar from '@/components/SideBar';
import KbList from './components/KbList';
import KbDetail from './components/Detail';
const Kb = ({ kbId }: { kbId: string }) => {
const Kb = () => {
const router = useRouter();
const { kbId = '' } = router.query as { kbId: string };
const { isPc } = useGlobalStore();
const { lastKbId } = useUserStore();
@@ -35,9 +36,3 @@ const Kb = ({ kbId }: { kbId: string }) => {
};
export default Kb;
Kb.getInitialProps = ({ query, req }: any) => {
return {
kbId: query?.kbId || ''
};
};

View File

@@ -12,10 +12,10 @@ import { MyModelsTypeEnum } from '@/constants/user';
import dynamic from 'next/dynamic';
const Avatar = dynamic(() => import('@/components/Avatar'), {
ssr: true
ssr: false
});
const Tabs = dynamic(() => import('@/components/Tabs'), {
ssr: true
ssr: false
});
const ModelList = ({ modelId }: { modelId: string }) => {

View File

@@ -5,7 +5,7 @@ import dynamic from 'next/dynamic';
import MyIcon from '@/components/Icon';
const APIKeyModal = dynamic(() => import('@/components/APIKeyModal'), {
ssr: true
ssr: false
});
const API = ({ modelId }: { modelId: string }) => {

View File

@@ -21,7 +21,7 @@ import { useSelectFile } from '@/hooks/useSelectFile';
import { compressImg } from '@/utils/file';
import { getErrText } from '@/utils/tools';
import { useConfirm } from '@/hooks/useConfirm';
import { ChatModelMap, getChatModelList } from '@/constants/model';
import { ChatModelMap, chatModelList } from '@/constants/model';
import { formatPrice } from '@/utils/user';
import type { ModelSchema } from '@/types/mongoSchema';
@@ -75,7 +75,7 @@ const Settings = ({ modelId }: { modelId: string }) => {
}
return max;
}, [getValues, setValue]);
}, [getValues, setValue, refresh]);
// 提交保存模型修改
const saveSubmitSuccess = useCallback(
@@ -185,8 +185,6 @@ const Settings = ({ modelId }: { modelId: string }) => {
}
});
const { data: chatModelList = [] } = useQuery(['initChatModelList'], getChatModelList);
return (
<Box
pb={3}
@@ -240,12 +238,12 @@ const Settings = ({ modelId }: { modelId: string }) => {
</Box>
<MySelect
width={['100%', '280px']}
width={['100%', '300px']}
value={getValues('chat.chatModel')}
list={chatModelList.map((item) => ({
id: item.chatModel,
label: `${item.name} (${formatPrice(
ChatModelMap[getValues('chat.chatModel')]?.price,
ChatModelMap[item.chatModel]?.price,
1000
)} 元/1k tokens)`
}))}
@@ -265,7 +263,7 @@ const Settings = ({ modelId }: { modelId: string }) => {
{ label: '严谨', value: 0 },
{ label: '发散', value: 10 }
]}
width={['100%', '260px']}
width={['95%', '280px']}
min={0}
max={10}
activeVal={getValues('chat.temperature')}
@@ -286,7 +284,7 @@ const Settings = ({ modelId }: { modelId: string }) => {
{ label: '100', value: 100 },
{ label: `${tokenLimit}`, value: tokenLimit }
]}
width={['100%', '260px']}
width={['95%', '280px']}
min={100}
max={tokenLimit}
step={50}

View File

@@ -13,8 +13,9 @@ const ModelDetail = dynamic(() => import('./components/detail/index'), {
ssr: false
});
const Model = ({ modelId }: { modelId: string }) => {
const Model = () => {
const router = useRouter();
const { modelId = '' } = router.query as { modelId: string };
const { isPc } = useGlobalStore();
const { lastModelId } = useUserStore();
@@ -41,9 +42,3 @@ const Model = ({ modelId }: { modelId: string }) => {
};
export default Model;
Model.getInitialProps = ({ query, req }: any) => {
return {
modelId: query?.modelId || ''
};
};

View File

@@ -40,7 +40,7 @@ const BillTable = () => {
<Tr>
<Th></Th>
<Th></Th>
<Th></Th>
<Th></Th>
<Th></Th>
<Th>Tokens </Th>
<Th></Th>

View File

@@ -115,9 +115,9 @@ const PayModal = ({ onClose }: { onClose: () => void }) => {
| --- | --- |
| 知识库 - 索引 | 0.001 |
| chatgpt - 对话 | 0.015 |
| chatgpt16K - 对话 | 0.015 |
| gpt4 - 对话 | 0.1 |
| 文件拆分 | 0.015 |`}
| chatgpt16K - 对话 | 0.03 |
| gpt4 - 对话 | 0.45 |
| 文件拆分 | 0.03 |`}
/>
</>
)}

View File

@@ -23,21 +23,21 @@ import Tabs from '@/components/Tabs';
import BillTable from './components/BillTable';
const PayRecordTable = dynamic(() => import('./components/PayRecordTable'), {
ssr: true
ssr: false
});
const PromotionTable = dynamic(() => import('./components/PromotionTable'), {
ssr: true
ssr: false
});
const InformTable = dynamic(() => import('./components/InformTable'), {
ssr: true
ssr: false
});
const PayModal = dynamic(() => import('./components/PayModal'), {
loading: () => <Loading fixed={false} />,
ssr: true
ssr: false
});
const WxConcat = dynamic(() => import('@/components/WxConcat'), {
loading: () => <Loading fixed={false} />,
ssr: true
ssr: false
});
enum TableEnum {
@@ -54,7 +54,6 @@ const NumberSetting = ({ tableType }: { tableType: `${TableEnum}` }) => {
{ label: '佣金', id: TableEnum.promotion, Component: <PromotionTable /> },
{ label: '通知', id: TableEnum.inform, Component: <InformTable /> }
]);
const [currentTab, setCurrentTab] = useState(tableType);
const router = useRouter();
const { copyData } = useCopyData();
@@ -84,7 +83,14 @@ const NumberSetting = ({ tableType }: { tableType: `${TableEnum}` }) => {
async (data: UserUpdateParams) => {
setLoading(true);
try {
data.openaiKey && (await authOpenAiKey(data.openaiKey));
if (data.openaiKey) {
const text = await authOpenAiKey(data.openaiKey);
text &&
toast({
title: text,
status: 'warning'
});
}
await putUserInfo({
openaiKey: data.openaiKey,
avatar: data.avatar
@@ -95,7 +101,7 @@ const NumberSetting = ({ tableType }: { tableType: `${TableEnum}` }) => {
});
reset(data);
toast({
title: '更新成功',
title: '更新数据成功',
status: 'success'
});
} catch (error) {
@@ -195,7 +201,7 @@ const NumberSetting = ({ tableType }: { tableType: `${TableEnum}` }) => {
<Box flex={'0 0 85px'}>openaiKey:</Box>
<Input
{...register(`openaiKey`)}
maxW={'300px'}
maxW={'350px'}
placeholder={'openai账号。回车或失去焦点保存'}
size={'sm'}
onBlur={handleSubmit(onclickSave)}
@@ -214,7 +220,8 @@ const NumberSetting = ({ tableType }: { tableType: `${TableEnum}` }) => {
{[
{ label: '佣金比例', value: `${userInfo?.promotion.rate || 15}%` },
{ label: '已注册用户数', value: `${invitedAmount}` },
{ label: '累计佣金', value: `${historyAmount}` }
{ label: '累计佣金', value: `${historyAmount}` },
{ label: '可用佣金', value: `${residueAmount}` }
].map((item) => (
<Flex key={item.label} alignItems={'center'} mt={4} justifyContent={'space-between'}>
<Box w={'120px'}>{item.label}</Box>
@@ -251,13 +258,13 @@ const NumberSetting = ({ tableType }: { tableType: `${TableEnum}` }) => {
m={'auto'}
w={'200px'}
list={tableList.current}
activeId={currentTab}
activeId={tableType}
size={'sm'}
onChange={(id: any) => setCurrentTab(id)}
onChange={(id: any) => router.replace(`/number?type=${id}`)}
/>
<Box minH={'300px'}>
{(() => {
const item = tableList.current.find((item) => item.id === currentTab);
const item = tableList.current.find((item) => item.id === tableType);
return item ? item.Component : null;
})()}

View File

@@ -63,8 +63,9 @@ export async function generateQA(): Promise<any> {
// 请求 chatgpt 获取回答
const response = await Promise.all(
[data.q].map((text) =>
modelServiceToolMap[OpenAiChatEnum.GPT3516k]
modelServiceToolMap
.chatCompletion({
model: OpenAiChatEnum.GPT3516k,
apiKey: systemAuthKey,
temperature: 0.8,
messages: [
@@ -153,7 +154,8 @@ A2:
sendInform({
type: 'system',
title: 'QA 任务中止',
content: '由于账号余额不足QA 任务中止,重新充值后将会继续。',
content:
'由于账号余额不足,索引生成任务中止,重新充值后将会继续。暂停的任务将在 7 天后被删除。',
userId
});
console.log('余额不足,暂停向量生成任务');

View File

@@ -104,7 +104,8 @@ export async function generateVector(): Promise<any> {
sendInform({
type: 'system',
title: '索引生成任务中止',
content: '由于账号余额不足,索引生成任务中止,重新充值后将会继续。',
content:
'由于账号余额不足,索引生成任务中止,重新充值后将会继续。暂停的任务将在 7 天后被删除。',
userId
});
console.log('余额不足,暂停向量生成任务');

View File

@@ -0,0 +1,16 @@
export const startSendInform = async () => {
if (global.sendInformQueue.length === 0 || global.sendInformQueueLen > 0) return;
global.sendInformQueueLen++;
try {
const fn = global.sendInformQueue[global.sendInformQueue.length - 1];
await fn();
global.sendInformQueue.pop();
global.sendInformQueueLen--;
startSendInform();
} catch (error) {
global.sendInformQueueLen--;
startSendInform();
}
};

View File

@@ -15,6 +15,10 @@ const TrainingDataSchema = new Schema({
ref: 'kb',
required: true
},
expireAt: {
type: Date,
default: () => new Date()
},
lockTime: {
type: Date,
default: () => new Date('2000/1/1')
@@ -44,5 +48,13 @@ const TrainingDataSchema = new Schema({
}
});
try {
TrainingDataSchema.index({ lockTime: 1 });
TrainingDataSchema.index({ userId: 1 });
TrainingDataSchema.index({ expireAt: 1 }, { expireAfterSeconds: 7 * 24 * 60 });
} catch (error) {
console.log(error);
}
export const TrainingData: MongoModel<TrainingDateType> =
models['trainingData'] || model('trainingData', TrainingDataSchema);

View File

@@ -20,6 +20,8 @@ export async function connectToDatabase(): Promise<void> {
pgIvfflatProbe: 10,
sensitiveCheck: false
};
global.sendInformQueue = [];
global.sendInformQueueLen = 0;
// proxy obj
if (process.env.AXIOS_PROXY_HOST && process.env.AXIOS_PROXY_PORT) {
global.httpsAgent = tunnel.httpsOverHttp({
@@ -36,9 +38,9 @@ export async function connectToDatabase(): Promise<void> {
global.mongodb = await mongoose.connect(process.env.MONGODB_URI as string, {
bufferCommands: true,
dbName: process.env.MONGODB_NAME,
maxConnecting: Number(process.env.DB_MAX_LINK || 10),
maxPoolSize: Number(process.env.DB_MAX_LINK || 10),
minPoolSize: 5
maxConnecting: Number(process.env.DB_MAX_LINK || 5),
maxPoolSize: Number(process.env.DB_MAX_LINK || 5),
minPoolSize: 2
});
console.log('mongo connected');
} catch (error) {

View File

@@ -12,7 +12,7 @@ export const connectPg = async () => {
user: process.env.PG_USER,
password: process.env.PG_PASSWORD,
database: process.env.PG_DB_NAME,
max: Number(process.env.DB_MAX_LINK || 10),
max: Number(process.env.DB_MAX_LINK || 5),
idleTimeoutMillis: 30000,
connectionTimeoutMillis: 5000
});

View File

@@ -176,39 +176,18 @@ export const getApiKey = async ({
userId: string;
mustPay?: boolean;
}) => {
const user = await User.findById(userId);
const user = await User.findById(userId, 'openaiKey balance');
if (!user) {
return Promise.reject(ERROR_ENUM.unAuthorization);
}
const keyMap = {
[OpenAiChatEnum.GPT35]: {
userOpenAiKey: user.openaiKey || '',
systemAuthKey: getSystemOpenAiKey()
},
[OpenAiChatEnum.GPT3516k]: {
userOpenAiKey: user.openaiKey || '',
systemAuthKey: getSystemOpenAiKey()
},
[OpenAiChatEnum.GPT4]: {
userOpenAiKey: user.openaiKey || '',
systemAuthKey: getSystemOpenAiKey()
},
[OpenAiChatEnum.GPT432k]: {
userOpenAiKey: user.openaiKey || '',
systemAuthKey: getSystemOpenAiKey()
}
};
if (!keyMap[model]) {
return Promise.reject('App model is exists');
}
const userOpenAiKey = user.openaiKey || '';
const systemAuthKey = getSystemOpenAiKey();
// 有自己的key
if (!mustPay && keyMap[model].userOpenAiKey) {
if (!mustPay && userOpenAiKey) {
return {
user,
userOpenAiKey: keyMap[model].userOpenAiKey,
userOpenAiKey,
systemAuthKey: ''
};
}
@@ -219,9 +198,8 @@ export const getApiKey = async ({
}
return {
user,
userOpenAiKey: '',
systemAuthKey: keyMap[model].systemAuthKey
systemAuthKey
};
};

View File

@@ -27,6 +27,7 @@ export type StreamResponseType = {
chatResponse: any;
prompts: ChatItemType[];
res: NextApiResponse;
model: `${OpenAiChatEnum}`;
[key: string]: any;
};
export type StreamResponseReturnType = {
@@ -35,49 +36,9 @@ export type StreamResponseReturnType = {
finishMessages: ChatItemType[];
};
export const modelServiceToolMap: Record<
ChatModelType,
{
chatCompletion: (data: ChatCompletionType) => Promise<ChatCompletionResponseType>;
streamResponse: (data: StreamResponseType) => Promise<StreamResponseReturnType>;
}
> = {
[OpenAiChatEnum.GPT35]: {
chatCompletion: (data: ChatCompletionType) =>
chatResponse({ model: OpenAiChatEnum.GPT35, ...data }),
streamResponse: (data: StreamResponseType) =>
openAiStreamResponse({
model: OpenAiChatEnum.GPT35,
...data
})
},
[OpenAiChatEnum.GPT3516k]: {
chatCompletion: (data: ChatCompletionType) =>
chatResponse({ model: OpenAiChatEnum.GPT3516k, ...data }),
streamResponse: (data: StreamResponseType) =>
openAiStreamResponse({
model: OpenAiChatEnum.GPT3516k,
...data
})
},
[OpenAiChatEnum.GPT4]: {
chatCompletion: (data: ChatCompletionType) =>
chatResponse({ model: OpenAiChatEnum.GPT4, ...data }),
streamResponse: (data: StreamResponseType) =>
openAiStreamResponse({
model: OpenAiChatEnum.GPT4,
...data
})
},
[OpenAiChatEnum.GPT432k]: {
chatCompletion: (data: ChatCompletionType) =>
chatResponse({ model: OpenAiChatEnum.GPT432k, ...data }),
streamResponse: (data: StreamResponseType) =>
openAiStreamResponse({
model: OpenAiChatEnum.GPT432k,
...data
})
}
export const modelServiceToolMap = {
chatCompletion: chatResponse,
streamResponse: openAiStreamResponse
};
/* delete invalid symbol */
@@ -124,7 +85,8 @@ export const ChatContextFilter = ({
}
// 去掉 system 的 token
maxTokens -= modelToolMap[model].countTokens({
maxTokens -= modelToolMap.countTokens({
model,
messages: systemPrompts
});
@@ -135,7 +97,8 @@ export const ChatContextFilter = ({
for (let i = chatPrompts.length - 1; i >= 0; i--) {
chats.unshift(chatPrompts[i]);
const tokens = modelToolMap[model].countTokens({
const tokens = modelToolMap.countTokens({
model,
messages: chats
});
@@ -164,13 +127,14 @@ export const resStreamResponse = async ({
res.setHeader('X-Accel-Buffering', 'no');
res.setHeader('Cache-Control', 'no-cache, no-transform');
const { responseContent, totalTokens, finishMessages } = await modelServiceToolMap[
model
].streamResponse({
chatResponse,
prompts,
res
});
const { responseContent, totalTokens, finishMessages } = await modelServiceToolMap.streamResponse(
{
chatResponse,
prompts,
res,
model
}
);
return { responseContent, totalTokens, finishMessages };
};
@@ -186,16 +150,27 @@ export const V2_StreamResponse = async ({
}) => {
let responseContent = '';
let error: any = null;
let truncateData = '';
const clientRes = async (data: string) => {
//部分代理会导致流式传输时的数据被截断不为json格式这里做一个兼容
const { content = '' } = (() => {
try {
if (truncateData) {
try {
//判断是否为json如果是的话直接跳过后续拼装操作注意极端情况下可能出现截断成3截以上情况也可以兼容
JSON.parse(data);
} catch (e) {
data = truncateData + data;
}
truncateData = '';
}
const json = JSON.parse(data);
const content: string = json?.choices?.[0].delta.content || '';
error = json.error;
responseContent += content;
return { content };
} catch (error) {
truncateData = data;
return {};
}
})();
@@ -248,7 +223,8 @@ export const V2_StreamResponse = async ({
value: responseContent
});
const totalTokens = modelToolMap[model].countTokens({
const totalTokens = modelToolMap.countTokens({
model,
messages: finishMessages
});

View File

@@ -35,7 +35,8 @@ export const chatResponse = async ({
const adaptMessages = adaptChatItem_openAI({ messages: filterMessages, reserveId: false });
const chatAPI = getOpenAIApi(apiKey);
const promptsToken = modelToolMap[model].countTokens({
const promptsToken = modelToolMap.countTokens({
model,
messages: filterMessages
});
@@ -116,7 +117,8 @@ export const openAiStreamResponse = async ({
value: responseContent
});
const totalTokens = modelToolMap[model].countTokens({
const totalTokens = modelToolMap.countTokens({
model,
messages: finishMessages
});

View File

@@ -1,8 +1,8 @@
// @ts-ignore
import Payment from 'wxpay-v3';
export const getPayment = () => {
return new Payment({
export const getPayment = () =>
new Payment({
appid: process.env.WX_APPID,
mchid: process.env.WX_MCHID,
private_key: process.env.WX_PRIVATE_KEY?.replace(/\\n/g, '\n'),
@@ -10,22 +10,30 @@ export const getPayment = () => {
apiv3_private_key: process.env.WX_V3_CODE,
notify_url: process.env.WX_NOTIFY_URL
});
};
export const nativePay = (amount: number, payId: string): Promise<string> =>
getPayment()
.native({
export const nativePay = async (amount: number, payId: string) => {
try {
const res = await getPayment().native({
description: 'Fast GPT 余额充值',
out_trade_no: payId,
amount: {
total: amount
}
})
.then((res: any) => JSON.parse(res.data).code_url);
});
return JSON.parse(res.data).code_url as string;
} catch (error) {
return Promise.reject(error);
}
};
export const getPayResult = (payId: string) =>
getPayment()
.getTransactionsByOutTradeNo({
export const getPayResult = async (payId: string) => {
try {
const res = await getPayment().getTransactionsByOutTradeNo({
out_trade_no: payId
})
.then((res: any) => JSON.parse(res.data));
});
return JSON.parse(res.data);
} catch (error) {
return Promise.reject(error);
}
};

View File

@@ -21,7 +21,9 @@ declare global {
var QRCode: any;
var qaQueueLen: number;
var vectorQueueLen: number;
var OpenAiEncMap: Record<string, Tiktoken>;
var OpenAiEncMap: Tiktoken;
var sendInformQueue: (() => Promise<void>)[];
var sendInformQueueLen: number;
var systemEnv: {
vectorMaxProcess: number;
qaMaxProcess: number;

View File

@@ -70,6 +70,7 @@ export interface TrainingDataSchema {
_id: string;
userId: string;
kbId: string;
expireAt: Date;
lockTime: Date;
mode: `${TrainingModeEnum}`;
prompt: string;

View File

@@ -19,7 +19,7 @@ export interface UserUpdateParams {
export interface UserBillType {
id: string;
time: Date;
modelName: BillSchema['modelName'];
modelName: string;
type: BillSchema['type'];
textLen: number;
tokenLen: number;

View File

@@ -1,17 +1,17 @@
import { formatPrice } from './user';
import dayjs from 'dayjs';
import type { BillSchema } from '../types/mongoSchema';
import type { UserBillType } from '@/types/user';
import { ChatItemType } from '@/types/chat';
import { ChatCompletionRequestMessageRoleEnum } from 'openai';
import { ChatRoleEnum } from '@/constants/chat';
import type { MessageItemType } from '@/pages/api/openapi/v1/chat/completions';
import { ChatModelMap, OpenAiChatEnum } from '@/constants/model';
export const adaptBill = (bill: BillSchema): UserBillType => {
return {
id: bill._id,
type: bill.type,
modelName: bill.modelName,
modelName: ChatModelMap[bill.modelName as `${OpenAiChatEnum}`]?.name || bill.modelName,
time: bill.time,
textLen: bill.textLen,
tokenLen: bill.tokenLen,

View File

@@ -152,7 +152,7 @@ export const splitText_token = ({ text, maxLen }: { text: string; maxLen: number
const slideLen = Math.floor(maxLen * 0.3);
try {
const enc = getOpenAiEncMap()[OpenAiChatEnum.GPT35];
const enc = getOpenAiEncMap();
// filter empty text. encode sentence
const encodeText = enc.encode(text);

View File

@@ -4,32 +4,8 @@ import type { ChatItemType } from '@/types/chat';
import { countOpenAIToken, openAiSliceTextByToken } from './openai';
import { gpt_chatItemTokenSlice } from '@/pages/api/openapi/text/gptMessagesSlice';
export const modelToolMap: Record<
ChatModelType,
{
countTokens: (data: { messages: ChatItemType[] }) => number;
sliceText: (data: { text: string; length: number }) => string;
tokenSlice: (data: { messages: ChatItemType[]; maxToken: number }) => ChatItemType[];
}
> = {
[OpenAiChatEnum.GPT35]: {
countTokens: ({ messages }) => countOpenAIToken({ model: OpenAiChatEnum.GPT35, messages }),
sliceText: (data) => openAiSliceTextByToken({ model: OpenAiChatEnum.GPT35, ...data }),
tokenSlice: (data) => gpt_chatItemTokenSlice({ model: OpenAiChatEnum.GPT35, ...data })
},
[OpenAiChatEnum.GPT3516k]: {
countTokens: ({ messages }) => countOpenAIToken({ model: OpenAiChatEnum.GPT3516k, messages }),
sliceText: (data) => openAiSliceTextByToken({ model: OpenAiChatEnum.GPT3516k, ...data }),
tokenSlice: (data) => gpt_chatItemTokenSlice({ model: OpenAiChatEnum.GPT3516k, ...data })
},
[OpenAiChatEnum.GPT4]: {
countTokens: ({ messages }) => countOpenAIToken({ model: OpenAiChatEnum.GPT4, messages }),
sliceText: (data) => openAiSliceTextByToken({ model: OpenAiChatEnum.GPT4, ...data }),
tokenSlice: (data) => gpt_chatItemTokenSlice({ model: OpenAiChatEnum.GPT4, ...data })
},
[OpenAiChatEnum.GPT432k]: {
countTokens: ({ messages }) => countOpenAIToken({ model: OpenAiChatEnum.GPT432k, messages }),
sliceText: (data) => openAiSliceTextByToken({ model: OpenAiChatEnum.GPT432k, ...data }),
tokenSlice: (data) => gpt_chatItemTokenSlice({ model: OpenAiChatEnum.GPT432k, ...data })
}
export const modelToolMap = {
countTokens: countOpenAIToken,
sliceText: openAiSliceTextByToken,
tokenSlice: gpt_chatItemTokenSlice
};

View File

@@ -4,7 +4,6 @@ import { ChatRoleEnum } from '@/constants/chat';
import { ChatCompletionRequestMessageRoleEnum } from 'openai';
import { OpenAiChatEnum } from '@/constants/model';
import axios from 'axios';
import dayjs from 'dayjs';
import type { MessageItemType } from '@/pages/api/openapi/v1/chat/completions';
export const getOpenAiEncMap = () => {
@@ -14,28 +13,11 @@ export const getOpenAiEncMap = () => {
if (typeof global !== 'undefined' && global.OpenAiEncMap) {
return global.OpenAiEncMap;
}
const enc = {
[OpenAiChatEnum.GPT35]: encoding_for_model('gpt-3.5-turbo', {
'<|im_start|>': 100264,
'<|im_end|>': 100265,
'<|im_sep|>': 100266
}),
[OpenAiChatEnum.GPT3516k]: encoding_for_model('gpt-3.5-turbo', {
'<|im_start|>': 100264,
'<|im_end|>': 100265,
'<|im_sep|>': 100266
}),
[OpenAiChatEnum.GPT4]: encoding_for_model('gpt-4', {
'<|im_start|>': 100264,
'<|im_end|>': 100265,
'<|im_sep|>': 100266
}),
[OpenAiChatEnum.GPT432k]: encoding_for_model('gpt-4-32k', {
'<|im_start|>': 100264,
'<|im_end|>': 100265,
'<|im_sep|>': 100266
})
};
const enc = encoding_for_model('gpt-3.5-turbo', {
'<|im_start|>': 100264,
'<|im_end|>': 100265,
'<|im_sep|>': 100266
});
if (typeof window !== 'undefined') {
window.OpenAiEncMap = enc;
@@ -78,7 +60,7 @@ export function countOpenAIToken({
const adaptMessages = adaptChatItem_openAI({ messages, reserveId: true });
const token = adaptMessages.reduce((sum, item) => {
const text = `${item.role}\n${item.content}`;
const enc = getOpenAiEncMap()[model];
const enc = getOpenAiEncMap();
const encodeText = enc.encode(text);
const tokens = encodeText.length + diffVal;
return sum + tokens;
@@ -96,7 +78,7 @@ export const openAiSliceTextByToken = ({
text: string;
length: number;
}) => {
const enc = getOpenAiEncMap()[model];
const enc = getOpenAiEncMap();
const encodeText = enc.encode(text);
const decoder = new TextDecoder();
return decoder.decode(enc.decode(encodeText.slice(0, length)));
@@ -111,16 +93,11 @@ export const authOpenAiKey = async (key: string) => {
})
.then((res) => {
if (!res.data.access_until) {
return Promise.reject('OpenAI Key 无效,请重试或更换 key');
}
const keyExpiredTime = dayjs(res.data.access_until * 1000);
const currentTime = dayjs();
if (keyExpiredTime.isBefore(currentTime)) {
return Promise.reject('OpenAI Key 已过期');
return Promise.resolve('OpenAI Key 可能无效');
}
})
.catch((err) => {
console.log(err);
return Promise.reject(err?.response?.data?.error || 'OpenAI 账号无效,请重试或更换 key');
return Promise.reject(err?.response?.data?.error?.message || 'OpenAI Key 可能无效');
});
};

View File

@@ -1,4 +1,4 @@
# host 版本
# host 版本, 不使用本机代理
version: '3.3'
services:
pg:
@@ -6,8 +6,10 @@ services:
image: registry.cn-hangzhou.aliyuncs.com/fastgpt/pgvector:v0.4.2 # 阿里云
container_name: pg
restart: always
ports:
ports: # 生产环境建议不要暴露
- 8100:5432
networks:
- fastgpt
environment:
# 这里的配置只有首次运行生效。修改后,重启镜像是不会生效的。需要把持久化数据删除再重启,才有效果
- POSTGRES_USER=fastgpt
@@ -15,31 +17,40 @@ services:
- POSTGRES_DB=fastgpt
volumes:
# 刚创建的文件
- /root/fastgpt/pg/init.sql:/docker-entrypoint-initdb.d/init.sh
- /root/fastgpt/pg/data:/var/lib/postgresql/data
mongodb:
- ./pg/init.sql:/docker-entrypoint-initdb.d/init.sh
- ./pg/data:/var/lib/postgresql/data
mongo:
# image: mongo:5.0.18
image: registry.cn-hangzhou.aliyuncs.com/fastgpt/mongo:5.0.18 # 阿里云
container_name: mongo
restart: always
ports:
ports: # 生产环境建议不要暴露
- 27017:27017
networks:
- fastgpt
environment:
# 这里的配置只有首次运行生效。修改后,重启镜像是不会生效的。需要把持久化数据删除再重启,才有效果
- MONGO_INITDB_ROOT_USERNAME=username
- MONGO_INITDB_ROOT_PASSWORD=password
volumes:
- /root/fastgpt/mongo/data:/data/db
- /root/fastgpt/mongo/logs:/var/log/mongodb
- ./mongo/data:/data/db
- ./mongo/logs:/var/log/mongodb
fastgpt:
container_name: fastgpt
# image: c121914yu/fast-gpt:latest # docker hub
image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:latest # 阿里云
# network_mode: host #
ports:
- 3000:3000
networks:
- fastgpt
depends_on:
- mongo
- pg
restart: always
container_name: fastgpt
environment: # 可选的变量,不需要的话需要去掉
- PORT=3000 # 运行的端口地址,如果不是 3000需要修改成实际地址。
- DB_MAX_LINK=15 # database max link
- DB_MAX_LINK=5 # database max link
# proxy可选
- AXIOS_PROXY_HOST=127.0.0.1
- AXIOS_PROXY_PORT=7890
@@ -59,17 +70,17 @@ services:
# root key, 最高权限,可以内部接口互相调用
- ROOT_KEY=root_key
# 和上方mongo镜像的username,password对应
- MONGODB_URI=mongodb://username:password@0.0.0.0:27017/?authSource=admin
- MONGODB_URI=mongodb://username:password@mongo:27017/?authSource=admin
- MONGODB_NAME=fastgpt
- PG_HOST=0.0.0.0
- PG_PORT=8100
- PG_HOST=pg
- PG_PORT=5432
# 和上方PG镜像对应.
- PG_USER=fastgpt
- PG_PASSWORD=1234
- PG_DB_NAME=fastgpt
# oneapi 配置 推荐使用 one-api 管理key
- ONEAPI_URL=https://kfcwurtbijvh.cloud.sealos.io/v1
- ONEAPI_KEY=sk-itJ9v8qthRiFDzfs62Ea21Aa9b004c8791937dCf4cC568Ff
- ONEAPI_URL=https://xxxxx.cloud.sealos.io/v1
- ONEAPI_KEY=sk-xxxxxx
# openai 相关配置:使用了 oneapi 后,下面只需要填下 OPENAI_BASE_URL (国外可全忽略)
- OPENAIKEY=sk-xxxxx
- OPENAI_BASE_URL=https://api.openai.com/v1
@@ -80,16 +91,21 @@ services:
restart: always
ports:
- 3001:3001
networks:
- fastgpt
depends_on:
- mongo
- fastgpt
environment:
- MONGODB_URI=mongodb://username:password@0.0.0.0:27017/?authSource=admin
- MONGODB_URI=mongodb://username:password@mongo:27017/?authSource=admin
- MONGODB_NAME=fastgpt
- ADMIN_USER=username
- ADMIN_PASS=password
- ADMIN_SECRET=any
- PARENT_URL=http://0.0.0.0:3000
- PARENT_URL=http://fastgpt:3000
- PARENT_ROOT_KEY=root_key
key-admin:
container_name: key-admin
keyadmin:
container_name: keyadmin
image: justsong/one-api
restart: always
ports:
@@ -97,7 +113,7 @@ services:
environment:
- TZ=Asia/Shanghai
volumes:
- /home/ubuntu/data/one-api:/data
- /keyadmin:/data
nginx: # 宝塔不需要额外装 nginx
image: registry.cn-hangzhou.aliyuncs.com/fastgpt/nginx:alpine3.17
# image: nginx:alpine3.17
@@ -106,8 +122,135 @@ services:
network_mode: host
volumes:
# 刚创建的文件
- /root/fastgpt/nginx/nginx.conf:/etc/nginx/nginx.conf:ro
- /root/fastgpt/nginx/logs:/var/log/nginx
- ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
- ./nginx/logs:/var/log/nginx
# https证书没有的话不填对应的nginx.conf也要修改
- /root/fastgpt/nginx/ssl/docgpt.key:/ssl/docgpt.key
- /root/fastgpt/nginx/ssl/docgpt.pem:/ssl/docgpt.pem
- ./nginx/ssl/docgpt.key:/ssl/docgpt.key
- ./nginx/ssl/docgpt.pem:/ssl/docgpt.pem
networks:
fastgpt:
# host 版本, 不推荐,推荐直接用上面的,用个 BASE_URL 中转
# version: '3.3'
# services:
# pg:
# # image: ankane/pgvector:v0.4.2 # dockerhub
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/pgvector:v0.4.2 # 阿里云
# container_name: pg
# restart: always
# ports: # 生产环境建议不要暴露
# - 8100:5432
# networks:
# - fastgpt
# environment:
# # 这里的配置只有首次运行生效。修改后,重启镜像是不会生效的。需要把持久化数据删除再重启,才有效果
# - POSTGRES_USER=fastgpt
# - POSTGRES_PASSWORD=1234
# - POSTGRES_DB=fastgpt
# volumes:
# # 刚创建的文件
# - ./pg/init.sql:/docker-entrypoint-initdb.d/init.sh
# - ./pg/data:/var/lib/postgresql/data
# mongo:
# # image: mongo:5.0.18
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/mongo:5.0.18 # 阿里云
# container_name: mongo
# restart: always
# ports: # 生产环境建议不要暴露
# - 27017:27017
# networks:
# - fastgpt
# environment:
# # 这里的配置只有首次运行生效。修改后,重启镜像是不会生效的。需要把持久化数据删除再重启,才有效果
# - MONGO_INITDB_ROOT_USERNAME=username
# - MONGO_INITDB_ROOT_PASSWORD=password
# volumes:
# - ./mongo/data:/data/db
# - ./mongo/logs:/var/log/mongodb
# fastgpt:
# # image: ghcr.io/c121914yu/fastgpt:latest # github
# # image: c121914yu/fast-gpt:latest # docker hub
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:latest # 阿里云
# network_mode: host
# restart: always
# container_name: fastgpt
# environment: # 可选的变量,不需要的话需要去掉
# - PORT=3000 # 运行的端口地址,如果不是 3000需要修改成实际地址。
# - DB_MAX_LINK=15 # database max link
# # proxy可选
# - AXIOS_PROXY_HOST=127.0.0.1
# - AXIOS_PROXY_PORT=7890
# # 发送邮箱验证码配置。用的是QQ邮箱。参考 nodeMail 获取MAILE_CODE自行百度。
# - MY_MAIL=54545@qq.com
# - MAILE_CODE=1234
# # 阿里短信服务(邮箱和短信至少二选一)
# - aliAccessKeyId=xxxx
# - aliAccessKeySecret=xxxx
# - aliSignName=xxxxx
# - aliTemplateCode=SMS_xxxx
# # google V3 安全校验(可选)
# - CLIENT_GOOGLE_VER_TOKEN=xxx
# - SERVICE_GOOGLE_VER_TOKEN=xx
# # token加密凭证随便填作为登录凭证
# - TOKEN_KEY=xxxx
# # root key, 最高权限,可以内部接口互相调用
# - ROOT_KEY=xxx
# # 和上方mongo镜像的username,password对应
# - MONGODB_URI=mongodb://username:password@0.0.0.0:27017/?authSource=admin
# - MONGODB_NAME=fastgpt
# - PG_HOST=0.0.0.0
# - PG_PORT=8100
# # 和上方PG镜像对应.
# - PG_USER=fastgpt
# - PG_PASSWORD=1234
# - PG_DB_NAME=fastgpt
# # oneapi 配置 推荐使用 one-api 管理key
# - ONEAPI_URL=https://xxxxx.cloud.sealos.io/v1
# - ONEAPI_KEY=sk-xxxxx
# # openai 相关配置:使用了 oneapi 后,下面只需要填下 OPENAI_BASE_URL (国外可全忽略)
# - OPENAIKEY=sk-xxxxx
# - OPENAI_BASE_URL=https://api.openai.com/v1
# - OPENAI_BASE_URL_AUTH=可选的安全凭证,会放到 header.auth 里
# fastgpt-admin:
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-admin:latest
# container_name: fastgpt-admin
# restart: always
# ports:
# - 3001:3001
# networks:
# - fastgpt
# depends_on:
# - mongo
# - fastgpt
# environment:
# - MONGODB_URI=mongodb://username:password@mongo:27017/?authSource=admin
# - MONGODB_NAME=fastgpt
# - ADMIN_USER=username
# - ADMIN_PASS=password
# - ADMIN_SECRET=any
# - PARENT_URL=http://fastgpt:3000
# - PARENT_ROOT_KEY=root_key
# key-admin:
# container_name: key-admin
# image: justsong/one-api
# restart: always
# ports:
# - 3002:3000
# environment:
# - TZ=Asia/Shanghai
# volumes:
# - /home/ubuntu/data/one-api:/data
# nginx: # 宝塔不需要额外装 nginx
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/nginx:alpine3.17
# # image: nginx:alpine3.17
# container_name: nginx
# restart: always
# network_mode: host
# volumes:
# # 刚创建的文件
# - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
# - ./nginx/logs:/var/log/nginx
# # https证书没有的话不填对应的nginx.conf也要修改
# - ./nginx/ssl/docgpt.key:/ssl/docgpt.key
# - ./nginx/ssl/docgpt.pem:/ssl/docgpt.pem
# networks:
# fastgpt:

View File

@@ -30,7 +30,7 @@ https://cloud.sealos.io/
```
# 下面的地址是 Sealos 提供的,务必写上 v1
OPENAI_BASE_URL=https://xxxx.cloud.sealos.io/v1
ONEAPI_URL=https://xxxx.cloud.sealos.io/v1
# 下面的 key 由 one-api 提供
OPENAIKEY=sk-xxxxxx
ONEAPI_KEY=sk-xxxxxx
```