Compare commits
120 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
46eb96c72e | ||
|
|
0540c2e46a | ||
|
|
d13b823065 | ||
|
|
71f58b791f | ||
|
|
4b1cc6878c | ||
|
|
c6a5f16336 | ||
|
|
7ed3c91ac6 | ||
|
|
1f801d1464 | ||
|
|
d0e65431d0 | ||
|
|
a21b2ccdd0 | ||
|
|
8767c576be | ||
|
|
fb08f61eb5 | ||
|
|
ce68791c3c | ||
|
|
3294be5e7f | ||
|
|
ec86847280 | ||
|
|
dd2d93c953 | ||
|
|
e60c36b423 | ||
|
|
1f112f7715 | ||
|
|
adbaa8b37b | ||
|
|
29c95d24ae | ||
|
|
e0b1a78344 | ||
|
|
2774940851 | ||
|
|
c2c73ed23c | ||
|
|
9682c82713 | ||
|
|
e8d4933dc4 | ||
|
|
0b6020a9cd | ||
|
|
894beee266 | ||
|
|
405e453ed3 | ||
|
|
79d289e25b | ||
|
|
51054f5829 | ||
|
|
317fba1855 | ||
|
|
f61e467d04 | ||
|
|
27de1cad47 | ||
|
|
3ea2cf1dcb | ||
|
|
4397a0ad6b | ||
|
|
4f51839026 | ||
|
|
3c0fa30aaf | ||
|
|
02abe42afe | ||
|
|
088a90de10 | ||
|
|
a98c56f968 | ||
|
|
1e5714da1b | ||
|
|
867d69659f | ||
|
|
d44203bff1 | ||
|
|
629a147741 | ||
|
|
9e951fbc15 | ||
|
|
a540ee944a | ||
|
|
b064e704f3 | ||
|
|
7e54421190 | ||
|
|
647f701692 | ||
|
|
0db413ab52 | ||
|
|
426eceac22 | ||
|
|
1ee527ceb8 | ||
|
|
03f1ab1a2f | ||
|
|
faf722fa15 | ||
|
|
36dad6df33 | ||
|
|
6ff5db7b41 | ||
|
|
56a0b48b97 | ||
|
|
ff24042df5 | ||
|
|
c31d247f07 | ||
|
|
e903eb5b94 | ||
|
|
c605964fa8 | ||
|
|
1fe5cd751a | ||
|
|
488e2f476e | ||
|
|
915b104b8a | ||
|
|
aaa350a13e | ||
|
|
6a2b34cb92 | ||
|
|
7f26b31f53 | ||
|
|
2a597964a2 | ||
|
|
c1d3a46dc7 | ||
|
|
0c55beb72d | ||
|
|
9b1c0e1a3c | ||
|
|
a7988c164e | ||
|
|
99e5fbd0f5 | ||
|
|
5e4c4dd79b | ||
|
|
70584783a5 | ||
|
|
705ac1c27e | ||
|
|
52d00d0562 | ||
|
|
9a145f223f | ||
|
|
b7cd4dec89 | ||
|
|
33154a9c19 | ||
|
|
e1c7503611 | ||
|
|
d04c298132 | ||
|
|
eceda01c19 | ||
|
|
ea1681e1eb | ||
|
|
f6c4b4c96d | ||
|
|
22cc9c85be | ||
|
|
43f8d6008f | ||
|
|
29c5554f9e | ||
|
|
9b18a46456 | ||
|
|
d5923bc64f | ||
|
|
f19c2d2ca1 | ||
|
|
84d91f3f76 | ||
|
|
7811f7482b | ||
|
|
9c8ca7dd25 | ||
|
|
1409916bd0 | ||
|
|
fc7edcb54f | ||
|
|
87d35042de | ||
|
|
77dc961a07 | ||
|
|
9a45fb64c2 | ||
|
|
881c36542c | ||
|
|
f88c6031f5 | ||
|
|
8a02b3b04a | ||
|
|
d460305871 | ||
|
|
144bed5a77 | ||
|
|
96fc917bad | ||
|
|
794a3698ad | ||
|
|
fbbc32361b | ||
|
|
dc329041f3 | ||
|
|
5feb2e19bf | ||
|
|
ec22cd8320 | ||
|
|
8c7efcbd1a | ||
|
|
afc5947bfb | ||
|
|
40189a6899 | ||
|
|
b73829a25c | ||
|
|
a7c5d3cc05 | ||
|
|
cc36a13f17 | ||
|
|
943abbe0fb | ||
|
|
b13c3c4da5 | ||
|
|
c12aa7fdf7 | ||
|
|
85e11abc0a |
@@ -8,3 +8,4 @@ README.md
|
||||
|
||||
.yalc/
|
||||
yalc.lock
|
||||
testApi/
|
||||
@@ -1,8 +1,27 @@
|
||||
AXIOS_PROXY_HOST=127.0.0.1
|
||||
AXIOS_PROXY_PORT=33210
|
||||
MONGODB_URI=
|
||||
MY_MAIL=
|
||||
MAILE_CODE=
|
||||
TOKEN_KEY=
|
||||
OPENAIKEY=
|
||||
REDIS_URL=
|
||||
# proxy
|
||||
# AXIOS_PROXY_HOST=127.0.0.1
|
||||
# AXIOS_PROXY_PORT=7890
|
||||
# OPENAI_BASE_URL=https://api.openai.com/v1
|
||||
# OPENAI_BASE_URL_AUTH=可选的安全凭证
|
||||
# 是否开启队列任务。 1-开启,0-关闭(请求parentUrl去执行任务,单机时直接填1)
|
||||
queueTask=1
|
||||
parentUrl=https://hostname/api/openapi/startEvents
|
||||
# email
|
||||
MY_MAIL=xxx@qq.com
|
||||
MAILE_CODE=xxx
|
||||
# ali ems
|
||||
aliAccessKeyId=xxx
|
||||
aliAccessKeySecret=xxx
|
||||
aliSignName=xxx
|
||||
aliTemplateCode=SMS_xxx
|
||||
# token
|
||||
TOKEN_KEY=xxx
|
||||
# openai
|
||||
OPENAIKEY=sk-xxx
|
||||
# db
|
||||
MONGODB_URI=mongodb://username:password@0.0.0.0:27017/test?authSource=admin
|
||||
PG_HOST=0.0.0.0
|
||||
PG_PORT=8100
|
||||
PG_USER=xxx
|
||||
PG_PASSWORD=xxx
|
||||
PG_DB_NAME=xxx
|
||||
50
.github/workflows/release.yml
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
name: Release
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
|
||||
push:
|
||||
branches:
|
||||
- 'main'
|
||||
|
||||
jobs:
|
||||
release:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 1
|
||||
|
||||
- name: Install Dependencies
|
||||
run: |
|
||||
sudo apt update && sudo apt install -y nodejs npm
|
||||
- # Add support for more platforms with QEMU (optional)
|
||||
# https://github.com/docker/setup-qemu-action
|
||||
name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
with:
|
||||
driver-opts: network=host
|
||||
|
||||
- name: Login to GitHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
password: ${{ secrets.GH_PAT }}
|
||||
- name: build and publish image
|
||||
env:
|
||||
# fork friendly ^^
|
||||
DOCKER_REPO: ghcr.io/${{ github.repository_owner }}/fast-gpt
|
||||
run: |
|
||||
docker buildx build \
|
||||
--platform linux/amd64,linux/arm64 \
|
||||
--label "org.opencontainers.image.source=https://github.com/${{ github.repository_owner }}/FastGPT" \
|
||||
--label "org.opencontainers.image.description=fast-gpt image" \
|
||||
--label "org.opencontainers.image.licenses=MIT" \
|
||||
--push \
|
||||
-t ${DOCKER_REPO}:latest \
|
||||
-f Dockerfile \
|
||||
.
|
||||
4
.gitignore
vendored
@@ -34,6 +34,6 @@ yarn-error.log*
|
||||
# typescript
|
||||
*.tsbuildinfo
|
||||
next-env.d.ts
|
||||
/public/trainData/
|
||||
/.vscode/
|
||||
platform.json
|
||||
platform.json
|
||||
testApi/
|
||||
@@ -54,13 +54,4 @@ USER nextjs
|
||||
|
||||
EXPOSE 3000
|
||||
|
||||
ENV PORT 3000
|
||||
ENV MAX_USER ''
|
||||
ENV AXIOS_PROXY_HOST ''
|
||||
ENV AXIOS_PROXY_PORT ''
|
||||
ENV MONGODB_URI ''
|
||||
ENV MY_MAIL ''
|
||||
ENV MAILE_CODE ''
|
||||
ENV TOKEN_KEY ''
|
||||
|
||||
CMD ["node", "server.js"]
|
||||
|
||||
2
Makefile
@@ -34,7 +34,7 @@ run: ## Run a dev service from host.
|
||||
|
||||
.PHONY: docker-build
|
||||
docker-build: ## Build docker image with the desktop-frontend.
|
||||
docker build -t c121914yu/fast-gpt:latest .
|
||||
docker build -t c121914yu/fast-gpt:latest . --network host --build-arg HTTP_PROXY=http://127.0.0.1:7890 --build-arg HTTPS_PROXY=http://127.0.0.1:7890
|
||||
|
||||
##@ Deployment
|
||||
|
||||
|
||||
322
README.md
@@ -1,112 +1,256 @@
|
||||
# Fast GPT
|
||||
|
||||
Fast GPT 允许你使用自己的 openai API KEY 来快速的调用 openai 接口,包括 GPT3 及其微调方法,以及最新的 gpt3.5 接口。
|
||||
Fast GPT 允许你使用自己的 openai API KEY 来快速的调用 openai 接口,目前集成了 gpt35 和 embedding. 可构建自己的知识库。
|
||||
|
||||
## 初始化
|
||||
复制 .env.template 成 .env.local ,填写核心参数
|
||||
|
||||
```
|
||||
AXIOS_PROXY_HOST=axios代理地址,目前 openai 接口都需要走代理,本机的话就填 127.0.0.1
|
||||
AXIOS_PROXY_PORT=代理端口
|
||||
MONGODB_URI=mongo数据库地址(例如:mongodb://username:password@ip:27017/?authSource=admin&readPreference=primary&appname=MongoDB%20Compass&directConnection=true&ssl=false)
|
||||
MY_MAIL=发送验证码邮箱
|
||||
MAILE_CODE=邮箱秘钥(代理里设置的是QQ邮箱,不知道怎么找这个 code 的,可以百度搜"nodemailer发送邮件")
|
||||
TOKEN_KEY=随便填一个,用于生成和校验 token
|
||||
```
|
||||
## 知识库原理
|
||||

|
||||
|
||||
## 开发
|
||||
**配置环境变量**
|
||||
```bash
|
||||
# proxy(可选)
|
||||
AXIOS_PROXY_HOST=127.0.0.1
|
||||
AXIOS_PROXY_PORT=7890
|
||||
# openai 中转连接(可选)
|
||||
OPENAI_BASE_URL=https://api.openai.com/v1
|
||||
OPENAI_BASE_URL_AUTH=可选的安全凭证
|
||||
# 是否开启队列任务。 1-开启,0-关闭(请求 parentUrl 去执行任务,单机时直接填1)
|
||||
queueTask=1
|
||||
parentUrl=https://hostname/api/openapi/startEvents
|
||||
# 发送邮箱验证码配置。参考 nodeMail 获取参数,自行百度。
|
||||
MY_MAIL=xxx@qq.com
|
||||
MAILE_CODE=xxx
|
||||
# 阿里短信服务(邮箱和短信至少二选一)
|
||||
aliAccessKeyId=xxx
|
||||
aliAccessKeySecret=xxx
|
||||
aliSignName=xxx
|
||||
aliTemplateCode=SMS_xxx
|
||||
# token(随便填,作为登录凭证)
|
||||
TOKEN_KEY=xxx
|
||||
# openai key
|
||||
OPENAIKEY=sk-xxx
|
||||
# mongo连接地址
|
||||
MONGODB_URI=mongodb://username:password@0.0.0.0:27017/test?authSource=admin
|
||||
# mongo数据库名称
|
||||
MONGODB_NAME=xxx
|
||||
# pg 数据库相关内容,和 docker-compose 对上
|
||||
PG_HOST=0.0.0.0
|
||||
PG_PORT=8102
|
||||
PG_USER=xxx
|
||||
PG_PASSWORD=xxx
|
||||
PG_DB_NAME=xxx
|
||||
```
|
||||
**运行**
|
||||
```
|
||||
pnpm dev
|
||||
```
|
||||
Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
|
||||
|
||||
## 部署
|
||||
|
||||
### docker 模式
|
||||
请准备好 docker, mongo,代理, 和nginx。 镜像走本机的代理,所以用 network=host,port 改成代理的端口,clash 一般都是 7890。
|
||||
### 代理环境(国外服务器可忽略)
|
||||
1. [clash 方案](./docs/proxy/clash.md) - 仅需一台服务器(需要有 clash)
|
||||
2. [nginx 方案](./docs/proxy/nginx.md) - 需要一台国外服务器
|
||||
3. [cloudflare 方案](./docs/proxy/cloudflare.md) - 需要有域名(每日免费 10w 次代理请求)
|
||||
|
||||
#### docker 打包
|
||||
```bash
|
||||
docker build -t imageName:tag .
|
||||
docker push imageName:tag
|
||||
```
|
||||
|
||||
#### 服务器拉取镜像和运行
|
||||
```bash
|
||||
# 服务器拉取部署, imageName 替换成镜像名
|
||||
docker pull imageName:tag
|
||||
docker stop fast-gpt || true
|
||||
docker rm fast-gpt || true
|
||||
docker run -d --network=host --name fast-gpt \
|
||||
-e AXIOS_PROXY_HOST=127.0.0.1 \
|
||||
-e AXIOS_PROXY_PORT=7890 \
|
||||
-e MY_MAIL=your email\
|
||||
-e MAILE_CODE=your email code \
|
||||
-e TOKEN_KEY=任意一个内容 \
|
||||
-e MONGODB_URI="mongodb://user:password@127.0.0.0:27017/?authSource=admin&readPreference=primary&appname=MongoDB%20Compass&ssl=false" \
|
||||
imageName:tag
|
||||
```
|
||||
|
||||
#### 软件教程:docker 安装
|
||||
### docker 部署
|
||||
#### 1. 安装 docker 和 docker-compose
|
||||
这个不同系统略有区别,百度安装下。验证安装成功后进行下一步。下面给出一个例子:
|
||||
```bash
|
||||
# 安装docker
|
||||
curl -sSL https://get.daocloud.io/docker | sh
|
||||
curl -L https://get.daocloud.io/docker | sh
|
||||
sudo systemctl start docker
|
||||
# 安装 docker-compose
|
||||
curl -L https://github.com/docker/compose/releases/download/1.23.2/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
|
||||
sudo chmod +x /usr/local/bin/docker-compose
|
||||
# 验证安装
|
||||
docker -v
|
||||
docker-compose -v
|
||||
```
|
||||
|
||||
#### 软件教程:mongo 安装
|
||||
|
||||
#### 2. 创建3个初始化文件
|
||||
手动创建或者直接把 deploy 里内容复制过去
|
||||
|
||||
**/root/fast-gpt/pg/init.sql**
|
||||
```sql
|
||||
set -e
|
||||
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
|
||||
|
||||
CREATE EXTENSION vector;
|
||||
-- init table
|
||||
CREATE TABLE modelData (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
vector VECTOR(1536),
|
||||
status VARCHAR(50) NOT NULL,
|
||||
user_id VARCHAR(50) NOT NULL,
|
||||
model_id VARCHAR(50) NOT NULL,
|
||||
q TEXT NOT NULL,
|
||||
a TEXT NOT NULL
|
||||
);
|
||||
-- create index
|
||||
CREATE INDEX modelData_status_index ON modelData USING HASH (status);
|
||||
CREATE INDEX modelData_userId_index ON modelData USING HASH (user_id);
|
||||
CREATE INDEX modelData_modelId_index ON modelData USING HASH (model_id);
|
||||
EOSQL
|
||||
```
|
||||
|
||||
**/root/fast-gpt/nginx/nginx.conf**
|
||||
```conf
|
||||
user nginx;
|
||||
worker_processes auto;
|
||||
worker_rlimit_nofile 51200;
|
||||
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
|
||||
http {
|
||||
access_log off;
|
||||
server_names_hash_bucket_size 512;
|
||||
client_header_buffer_size 32k;
|
||||
large_client_header_buffers 4 32k;
|
||||
client_max_body_size 50M;
|
||||
|
||||
gzip on;
|
||||
gzip_min_length 1k;
|
||||
gzip_buffers 4 8k;
|
||||
gzip_http_version 1.1;
|
||||
gzip_comp_level 6;
|
||||
gzip_vary on;
|
||||
gzip_types text/plain application/x-javascript text/css application/javascript application/json application/xml;
|
||||
gzip_disable "MSIE [1-6]\.";
|
||||
|
||||
open_file_cache max=1000 inactive=1d;
|
||||
open_file_cache_valid 30s;
|
||||
open_file_cache_min_uses 8;
|
||||
open_file_cache_errors off;
|
||||
|
||||
server {
|
||||
listen 443 ssl;
|
||||
server_name docgpt.ahapocket.cn;
|
||||
ssl_certificate /ssl/docgpt.pem;
|
||||
ssl_certificate_key /ssl/docgpt.key;
|
||||
ssl_session_timeout 5m;
|
||||
|
||||
location / {
|
||||
proxy_pass http://localhost:3000;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
}
|
||||
}
|
||||
server {
|
||||
listen 80;
|
||||
server_name docgpt.ahapocket.cn;
|
||||
rewrite ^(.*) https://$server_name$1 permanent;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**/root/fast-gpt/docker-compose.yml**
|
||||
```yml
|
||||
version: "3.3"
|
||||
services:
|
||||
fast-gpt:
|
||||
image: c121914yu/fast-gpt:latest
|
||||
network_mode: host
|
||||
restart: always
|
||||
container_name: fast-gpt
|
||||
environment:
|
||||
# - AXIOS_PROXY_HOST=127.0.0.1
|
||||
# - AXIOS_PROXY_PORT=7890
|
||||
# - OPENAI_BASE_URL=https://api.openai.com/v1
|
||||
# - OPENAI_BASE_URL_AUTH=可选的安全凭证
|
||||
- MY_MAIL=xxxx@qq.com
|
||||
- MAILE_CODE=xxxx
|
||||
- aliAccessKeyId=xxxx
|
||||
- aliAccessKeySecret=xxxx
|
||||
- aliSignName=xxxxx
|
||||
- aliTemplateCode=SMS_xxxx
|
||||
- TOKEN_KEY=xxxx
|
||||
- queueTask=1
|
||||
- parentUrl=https://hostname/api/openapi/startEvents
|
||||
- MONGODB_URI=mongodb://username:password@0.0.0.0:27017/?authSource=admin
|
||||
- MONGODB_NAME=xxx
|
||||
- PG_HOST=0.0.0.0
|
||||
- PG_PORT=8100
|
||||
- PG_USER=xxx
|
||||
- PG_PASSWORD=xxx
|
||||
- PG_DB_NAME=xxx
|
||||
- OPENAIKEY=sk-xxxxx
|
||||
nginx:
|
||||
image: nginx:alpine3.17
|
||||
container_name: nginx
|
||||
restart: always
|
||||
network_mode: host
|
||||
volumes:
|
||||
- /root/fast-gpt/nginx/nginx.conf:/etc/nginx/nginx.conf:ro
|
||||
- /root/fast-gpt/nginx/logs:/var/log/nginx
|
||||
- /root/fast-gpt/nginx/ssl/docgpt.key:/ssl/docgpt.key
|
||||
- /root/fast-gpt/nginx/ssl/docgpt.pem:/ssl/docgpt.pem
|
||||
pg:
|
||||
image: ankane/pgvector
|
||||
container_name: pg
|
||||
restart: always
|
||||
ports:
|
||||
- 8100:5432
|
||||
environment:
|
||||
- POSTGRES_USER=xxx
|
||||
- POSTGRES_PASSWORD=xxx
|
||||
- POSTGRES_DB=xxx
|
||||
volumes:
|
||||
- /root/fast-gpt/pg/data:/var/lib/postgresql/data
|
||||
- /root/fast-gpt/pg/init.sql:/docker-entrypoint-initdb.d/init.sh
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
mongodb:
|
||||
image: mongo:4.0.1
|
||||
container_name: mongo
|
||||
restart: always
|
||||
ports:
|
||||
- 27017:27017
|
||||
environment:
|
||||
- MONGO_INITDB_ROOT_USERNAME=username
|
||||
- MONGO_INITDB_ROOT_PASSWORD=password
|
||||
volumes:
|
||||
- /root/fast-gpt/mongo/data:/data/db
|
||||
- /root/fast-gpt/mongo/logs:/var/log/mongodb
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
```
|
||||
|
||||
#### 3. 运行 docker-compose
|
||||
下面是一个辅助脚本,也可以直接 docker-compose up -d
|
||||
**run.sh 运行文件**
|
||||
```bash
|
||||
docker pull mongo:6.0.4
|
||||
docker stop mongo
|
||||
docker rm mongo
|
||||
docker run -d --name mongo \
|
||||
-e MONGO_INITDB_ROOT_USERNAME= \
|
||||
-e MONGO_INITDB_ROOT_PASSWORD= \
|
||||
-v /root/service/mongo:/data/db \
|
||||
mongo:6.0.4
|
||||
#!/bin/bash
|
||||
docker-compose pull
|
||||
docker-compose up -d
|
||||
|
||||
# 检查 mongo 运行情况, 有成功的 logs 代表访问成功
|
||||
docker logs mongo
|
||||
```
|
||||
#### 软件教程: clash 代理
|
||||
```bash
|
||||
# 下载包
|
||||
curl https://glados.rocks/tools/clash-linux.zip -o clash.zip
|
||||
# 解压
|
||||
unzip clash.zip
|
||||
# 下载终端配置⽂件(改成自己配置文件路径)
|
||||
curl https://update.glados-config.com/clash/98980/8f30944/70870/glados-terminal.yaml > config.yaml
|
||||
# 赋予运行权限
|
||||
chmod +x ./clash-linux-amd64-v1.10.0
|
||||
# 记得配置端口变量:
|
||||
export ALL_PROXY=socks5://127.0.0.1:7891
|
||||
export http_proxy=http://127.0.0.1:7890
|
||||
export https_proxy=http://127.0.0.1:7890
|
||||
export HTTP_PROXY=http://127.0.0.1:7890
|
||||
export HTTPS_PROXY=http://127.0.0.1:7890
|
||||
echo "Docker Compose 重新拉取镜像完成!"
|
||||
|
||||
# 运行脚本: 删除clash - 到 clash 目录 - 删除缓存 - 执行运行
|
||||
# 会生成一个 nohup.out 文件,可以看到 clash 的 logs
|
||||
OLD_PROCESS=$(pgrep clash)
|
||||
if [ ! -z "$OLD_PROCESS" ]; then
|
||||
echo "Killing old process: $OLD_PROCESS"
|
||||
kill $OLD_PROCESS
|
||||
fi
|
||||
sleep 2
|
||||
cd **/clash
|
||||
rm -f ./nohup.out || true
|
||||
rm -f ./cache.db || true
|
||||
nohup ./clash-linux-amd64-v1.10.0 -d ./ &
|
||||
echo "Restart clash"
|
||||
# 删除本地旧镜像
|
||||
images=$(docker images --format "{{.ID}} {{.Repository}}" | grep fast-gpt)
|
||||
|
||||
# 将镜像 ID 和名称放入数组中
|
||||
IFS=$'\n' read -rd '' -a image_array <<<"$images"
|
||||
|
||||
# 遍历数组并删除所有旧的镜像
|
||||
for ((i=1; i<${#image_array[@]}; i++))
|
||||
do
|
||||
image=${image_array[$i]}
|
||||
image_id=${image%% *}
|
||||
docker rmi $image_id
|
||||
done
|
||||
```
|
||||
|
||||
#### 软件教程:Nginx
|
||||
...没写,这个百度吧。
|
||||
|
||||
#### redis
|
||||
## 其他优化点
|
||||
### Git Action 自动打包镜像
|
||||
.github里拥有一个 git 提交到 main 分支时自动打包 amd64 和 arm64 镜像的 actions。你仅需要提前在 git 配置好 session。
|
||||
|
||||
```bash
|
||||
# 索引
|
||||
# FT.CREATE idx:model:data ON JSON PREFIX 1 model:data: SCHEMA $.modelId AS modelId TAG $.dataId AS dataId TAG $.vector AS vector VECTOR FLAT 6 DIM 1536 DISTANCE_METRIC COSINE TYPE FLOAT32
|
||||
# FT.CREATE idx:model:data:hash ON HASH PREFIX 1 model:data: SCHEMA modelId TAG dataId TAG vector VECTOR FLAT 6 DIM 1536 DISTANCE_METRIC COSINE TYPE FLOAT32
|
||||
FT.CREATE idx:model:data ON HASH PREFIX 1 model:data: SCHEMA modelId TAG userId TAG q TEXT text TEXT vector VECTOR FLAT 6 DIM 1536 DISTANCE_METRIC COSINE TYPE FLOAT32
|
||||
```
|
||||
1. 创建账号 session: 头像 -> settings -> 最底部 Developer settings -> Personal access tokens -> tokens(classic) -> 创建新 session,把一些看起来需要的权限勾上。
|
||||
2. 添加 session 到仓库: 仓库 -> settings -> Secrets and variables -> Actions -> 创建secret
|
||||
3. 填写 secret: Name-GH_PAT, Secret-第一步的tokens
|
||||
|
||||
## 其他问题
|
||||
### Mac 可能的问题
|
||||
> 因为教程有部分镜像不兼容arm64,所以写个文档指导新手如何快速在mac上面搭建fast-gpt[如何在mac上面部署fastgpt](./docs/mac.md)
|
||||
|
||||
66
deploy/docker-compose.yml
Normal file
@@ -0,0 +1,66 @@
|
||||
version: '3.3'
|
||||
services:
|
||||
fast-gpt:
|
||||
image: c121914yu/fast-gpt:latest
|
||||
network_mode: host
|
||||
restart: always
|
||||
container_name: fast-gpt
|
||||
environment:
|
||||
# - AXIOS_PROXY_HOST=127.0.0.1
|
||||
# - AXIOS_PROXY_PORT=7890
|
||||
# - OPENAI_BASE_URL=https://api.openai.com/v1
|
||||
# - OPENAI_BASE_URL_AUTH=可选的安全凭证
|
||||
- MY_MAIL=xxxx@qq.com
|
||||
- MAILE_CODE=xxxx
|
||||
- aliAccessKeyId=xxxx
|
||||
- aliAccessKeySecret=xxxx
|
||||
- aliSignName=xxxxx
|
||||
- aliTemplateCode=SMS_xxxx
|
||||
- TOKEN_KEY=xxxx
|
||||
- queueTask=1
|
||||
- parentUrl=https://hostname/api/openapi/startEvents
|
||||
- MONGODB_URI=mongodb://username:password@0.0.0.0:27017/?authSource=admin
|
||||
- MONGODB_NAME=xxx
|
||||
- PG_HOST=0.0.0.0
|
||||
- PG_PORT=8100
|
||||
- PG_USER=xxx
|
||||
- PG_PASSWORD=xxx
|
||||
- PG_DB_NAME=xxx
|
||||
- OPENAIKEY=sk-xxxxx
|
||||
nginx:
|
||||
image: nginx:alpine3.17
|
||||
container_name: nginx
|
||||
restart: always
|
||||
network_mode: host
|
||||
volumes:
|
||||
- /root/fast-gpt/nginx/nginx.conf:/etc/nginx/nginx.conf:ro
|
||||
- /root/fast-gpt/nginx/logs:/var/log/nginx
|
||||
- /root/fast-gpt/nginx/ssl/docgpt.key:/ssl/docgpt.key
|
||||
- /root/fast-gpt/nginx/ssl/docgpt.pem:/ssl/docgpt.pem
|
||||
pg:
|
||||
image: ankane/pgvector
|
||||
container_name: pg
|
||||
restart: always
|
||||
ports:
|
||||
- 8100:5432
|
||||
environment:
|
||||
- POSTGRES_USER=xxx
|
||||
- POSTGRES_PASSWORD=xxx
|
||||
- POSTGRES_DB=xxx
|
||||
volumes:
|
||||
- /root/fast-gpt/pg/data:/var/lib/postgresql/data
|
||||
- /root/fast-gpt/pg/init.sql:/docker-entrypoint-initdb.d/init.sh
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
mongodb:
|
||||
image: mongo:4.0.1
|
||||
container_name: mongo
|
||||
restart: always
|
||||
ports:
|
||||
- 27017:27017
|
||||
environment:
|
||||
- MONGO_INITDB_ROOT_USERNAME=username
|
||||
- MONGO_INITDB_ROOT_PASSWORD=password
|
||||
volumes:
|
||||
- /root/fast-gpt/mongo/data:/data/db
|
||||
- /root/fast-gpt/mongo/logs:/var/log/mongodb
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
49
deploy/nginx/nginx.conf
Normal file
@@ -0,0 +1,49 @@
|
||||
user nginx;
|
||||
worker_processes auto;
|
||||
worker_rlimit_nofile 51200;
|
||||
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
|
||||
http {
|
||||
access_log off;
|
||||
server_names_hash_bucket_size 512;
|
||||
client_header_buffer_size 32k;
|
||||
large_client_header_buffers 4 32k;
|
||||
client_max_body_size 50M;
|
||||
|
||||
gzip on;
|
||||
gzip_min_length 1k;
|
||||
gzip_buffers 4 8k;
|
||||
gzip_http_version 1.1;
|
||||
gzip_comp_level 6;
|
||||
gzip_vary on;
|
||||
gzip_types text/plain application/x-javascript text/css application/javascript application/json application/xml;
|
||||
gzip_disable "MSIE [1-6]\.";
|
||||
|
||||
open_file_cache max=1000 inactive=1d;
|
||||
open_file_cache_valid 30s;
|
||||
open_file_cache_min_uses 8;
|
||||
open_file_cache_errors off;
|
||||
|
||||
server {
|
||||
listen 443 ssl;
|
||||
server_name docgpt.ahapocket.cn;
|
||||
ssl_certificate /ssl/docgpt.pem;
|
||||
ssl_certificate_key /ssl/docgpt.key;
|
||||
ssl_session_timeout 5m;
|
||||
|
||||
location / {
|
||||
proxy_pass http://localhost:3000;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
}
|
||||
}
|
||||
server {
|
||||
listen 80;
|
||||
server_name docgpt.ahapocket.cn;
|
||||
rewrite ^(.*) https://$server_name$1 permanent;
|
||||
}
|
||||
}
|
||||
19
deploy/pg/init.sql
Normal file
@@ -0,0 +1,19 @@
|
||||
set -e
|
||||
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
|
||||
|
||||
CREATE EXTENSION vector;
|
||||
-- init table
|
||||
CREATE TABLE modelData (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
vector VECTOR(1536),
|
||||
status VARCHAR(50) NOT NULL,
|
||||
user_id VARCHAR(50) NOT NULL,
|
||||
model_id VARCHAR(50) NOT NULL,
|
||||
q TEXT NOT NULL,
|
||||
a TEXT NOT NULL
|
||||
);
|
||||
-- create index
|
||||
CREATE INDEX modelData_status_index ON modelData USING HASH (status);
|
||||
CREATE INDEX modelData_userId_index ON modelData USING HASH (user_id);
|
||||
CREATE INDEX modelData_modelId_index ON modelData USING HASH (model_id);
|
||||
EOSQL
|
||||
19
deploy/run.sh
Normal file
@@ -0,0 +1,19 @@
|
||||
#!/bin/bash
|
||||
docker-compose pull
|
||||
docker-compose up -d
|
||||
|
||||
echo "Docker Compose 重新拉取镜像完成!"
|
||||
|
||||
# 删除本地旧镜像
|
||||
images=$(docker images --format "{{.ID}} {{.Repository}}" | grep fast-gpt)
|
||||
|
||||
# 将镜像 ID 和名称放入数组中
|
||||
IFS=$'\n' read -rd '' -a image_array <<<"$images"
|
||||
|
||||
# 遍历数组并删除所有旧的镜像
|
||||
for ((i=1; i<${#image_array[@]}; i++))
|
||||
do
|
||||
image=${image_array[$i]}
|
||||
image_id=${image%% *}
|
||||
docker rmi $image_id
|
||||
done
|
||||
BIN
docs/imgs/KBProcess.jpg
Normal file
|
After Width: | Height: | Size: 69 KiB |
100
docs/mac.md
Normal file
@@ -0,0 +1,100 @@
|
||||
## 怎么在mac上面部署fastgpt
|
||||
|
||||
### 前置条件
|
||||
|
||||
1、可以 curl api.openai.com
|
||||
|
||||
2、有openai key
|
||||
|
||||
3、有邮箱MAILE_CODE
|
||||
|
||||
4、有docker
|
||||
|
||||
```
|
||||
docker -v
|
||||
```
|
||||
|
||||
5、有pnpm ,可以使用`brew install pnpm`安装
|
||||
|
||||
6、需要创建一个放置pg和mongo数据的文件夹,这里创建在`~/fastgpt`目录中,里面有`pg` 和`mongo `两个文件夹
|
||||
|
||||
```
|
||||
➜ fast-gpt pwd
|
||||
/Users/jie/fast-gpt
|
||||
➜ fast-gpt ls
|
||||
mongo pg
|
||||
```
|
||||
|
||||
|
||||
|
||||
### docker部署方式
|
||||
|
||||
这种方式主要是为了方便调试,可以使用`pnpm dev ` 运行fast-gpt项目
|
||||
|
||||
**1、.env.local 文件**
|
||||
|
||||
```
|
||||
# proxy
|
||||
AXIOS_PROXY_HOST=127.0.0.1
|
||||
AXIOS_PROXY_PORT_FAST=7890
|
||||
AXIOS_PROXY_PORT_NORMAL=7890
|
||||
queueTask=1
|
||||
# email
|
||||
MY_MAIL= {Your Mail}
|
||||
MAILE_CODE={Your Mail code}
|
||||
# ali ems
|
||||
aliAccessKeyId=xxx
|
||||
aliAccessKeySecret=xxx
|
||||
aliSignName=xxx
|
||||
aliTemplateCode=SMS_xxx
|
||||
# token
|
||||
TOKEN_KEY=sswada
|
||||
# openai
|
||||
OPENAIKEY={Your openapi key}
|
||||
# db
|
||||
MONGODB_URI=mongodb://username:password@0.0.0.0:27017/test?authSource=admin
|
||||
PG_HOST=0.0.0.0
|
||||
PG_PORT=8100
|
||||
PG_USER=xxx
|
||||
PG_PASSWORD=xxx
|
||||
PG_DB_NAME=xxx
|
||||
```
|
||||
|
||||
**2、部署mongo**
|
||||
|
||||
```
|
||||
docker run --name mongo -p 27017:27017 -e MONGO_INITDB_ROOT_USERNAME=username -e MONGO_INITDB_ROOT_PASSWORD=password -v ~/fast-gpt/mongo/data:/data/db -d mongo:4.0.1
|
||||
```
|
||||
|
||||
**3、部署pgsql**
|
||||
|
||||
```
|
||||
docker run -it --name pg -e "POSTGRES_PASSWORD=xxx" -e POSTGRES_USER=xxx -p 8100:5432 -v ~/fast-gpt/pg/data:/var/lib/postgresql/data -d octoberlan/pgvector:v0.4.1
|
||||
```
|
||||
|
||||
进pgsql容器运行
|
||||
|
||||
```
|
||||
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
|
||||
|
||||
CREATE EXTENSION vector;
|
||||
-- init table
|
||||
CREATE TABLE modelData (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
vector VECTOR(1536),
|
||||
status VARCHAR(50) NOT NULL,
|
||||
user_id VARCHAR(50) NOT NULL,
|
||||
model_id VARCHAR(50) NOT NULL,
|
||||
q TEXT NOT NULL,
|
||||
a TEXT NOT NULL
|
||||
);
|
||||
-- create index
|
||||
CREATE INDEX modelData_status_index ON modelData (status);
|
||||
CREATE INDEX modelData_modelId_index ON modelData (modelId);
|
||||
CREATE INDEX modelData_userId_index ON modelData (userId);
|
||||
EOSQL
|
||||
```
|
||||
|
||||
|
||||
|
||||
4、**最后在FASTGPT项目里面运行pnpm dev 运行项目,然后进入localhost:3000 看项目是否跑起来了**
|
||||
68
docs/proxy/clash.md
Normal file
@@ -0,0 +1,68 @@
|
||||
# 安装 clash
|
||||
|
||||
clash 会在本机启动代理。对应的,你需要配置项目的两个环境变量:
|
||||
|
||||
```
|
||||
AXIOS_PROXY_HOST=127.0.0.1
|
||||
AXIOS_PROXY_PORT=7890
|
||||
```
|
||||
|
||||
需要注意的是,在你的 config.yaml 文件中,最好仅指定 api.openai.com 走代理,其他请求都直连。
|
||||
|
||||
**安装clash**
|
||||
```bash
|
||||
# 下载包
|
||||
curl https://glados.rocks/tools/clash-linux.zip -o clash.zip
|
||||
# 解压
|
||||
unzip clash.zip
|
||||
# 下载终端配置⽂件(改成自己配置文件路径)
|
||||
curl https://update.glados-config.com/clash/98980/8f30944/70870/glados-terminal.yaml > config.yaml
|
||||
# 赋予运行权限
|
||||
chmod +x ./clash-linux-amd64-v1.10.0
|
||||
```
|
||||
|
||||
**runClash.sh**
|
||||
```sh
|
||||
# 记得配置端口变量:
|
||||
export ALL_PROXY=socks5://127.0.0.1:7891
|
||||
export http_proxy=http://127.0.0.1:7890
|
||||
export https_proxy=http://127.0.0.1:7890
|
||||
export HTTP_PROXY=http://127.0.0.1:7890
|
||||
export HTTPS_PROXY=http://127.0.0.1:7890
|
||||
|
||||
# 运行脚本: 删除clash - 到 clash 目录 - 删除缓存 - 执行运行. 会生成一个 nohup.out 文件,可以看到 clash 的 logs
|
||||
OLD_PROCESS=$(pgrep clash)
|
||||
if [ ! -z "$OLD_PROCESS" ]; then
|
||||
echo "Killing old process: $OLD_PROCESS"
|
||||
kill $OLD_PROCESS
|
||||
fi
|
||||
sleep 2
|
||||
cd **/clash
|
||||
rm -f ./nohup.out || true
|
||||
rm -f ./cache.db || true
|
||||
nohup ./clash-linux-amd64-v1.10.0 -d ./ &
|
||||
echo "Restart clash"
|
||||
```
|
||||
|
||||
**config.yaml配置例子**
|
||||
```yaml
|
||||
mixed-port: 7890
|
||||
allow-lan: false
|
||||
bind-address: '*'
|
||||
mode: rule
|
||||
log-level: warning
|
||||
dns:
|
||||
enable: true
|
||||
ipv6: false
|
||||
nameserver:
|
||||
- 8.8.8.8
|
||||
- 8.8.4.4
|
||||
cache-size: 400
|
||||
proxies:
|
||||
-
|
||||
proxy-groups:
|
||||
- { name: '♻️ 自动选择', type: url-test, proxies: [香港V01×1.5], url: 'https://api.openai.com', interval: 3600}
|
||||
rules:
|
||||
- 'DOMAIN-SUFFIX,api.openai.com,♻️ 自动选择'
|
||||
- 'MATCH,DIRECT'
|
||||
```
|
||||
46
docs/proxy/cloudflare.md
Normal file
@@ -0,0 +1,46 @@
|
||||
# cloudflare 代理配置
|
||||
|
||||
[来自 "不做了睡觉" 教程](https://gravel-twister-d32.notion.site/FastGPT-API-ba7bb261d5fd4fd9bbb2f0607dacdc9e)
|
||||
|
||||
**workers 配置文件**
|
||||
|
||||
```js
|
||||
const TELEGRAPH_URL = 'https://api.openai.com';
|
||||
|
||||
addEventListener('fetch', (event) => {
|
||||
event.respondWith(handleRequest(event.request));
|
||||
});
|
||||
|
||||
async function handleRequest(request) {
|
||||
// 安全校验
|
||||
if (request.headers.get('auth') !== 'auth_code') {
|
||||
return new Response('UnAuthorization', { status: 403 });
|
||||
}
|
||||
|
||||
const url = new URL(request.url);
|
||||
url.host = TELEGRAPH_URL.replace(/^https?:\/\//, '');
|
||||
|
||||
const modifiedRequest = new Request(url.toString(), {
|
||||
headers: request.headers,
|
||||
method: request.method,
|
||||
body: request.body,
|
||||
redirect: 'follow'
|
||||
});
|
||||
|
||||
const response = await fetch(modifiedRequest);
|
||||
const modifiedResponse = new Response(response.body, response);
|
||||
|
||||
// 添加允许跨域访问的响应头
|
||||
modifiedResponse.headers.set('Access-Control-Allow-Origin', '*');
|
||||
|
||||
return modifiedResponse;
|
||||
}
|
||||
```
|
||||
|
||||
**对应的环境变量**
|
||||
务必别忘了填 v1
|
||||
|
||||
```
|
||||
OPENAI_BASE_URL=https://xxxxxx/v1
|
||||
OPENAI_BASE_URL_AUTH=auth_code
|
||||
```
|
||||
72
docs/proxy/nginx.md
Normal file
@@ -0,0 +1,72 @@
|
||||
# nginx 反向代理 openai 接口
|
||||
如果你有国外的服务器,可以通过配置 nginx 反向代理,转发 openai 相关的请求,从而让国内的服务器可以通过访问该 nginx 去访问 openai 接口。
|
||||
|
||||
```conf
|
||||
user nginx;
|
||||
worker_processes auto;
|
||||
worker_rlimit_nofile 51200;
|
||||
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
|
||||
http {
|
||||
resolver 8.8.8.8;
|
||||
proxy_ssl_server_name on;
|
||||
|
||||
access_log off;
|
||||
server_names_hash_bucket_size 512;
|
||||
client_header_buffer_size 32k;
|
||||
large_client_header_buffers 4 32k;
|
||||
client_max_body_size 50M;
|
||||
|
||||
gzip on;
|
||||
gzip_min_length 1k;
|
||||
gzip_buffers 4 8k;
|
||||
gzip_http_version 1.1;
|
||||
gzip_comp_level 6;
|
||||
gzip_vary on;
|
||||
gzip_types text/plain application/x-javascript text/css application/javascript application/json application/xml;
|
||||
gzip_disable "MSIE [1-6]\.";
|
||||
|
||||
open_file_cache max=1000 inactive=1d;
|
||||
open_file_cache_valid 30s;
|
||||
open_file_cache_min_uses 8;
|
||||
open_file_cache_errors off;
|
||||
|
||||
server {
|
||||
listen 443 ssl;
|
||||
server_name your_host;
|
||||
ssl_certificate /ssl/your_host.pem;
|
||||
ssl_certificate_key /ssl/your_host.key;
|
||||
ssl_session_timeout 5m;
|
||||
|
||||
location ~ /openai/(.*) {
|
||||
# auth check
|
||||
if ($http_authkey != "xxxxxx") {
|
||||
return 403;
|
||||
}
|
||||
|
||||
proxy_pass https://api.openai.com/$1$is_args$args;
|
||||
proxy_set_header Host api.openai.com;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
# 流式响应
|
||||
proxy_set_header Connection '';
|
||||
proxy_http_version 1.1;
|
||||
chunked_transfer_encoding off;
|
||||
proxy_buffering off;
|
||||
proxy_cache off;
|
||||
# 一般响应
|
||||
proxy_buffer_size 128k;
|
||||
proxy_buffers 4 256k;
|
||||
proxy_busy_buffers_size 256k;
|
||||
}
|
||||
}
|
||||
server {
|
||||
listen 80;
|
||||
server_name ai.fastgpt.run;
|
||||
rewrite ^(.*) https://$server_name$1 permanent;
|
||||
}
|
||||
}
|
||||
```
|
||||
@@ -1,13 +1,15 @@
|
||||
/** @type {import('next').NextConfig} */
|
||||
|
||||
const path = require('path');
|
||||
const isDev = process.env.NODE_ENV === 'development';
|
||||
|
||||
const nextConfig = {
|
||||
output: 'standalone',
|
||||
reactStrictMode: false,
|
||||
compress: true,
|
||||
|
||||
webpack(config) {
|
||||
config.experiments = {
|
||||
asyncWebAssembly: true,
|
||||
layers: true
|
||||
};
|
||||
config.module.rules = config.module.rules.concat([
|
||||
{
|
||||
test: /\.svg$/i,
|
||||
|
||||
13
package.json
@@ -11,8 +11,13 @@
|
||||
"format": "prettier --config \"./.prettierrc.js\" --write \"./src/**/*.{ts,tsx,scss}\""
|
||||
},
|
||||
"dependencies": {
|
||||
"@alicloud/dysmsapi20170525": "^2.0.23",
|
||||
"@alicloud/openapi-client": "^0.4.5",
|
||||
"@alicloud/tea-util": "^1.4.5",
|
||||
"@chakra-ui/icons": "^2.0.17",
|
||||
"@chakra-ui/react": "^2.5.1",
|
||||
"@chakra-ui/system": "^2.5.5",
|
||||
"@dqbd/tiktoken": "^1.0.6",
|
||||
"@emotion/react": "^11.10.6",
|
||||
"@emotion/styled": "^11.10.6",
|
||||
"@next/font": "13.1.6",
|
||||
@@ -24,7 +29,7 @@
|
||||
"eventsource-parser": "^0.1.0",
|
||||
"formidable": "^2.1.1",
|
||||
"framer-motion": "^9.0.6",
|
||||
"gpt-token-utils": "^1.2.0",
|
||||
"graphemer": "^1.4.0",
|
||||
"hyperdown": "^2.4.29",
|
||||
"immer": "^9.0.19",
|
||||
"jsonwebtoken": "^9.0.0",
|
||||
@@ -36,6 +41,8 @@
|
||||
"nodemailer": "^6.9.1",
|
||||
"nprogress": "^0.2.0",
|
||||
"openai": "^3.2.1",
|
||||
"papaparse": "^5.4.1",
|
||||
"pg": "^8.10.0",
|
||||
"react": "18.2.0",
|
||||
"react-dom": "18.2.0",
|
||||
"react-hook-form": "^7.43.1",
|
||||
@@ -48,6 +55,7 @@
|
||||
"sass": "^1.58.3",
|
||||
"sharp": "^0.31.3",
|
||||
"tunnel": "^0.0.6",
|
||||
"wxpay-v3": "^3.0.2",
|
||||
"zustand": "^4.3.5"
|
||||
},
|
||||
"devDependencies": {
|
||||
@@ -57,11 +65,12 @@
|
||||
"@types/lodash": "^4.14.191",
|
||||
"@types/node": "18.14.0",
|
||||
"@types/nodemailer": "^6.4.7",
|
||||
"@types/papaparse": "^5.3.7",
|
||||
"@types/pg": "^8.6.6",
|
||||
"@types/react": "18.0.28",
|
||||
"@types/react-dom": "18.0.11",
|
||||
"@types/react-syntax-highlighter": "^15.5.6",
|
||||
"@types/tunnel": "^0.0.3",
|
||||
"@types/uuid": "^9.0.1",
|
||||
"eslint": "8.34.0",
|
||||
"eslint-config-next": "13.1.6",
|
||||
"husky": "^8.0.3",
|
||||
|
||||
1338
pnpm-lock.yaml
generated
@@ -1,15 +0,0 @@
|
||||
{"prompt":"什么是百度plato?</s>","completion":" 百度PLATO是一种聊天机器人,发布于2021年10月,参数量为百亿,是GPT-3的1/17。</s>"}
|
||||
{"prompt":"与百度plato相比,chatgpt有哪些优点?</s>","completion":" 与百度PLATO相比,ChatGPT体现出耐心、体贴和妥当,被网友们普遍称赞。ChatGPT可以用于个性化聊天机器人,例如生成一个很相似的数字人,提供一位温柔健谈的朋友,指导语言学习等。</s>"}
|
||||
{"prompt":"replika是什么?</s>","completion":" Replika是一个产品,使用ChatGPT的技术,可以根据自己的需求生成一个虚拟女友、男友或单纯的朋友。目前还引起了不少有关性骚扰、家暴、隐私等问题的争议。</s>"}
|
||||
{"prompt":"聊天机器人可以用于哪些场景?</s>","completion":" 聊天机器人可以用于很多陪伴的场景,例如亲人去世、抑郁症或自闭症患者、语言学习、小朋友、养老院或独居在家的老人等。</s>"}
|
||||
{"prompt":"有人反对使用聊天机器人解决孤独困境,为什么?</s>","completion":" 有人反对使用聊天机器人解决孤独困境,认为孤独困境应该用人与人真正的交往解决,机器是冰冷的。</s>"}
|
||||
{"prompt":"为什么聊天机器人仍然需要解决当下的需求?</s>","completion":" 在社会结构性的困局还没有解开时,长夜漫漫的独身打工人、养老院里卧床的老人、父母在外留守在家的儿童,当下的需求仍需要解决。</s>"}
|
||||
{"prompt":"ai在未来会被戴上什么样的帽子?</s>","completion":" AI在未来各个场景下都会被戴上一个很虚无的帽子:“机器是冰冷的”、“没有灵魂”、“它并不是真正在思考”。</s>"}
|
||||
{"prompt":"怎么回答“ai是否真正思考”的问题?</s>","completion":" 引用乔姆斯基老爷子的那句名言:“思考是人类特性。AI有天会思考吗?那就像是在问潜水艇会不会游泳一样。”</s>"}
|
||||
{"prompt":"聊天机器人与人类完成任务的方式有何不同?</s>","completion":" 聊天机器人与人类完成任务的方式不同,但并不重要。它们能跟人一样完成任务,这是最重要的。</s>"}
|
||||
{"prompt":"聊天机器人的发展前景如何?</s>","completion":" 聊天机器人的发展前景广阔,可以应用于很多领域,例如客服、教育、医疗等。</s>"}
|
||||
{"prompt":"什么是百度plato?</s>","completion":" 百度PLATO是一种聊天机器人,发布于2021年10月,参数量为百亿,是GPT-3的1/17。</s>"}
|
||||
{"prompt":"与百度plato相比,chatgpt有哪些优点?</s>","completion":" 与百度PLATO相比,ChatGPT体现出耐心、体贴和妥当,被网友们普遍称赞。ChatGPT还可以用于很多陪伴的场景,如亲人去世、抑郁症或自闭症患者、语言学习、小朋友、养老院或独居在家的老人等。</s>"}
|
||||
{"prompt":"replika是什么产品?</s>","completion":" Replika是一个产品,用的是ChatGPT的技术。用户可以根据自己的需求生成一个虚拟女友、男友或单纯的朋友。目前还引起了不少有关性骚扰、家暴、隐私等问题的争议。</s>"}
|
||||
{"prompt":"聊天机器人能够解决哪些现实问题?</s>","completion":" 聊天机器人能够解决现代人面临的孤独困境,如长夜漫漫的独身打工人、养老院里卧床的老人、父母在外留守在家的儿童等。</s>"}
|
||||
{"prompt":"有人反对使用聊天机器人解决孤独困境,为什么?</s>","completion":" 有人反对使用聊天机器人解决孤独困境,认为孤独困境应该用人与人真正的交往解决,机器是冰冷的。</s>"}
|
||||
10
public/docs/chatProblem.md
Normal file
@@ -0,0 +1,10 @@
|
||||
### 常见问题
|
||||
**请求次数太多了**
|
||||
一般是因为自己的 openai 账号异常。请先检查自己的账号是否正常使用。
|
||||
**内容长度**
|
||||
chatgpt 上下文最长 4096 tokens, 上下文超长时会报错。
|
||||
**删除和复制**
|
||||
电脑端:聊天内容右侧有复制和删除的图标。
|
||||
移动端:点击对话头像,可以选择复制或删除该条内容。
|
||||
**代理出错**
|
||||
服务器代理不稳定,可以过一会儿再尝试。 或者可以访问国外服务器: [FastGpt](https://fastgpt.run/)
|
||||
6
public/docs/csvSelect.md
Normal file
@@ -0,0 +1,6 @@
|
||||
接受一个csv文件,表格头包含 question 和 answer。question 代表问题,answer 代表答案。
|
||||
导入前会进行去重,如果问题和答案完全相同,则不会被导入,所以最终导入的内容可能会比文件的内容少。但是,对于带有换行的内容,目前无法去重。
|
||||
| question | answer |
|
||||
| --- | --- |
|
||||
| 什么是 laf | laf 是一个云函数开发平台…… |
|
||||
| 什么是 sealos | Sealos 是以 kubernetes 为内核的云操作系统发行版,可以…… |
|
||||
41
public/docs/intro.md
Normal file
@@ -0,0 +1,41 @@
|
||||
## 欢迎使用 Fast GPT
|
||||
|
||||
[Git 仓库](https://github.com/c121914yu/FastGPT)
|
||||
|
||||
### 交流群/问题反馈
|
||||
扫码满了,加个小号,定时拉
|
||||
wx号: fastgpt123
|
||||

|
||||
|
||||
|
||||
### 快速开始
|
||||
1. 使用手机号注册账号。
|
||||
2. 进入账号页面,添加关联账号,目前只有 openai 的账号可以添加,直接去 openai 官网,把 API Key 粘贴过来。
|
||||
3. 如果填写了自己的 openai 账号,使用时会直接用你的账号。如果没有填写,需要付费使用平台的账号。
|
||||
4. 进入模型页,创建一个模型,建议直接用 ChatGPT。
|
||||
5. 在模型列表点击【对话】,即可使用 API 进行聊天。
|
||||
|
||||
### 价格表
|
||||
如果使用了自己的 Api Key,不会计费。可以在账号页,看到详细账单。单纯使用 chatGPT 模型进行对话,只有一个计费项目。使用知识库时,包含**对话**和**索引**生成两个计费项。
|
||||
| 计费项 | 价格: 元/ 1K tokens(包含上下文)|
|
||||
| --- | --- |
|
||||
| chatgpt - 对话 | 0.03 |
|
||||
| 知识库 - 对话 | 0.03 |
|
||||
| 知识库 - 索引 | 0.004 |
|
||||
| 文件拆分 | 0.03 |
|
||||
|
||||
|
||||
### 定制 prompt
|
||||
|
||||
1. 进入模型编辑页
|
||||
2. 调整温度和提示词
|
||||
3. 使用该模型对话。每次对话时,提示词和温度都会自动注入,方便管理个人的模型。建议把自己日常经常需要使用的 5~10 个方向预设好。
|
||||
|
||||
### 知识库
|
||||
|
||||
1. 创建模型时选择【知识库】
|
||||
2. 进入模型编辑页
|
||||
3. 导入数据,可以选择手动导入,或者选择文件导入。文件导入会自动调用 chatGPT 理解文件内容,并生成知识库。
|
||||
4. 使用该模型对话。
|
||||
|
||||
注意:使用知识库模型对话时,tokens 消耗会加快。
|
||||
3
public/docs/shareHint.md
Normal file
@@ -0,0 +1,3 @@
|
||||
你正准备分享对话,请确保分享链接不会滥用,因为它是使用的是你的 API key。
|
||||
* 分享空白对话:为该模型创建一个空白的聊天分享出去。
|
||||
* 分享当前对话:会把当前聊天的内容也分享出去,但是要注意不要多个人同时用一个聊天内容。
|
||||
6
public/docs/versionIntro.md
Normal file
@@ -0,0 +1,6 @@
|
||||
### Fast GPT V2.8.1
|
||||
* 新增 - 暂停聊天。
|
||||
* 优化 - 知识库升级,内容条数不上限!
|
||||
* 优化 - 导入去重效果,可防止导出后的 csv 重复导入。
|
||||
* 优化 - 聊天框,电脑端复制删除图标。
|
||||
* 优化 - 聊天框,生成内容时,如果滚动条触底,则会自动向下滚动,不需要手动下滑。
|
||||
@@ -1,19 +0,0 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" style="margin: auto; background: none; display: block; shape-rendering: auto;" width="204px" height="204px" viewBox="0 0 100 100" preserveAspectRatio="xMidYMid">
|
||||
<circle cx="84" cy="50" r="10" fill="#e15b64">
|
||||
<animate attributeName="r" repeatCount="indefinite" dur="0.5681818181818182s" calcMode="spline" keyTimes="0;1" values="15;0" keySplines="0 0.5 0.5 1" begin="0s"></animate>
|
||||
<animate attributeName="fill" repeatCount="indefinite" dur="2.272727272727273s" calcMode="discrete" keyTimes="0;0.25;0.5;0.75;1" values="#e15b64;#abbd81;#f8b26a;#f47e60;#e15b64" begin="0s"></animate>
|
||||
</circle><circle cx="16" cy="50" r="10" fill="#e15b64">
|
||||
<animate attributeName="r" repeatCount="indefinite" dur="2.272727272727273s" calcMode="spline" keyTimes="0;0.25;0.5;0.75;1" values="0;0;15;15;15" keySplines="0 0.5 0.5 1;0 0.5 0.5 1;0 0.5 0.5 1;0 0.5 0.5 1" begin="0s"></animate>
|
||||
<animate attributeName="cx" repeatCount="indefinite" dur="2.272727272727273s" calcMode="spline" keyTimes="0;0.25;0.5;0.75;1" values="16;16;16;50;84" keySplines="0 0.5 0.5 1;0 0.5 0.5 1;0 0.5 0.5 1;0 0.5 0.5 1" begin="0s"></animate>
|
||||
</circle><circle cx="50" cy="50" r="10" fill="#f47e60">
|
||||
<animate attributeName="r" repeatCount="indefinite" dur="2.272727272727273s" calcMode="spline" keyTimes="0;0.25;0.5;0.75;1" values="0;0;15;15;15" keySplines="0 0.5 0.5 1;0 0.5 0.5 1;0 0.5 0.5 1;0 0.5 0.5 1" begin="-0.5681818181818182s"></animate>
|
||||
<animate attributeName="cx" repeatCount="indefinite" dur="2.272727272727273s" calcMode="spline" keyTimes="0;0.25;0.5;0.75;1" values="16;16;16;50;84" keySplines="0 0.5 0.5 1;0 0.5 0.5 1;0 0.5 0.5 1;0 0.5 0.5 1" begin="-0.5681818181818182s"></animate>
|
||||
</circle><circle cx="84" cy="50" r="10" fill="#f8b26a">
|
||||
<animate attributeName="r" repeatCount="indefinite" dur="2.272727272727273s" calcMode="spline" keyTimes="0;0.25;0.5;0.75;1" values="0;0;15;15;15" keySplines="0 0.5 0.5 1;0 0.5 0.5 1;0 0.5 0.5 1;0 0.5 0.5 1" begin="-1.1363636363636365s"></animate>
|
||||
<animate attributeName="cx" repeatCount="indefinite" dur="2.272727272727273s" calcMode="spline" keyTimes="0;0.25;0.5;0.75;1" values="16;16;16;50;84" keySplines="0 0.5 0.5 1;0 0.5 0.5 1;0 0.5 0.5 1;0 0.5 0.5 1" begin="-1.1363636363636365s"></animate>
|
||||
</circle><circle cx="16" cy="50" r="10" fill="#abbd81">
|
||||
<animate attributeName="r" repeatCount="indefinite" dur="2.272727272727273s" calcMode="spline" keyTimes="0;0.25;0.5;0.75;1" values="0;0;15;15;15" keySplines="0 0.5 0.5 1;0 0.5 0.5 1;0 0.5 0.5 1;0 0.5 0.5 1" begin="-1.7045454545454546s"></animate>
|
||||
<animate attributeName="cx" repeatCount="indefinite" dur="2.272727272727273s" calcMode="spline" keyTimes="0;0.25;0.5;0.75;1" values="16;16;16;50;84" keySplines="0 0.5 0.5 1;0 0.5 0.5 1;0 0.5 0.5 1;0 0.5 0.5 1" begin="-1.7045454545454546s"></animate>
|
||||
</circle>
|
||||
<!-- [ldio] generated by https://loading.io/ --></svg>
|
||||
|
Before Width: | Height: | Size: 2.9 KiB |
BIN
public/imgs/wx300-2.jpg
Normal file
|
After Width: | Height: | Size: 59 KiB |
BIN
public/imgs/wx300.jpg
Normal file
|
After Width: | Height: | Size: 53 KiB |
|
Before Width: | Height: | Size: 15 KiB |
BIN
public/imgs/wxerweima300.jpg
Normal file
|
After Width: | Height: | Size: 50 KiB |
@@ -1,21 +0,0 @@
|
||||
{"prompt": "什么是高潜公司\n\n###\n\n", "completion": " 高潜公司是指高潜力的公司,其所处的赛道有发展潜力和前景。###"}
|
||||
{"prompt": "高潜公司的优势是什么\n\n###\n\n", "completion": " 高潜公司所处的整个行业和赛道有一定的增量空间,头部vc通过严格的筛选和加持为高潜公司背书,高潜公司的创始者一般都特别优秀。加入高潜公司可以获得很多机会来锻炼自己、强化自己的认知,快速成长、展现自己,并有可能成为公司扩张后的管理层###"}
|
||||
{"prompt": "高潜公司的风险是什么\n\n###\n\n", "completion": " 高潜公司的高风险问题也很明显。一旦公司失败,很多边缘人就会被淘汰掉。但宏观来讲,只要赛道是有潜力的,即便这家公司失败了,也可以跳槽到成功的公司去。###"}
|
||||
{"prompt": "高潜公司的例子有哪些\n\n###\n\n", "completion": " 很多高潜公司在做最近比较火的AIGC,包括去年的元宇宙,前两年比较火的新消费、企业服务和出海之类的。###"}
|
||||
{"prompt": "为什么现在加入大厂的前景不广阔?\n\n###\n\n", "completion": " 今年的大形势之下,互联网大厂纷纷开始裁员、取消普调、晋升比例严重缩水,HC也急剧减少。###"}
|
||||
{"prompt": "加入高潜公司可以获得什么样的机会?\n\n###\n\n", "completion": " 加入高潜公司可以获得很多机会来锻炼自己、强化自己的认知,快速成长、展现自己,并有可能成为公司扩张后的管理层。###"}
|
||||
{"prompt": "什么是高潜公司?\n\n###\n\n", "completion": " 高潜公司的定义是“高潜力的公司”,所处的行业和赛道有发展潜力和前景,头部vc为其背书,创始者一般都特别优秀。###"}
|
||||
{"prompt": "加入高潜公司有哪些优点\n\n###\n\n", "completion": " 加入高潜公司能够获得锻炼自己、强化认知的机会,快速成长、展现自己,在公司扩张后成为团队的管理层。同时,所处的行业还处于上升期,发展前景广阔。###"}
|
||||
{"prompt": "高潜公司存在哪些风险?\n\n###\n\n", "completion": " 高潜公司存在高风险问题,一旦公司失败,很多边缘人就会被淘汰掉。但宏观来讲,只要赛道是有潜力的,即便这家公司失败了,也可以跳槽到成功的公司去。###"}
|
||||
{"prompt": "高潜公司和互联网有什么相似之处?\n\n###\n\n", "completion": " 目前来说,高潜公司的风格大部分都比较像互联网大厂,有风格,但又没有那么内卷,因为大家每天都在开拓边疆,突破和探索市场的边界。同时,互联网产业的蓬勃发展也给其他行业,包括新能源汽车、新消费等,带来了用户增长和运营的经验###"}
|
||||
{"prompt": "互联网大厂的制度和分工如何?\n\n###\n\n", "completion": " 目前的互联网大厂的制度和分工非常明确,基本是按照一些标准SOP做事。同时,由于已经没有用户增量了,内卷和向上管理的问题也比较严重。###"}
|
||||
{"prompt": "高潜公司对于哪些毕业生是很好的选择\n\n###\n\n", "completion": " 对于追求长期创造价值而非想找份安稳的工作的毕业生来说,高潜公司是很好的选择。###"}
|
||||
{"prompt": "互联网大厂的晋升方式有哪些?\n\n###\n\n", "completion": " 互联网大厂的晋升方式有两种:老板特别喜欢你或者你的战功支撑你的竞争###"}
|
||||
{"prompt": "为什么目前更多优秀的人都会加入创业公司?\n\n###\n\n", "completion": " 目前社会的大趋势是更多优秀的人都会加入创业公司,因为真正在创造价值的其实永远是创业公司###"}
|
||||
{"prompt": "为什么选择高潜公司主要是因为我们迎来了什么三个繁荣?\n\n###\n\n", "completion": " 选择高潜公司主要是因为我们迎来了创新生态的三个繁荣:人才繁荣、资本繁荣和环境繁荣###"}
|
||||
{"prompt": "资本繁荣是如何推动创业生态的崛起的?\n\n###\n\n", "completion": " 资本繁荣,无论中国还是美国,创业生态的崛起都是伴随着移动互联网的发展。中国这一代VC的崛起,主要是通过投资移动互联网项目所积累的战绩扩大了资金池,通过这些成本低、增长快、回报率高的项目,才有了底气去推动更多行业发展,尝试着投资toB和硬科技这类成本高、增长慢、回报率低、风险大的项目。###"}
|
||||
{"prompt": "环境繁荣是指什么?\n\n###\n\n", "completion": " 环境繁荣是指互联网带动起经济发展后,各地政府也开始了对于创业进行培育,免费场地、巨额无偿补贴、各地送钱竞赛,大幅度降低了创业成本。###"}
|
||||
{"prompt": "高潜公司对于追求长期创造价值的毕业生来说为什么是好的选择?\n\n###\n\n", "completion": " 因为在高潜公司,毕业生可以通过参与创业公司的发展,获得更多的机会和成长空间,而不是被固定在某个职位上。此外,高潜公司通常会注重团队建设和文化建设,能够提供更好的工作环境和发展机会。###"}
|
||||
{"prompt": "如果想要寻找高潜公司,有哪些渠道可以利用?\n\n###\n\n", "completion": " 可以利用领英职场app和知潜微信小程序。领英职场app中入驻了很多高潜公司及高管,而知潜小程序现在也已经有100+高潜力创业公司入驻,都是经过筛选的、top VC投资过且赛道有前景的公司。此外,红杉和真格等VC机构自己的公众号也会经常披露投资的企业。###"}
|
||||
{"prompt": "为什么现在更多优秀的人才会加入创业公司?\n\n###\n\n", "completion": " 因为在当前社会的大趋势下,创业公司成为了创新生态的重要组成部分。创业公司在创造价值和推动行业发展方面具有更大的空间和潜力,而且随着移动互联网的发展,创业公司的崛起也得到了资本和政策的支持。因此,更多优秀的人才选择加入创业公司,以实现自己的创业梦想和获得更好的发展机会。###"}
|
||||
{"prompt": "高潜公司的优势在哪些方面?\n\n###\n\n", "completion": " 高潜公司在团队建设、文化建设和发展机会等方面具有优势。高潜公司通常会注重团队建设和文化建设,能够提供更好的工作环境和发展机会。同时,高潜公司的发展潜力和创新能力也很强,可以为毕业生提供更多的机会和成长空间。###"}
|
||||
@@ -1,21 +0,0 @@
|
||||
{"prompt":"什么是死锁?如何避免死锁?\n\n###\n\n","completion":" 死锁是指两个或多个进程在执行过程中,由于互相等待对方释放资源而造成的一种僵局。避免死锁的常用方法包括:破坏互斥条件、破坏请求和保持条件、破坏不剥夺条件、破坏循环等待条件。###"}
|
||||
{"prompt":"什么是虚拟内存?\n\n###\n\n","completion":" 虚拟内存是指一种计算机内存管理技术,它使得操作系统可以将进程所使用的虚拟地址空间映射到物理内存中,从而可以让多个进程共享物理内存。当物理内存不足时,操作系统会将一部分不常用的内存页面暂时存储到硬盘上,这样就可以释放出更多的物理内存供其他进程使用。###"}
|
||||
{"prompt":"什么是分页和分段?有什么区别?\n\n###\n\n","completion":" 分页和分段是虚拟内存管理中常用的两种技术。分页是将虚拟地址空间划分为固定大小的页面,而分段则是将虚拟地址空间划分为不同大小的段。区别在于,分页的页面大小是固定的,而分段的段大小是可变的;另外,分页是为了实现虚拟内存,而分段则是为了实现逻辑地址空间的管理。###"}
|
||||
{"prompt":"什么是lru算法?\n\n###\n\n","completion":" lru算法是一种页面置换算法,它的全称是least recently used,即最近最少使用算法。lru算法的基本思想是在物理内存中维护一个页面访问历史记录,当需要替换一页时,选择最近最少被访问的页面进行替换。###"}
|
||||
{"prompt":"什么是文件系统?\n\n###\n\n","completion":" 文件系统是一种用于管理计算机存储设备上文件和目录的组织方式。文件系统通常包括存储管理、目录管理、文件访问控制、文件系统维护等功能。常见的文件系统包括fat、ntfs、ext等。###"}
|
||||
{"prompt":"什么是并发和并行?\n\n###\n\n","completion":" 并发是指多个任务交替执行的情况,而并行是指多个任务同时执行的情况。并发的实现方式是通过时间分片技术,而并行的实现方式是通过多核处理器等硬件设备。###"}
|
||||
{"prompt":"什么是系统调用?\n\n###\n\n","completion":" 系统调用是操作系统中的一种机制,它允许用户空间程序请求操作系统内核提供一些服务,如读写文件、创建进程等。系统调用通常是通过软中断实现的###"}
|
||||
{"prompt":"什么是进程间通信?\n\n###\n\n","completion":" 进程间通信是指不同进程之间进行数据交换和通信的机制,常见的方式有管道、共享内存、消息队列、信号量等。###"}
|
||||
{"prompt":"什么是文件描述符?\n\n###\n\n","completion":" 文件描述符是一个非负整数,它是操作系统内核为了管理打开的文件而维护的一种抽象概念。应用程序使用文件描述符来访问文件或其他i\/o设备###"}
|
||||
{"prompt":"什么是中断? \n\n###\n\n","completion":" 中断是指计算机硬件或软件在执行程序时,暂停当前程序的执行,转而去执行另一个程序或处理器所需的其他任务的过程。中断可以是外部中断、内部中断或软中断。###"}
|
||||
{"prompt":"什么是页表?\n\n###\n\n","completion":" 页表是一种数据结构,它用于将虚拟地址转换为物理地址。在虚拟内存中,每个进程都有自己的页表,它描述了虚拟地址空间中每个页面对应的物理地址。 ###"}
|
||||
{"prompt":"什么是僵尸进程?\n\n###\n\n","completion":" 僵尸进程是指一个已经完成执行但其父进程还没有对其进行善后处理的进程,它会一直占用系统资源,直到其父进程将其处理为止。###"}
|
||||
{"prompt":"什么是守护进程? \n\n###\n\n","completion":" 守护进程是一种在后台运行的特殊进程,它通常用于提供某种服务或执行某种系统任务。守护进程通常不与任何用户界面交互,也不会终止,直到操作系统关机或重启。###"}
|
||||
{"prompt":"计算机操作系统是什么?\n\n###\n\n","completion":" 计算机操作系统是一种控制计算机硬件和协调应用程序运行的系统软件。###"}
|
||||
{"prompt":"什么是进程?\n\n###\n\n","completion":" 进程是一个正在执行中的程序实例,它包括了程序计数器、堆栈、数据区等信息。###"}
|
||||
{"prompt":"什么是线程?\n\n###\n\n","completion":" 线程是操作系统能够进行运算调度的最小单位,它是进程的一个执行流程,共享进程的地址空间。###"}
|
||||
{"prompt":"什么是虚拟内存?\n\n###\n\n","completion":" 虚拟内存是一种计算机系统内存管理技术,它使得应用程序认为它拥有连续的可用的内存,而实际上它通常是被分成多个物理内存碎片。###"}
|
||||
{"prompt":"什么是死锁?\n\n###\n\n","completion":" 死锁是指两个或多个进程在执行过程中,因争夺资源而造成的一种互相等待的现象,导致所有进程都无法继续执行。###"}
|
||||
{"prompt":"什么是缓存?\n\n###\n\n","completion":" 缓存是指在计算机中暂存数据的高速存储器,它可以提高计算机对数据的访问速度,避免频繁地访问较慢的主存储器。###"}
|
||||
{"prompt":"什么是文件系统?\n\n###\n\n","completion":" 文件系统是计算机中用来管理和组织文件的一种机制,它通过一系列的数据结构来描述文件和目录的组织方式,以及文件如何存储和访问。###"}
|
||||
{"prompt":"什么是调度算法?\n\n###\n\n","completion":" 调度算法是指操作系统中用来决定进程或线程在cpu上执行顺序的一种算法,它的目标是最大化系统吞吐量、最小化响应时间或最大化资源利用率等。###"}
|
||||
@@ -1,42 +1,32 @@
|
||||
import { GET, POST, DELETE } from './request';
|
||||
import type { ChatItemType, ChatSiteItemType } from '@/types/chat';
|
||||
import type { ChatItemType } from '@/types/chat';
|
||||
import type { InitChatResponse } from './response/chat';
|
||||
|
||||
/**
|
||||
* 获取一个聊天框的ID
|
||||
*/
|
||||
export const getChatSiteId = (modelId: string, isShare = false) =>
|
||||
GET<string>(`/chat/generate?modelId=${modelId}&isShare=${isShare ? 'true' : 'false'}`);
|
||||
|
||||
/**
|
||||
* 获取初始化聊天内容
|
||||
*/
|
||||
export const getInitChatSiteInfo = (chatId: string) =>
|
||||
GET<InitChatResponse>(`/chat/init?chatId=${chatId}`);
|
||||
export const getInitChatSiteInfo = (modelId: string, chatId: '' | string) =>
|
||||
GET<InitChatResponse>(`/chat/init?modelId=${modelId}&chatId=${chatId}`);
|
||||
|
||||
/**
|
||||
* 发送 GPT3 prompt
|
||||
* 获取历史记录
|
||||
*/
|
||||
export const postGPT3SendPrompt = ({
|
||||
chatId,
|
||||
prompt
|
||||
}: {
|
||||
prompt: ChatSiteItemType[];
|
||||
chatId: string;
|
||||
}) =>
|
||||
POST<string>(`/chat/gpt3`, {
|
||||
chatId,
|
||||
prompt: prompt.map((item) => ({
|
||||
obj: item.obj,
|
||||
value: item.value
|
||||
}))
|
||||
});
|
||||
export const getChatHistory = () =>
|
||||
GET<{ _id: string; title: string; modelId: string }[]>('/chat/getHistory');
|
||||
|
||||
/**
|
||||
* 删除一条历史记录
|
||||
*/
|
||||
export const delChatHistoryById = (id: string) => GET(`/chat/removeHistory?id=${id}`);
|
||||
|
||||
/**
|
||||
* 存储一轮对话
|
||||
*/
|
||||
export const postSaveChat = (data: { chatId: string; prompts: ChatItemType[] }) =>
|
||||
POST('/chat/saveChat', data);
|
||||
export const postSaveChat = (data: {
|
||||
modelId: string;
|
||||
chatId: '' | string;
|
||||
prompts: ChatItemType[];
|
||||
}) => POST<string>('/chat/saveChat', data);
|
||||
|
||||
/**
|
||||
* 删除一句对话
|
||||
|
||||
1
src/api/common.ts
Normal file
@@ -0,0 +1 @@
|
||||
import { GET, POST, DELETE } from './request';
|
||||
@@ -1,25 +0,0 @@
|
||||
import { GET, POST, DELETE, PUT } from './request';
|
||||
import { RequestPaging } from '../types/index';
|
||||
import { Obj2Query } from '@/utils/tools';
|
||||
import type { DataListItem } from '@/types/data';
|
||||
import type { PagingData } from '../types/index';
|
||||
import type { DataItemSchema } from '@/types/mongoSchema';
|
||||
import type { CreateDataProps } from '@/pages/data/components/CreateDataModal';
|
||||
|
||||
export const getDataList = () => GET<DataListItem[]>(`/data/getDataList`);
|
||||
|
||||
export const postData = (data: CreateDataProps) => POST<string>(`/data/postData`, data);
|
||||
|
||||
export const postSplitData = (dataId: string, text: string) =>
|
||||
POST(`/data/splitData`, { dataId, text });
|
||||
|
||||
export const updateDataName = (dataId: string, name: string) =>
|
||||
PUT(`/data/putDataName?dataId=${dataId}&name=${name}`);
|
||||
|
||||
export const delData = (dataId: string) => DELETE(`/data/delData?dataId=${dataId}`);
|
||||
|
||||
type GetDataItemsProps = RequestPaging & {
|
||||
dataId: string;
|
||||
};
|
||||
export const getDataItems = (data: GetDataItemsProps) =>
|
||||
GET<PagingData<DataItemSchema>>(`/data/getDataItems?${Obj2Query(data)}`);
|
||||
@@ -6,7 +6,7 @@ interface StreamFetchProps {
|
||||
abortSignal: AbortController;
|
||||
}
|
||||
export const streamFetch = ({ url, data, onMessage, abortSignal }: StreamFetchProps) =>
|
||||
new Promise(async (resolve, reject) => {
|
||||
new Promise<string>(async (resolve, reject) => {
|
||||
try {
|
||||
const res = await fetch(url, {
|
||||
method: 'POST',
|
||||
@@ -23,26 +23,30 @@ export const streamFetch = ({ url, data, onMessage, abortSignal }: StreamFetchPr
|
||||
let responseText = '';
|
||||
|
||||
const read = async () => {
|
||||
const { done, value } = await reader?.read();
|
||||
if (done) {
|
||||
if (res.status === 200) {
|
||||
resolve(responseText);
|
||||
} else {
|
||||
try {
|
||||
try {
|
||||
const { done, value } = await reader?.read();
|
||||
if (done) {
|
||||
if (res.status === 200) {
|
||||
resolve(responseText);
|
||||
} else {
|
||||
const parseError = JSON.parse(responseText);
|
||||
reject(parseError?.message || '请求异常');
|
||||
} catch (err) {
|
||||
reject('请求异常');
|
||||
}
|
||||
}
|
||||
|
||||
return;
|
||||
return;
|
||||
}
|
||||
const text = decoder.decode(value).replace(/<br\/>/g, '\n');
|
||||
res.status === 200 && onMessage(text);
|
||||
responseText += text;
|
||||
read();
|
||||
} catch (err: any) {
|
||||
if (err?.message === 'The user aborted a request.') {
|
||||
return resolve(responseText);
|
||||
}
|
||||
reject(typeof err === 'string' ? err : err?.message || '请求异常');
|
||||
}
|
||||
const text = decoder.decode(value).replace(/<br\/>/g, '\n');
|
||||
res.status === 200 && onMessage(text);
|
||||
responseText += text;
|
||||
read();
|
||||
};
|
||||
|
||||
read();
|
||||
} catch (err: any) {
|
||||
console.log(err, '====');
|
||||
|
||||
@@ -1,63 +1,95 @@
|
||||
import { GET, POST, DELETE, PUT } from './request';
|
||||
import type { ModelSchema, ModelDataSchema, ModelSplitDataSchema } from '@/types/mongoSchema';
|
||||
import type { ModelSchema, ModelDataSchema } from '@/types/mongoSchema';
|
||||
import { ModelUpdateParams } from '@/types/model';
|
||||
import { TrainingItemType } from '../types/training';
|
||||
import { RequestPaging } from '../types/index';
|
||||
import { Obj2Query } from '@/utils/tools';
|
||||
|
||||
/**
|
||||
* 获取模型列表
|
||||
*/
|
||||
export const getMyModels = () => GET<ModelSchema[]>('/model/list');
|
||||
|
||||
/**
|
||||
* 创建一个模型
|
||||
*/
|
||||
export const postCreateModel = (data: { name: string; serviceModelName: string }) =>
|
||||
POST<ModelSchema>('/model/create', data);
|
||||
|
||||
/**
|
||||
* 根据 ID 删除模型
|
||||
*/
|
||||
export const delModelById = (id: string) => DELETE(`/model/del?modelId=${id}`);
|
||||
|
||||
/**
|
||||
* 根据 ID 获取模型
|
||||
*/
|
||||
export const getModelById = (id: string) => GET<ModelSchema>(`/model/detail?modelId=${id}`);
|
||||
|
||||
/**
|
||||
* 根据 ID 更新模型
|
||||
*/
|
||||
export const putModelById = (id: string, data: ModelUpdateParams) =>
|
||||
PUT(`/model/update?modelId=${id}`, data);
|
||||
|
||||
export const postTrainModel = (id: string, form: FormData) =>
|
||||
POST(`/model/train/train?modelId=${id}`, form, {
|
||||
headers: {
|
||||
'content-type': 'multipart/form-data'
|
||||
}
|
||||
});
|
||||
|
||||
export const putModelTrainingStatus = (id: string) =>
|
||||
PUT(`/model/train/putTrainStatus?modelId=${id}`);
|
||||
|
||||
export const getModelTrainings = (id: string) =>
|
||||
GET<TrainingItemType[]>(`/model/train/getTrainings?modelId=${id}`);
|
||||
|
||||
/* 模型 data */
|
||||
|
||||
type GetModelDataListProps = RequestPaging & {
|
||||
modelId: string;
|
||||
searchText: string;
|
||||
};
|
||||
/**
|
||||
* 获取模型的知识库数据
|
||||
*/
|
||||
export const getModelDataList = (props: GetModelDataListProps) =>
|
||||
GET(`/model/data/getModelData?${Obj2Query(props)}`);
|
||||
|
||||
/**
|
||||
* 获取导出数据(不分页)
|
||||
*/
|
||||
export const getExportDataList = (modelId: string) =>
|
||||
GET<string>(`/model/data/exportModelData?modelId=${modelId}`);
|
||||
GET<[string, string][]>(`/model/data/exportModelData?modelId=${modelId}`);
|
||||
|
||||
export const getModelSplitDataList = (modelId: string) =>
|
||||
GET<ModelSplitDataSchema[]>(`/model/data/getSplitData?modelId=${modelId}`);
|
||||
/**
|
||||
* 获取模型正在拆分数据的数量
|
||||
*/
|
||||
export const getModelSplitDataListLen = (modelId: string) =>
|
||||
GET<number>(`/model/data/getSplitData?modelId=${modelId}`);
|
||||
|
||||
/**
|
||||
* 获取 web 页面内容
|
||||
*/
|
||||
export const getWebContent = (url: string) => POST<string>(`/model/data/fetchingUrlData`, { url });
|
||||
|
||||
/**
|
||||
* 手动输入数据
|
||||
*/
|
||||
export const postModelDataInput = (data: {
|
||||
modelId: string;
|
||||
data: { text: ModelDataSchema['text']; q: ModelDataSchema['q'] }[];
|
||||
data: { a: ModelDataSchema['a']; q: ModelDataSchema['q'] }[];
|
||||
}) => POST<number>(`/model/data/pushModelDataInput`, data);
|
||||
|
||||
export const postModelDataFileText = (data: { modelId: string; text: string; prompt: string }) =>
|
||||
POST(`/model/data/splitData`, data);
|
||||
/**
|
||||
* 拆分数据
|
||||
*/
|
||||
export const postModelDataSplitData = (data: {
|
||||
modelId: string;
|
||||
chunks: string[];
|
||||
prompt: string;
|
||||
mode: 'qa' | 'subsection';
|
||||
}) => POST(`/model/data/splitData`, data);
|
||||
|
||||
export const postModelDataJsonData = (
|
||||
modelId: string,
|
||||
jsonData: { prompt: string; completion: string; vector?: number[] }[]
|
||||
) => POST(`/model/data/pushModelDataJson`, { modelId, data: jsonData });
|
||||
/**
|
||||
* json导入数据
|
||||
*/
|
||||
export const postModelDataCsvData = (modelId: string, data: string[][]) =>
|
||||
POST<number>(`/model/data/pushModelDataCsv`, { modelId, data: data });
|
||||
|
||||
export const putModelDataById = (data: { dataId: string; text: string; q?: string }) =>
|
||||
/**
|
||||
* 更新模型数据
|
||||
*/
|
||||
export const putModelDataById = (data: { dataId: string; a: string; q?: string }) =>
|
||||
PUT('/model/data/putModelData', data);
|
||||
/**
|
||||
* 删除一条模型数据
|
||||
*/
|
||||
export const delOneModelData = (dataId: string) =>
|
||||
DELETE(`/model/data/delModelDataById?dataId=${dataId}`);
|
||||
|
||||
16
src/api/openapi.ts
Normal file
@@ -0,0 +1,16 @@
|
||||
import { GET, POST, DELETE } from './request';
|
||||
import { UserOpenApiKey } from '@/types/openapi';
|
||||
/**
|
||||
* crete a api key
|
||||
*/
|
||||
export const createAOpenApiKey = () => POST<string>('/openapi/postKey');
|
||||
|
||||
/**
|
||||
* get api keys
|
||||
*/
|
||||
export const getOpenApiKeys = () => GET<UserOpenApiKey[]>('/openapi/getKeys');
|
||||
|
||||
/**
|
||||
* delete api by id
|
||||
*/
|
||||
export const delOpenApiById = (id: string) => DELETE(`/openapi/delKey?id=${id}`);
|
||||
@@ -1,6 +1,6 @@
|
||||
import axios, { Method, InternalAxiosRequestConfig, AxiosResponse } from 'axios';
|
||||
import { getToken, clearToken } from '@/utils/user';
|
||||
import { TOKEN_ERROR_CODE } from '@/constants/responseCode';
|
||||
import { TOKEN_ERROR_CODE } from '@/service/errorCode';
|
||||
|
||||
interface ConfigType {
|
||||
headers?: { [key: string]: string };
|
||||
|
||||
1
src/api/response/chat.d.ts
vendored
@@ -6,7 +6,6 @@ export type InitChatResponse = {
|
||||
modelId: string;
|
||||
name: string;
|
||||
avatar: string;
|
||||
intro: string;
|
||||
chatModel: ModelSchema.service.chatModel; // 对话模型名
|
||||
modelName: ModelSchema.service.modelName; // 底层模型
|
||||
history: ChatItemType[];
|
||||
|
||||
8
src/api/response/user.d.ts
vendored
@@ -1,5 +1,13 @@
|
||||
import type { UserType } from '@/types/user';
|
||||
import type { PromotionRecordSchema } from '@/types/mongoSchema';
|
||||
export interface ResLogin {
|
||||
token: string;
|
||||
user: UserType;
|
||||
}
|
||||
|
||||
export interface PromotionRecordType {
|
||||
_id: PromotionRecordSchema['_id'];
|
||||
type: PromotionRecordSchema['type'];
|
||||
createTime: PromotionRecordSchema['createTime'];
|
||||
amount: PromotionRecordSchema['amount'];
|
||||
}
|
||||
|
||||
@@ -1,50 +1,66 @@
|
||||
import { GET, POST, PUT } from './request';
|
||||
import { createHashPassword, Obj2Query } from '@/utils/tools';
|
||||
import { ResLogin } from './response/user';
|
||||
import { EmailTypeEnum } from '@/constants/common';
|
||||
import { ResLogin, PromotionRecordType } from './response/user';
|
||||
import { UserAuthTypeEnum } from '@/constants/common';
|
||||
import { UserType, UserUpdateParams } from '@/types/user';
|
||||
import type { PagingData, RequestPaging } from '@/types';
|
||||
import { BillSchema, PaySchema } from '@/types/mongoSchema';
|
||||
import { adaptBill } from '@/utils/adapt';
|
||||
|
||||
export const sendCodeToEmail = ({ email, type }: { email: string; type: `${EmailTypeEnum}` }) =>
|
||||
GET('/user/sendEmail', { email, type });
|
||||
export const sendAuthCode = ({
|
||||
username,
|
||||
type
|
||||
}: {
|
||||
username: string;
|
||||
type: `${UserAuthTypeEnum}`;
|
||||
}) => GET('/user/sendAuthCode', { username, type });
|
||||
|
||||
export const getTokenLogin = () => GET<UserType>('/user/tokenLogin');
|
||||
|
||||
/* get promotion init data */
|
||||
export const getPromotionInitData = () =>
|
||||
GET<{
|
||||
invitedAmount: number;
|
||||
historyAmount: number;
|
||||
residueAmount: number;
|
||||
}>('/user/promotion/getPromotionData');
|
||||
|
||||
export const postRegister = ({
|
||||
email,
|
||||
username,
|
||||
password,
|
||||
code
|
||||
code,
|
||||
inviterId
|
||||
}: {
|
||||
email: string;
|
||||
username: string;
|
||||
code: string;
|
||||
password: string;
|
||||
inviterId: string;
|
||||
}) =>
|
||||
POST<ResLogin>('/user/register', {
|
||||
email,
|
||||
username,
|
||||
code,
|
||||
inviterId,
|
||||
password: createHashPassword(password)
|
||||
});
|
||||
|
||||
export const postFindPassword = ({
|
||||
email,
|
||||
username,
|
||||
code,
|
||||
password
|
||||
}: {
|
||||
email: string;
|
||||
username: string;
|
||||
code: string;
|
||||
password: string;
|
||||
}) =>
|
||||
POST<ResLogin>('/user/updatePasswordByCode', {
|
||||
email,
|
||||
username,
|
||||
code,
|
||||
password: createHashPassword(password)
|
||||
});
|
||||
|
||||
export const postLogin = ({ email, password }: { email: string; password: string }) =>
|
||||
export const postLogin = ({ username, password }: { username: string; password: string }) =>
|
||||
POST<ResLogin>('/user/loginByPassword', {
|
||||
email,
|
||||
username,
|
||||
password: createHashPassword(password)
|
||||
});
|
||||
|
||||
@@ -65,3 +81,7 @@ export const getPayCode = (amount: number) =>
|
||||
}>(`/user/getPayCode?amount=${amount}`);
|
||||
|
||||
export const checkPayResult = (payId: string) => GET<number>(`/user/checkPayResult?payId=${payId}`);
|
||||
|
||||
/* promotion records */
|
||||
export const getPromotionRecords = (data: RequestPaging) =>
|
||||
GET<PromotionRecordType>(`/user/promotion/getPromotions?${Obj2Query(data)}`);
|
||||
|
||||
1
src/components/Icon/icons/board.svg
Normal file
@@ -0,0 +1 @@
|
||||
<?xml version="1.0" standalone="no"?><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"><svg t="1680878351566" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="1173" xmlns:xlink="http://www.w3.org/1999/xlink" width="48" height="48"><path d="M896 771.413333h-768c-51.2 0-93.866667-42.666667-93.866667-93.866666V209.92c0-51.2 42.666667-93.866667 93.866667-93.866667h768c51.2 0 93.866667 42.666667 93.866667 93.866667v465.92c0 52.906667-42.666667 95.573333-93.866667 95.573333zM128 167.253333C104.106667 167.253333 85.333333 186.026667 85.333333 209.92v465.92c0 23.893333 18.773333 42.666667 42.666667 42.666667h768c23.893333 0 42.666667-18.773333 42.666667-42.666667V209.92c0-23.893333-18.773333-42.666667-42.666667-42.666667h-768z" p-id="1174"></path><path d="M512 907.946667c-13.653333 0-25.6-11.946667-25.6-25.6v-136.533334c0-13.653333 11.946667-25.6 25.6-25.6s25.6 11.946667 25.6 25.6v136.533334c0 13.653333-11.946667 25.6-25.6 25.6z" p-id="1175"></path><path d="M680.96 907.946667H343.04c-13.653333 0-25.6-11.946667-25.6-25.6s11.946667-25.6 25.6-25.6h337.92c13.653333 0 25.6 11.946667 25.6 25.6s-11.946667 25.6-25.6 25.6zM776.533333 648.533333h-529.066666c-13.653333 0-25.6-11.946667-25.6-25.6s11.946667-25.6 25.6-25.6h530.773333c13.653333 0 25.6 11.946667 25.6 25.6s-11.946667 25.6-27.306667 25.6z" p-id="1176"></path></svg>
|
||||
|
After Width: | Height: | Size: 1.4 KiB |
1
src/components/Icon/icons/dbModel.svg
Normal file
@@ -0,0 +1 @@
|
||||
<?xml version="1.0" standalone="no"?><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"><svg t="1682232349111" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="7070" xmlns:xlink="http://www.w3.org/1999/xlink" width="28" height="28"><path d="M512 102.6c110.7 0 215 12.3 293.9 34.7 35.8 10.2 65 22.1 84.5 34.7 18.6 12 21.3 19.7 21.6 20.6-0.2 0.9-3 8.6-21.6 20.6-19.5 12.5-48.7 24.5-84.5 34.7-78.9 22.3-183.2 34.7-293.9 34.7s-215-12.3-293.9-34.7c-35.8-10.2-65-22.1-84.5-34.7-18.6-12-21.3-19.7-21.6-20.6 0.2-0.9 3-8.6 21.6-20.6 19.5-12.5 48.7-24.5 84.5-34.7 78.9-22.4 183.2-34.7 293.9-34.7m0-40c-243 0-440 58.2-440 130s197 130 440 130 440-58.2 440-130-197-130-440-130zM112 190.4H72v641h40v-641z m840-0.3h-40v641h40v-641zM912 831v0.5c-0.2 0.9-3 8.6-21.6 20.6-19.5 12.5-48.7 24.5-84.5 34.7-78.9 22.3-183.2 34.6-293.9 34.6s-215-12.3-293.9-34.7c-35.8-10.2-65-22.1-84.5-34.7-18.6-12-21.3-19.7-21.6-20.6v-0.3l-40 0.3v0.1c0 71.8 197 130 440 130s440-58.2 440-130v-0.4l-40-0.1z m0-210.5v0.5c-0.2 0.9-3 8.6-21.6 20.6-19.5 12.5-48.7 24.5-84.5 34.7C727 698.6 622.7 711 512 711s-215-12.3-293.9-34.7c-35.8-10.2-65-22.1-84.5-34.7-18.6-12-21.3-19.7-21.6-20.6v-0.3l-40 0.3v0.1c0 71.8 197 130 440 130s440-58.2 440-130v-0.4l-40-0.2z m0-221.5v0.5c-0.2 0.9-3 8.6-21.6 20.6-19.5 12.5-48.7 24.5-84.5 34.7-78.9 22.3-183.2 34.7-293.9 34.7s-215-12.3-293.9-34.7c-35.8-10.2-65-22.1-84.5-34.7-18.6-12-21.3-19.7-21.6-20.6v-0.3l-40 0.3v0.1c0 71.8 197 130 440 130s440-58.2 440-130v-0.4l-40-0.2z" fill="" p-id="7071"></path></svg>
|
||||
|
After Width: | Height: | Size: 1.5 KiB |
1
src/components/Icon/icons/delete.svg
Normal file
@@ -0,0 +1 @@
|
||||
<?xml version="1.0" standalone="no"?><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"><svg t="1681997838051" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="4520" xmlns:xlink="http://www.w3.org/1999/xlink" width="48" height="48"><path d="M898 178.7H665.3c4.3-9.8 6.7-20.6 6.7-32 0-44-36-80-80-80H432c-44 0-80 36-80 80 0 11.4 2.4 22.2 6.7 32H126c-13.2 0-24 10.8-24 24s10.8 24 24 24h772c13.2 0 24-10.8 24-24s-10.8-24-24-24z m-466 0c-8.5 0-16.5-3.4-22.6-9.4-6.1-6.1-9.4-14.1-9.4-22.6s3.4-16.5 9.4-22.6c6.1-6.1 14.1-9.4 22.6-9.4h160c8.5 0 16.5 3.4 22.6 9.4 6.1 6.1 9.4 14.1 9.4 22.6 0 8.5-3.4 16.5-9.4 22.6-6.1 6.1-14.1 9.4-22.6 9.4H432zM513 774.7c18.1 0 33-14.8 33-33v-334c0-18.1-14.9-33-33-33h-2c-18.1 0-33 14.8-33 33v334c0 18.2 14.8 33 33 33h2zM363 774.7c18.1 0 33-14.8 33-33v-334c0-18.1-14.9-33-33-33h-2c-18.1 0-33 14.8-33 33v334c0 18.2 14.8 33 33 33h2zM663 774.7c18.1 0 33-14.8 33-33v-334c0-18.1-14.9-33-33-33h-2c-18.1 0-33 14.8-33 33v334c0 18.2 14.8 33 33 33h2z" p-id="4521"></path><path d="M812 280.7c-13.3 0-24 10.7-24 24v530c0 41.9-34.1 76-76 76H312c-41.9 0-76-34.1-76-76v-530c0-13.3-10.7-24-24-24s-24 10.7-24 24v530c0 68.4 55.6 124 124 124h400c68.4 0 124-55.6 124-124v-530c0-13.2-10.7-24-24-24z" p-id="4522"></path></svg>
|
||||
|
After Width: | Height: | Size: 1.3 KiB |
1
src/components/Icon/icons/develop.svg
Normal file
@@ -0,0 +1 @@
|
||||
<?xml version="1.0" standalone="no"?><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"><svg t="1680878410563" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="2745" xmlns:xlink="http://www.w3.org/1999/xlink" width="48" height="48"><path d="M256 512l81.6 108.8a32 32 0 0 1-51.2 38.4l-96-128a31.968 31.968 0 0 1 0-38.4l96-128a32 32 0 0 1 51.2 38.4L256 512zM670.4 620.8a32 32 0 0 0 51.2 38.4l96-128a31.968 31.968 0 0 0 0-38.4l-96-128a32 32 0 0 0-51.2 38.4L752 512l-81.6 108.8zM503.232 646.944a32 32 0 1 1-62.464-13.888l64-288a32 32 0 1 1 62.464 13.888l-64 288z" p-id="2746"></path><path d="M160 144a32 32 0 0 0-32 32V864a32 32 0 0 0 32 32h688a32 32 0 0 0 32-32V176a32 32 0 0 0-32-32H160z m0-64h688a96 96 0 0 1 96 96V864a96 96 0 0 1-96 96H160a96 96 0 0 1-96-96V176a96 96 0 0 1 96-96z" p-id="2747"></path></svg>
|
||||
|
After Width: | Height: | Size: 897 B |
1
src/components/Icon/icons/history.svg
Normal file
@@ -0,0 +1 @@
|
||||
<?xml version="1.0" standalone="no"?><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"><svg t="1682232686576" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="8959" xmlns:xlink="http://www.w3.org/1999/xlink" width="28" height="28"><path d="M762.805186 140.938939c-14.335497-9.66922-33.725102-5.887081-43.373857 8.398274-9.648754 14.295588-5.897314 33.714869 8.398274 43.373857 106.369609 71.852468 169.864736 191.267185 169.864736 319.445496 0 212.414831-172.802648 385.217479-385.217479 385.217479S127.259382 724.571397 127.259382 512.156566c0-128.178311 63.494103-247.593028 169.864736-319.445496 14.295588-9.658987 18.047028-29.078269 8.398274-43.373857-9.658987-14.285355-29.088502-18.067494-43.373857-8.398274C138.575102 224.432539 64.791655 363.206162 64.791655 512.156566c0 246.851131 200.834074 447.685205 447.685205 447.685205S960.162066 759.007697 960.162066 512.156566C960.162066 363.206162 886.377596 224.432539 762.805186 140.938939z" p-id="8960"></path><path d="M401.003 64.47136c-17.253966 0-31.234375 13.980409-31.234375 31.233352l0 30.470989c0 17.253966 13.980409 31.234375 31.234375 31.234375s31.234375-13.980409 31.234375-31.234375L432.237375 95.704712C432.236352 78.450746 418.256966 64.47136 401.003 64.47136z" p-id="8961"></path><path d="M623.950721 64.47136c-17.253966 0-31.233352 13.980409-31.233352 31.233352l0 30.470989c0 17.253966 13.980409 31.234375 31.233352 31.234375s31.234375-13.980409 31.234375-31.234375L655.185097 95.704712C655.184073 78.450746 641.204687 64.47136 623.950721 64.47136z" p-id="8962"></path><path d="M426.012603 227.493248c11.214413 18.047028 41.970904 48.589648 86.157265 48.589648 43.963281 0 75.105558-30.318516 86.574774-48.223305 9.222035-14.396895 5.03262-33.358759-9.242502-42.763966-14.304797-9.405207-33.593096-5.398964-43.159986 8.764618-0.132006 0.193405-13.614066 19.754926-34.172287 19.754926-19.989263 
0-32.423457-18.098193-33.267685-19.36914-9.160637-14.427594-28.264741-18.799158-42.834574-9.770528C421.416935 193.584973 416.912341 212.841549 426.012603 227.493248z" p-id="8963"></path><path d="M510.781242 335.164502c-17.253966 0-31.233352 13.980409-31.233352 31.233352l0 208.225415c0 0.63445 0.149403 1.227967 0.187265 1.853208 0.067538 1.115404 0.148379 2.217505 0.333598 3.314489 0.168846 1.00898 0.416486 1.978051 0.679475 2.951215 0.258896 0.954745 0.529049 1.895163 0.87595 2.821255 0.36839 0.981351 0.801249 1.916653 1.26276 2.847861 0.431835 0.876973 0.880043 1.734504 1.393743 2.569522 0.532119 0.860601 1.115404 1.670036 1.727341 2.472308 0.610914 0.805342 1.235131 1.588171 1.926886 2.336208 0.688685 0.74292 1.424442 1.420349 2.181689 2.093684 0.741897 0.659009 1.484817 1.303692 2.298346 1.89721 0.899486 0.657986 1.850138 1.222851 2.819209 1.783623 0.544399 0.314155 1.00898 0.714268 1.577938 0.998747l208.225415 104.113219c4.484128 2.236947 9.252735 3.304256 13.94971 3.304256 11.44875 0 22.479991-6.334265 27.959795-17.274432 7.706519-15.433504 1.454118-34.192753-13.970176-41.909505l-190.961216-95.480608L542.015617 366.397854C542.015617 349.143888 528.035208 335.164502 510.781242 335.164502z" p-id="8964"></path></svg>
|
||||
|
After Width: | Height: | Size: 3.1 KiB |
1
src/components/Icon/icons/promotion.svg
Normal file
@@ -0,0 +1 @@
|
||||
<?xml version="1.0" standalone="no"?><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"><svg t="1682078370900" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="3577" xmlns:xlink="http://www.w3.org/1999/xlink" width="32" height="32"><path d="M941.312 888.704H628.032a32 32 0 0 1 0-64h313.28a32 32 0 0 1 0 64zM519.808 576.768c-158.976 0-288.384-129.344-288.384-288.384S360.832 0 519.808 0s288.384 129.344 288.384 288.384-129.408 288.384-288.384 288.384z m0-512.768C396.096 64 295.424 164.672 295.424 288.384s100.672 224.384 224.384 224.384c123.776 0 224.384-100.672 224.384-224.384S643.584 64 519.808 64z" p-id="3578"></path><path d="M763.264 606.528a31.552 31.552 0 0 1-16.96-4.864 427.2 427.2 0 0 0-100.544-45.952 32 32 0 0 1-21.184-40 31.744 31.744 0 0 1 39.936-21.184 492.16 492.16 0 0 1 115.712 52.864 32 32 0 0 1-16.96 59.136zM59.776 996.928a32 32 0 0 1-32-32 489.6 489.6 0 0 1 347.328-470.464 32 32 0 1 1 18.816 61.184 425.856 425.856 0 0 0-302.144 409.28 32 32 0 0 1-32 32zM964.224 879.68a32.128 32.128 0 0 1-24.32-11.2l-108.224-126.336a32 32 0 1 1 48.64-41.6l108.224 126.336a32 32 0 0 1-24.32 52.8z" p-id="3579"></path><path d="M856 1024a32 32 0 0 1-25.664-51.2l108.224-144.32a32.064 32.064 0 0 1 51.264 38.336L881.6 1011.2a32 32 0 0 1-25.6 12.8z" p-id="3580"></path></svg>
|
||||
|
After Width: | Height: | Size: 1.3 KiB |
1
src/components/Icon/icons/stop.svg
Normal file
@@ -0,0 +1 @@
|
||||
<?xml version="1.0" standalone="no"?><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"><svg t="1682424901088" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="3662" xmlns:xlink="http://www.w3.org/1999/xlink" width="32" height="32"><path d="M885.333333 85.333333H138.666667a53.393333 53.393333 0 0 0-53.333334 53.333334v746.666666a53.393333 53.393333 0 0 0 53.333334 53.333334h746.666666a53.393333 53.393333 0 0 0 53.333334-53.333334V138.666667a53.393333 53.393333 0 0 0-53.333334-53.333334z m-160 602.666667a37.373333 37.373333 0 0 1-37.333333 37.333333H336a37.373333 37.373333 0 0 1-37.333333-37.333333V336a37.373333 37.373333 0 0 1 37.333333-37.333333h352a37.373333 37.373333 0 0 1 37.333333 37.333333z" p-id="3663"></path></svg>
|
||||
|
After Width: | Height: | Size: 823 B |
1
src/components/Icon/icons/user.svg
Normal file
@@ -0,0 +1 @@
|
||||
<?xml version="1.0" standalone="no"?><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"><svg t="1680878383832" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="1637" xmlns:xlink="http://www.w3.org/1999/xlink" width="48" height="48"><path d="M511.333 63.333c-247.424 0-448 200.576-448 448s200.576 448 448 448 448-200.576 448-448-200.576-448-448-448z m0 832c-51.868 0-102.15-10.144-149.451-30.15-36.011-15.231-69.123-35.67-98.812-60.897 12.177-31.985 42.226-63.875 84.223-88.903C396.189 686.243 456.222 669.53 512 669.53c55.631 0 115.416 16.658 164.026 45.703 41.762 24.953 71.689 56.812 83.863 88.804-29.764 25.342-62.976 45.865-99.106 61.146-47.299 20.006-97.582 30.15-149.45 30.15z m296.268-139.658c-20.493-35.937-54.353-68.855-98.747-95.381C649.75 624.979 579.839 605.53 512 605.53c-67.964 0-138.094 19.488-197.471 54.875-44.644 26.606-78.656 59.594-99.195 95.586-23.835-28.755-43.234-60.652-57.85-95.208-20.006-47.3-30.15-97.583-30.15-149.451s10.144-102.15 30.15-149.451c19.337-45.719 47.034-86.792 82.321-122.078 35.286-35.287 76.359-62.983 122.078-82.321 47.3-20.006 97.583-30.15 149.451-30.15 51.868 0 102.15 10.144 149.451 30.15 45.719 19.337 86.792 47.034 122.078 82.321 35.287 35.286 62.983 76.359 82.321 122.078 20.006 47.3 30.15 97.583 30.15 149.451s-10.144 102.15-30.15 149.451c-14.563 34.429-33.869 66.22-57.583 94.892z" p-id="1638"></path><path d="M512 220.223c-88.224 0-160 71.776-160 160s71.776 160 160 160c88.225 0 160-71.775 160-160s-71.775-160-160-160z m0 256c-52.935 0-96-43.065-96-96s43.065-96 96-96 96 43.065 96 96-43.065 96-96 96z" p-id="1639"></path></svg>
|
||||
|
After Width: | Height: | Size: 1.6 KiB |
1
src/components/Icon/icons/withdraw.svg
Normal file
@@ -0,0 +1 @@
|
||||
<?xml version="1.0" standalone="no"?><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"><svg t="1682079057126" class="icon" viewBox="0 0 1322 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="2677" xmlns:xlink="http://www.w3.org/1999/xlink" width="36.1484375" height="28"><path d="M952.04654459 837.88839531H336.95615443A113.52706888 113.52706888 0 0 1 223.61160468 724.54384556v-419.79462838h728.43493991a113.52706888 113.52706888 0 0 1 113.34454973 113.34454975V724.54384556a113.52706888 113.52706888 0 0 1-113.34454973 113.34454975zM278.36742569 359.13999928v365.03880736a58.77124787 58.77124787 0 0 0 58.58872873 58.58872874h615.09039016a58.77124787 58.77124787 0 0 0 58.58872874-58.58872874V417.72872802a58.77124787 58.77124787 0 0 0-58.58872874-58.58872874z" p-id="2678"></path><path d="M278.36742569 350.37906772H223.61160468V297.44844068A111.51935577 111.51935577 0 0 1 334.94844068 186.11160469h334.01050924a111.51935577 111.51935577 0 0 1 111.33683598 111.33683599v49.09771996h-54.75582101V297.44844068A56.76353475 56.76353475 0 0 0 668.95894991 240.8674257H334.94844068A56.76353475 56.76353475 0 0 0 278.36742569 297.44844068zM1038.19570329 704.83175018H825.92563707A131.59649008 131.59649008 0 0 1 825.92563707 441.63877003h208.43715913v54.75582103H825.92563707a76.84066906 76.84066906 0 0 0 0 153.86385725h212.27006621z" p-id="2679"></path><path d="M889.80742792 600.43065117h-65.34194654a27.37791082 27.37791082 0 0 1 0-54.75582103h65.34194654a27.37791082 27.37791082 0 0 1-1e-8 54.75582103z" p-id="2680"></path></svg>
|
||||
|
After Width: | Height: | Size: 1.6 KiB |
@@ -1,7 +1,6 @@
|
||||
import React from 'react';
|
||||
import type { IconProps } from '@chakra-ui/react';
|
||||
import { Icon } from '@chakra-ui/react';
|
||||
import dynamic from 'next/dynamic';
|
||||
|
||||
const map = {
|
||||
model: require('./icons/model.svg').default,
|
||||
@@ -10,14 +9,31 @@ const map = {
|
||||
menu: require('./icons/menu.svg').default,
|
||||
pay: require('./icons/pay.svg').default,
|
||||
copy: require('./icons/copy.svg').default,
|
||||
chatSend: require('./icons/chatSend.svg').default
|
||||
chatSend: require('./icons/chatSend.svg').default,
|
||||
board: require('./icons/board.svg').default,
|
||||
develop: require('./icons/develop.svg').default,
|
||||
user: require('./icons/user.svg').default,
|
||||
promotion: require('./icons/promotion.svg').default,
|
||||
delete: require('./icons/delete.svg').default,
|
||||
withdraw: require('./icons/withdraw.svg').default,
|
||||
dbModel: require('./icons/dbModel.svg').default,
|
||||
history: require('./icons/history.svg').default,
|
||||
stop: require('./icons/stop.svg').default
|
||||
};
|
||||
|
||||
export type IconName = keyof typeof map;
|
||||
|
||||
const MyIcon = ({ name, w = 'auto', h = 'auto', ...props }: { name: IconName } & IconProps) => {
|
||||
return map[name] ? (
|
||||
<Icon as={map[name]} w={w} h={h} boxSizing={'content-box'} verticalAlign={'top'} {...props} />
|
||||
<Icon
|
||||
as={map[name]}
|
||||
w={w}
|
||||
h={h}
|
||||
boxSizing={'content-box'}
|
||||
verticalAlign={'top'}
|
||||
fill={'currentcolor'}
|
||||
{...props}
|
||||
/>
|
||||
) : null;
|
||||
};
|
||||
|
||||
|
||||
@@ -33,7 +33,7 @@ const Auth = ({ children }: { children: JSX.Element }) => {
|
||||
{
|
||||
onError(error) {
|
||||
console.log('error->', error);
|
||||
router.push('/login');
|
||||
router.replace('/login');
|
||||
toast();
|
||||
},
|
||||
onSettled() {
|
||||
|
||||
@@ -16,27 +16,33 @@ const unShowLayoutRoute: { [key: string]: boolean } = {
|
||||
const navbarList = [
|
||||
{
|
||||
label: '介绍',
|
||||
icon: 'icon-gongzuotai-01',
|
||||
icon: 'board',
|
||||
link: '/',
|
||||
activeLink: ['/']
|
||||
},
|
||||
{
|
||||
label: '模型',
|
||||
icon: 'icon-moxing',
|
||||
icon: 'model',
|
||||
link: '/model/list',
|
||||
activeLink: ['/model/list', '/model/detail']
|
||||
},
|
||||
// {
|
||||
// label: '数据',
|
||||
// icon: 'icon-datafull',
|
||||
// link: '/data/list',
|
||||
// activeLink: ['/data/list', '/data/detail']
|
||||
// },
|
||||
{
|
||||
label: '账号',
|
||||
icon: 'icon-yonghu-yuan',
|
||||
icon: 'user',
|
||||
link: '/number/setting',
|
||||
activeLink: ['/number/setting']
|
||||
},
|
||||
{
|
||||
label: '邀请',
|
||||
icon: 'promotion',
|
||||
link: '/promotion',
|
||||
activeLink: ['/promotion']
|
||||
},
|
||||
{
|
||||
label: '开发',
|
||||
icon: 'develop',
|
||||
link: '/openapi',
|
||||
activeLink: ['/openapi']
|
||||
}
|
||||
];
|
||||
|
||||
|
||||
@@ -2,8 +2,7 @@ import React from 'react';
|
||||
import { Box, Flex } from '@chakra-ui/react';
|
||||
import Image from 'next/image';
|
||||
import { useRouter } from 'next/router';
|
||||
import Icon from '../Iconfont';
|
||||
|
||||
import MyIcon from '../Icon';
|
||||
export enum NavbarTypeEnum {
|
||||
normal = 'normal',
|
||||
small = 'small'
|
||||
@@ -66,20 +65,16 @@ const Navbar = ({
|
||||
backgroundColor: 'transparent'
|
||||
})}
|
||||
>
|
||||
<Icon
|
||||
name={item.icon}
|
||||
width={24}
|
||||
height={24}
|
||||
color={item.activeLink.includes(router.pathname) ? '#2B6CB0' : '#4A5568'}
|
||||
<MyIcon
|
||||
name={item.icon as any}
|
||||
width={'24px'}
|
||||
height={'24px'}
|
||||
fill={item.activeLink.includes(router.pathname) ? '#2B6CB0' : '#4A5568'}
|
||||
/>
|
||||
<Box mt={1}>{item.label}</Box>
|
||||
</Flex>
|
||||
))}
|
||||
</Box>
|
||||
{/* 通知 icon */}
|
||||
{/* <Flex className={styles.informIcon} mb={5} justifyContent={'center'}>
|
||||
<Icon name={'icon-tongzhi'} width={28} height={28} color={'#718096'}></Icon>
|
||||
</Flex> */}
|
||||
</Flex>
|
||||
);
|
||||
};
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import React from 'react';
|
||||
import { useRouter } from 'next/router';
|
||||
import Icon from '../Iconfont';
|
||||
import MyIcon from '../Icon';
|
||||
import {
|
||||
Flex,
|
||||
Drawer,
|
||||
@@ -39,9 +39,8 @@ const NavbarPhone = ({
|
||||
px={7}
|
||||
>
|
||||
<Box onClick={onOpen}>
|
||||
<Icon name="icon-caidan" width={20} height={20}></Icon>
|
||||
<MyIcon name="menu" width={'20px'} height={'20px'} color={'blackAlpha.600'}></MyIcon>
|
||||
</Box>
|
||||
{/* <Icon name="icon-tongzhi" width={20} height={20}></Icon> */}
|
||||
</Flex>
|
||||
<Drawer isOpen={isOpen} placement="left" size={'xs'} onClose={onClose}>
|
||||
<DrawerOverlay />
|
||||
@@ -74,11 +73,11 @@ const NavbarPhone = ({
|
||||
backgroundColor: 'transparent'
|
||||
})}
|
||||
>
|
||||
<Icon
|
||||
name={item.icon}
|
||||
width={24}
|
||||
height={24}
|
||||
color={item.activeLink.includes(router.pathname) ? '#2B6CB0' : '#4A5568'}
|
||||
<MyIcon
|
||||
name={item.icon as any}
|
||||
width={'24px'}
|
||||
height={'24px'}
|
||||
fill={item.activeLink.includes(router.pathname) ? '#2B6CB0' : '#4A5568'}
|
||||
/>
|
||||
<Box ml={5}>{item.label}</Box>
|
||||
</Flex>
|
||||
|
||||
@@ -160,7 +160,7 @@
|
||||
}
|
||||
.markdown ul,
|
||||
.markdown ol {
|
||||
padding-left: 1em;
|
||||
padding-left: 2em;
|
||||
}
|
||||
.markdown ul.no-list,
|
||||
.markdown ol.no-list {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import React, { memo, useMemo } from 'react';
|
||||
import React, { memo } from 'react';
|
||||
import ReactMarkdown from 'react-markdown';
|
||||
import { Prism as SyntaxHighlighter } from 'react-syntax-highlighter';
|
||||
import { Box, Flex, useColorModeValue } from '@chakra-ui/react';
|
||||
@@ -13,7 +13,6 @@ import styles from './index.module.scss';
|
||||
import { codeLight } from './codeLight';
|
||||
|
||||
const Markdown = ({ source, isChatting = false }: { source: string; isChatting?: boolean }) => {
|
||||
const formatSource = useMemo(() => source, [source]);
|
||||
const { copyData } = useCopyData();
|
||||
|
||||
return (
|
||||
@@ -63,7 +62,7 @@ const Markdown = ({ source, isChatting = false }: { source: string; isChatting?:
|
||||
}}
|
||||
linkTarget="_blank"
|
||||
>
|
||||
{formatSource}
|
||||
{source}
|
||||
</ReactMarkdown>
|
||||
);
|
||||
};
|
||||
|
||||
52
src/components/Radio/index.tsx
Normal file
@@ -0,0 +1,52 @@
|
||||
import React from 'react';
|
||||
import { Stack, Box, Flex, useTheme } from '@chakra-ui/react';
|
||||
import type { StackProps } from '@chakra-ui/react';
|
||||
|
||||
// @ts-ignore
|
||||
interface Props extends StackProps {
|
||||
list: { label: string; value: string | number }[];
|
||||
value: string | number;
|
||||
onChange: (e: string | number) => void;
|
||||
}
|
||||
|
||||
const Radio = ({ list, value, onChange, ...props }: Props) => {
|
||||
return (
|
||||
<Stack {...props} spacing={5} direction={'row'}>
|
||||
{list.map((item) => (
|
||||
<Flex
|
||||
key={item.value}
|
||||
alignItems={'center'}
|
||||
cursor={'pointer'}
|
||||
userSelect={'none'}
|
||||
_before={{
|
||||
content: '""',
|
||||
w: '16px',
|
||||
h: '16px',
|
||||
mr: 1,
|
||||
borderRadius: '16px',
|
||||
transition: '0.2s',
|
||||
...(value === item.value
|
||||
? {
|
||||
border: '5px solid',
|
||||
borderColor: 'blue.500'
|
||||
}
|
||||
: {
|
||||
border: '2px solid',
|
||||
borderColor: 'gray.200'
|
||||
})
|
||||
}}
|
||||
_hover={{
|
||||
_before: {
|
||||
borderColor: 'blue.400'
|
||||
}
|
||||
}}
|
||||
onClick={() => onChange(item.value)}
|
||||
>
|
||||
{item.label}
|
||||
</Flex>
|
||||
))}
|
||||
</Stack>
|
||||
);
|
||||
};
|
||||
|
||||
export default Radio;
|
||||
@@ -23,15 +23,15 @@ const WxConcat = ({ onClose }: { onClose: () => void }) => {
|
||||
<ModalBody textAlign={'center'}>
|
||||
<Image
|
||||
style={{ margin: 'auto' }}
|
||||
src={'/imgs/wxcode300.jpg'}
|
||||
src={'/imgs/wx300.jpg'}
|
||||
width={200}
|
||||
height={200}
|
||||
alt=""
|
||||
/>
|
||||
<Box mt={2}>
|
||||
微信号:{' '}
|
||||
微信号:
|
||||
<Box as={'span'} userSelect={'all'}>
|
||||
YNyiqi
|
||||
fastgpt123
|
||||
</Box>
|
||||
</Box>
|
||||
</ModalBody>
|
||||
|
||||
@@ -1,64 +1,6 @@
|
||||
export enum EmailTypeEnum {
|
||||
export enum UserAuthTypeEnum {
|
||||
register = 'register',
|
||||
findPassword = 'findPassword'
|
||||
}
|
||||
|
||||
export const PRICE_SCALE = 100000;
|
||||
|
||||
export const introPage = `
|
||||
## 欢迎使用 Fast GPT
|
||||
|
||||
[Git 仓库](https://github.com/c121914yu/FastGPT)
|
||||
|
||||
### 交流群/问题反馈
|
||||
wx: YNyiqi
|
||||

|
||||
|
||||
|
||||
### 快速开始
|
||||
1. 使用邮箱注册账号。
|
||||
2. 进入账号页面,添加关联账号,目前只有 openai 的账号可以添加,直接去 openai 官网,把 API Key 粘贴过来。
|
||||
3. 如果填写了自己的 openai 账号,使用时会直接用你的账号。如果没有填写,需要付费使用平台的账号。
|
||||
4. 进入模型页,创建一个模型,建议直接用 ChatGPT。
|
||||
5. 在模型列表点击【对话】,即可使用 API 进行聊天。
|
||||
|
||||
### 定制 prompt
|
||||
|
||||
1. 进入模型编辑页
|
||||
2. 调整温度和提示词
|
||||
3. 使用该模型对话。每次对话时,提示词和温度都会自动注入,方便管理个人的模型。建议把自己日常经常需要使用的 5~10 个方向预设好。
|
||||
|
||||
### 知识库
|
||||
|
||||
1. 创建模型时选择【知识库】
|
||||
2. 进入模型编辑页
|
||||
3. 导入数据,可以选择手动导入,或者选择文件导入。文件导入会自动调用 chatGPT 理解文件内容,并生成知识库。
|
||||
4. 使用该模型对话。
|
||||
|
||||
注意:使用知识库模型对话时,tokens 消耗会加快。
|
||||
`;
|
||||
|
||||
export const chatProblem = `
|
||||
## 常见问题
|
||||
**内容长度**
|
||||
单次最长 4000 tokens, 上下文最长 8000 tokens, 上下文超长时会被截断。
|
||||
|
||||
**删除和复制**
|
||||
点击对话头像,可以选择复制或删除该条内容。
|
||||
|
||||
**代理出错**
|
||||
服务器代理不稳定,可以过一会儿再尝试。
|
||||
`;
|
||||
|
||||
export const versionIntro = `
|
||||
## Fast GPT V2.3
|
||||
* 数据集导出功能,可用于知识库分享。
|
||||
* 优化文件拆分功能,可自定义提示词。
|
||||
* 定制知识库:创建模型时可以选择【知识库】模型, 可以手动导入知识点或者直接导入一个文件自动学习。
|
||||
`;
|
||||
|
||||
export const shareHint = `
|
||||
你正准备分享对话,请确保分享链接不会滥用,因为它是使用的是你的 API key。
|
||||
* 分享空白对话:为该模型创建一个空白的聊天分享出去。
|
||||
* 分享当前对话:会把当前聊天的内容也分享出去,但是要注意不要多个人同时用一个聊天内容。
|
||||
`;
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
import type { DataType } from '@/types/data';
|
||||
|
||||
export const DataTypeTextMap: Record<DataType, string> = {
|
||||
QA: '问答拆分',
|
||||
abstract: '摘要总结'
|
||||
};
|
||||
@@ -1,75 +1,53 @@
|
||||
import type { ServiceName, ModelDataType, ModelSchema } from '@/types/mongoSchema';
|
||||
import type { RedisModelDataItemType } from '@/types/redis';
|
||||
import type { ModelSchema } from '@/types/mongoSchema';
|
||||
|
||||
export enum ChatModelNameEnum {
|
||||
GPT35 = 'gpt-3.5-turbo',
|
||||
VECTOR_GPT = 'VECTOR_GPT',
|
||||
GPT3 = 'text-davinci-003',
|
||||
VECTOR = 'text-embedding-ada-002'
|
||||
export const embeddingModel = 'text-embedding-ada-002';
|
||||
export enum ChatModelEnum {
|
||||
'GPT35' = 'gpt-3.5-turbo',
|
||||
'GPT4' = 'gpt-4',
|
||||
'GPT432k' = 'gpt-4-32k'
|
||||
}
|
||||
|
||||
export const ChatModelNameMap = {
|
||||
[ChatModelNameEnum.GPT35]: 'gpt-3.5-turbo',
|
||||
[ChatModelNameEnum.VECTOR_GPT]: 'gpt-3.5-turbo',
|
||||
[ChatModelNameEnum.GPT3]: 'text-davinci-003',
|
||||
[ChatModelNameEnum.VECTOR]: 'text-embedding-ada-002'
|
||||
export enum ModelNameEnum {
|
||||
GPT35 = 'gpt-3.5-turbo',
|
||||
VECTOR_GPT = 'VECTOR_GPT'
|
||||
}
|
||||
|
||||
export const Model2ChatModelMap: Record<`${ModelNameEnum}`, `${ChatModelEnum}`> = {
|
||||
[ModelNameEnum.GPT35]: 'gpt-3.5-turbo',
|
||||
[ModelNameEnum.VECTOR_GPT]: 'gpt-3.5-turbo'
|
||||
};
|
||||
|
||||
export type ModelConstantsData = {
|
||||
serviceCompany: `${ServiceName}`;
|
||||
icon: 'model' | 'dbModel';
|
||||
name: string;
|
||||
model: `${ChatModelNameEnum}`;
|
||||
model: `${ModelNameEnum}`;
|
||||
trainName: string; // 空字符串代表不能训练
|
||||
maxToken: number;
|
||||
contextMaxToken: number;
|
||||
maxTemperature: number;
|
||||
trainedMaxToken: number; // 训练后最大多少tokens
|
||||
price: number; // 多少钱 / 1token,单位: 0.00001元
|
||||
};
|
||||
|
||||
export const modelList: ModelConstantsData[] = [
|
||||
{
|
||||
serviceCompany: 'openai',
|
||||
icon: 'model',
|
||||
name: 'chatGPT',
|
||||
model: ChatModelNameEnum.GPT35,
|
||||
model: ModelNameEnum.GPT35,
|
||||
trainName: '',
|
||||
maxToken: 4000,
|
||||
contextMaxToken: 7500,
|
||||
trainedMaxToken: 2000,
|
||||
maxTemperature: 2,
|
||||
contextMaxToken: 4096,
|
||||
maxTemperature: 1.5,
|
||||
price: 3
|
||||
},
|
||||
{
|
||||
serviceCompany: 'openai',
|
||||
icon: 'dbModel',
|
||||
name: '知识库',
|
||||
model: ChatModelNameEnum.VECTOR_GPT,
|
||||
model: ModelNameEnum.VECTOR_GPT,
|
||||
trainName: 'vector',
|
||||
maxToken: 4000,
|
||||
contextMaxToken: 7500,
|
||||
trainedMaxToken: 2000,
|
||||
contextMaxToken: 4096,
|
||||
maxTemperature: 1,
|
||||
price: 3
|
||||
}
|
||||
// {
|
||||
// serviceCompany: 'openai',
|
||||
// name: 'GPT3',
|
||||
// model: ChatModelNameEnum.GPT3,
|
||||
// trainName: 'davinci',
|
||||
// maxToken: 4000,
|
||||
// contextMaxToken: 7500,
|
||||
// trainedMaxToken: 2000,
|
||||
// maxTemperature: 2,
|
||||
// price: 30
|
||||
// }
|
||||
];
|
||||
|
||||
export enum TrainingStatusEnum {
|
||||
pending = 'pending',
|
||||
succeed = 'succeed',
|
||||
errored = 'errored',
|
||||
canceled = 'canceled'
|
||||
}
|
||||
|
||||
export enum ModelStatusEnum {
|
||||
running = 'running',
|
||||
training = 'training',
|
||||
@@ -96,27 +74,59 @@ export const formatModelStatus = {
|
||||
}
|
||||
};
|
||||
|
||||
export const ModelDataStatusMap: Record<RedisModelDataItemType['status'], string> = {
|
||||
export enum ModelDataStatusEnum {
|
||||
ready = 'ready',
|
||||
waiting = 'waiting'
|
||||
}
|
||||
|
||||
export const ModelDataStatusMap: Record<`${ModelDataStatusEnum}`, string> = {
|
||||
ready: '训练完成',
|
||||
waiting: '训练中'
|
||||
};
|
||||
|
||||
/* 知识库搜索时的配置 */
|
||||
// 搜索方式
|
||||
export enum ModelVectorSearchModeEnum {
|
||||
hightSimilarity = 'hightSimilarity', // 高相似度+禁止回复
|
||||
lowSimilarity = 'lowSimilarity', // 低相似度
|
||||
noContext = 'noContex' // 高相似度+无上下文回复
|
||||
}
|
||||
export const ModelVectorSearchModeMap: Record<
|
||||
`${ModelVectorSearchModeEnum}`,
|
||||
{
|
||||
text: string;
|
||||
similarity: number;
|
||||
}
|
||||
> = {
|
||||
[ModelVectorSearchModeEnum.hightSimilarity]: {
|
||||
text: '高相似度, 无匹配时拒绝回复',
|
||||
similarity: 0.2
|
||||
},
|
||||
[ModelVectorSearchModeEnum.noContext]: {
|
||||
text: '高相似度,无匹配时直接回复',
|
||||
similarity: 0.2
|
||||
},
|
||||
[ModelVectorSearchModeEnum.lowSimilarity]: {
|
||||
text: '低相似度匹配',
|
||||
similarity: 0.8
|
||||
}
|
||||
};
|
||||
|
||||
export const defaultModel: ModelSchema = {
|
||||
_id: '',
|
||||
userId: '',
|
||||
name: '',
|
||||
name: 'modelName',
|
||||
avatar: '',
|
||||
status: ModelStatusEnum.pending,
|
||||
updateTime: Date.now(),
|
||||
trainingTimes: 0,
|
||||
systemPrompt: '',
|
||||
intro: '',
|
||||
temperature: 5,
|
||||
search: {
|
||||
mode: ModelVectorSearchModeEnum.hightSimilarity
|
||||
},
|
||||
service: {
|
||||
company: 'openai',
|
||||
trainId: '',
|
||||
chatModel: ChatModelNameEnum.GPT35,
|
||||
modelName: ChatModelNameEnum.GPT35
|
||||
chatModel: ModelNameEnum.GPT35,
|
||||
modelName: ModelNameEnum.GPT35
|
||||
},
|
||||
security: {
|
||||
domain: ['*'],
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
export const VecModelDataPrefix = 'model:data';
|
||||
export const VecModelDataIdx = `idx:${VecModelDataPrefix}:hash`;
|
||||
export enum ModelDataStatusEnum {
|
||||
ready = 'ready',
|
||||
waiting = 'waiting'
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
export const ERROR_CODE: { [key: number]: string } = {
|
||||
400: '请求失败',
|
||||
401: '无权访问',
|
||||
403: '紧张访问',
|
||||
404: '请求不存在',
|
||||
405: '请求方法错误',
|
||||
406: '请求的格式错误',
|
||||
410: '资源已删除',
|
||||
422: '验证错误',
|
||||
500: '服务器发生错误',
|
||||
502: '网关错误',
|
||||
503: '服务器暂时过载或维护',
|
||||
504: '网关超时'
|
||||
};
|
||||
|
||||
export const TOKEN_ERROR_CODE: { [key: number]: string } = {
|
||||
506: '请先登录',
|
||||
507: '请重新登录',
|
||||
508: '登录已过期'
|
||||
};
|
||||
@@ -20,3 +20,15 @@ export const BillTypeMap: Record<`${BillTypeEnum}`, string> = {
|
||||
[BillTypeEnum.vector]: '索引生成',
|
||||
[BillTypeEnum.return]: '退款'
|
||||
};
|
||||
|
||||
export enum PromotionEnum {
|
||||
invite = 'invite',
|
||||
shareModel = 'shareModel',
|
||||
withdraw = 'withdraw'
|
||||
}
|
||||
|
||||
export const PromotionTypeMap = {
|
||||
[PromotionEnum.invite]: '好友充值',
|
||||
[PromotionEnum.shareModel]: '模型分享',
|
||||
[PromotionEnum.withdraw]: '提现'
|
||||
};
|
||||
|
||||
15
src/hooks/useMarkdown.ts
Normal file
@@ -0,0 +1,15 @@
|
||||
import { useQuery } from '@tanstack/react-query';
|
||||
|
||||
export const getMd = async (url: string) => {
|
||||
const response = await fetch(`/docs/${url}`);
|
||||
const textContent = await response.text();
|
||||
return textContent;
|
||||
};
|
||||
|
||||
export const useMarkdown = ({ url }: { url: string }) => {
|
||||
const { data = '' } = useQuery([url], () => getMd(url));
|
||||
|
||||
return {
|
||||
data
|
||||
};
|
||||
};
|
||||
@@ -1,8 +1,8 @@
|
||||
import { useState, useCallback, useMemo } from 'react';
|
||||
import { useState, useCallback, useMemo, useEffect } from 'react';
|
||||
import type { PagingData } from '../types/index';
|
||||
import { IconButton, Flex, Box } from '@chakra-ui/react';
|
||||
import { IconButton, Flex, Box, Input } from '@chakra-ui/react';
|
||||
import { ArrowBackIcon, ArrowForwardIcon } from '@chakra-ui/icons';
|
||||
import { useQuery, useMutation } from '@tanstack/react-query';
|
||||
import { useMutation } from '@tanstack/react-query';
|
||||
import { useToast } from './useToast';
|
||||
|
||||
export const usePagination = <T = any,>({
|
||||
@@ -18,7 +18,7 @@ export const usePagination = <T = any,>({
|
||||
const [pageNum, setPageNum] = useState(1);
|
||||
const [total, setTotal] = useState(0);
|
||||
const [data, setData] = useState<T[]>([]);
|
||||
const maxPage = useMemo(() => Math.ceil(total / pageSize), [pageSize, total]);
|
||||
const maxPage = useMemo(() => Math.ceil(total / pageSize) || 1, [pageSize, total]);
|
||||
|
||||
const { mutate, isLoading } = useMutation({
|
||||
mutationFn: async (num: number = pageNum) => {
|
||||
@@ -40,10 +40,6 @@ export const usePagination = <T = any,>({
|
||||
}
|
||||
}
|
||||
});
|
||||
useQuery(['init'], () => {
|
||||
mutate(1);
|
||||
return null;
|
||||
});
|
||||
|
||||
const Pagination = useCallback(() => {
|
||||
return (
|
||||
@@ -53,22 +49,50 @@ export const usePagination = <T = any,>({
|
||||
icon={<ArrowBackIcon />}
|
||||
aria-label={'left'}
|
||||
size={'sm'}
|
||||
w={'28px'}
|
||||
h={'28px'}
|
||||
onClick={() => mutate(pageNum - 1)}
|
||||
/>
|
||||
<Box mx={2}>
|
||||
{pageNum}/{maxPage}
|
||||
</Box>
|
||||
<Flex mx={2} alignItems={'center'}>
|
||||
<Input
|
||||
defaultValue={pageNum}
|
||||
w={'50px'}
|
||||
size={'xs'}
|
||||
type={'number'}
|
||||
min={1}
|
||||
max={maxPage}
|
||||
onBlur={(e) => {
|
||||
const val = +e.target.value;
|
||||
if (val === pageNum) return;
|
||||
if (val >= maxPage) {
|
||||
mutate(maxPage);
|
||||
} else if (val < 1) {
|
||||
mutate(1);
|
||||
} else {
|
||||
mutate(+e.target.value);
|
||||
}
|
||||
}}
|
||||
/>
|
||||
<Box mx={2}>/</Box>
|
||||
{maxPage}
|
||||
</Flex>
|
||||
<IconButton
|
||||
isDisabled={pageNum === maxPage}
|
||||
icon={<ArrowForwardIcon />}
|
||||
aria-label={'left'}
|
||||
size={'sm'}
|
||||
w={'28px'}
|
||||
h={'28px'}
|
||||
onClick={() => mutate(pageNum + 1)}
|
||||
/>
|
||||
</Flex>
|
||||
);
|
||||
}, [maxPage, mutate, pageNum]);
|
||||
|
||||
useEffect(() => {
|
||||
mutate(1);
|
||||
}, [mutate]);
|
||||
|
||||
return {
|
||||
pageNum,
|
||||
pageSize,
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
import { useState, useCallback } from 'react';
|
||||
import { useState, useCallback, useEffect } from 'react';
|
||||
import type { PagingData } from '../types/index';
|
||||
import { useQuery } from '@tanstack/react-query';
|
||||
import { useToast } from './useToast';
|
||||
|
||||
export const usePaging = <T = any>({
|
||||
@@ -64,7 +63,9 @@ export const usePaging = <T = any>({
|
||||
getData(pageNum + 1);
|
||||
}, [getData, isLoadAll, pageNum, requesting]);
|
||||
|
||||
useQuery(['init'], () => getData(1, true));
|
||||
useEffect(() => {
|
||||
getData(1, true);
|
||||
}, []);
|
||||
|
||||
return {
|
||||
pageNum,
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { useState, useMemo, useCallback } from 'react';
|
||||
import { sendCodeToEmail } from '@/api/user';
|
||||
import { EmailTypeEnum } from '@/constants/common';
|
||||
import { sendAuthCode } from '@/api/user';
|
||||
import { UserAuthTypeEnum } from '@/constants/common';
|
||||
let timer: any;
|
||||
import { useToast } from './useToast';
|
||||
|
||||
@@ -19,11 +19,11 @@ export const useSendCode = () => {
|
||||
}, [codeCountDown]);
|
||||
|
||||
const sendCode = useCallback(
|
||||
async ({ email, type }: { email: string; type: `${EmailTypeEnum}` }) => {
|
||||
async ({ username, type }: { username: string; type: `${UserAuthTypeEnum}` }) => {
|
||||
setCodeSending(true);
|
||||
try {
|
||||
await sendCodeToEmail({
|
||||
email,
|
||||
await sendAuthCode({
|
||||
username,
|
||||
type
|
||||
});
|
||||
setCodeCountDown(60);
|
||||
|
||||
@@ -47,11 +47,10 @@ export default function App({ Component, pageProps }: AppProps) {
|
||||
<meta name="description" content="Generated by Fast GPT" />
|
||||
<meta
|
||||
name="viewport"
|
||||
content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=0;"
|
||||
content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=0"
|
||||
/>
|
||||
<link rel="icon" href="/favicon.ico" />
|
||||
</Head>
|
||||
<Script src="/js/iconfont.js" strategy="afterInteractive"></Script>
|
||||
<Script src="/js/qrcode.min.js" strategy="afterInteractive"></Script>
|
||||
<Script src="/js/pdf.js" strategy="afterInteractive"></Script>
|
||||
<QueryClientProvider client={queryClient}>
|
||||
|
||||
15
src/pages/_error.tsx
Normal file
@@ -0,0 +1,15 @@
|
||||
function Error({ statusCode }: { statusCode: number }) {
|
||||
return (
|
||||
<p>
|
||||
{statusCode ? `An error ${statusCode} occurred on server` : 'An error occurred on client'}
|
||||
</p>
|
||||
);
|
||||
}
|
||||
|
||||
Error.getInitialProps = ({ res, err }: { res: any; err: any }) => {
|
||||
const statusCode = res ? res.statusCode : err ? err.statusCode : 404;
|
||||
console.log(err);
|
||||
return { statusCode };
|
||||
};
|
||||
|
||||
export default Error;
|
||||
@@ -1,15 +1,13 @@
|
||||
import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { createParser, ParsedEvent, ReconnectInterval } from 'eventsource-parser';
|
||||
import { connectToDatabase } from '@/service/mongo';
|
||||
import { getOpenAIApi, authChat } from '@/service/utils/chat';
|
||||
import { httpsAgent, openaiChatFilter } from '@/service/utils/tools';
|
||||
import { ChatCompletionRequestMessage, ChatCompletionRequestMessageRoleEnum } from 'openai';
|
||||
import { getOpenAIApi, authChat } from '@/service/utils/auth';
|
||||
import { axiosConfig, openaiChatFilter } from '@/service/utils/tools';
|
||||
import { ChatItemType } from '@/types/chat';
|
||||
import { jsonRes } from '@/service/response';
|
||||
import type { ModelSchema } from '@/types/mongoSchema';
|
||||
import { PassThrough } from 'stream';
|
||||
import { modelList } from '@/constants/model';
|
||||
import { pushChatBill } from '@/service/events/pushBill';
|
||||
import { gpt35StreamResponse } from '@/service/utils/openai';
|
||||
|
||||
/* 发送提示词 */
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
|
||||
@@ -28,29 +26,33 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
|
||||
});
|
||||
|
||||
try {
|
||||
const { chatId, prompt } = req.body as {
|
||||
const { chatId, prompt, modelId } = req.body as {
|
||||
prompt: ChatItemType;
|
||||
chatId: string;
|
||||
modelId: string;
|
||||
chatId: '' | string;
|
||||
};
|
||||
|
||||
const { authorization } = req.headers;
|
||||
if (!chatId || !prompt) {
|
||||
if (!modelId || !prompt) {
|
||||
throw new Error('缺少参数');
|
||||
}
|
||||
|
||||
await connectToDatabase();
|
||||
let startTime = Date.now();
|
||||
|
||||
const { chat, userApiKey, systemKey, userId } = await authChat(chatId, authorization);
|
||||
const { model, content, userApiKey, systemKey, userId } = await authChat({
|
||||
modelId,
|
||||
chatId,
|
||||
authorization
|
||||
});
|
||||
|
||||
const model: ModelSchema = chat.modelId;
|
||||
const modelConstantsData = modelList.find((item) => item.model === model.service.modelName);
|
||||
if (!modelConstantsData) {
|
||||
throw new Error('模型加载异常');
|
||||
}
|
||||
|
||||
// 读取对话内容
|
||||
const prompts = [...chat.content, prompt];
|
||||
const prompts = [...content, prompt];
|
||||
|
||||
// 如果有系统提示词,自动插入
|
||||
if (model.systemPrompt) {
|
||||
@@ -61,33 +63,23 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
|
||||
}
|
||||
|
||||
// 控制在 tokens 数量,防止超出
|
||||
const filterPrompts = openaiChatFilter(prompts, modelConstantsData.contextMaxToken);
|
||||
const filterPrompts = openaiChatFilter({
|
||||
model: model.service.chatModel,
|
||||
prompts,
|
||||
maxTokens: modelConstantsData.contextMaxToken - 500
|
||||
});
|
||||
|
||||
// 格式化文本内容成 chatgpt 格式
|
||||
const map = {
|
||||
Human: ChatCompletionRequestMessageRoleEnum.User,
|
||||
AI: ChatCompletionRequestMessageRoleEnum.Assistant,
|
||||
SYSTEM: ChatCompletionRequestMessageRoleEnum.System
|
||||
};
|
||||
const formatPrompts: ChatCompletionRequestMessage[] = filterPrompts.map(
|
||||
(item: ChatItemType) => ({
|
||||
role: map[item.obj],
|
||||
content: item.value
|
||||
})
|
||||
);
|
||||
// console.log(formatPrompts);
|
||||
// 计算温度
|
||||
const temperature = modelConstantsData.maxTemperature * (model.temperature / 10);
|
||||
|
||||
// console.log(filterPrompts);
|
||||
// 获取 chatAPI
|
||||
const chatAPI = getOpenAIApi(userApiKey || systemKey);
|
||||
// 发出请求
|
||||
const chatResponse = await chatAPI.createChatCompletion(
|
||||
{
|
||||
model: model.service.chatModel,
|
||||
temperature: temperature,
|
||||
// max_tokens: modelConstantsData.maxToken,
|
||||
messages: formatPrompts,
|
||||
temperature,
|
||||
messages: filterPrompts,
|
||||
frequency_penalty: 0.5, // 越大,重复内容越少
|
||||
presence_penalty: -0.5, // 越大,越容易出现新内容
|
||||
stream: true,
|
||||
@@ -96,64 +88,27 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
|
||||
{
|
||||
timeout: 40000,
|
||||
responseType: 'stream',
|
||||
httpsAgent
|
||||
...axiosConfig
|
||||
}
|
||||
);
|
||||
|
||||
console.log('api response time:', `${(Date.now() - startTime) / 1000}s`);
|
||||
|
||||
// 创建响应流
|
||||
res.setHeader('Content-Type', 'text/event-stream;charset-utf-8');
|
||||
res.setHeader('Access-Control-Allow-Origin', '*');
|
||||
res.setHeader('X-Accel-Buffering', 'no');
|
||||
res.setHeader('Cache-Control', 'no-cache, no-transform');
|
||||
step = 1;
|
||||
|
||||
let responseContent = '';
|
||||
stream.pipe(res);
|
||||
const { responseContent } = await gpt35StreamResponse({
|
||||
res,
|
||||
stream,
|
||||
chatResponse
|
||||
});
|
||||
|
||||
const onParse = async (event: ParsedEvent | ReconnectInterval) => {
|
||||
if (event.type !== 'event') return;
|
||||
const data = event.data;
|
||||
if (data === '[DONE]') return;
|
||||
try {
|
||||
const json = JSON.parse(data);
|
||||
const content: string = json?.choices?.[0].delta.content || '';
|
||||
if (!content || (responseContent === '' && content === '\n')) return;
|
||||
|
||||
responseContent += content;
|
||||
// console.log('content:', content)
|
||||
!stream.destroyed && stream.push(content.replace(/\n/g, '<br/>'));
|
||||
} catch (error) {
|
||||
error;
|
||||
}
|
||||
};
|
||||
|
||||
const decoder = new TextDecoder();
|
||||
try {
|
||||
for await (const chunk of chatResponse.data as any) {
|
||||
if (stream.destroyed) {
|
||||
// 流被中断了,直接忽略后面的内容
|
||||
break;
|
||||
}
|
||||
const parser = createParser(onParse);
|
||||
parser.feed(decoder.decode(chunk));
|
||||
}
|
||||
} catch (error) {
|
||||
console.log('pipe error', error);
|
||||
}
|
||||
// close stream
|
||||
!stream.destroyed && stream.push(null);
|
||||
stream.destroy();
|
||||
|
||||
const promptsContent = formatPrompts.map((item) => item.content).join('');
|
||||
// 只有使用平台的 key 才计费
|
||||
pushChatBill({
|
||||
isPay: !userApiKey,
|
||||
modelName: model.service.modelName,
|
||||
userId,
|
||||
chatId,
|
||||
text: promptsContent + responseContent
|
||||
messages: filterPrompts.concat({ role: 'assistant', content: responseContent })
|
||||
});
|
||||
} catch (err: any) {
|
||||
if (step === 1) {
|
||||
|
||||
@@ -1,58 +0,0 @@
|
||||
import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { jsonRes } from '@/service/response';
|
||||
import { connectToDatabase, Model, Chat } from '@/service/mongo';
|
||||
import { authToken } from '@/service/utils/tools';
|
||||
import type { ModelSchema } from '@/types/mongoSchema';
|
||||
|
||||
/* 获取我的模型 */
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
|
||||
try {
|
||||
const { modelId, isShare = 'false' } = req.query as {
|
||||
modelId: string;
|
||||
isShare?: 'true' | 'false';
|
||||
};
|
||||
const { authorization } = req.headers;
|
||||
|
||||
if (!authorization) {
|
||||
throw new Error('无权生成对话');
|
||||
}
|
||||
|
||||
if (!modelId) {
|
||||
throw new Error('缺少参数');
|
||||
}
|
||||
|
||||
// 凭证校验
|
||||
const userId = await authToken(authorization);
|
||||
|
||||
await connectToDatabase();
|
||||
|
||||
// 获取模型配置
|
||||
const model = await Model.findOne<ModelSchema>({
|
||||
_id: modelId,
|
||||
userId
|
||||
});
|
||||
|
||||
if (!model) {
|
||||
throw new Error('模型不存在');
|
||||
}
|
||||
|
||||
// 创建 chat 数据
|
||||
const response = await Chat.create({
|
||||
userId,
|
||||
modelId,
|
||||
expiredTime: Date.now() + model.security.expiredTime,
|
||||
loadAmount: model.security.maxLoadAmount,
|
||||
isShare: isShare === 'true',
|
||||
content: []
|
||||
});
|
||||
|
||||
jsonRes(res, {
|
||||
data: response._id // 即聊天框的 ID
|
||||
});
|
||||
} catch (err) {
|
||||
jsonRes(res, {
|
||||
code: 500,
|
||||
error: err
|
||||
});
|
||||
}
|
||||
}
|
||||
31
src/pages/api/chat/getHistory.ts
Normal file
@@ -0,0 +1,31 @@
|
||||
import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { jsonRes } from '@/service/response';
|
||||
import { connectToDatabase, Chat } from '@/service/mongo';
|
||||
import { authToken } from '@/service/utils/tools';
|
||||
|
||||
/* 获取历史记录 */
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
|
||||
try {
|
||||
const userId = await authToken(req.headers.authorization);
|
||||
|
||||
await connectToDatabase();
|
||||
|
||||
const data = await Chat.find(
|
||||
{
|
||||
userId
|
||||
},
|
||||
'_id title modelId'
|
||||
)
|
||||
.sort({ updateTime: -1 })
|
||||
.limit(20);
|
||||
|
||||
jsonRes(res, {
|
||||
data
|
||||
});
|
||||
} catch (err) {
|
||||
jsonRes(res, {
|
||||
code: 500,
|
||||
error: err
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -1,173 +0,0 @@
|
||||
import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { createParser, ParsedEvent, ReconnectInterval } from 'eventsource-parser';
|
||||
import { connectToDatabase } from '@/service/mongo';
|
||||
import { getOpenAIApi, authChat } from '@/service/utils/chat';
|
||||
import { httpsAgent } from '@/service/utils/tools';
|
||||
import { ChatItemType } from '@/types/chat';
|
||||
import { jsonRes } from '@/service/response';
|
||||
import type { ModelSchema } from '@/types/mongoSchema';
|
||||
import { PassThrough } from 'stream';
|
||||
import { modelList } from '@/constants/model';
|
||||
import { pushChatBill } from '@/service/events/pushBill';
|
||||
|
||||
/* 发送提示词 */
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
|
||||
let step = 0; // step=1时,表示开始了流响应
|
||||
const stream = new PassThrough();
|
||||
stream.on('error', () => {
|
||||
console.log('error: ', 'stream error');
|
||||
stream.destroy();
|
||||
});
|
||||
res.on('close', () => {
|
||||
stream.destroy();
|
||||
});
|
||||
res.on('error', () => {
|
||||
console.log('error: ', 'request error');
|
||||
stream.destroy();
|
||||
});
|
||||
|
||||
try {
|
||||
const { chatId, prompt } = req.body as {
|
||||
prompt: ChatItemType;
|
||||
chatId: string;
|
||||
};
|
||||
const { authorization } = req.headers;
|
||||
if (!chatId || !prompt) {
|
||||
throw new Error('缺少参数');
|
||||
}
|
||||
|
||||
await connectToDatabase();
|
||||
|
||||
const { chat, userApiKey, systemKey, userId } = await authChat(chatId, authorization);
|
||||
|
||||
const model: ModelSchema = chat.modelId;
|
||||
|
||||
// 读取对话内容
|
||||
const prompts = [...chat.content, prompt];
|
||||
|
||||
// 上下文长度过滤
|
||||
const maxContext = model.security.contextMaxLen;
|
||||
const filterPrompts =
|
||||
prompts.length > maxContext ? prompts.slice(prompts.length - maxContext) : prompts;
|
||||
|
||||
// 格式化文本内容
|
||||
const formatPrompts: string[] = filterPrompts.map((item: ChatItemType) => item.value);
|
||||
// 如果有系统提示词,自动插入
|
||||
if (model.systemPrompt) {
|
||||
formatPrompts.unshift(`${model.systemPrompt}`);
|
||||
}
|
||||
|
||||
const promptText = formatPrompts.join('</s>');
|
||||
|
||||
// 计算温度
|
||||
const modelConstantsData = modelList.find((item) => item.model === model.service.modelName);
|
||||
if (!modelConstantsData) {
|
||||
throw new Error('模型异常,请用 chatgpt 模型');
|
||||
}
|
||||
const temperature = modelConstantsData.maxTemperature * (model.temperature / 10);
|
||||
|
||||
// 获取 chatAPI
|
||||
const chatAPI = getOpenAIApi(userApiKey || systemKey);
|
||||
let startTime = Date.now();
|
||||
// console.log({
|
||||
// model: model.service.chatModel,
|
||||
// temperature: temperature,
|
||||
// prompt: promptText,
|
||||
// stream: true,
|
||||
// max_tokens:
|
||||
// model.trainingTimes > 0 ? modelConstantsData.trainedMaxToken : modelConstantsData.maxToken,
|
||||
// presence_penalty: -0.5, // 越大,越容易出现新内容
|
||||
// frequency_penalty: 0.5, // 越大,重复内容越少
|
||||
// stop: [`###`]
|
||||
// });
|
||||
// 发出请求
|
||||
const chatResponse = await chatAPI.createCompletion(
|
||||
{
|
||||
model: model.service.chatModel,
|
||||
temperature: temperature,
|
||||
prompt: promptText,
|
||||
stream: true,
|
||||
max_tokens:
|
||||
model.trainingTimes > 0
|
||||
? modelConstantsData.trainedMaxToken
|
||||
: modelConstantsData.maxToken,
|
||||
presence_penalty: -0.5, // 越大,越容易出现新内容
|
||||
frequency_penalty: 0.5, // 越大,重复内容越少
|
||||
stop: [`###`, '。!?.!.']
|
||||
},
|
||||
{
|
||||
timeout: 40000,
|
||||
responseType: 'stream',
|
||||
httpsAgent
|
||||
}
|
||||
);
|
||||
|
||||
console.log('api response time:', `${(Date.now() - startTime) / 1000}s`);
|
||||
|
||||
// 创建响应流
|
||||
res.setHeader('Content-Type', 'text/event-stream;charset-utf-8');
|
||||
res.setHeader('Access-Control-Allow-Origin', '*');
|
||||
res.setHeader('X-Accel-Buffering', 'no');
|
||||
res.setHeader('Cache-Control', 'no-cache, no-transform');
|
||||
step = 1;
|
||||
|
||||
let responseContent = '';
|
||||
stream.pipe(res);
|
||||
|
||||
const onParse = async (event: ParsedEvent | ReconnectInterval) => {
|
||||
if (event.type !== 'event') return;
|
||||
const data = event.data;
|
||||
if (data === '[DONE]') return;
|
||||
try {
|
||||
const json = JSON.parse(data);
|
||||
const content: string = json?.choices?.[0].text || '';
|
||||
// console.log('content:', content);
|
||||
if (!content || (responseContent === '' && content === '\n')) return;
|
||||
|
||||
responseContent += content;
|
||||
!stream.destroyed && stream.push(content.replace(/\n/g, '<br/>'));
|
||||
} catch (error) {
|
||||
error;
|
||||
}
|
||||
};
|
||||
|
||||
const decoder = new TextDecoder();
|
||||
try {
|
||||
for await (const chunk of chatResponse.data as any) {
|
||||
if (stream.destroyed) {
|
||||
// 流被中断了,直接忽略后面的内容
|
||||
break;
|
||||
}
|
||||
const parser = createParser(onParse);
|
||||
parser.feed(decoder.decode(chunk));
|
||||
}
|
||||
} catch (error) {
|
||||
console.log('pipe error', error);
|
||||
}
|
||||
// close stream
|
||||
!stream.destroyed && stream.push(null);
|
||||
stream.destroy();
|
||||
|
||||
// 只有使用平台的 key 才计费
|
||||
pushChatBill({
|
||||
isPay: !userApiKey,
|
||||
modelName: model.service.modelName,
|
||||
userId,
|
||||
chatId,
|
||||
text: promptText + responseContent
|
||||
});
|
||||
} catch (err: any) {
|
||||
// console.log(err?.response);
|
||||
if (step === 1) {
|
||||
// 直接结束流
|
||||
console.log('error,结束');
|
||||
stream.destroy();
|
||||
} else {
|
||||
res.status(500);
|
||||
jsonRes(res, {
|
||||
code: 500,
|
||||
error: err
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,9 +1,11 @@
|
||||
import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { jsonRes } from '@/service/response';
|
||||
import { connectToDatabase, Chat } from '@/service/mongo';
|
||||
import type { ChatPopulate } from '@/types/mongoSchema';
|
||||
import type { InitChatResponse } from '@/api/response/chat';
|
||||
import { authToken } from '@/service/utils/tools';
|
||||
import { ChatItemType } from '@/types/chat';
|
||||
import { authModel } from '@/service/utils/auth';
|
||||
import mongoose from 'mongoose';
|
||||
|
||||
/* 初始化我的聊天框,需要身份验证 */
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
|
||||
@@ -11,43 +13,49 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
|
||||
const { authorization } = req.headers;
|
||||
const userId = await authToken(authorization);
|
||||
|
||||
const { chatId } = req.query as { chatId: string };
|
||||
const { modelId, chatId } = req.query as { modelId: string; chatId: '' | string };
|
||||
|
||||
if (!chatId) {
|
||||
if (!modelId) {
|
||||
throw new Error('缺少参数');
|
||||
}
|
||||
|
||||
await connectToDatabase();
|
||||
|
||||
// 获取 chat 数据
|
||||
const chat = await Chat.findOne<ChatPopulate>({
|
||||
_id: chatId,
|
||||
userId
|
||||
}).populate({
|
||||
path: 'modelId',
|
||||
options: {
|
||||
strictPopulate: false
|
||||
}
|
||||
});
|
||||
// 获取 model 数据
|
||||
const { model } = await authModel(modelId, userId);
|
||||
|
||||
if (!chat) {
|
||||
throw new Error('聊天框不存在');
|
||||
// 历史记录
|
||||
let history: ChatItemType[] = [];
|
||||
|
||||
if (chatId) {
|
||||
// 获取 chat.content 数据
|
||||
history = await Chat.aggregate([
|
||||
{ $match: { _id: new mongoose.Types.ObjectId(chatId) } },
|
||||
{ $unwind: '$content' },
|
||||
{ $match: { 'content.deleted': false } },
|
||||
{ $sort: { 'content._id': -1 } },
|
||||
{ $limit: 50 },
|
||||
{
|
||||
$project: {
|
||||
id: '$content._id',
|
||||
obj: '$content.obj',
|
||||
value: '$content.value'
|
||||
}
|
||||
}
|
||||
]);
|
||||
|
||||
history.reverse();
|
||||
}
|
||||
|
||||
// filter 掉被 deleted 的内容
|
||||
chat.content = chat.content.filter((item) => item.deleted !== true);
|
||||
|
||||
const model = chat.modelId;
|
||||
jsonRes<InitChatResponse>(res, {
|
||||
data: {
|
||||
chatId: chat._id,
|
||||
modelId: model._id,
|
||||
chatId: chatId || '',
|
||||
modelId: modelId,
|
||||
name: model.name,
|
||||
avatar: model.avatar,
|
||||
intro: model.intro,
|
||||
modelName: model.service.modelName,
|
||||
chatModel: model.service.chatModel,
|
||||
history: chat.content
|
||||
history
|
||||
}
|
||||
});
|
||||
} catch (err) {
|
||||
|
||||
@@ -1,277 +0,0 @@
|
||||
import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { createParser, ParsedEvent, ReconnectInterval } from 'eventsource-parser';
|
||||
import { connectToDatabase } from '@/service/mongo';
|
||||
import { getOpenAIApi, authChat } from '@/service/utils/chat';
|
||||
import { httpsAgent, openaiChatFilter, systemPromptFilter } from '@/service/utils/tools';
|
||||
import { ChatCompletionRequestMessage, ChatCompletionRequestMessageRoleEnum } from 'openai';
|
||||
import { ChatItemType } from '@/types/chat';
|
||||
import { jsonRes } from '@/service/response';
|
||||
import type { ModelSchema } from '@/types/mongoSchema';
|
||||
import { PassThrough } from 'stream';
|
||||
import { modelList } from '@/constants/model';
|
||||
import { pushChatBill } from '@/service/events/pushBill';
|
||||
import { connectRedis } from '@/service/redis';
|
||||
import { VecModelDataPrefix } from '@/constants/redis';
|
||||
import { vectorToBuffer } from '@/utils/tools';
|
||||
import { openaiCreateEmbedding } from '@/service/utils/openai';
|
||||
|
||||
/* 发送提示词 */
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
|
||||
let step = 0; // step=1时,表示开始了流响应
|
||||
const stream = new PassThrough();
|
||||
stream.on('error', () => {
|
||||
console.log('error: ', 'stream error');
|
||||
stream.destroy();
|
||||
});
|
||||
res.on('close', () => {
|
||||
stream.destroy();
|
||||
});
|
||||
res.on('error', () => {
|
||||
console.log('error: ', 'request error');
|
||||
stream.destroy();
|
||||
});
|
||||
|
||||
try {
|
||||
const { chatId, prompt } = req.body as {
|
||||
prompt: ChatItemType;
|
||||
chatId: string;
|
||||
};
|
||||
|
||||
const { authorization } = req.headers;
|
||||
if (!chatId || !prompt) {
|
||||
throw new Error('缺少参数');
|
||||
}
|
||||
|
||||
await connectToDatabase();
|
||||
const redis = await connectRedis();
|
||||
let startTime = Date.now();
|
||||
|
||||
const { chat, userApiKey, systemKey, userId } = await authChat(chatId, authorization);
|
||||
|
||||
const model: ModelSchema = chat.modelId;
|
||||
const modelConstantsData = modelList.find((item) => item.model === model.service.modelName);
|
||||
if (!modelConstantsData) {
|
||||
throw new Error('模型加载异常');
|
||||
}
|
||||
// 获取 chatAPI
|
||||
const chatAPI = getOpenAIApi(userApiKey || systemKey);
|
||||
|
||||
// 请求一次 chatgpt 拆解需求
|
||||
const promptResponse = await chatAPI.createChatCompletion(
|
||||
{
|
||||
model: model.service.chatModel,
|
||||
temperature: 0,
|
||||
// max_tokens: modelConstantsData.maxToken,
|
||||
messages: [
|
||||
{
|
||||
role: 'system',
|
||||
content: `服务端逻辑生成器。根据用户输入的需求,拆解成代码实现的步骤,并按格式返回: 1.\n2.\n3.\n ......
|
||||
|
||||
下面是一些例子:
|
||||
实现一个手机号注册账号的方法,包含两个函数
|
||||
* 发送手机验证码函数:
|
||||
1. 从 query 中获取 phone
|
||||
2. 校验手机号格式是否正确,不正确返回{error: "手机号格式错误"}
|
||||
3. 给 phone 发送一个短信验证码,验证码长度为6位字符串,内容为:你正在注册laf, 验证码为:code
|
||||
4. 数据库添加数据,表为"codes",内容为 {phone, code}
|
||||
* 注册函数
|
||||
1. 从 body 中获取 phone 和 code
|
||||
2. 校验手机号格式是否正确,不正确返回{error: "手机号格式错误"}
|
||||
2. 获取数据库数据,表为"codes",查找是否有符合 phone, code 等于body参数的记录,没有的话返回 {error:"验证码不正确"}
|
||||
4. 添加数据库数据,表为"users" ,内容为{phone, code, createTime}
|
||||
5. 删除数据库数据,删除 code 记录
|
||||
---------------
|
||||
更新博客记录。传入blogId,blogText,tags,还需要记录更新的时间
|
||||
1. 从 body 中获取 blogId,blogText 和 tags
|
||||
2. 校验 blogId 是否为空,为空则返回 {error: "博客ID不能为空"}
|
||||
3. 校验 blogText 是否为空,为空则返回 {error: "博客内容不能为空"}
|
||||
4. 校验 tags 是否为数组,不是则返回 {error: "标签必须为数组"}
|
||||
5. 获取当前时间,记录为 updateTime
|
||||
6. 更新数据库数据,表为"blogs",更新符合 blogId 的记录的内容为{blogText, tags, updateTime}
|
||||
7. 返回结果 {message: "更新博客记录成功"}`
|
||||
},
|
||||
{
|
||||
role: 'user',
|
||||
content: prompt.value
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
timeout: 40000,
|
||||
httpsAgent
|
||||
}
|
||||
);
|
||||
|
||||
const promptResolve = promptResponse.data.choices?.[0]?.message?.content || '';
|
||||
if (!promptResolve) {
|
||||
throw new Error('gpt 异常');
|
||||
}
|
||||
|
||||
prompt.value += `\n${promptResolve}`;
|
||||
console.log('prompt resolve success, time:', `${(Date.now() - startTime) / 1000}s`);
|
||||
|
||||
// 获取提示词的向量
|
||||
const { vector: promptVector } = await openaiCreateEmbedding({
|
||||
isPay: !userApiKey,
|
||||
apiKey: userApiKey || systemKey,
|
||||
userId,
|
||||
text: prompt.value
|
||||
});
|
||||
|
||||
// 读取对话内容
|
||||
const prompts = [...chat.content, prompt];
|
||||
|
||||
// 搜索系统提示词, 按相似度从 redis 中搜出相关的 q 和 text
|
||||
const redisData: any[] = await redis.sendCommand([
|
||||
'FT.SEARCH',
|
||||
`idx:${VecModelDataPrefix}:hash`,
|
||||
`@modelId:{${String(
|
||||
chat.modelId._id
|
||||
)}} @vector:[VECTOR_RANGE 0.25 $blob]=>{$YIELD_DISTANCE_AS: score}`,
|
||||
// `@modelId:{${String(chat.modelId._id)}}=>[KNN 10 @vector $blob AS score]`,
|
||||
'RETURN',
|
||||
'1',
|
||||
'text',
|
||||
'SORTBY',
|
||||
'score',
|
||||
'PARAMS',
|
||||
'2',
|
||||
'blob',
|
||||
vectorToBuffer(promptVector),
|
||||
'LIMIT',
|
||||
'0',
|
||||
'20',
|
||||
'DIALECT',
|
||||
'2'
|
||||
]);
|
||||
|
||||
// 格式化响应值,获取 qa
|
||||
const formatRedisPrompt = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20]
|
||||
.map((i) => {
|
||||
if (!redisData[i]) return '';
|
||||
const text = (redisData[i][1] as string) || '';
|
||||
|
||||
if (!text) return '';
|
||||
|
||||
return text;
|
||||
})
|
||||
.filter((item) => item);
|
||||
|
||||
if (formatRedisPrompt.length === 0) {
|
||||
throw new Error('对不起,我没有找到你的问题');
|
||||
}
|
||||
|
||||
// textArr 筛选,最多 3000 tokens
|
||||
const systemPrompt = systemPromptFilter(formatRedisPrompt, 3400);
|
||||
|
||||
prompts.unshift({
|
||||
obj: 'SYSTEM',
|
||||
value: `${model.systemPrompt} 知识库内容是最新的,知识库内容为: "${systemPrompt}"`
|
||||
});
|
||||
|
||||
// 控制在 tokens 数量,防止超出
|
||||
const filterPrompts = openaiChatFilter(prompts, modelConstantsData.contextMaxToken);
|
||||
|
||||
// 格式化文本内容成 chatgpt 格式
|
||||
const map = {
|
||||
Human: ChatCompletionRequestMessageRoleEnum.User,
|
||||
AI: ChatCompletionRequestMessageRoleEnum.Assistant,
|
||||
SYSTEM: ChatCompletionRequestMessageRoleEnum.System
|
||||
};
|
||||
const formatPrompts: ChatCompletionRequestMessage[] = filterPrompts.map(
|
||||
(item: ChatItemType) => ({
|
||||
role: map[item.obj],
|
||||
content: item.value
|
||||
})
|
||||
);
|
||||
console.log(formatPrompts);
|
||||
// 计算温度
|
||||
const temperature = modelConstantsData.maxTemperature * (model.temperature / 10);
|
||||
|
||||
// 发出请求
|
||||
const chatResponse = await chatAPI.createChatCompletion(
|
||||
{
|
||||
model: model.service.chatModel,
|
||||
temperature: temperature,
|
||||
// max_tokens: modelConstantsData.maxToken,
|
||||
messages: formatPrompts,
|
||||
frequency_penalty: 0.5, // 越大,重复内容越少
|
||||
presence_penalty: -0.5, // 越大,越容易出现新内容
|
||||
stream: true
|
||||
},
|
||||
{
|
||||
timeout: 40000,
|
||||
responseType: 'stream',
|
||||
httpsAgent
|
||||
}
|
||||
);
|
||||
|
||||
console.log('api response time:', `${(Date.now() - startTime) / 1000}s`);
|
||||
|
||||
// 创建响应流
|
||||
res.setHeader('Content-Type', 'text/event-stream;charset-utf-8');
|
||||
res.setHeader('Access-Control-Allow-Origin', '*');
|
||||
res.setHeader('X-Accel-Buffering', 'no');
|
||||
res.setHeader('Cache-Control', 'no-cache, no-transform');
|
||||
step = 1;
|
||||
|
||||
let responseContent = '';
|
||||
stream.pipe(res);
|
||||
|
||||
const onParse = async (event: ParsedEvent | ReconnectInterval) => {
|
||||
if (event.type !== 'event') return;
|
||||
const data = event.data;
|
||||
if (data === '[DONE]') return;
|
||||
try {
|
||||
const json = JSON.parse(data);
|
||||
const content: string = json?.choices?.[0].delta.content || '';
|
||||
if (!content || (responseContent === '' && content === '\n')) return;
|
||||
|
||||
responseContent += content;
|
||||
// console.log('content:', content)
|
||||
!stream.destroyed && stream.push(content.replace(/\n/g, '<br/>'));
|
||||
} catch (error) {
|
||||
error;
|
||||
}
|
||||
};
|
||||
|
||||
const decoder = new TextDecoder();
|
||||
try {
|
||||
for await (const chunk of chatResponse.data as any) {
|
||||
if (stream.destroyed) {
|
||||
// 流被中断了,直接忽略后面的内容
|
||||
break;
|
||||
}
|
||||
const parser = createParser(onParse);
|
||||
parser.feed(decoder.decode(chunk));
|
||||
}
|
||||
} catch (error) {
|
||||
console.log('pipe error', error);
|
||||
}
|
||||
// close stream
|
||||
!stream.destroyed && stream.push(null);
|
||||
stream.destroy();
|
||||
|
||||
const promptsContent = formatPrompts.map((item) => item.content).join('');
|
||||
// 只有使用平台的 key 才计费
|
||||
pushChatBill({
|
||||
isPay: !userApiKey,
|
||||
modelName: model.service.modelName,
|
||||
userId,
|
||||
chatId,
|
||||
text: promptsContent + responseContent
|
||||
});
|
||||
} catch (err: any) {
|
||||
if (step === 1) {
|
||||
// 直接结束流
|
||||
console.log('error,结束');
|
||||
stream.destroy();
|
||||
} else {
|
||||
res.status(500);
|
||||
jsonRes(res, {
|
||||
code: 500,
|
||||
error: err
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
27
src/pages/api/chat/removeHistory.ts
Normal file
@@ -0,0 +1,27 @@
|
||||
import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { jsonRes } from '@/service/response';
|
||||
import { ChatItemType } from '@/types/chat';
|
||||
import { connectToDatabase, Chat } from '@/service/mongo';
|
||||
import { authToken } from '@/service/utils/tools';
|
||||
|
||||
/* 获取历史记录 */
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
|
||||
try {
|
||||
const { id } = req.query;
|
||||
const userId = await authToken(req.headers.authorization);
|
||||
|
||||
await connectToDatabase();
|
||||
|
||||
await Chat.findOneAndRemove({
|
||||
_id: id,
|
||||
userId
|
||||
});
|
||||
|
||||
jsonRes(res);
|
||||
} catch (err) {
|
||||
jsonRes(res, {
|
||||
code: 500,
|
||||
error: err
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -2,34 +2,54 @@ import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { jsonRes } from '@/service/response';
|
||||
import { ChatItemType } from '@/types/chat';
|
||||
import { connectToDatabase, Chat } from '@/service/mongo';
|
||||
import { authModel } from '@/service/utils/auth';
|
||||
import { authToken } from '@/service/utils/tools';
|
||||
|
||||
/* 聊天内容存存储 */
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
|
||||
try {
|
||||
const { chatId, prompts } = req.body as {
|
||||
chatId: string;
|
||||
const { chatId, modelId, prompts } = req.body as {
|
||||
chatId: '' | string;
|
||||
modelId: string;
|
||||
prompts: ChatItemType[];
|
||||
};
|
||||
|
||||
if (!chatId || !prompts) {
|
||||
if (!prompts) {
|
||||
throw new Error('缺少参数');
|
||||
}
|
||||
|
||||
const userId = await authToken(req.headers.authorization);
|
||||
|
||||
await connectToDatabase();
|
||||
|
||||
// 存入库
|
||||
await Chat.findByIdAndUpdate(chatId, {
|
||||
$push: {
|
||||
content: {
|
||||
$each: prompts.map((item) => ({
|
||||
obj: item.obj,
|
||||
value: item.value
|
||||
}))
|
||||
}
|
||||
},
|
||||
updateTime: new Date()
|
||||
});
|
||||
const content = prompts.map((item) => ({
|
||||
obj: item.obj,
|
||||
value: item.value
|
||||
}));
|
||||
|
||||
// 没有 chatId, 创建一个对话
|
||||
if (!chatId) {
|
||||
await authModel(modelId, userId);
|
||||
const { _id } = await Chat.create({
|
||||
userId,
|
||||
modelId,
|
||||
content,
|
||||
title: content[0].value.slice(0, 20)
|
||||
});
|
||||
return jsonRes(res, {
|
||||
data: _id
|
||||
});
|
||||
} else {
|
||||
// 已经有记录,追加入库
|
||||
await Chat.findByIdAndUpdate(chatId, {
|
||||
$push: {
|
||||
content: {
|
||||
$each: content
|
||||
}
|
||||
},
|
||||
updateTime: new Date()
|
||||
});
|
||||
}
|
||||
jsonRes(res);
|
||||
} catch (err) {
|
||||
jsonRes(res, {
|
||||
|
||||
@@ -1,19 +1,20 @@
|
||||
import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { createParser, ParsedEvent, ReconnectInterval } from 'eventsource-parser';
|
||||
import { connectToDatabase } from '@/service/mongo';
|
||||
import { getOpenAIApi, authChat } from '@/service/utils/chat';
|
||||
import { httpsAgent, openaiChatFilter, systemPromptFilter } from '@/service/utils/tools';
|
||||
import { ChatCompletionRequestMessage, ChatCompletionRequestMessageRoleEnum } from 'openai';
|
||||
import { authChat } from '@/service/utils/auth';
|
||||
import { axiosConfig, systemPromptFilter, openaiChatFilter } from '@/service/utils/tools';
|
||||
import { ChatItemType } from '@/types/chat';
|
||||
import { jsonRes } from '@/service/response';
|
||||
import type { ModelSchema } from '@/types/mongoSchema';
|
||||
import { PassThrough } from 'stream';
|
||||
import { modelList } from '@/constants/model';
|
||||
import {
|
||||
modelList,
|
||||
ModelVectorSearchModeMap,
|
||||
ModelVectorSearchModeEnum,
|
||||
ModelDataStatusEnum
|
||||
} from '@/constants/model';
|
||||
import { pushChatBill } from '@/service/events/pushBill';
|
||||
import { connectRedis } from '@/service/redis';
|
||||
import { VecModelDataPrefix } from '@/constants/redis';
|
||||
import { vectorToBuffer } from '@/utils/tools';
|
||||
import { openaiCreateEmbedding } from '@/service/utils/openai';
|
||||
import { openaiCreateEmbedding, gpt35StreamResponse } from '@/service/utils/openai';
|
||||
import dayjs from 'dayjs';
|
||||
import { PgClient } from '@/service/pg';
|
||||
|
||||
/* 发送提示词 */
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
|
||||
@@ -32,30 +33,33 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
|
||||
});
|
||||
|
||||
try {
|
||||
const { chatId, prompt } = req.body as {
|
||||
const { modelId, chatId, prompt } = req.body as {
|
||||
modelId: string;
|
||||
chatId: '' | string;
|
||||
prompt: ChatItemType;
|
||||
chatId: string;
|
||||
};
|
||||
|
||||
const { authorization } = req.headers;
|
||||
if (!chatId || !prompt) {
|
||||
if (!modelId || !prompt) {
|
||||
throw new Error('缺少参数');
|
||||
}
|
||||
|
||||
await connectToDatabase();
|
||||
const redis = await connectRedis();
|
||||
let startTime = Date.now();
|
||||
|
||||
const { chat, userApiKey, systemKey, userId } = await authChat(chatId, authorization);
|
||||
const { model, content, userApiKey, systemKey, userId } = await authChat({
|
||||
modelId,
|
||||
chatId,
|
||||
authorization
|
||||
});
|
||||
|
||||
const model: ModelSchema = chat.modelId;
|
||||
const modelConstantsData = modelList.find((item) => item.model === model.service.modelName);
|
||||
if (!modelConstantsData) {
|
||||
throw new Error('模型加载异常');
|
||||
}
|
||||
|
||||
// 读取对话内容
|
||||
const prompts = [...chat.content, prompt];
|
||||
const prompts = [...content, prompt];
|
||||
|
||||
// 获取提示词的向量
|
||||
const { vector: promptVector, chatAPI } = await openaiCreateEmbedding({
|
||||
@@ -65,70 +69,70 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
|
||||
text: prompt.value
|
||||
});
|
||||
|
||||
// 搜索系统提示词, 按相似度从 redis 中搜出相关的 q 和 text
|
||||
const redisData: any[] = await redis.sendCommand([
|
||||
'FT.SEARCH',
|
||||
`idx:${VecModelDataPrefix}:hash`,
|
||||
`@modelId:{${String(
|
||||
chat.modelId._id
|
||||
)}} @vector:[VECTOR_RANGE 0.25 $blob]=>{$YIELD_DISTANCE_AS: score}`,
|
||||
// `@modelId:{${String(chat.modelId._id)}}=>[KNN 10 @vector $blob AS score]`,
|
||||
'RETURN',
|
||||
'1',
|
||||
'text',
|
||||
'SORTBY',
|
||||
'score',
|
||||
'PARAMS',
|
||||
'2',
|
||||
'blob',
|
||||
vectorToBuffer(promptVector),
|
||||
'LIMIT',
|
||||
'0',
|
||||
'20',
|
||||
'DIALECT',
|
||||
'2'
|
||||
]);
|
||||
|
||||
// 格式化响应值,获取 qa
|
||||
const formatRedisPrompt = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20]
|
||||
.map((i) => {
|
||||
if (!redisData[i]) return '';
|
||||
const text = (redisData[i][1] as string) || '';
|
||||
|
||||
if (!text) return '';
|
||||
|
||||
return text;
|
||||
})
|
||||
.filter((item) => item);
|
||||
|
||||
if (formatRedisPrompt.length === 0) {
|
||||
throw new Error('对不起,我没有找到你的问题');
|
||||
}
|
||||
|
||||
// textArr 筛选,最多 3000 tokens
|
||||
const systemPrompt = systemPromptFilter(formatRedisPrompt, 3400);
|
||||
|
||||
prompts.unshift({
|
||||
obj: 'SYSTEM',
|
||||
value: `${model.systemPrompt} 知识库内容是最新的,知识库内容为: "${systemPrompt}"`
|
||||
// 相似度搜素
|
||||
const similarity = ModelVectorSearchModeMap[model.search.mode]?.similarity || 0.22;
|
||||
const vectorSearch = await PgClient.select<{ id: string; q: string; a: string }>('modelData', {
|
||||
fields: ['id', 'q', 'a'],
|
||||
where: [
|
||||
['status', ModelDataStatusEnum.ready],
|
||||
'AND',
|
||||
['model_id', model._id],
|
||||
'AND',
|
||||
`vector <=> '[${promptVector}]' < ${similarity}`
|
||||
],
|
||||
order: [{ field: 'vector', mode: `<=> '[${promptVector}]'` }],
|
||||
limit: 20
|
||||
});
|
||||
|
||||
// 控制在 tokens 数量,防止超出
|
||||
const filterPrompts = openaiChatFilter(prompts, modelConstantsData.contextMaxToken);
|
||||
const formatRedisPrompt: string[] = vectorSearch.rows.map((item) => `${item.q}\n${item.a}`);
|
||||
|
||||
// 格式化文本内容成 chatgpt 格式
|
||||
const map = {
|
||||
Human: ChatCompletionRequestMessageRoleEnum.User,
|
||||
AI: ChatCompletionRequestMessageRoleEnum.Assistant,
|
||||
SYSTEM: ChatCompletionRequestMessageRoleEnum.System
|
||||
};
|
||||
const formatPrompts: ChatCompletionRequestMessage[] = filterPrompts.map(
|
||||
(item: ChatItemType) => ({
|
||||
role: map[item.obj],
|
||||
content: item.value
|
||||
})
|
||||
);
|
||||
// console.log(formatPrompts);
|
||||
/* 高相似度+退出,无法匹配时直接退出 */
|
||||
if (
|
||||
formatRedisPrompt.length === 0 &&
|
||||
model.search.mode === ModelVectorSearchModeEnum.hightSimilarity
|
||||
) {
|
||||
return res.send('对不起,你的问题不在知识库中。');
|
||||
}
|
||||
/* 高相似度+无上下文,不添加额外知识 */
|
||||
if (
|
||||
formatRedisPrompt.length === 0 &&
|
||||
model.search.mode === ModelVectorSearchModeEnum.noContext
|
||||
) {
|
||||
prompts.unshift({
|
||||
obj: 'SYSTEM',
|
||||
value: model.systemPrompt
|
||||
});
|
||||
} else {
|
||||
// 有匹配情况下,system 添加知识库内容。
|
||||
// 系统提示词过滤,最多 2500 tokens
|
||||
const systemPrompt = systemPromptFilter({
|
||||
model: model.service.chatModel,
|
||||
prompts: formatRedisPrompt,
|
||||
maxTokens: 2500
|
||||
});
|
||||
|
||||
prompts.unshift({
|
||||
obj: 'SYSTEM',
|
||||
value: `
|
||||
${model.systemPrompt}
|
||||
${
|
||||
model.search.mode === ModelVectorSearchModeEnum.hightSimilarity
|
||||
? `你只能从知识库选择内容回答.不在知识库内容拒绝回复`
|
||||
: ''
|
||||
}
|
||||
知识库内容为: 当前时间为${dayjs().format('YYYY/MM/DD HH:mm:ss')}\n${systemPrompt}'
|
||||
`
|
||||
});
|
||||
}
|
||||
|
||||
// 控制在 tokens 数量,防止超出
|
||||
const filterPrompts = openaiChatFilter({
|
||||
model: model.service.chatModel,
|
||||
prompts,
|
||||
maxTokens: modelConstantsData.contextMaxToken - 500
|
||||
});
|
||||
|
||||
// console.log(filterPrompts);
|
||||
// 计算温度
|
||||
const temperature = modelConstantsData.maxTemperature * (model.temperature / 10);
|
||||
|
||||
@@ -136,74 +140,37 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
|
||||
const chatResponse = await chatAPI.createChatCompletion(
|
||||
{
|
||||
model: model.service.chatModel,
|
||||
temperature: temperature,
|
||||
// max_tokens: modelConstantsData.maxToken,
|
||||
messages: formatPrompts,
|
||||
temperature,
|
||||
messages: filterPrompts,
|
||||
frequency_penalty: 0.5, // 越大,重复内容越少
|
||||
presence_penalty: -0.5, // 越大,越容易出现新内容
|
||||
stream: true
|
||||
stream: true,
|
||||
stop: ['.!?。']
|
||||
},
|
||||
{
|
||||
timeout: 40000,
|
||||
responseType: 'stream',
|
||||
httpsAgent
|
||||
...axiosConfig
|
||||
}
|
||||
);
|
||||
|
||||
console.log('api response time:', `${(Date.now() - startTime) / 1000}s`);
|
||||
|
||||
// 创建响应流
|
||||
res.setHeader('Content-Type', 'text/event-stream;charset-utf-8');
|
||||
res.setHeader('Access-Control-Allow-Origin', '*');
|
||||
res.setHeader('X-Accel-Buffering', 'no');
|
||||
res.setHeader('Cache-Control', 'no-cache, no-transform');
|
||||
step = 1;
|
||||
|
||||
let responseContent = '';
|
||||
stream.pipe(res);
|
||||
const { responseContent } = await gpt35StreamResponse({
|
||||
res,
|
||||
stream,
|
||||
chatResponse
|
||||
});
|
||||
|
||||
const onParse = async (event: ParsedEvent | ReconnectInterval) => {
|
||||
if (event.type !== 'event') return;
|
||||
const data = event.data;
|
||||
if (data === '[DONE]') return;
|
||||
try {
|
||||
const json = JSON.parse(data);
|
||||
const content: string = json?.choices?.[0].delta.content || '';
|
||||
if (!content || (responseContent === '' && content === '\n')) return;
|
||||
|
||||
responseContent += content;
|
||||
// console.log('content:', content)
|
||||
!stream.destroyed && stream.push(content.replace(/\n/g, '<br/>'));
|
||||
} catch (error) {
|
||||
error;
|
||||
}
|
||||
};
|
||||
|
||||
const decoder = new TextDecoder();
|
||||
try {
|
||||
for await (const chunk of chatResponse.data as any) {
|
||||
if (stream.destroyed) {
|
||||
// 流被中断了,直接忽略后面的内容
|
||||
break;
|
||||
}
|
||||
const parser = createParser(onParse);
|
||||
parser.feed(decoder.decode(chunk));
|
||||
}
|
||||
} catch (error) {
|
||||
console.log('pipe error', error);
|
||||
}
|
||||
// close stream
|
||||
!stream.destroyed && stream.push(null);
|
||||
stream.destroy();
|
||||
|
||||
const promptsContent = formatPrompts.map((item) => item.content).join('');
|
||||
// 只有使用平台的 key 才计费
|
||||
pushChatBill({
|
||||
isPay: !userApiKey,
|
||||
modelName: model.service.modelName,
|
||||
userId,
|
||||
chatId,
|
||||
text: promptsContent + responseContent
|
||||
messages: filterPrompts.concat({ role: 'assistant', content: responseContent })
|
||||
});
|
||||
// jsonRes(res);
|
||||
} catch (err: any) {
|
||||
|
||||
@@ -1,71 +0,0 @@
|
||||
// Next.js API route support: https://nextjs.org/docs/api-routes/introduction
|
||||
import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { jsonRes } from '@/service/response';
|
||||
import { connectToDatabase, Data, DataItem } from '@/service/mongo';
|
||||
import { authToken } from '@/service/utils/tools';
|
||||
import type { DataListItem } from '@/types/data';
|
||||
import mongoose from 'mongoose';
|
||||
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
|
||||
try {
|
||||
const { authorization } = req.headers;
|
||||
|
||||
if (!authorization) {
|
||||
throw new Error('缺少登录凭证');
|
||||
}
|
||||
|
||||
const userId = await authToken(authorization);
|
||||
|
||||
await connectToDatabase();
|
||||
|
||||
const datalist = await Data.aggregate<DataListItem>([
|
||||
{
|
||||
$match: {
|
||||
userId: new mongoose.Types.ObjectId(userId),
|
||||
isDeleted: false
|
||||
}
|
||||
},
|
||||
{
|
||||
$sort: { createTime: -1 } // 按照创建时间倒序排列
|
||||
},
|
||||
{
|
||||
$lookup: {
|
||||
from: 'dataitems',
|
||||
localField: '_id',
|
||||
foreignField: 'dataId',
|
||||
as: 'items'
|
||||
}
|
||||
},
|
||||
{
|
||||
$addFields: {
|
||||
totalData: {
|
||||
$size: '$items' // 统计dataItem的总数
|
||||
},
|
||||
trainingData: {
|
||||
$size: {
|
||||
$filter: {
|
||||
input: '$items',
|
||||
as: 'item',
|
||||
cond: { $ne: ['$$item.status', 0] } // 统计 status 不为0的数量
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
$project: {
|
||||
items: 0 // 不返回 items 字段
|
||||
}
|
||||
}
|
||||
]);
|
||||
|
||||
jsonRes(res, {
|
||||
data: datalist
|
||||
});
|
||||
} catch (err) {
|
||||
jsonRes(res, {
|
||||
code: 500,
|
||||
error: err
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -1,35 +0,0 @@
|
||||
import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { jsonRes } from '@/service/response';
|
||||
import { connectToDatabase, Data } from '@/service/mongo';
|
||||
import { authToken } from '@/service/utils/tools';
|
||||
import type { DataType } from '@/types/data';
|
||||
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
|
||||
try {
|
||||
let { name, type } = req.body as { name: string; type: DataType };
|
||||
if (!name || !type) {
|
||||
throw new Error('参数错误');
|
||||
}
|
||||
await connectToDatabase();
|
||||
|
||||
const { authorization } = req.headers;
|
||||
|
||||
const userId = await authToken(authorization);
|
||||
|
||||
// 生成 data 集合
|
||||
const data = await Data.create({
|
||||
userId,
|
||||
name,
|
||||
type
|
||||
});
|
||||
|
||||
jsonRes(res, {
|
||||
data: data._id
|
||||
});
|
||||
} catch (err) {
|
||||
jsonRes(res, {
|
||||
code: 500,
|
||||
error: err
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -1,69 +0,0 @@
|
||||
import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { jsonRes } from '@/service/response';
|
||||
import { connectToDatabase, DataItem, Data } from '@/service/mongo';
|
||||
import { authToken } from '@/service/utils/tools';
|
||||
import { generateQA } from '@/service/events/generateQA';
|
||||
import { generateAbstract } from '@/service/events/generateAbstract';
|
||||
import { encode } from 'gpt-token-utils';
|
||||
|
||||
/* 拆分数据成QA */
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
|
||||
try {
|
||||
const { text, dataId } = req.body as { text: string; dataId: string };
|
||||
if (!text || !dataId) {
|
||||
throw new Error('参数错误');
|
||||
}
|
||||
await connectToDatabase();
|
||||
|
||||
const { authorization } = req.headers;
|
||||
|
||||
const userId = await authToken(authorization);
|
||||
|
||||
const DataRecord = await Data.findById(dataId);
|
||||
|
||||
if (!DataRecord) {
|
||||
throw new Error('找不到数据集');
|
||||
}
|
||||
const replaceText = text.replace(/[\\n]+/g, ' ');
|
||||
|
||||
// 文本拆分成 chunk
|
||||
let chunks = replaceText.match(/[^!?.。]+[!?.。]/g) || [];
|
||||
|
||||
const dataItems: any[] = [];
|
||||
let splitText = '';
|
||||
|
||||
chunks.forEach((chunk) => {
|
||||
splitText += chunk;
|
||||
const tokens = encode(splitText).length;
|
||||
if (tokens >= 780) {
|
||||
dataItems.push({
|
||||
userId,
|
||||
dataId,
|
||||
type: DataRecord.type,
|
||||
text: splitText,
|
||||
status: 1
|
||||
});
|
||||
splitText = '';
|
||||
}
|
||||
});
|
||||
|
||||
// 批量插入数据
|
||||
await DataItem.insertMany(dataItems);
|
||||
|
||||
try {
|
||||
generateQA();
|
||||
generateAbstract();
|
||||
} catch (error) {
|
||||
error;
|
||||
}
|
||||
|
||||
jsonRes(res, {
|
||||
data: { chunks, replaceText }
|
||||
});
|
||||
} catch (err) {
|
||||
jsonRes(res, {
|
||||
code: 500,
|
||||
error: err
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -3,14 +3,14 @@ import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { jsonRes } from '@/service/response';
|
||||
import { connectToDatabase } from '@/service/mongo';
|
||||
import { authToken } from '@/service/utils/tools';
|
||||
import { ModelStatusEnum, modelList, ChatModelNameEnum, ChatModelNameMap } from '@/constants/model';
|
||||
import { ModelStatusEnum, modelList, ModelNameEnum, Model2ChatModelMap } from '@/constants/model';
|
||||
import { Model } from '@/service/models/model';
|
||||
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
|
||||
try {
|
||||
const { name, serviceModelName } = req.body as {
|
||||
name: string;
|
||||
serviceModelName: `${ChatModelNameEnum}`;
|
||||
serviceModelName: `${ModelNameEnum}`;
|
||||
};
|
||||
const { authorization } = req.headers;
|
||||
|
||||
@@ -47,9 +47,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse<
|
||||
userId,
|
||||
status: ModelStatusEnum.running,
|
||||
service: {
|
||||
company: modelItem.serviceCompany,
|
||||
trainId: '',
|
||||
chatModel: ChatModelNameMap[modelItem.model], // 聊天时用的模型
|
||||
chatModel: Model2ChatModelMap[modelItem.model], // 聊天时用的模型
|
||||
modelName: modelItem.model // 最底层的模型,不会变,用于计费等核心操作
|
||||
}
|
||||
});
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { jsonRes } from '@/service/response';
|
||||
import { authToken } from '@/service/utils/tools';
|
||||
import { connectRedis } from '@/service/redis';
|
||||
import { PgClient } from '@/service/pg';
|
||||
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
|
||||
try {
|
||||
@@ -21,15 +21,10 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse<
|
||||
// 凭证校验
|
||||
const userId = await authToken(authorization);
|
||||
|
||||
const redis = await connectRedis();
|
||||
await PgClient.delete('modelData', {
|
||||
where: [['user_id', userId], 'AND', ['id', dataId]]
|
||||
});
|
||||
|
||||
// 校验是否为该用户的数据
|
||||
const dataItemUserId = await redis.hGet(dataId, 'userId');
|
||||
if (dataItemUserId !== userId) {
|
||||
throw new Error('无权操作');
|
||||
}
|
||||
// 删除
|
||||
await redis.del(dataId);
|
||||
jsonRes(res);
|
||||
} catch (err) {
|
||||
console.log(err);
|
||||
|
||||
@@ -2,9 +2,7 @@ import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { jsonRes } from '@/service/response';
|
||||
import { connectToDatabase } from '@/service/mongo';
|
||||
import { authToken } from '@/service/utils/tools';
|
||||
import { connectRedis } from '@/service/redis';
|
||||
import { VecModelDataIdx } from '@/constants/redis';
|
||||
import { BufferToVector } from '@/utils/tools';
|
||||
import { PgClient } from '@/service/pg';
|
||||
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
|
||||
try {
|
||||
@@ -26,39 +24,26 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse<
|
||||
const userId = await authToken(authorization);
|
||||
|
||||
await connectToDatabase();
|
||||
const redis = await connectRedis();
|
||||
|
||||
// 从 redis 中获取数据
|
||||
const searchRes = await redis.ft.search(
|
||||
VecModelDataIdx,
|
||||
`@modelId:{${modelId}} @userId:{${userId}}`,
|
||||
{
|
||||
RETURN: ['q', 'text', 'rawVector'],
|
||||
LIMIT: {
|
||||
from: 0,
|
||||
size: 10000
|
||||
}
|
||||
}
|
||||
);
|
||||
// 统计数据
|
||||
const count = await PgClient.count('modelData', {
|
||||
where: [['model_id', modelId], 'AND', ['user_id', userId]]
|
||||
});
|
||||
// 从 pg 中获取所有数据
|
||||
const pgData = await PgClient.select<{ q: string; a: string }>('modelData', {
|
||||
where: [['model_id', modelId], 'AND', ['user_id', userId]],
|
||||
fields: ['q', 'a'],
|
||||
order: [{ field: 'id', mode: 'DESC' }],
|
||||
limit: count
|
||||
});
|
||||
|
||||
const data = searchRes.documents
|
||||
.filter((item) => {
|
||||
if (!item?.value?.rawVector) return false;
|
||||
try {
|
||||
JSON.parse(item.value.rawVector as string);
|
||||
return true;
|
||||
} catch (error) {
|
||||
return false;
|
||||
}
|
||||
})
|
||||
.map((item: any) => ({
|
||||
prompt: item.value.q,
|
||||
completion: item.value.text,
|
||||
vector: JSON.parse(item.value.rawVector)
|
||||
}));
|
||||
const data: [string, string][] = pgData.rows.map((item) => [
|
||||
item.q.replace(/\n/g, '\\n'),
|
||||
item.a.replace(/\n/g, '\\n')
|
||||
]);
|
||||
|
||||
jsonRes(res, {
|
||||
data: JSON.stringify(data)
|
||||
data
|
||||
});
|
||||
} catch (err) {
|
||||
jsonRes(res, {
|
||||
|
||||
36
src/pages/api/model/data/fetchingUrlData.ts
Normal file
@@ -0,0 +1,36 @@
|
||||
import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { jsonRes } from '@/service/response';
|
||||
import { connectToDatabase } from '@/service/mongo';
|
||||
import { authToken } from '@/service/utils/tools';
|
||||
import axios from 'axios';
|
||||
import { axiosConfig } from '@/service/utils/tools';
|
||||
|
||||
/**
|
||||
* 读取网站的内容
|
||||
*/
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
|
||||
try {
|
||||
const { url } = req.body as { url: string };
|
||||
if (!url) {
|
||||
throw new Error('缺少 url');
|
||||
}
|
||||
await connectToDatabase();
|
||||
|
||||
const { authorization } = req.headers;
|
||||
|
||||
await authToken(authorization);
|
||||
|
||||
const data = await axios
|
||||
.get(url, {
|
||||
httpsAgent: axiosConfig.httpsAgent
|
||||
})
|
||||
.then((res) => res.data as string);
|
||||
|
||||
jsonRes(res, { data });
|
||||
} catch (err) {
|
||||
jsonRes(res, {
|
||||
code: 500,
|
||||
error: err
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -2,22 +2,22 @@ import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { jsonRes } from '@/service/response';
|
||||
import { connectToDatabase } from '@/service/mongo';
|
||||
import { authToken } from '@/service/utils/tools';
|
||||
import { connectRedis } from '@/service/redis';
|
||||
import { VecModelDataIdx } from '@/constants/redis';
|
||||
import { SearchOptions } from 'redis';
|
||||
import { PgClient } from '@/service/pg';
|
||||
import type { PgModelDataItemType } from '@/types/pg';
|
||||
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
|
||||
try {
|
||||
let {
|
||||
modelId,
|
||||
pageNum = 1,
|
||||
pageSize = 10
|
||||
pageSize = 10,
|
||||
searchText = ''
|
||||
} = req.query as {
|
||||
modelId: string;
|
||||
pageNum: string;
|
||||
pageSize: string;
|
||||
searchText: string;
|
||||
};
|
||||
|
||||
const { authorization } = req.headers;
|
||||
|
||||
pageNum = +pageNum;
|
||||
@@ -35,34 +35,30 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse<
|
||||
const userId = await authToken(authorization);
|
||||
|
||||
await connectToDatabase();
|
||||
const redis = await connectRedis();
|
||||
|
||||
// 从 redis 中获取数据
|
||||
const searchRes = await redis.ft.search(
|
||||
VecModelDataIdx,
|
||||
`@modelId:{${modelId}} @userId:{${userId}}`,
|
||||
{
|
||||
RETURN: ['q', 'text', 'status'],
|
||||
LIMIT: {
|
||||
from: (pageNum - 1) * pageSize,
|
||||
size: pageSize
|
||||
},
|
||||
SORTBY: {
|
||||
BY: 'modelId',
|
||||
DIRECTION: 'DESC'
|
||||
}
|
||||
}
|
||||
);
|
||||
const where: any = [
|
||||
['user_id', userId],
|
||||
'AND',
|
||||
['model_id', modelId],
|
||||
...(searchText ? ['AND', `(q LIKE '%${searchText}%' OR a LIKE '%${searchText}%')`] : [])
|
||||
];
|
||||
|
||||
const searchRes = await PgClient.select<PgModelDataItemType>('modelData', {
|
||||
fields: ['id', 'q', 'a', 'status'],
|
||||
where,
|
||||
order: [{ field: 'id', mode: 'DESC' }],
|
||||
limit: pageSize,
|
||||
offset: pageSize * (pageNum - 1)
|
||||
});
|
||||
|
||||
jsonRes(res, {
|
||||
data: {
|
||||
pageNum,
|
||||
pageSize,
|
||||
data: searchRes.documents.map((item) => ({
|
||||
id: item.id,
|
||||
...item.value
|
||||
})),
|
||||
total: searchRes.total
|
||||
data: searchRes.rows,
|
||||
total: await PgClient.count('modelData', {
|
||||
where
|
||||
})
|
||||
}
|
||||
});
|
||||
} catch (err) {
|
||||
|
||||
@@ -24,7 +24,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
|
||||
});
|
||||
|
||||
jsonRes(res, {
|
||||
data
|
||||
data: data.map((item) => item.textList).flat().length
|
||||
});
|
||||
} catch (err) {
|
||||
jsonRes(res, {
|
||||
|
||||
99
src/pages/api/model/data/pushModelDataCsv.ts
Normal file
@@ -0,0 +1,99 @@
|
||||
import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { jsonRes } from '@/service/response';
|
||||
import { connectToDatabase, Model } from '@/service/mongo';
|
||||
import { authToken } from '@/service/utils/tools';
|
||||
import { generateVector } from '@/service/events/generateVector';
|
||||
import { ModelDataStatusEnum } from '@/constants/model';
|
||||
import { PgClient } from '@/service/pg';
|
||||
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
|
||||
try {
|
||||
const { modelId, data } = req.body as {
|
||||
modelId: string;
|
||||
data: string[][];
|
||||
};
|
||||
const { authorization } = req.headers;
|
||||
|
||||
if (!authorization) {
|
||||
throw new Error('无权操作');
|
||||
}
|
||||
|
||||
if (!modelId || !Array.isArray(data)) {
|
||||
throw new Error('缺少参数');
|
||||
}
|
||||
|
||||
// 凭证校验
|
||||
const userId = await authToken(authorization);
|
||||
|
||||
await connectToDatabase();
|
||||
|
||||
// 验证是否是该用户的 model
|
||||
const model = await Model.findOne({
|
||||
_id: modelId,
|
||||
userId
|
||||
});
|
||||
|
||||
if (!model) {
|
||||
throw new Error('无权操作该模型');
|
||||
}
|
||||
|
||||
// 去重
|
||||
const searchRes = await Promise.allSettled(
|
||||
data.map(async ([q, a]) => {
|
||||
if (!q || !a) {
|
||||
return Promise.reject('q/a为空');
|
||||
}
|
||||
try {
|
||||
q = q.replace(/\\n/g, '\n');
|
||||
a = a.replace(/\\n/g, '\n');
|
||||
const count = await PgClient.count('modelData', {
|
||||
where: [
|
||||
['user_id', userId],
|
||||
'AND',
|
||||
['model_id', modelId],
|
||||
'AND',
|
||||
['q', q],
|
||||
'AND',
|
||||
['a', a]
|
||||
]
|
||||
});
|
||||
if (count > 0) {
|
||||
return Promise.reject('已经存在');
|
||||
}
|
||||
} catch (error) {
|
||||
error;
|
||||
}
|
||||
return Promise.resolve({
|
||||
q,
|
||||
a
|
||||
});
|
||||
})
|
||||
);
|
||||
// 过滤重复的内容
|
||||
const filterData = searchRes
|
||||
.filter((item) => item.status === 'fulfilled')
|
||||
.map<{ q: string; a: string }>((item: any) => item.value);
|
||||
|
||||
// 插入 pg
|
||||
const insertRes = await PgClient.insert('modelData', {
|
||||
values: filterData.map((item) => [
|
||||
{ key: 'user_id', value: userId },
|
||||
{ key: 'model_id', value: modelId },
|
||||
{ key: 'q', value: item.q },
|
||||
{ key: 'a', value: item.a },
|
||||
{ key: 'status', value: ModelDataStatusEnum.waiting }
|
||||
])
|
||||
});
|
||||
|
||||
generateVector();
|
||||
|
||||
jsonRes(res, {
|
||||
data: insertRes.rowCount
|
||||
});
|
||||
} catch (err) {
|
||||
jsonRes(res, {
|
||||
code: 500,
|
||||
error: err
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -4,14 +4,13 @@ import { connectToDatabase, Model } from '@/service/mongo';
|
||||
import { authToken } from '@/service/utils/tools';
|
||||
import { ModelDataSchema } from '@/types/mongoSchema';
|
||||
import { generateVector } from '@/service/events/generateVector';
|
||||
import { connectRedis } from '@/service/redis';
|
||||
import { VecModelDataPrefix, ModelDataStatusEnum } from '@/constants/redis';
|
||||
import { PgClient } from '@/service/pg';
|
||||
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
|
||||
try {
|
||||
const { modelId, data } = req.body as {
|
||||
modelId: string;
|
||||
data: { text: ModelDataSchema['text']; q: ModelDataSchema['q'] }[];
|
||||
data: { a: ModelDataSchema['a']; q: ModelDataSchema['q'] }[];
|
||||
};
|
||||
const { authorization } = req.headers;
|
||||
|
||||
@@ -27,7 +26,6 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse<
|
||||
const userId = await authToken(authorization);
|
||||
|
||||
await connectToDatabase();
|
||||
const redis = await connectRedis();
|
||||
|
||||
// 验证是否是该用户的 model
|
||||
const model = await Model.findOne({
|
||||
@@ -39,29 +37,21 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse<
|
||||
throw new Error('无权操作该模型');
|
||||
}
|
||||
|
||||
const insertRes = await Promise.allSettled(
|
||||
data.map((item) => {
|
||||
return redis.sendCommand([
|
||||
'HMSET',
|
||||
`${VecModelDataPrefix}:${item.q.id}`,
|
||||
'userId',
|
||||
userId,
|
||||
'modelId',
|
||||
modelId,
|
||||
'q',
|
||||
item.q.text,
|
||||
'text',
|
||||
item.text,
|
||||
'status',
|
||||
ModelDataStatusEnum.waiting
|
||||
]);
|
||||
})
|
||||
);
|
||||
// 插入记录
|
||||
await PgClient.insert('modelData', {
|
||||
values: data.map((item) => [
|
||||
{ key: 'user_id', value: userId },
|
||||
{ key: 'model_id', value: modelId },
|
||||
{ key: 'q', value: item.q },
|
||||
{ key: 'a', value: item.a },
|
||||
{ key: 'status', value: 'waiting' }
|
||||
])
|
||||
});
|
||||
|
||||
generateVector(true);
|
||||
generateVector();
|
||||
|
||||
jsonRes(res, {
|
||||
data: insertRes.filter((item) => item.status === 'rejected').length
|
||||
data: 0
|
||||
});
|
||||
} catch (err) {
|
||||
jsonRes(res, {
|
||||
|
||||
@@ -1,80 +0,0 @@
|
||||
import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { jsonRes } from '@/service/response';
|
||||
import { connectToDatabase, Model } from '@/service/mongo';
|
||||
import { authToken } from '@/service/utils/tools';
|
||||
import { generateVector } from '@/service/events/generateVector';
|
||||
import { vectorToBuffer, formatVector } from '@/utils/tools';
|
||||
import { connectRedis } from '@/service/redis';
|
||||
import { VecModelDataPrefix, ModelDataStatusEnum } from '@/constants/redis';
|
||||
import { customAlphabet } from 'nanoid';
|
||||
const nanoid = customAlphabet('abcdefghijklmnopqrstuvwxyz1234567890', 12);
|
||||
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
|
||||
try {
|
||||
const { modelId, data } = req.body as {
|
||||
modelId: string;
|
||||
data: { prompt: string; completion: string; vector?: number[] }[];
|
||||
};
|
||||
const { authorization } = req.headers;
|
||||
|
||||
if (!authorization) {
|
||||
throw new Error('无权操作');
|
||||
}
|
||||
|
||||
if (!modelId || !Array.isArray(data)) {
|
||||
throw new Error('缺少参数');
|
||||
}
|
||||
|
||||
// 凭证校验
|
||||
const userId = await authToken(authorization);
|
||||
|
||||
await connectToDatabase();
|
||||
const redis = await connectRedis();
|
||||
|
||||
// 验证是否是该用户的 model
|
||||
const model = await Model.findOne({
|
||||
_id: modelId,
|
||||
userId
|
||||
});
|
||||
|
||||
if (!model) {
|
||||
throw new Error('无权操作该模型');
|
||||
}
|
||||
|
||||
// 插入 redis
|
||||
const insertRedisRes = await Promise.allSettled(
|
||||
data.map((item) => {
|
||||
const vector = item.vector;
|
||||
|
||||
return redis.sendCommand([
|
||||
'HMSET',
|
||||
`${VecModelDataPrefix}:${nanoid()}`,
|
||||
'userId',
|
||||
userId,
|
||||
'modelId',
|
||||
String(modelId),
|
||||
...(vector
|
||||
? ['vector', vectorToBuffer(formatVector(vector)), 'rawVector', JSON.stringify(vector)]
|
||||
: []),
|
||||
'q',
|
||||
item.prompt,
|
||||
'text',
|
||||
item.completion,
|
||||
'status',
|
||||
vector ? ModelDataStatusEnum.ready : ModelDataStatusEnum.waiting
|
||||
]);
|
||||
})
|
||||
);
|
||||
|
||||
generateVector(true);
|
||||
|
||||
jsonRes(res, {
|
||||
data: insertRedisRes.filter((item) => item.status === 'rejected').length
|
||||
});
|
||||
} catch (err) {
|
||||
jsonRes(res, {
|
||||
code: 500,
|
||||
error: err
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -1,13 +1,13 @@
|
||||
import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { jsonRes } from '@/service/response';
|
||||
import { authToken } from '@/service/utils/tools';
|
||||
import { connectRedis } from '@/service/redis';
|
||||
import { ModelDataStatusEnum } from '@/constants/redis';
|
||||
import { ModelDataStatusEnum } from '@/constants/model';
|
||||
import { generateVector } from '@/service/events/generateVector';
|
||||
import { PgClient } from '@/service/pg';
|
||||
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
|
||||
try {
|
||||
const { dataId, text, q } = req.body as { dataId: string; text: string; q?: string };
|
||||
const { dataId, a, q } = req.body as { dataId: string; a: string; q?: string };
|
||||
const { authorization } = req.headers;
|
||||
|
||||
if (!authorization) {
|
||||
@@ -21,26 +21,21 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse<
|
||||
// 凭证校验
|
||||
const userId = await authToken(authorization);
|
||||
|
||||
const redis = await connectRedis();
|
||||
// 更新 pg 内容
|
||||
await PgClient.update('modelData', {
|
||||
where: [['id', dataId], 'AND', ['user_id', userId]],
|
||||
values: [
|
||||
{ key: 'a', value: a },
|
||||
...(q
|
||||
? [
|
||||
{ key: 'q', value: q },
|
||||
{ key: 'status', value: ModelDataStatusEnum.waiting }
|
||||
]
|
||||
: [])
|
||||
]
|
||||
});
|
||||
|
||||
// 校验是否为该用户的数据
|
||||
const dataItemUserId = await redis.hGet(dataId, 'userId');
|
||||
if (dataItemUserId !== userId) {
|
||||
throw new Error('无权操作');
|
||||
}
|
||||
|
||||
// 更新
|
||||
await redis.sendCommand([
|
||||
'HMSET',
|
||||
dataId,
|
||||
...(q ? ['q', q, 'status', ModelDataStatusEnum.waiting] : []),
|
||||
'text',
|
||||
text
|
||||
]);
|
||||
|
||||
if (q) {
|
||||
generateVector();
|
||||
}
|
||||
q && generateVector();
|
||||
|
||||
jsonRes(res);
|
||||
} catch (err) {
|
||||
|
||||
@@ -2,14 +2,20 @@ import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { jsonRes } from '@/service/response';
|
||||
import { connectToDatabase, SplitData, Model } from '@/service/mongo';
|
||||
import { authToken } from '@/service/utils/tools';
|
||||
import { generateVector } from '@/service/events/generateVector';
|
||||
import { generateQA } from '@/service/events/generateQA';
|
||||
import { encode } from 'gpt-token-utils';
|
||||
import { PgClient } from '@/service/pg';
|
||||
|
||||
/* 拆分数据成QA */
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
|
||||
try {
|
||||
const { text, modelId, prompt } = req.body as { text: string; modelId: string; prompt: string };
|
||||
if (!text || !modelId || !prompt) {
|
||||
const { chunks, modelId, prompt, mode } = req.body as {
|
||||
modelId: string;
|
||||
chunks: string[];
|
||||
prompt: string;
|
||||
mode: 'qa' | 'subsection';
|
||||
};
|
||||
if (!chunks || !modelId || !prompt) {
|
||||
throw new Error('参数错误');
|
||||
}
|
||||
await connectToDatabase();
|
||||
@@ -28,46 +34,31 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
|
||||
throw new Error('无权操作该模型');
|
||||
}
|
||||
|
||||
const replaceText = text.replace(/(\\n|\n)+/g, ' ');
|
||||
if (mode === 'qa') {
|
||||
// 批量QA拆分插入数据
|
||||
await SplitData.create({
|
||||
userId,
|
||||
modelId,
|
||||
textList: chunks,
|
||||
prompt
|
||||
});
|
||||
|
||||
// 文本拆分成 chunk
|
||||
const chunks = replaceText.match(/[^!?.。]+[!?.。]/g) || [];
|
||||
generateQA();
|
||||
} else if (mode === 'subsection') {
|
||||
// 插入记录
|
||||
await PgClient.insert('modelData', {
|
||||
values: chunks.map((item) => [
|
||||
{ key: 'user_id', value: userId },
|
||||
{ key: 'model_id', value: modelId },
|
||||
{ key: 'q', value: item },
|
||||
{ key: 'a', value: '' },
|
||||
{ key: 'status', value: 'waiting' }
|
||||
])
|
||||
});
|
||||
|
||||
const textList: string[] = [];
|
||||
let splitText = '';
|
||||
|
||||
/* 取 3k ~ 4K tokens 内容 */
|
||||
chunks.forEach((chunk) => {
|
||||
const tokens = encode(splitText + chunk).length;
|
||||
if (tokens >= 4000) {
|
||||
// 超过 4000,不要这块内容
|
||||
textList.push(splitText);
|
||||
splitText = chunk;
|
||||
} else if (tokens >= 3000) {
|
||||
// 超过 3000,取内容
|
||||
textList.push(splitText + chunk);
|
||||
splitText = '';
|
||||
} else {
|
||||
//没超过 3000,继续添加
|
||||
splitText += chunk;
|
||||
}
|
||||
});
|
||||
|
||||
if (splitText) {
|
||||
textList.push(splitText);
|
||||
generateVector();
|
||||
}
|
||||
|
||||
// 批量插入数据
|
||||
await SplitData.create({
|
||||
userId,
|
||||
modelId,
|
||||
rawText: text,
|
||||
textList,
|
||||
prompt
|
||||
});
|
||||
|
||||
generateQA();
|
||||
|
||||
jsonRes(res);
|
||||
} catch (err) {
|
||||
jsonRes(res, {
|
||||
@@ -76,3 +67,11 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
export const config = {
|
||||
api: {
|
||||
bodyParser: {
|
||||
sizeLimit: '10mb'
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
@@ -1,18 +1,13 @@
|
||||
import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { jsonRes } from '@/service/response';
|
||||
import { Chat, Model, Training, connectToDatabase } from '@/service/mongo';
|
||||
import { Chat, Model, connectToDatabase } from '@/service/mongo';
|
||||
import { authToken } from '@/service/utils/tools';
|
||||
import { getUserApiOpenai } from '@/service/utils/openai';
|
||||
import { TrainingStatusEnum } from '@/constants/model';
|
||||
import { TrainingItemType } from '@/types/training';
|
||||
import { httpsAgent } from '@/service/utils/tools';
|
||||
import { connectRedis } from '@/service/redis';
|
||||
import { VecModelDataIdx } from '@/constants/redis';
|
||||
import { PgClient } from '@/service/pg';
|
||||
|
||||
/* 获取我的模型 */
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
|
||||
try {
|
||||
const { modelId } = req.query;
|
||||
const { modelId } = req.query as { modelId: string };
|
||||
const { authorization } = req.headers;
|
||||
|
||||
if (!authorization) {
|
||||
@@ -37,50 +32,17 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse<
|
||||
}
|
||||
|
||||
await connectToDatabase();
|
||||
const redis = await connectRedis();
|
||||
|
||||
// 获取 redis 中模型关联的所有数据
|
||||
const searchRes = await redis.ft.search(
|
||||
VecModelDataIdx,
|
||||
`@modelId:{${modelId}} @userId:{${userId}}`,
|
||||
{
|
||||
LIMIT: {
|
||||
from: 0,
|
||||
size: 10000
|
||||
}
|
||||
}
|
||||
);
|
||||
// 删除 redis 内容
|
||||
await Promise.all(searchRes.documents.map((item) => redis.del(item.id)));
|
||||
// 删除 pg 中所有该模型的数据
|
||||
await PgClient.delete('modelData', {
|
||||
where: [['user_id', userId], 'AND', ['model_id', modelId]]
|
||||
});
|
||||
|
||||
// 删除对应的聊天
|
||||
await Chat.deleteMany({
|
||||
modelId
|
||||
});
|
||||
|
||||
// 查看是否正在训练
|
||||
const training: TrainingItemType | null = await Training.findOne({
|
||||
modelId,
|
||||
status: TrainingStatusEnum.pending
|
||||
});
|
||||
|
||||
// 如果正在训练,需要删除openai上的相关信息
|
||||
if (training) {
|
||||
const { openai } = await getUserApiOpenai(userId);
|
||||
// 获取训练记录
|
||||
const tuneRecord = await openai.retrieveFineTune(training.tuneId, { httpsAgent });
|
||||
|
||||
// 删除训练文件
|
||||
openai.deleteFile(tuneRecord.data.training_files[0].id, { httpsAgent });
|
||||
// 取消训练
|
||||
openai.cancelFineTune(training.tuneId, { httpsAgent });
|
||||
}
|
||||
|
||||
// 删除对应训练记录
|
||||
await Training.deleteMany({
|
||||
modelId
|
||||
});
|
||||
|
||||
// 删除模型
|
||||
await Model.deleteOne({
|
||||
_id: modelId,
|
||||
|
||||
@@ -1,52 +0,0 @@
|
||||
import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { jsonRes } from '@/service/response';
|
||||
import { connectToDatabase, Training } from '@/service/mongo';
|
||||
import { authToken } from '@/service/utils/tools';
|
||||
|
||||
// 关闭next默认的bodyParser处理方式
|
||||
export const config = {
|
||||
api: {
|
||||
bodyParser: false
|
||||
}
|
||||
};
|
||||
|
||||
/* 获取模型训练记录 */
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
|
||||
try {
|
||||
const { authorization } = req.headers;
|
||||
|
||||
if (!authorization) {
|
||||
throw new Error('无权操作');
|
||||
}
|
||||
const { modelId } = req.query;
|
||||
if (!modelId) {
|
||||
throw new Error('参数错误');
|
||||
}
|
||||
await authToken(authorization);
|
||||
|
||||
await connectToDatabase();
|
||||
|
||||
/* 获取 modelId 下的 training 记录 */
|
||||
const records = await Training.find({
|
||||
modelId
|
||||
});
|
||||
|
||||
jsonRes(res, {
|
||||
data: records
|
||||
});
|
||||
} catch (err: any) {
|
||||
/* 清除上传的文件,关闭训练记录 */
|
||||
// @ts-ignore
|
||||
if (openai) {
|
||||
// @ts-ignore
|
||||
uploadFileId && openai.deleteFile(uploadFileId);
|
||||
// @ts-ignore
|
||||
trainId && openai.cancelFineTune(trainId);
|
||||
}
|
||||
|
||||
jsonRes(res, {
|
||||
code: 500,
|
||||
error: err
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -1,104 +0,0 @@
|
||||
import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { jsonRes } from '@/service/response';
|
||||
import { connectToDatabase, Model, Training } from '@/service/mongo';
|
||||
import { authToken } from '@/service/utils/tools';
|
||||
import { getUserApiOpenai } from '@/service/utils/openai';
|
||||
import type { ModelSchema } from '@/types/mongoSchema';
|
||||
import { TrainingItemType } from '@/types/training';
|
||||
import { ModelStatusEnum, TrainingStatusEnum } from '@/constants/model';
|
||||
import { OpenAiTuneStatusEnum } from '@/service/constants/training';
|
||||
import { httpsAgent } from '@/service/utils/tools';
|
||||
|
||||
/* 更新训练状态 */
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
|
||||
try {
|
||||
const { authorization } = req.headers;
|
||||
|
||||
if (!authorization) {
|
||||
throw new Error('无权操作');
|
||||
}
|
||||
const { modelId } = req.query as { modelId: string };
|
||||
if (!modelId) {
|
||||
throw new Error('参数错误');
|
||||
}
|
||||
const userId = await authToken(authorization);
|
||||
|
||||
await connectToDatabase();
|
||||
|
||||
// 获取模型
|
||||
const model = await Model.findById<ModelSchema>(modelId);
|
||||
|
||||
if (!model || model.status !== 'training') {
|
||||
throw new Error('模型不在训练中');
|
||||
}
|
||||
|
||||
// 查询正在训练中的训练记录
|
||||
const training: TrainingItemType | null = await Training.findOne({
|
||||
modelId,
|
||||
status: 'pending'
|
||||
});
|
||||
|
||||
if (!training) {
|
||||
throw new Error('找不到训练记录');
|
||||
}
|
||||
|
||||
// 用户的 openai 实例
|
||||
const { openai } = await getUserApiOpenai(userId);
|
||||
|
||||
// 获取 openai 的训练情况
|
||||
const { data } = await openai.retrieveFineTune(training.tuneId, { httpsAgent });
|
||||
// console.log(data);
|
||||
if (data.status === OpenAiTuneStatusEnum.succeeded) {
|
||||
// 删除训练文件
|
||||
openai.deleteFile(data.training_files[0].id, { httpsAgent });
|
||||
|
||||
// 更新模型状态和模型内容
|
||||
await Model.findByIdAndUpdate(modelId, {
|
||||
status: ModelStatusEnum.running,
|
||||
updateTime: new Date(),
|
||||
service: {
|
||||
...model.service,
|
||||
trainId: data.fine_tuned_model, // 训练完后,再次训练和对话使用的 model 是一样的
|
||||
chatModel: data.fine_tuned_model
|
||||
}
|
||||
});
|
||||
// 更新训练数据
|
||||
await Training.findByIdAndUpdate(training._id, {
|
||||
status: TrainingStatusEnum.succeed
|
||||
});
|
||||
|
||||
return jsonRes(res, {
|
||||
data: '模型微调完成'
|
||||
});
|
||||
}
|
||||
|
||||
/* 取消微调 */
|
||||
if (data.status === OpenAiTuneStatusEnum.cancelled) {
|
||||
// 删除训练文件
|
||||
openai.deleteFile(data.training_files[0].id, { httpsAgent });
|
||||
|
||||
// 更新模型
|
||||
await Model.findByIdAndUpdate(modelId, {
|
||||
status: ModelStatusEnum.running,
|
||||
updateTime: new Date()
|
||||
});
|
||||
// 更新训练数据
|
||||
await Training.findByIdAndUpdate(training._id, {
|
||||
status: TrainingStatusEnum.canceled
|
||||
});
|
||||
|
||||
return jsonRes(res, {
|
||||
data: '模型微调已取消'
|
||||
});
|
||||
}
|
||||
|
||||
jsonRes(res, {
|
||||
data: '模型还在训练中'
|
||||
});
|
||||
} catch (err: any) {
|
||||
jsonRes(res, {
|
||||
code: 500,
|
||||
error: err
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -1,130 +0,0 @@
|
||||
// Next.js API route support: https://nextjs.org/docs/api-routes/introduction
|
||||
import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { jsonRes } from '@/service/response';
|
||||
import { connectToDatabase, Model, Training } from '@/service/mongo';
|
||||
import formidable from 'formidable';
|
||||
import { authToken } from '@/service/utils/tools';
|
||||
import { getUserApiOpenai } from '@/service/utils/openai';
|
||||
import { join } from 'path';
|
||||
import fs from 'fs';
|
||||
import type { ModelSchema } from '@/types/mongoSchema';
|
||||
import type { OpenAIApi } from 'openai';
|
||||
import { ModelStatusEnum, TrainingStatusEnum } from '@/constants/model';
|
||||
import { httpsAgent } from '@/service/utils/tools';
|
||||
|
||||
// 关闭next默认的bodyParser处理方式
|
||||
export const config = {
|
||||
api: {
|
||||
bodyParser: false
|
||||
}
|
||||
};
|
||||
|
||||
/* 上传文件,开始微调 */
|
||||
export default async function handler(req: NextApiRequest, res: NextApiResponse) {
|
||||
let openai: OpenAIApi, trainId: string, uploadFileId: string;
|
||||
|
||||
try {
|
||||
const { authorization } = req.headers;
|
||||
|
||||
if (!authorization) {
|
||||
throw new Error('无权操作');
|
||||
}
|
||||
const { modelId } = req.query;
|
||||
|
||||
if (!modelId) {
|
||||
throw new Error('参数错误');
|
||||
}
|
||||
const userId = await authToken(authorization);
|
||||
|
||||
await connectToDatabase();
|
||||
|
||||
// 获取模型的状态
|
||||
const model = await Model.findById<ModelSchema>(modelId);
|
||||
|
||||
if (!model || model.status !== 'running') {
|
||||
throw new Error('模型正忙');
|
||||
}
|
||||
|
||||
// const trainingType = model.service.modelType
|
||||
const trainingType = model.service.trainId; // 目前都默认是 openai text-davinci-03
|
||||
|
||||
// 获取用户的 API Key 实例化后的对象
|
||||
const user = await getUserApiOpenai(userId);
|
||||
openai = user.openai;
|
||||
|
||||
// 接收文件并保存
|
||||
const form = formidable({
|
||||
uploadDir: join(process.cwd(), 'public/trainData'),
|
||||
keepExtensions: true
|
||||
});
|
||||
|
||||
const { files } = await new Promise<{
|
||||
fields: formidable.Fields;
|
||||
files: formidable.Files;
|
||||
}>((resolve, reject) => {
|
||||
form.parse(req, (err, fields, files) => {
|
||||
if (err) return reject(err);
|
||||
resolve({ fields, files });
|
||||
});
|
||||
});
|
||||
const file = files.file;
|
||||
|
||||
// 上传文件到 openai
|
||||
// @ts-ignore
|
||||
const uploadRes = await openai.createFile(
|
||||
// @ts-ignore
|
||||
fs.createReadStream(file.filepath),
|
||||
'fine-tune',
|
||||
{ httpsAgent }
|
||||
);
|
||||
uploadFileId = uploadRes.data.id; // 记录上传文件的 ID
|
||||
|
||||
// 开始训练
|
||||
const trainRes = await openai.createFineTune(
|
||||
{
|
||||
training_file: uploadFileId,
|
||||
model: trainingType,
|
||||
suffix: model.name,
|
||||
n_epochs: 4
|
||||
},
|
||||
{ httpsAgent }
|
||||
);
|
||||
|
||||
trainId = trainRes.data.id; // 记录训练 ID
|
||||
|
||||
// 创建训练记录
|
||||
await Training.create({
|
||||
serviceName: 'openai',
|
||||
tuneId: trainId,
|
||||
status: TrainingStatusEnum.pending,
|
||||
modelId
|
||||
});
|
||||
|
||||
// 修改模型状态
|
||||
await Model.findByIdAndUpdate(modelId, {
|
||||
$inc: {
|
||||
trainingTimes: +1
|
||||
},
|
||||
updateTime: new Date(),
|
||||
status: ModelStatusEnum.training
|
||||
});
|
||||
|
||||
jsonRes(res, {
|
||||
data: 'start training'
|
||||
});
|
||||
} catch (err: any) {
|
||||
/* 清除上传的文件,关闭训练记录 */
|
||||
// @ts-ignore
|
||||
if (openai) {
|
||||
// @ts-ignore
|
||||
uploadFileId && openai.deleteFile(uploadFileId, { httpsAgent });
|
||||
// @ts-ignore
|
||||
trainId && openai.cancelFineTune(trainId, { httpsAgent });
|
||||
}
|
||||
|
||||
jsonRes(res, {
|
||||
code: 500,
|
||||
error: err
|
||||
});
|
||||
}
|
||||
}
|
||||