Compare commits
67 Commits
v4.8.23...v4.9.1-alp
| SHA1 |
| --- |
| 93f7747904 |
| 6aa5e2c200 |
| d8712d4092 |
| 561a496f80 |
| bd369d3b09 |
| 3e21030536 |
| 7ec4ba7067 |
| 2d22af3cce |
| 4346c5703a |
| f71ab0caeb |
| c131c2a7dc |
| d052d0de53 |
| d1ce3e2936 |
| c301dafca7 |
| 1a3613cd2c |
| 30f83f848d |
| ac7091f8d6 |
| 16832caaf6 |
| a3df9ea531 |
| bcd0b010a6 |
| 3f794baf2e |
| 92b2ecc381 |
| 4dbe41db0e |
| f9dd170895 |
| a7b09461be |
| cd2cb3f6ea |
| 56f77b58c9 |
| 1d697f97d7 |
| bb30ca4859 |
| 139e934345 |
| bf69aa6e3d |
| 3a730a23cb |
| 75cb46796a |
| effdb5884b |
| 9523ba92f3 |
| 2f522aff90 |
| 0dccfd176d |
| 867e8acf27 |
| 36da8c862f |
| b50cf49cc7 |
| 2270e149eb |
| 4957bdcba1 |
| bca5cf738a |
| c35bb5841c |
| 6e045093b1 |
| a1b114e426 |
| 54fde7630c |
| 467c408ad7 |
| c005a94454 |
| c8a35822d6 |
| d05259dedd |
| 8980664b8a |
| 43f30b3790 |
| 3ddbb37612 |
| 7c419a26b3 |
| e131465d25 |
| a345e56508 |
| 32ce032995 |
| 0bc075aa4e |
| 3e3f2165db |
| e1aa068858 |
| e98d6f1d30 |
| 54eb5c0547 |
| adf5377ebe |
| 08b6f594df |
| 90d13ee3df |
| 5c718abd50 |
2 .github/workflows/docs-deploy-kubeconfig.yml (vendored)
@@ -6,8 +6,6 @@ on:
      - 'docSite/**'
    branches:
      - 'main'
    tags:
      - 'v*.*.*'

jobs:
  build-fastgpt-docs-images:
2 .github/workflows/docs-deploy-vercel.yml (vendored)
@@ -7,8 +7,6 @@ on:
      - 'docSite/**'
    branches:
      - 'main'
    tags:
      - 'v*.*.*'

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
2 .github/workflows/docs-preview.yml (vendored)
@@ -4,8 +4,6 @@ on:
  pull_request_target:
    paths:
      - 'docSite/**'
    branches:
      - 'main'
  workflow_dispatch:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
6 .github/workflows/fastgpt-build-image.yml (vendored)
@@ -26,7 +26,7 @@ jobs:
        with:
          driver-opts: network=host
      - name: Cache Docker layers
-       uses: actions/cache@v2
+       uses: actions/cache@v4
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ github.sha }}
@@ -108,7 +108,7 @@ jobs:
        with:
          driver-opts: network=host
      - name: Cache Docker layers
-       uses: actions/cache@v2
+       uses: actions/cache@v4
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ github.sha }}
@@ -191,7 +191,7 @@ jobs:
        with:
          driver-opts: network=host
      - name: Cache Docker layers
-       uses: actions/cache@v2
+       uses: actions/cache@v4
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ github.sha }}
29 .github/workflows/fastgpt-test.yaml (vendored, new file)
@@ -0,0 +1,29 @@
name: 'FastGPT-Test'
on:
  pull_request:
  workflow_dispatch:

jobs:
  test:
    runs-on: ubuntu-latest

    permissions:
      # Required to checkout the code
      contents: read
      # Required to put a comment into the pull-request
      pull-requests: write

    steps:
      - uses: actions/checkout@v4
      - uses: pnpm/action-setup@v4
        with:
          version: 10
      - name: 'Install Deps'
        run: pnpm install
      - name: 'Test'
        run: pnpm run test
      - name: 'Report Coverage'
        # Set if: always() to also generate the report if tests are failing
        # Only works if you set `reportOnFailure: true` in your vite config as specified above
        if: always()
        uses: davelosert/vitest-coverage-report-action@v2
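The `Report Coverage` step relies on coverage output that `davelosert/vitest-coverage-report-action` can read. A minimal sketch of the matching Vitest configuration, assuming a root-level `vitest.config.ts` (the actual file layout in this repo may differ); the `json-summary` reporter and `reportOnFailure: true` are what the action's documentation calls for:

```ts
// vitest.config.ts — hypothetical example, not the repo's actual config
import { defineConfig } from 'vitest/config';

export default defineConfig({
  test: {
    coverage: {
      provider: 'v8', // matches the @vitest/coverage-v8 devDependency added in package.json
      // json-summary is required by vitest-coverage-report-action; json enables per-file detail
      reporter: ['text', 'json-summary', 'json'],
      // generate the report even when tests fail, so the step with `if: always()` has data
      reportOnFailure: true
    }
  }
});
```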
2 .github/workflows/sandbox-build-image.yml (vendored)
@@ -25,7 +25,7 @@ jobs:
        with:
          driver-opts: network=host
      - name: Cache Docker layers
-       uses: actions/cache@v2
+       uses: actions/cache@v4
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ github.sha }}
1 .gitignore (vendored)
@@ -44,3 +44,4 @@ files/helm/fastgpt/fastgpt-0.1.0.tgz
 files/helm/fastgpt/charts/*.tgz

 tmp/
+coverage

@@ -5,4 +5,5 @@ node_modules
 docSite/
 *.md

 pnpm-lock.yaml
+cl100l_base.ts
4 .vscode/settings.json (vendored)
@@ -27,7 +27,5 @@
  },
  "markdown.copyFiles.destination": {
    "/docSite/content/**/*": "${documentWorkspaceFolder}/docSite/assets/imgs/"
  },
  "markdown.copyFiles.overwriteBehavior": "nameIncrementally",
  "markdown.copyFiles.transformPath": "const filename = uri.path.split('/').pop(); return `/imgs/${filename}`;"
  }
}
25 README.md
@@ -114,16 +114,6 @@ https://github.com/labring/FastGPT/assets/15308462/7d3a38df-eb0e-4388-9250-2409b
 <img src="https://img.shields.io/badge/-返回顶部-7d09f1.svg" alt="#" align="right">
 </a>

-## 🏘️ 社区交流群
-
-扫码加入飞书话题群:
-
-![](...)
-
-<a href="#readme">
-<img src="https://img.shields.io/badge/-返回顶部-7d09f1.svg" alt="#" align="right">
-</a>
-
 ## 🏘️ 加入我们

 我们正在寻找志同道合的小伙伴,加速 FastGPT 的发展。你可以通过 [FastGPT 2025 招聘](https://fael3z0zfze.feishu.cn/wiki/P7FOwEmPziVcaYkvVaacnVX1nvg)了解 FastGPT 的招聘信息。
@@ -133,17 +123,26 @@ https://github.com/labring/FastGPT/assets/15308462/7d3a38df-eb0e-4388-9250-2409b
 - [Laf:3 分钟快速接入三方应用](https://github.com/labring/laf)
 - [Sealos:快速部署集群应用](https://github.com/labring/sealos)
 - [One API:多模型管理,支持 Azure、文心一言等](https://github.com/songquanpeng/one-api)
 - [TuShan:5 分钟搭建后台管理系统](https://github.com/msgbyte/tushan)

 <a href="#readme">
 <img src="https://img.shields.io/badge/-返回顶部-7d09f1.svg" alt="#" align="right">
 </a>


 ## 🌿 第三方生态

-- [COW 个人微信/企微机器人](https://doc.tryfastgpt.ai/docs/use-cases/external-integration/onwechat/)
+- [AI Proxy:国内模型聚合服务](https://sealos.run/aiproxy/?k=fastgpt-github/)
 - [SiliconCloud (硅基流动) —— 开源模型在线体验平台](https://cloud.siliconflow.cn/i/TR9Ym0c4)
+- [COW 个人微信/企微机器人](https://doc.tryfastgpt.ai/docs/use-cases/external-integration/onwechat/)

 <a href="#readme">
 <img src="https://img.shields.io/badge/-返回顶部-7d09f1.svg" alt="#" align="right">
 </a>

+## 🏘️ 社区交流群
+
+扫码加入飞书话题群:
+
+![](...)
+
+<a href="#readme">
+<img src="https://img.shields.io/badge/-返回顶部-7d09f1.svg" alt="#" align="right">
@@ -69,7 +69,7 @@ Project tech stack: NextJs + TS + ChakraUI + MongoDB + PostgreSQL (PG Vector plugin)

 > When using [Sealos](https://sealos.io) services, there is no need to purchase servers or domain names. It supports high concurrency and dynamic scaling, and the database application uses the kubeblocks database, which far exceeds the simple Docker container deployment in terms of IO performance.
 <div align="center">
-[![](...)](https://cloud.sealos.io/?openapp=system-fastdeploy%3FtemplateName%3Dfastgpt)
+[![](...)](https://cloud.sealos.io/?openapp=system-fastdeploy%3FtemplateName%3Dfastgpt&uid=fnWRt09fZP)
 </div>

 Give it a 2-4 minute wait after deployment as it sets up the database. Initially, it might be a bit slow since we're using the basic settings.
@@ -94,7 +94,7 @@ https://github.com/labring/FastGPT/assets/15308462/7d3a38df-eb0e-4388-9250-2409b

 - **⚡ デプロイ**

-  [![](...)](https://cloud.sealos.io/?openapp=system-fastdeploy%3FtemplateName%3Dfastgpt)
+  [![](...)](https://cloud.sealos.io/?openapp=system-fastdeploy%3FtemplateName%3Dfastgpt&uid=fnWRt09fZP)

 デプロイ後、データベースをセットアップするので、2~4分待ってください。基本設定を使っているので、最初は少し遅いかもしれません。
@@ -100,7 +100,7 @@ services:
       exec docker-entrypoint.sh "$$@" &

       # 等待MongoDB服务启动
-      until mongo -u myusername -p mypassword --authenticationDatabase admin --eval "print('waited for connection')" > /dev/null 2>&1; do
+      until mongo -u myusername -p mypassword --authenticationDatabase admin --eval "print('waited for connection')"; do
         echo "Waiting for MongoDB to start..."
         sleep 2
       done
@@ -114,15 +114,15 @@ services:
   # fastgpt
   sandbox:
     container_name: sandbox
-    image: ghcr.io/labring/fastgpt-sandbox:v4.8.23-fix # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.8.23-fix # 阿里云
+    image: ghcr.io/labring/fastgpt-sandbox:v4.9.0 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.0 # 阿里云
     networks:
       - fastgpt
     restart: always
   fastgpt:
     container_name: fastgpt
-    image: ghcr.io/labring/fastgpt:v4.8.23-fix # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.23-fix # 阿里云
+    image: ghcr.io/labring/fastgpt:v4.9.0 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.0 # 阿里云
     ports:
       - 3000:3000
     networks:
@@ -137,10 +137,13 @@ services:
       - FE_DOMAIN=
       # root 密码,用户名为: root。如果需要修改 root 密码,直接修改这个环境变量,并重启即可。
       - DEFAULT_ROOT_PSW=1234
-      # AI模型的API地址哦。务必加 /v1。这里默认填写了OneApi的访问地址。
-      - OPENAI_BASE_URL=http://oneapi:3000/v1
-      # AI模型的API Key。(这里默认填写了OneAPI的快速默认key,测试通后,务必及时修改)
-      - CHAT_API_KEY=sk-fastgpt
+      # AI Proxy 的地址,如果配了该地址,优先使用
+      - AIPROXY_API_ENDPOINT=http://aiproxy:3000
+      # AI Proxy 的 Admin Token,与 AI Proxy 中的环境变量 ADMIN_KEY
+      - AIPROXY_API_TOKEN=aiproxy
+      # 模型中转地址(如果用了 AI Proxy,下面 2 个就不需要了,旧版 OneAPI 用户,使用下面的变量)
+      # - OPENAI_BASE_URL=http://oneapi:3000/v1
+      # - CHAT_API_KEY=sk-fastgpt
       # 数据库最大连接数
       - DB_MAX_LINK=30
       # 登录凭证密钥
@@ -170,48 +173,52 @@ services:
    volumes:
      - ./config.json:/app/data/config.json

  # oneapi
  mysql:
    image: registry.cn-hangzhou.aliyuncs.com/fastgpt/mysql:8.0.36 # 阿里云
    # image: mysql:8.0.36
    container_name: mysql
    restart: always
    ports:
      - 3306:3306
    networks:
      - fastgpt
    command: --default-authentication-plugin=mysql_native_password
    environment:
      # 默认root密码,仅首次运行有效
      MYSQL_ROOT_PASSWORD: oneapimmysql
      MYSQL_DATABASE: oneapi
    volumes:
      - ./mysql:/var/lib/mysql
  oneapi:
    container_name: oneapi
    image: ghcr.io/songquanpeng/one-api:v0.6.7
    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/one-api:v0.6.6 # 阿里云
    ports:
      - 3001:3000
    depends_on:
      - mysql
    networks:
      - fastgpt
    restart: always
    environment:
      # mysql 连接参数
      - SQL_DSN=root:oneapimmysql@tcp(mysql:3306)/oneapi
      # 登录凭证加密密钥
      - SESSION_SECRET=oneapikey
      # 内存缓存
      - MEMORY_CACHE_ENABLED=true
      # 启动聚合更新,减少数据交互频率
      - BATCH_UPDATE_ENABLED=true
      # 聚合更新时长
      - BATCH_UPDATE_INTERVAL=10
      # 初始化的 root 密钥(建议部署完后更改,否则容易泄露)
      - INITIAL_ROOT_TOKEN=fastgpt
    volumes:
      - ./oneapi:/data
  # AI Proxy
  aiproxy:
    image: 'ghcr.io/labring/aiproxy:latest'
    container_name: aiproxy
    restart: unless-stopped
    depends_on:
      aiproxy_pg:
        condition: service_healthy
    networks:
      - fastgpt
    environment:
      # 对应 fastgpt 里的AIPROXY_API_TOKEN
      - ADMIN_KEY=aiproxy
      # 错误日志详情保存时间(小时)
      - LOG_DETAIL_STORAGE_HOURS=1
      # 数据库连接地址
      - SQL_DSN=postgres://postgres:aiproxy@aiproxy_pg:5432/aiproxy
      # 最大重试次数
      - RETRY_TIMES=3
      # 不需要计费
      - BILLING_ENABLED=false
      # 不需要严格检测模型
      - DISABLE_MODEL_CONFIG=true
    healthcheck:
      test: ['CMD', 'curl', '-f', 'http://localhost:3000/api/status']
      interval: 5s
      timeout: 5s
      retries: 10
  aiproxy_pg:
    image: pgvector/pgvector:0.8.0-pg15 # docker hub
    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/pgvector:v0.8.0-pg15 # 阿里云
    restart: unless-stopped
    container_name: aiproxy_pg
    volumes:
      - ./aiproxy_pg:/var/lib/postgresql/data
    networks:
      - fastgpt
    environment:
      TZ: Asia/Shanghai
      POSTGRES_USER: postgres
      POSTGRES_DB: aiproxy
      POSTGRES_PASSWORD: aiproxy
    healthcheck:
      test: ['CMD', 'pg_isready', '-U', 'postgres', '-d', 'aiproxy']
      interval: 5s
      timeout: 5s
      retries: 10

networks:
  fastgpt:
@@ -7,12 +7,12 @@ version: '3.3'
 services:
   # db
   pg:
-    image: pgvector/pgvector:0.7.0-pg15 # docker hub
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/pgvector:v0.7.0 # 阿里云
+    image: pgvector/pgvector:0.8.0-pg15 # docker hub
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/pgvector:v0.8.0-pg15 # 阿里云
     container_name: pg
     restart: always
-    ports: # 生产环境建议不要暴露
-      - 5432:5432
+    # ports: # 生产环境建议不要暴露
+    #   - 5432:5432
     networks:
       - fastgpt
     environment:
@@ -28,8 +28,8 @@ services:
     # image: mongo:4.4.29 # cpu不支持AVX时候使用
     container_name: mongo
     restart: always
-    ports:
-      - 27017:27017
+    # ports:
+    #   - 27017:27017
     networks:
       - fastgpt
     command: mongod --keyFile /data/mongodb.key --replSet rs0
@@ -58,7 +58,7 @@ services:
       exec docker-entrypoint.sh "$$@" &

       # 等待MongoDB服务启动
-      until mongo -u myusername -p mypassword --authenticationDatabase admin --eval "print('waited for connection')" > /dev/null 2>&1; do
+      until mongo -u myusername -p mypassword --authenticationDatabase admin --eval "print('waited for connection')"; do
         echo "Waiting for MongoDB to start..."
         sleep 2
       done
@@ -72,15 +72,15 @@ services:
   # fastgpt
   sandbox:
     container_name: sandbox
-    image: ghcr.io/labring/fastgpt-sandbox:v4.8.23-fix # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.8.23-fix # 阿里云
+    image: ghcr.io/labring/fastgpt-sandbox:v4.9.0 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.0 # 阿里云
     networks:
       - fastgpt
     restart: always
   fastgpt:
     container_name: fastgpt
-    image: ghcr.io/labring/fastgpt:v4.8.23-fix # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.23-fix # 阿里云
+    image: ghcr.io/labring/fastgpt:v4.9.0 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.0 # 阿里云
     ports:
       - 3000:3000
     networks:
@@ -95,10 +95,13 @@ services:
       - FE_DOMAIN=
       # root 密码,用户名为: root。如果需要修改 root 密码,直接修改这个环境变量,并重启即可。
       - DEFAULT_ROOT_PSW=1234
-      # AI模型的API地址哦。务必加 /v1。这里默认填写了OneApi的访问地址。
-      - OPENAI_BASE_URL=http://oneapi:3000/v1
-      # AI模型的API Key。(这里默认填写了OneAPI的快速默认key,测试通后,务必及时修改)
-      - CHAT_API_KEY=sk-fastgpt
+      # AI Proxy 的地址,如果配了该地址,优先使用
+      - AIPROXY_API_ENDPOINT=http://aiproxy:3000
+      # AI Proxy 的 Admin Token,与 AI Proxy 中的环境变量 ADMIN_KEY
+      - AIPROXY_API_TOKEN=aiproxy
+      # 模型中转地址(如果用了 AI Proxy,下面 2 个就不需要了,旧版 OneAPI 用户,使用下面的变量)
+      # - OPENAI_BASE_URL=http://oneapi:3000/v1
+      # - CHAT_API_KEY=sk-fastgpt
       # 数据库最大连接数
       - DB_MAX_LINK=30
       # 登录凭证密钥
@@ -127,48 +130,52 @@ services:
    volumes:
      - ./config.json:/app/data/config.json

  # oneapi
  mysql:
    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/mysql:8.0.36 # 阿里云
    image: mysql:8.0.36
    container_name: mysql
    restart: always
    ports:
      - 3306:3306
    networks:
      - fastgpt
    command: --default-authentication-plugin=mysql_native_password
    environment:
      # 默认root密码,仅首次运行有效
      MYSQL_ROOT_PASSWORD: oneapimmysql
      MYSQL_DATABASE: oneapi
    volumes:
      - ./mysql:/var/lib/mysql
  oneapi:
    container_name: oneapi
    image: ghcr.io/songquanpeng/one-api:v0.6.7
    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/one-api:v0.6.6 # 阿里云
    ports:
      - 3001:3000
    depends_on:
      - mysql
    networks:
      - fastgpt
    restart: always
    environment:
      # mysql 连接参数
      - SQL_DSN=root:oneapimmysql@tcp(mysql:3306)/oneapi
      # 登录凭证加密密钥
      - SESSION_SECRET=oneapikey
      # 内存缓存
      - MEMORY_CACHE_ENABLED=true
      # 启动聚合更新,减少数据交互频率
      - BATCH_UPDATE_ENABLED=true
      # 聚合更新时长
      - BATCH_UPDATE_INTERVAL=10
      # 初始化的 root 密钥(建议部署完后更改,否则容易泄露)
      - INITIAL_ROOT_TOKEN=fastgpt
    volumes:
      - ./oneapi:/data
  # AI Proxy
  aiproxy:
    image: 'ghcr.io/labring/aiproxy:latest'
    container_name: aiproxy
    restart: unless-stopped
    depends_on:
      aiproxy_pg:
        condition: service_healthy
    networks:
      - fastgpt
    environment:
      # 对应 fastgpt 里的AIPROXY_API_TOKEN
      - ADMIN_KEY=aiproxy
      # 错误日志详情保存时间(小时)
      - LOG_DETAIL_STORAGE_HOURS=1
      # 数据库连接地址
      - SQL_DSN=postgres://postgres:aiproxy@aiproxy_pg:5432/aiproxy
      # 最大重试次数
      - RETRY_TIMES=3
      # 不需要计费
      - BILLING_ENABLED=false
      # 不需要严格检测模型
      - DISABLE_MODEL_CONFIG=true
    healthcheck:
      test: ['CMD', 'curl', '-f', 'http://localhost:3000/api/status']
      interval: 5s
      timeout: 5s
      retries: 10
  aiproxy_pg:
    image: pgvector/pgvector:0.8.0-pg15 # docker hub
    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/pgvector:v0.8.0-pg15 # 阿里云
    restart: unless-stopped
    container_name: aiproxy_pg
    volumes:
      - ./aiproxy_pg:/var/lib/postgresql/data
    networks:
      - fastgpt
    environment:
      TZ: Asia/Shanghai
      POSTGRES_USER: postgres
      POSTGRES_DB: aiproxy
      POSTGRES_PASSWORD: aiproxy
    healthcheck:
      test: ['CMD', 'pg_isready', '-U', 'postgres', '-d', 'aiproxy']
      interval: 5s
      timeout: 5s
      retries: 10

networks:
  fastgpt:
@@ -41,7 +41,7 @@ services:
       exec docker-entrypoint.sh "$$@" &

       # 等待MongoDB服务启动
-      until mongo -u myusername -p mypassword --authenticationDatabase admin --eval "print('waited for connection')" > /dev/null 2>&1; do
+      until mongo -u myusername -p mypassword --authenticationDatabase admin --eval "print('waited for connection')"; do
         echo "Waiting for MongoDB to start..."
         sleep 2
       done
@@ -53,15 +53,15 @@ services:
       wait $$!
   sandbox:
     container_name: sandbox
-    image: ghcr.io/labring/fastgpt-sandbox:v4.8.23-fix # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.8.23-fix # 阿里云
+    image: ghcr.io/labring/fastgpt-sandbox:v4.9.0 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.0 # 阿里云
     networks:
       - fastgpt
     restart: always
   fastgpt:
     container_name: fastgpt
-    image: ghcr.io/labring/fastgpt:v4.8.23-fix # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.23-fix # 阿里云
+    image: ghcr.io/labring/fastgpt:v4.9.0 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.0 # 阿里云
     ports:
       - 3000:3000
     networks:
@@ -75,10 +75,13 @@ services:
       - FE_DOMAIN=
       # root 密码,用户名为: root。如果需要修改 root 密码,直接修改这个环境变量,并重启即可。
       - DEFAULT_ROOT_PSW=1234
-      # AI模型的API地址哦。务必加 /v1。这里默认填写了OneApi的访问地址。
-      - OPENAI_BASE_URL=http://oneapi:3000/v1
-      # AI模型的API Key。(这里默认填写了OneAPI的快速默认key,测试通后,务必及时修改)
-      - CHAT_API_KEY=sk-fastgpt
+      # AI Proxy 的地址,如果配了该地址,优先使用
+      - AIPROXY_API_ENDPOINT=http://aiproxy:3000
+      # AI Proxy 的 Admin Token,与 AI Proxy 中的环境变量 ADMIN_KEY
+      - AIPROXY_API_TOKEN=aiproxy
+      # 模型中转地址(如果用了 AI Proxy,下面 2 个就不需要了,旧版 OneAPI 用户,使用下面的变量)
+      # - OPENAI_BASE_URL=http://oneapi:3000/v1
+      # - CHAT_API_KEY=sk-fastgpt
       # 数据库最大连接数
       - DB_MAX_LINK=30
       # 登录凭证密钥
@@ -108,48 +111,52 @@ services:
    volumes:
      - ./config.json:/app/data/config.json

  # oneapi
  mysql:
    image: registry.cn-hangzhou.aliyuncs.com/fastgpt/mysql:8.0.36 # 阿里云
    # image: mysql:8.0.36
    container_name: mysql
    restart: always
    ports:
      - 3306:3306
    networks:
      - fastgpt
    command: --default-authentication-plugin=mysql_native_password
    environment:
      # 默认root密码,仅首次运行有效
      MYSQL_ROOT_PASSWORD: oneapimmysql
      MYSQL_DATABASE: oneapi
    volumes:
      - ./mysql:/var/lib/mysql
  oneapi:
    container_name: oneapi
    image: ghcr.io/songquanpeng/one-api:v0.6.7
    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/one-api:v0.6.6 # 阿里云
    ports:
      - 3001:3000
    depends_on:
      - mysql
    networks:
      - fastgpt
    restart: always
    environment:
      # mysql 连接参数
      - SQL_DSN=root:oneapimmysql@tcp(mysql:3306)/oneapi
      # 登录凭证加密密钥
      - SESSION_SECRET=oneapikey
      # 内存缓存
      - MEMORY_CACHE_ENABLED=true
      # 启动聚合更新,减少数据交互频率
      - BATCH_UPDATE_ENABLED=true
      # 聚合更新时长
      - BATCH_UPDATE_INTERVAL=10
      # 初始化的 root 密钥(建议部署完后更改,否则容易泄露)
      - INITIAL_ROOT_TOKEN=fastgpt
    volumes:
      - ./oneapi:/data
  # AI Proxy
  aiproxy:
    image: 'ghcr.io/labring/aiproxy:latest'
    container_name: aiproxy
    restart: unless-stopped
    depends_on:
      aiproxy_pg:
        condition: service_healthy
    networks:
      - fastgpt
    environment:
      # 对应 fastgpt 里的AIPROXY_API_TOKEN
      - ADMIN_KEY=aiproxy
      # 错误日志详情保存时间(小时)
      - LOG_DETAIL_STORAGE_HOURS=1
      # 数据库连接地址
      - SQL_DSN=postgres://postgres:aiproxy@aiproxy_pg:5432/aiproxy
      # 最大重试次数
      - RETRY_TIMES=3
      # 不需要计费
      - BILLING_ENABLED=false
      # 不需要严格检测模型
      - DISABLE_MODEL_CONFIG=true
    healthcheck:
      test: ['CMD', 'curl', '-f', 'http://localhost:3000/api/status']
      interval: 5s
      timeout: 5s
      retries: 10
  aiproxy_pg:
    image: pgvector/pgvector:0.8.0-pg15 # docker hub
    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/pgvector:v0.8.0-pg15 # 阿里云
    restart: unless-stopped
    container_name: aiproxy_pg
    volumes:
      - ./aiproxy_pg:/var/lib/postgresql/data
    networks:
      - fastgpt
    environment:
      TZ: Asia/Shanghai
      POSTGRES_USER: postgres
      POSTGRES_DB: aiproxy
      POSTGRES_PASSWORD: aiproxy
    healthcheck:
      test: ['CMD', 'pg_isready', '-U', 'postgres', '-d', 'aiproxy']
      interval: 5s
      timeout: 5s
      retries: 10

networks:
  fastgpt:
@@ -6,6 +6,7 @@ data:
     "openapiPrefix": "fastgpt",
     "vectorMaxProcess": 15,
     "qaMaxProcess": 15,
+    "vlmMaxProcess": 15,
     "pgHNSWEfSearch": 100
   },
   "llmModels": [
BIN docSite/assets/imgs/aiproxy-1.jpg (new file, 198 KiB)
BIN docSite/assets/imgs/aiproxy-1.png (new file, 198 KiB)
BIN docSite/assets/imgs/aiproxy-10.png (new file, 229 KiB)
BIN docSite/assets/imgs/aiproxy-11.png (new file, 422 KiB)
BIN docSite/assets/imgs/aiproxy-2.png (new file, 235 KiB)
BIN docSite/assets/imgs/aiproxy-3.png (new file, 341 KiB)
BIN docSite/assets/imgs/aiproxy-4.png (new file, 212 KiB)
BIN docSite/assets/imgs/aiproxy-5.png (new file, 240 KiB)
BIN docSite/assets/imgs/aiproxy-6.png (new file, 342 KiB)
BIN docSite/assets/imgs/aiproxy-7.png (new file, 363 KiB)
BIN docSite/assets/imgs/aiproxy-8.png (new file, 348 KiB)
BIN docSite/assets/imgs/aiproxy-9.png (new file, 222 KiB)
BIN docSite/assets/imgs/aiproxy1.png (new file, 135 KiB)
BIN docSite/assets/imgs/image copy.png (new file, 329 KiB)
BIN docSite/assets/imgs/marker2.png (new file, 216 KiB)
BIN docSite/assets/imgs/marker3.png (new file, 85 KiB)
@@ -23,8 +23,54 @@ weight: 707
   "systemEnv": {
     "vectorMaxProcess": 15, // 向量处理线程数量
     "qaMaxProcess": 15, // 问答拆分线程数量
+    "vlmMaxProcess": 15, // 图片理解模型最大处理进程
     "tokenWorkers": 50, // Token 计算线程保持数,会持续占用内存,不能设置太大。
-    "pgHNSWEfSearch": 100 // 向量搜索参数。越大,搜索越精确,但是速度越慢。设置为100,有99%+精度。
+    "pgHNSWEfSearch": 100, // 向量搜索参数。越大,搜索越精确,但是速度越慢。设置为100,有99%+精度。
+    "customPdfParse": { // 4.9.0 新增配置
+      "url": "", // 自定义 PDF 解析服务地址
+      "key": "", // 自定义 PDF 解析服务密钥
+      "doc2xKey": "", // doc2x 服务密钥
+      "price": 0 // PDF 解析服务价格
+    }
   }
 }
```

## 自定义 PDF 解析配置

自定义 PDF 服务解析的优先级高于 Doc2x 服务,所以如果使用 Doc2x 服务,请勿配置自定义 PDF 服务。
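A minimal TypeScript sketch of the priority rule documented above; the type and function names are hypothetical illustrations, not FastGPT's actual internals:

```ts
// Hypothetical illustration of the documented priority: a configured custom
// parse URL wins over doc2xKey. Names are not FastGPT's actual internals.
interface CustomPdfParseConfig {
  url?: string; // custom PDF parsing service address
  key?: string; // custom service key
  doc2xKey?: string; // Doc2x service key
  price?: number;
}

type PdfParser = 'custom' | 'doc2x' | 'builtin';

function pickPdfParser(cfg: CustomPdfParseConfig): PdfParser {
  if (cfg.url) return 'custom'; // custom service takes precedence over Doc2x
  if (cfg.doc2xKey) return 'doc2x';
  return 'builtin'; // fall back to the built-in pdfjs-based parser
}
```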
### 使用 Sealos PDF 解析服务

#### 1. 申请 Sealos AI proxy API Key

[点击打开 Sealos Pdf parser 官网](https://hzh.sealos.run/?uid=fnWRt09fZP&openapp=system-aiproxy),并进行对应 API Key 的申请。

#### 2. 修改 FastGPT 配置文件

`systemEnv.customPdfParse.url` 填写成 `https://aiproxy.hzh.sealos.run/v1/parse/pdf?model=parse-pdf`,
`systemEnv.customPdfParse.key` 填写成在 Sealos AI proxy 中申请的 API Key。

![](...)

### 使用 Doc2x 解析 PDF 文件

`Doc2x` 是一个国内的专业 PDF 解析服务。

#### 1. 申请 Doc2x 服务

[点击打开 Doc2x 官网](https://doc2x.noedgeai.com?inviteCode=9EACN2),并进行对应 API Key 的申请。

#### 2. 修改 FastGPT 配置文件

开源版用户在 `config.json` 文件中添加 `systemEnv.customPdfParse.doc2xKey` 配置,填写申请到的 API Key,并重启服务。

商业版用户在 Admin 后台根据表单指引填写 Doc2x 服务密钥。

#### 3. 开始使用

在知识库导入数据或应用文件上传配置中,可以勾选`PDF 增强解析`,则在对 PDF 解析时候,会使用 Doc2x 服务进行解析。

### 使用 Marker 解析 PDF 文件

[点击查看 Marker 接入教程](/docs/development/custom-models/marker)
@@ -11,39 +11,51 @@ weight: 909

 PDF 是一个相对复杂的文件格式,在 FastGPT 内置的 pdf 解析器中,依赖的是 pdfjs 库解析,该库基于逻辑解析,无法有效的理解复杂的 pdf 文件。所以我们在解析 pdf 时候,如果遇到图片、表格、公式等非简单文本内容,会发现解析效果不佳。

-市面上目前有多种解析 PDF 的方法,比如使用 [Marker](https://github.com/VikParuchuri/marker),该项目使用了 Surya 模型,基于视觉解析,可以有效提取图片、表格、公式等复杂内容。为了可以让 Marker 快速接入 FastGPT,我们做了一个自定义解析的拓展 Demo。
+市面上目前有多种解析 PDF 的方法,比如使用 [Marker](https://github.com/VikParuchuri/marker),该项目使用了 Surya 模型,基于视觉解析,可以有效提取图片、表格、公式等复杂内容。

-在 FastGPT 4.8.15 版本中,你可以通过增加一个环境变量,来替换掉 FastGPT 系统内置解析器,实现自定义的文档解析服务。该功能只是 Demo 阶段,后期配置模式和交互规则会发生改动。
+在 `FastGPT v4.9.0` 版本中,开源版用户可以在`config.json`文件中添加`systemEnv.customPdfParse`配置,来使用 Marker 解析 PDF 文件。商业版用户直接在 Admin 后台根据表单指引填写即可。需重新拉取 Marker 镜像,接口格式已变动。

 ## 使用教程

-### 1. 按照 Marker
+### 1. 安装 Marker

 参考文档 [Marker 安装教程](https://github.com/labring/FastGPT/tree/main/plugins/model/pdf-marker),安装 Marker 模型。封装的 API 已经适配了 FastGPT 自定义解析服务。

 这里介绍快速 Docker 安装的方法:

 ```bash
-docker pull crpi-h3snc261q1dosroc.cn-hangzhou.personal.cr.aliyuncs.com/marker11/marker_images:latest
-docker run --gpus all -itd -p 7231:7231 --name model_pdf_v1 crpi-h3snc261q1dosroc.cn-hangzhou.personal.cr.aliyuncs.com/marker11/marker_images:latest
+docker pull crpi-h3snc261q1dosroc.cn-hangzhou.personal.cr.aliyuncs.com/marker11/marker_images:v0.2
+docker run --gpus all -itd -p 7231:7232 --name model_pdf_v2 -e PROCESSES_PER_GPU="2" crpi-h3snc261q1dosroc.cn-hangzhou.personal.cr.aliyuncs.com/marker11/marker_images:v0.2
 ```

+### 2. 添加 FastGPT 文件配置
+
+```json
+{
+  xxx
+  "systemEnv": {
+    xxx
+    "customPdfParse": {
+      "url": "http://xxxx.com/v2/parse/file", // 自定义 PDF 解析服务地址 marker v0.2
+      "key": "", // 自定义 PDF 解析服务密钥
+      "doc2xKey": "", // doc2x 服务密钥
+      "price": 0 // PDF 解析服务价格
+    }
+  }
+}
+```

-### 2. 添加 FastGPT 环境变量
-
-```
-CUSTOM_READ_FILE_URL=http://xxxx.com/v1/parse/file
-CUSTOM_READ_FILE_EXTENSION=pdf
-```
-
-* CUSTOM_READ_FILE_URL - 自定义解析服务的地址, host改成解析服务的访问地址,path 不能变动。
-* CUSTOM_READ_FILE_EXTENSION - 支持的文件后缀,多个文件类型,可用逗号隔开。
-需要重启服务。

 ### 3. 测试效果

-通过知识库上传一个 pdf 文件,并确认上传,可以在日志中看到 LOG (LOG_LEVEL需要设置 info 或者 debug):
+通过知识库上传一个 pdf 文件,并勾选上 `PDF 增强解析`。
+
+![](...)
+
+确认上传后,可以在日志中看到 LOG (LOG_LEVEL需要设置 info 或者 debug):

 ```
 [Info] 2024-12-05 15:04:42 Parsing files from an external service
 [Info] 2024-12-05 15:07:08 Custom file parsing is complete, time: 1316ms
 ```

@@ -51,6 +63,10 @@ CUSTOM_READ_FILE_EXTENSION=pdf

 ![](...)

+同样的,在应用中,你可以在文件上传配置里,勾选上 `PDF 增强解析`。
+
+![](...)

 ## 效果展示

@@ -63,4 +79,25 @@ CUSTOM_READ_FILE_EXTENSION=pdf

 上图是分块后的结果,下图是 pdf 原文。整体图片、公式、表格都可以提取出来,效果还是杠杠的。

 不过要注意的是,[Marker](https://github.com/VikParuchuri/marker) 的协议是`GPL-3.0 license`,请在遵守协议的前提下使用。

+## 旧版 Marker 使用方法
+
+FastGPT V4.9.0 版本之前,可以用以下方式,试用 Marker 解析服务。
+
+安装和运行 Marker 服务:
+
+```bash
+docker pull crpi-h3snc261q1dosroc.cn-hangzhou.personal.cr.aliyuncs.com/marker11/marker_images:v0.1
+docker run --gpus all -itd -p 7231:7231 --name model_pdf_v1 -e PROCESSES_PER_GPU="2" crpi-h3snc261q1dosroc.cn-hangzhou.personal.cr.aliyuncs.com/marker11/marker_images:v0.1
+```
+
+并修改 FastGPT 环境变量:
+
+```
+CUSTOM_READ_FILE_URL=http://xxxx.com/v1/parse/file
+CUSTOM_READ_FILE_EXTENSION=pdf
+```
+
+* CUSTOM_READ_FILE_URL - 自定义解析服务的地址,host 改成解析服务的访问地址,path 不能变动。
+* CUSTOM_READ_FILE_EXTENSION - 支持的文件后缀,多个文件类型,可用逗号隔开。
@@ -30,7 +30,7 @@ weight: 707

 ### PgVector版本

-非常轻量,适合数据量在 5000 万以下。
+非常轻量,适合知识库索引量在 5000 万以下。

 {{< table "table-hover table-striped-columns" >}}
 | 环境 | 最低配置(单节点) | 推荐配置 |
@@ -149,18 +149,14 @@ curl -o docker-compose.yml https://raw.githubusercontent.com/labring/FastGPT/mai
 {{< tab tabName="PgVector版本" >}}
 {{< markdownify >}}

-```
-FE_DOMAIN=你的前端访问地址,例如 http://192.168.0.1:3000;https://cloud.fastgpt.cn
-```
+无需操作

 {{< /markdownify >}}
 {{< /tab >}}
 {{< tab tabName="Milvus版本" >}}
 {{< markdownify >}}

-```
-FE_DOMAIN=你的前端访问地址,例如 http://192.168.0.1:3000;https://cloud.fastgpt.cn
-```
+无需操作

 {{< /markdownify >}}
 {{< /tab >}}
@@ -174,7 +170,6 @@ FE_DOMAIN=你的前端访问地址,例如 http://192.168.0.1:3000;https://clo
 {{% alert icon="🤖" context="success" %}}

 1. 修改`MILVUS_ADDRESS`和`MILVUS_TOKEN`链接参数,分别对应 `zilliz` 的 `Public Endpoint` 和 `Api key`,记得把自己ip加入白名单。
-2. 修改FE_DOMAIN=你的前端访问地址,例如 http://192.168.0.1:3000;https://cloud.fastgpt.cn

 {{% /alert %}}

@@ -189,36 +184,28 @@ FE_DOMAIN=你的前端访问地址,例如 http://192.168.0.1:3000;https://clo
 ```bash
 # 启动容器
 docker-compose up -d
-# 等待10s,OneAPI第一次总是要重启几次才能连上Mysql
-sleep 10
-# 重启一次oneapi(由于OneAPI的默认Key有点问题,不重启的话会提示找不到渠道,临时手动重启一次解决,等待作者修复)
-docker restart oneapi
 ```

-### 4. 打开 OneAPI 添加模型
+### 4. 访问 FastGPT

-可以通过`ip:3001`访问OneAPI,默认账号为`root`密码为`123456`。
-
-在OneApi中添加合适的AI模型渠道。[点击查看相关教程](/docs/development/modelconfig/one-api/)
-
-只需要添加模型即可,模板已经配置好了oneapi的连接地址和令牌,无需变更。
-
-### 5. 访问 FastGPT
-
-目前可以通过 `ip:3000` 直接访问(注意防火墙)。登录用户名为 `root`,密码为`docker-compose.yml`环境变量里设置的 `DEFAULT_ROOT_PSW`。
+目前可以通过 `ip:3000` 直接访问(注意开放防火墙)。登录用户名为 `root`,密码为`docker-compose.yml`环境变量里设置的 `DEFAULT_ROOT_PSW`。

 如果需要域名访问,请自行安装并配置 Nginx。

-首次运行,会自动初始化 root 用户,密码为 `1234`(与环境变量中的`DEFAULT_ROOT_PSW`一致),日志里会提示一次`MongoServerError: Unable to read from a snapshot due to pending collection catalog changes;`可忽略。
+首次运行,会自动初始化 root 用户,密码为 `1234`(与环境变量中的`DEFAULT_ROOT_PSW`一致),日志可能会提示一次`MongoServerError: Unable to read from a snapshot due to pending collection catalog changes;`可忽略。

-### 6. 配置模型
+### 5. 配置模型

-登录FastGPT后,进入模型配置页面,务必先配置至少一个语言模型和一个向量模型,否则系统无法正常使用。
-
-[点击查看模型配置教程](/docs/development/modelConfig/intro/)
+- 首次登录FastGPT后,系统会提示未配置`语言模型`和`索引模型`,并自动跳转模型配置页面。系统必须至少有这两类模型才能正常使用。
+- 如果系统未正常跳转,可以在`账号-模型提供商`页面,进行模型配置。[点击查看相关教程](/docs/development/modelconfig/ai-proxy)
+- 目前已知可能问题:首次进入系统后,整个浏览器 tab 无法响应。此时需要删除该tab,重新打开一次即可。

 ## FAQ

+### 登录系统后,浏览器无法响应
+
+无法点击任何内容,刷新也无效。此时需要删除该tab,重新打开一次即可。
+
 ### Mongo 副本集自动初始化失败

 最新的 docker-compose 示例优化了 Mongo 副本集初始化,实现了全自动。目前在 ubuntu20/22、centos7、wsl2、mac、windows 均通过测试。若仍无法正常启动,大部分是因为 cpu 不支持 AVX 指令集,可以切换 Mongo4.x 版本。
@@ -70,6 +70,7 @@ Mongo 数据库需要注意在连接地址中增加 directConnec

 - `vectorMaxProcess`: 向量生成最大进程,根据数据库和 key 的并发数来决定,通常单个 120 号,2c4g 服务器设置 10~15。
 - `qaMaxProcess`: QA 生成最大进程
+- `vlmMaxProcess`: 图片理解模型最大进程
 - `pgHNSWEfSearch`: PostgreSQL vector 索引参数,越大搜索精度越高但是速度越慢,具体可看 pgvector 官方说明。

### 5. 运行
@@ -7,9 +7,18 @@ draft: false
 images: []
 ---

-## Copy文件
+## 1. 停止服务
+
+```bash
+docker-compose down
+```
+
+## 2. Copy文件夹

 Docker 部署数据库都会通过 volume 挂载本地的目录进入容器,如果要迁移,直接复制这些目录即可。

 `PG 数据`: pg/data
 `Mongo 数据`: mongo/data

 直接把pg 和 mongo目录全部复制走即可。
129 docSite/content/zh-cn/docs/development/modelConfig/ai-proxy.md (new file)
@@ -0,0 +1,129 @@
---
title: '通过 AI Proxy 接入模型'
description: '通过 AI Proxy 接入模型'
icon: 'api'
draft: false
toc: true
weight: 744
---

从 `FastGPT 4.8.23` 版本开始,引入 AI Proxy 来进一步方便模型的配置。

AI Proxy 与 One API 类似,也是作为一个 OpenAI 接口管理 & 分发系统,可以通过标准的 OpenAI API 格式访问所有的大模型,开箱即用。

## 部署

### Docker 版本

`docker-compose.yml` 文件已加入了 AI Proxy 配置,可直接使用。[点击查看最新的 yml 配置](https://raw.githubusercontent.com/labring/FastGPT/main/deploy/docker/docker-compose-pgvector.yml)

从旧版升级的用户,可以复制 yml 里 ai proxy 的配置,加入到旧的 yml 文件中。

## 运行原理

AI proxy 核心模块:

1. 渠道管理:管理各家模型提供商的 API Key 和可用模型列表。
2. 模型调用:根据请求的模型,选中对应的渠道;根据渠道的 API 格式,构造请求体,发送请求;格式化响应体成标准格式返回。
3. 调用日志:详细记录模型调用的日志,并在错误时候可以记录其入参和报错信息,方便排查。

运行流程:

![](...)

## 在 FastGPT 中使用

AI proxy 相关功能,可以在`账号-模型提供商`页面找到。

### 1. 创建渠道

在`模型提供商`的配置页面,点击`模型渠道`,进入渠道配置页面。

![](...)

点击右上角的“新增渠道”,即可进入渠道配置页面。

![](...)

以阿里云的模型为例,进行如下配置:

![](...)

1. 渠道名:展示在外部的渠道名称,仅作标识;
2. 厂商:模型对应的厂商,不同厂商对应不同的默认地址和 API 密钥格式;
3. 模型:当前渠道具体可以使用的模型,系统内置了主流的一些模型,如果下拉框中没有想要的选项,可以点击“新增模型”,[增加自定义模型](/docs/development/modelconfig/intro/#新增自定义模型);
4. 模型映射:将 FastGPT 请求的模型,映射到具体提供的模型上。例如:

```json
{
  "gpt-4o-test": "gpt-4o"
}
```

FastGPT 中的模型为 `gpt-4o-test`,向 AI Proxy 发起请求时也是 `gpt-4o-test`。AI proxy 在向上游发送请求时,实际的`model`为 `gpt-4o`。(映射行为的代码示意,见下方列表之后)
5. 代理地址:具体请求的地址,系统给每个主流渠道配置了默认的地址,如果无需改动则不用填。
6. API 密钥:从模型厂商处获取的 API 凭证。注意部分厂商需要提供多个密钥组合,可以根据提示进行输入。
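A small TypeScript sketch of the model-mapping behaviour described in item 4 above; the function is a hypothetical illustration, not AI Proxy's actual code:

```ts
// Hypothetical illustration of channel model mapping: the requested model name
// is rewritten before the upstream call; unmapped names pass through unchanged.
const modelMapping: Record<string, string> = {
  'gpt-4o-test': 'gpt-4o'
};

function resolveUpstreamModel(requestedModel: string): string {
  return modelMapping[requestedModel] ?? requestedModel;
}

console.log(resolveUpstreamModel('gpt-4o-test')); // 'gpt-4o'
console.log(resolveUpstreamModel('gpt-4o-mini')); // 'gpt-4o-mini' (no mapping)
```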
最后点击“新增”,就能在“模型渠道”下看到刚刚配置的渠道。

![](...)

### 2. 渠道测试

然后可以对渠道进行测试,确保配置的模型有效。

![](...)

点击“模型测试”,可以看到配置的模型列表,点击“开始测试”。

![](...)

等待模型测试完成后,会输出每个模型的测试结果以及请求时长。

![](...)

### 3. 启用模型

最后在`模型配置`中,可以选择启用对应的模型,这样就能在平台中使用了,更多模型配置可以参考[模型配置](/docs/development/modelconfig/intro)。

![](...)

## 其他功能介绍

### 优先级

范围 1~100。数值越大,越容易被优先选中。

![](...)

### 启用/禁用

在渠道右侧的控制菜单中,还可以控制渠道的启用或禁用,被禁用的渠道将无法再提供模型服务。

![](...)

### 调用日志

在`调用日志`页面,会展示发送到模型处的请求记录,包括具体的输入输出 tokens、请求时间、请求耗时、请求地址等等。错误的请求,会记录详细的入参和错误信息,方便排查,但仅会保留 1 小时(环境变量里可配置)。

![](...)

## 从 OneAPI 迁移到 AI Proxy

可以从任意终端,发起 1 个 HTTP 请求。其中 `{{host}}` 替换成 AI Proxy 地址,`{{admin_key}}` 替换成 AI Proxy 中 `ADMIN_KEY` 的值。

Body 参数 `dsn` 为 OneAPI 的 mysql 连接串。

```bash
curl --location --request POST '{{host}}/api/channels/import/oneapi' \
--header 'Authorization: Bearer {{admin_key}}' \
--header 'Content-Type: application/json' \
--data-raw '{
  "dsn": "mysql://root:s5mfkwst@tcp(dbconn.sealoshzh.site:33123)/mydb"
}'
```

执行成功的情况下会返回 `"success": true`。

脚本目前不是完全准确,仅是简单的做数据映射,主要是迁移`代理地址`、`模型`和`API 密钥`,建议迁移后再进行手动检查。
@@ -13,9 +13,15 @@ weight: 744

 ## 配置模型

-### 1. 使用 OneAPI 对接模型提供商
+### 1. 对接模型提供商

-可以使用 [OneAPI 接入教程](/docs/development/modelconfig/one-api) 来进行模型聚合,从而可以对接更多模型提供商。你需要先在各服务商申请好 API 接入 OneAPI 后,才能在 FastGPT 中使用这些模型。示例流程如下:
+#### AI Proxy
+
+从 4.8.23 版本开始,FastGPT 支持在页面上配置模型提供商,即使用 [AI Proxy 接入教程](/docs/development/modelconfig/ai-proxy) 来进行模型聚合,从而可以对接更多模型提供商。
+
+#### One API
+
+也可以使用 [OneAPI 接入教程](/docs/development/modelconfig/one-api)。你需要先在各服务商申请好 API 接入 OneAPI 后,才能在 FastGPT 中使用这些模型。示例流程如下:

 ![](...)

@@ -23,22 +29,12 @@ weight: 744

 {{% alert icon=" " context="info" %}}
 - [SiliconCloud(硅基流动)](https://cloud.siliconflow.cn/i/TR9Ym0c4): 提供开源模型调用的平台。
-- [Sealos AIProxy](https://cloud.sealos.run/?uid=fnWRt09fZP&openapp=system-aiproxy): 提供国内各家模型代理,无需逐一申请 api。
+- [Sealos AIProxy](https://hzh.sealos.run/?uid=fnWRt09fZP&openapp=system-aiproxy): 提供国内各家模型代理,无需逐一申请 api。
 {{% /alert %}}

 在 OneAPI 配置好模型后,你就可以打开 FastGPT 页面,启用对应模型了。

-### 2. 登录 root 用户
-
-仅 root 用户可以进行模型配置。
-
-### 3. 进入模型配置页面
-
-登录 root 用户后,在`账号-模型提供商-模型配置`中,你可以看到所有内置的模型和自定义模型,以及哪些模型启用了。
-
-![](...)
-
-### 4. 配置介绍
+### 2. 配置介绍

 {{% alert icon="🤖 " context="success" %}}
 注意:
@@ -20,14 +20,10 @@ FastGPT 目前采用模型分离的部署方案,FastGPT 中只兼容 OpenAI

 ## 部署

 ### Docker 版本

 `docker-compose.yml` 文件已加入了 OneAPI 配置,可直接使用。默认暴露在 3001 端口。

 ### Sealos 版本

 * 北京区: [点击部署 OneAPI](https://hzh.sealos.run/?openapp=system-template%3FtemplateName%3Done-api)
-* 新加坡区(可用 GPT) [点击部署 OneAPI](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Done-api)
+* 新加坡区(可用 GPT) [点击部署 OneAPI](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Done-api&uid=fnWRt09fZP)

 ![](...)

@@ -35,7 +35,7 @@ CHAT_API_KEY=sk-xxxxxx

 ![](...)

-## 5. 体验测试
+## 4. 体验测试

 ### 测试对话和图片识别
@@ -297,7 +297,9 @@ curl --location --request DELETE 'http://localhost:3000/api/core/dataset/delete?
 | --- | --- | --- |
 | datasetId | 知识库ID | ✅ |
 | parentId | 父级ID,不填则默认为根目录 | |
-| trainingType | 训练模式。chunk: 按文本长度进行分割;qa: QA拆分;auto: 增强训练 | ✅ |
+| trainingType | 数据处理方式。chunk: 按文本长度进行分割;qa: 问答对提取 | ✅ |
+| autoIndexes | 是否自动生成索引(仅商业版支持) | |
+| imageIndex | 是否自动生成图片索引(仅商业版支持) | |
 | chunkSize | 预估块大小 | |
 | chunkSplitter | 自定义最高优先分割符号 | |
 | qaPrompt | qa拆分提示词 | |
@@ -1061,10 +1063,12 @@ curl --location --request DELETE 'http://localhost:3000/api/core/dataset/collect

 | 字段 | 类型 | 说明 | 必填 |
 | --- | --- | --- | --- |
-| defaultIndex | Boolean | 是否为默认索引 | ✅ |
-| dataId | String | 关联的向量ID | ✅ |
+| type | String | 可选索引类型:default-默认索引; custom-自定义索引; summary-总结索引; question-问题索引; image-图片索引 | |
+| dataId | String | 关联的向量ID,变更数据时候传入该 ID,会进行差量更新,而不是全量更新 | |
 | text | String | 文本内容 | ✅ |

+`type` 不填则默认为 `custom` 索引,还会基于 q/a 组成一个默认索引。如果传入了默认索引,则不会额外创建。
+
 ### 为集合批量添加数据

 注意,每次最多推送 200 组数据。
@@ -1079,7 +1083,7 @@ curl --location --request POST 'https://api.fastgpt.in/api/core/dataset/data/push' \
 --header 'Content-Type: application/json' \
 --data-raw '{
     "collectionId": "64663f451ba1676dbdef0499",
-    "trainingMode": "chunk",
+    "trainingType": "chunk",
     "prompt": "可选。qa 拆分引导词,chunk 模式下忽略",
     "billId": "可选。如果有这个值,本次的数据会被聚合到一个订单中,这个值可以重复使用。可以参考 [创建训练订单] 获取该值。",
    "data": [
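A TypeScript equivalent of the curl call above, for reference. A minimal sketch: the endpoint and field names come from the docs in this diff, while the API key and the sample data item are placeholders:

```ts
// Minimal sketch of the push-data call shown above; replace the key and IDs
// with real values. Note the renamed field: trainingType instead of trainingMode.
async function pushDatasetData() {
  const res = await fetch('https://api.fastgpt.in/api/core/dataset/data/push', {
    method: 'POST',
    headers: {
      Authorization: 'Bearer <your-api-key>', // placeholder
      'Content-Type': 'application/json'
    },
    body: JSON.stringify({
      collectionId: '64663f451ba1676dbdef0499',
      trainingType: 'chunk', // 'chunk' or 'qa' after this change
      data: [{ q: 'FastGPT 是什么?', a: '一个基于 LLM 的知识库问答系统。' }]
    })
  });
  return res.json();
}
```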
@@ -1296,8 +1300,7 @@ curl --location --request GET 'http://localhost:3000/api/core/dataset/data/detai
     "chunkIndex": 0,
     "indexes": [
       {
-        "defaultIndex": true,
-        "type": "chunk",
+        "type": "default",
         "dataId": "3720083",
         "text": "N o . 2 0 2 2 1 2中 国 信 息 通 信 研 究 院京东探索研究院2022年 9月人工智能生成内容(AIGC)白皮书(2022 年)版权声明本白皮书版权属于中国信息通信研究院和京东探索研究院,并受法律保护。转载、摘编或利用其它方式使用本白皮书文字或者观点的,应注明“来源:中国信息通信研究院和京东探索研究院”。违反上述声明者,编者将追究其相关法律责任。前 言习近平总书记曾指出,“数字技术正以新理念、新业态、新模式全面融入人类经济、政治、文化、社会、生态文明建设各领域和全过程”。在当前数字世界和物理世界加速融合的大背景下,人工智能生成内容(Artificial Intelligence Generated Content,简称 AIGC)正在悄然引导着一场深刻的变革,重塑甚至颠覆数字内容的生产方式和消费模式,将极大地丰富人们的数字生活,是未来全面迈向数字文明新时代不可或缺的支撑力量。",
         "_id": "65abd4b29d1448617cba61dc"
@@ -1333,12 +1336,18 @@ curl --location --request PUT 'http://localhost:3000/api/core/dataset/data/update
     "a":"sss",
     "indexes":[
       {
-        "dataId": "xxx",
-        "defaultIndex":false,
-        "text":"自定义索引1"
+        "dataId": "xxxx",
+        "type": "default",
+        "text": "默认索引"
       },
       {
-        "text":"修改后的自定义索引2。(会删除原来的自定义索引2,并插入新的自定义索引2)"
+        "dataId": "xxx",
+        "type": "custom",
+        "text": "旧的自定义索引1"
       },
+      {
+        "type":"custom",
+        "text":"新增的自定义索引"
+      }
     ]
 }'
@@ -9,7 +9,7 @@ weight: 951

 ## 登录 Sealos

-[Sealos](https://cloud.sealos.io/)
+[Sealos](https://cloud.sealos.io?uid=fnWRt09fZP)

 ## 创建应用

@@ -26,13 +26,13 @@ FastGPT 使用了 one-api 项目来管理模型池,其可以兼容 OpenAI 、A

 新加坡区的服务器在国外,可以直接访问 OpenAI,但国内用户需要梯子才可以正常访问新加坡区。国际区价格稍贵,点击下面按键即可部署👇

-<a href="https://template.cloud.sealos.io/deploy?templateName=fastgpt" rel="external" target="_blank"><img src="https://cdn.jsdelivr.net/gh/labring-actions/templates@main/Deploy-on-Sealos.svg" alt="Deploy on Sealos"/></a>
+<a href="https://template.cloud.sealos.io/deploy?templateName=fastgpt&uid=fnWRt09fZP" rel="external" target="_blank"><img src="https://cdn.jsdelivr.net/gh/labring-actions/templates@main/Deploy-on-Sealos.svg" alt="Deploy on Sealos"/></a>

 ### 北京区

 北京区服务提供商为火山云,国内用户可以稳定访问,但无法访问 OpenAI 等境外服务,价格约为新加坡区的 1/4。点击下面按键即可部署👇

-<a href="https://bja.sealos.run/?openapp=system-template%3FtemplateName%3Dfastgpt" rel="external" target="_blank"><img src="https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg" alt="Deploy on Sealos"/></a>
+<a href="https://bja.sealos.run/?openapp=system-template%3FtemplateName%3Dfastgpt&uid=fnWRt09fZP" rel="external" target="_blank"><img src="https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg" alt="Deploy on Sealos"/></a>

 ### 1. 开始部署
@@ -13,7 +13,7 @@ FastGPT V4.5 引入 PgVector0.5 版本的 HNSW 索引,极大的提高了知识

 ## PgVector升级:Sealos 部署方案

-1. 点击[Sealos桌面](https://cloud.sealos.io)的数据库应用。
+1. 点击[Sealos桌面](https://cloud.sealos.io?uid=fnWRt09fZP)的数据库应用。
 2. 点击【pg】数据库的详情。
 3. 点击右上角的重启,等待重启完成。
 4. 点击左侧的一键链接,等待打开 Terminal。
@@ -35,7 +35,7 @@ curl --location --request POST 'https://{{host}}/api/admin/initv4820' \

 ## 完整更新内容

-1. 新增 - 可视化模型参数配置,取代原配置文件配置模型。预设超过 100 个模型配置。同时支持所有类型模型的一键测试。(预计下个版本会完全支持在页面上配置渠道)。
+1. 新增 - 可视化模型参数配置,取代原配置文件配置模型。预设超过 100 个模型配置。同时支持所有类型模型的一键测试。(预计下个版本会完全支持在页面上配置渠道)。[点击查看模型配置方案](/docs/development/modelconfig/intro/)
 2. 新增 - DeepSeek reasoner 模型支持输出思考过程。
 3. 新增 - 使用记录导出和仪表盘。
 4. 新增 - markdown 语法扩展,支持音视频(代码块 audio 和 video)。
@@ -4,7 +4,7 @@ description: 'FastGPT V4.8.23 更新说明'
 icon: 'upgrade'
 draft: false
 toc: true
-weight: 802
+weight: 801
 ---

 ## 更新指南
190 docSite/content/zh-cn/docs/development/upgrading/490.md (new file)
@@ -0,0 +1,190 @@
---
title: 'V4.9.0(包含升级脚本)'
description: 'FastGPT V4.9.0 更新说明'
icon: 'upgrade'
draft: false
toc: true
weight: 800
---

## 更新指南

### 1. 做好数据库备份

### 2. 更新镜像和 PG 容器

- 更新 FastGPT 镜像 tag: v4.9.0
- 更新 FastGPT 商业版镜像 tag: v4.9.0
- Sandbox 镜像,可以不更新
- 更新 PG 容器为 v0.8.0-pg15,可以查看[最新的 yml](https://raw.githubusercontent.com/labring/FastGPT/main/deploy/docker/docker-compose-pgvector.yml)

### 3. 替换 OneAPI(可选)

需要使用 [AI Proxy](https://github.com/labring/aiproxy) 替换 OneAPI 的用户可执行该步骤。

#### 1. 修改 yml 文件

参考[最新的 yml](https://raw.githubusercontent.com/labring/FastGPT/main/deploy/docker/docker-compose-pgvector.yml) 文件,里面已移除 OneAPI 并添加了 AIProxy 配置,包含一个服务和一个 PgSQL 数据库。将 `aiproxy` 的配置`追加`到 OneAPI 的配置后面(先不要删除 OneAPI,有一个初始化会自动同步 OneAPI 的配置)。

{{% details title="AI Proxy Yml 配置" closed="true" %}}

```
# AI Proxy
aiproxy:
  image: 'ghcr.io/labring/aiproxy:latest'
  container_name: aiproxy
  restart: unless-stopped
  depends_on:
    aiproxy_pg:
      condition: service_healthy
  networks:
    - fastgpt
  environment:
    # 对应 fastgpt 里的AIPROXY_API_TOKEN
    - ADMIN_KEY=aiproxy
    # 错误日志详情保存时间(小时)
    - LOG_DETAIL_STORAGE_HOURS=1
    # 数据库连接地址
    - SQL_DSN=postgres://postgres:aiproxy@aiproxy_pg:5432/aiproxy
    # 最大重试次数
    - RETRY_TIMES=3
    # 不需要计费
    - BILLING_ENABLED=false
    # 不需要严格检测模型
    - DISABLE_MODEL_CONFIG=true
  healthcheck:
    test: ['CMD', 'curl', '-f', 'http://localhost:3000/api/status']
    interval: 5s
    timeout: 5s
    retries: 10
aiproxy_pg:
  image: pgvector/pgvector:0.8.0-pg15 # docker hub
  # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/pgvector:v0.8.0-pg15 # 阿里云
  restart: unless-stopped
  container_name: aiproxy_pg
  volumes:
    - ./aiproxy_pg:/var/lib/postgresql/data
  networks:
    - fastgpt
  environment:
    TZ: Asia/Shanghai
    POSTGRES_USER: postgres
    POSTGRES_DB: aiproxy
    POSTGRES_PASSWORD: aiproxy
  healthcheck:
    test: ['CMD', 'pg_isready', '-U', 'postgres', '-d', 'aiproxy']
    interval: 5s
    timeout: 5s
    retries: 10
```

{{% /details %}}

#### 2. 增加 FastGPT 环境变量

修改 yml 文件中 fastgpt 容器的环境变量:

```
# AI Proxy 的地址,如果配了该地址,优先使用
- AIPROXY_API_ENDPOINT=http://aiproxy:3000
# AI Proxy 的 Admin Token,与 AI Proxy 中的环境变量 ADMIN_KEY
- AIPROXY_API_TOKEN=aiproxy
```

#### 3. 重载服务

`docker-compose down` 停止服务,然后 `docker-compose up -d` 启动服务,此时会追加 `aiproxy` 服务,并修改 FastGPT 的配置。

#### 4. 执行 OneAPI 迁移 AI proxy 脚本

- 可联网方案:

```bash
# 进入 aiproxy 容器
docker exec -it aiproxy sh
# 安装 curl
apk add curl
# 执行脚本
curl --location --request POST 'http://localhost:3000/api/channels/import/oneapi' \
--header 'Authorization: Bearer aiproxy' \
--header 'Content-Type: application/json' \
--data-raw '{
  "dsn": "mysql://root:oneapimmysql@tcp(mysql:3306)/oneapi"
}'
# 返回 {"data":[],"success":true} 代表成功
```

- 无法联网时,可打开 `aiproxy` 的外网暴露端口,然后在本地执行脚本。

  aiproxy 暴露端口:3003:3000,修改后重新 `docker-compose up -d` 启动服务。

```bash
# 在终端执行脚本
curl --location --request POST 'http://localhost:3003/api/channels/import/oneapi' \
--header 'Authorization: Bearer aiproxy' \
--header 'Content-Type: application/json' \
--data-raw '{
  "dsn": "mysql://root:oneapimmysql@tcp(mysql:3306)/oneapi"
}'
# 返回 {"data":[],"success":true} 代表成功
```

- 如果不熟悉 docker 操作,建议不要走脚本迁移,直接删除 OneAPI 所有内容,然后手动重新添加渠道。

#### 5. 进入 FastGPT 检查 AI Proxy 服务是否正常启动

登录 root 账号后,在`账号-模型提供商`页面,可以看到多出了`模型渠道`和`调用日志`两个选项,打开模型渠道,可以看到之前 OneAPI 的渠道,说明迁移完成,此时可以手动再检查下渠道是否正常。

#### 6. 删除 OneAPI 服务

```bash
# 停止服务,或者针对性停止 OneAPI 和其 Mysql
docker-compose down
# yml 文件中删除 OneAPI 和其 Mysql 依赖
# 重启服务
docker-compose up -d
```

### 4. 运行 FastGPT 升级脚本

从任意终端,发起 1 个 HTTP 请求。其中 {{rootkey}} 替换成环境变量里的 `rootkey`;{{host}} 替换成**FastGPT 域名**。

```bash
curl --location --request POST 'https://{{host}}/api/admin/initv490' \
--header 'rootkey: {{rootkey}}' \
--header 'Content-Type: application/json'
```

**脚本功能**

1. 升级 PG Vector 插件版本。
2. 全量更新知识库集合字段。
3. 全量更新知识库数据中 index 的 type 类型。(时间较长,最后可能提示 timeout,可忽略,数据库不崩都会一直增量执行)

## 兼容 & 弃用

1. 弃用 - 之前私有化部署的自定义文件解析方案,请同步更新到最新的配置方案。[点击查看 PDF 增强解析配置](/docs/development/configuration/#使用-doc2x-解析-pdf-文件)
2. 弃用 - 旧版本地文件上传 API:/api/core/dataset/collection/create/file(以前仅商业版可用的 API,该接口现已切换成:/api/core/dataset/collection/create/localFile)
3. 停止维护,即将弃用 - 外部文件库相关 API,可通过 API 文件库替代。
4. API更新 - 上传文件至知识库、创建连接集合、API 文件库、推送分块数据等带有 `trainingType` 字段的接口,`trainingType`字段未来仅支持`chunk`和`QA`两种模式。增强索引模式将设置单独字段:`autoIndexes`,目前仍有适配旧版`trainingType=auto`代码,但请尽快变更成新接口类型。具体可见:[知识库 OpenAPI 文档](/docs/development/openapi/dataset.md)

## 🚀 新增内容

1. PDF 增强解析交互添加到页面上。同时内嵌 Doc2x 服务,可直接使用 Doc2x 服务解析 PDF 文件。
2. 图片自动标注,同时修改知识库文件上传部分数据逻辑和交互。
3. pg vector 插件升级 0.8.0 版本,引入迭代搜索,减少部分数据无法被检索的情况。
4. 新增 qwen-qwq 系列模型配置。

## ⚙️ 优化

1. 知识库数据不再限制索引数量,可无限自定义。同时可自动更新输入文本的索引,不影响自定义索引。
2. Markdown 解析,增加链接后中文标点符号检测,增加空格。
3. Prompt 模式工具调用,支持思考模型。同时优化其格式检测,减少空输出的概率。
4. Mongo 文件读取流合并,减少计算量。同时优化存储 chunks,极大提高大文件读取速度。50M PDF 读取时间提高 3 倍。
5. HTTP Body 适配,增加对字符串对象的适配。

## 🐛 修复

1. 增加网页抓取安全链接校验。
2. 批量运行时,全局变量未进一步传递到下一次运行中,导致最终变量更新错误。
36 docSite/content/zh-cn/docs/development/upgrading/491.md (new file)
@@ -0,0 +1,36 @@
---
title: 'V4.9.1'
description: 'FastGPT V4.9.1 更新说明'
icon: 'upgrade'
draft: false
toc: true
weight: 799
---

## 🚀 新增内容

1. 商业版支持单团队模式,更好地管理内部成员。
2. 知识库分块阅读器。
3. API 知识库支持 PDF 增强解析。
4. 邀请团队成员,改为邀请链接模式。
5. 支持混合检索权重设置。
6. 支持重排模型选择和权重设置,同时调整了知识库搜索权重计算方式,改成 搜索权重 + 重排权重,而不是向量检索权重+全文检索权重+重排权重。(计算方式示意见下方代码)
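A hypothetical TypeScript illustration of the re-weighting described in item 6, "search weight + rerank weight", replacing the old three-way split; this is not FastGPT's actual implementation:

```ts
// Hypothetical illustration of item 6: final score = search weight + rerank weight,
// instead of the old vector + full-text + rerank three-way split.
interface ScoredItem {
  searchScore: number; // already-fused vector/full-text retrieval score
  rerankScore: number; // score from the rerank model
}

function combinedScore(item: ScoredItem, searchWeight: number, rerankWeight: number): number {
  return searchWeight * item.searchScore + rerankWeight * item.rerankScore;
}

// e.g. weight the reranker more heavily than raw retrieval
console.log(combinedScore({ searchScore: 0.72, rerankScore: 0.9 }, 0.4, 0.6)); // 0.828
```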
## ⚙️ 优化

1. 知识库数据输入框交互。
2. 应用拉取绑定知识库数据交由后端处理。
3. 增加依赖包安全版本检测,并升级部分依赖包。
4. 模型测试代码。
5. 优化思考过程解析逻辑:只要配置了模型支持思考,均会解析 <think> 标签,不会因为对话时关闭思考而不解析。

## 🐛 修复

1. 最大响应 tokens 提示显示错误的问题。
2. HTTP Node 中,字符串包含换行符时,会解析失败。
3. 知识库问题优化中,未传递历史记录。
4. 错误提示翻译缺失。
5. 内容提取节点,array 类型 schema 错误。
6. 模型渠道测试时,实际未指定渠道测试。
7. 新增自定义模型时,会把默认模型字段也保存,导致默认模型误判。
8. 修复 prompt 模式工具调用,未判空思考链,导致 UI 错误展示。
@@ -30,7 +30,7 @@ FastGPT 升级包括两个步骤:

 ## Sealos 修改镜像

-1. 打开 [Sealos Cloud](https://cloud.sealos.io/), 找到桌面上的应用管理
+1. 打开 [Sealos Cloud](https://cloud.sealos.io?uid=fnWRt09fZP), 找到桌面上的应用管理

 ![](...)
@@ -14,7 +14,7 @@ weight: 303

 这里介绍在 Sealos 中部署 SearXNG 的方法。Docker 部署,可以直接参考 [SearXNG 官方教程](https://github.com/searxng/searxng)。

-点击打开 [Sealos 北京区](https://bja.sealos.run/),点击应用部署,并新建一个应用:
+点击打开 [Sealos 北京区](https://bja.sealos.run?uid=fnWRt09fZP),点击应用部署,并新建一个应用:

 | 打开应用部署 | 点击新建应用 |
 | --- | --- |
@@ -130,7 +130,7 @@ doi_resolvers:
 default_doi_resolver: 'oadoi.org'
 ```

-国内目前只有 Bing 引擎可以正常用,所以上面的配置只配置了 bing 引擎。如果在海外部署,可以使用[Sealos 新加坡可用区](https://cloud.sealos.io/),并配置其他搜索引擎,可以参考[SearXNG 默认配置文件](https://github.com/searxng/searxng/blob/master/searx/settings.yml), 从里面复制一些 engine 配置。例如:
+国内目前只有 Bing 引擎可以正常用,所以上面的配置只配置了 bing 引擎。如果在海外部署,可以使用[Sealos 新加坡可用区](https://cloud.sealos.io?uid=fnWRt09fZP),并配置其他搜索引擎,可以参考[SearXNG 默认配置文件](https://github.com/searxng/searxng/blob/master/searx/settings.yml), 从里面复制一些 engine 配置。例如:

 ```
 - name: duckduckgo
@@ -89,6 +89,12 @@ weight: 506
 47.99.59.223
 112.124.46.5
 121.40.46.247
+120.26.145.73
+120.26.147.199
+121.43.125.163
+121.196.228.45
+121.43.126.202
+120.26.144.37
 ```

 ## 4. 获取AES Key,选择加密方式
@@ -27,7 +27,7 @@ weight: 510

 ## sealos部署服务

-[访问sealos](https://cloud.sealos.run/) 登录进来之后打开「应用管理」->「新建应用」。
+[访问sealos](https://hzh.sealos.run?uid=fnWRt09fZP) 登录进来之后打开「应用管理」->「新建应用」。
 - 应用名称:随便填写
 - 镜像名:私人微信填写 aibotk/wechat-assistant,企业微信填写 aibotk/worker-assistant
 - cpu和内存建议 1c1g
12 package.json
@@ -11,16 +11,22 @@
     "initIcon": "node ./scripts/icon/init.js",
     "previewIcon": "node ./scripts/icon/index.js",
     "api:gen": "tsc ./scripts/openapi/index.ts && node ./scripts/openapi/index.js && npx @redocly/cli build-docs ./scripts/openapi/openapi.json -o ./projects/app/public/openapi/index.html",
-    "create:i18n": "node ./scripts/i18n/index.js"
+    "create:i18n": "node ./scripts/i18n/index.js",
+    "test": "vitest run --exclude 'test/cases/spec'",
+    "test:all": "vitest run",
+    "test:workflow": "vitest run workflow"
   },
   "devDependencies": {
     "@chakra-ui/cli": "^2.4.1",
+    "@vitest/coverage-v8": "^3.0.2",
     "husky": "^8.0.3",
-    "lint-staged": "^13.3.0",
     "i18next": "23.11.5",
+    "lint-staged": "^13.3.0",
     "next-i18next": "15.3.0",
-    "react-i18next": "14.1.2",
     "prettier": "3.2.4",
+    "react-i18next": "14.1.2",
+    "vitest": "^3.0.2",
+    "vitest-mongodb": "^1.0.1",
     "zhlint": "^0.7.4"
   },
   "lint-staged": {
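For reference, a minimal test file that the new `pnpm run test` script (Vitest) would collect — a hypothetical sample, not a file from this diff:

```ts
// test/example.test.ts — hypothetical sample; any *.test.ts outside the
// excluded 'test/cases/spec' path is collected by `vitest run`.
import { describe, expect, it } from 'vitest';

describe('example', () => {
  it('adds numbers', () => {
    expect(1 + 2).toBe(3);
  });
});
```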
@@ -24,7 +24,10 @@ export enum TeamErrEnum {
   cannotModifyRootOrg = 'cannotModifyRootOrg',
   cannotDeleteNonEmptyOrg = 'cannotDeleteNonEmptyOrg',
   cannotDeleteDefaultGroup = 'cannotDeleteDefaultGroup',
-  userNotActive = 'userNotActive'
+  userNotActive = 'userNotActive',
+  invitationLinkInvalid = 'invitationLinkInvalid',
+  youHaveBeenInTheTeam = 'youHaveBeenInTheTeam',
+  tooManyInvitations = 'tooManyInvitations'
 }

 const teamErr = [
@@ -112,6 +115,18 @@ const teamErr = [
   {
     statusText: TeamErrEnum.cannotDeleteNonEmptyOrg,
     message: i18nT('common:code_error.team_error.cannot_delete_non_empty_org')
   },
+  {
+    statusText: TeamErrEnum.invitationLinkInvalid,
+    message: i18nT('common:code_error.team_error.invitation_link_invalid')
+  },
+  {
+    statusText: TeamErrEnum.youHaveBeenInTheTeam,
+    message: i18nT('common:code_error.team_error.you_have_been_in_the_team')
+  },
+  {
+    statusText: TeamErrEnum.tooManyInvitations,
+    message: i18nT('common:code_error.team_error.too_many_invitations')
+  }
 ];
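A small sketch of how such an error table is typically consumed, resolving a status to its i18n message; a hypothetical helper, not code from this diff:

```ts
// Hypothetical helper: resolve a TeamErrEnum status to its i18n message entry.
type TeamErrItem = { statusText: TeamErrEnum; message: string };

function getTeamErrMessage(list: TeamErrItem[], statusText: TeamErrEnum): string | undefined {
  return list.find((item) => item.statusText === statusText)?.message;
}
```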
@@ -1,31 +0,0 @@
-export const retryRun = <T>(fn: () => T, retry = 2): T => {
-  try {
-    return fn();
-  } catch (error) {
-    if (retry > 0) {
-      return retryRun(fn, retry - 1);
-    }
-    throw error;
-  }
-};
-
-export const batchRun = async <T>(arr: T[], fn: (arr: T) => any, batchSize = 10) => {
-  const batchArr = new Array(batchSize).fill(null);
-  const result: any[] = [];
-
-  const batchFn = async () => {
-    const data = arr.shift();
-    if (data) {
-      result.push(await fn(data));
-      return batchFn();
-    }
-  };
-
-  await Promise.all(
-    batchArr.map(async () => {
-      await batchFn();
-    })
-  );
-
-  return result;
-};
@@ -1,4 +1,4 @@
import { batchRun } from '../fn/utils';
import { batchRun } from '../system/utils';
import { getNanoid, simpleText } from './tools';
import type { ImageType } from '../../../service/worker/readFile/type';

@@ -37,6 +37,80 @@ export const simpleMarkdownText = (rawText: string) => {
return rawText.trim();
};

export const htmlTable2Md = (content: string): string => {
return content.replace(/<table>[\s\S]*?<\/table>/g, (htmlTable) => {
try {
// Clean up whitespace and newlines
const cleanHtml = htmlTable.replace(/\n\s*/g, '');
const rows = cleanHtml.match(/<tr>(.*?)<\/tr>/g);
if (!rows) return htmlTable;

// Parse table data
let tableData: string[][] = [];
let maxColumns = 0;

// Try to convert to markdown table
rows.forEach((row, rowIndex) => {
if (!tableData[rowIndex]) {
tableData[rowIndex] = [];
}
let colIndex = 0;
const cells = row.match(/<td.*?>(.*?)<\/td>/g) || [];

cells.forEach((cell) => {
while (tableData[rowIndex][colIndex]) {
colIndex++;
}
const colspan = parseInt(cell.match(/colspan="(\d+)"/)?.[1] || '1');
const rowspan = parseInt(cell.match(/rowspan="(\d+)"/)?.[1] || '1');
const content = cell.replace(/<td.*?>|<\/td>/g, '').trim();

for (let i = 0; i < rowspan; i++) {
for (let j = 0; j < colspan; j++) {
if (!tableData[rowIndex + i]) {
tableData[rowIndex + i] = [];
}
tableData[rowIndex + i][colIndex + j] = i === 0 && j === 0 ? content : '^^';
}
}
colIndex += colspan;
maxColumns = Math.max(maxColumns, colIndex);
});

for (let i = 0; i < maxColumns; i++) {
if (!tableData[rowIndex][i]) {
tableData[rowIndex][i] = ' ';
}
}
});
const chunks: string[] = [];

const headerCells = tableData[0]
.slice(0, maxColumns)
.map((cell) => (cell === '^^' ? ' ' : cell || ' '));
const headerRow = '| ' + headerCells.join(' | ') + ' |';
chunks.push(headerRow);

const separator = '| ' + Array(headerCells.length).fill('---').join(' | ') + ' |';
chunks.push(separator);

tableData.slice(1).forEach((row) => {
const paddedRow = row
.slice(0, maxColumns)
.map((cell) => (cell === '^^' ? ' ' : cell || ' '));
while (paddedRow.length < maxColumns) {
paddedRow.push(' ');
}
chunks.push('| ' + paddedRow.join(' | ') + ' |');
});

return chunks.join('\n');
} catch (error) {
return htmlTable;
}
});
};

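A quick usage sketch of the new `htmlTable2Md` helper (the input HTML below is illustrative, not from the repo): it flattens `<table>` blocks into pipe tables, and cells covered by `rowspan`/`colspan` are marked internally with `^^` and rendered as blanks.

```ts
const html = '<table><tr><td rowspan="2">a</td><td>b</td></tr><tr><td>c</td></tr></table>';
htmlTable2Md(html);
// | a | b |
// | --- | --- |
// |   | c |
```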
/**
* format markdown
* 1. upload base64
@@ -94,7 +168,7 @@ export const markdownProcess = async ({
return simpleMarkdownText(imageProcess);
};

export const matchMdImgTextAndUpload = (text: string) => {
export const matchMdImg = (text: string) => {
const base64Regex = /!\[([^\]]*)\]\((data:image\/[^;]+;base64[^)]+)\)/g;
const imageList: ImageType[] = [];

@@ -93,7 +93,7 @@ ${mdSplitString}

/*
1. Custom separators: no overlap, no small-chunk merging
2. Markdown headings: no overlap; nested headings are shared, no small-chunk merging
2. Markdown headings: no overlap; nested headings are shared, small-chunk merging required
3. Special markdown syntax: no overlap, small-chunk merging required
4. Paragraphs: keep each one a complete paragraph as far as possible.
5. Punctuation splits: overlap
@@ -227,7 +227,7 @@ const commonSplit = (props: SplitProps): SplitResponse => {
}): string[] => {
const isMarkdownStep = checkIsMarkdownSplit(step);
const isCustomStep = checkIsCustomStep(step);
const forbidConcat = isMarkdownStep || isCustomStep; // when forbid=true, lastText is guaranteed to be empty
const forbidConcat = isCustomStep; // when forbid=true, lastText is guaranteed to be empty

// oversize
if (step >= stepReges.length) {

34
packages/global/common/system/types/index.d.ts
vendored
@@ -6,7 +6,7 @@ import type {
EmbeddingModelItemType,
AudioSpeechModels,
STTModelType,
ReRankModelItemType
RerankModelItemType
} from '../../../core/ai/model.d';
import { SubTypeEnum } from '../../../support/wallet/sub/constants';

@@ -35,7 +35,7 @@ export type FastGPTConfigFileType = {
// Abandon
llmModels?: ChatModelItemType[];
vectorModels?: EmbeddingModelItemType[];
reRankModels?: ReRankModelItemType[];
reRankModels?: RerankModelItemType[];
audioSpeechModels?: TTSModelType[];
whisperModel?: STTModelType;
};
@@ -43,10 +43,14 @@ export type FastGPTConfigFileType = {
export type FastGPTFeConfigsType = {
show_workorder?: boolean;
show_emptyChat?: boolean;
isPlus?: boolean;
register_method?: ['email' | 'phone' | 'sync'];
login_method?: ['email' | 'phone']; // Attention: login method is different from oauth
find_password_method?: ['email' | 'phone'];
bind_notification_method?: ['email' | 'phone'];
googleClientVerKey?: string;

show_emptyChat?: boolean;
show_appStore?: boolean;
show_git?: boolean;
show_pay?: boolean;
@@ -57,15 +61,19 @@ export type FastGPTFeConfigsType = {
show_aiproxy?: boolean;
concatMd?: string;

concatMd?: string;
docUrl?: string;
openAPIDocUrl?: string;
systemPluginCourseUrl?: string;
appTemplateCourse?: string;
customApiDomain?: string;
customSharePageDomain?: string;

systemTitle?: string;
systemDescription?: string;
googleClientVerKey?: string;
isPlus?: boolean;
scripts?: { [key: string]: string }[];
favicon?: string;

sso?: {
icon?: string;
title?: string;
@@ -91,13 +99,14 @@ export type FastGPTFeConfigsType = {
exportDatasetLimitMinutes?: number;
websiteSyncLimitMinuted?: number;
};
scripts?: { [key: string]: string }[];
favicon?: string;
customApiDomain?: string;
customSharePageDomain?: string;

uploadFileMaxAmount?: number;
uploadFileMaxSize?: number;

// Compute by systemEnv.customPdfParse
showCustomPdfParse?: boolean;
customPdfParsePrice?: number;

lafEnv?: string;
navbarItems?: NavbarItemType[];
externalProviderWorkflowVariables?: ExternalProviderWorkflowVarType[];
@@ -107,9 +116,18 @@ export type SystemEnvType = {
openapiPrefix?: string;
vectorMaxProcess: number;
qaMaxProcess: number;
vlmMaxProcess: number;
pgHNSWEfSearch: number;
tokenWorkers: number; // token count max worker

oneapiUrl?: string;
chatApiKey?: string;

customPdfParse?: {
url?: string;
key?: string;

doc2xKey?: string;
price?: number; // n points/1 page
};
};

@@ -16,3 +16,24 @@ export const retryFn = async <T>(fn: () => Promise<T>, retryTimes = 3): Promise<
return Promise.reject(error);
}
};

export const batchRun = async <T>(arr: T[], fn: (arr: T) => any, batchSize = 10) => {
const batchArr = new Array(batchSize).fill(null);
const result: any[] = [];

const batchFn = async () => {
const data = arr.shift();
if (data) {
result.push(await fn(data));
return batchFn();
}
};

await Promise.all(
batchArr.map(async () => {
await batchFn();
})
);

return result;
};

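`batchRun` has moved from `fn/utils` to `system/utils` unchanged: it starts `batchSize` workers that drain the shared array, capping concurrency. A minimal usage sketch (`loadItem` is a hypothetical helper); note that the function consumes the input array via `shift()` and a worker stops at the first falsy element, and that result order follows completion order:

```ts
const ids = ['a', 'b', 'c', 'd'];
// At most 2 loadItem calls are in flight at any time.
const items = await batchRun(ids, async (id) => loadItem(id), 2);
```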
2
packages/global/core/ai/model.d.ts
vendored
@@ -72,7 +72,7 @@ export type EmbeddingModelItemType = PriceType &
queryConfig?: Record<string, any>; // Custom parameters for query
};

export type ReRankModelItemType = PriceType &
export type RerankModelItemType = PriceType &
BaseModelItemType & {
type: ModelTypeEnum.rerank;
};

@@ -22,7 +22,7 @@ export const defaultQAModels: LLMModelItemType[] = [
maxTemperature: 1.2,
charsPointsPrice: 0,
censor: false,
vision: false,
vision: true,
datasetProcess: true,
toolChoice: true,
functionCall: false,
@@ -59,10 +59,17 @@ export const defaultSTTModels: STTModelType[] = [
export const getModelFromList = (
modelList: { provider: ModelProviderIdType; name: string; model: string }[],
model: string
) => {
):
| {
avatar: string;
provider: ModelProviderIdType;
name: string;
model: string;
}
| undefined => {
const modelData = modelList.find((item) => item.model === model) ?? modelList[0];
if (!modelData) {
throw new Error('No Key model is configured');
return;
}
const provider = getModelProvider(modelData.provider);
return {

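`getModelFromList` now returns `undefined` for an empty model list instead of throwing, so call sites need an explicit guard. A hedged sketch of the new calling pattern:

```ts
const modelData = getModelFromList(modelList, model);
if (!modelData) {
  // No model configured: fall back or surface a configuration error here.
  return;
}
console.log(modelData.avatar, modelData.name);
```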
24
packages/global/core/app/type.d.ts
vendored
@@ -71,6 +71,20 @@ export type AppDetailType = AppSchema & {
permission: AppPermission;
};

export type AppDatasetSearchParamsType = {
searchMode: `${DatasetSearchModeEnum}`;
limit?: number; // limit max tokens
similarity?: number;
embeddingWeight?: number; // embedding weight, fullText weight = 1 - embeddingWeight

usingReRank?: boolean;
rerankModel?: string;
rerankWeight?: number;

datasetSearchUsingExtensionQuery?: boolean;
datasetSearchExtensionModel?: string;
datasetSearchExtensionBg?: string;
};
export type AppSimpleEditFormType = {
// templateId: string;
aiSettings: {
@@ -88,14 +102,7 @@ export type AppSimpleEditFormType = {
};
dataset: {
datasets: SelectedDatasetType;
searchMode: `${DatasetSearchModeEnum}`;
similarity?: number;
limit?: number;
usingReRank?: boolean;
datasetSearchUsingExtensionQuery?: boolean;
datasetSearchExtensionModel?: string;
datasetSearchExtensionBg?: string;
};
} & AppDatasetSearchParamsType;
selectedTools: FlowNodeTemplateType[];
chatConfig: AppChatConfigType;
};
@@ -188,6 +195,7 @@ export type AppAutoExecuteConfigType = {
// File
export type AppFileSelectConfigType = {
canSelectFile: boolean;
customPdfParse?: boolean;
canSelectImg: boolean;
maxFiles: number;
};

@@ -24,9 +24,11 @@ export const getDefaultAppForm = (): AppSimpleEditFormType => {
dataset: {
datasets: [],
similarity: 0.4,
limit: 1500,
limit: 3000,
searchMode: DatasetSearchModeEnum.embedding,
usingReRank: false,
rerankModel: '',
rerankWeight: 0.5,
datasetSearchUsingExtensionQuery: true,
datasetSearchExtensionBg: ''
},
@@ -70,6 +72,26 @@ export const appWorkflow2Form = ({
node.inputs,
NodeInputKeyEnum.history
);
defaultAppForm.aiSettings.aiChatReasoning = findInputValueByKey(
node.inputs,
NodeInputKeyEnum.aiChatReasoning
);
defaultAppForm.aiSettings.aiChatTopP = findInputValueByKey(
node.inputs,
NodeInputKeyEnum.aiChatTopP
);
defaultAppForm.aiSettings.aiChatStopSign = findInputValueByKey(
node.inputs,
NodeInputKeyEnum.aiChatStopSign
);
defaultAppForm.aiSettings.aiChatResponseFormat = findInputValueByKey(
node.inputs,
NodeInputKeyEnum.aiChatResponseFormat
);
defaultAppForm.aiSettings.aiChatJsonSchema = findInputValueByKey(
node.inputs,
NodeInputKeyEnum.aiChatJsonSchema
);
} else if (node.flowNodeType === FlowNodeTypeEnum.datasetSearchNode) {
defaultAppForm.dataset.datasets = findInputValueByKey(
node.inputs,
@@ -86,10 +108,24 @@ export const appWorkflow2Form = ({
defaultAppForm.dataset.searchMode =
findInputValueByKey(node.inputs, NodeInputKeyEnum.datasetSearchMode) ||
DatasetSearchModeEnum.embedding;
defaultAppForm.dataset.embeddingWeight = findInputValueByKey(
node.inputs,
NodeInputKeyEnum.datasetSearchEmbeddingWeight
);
// Rerank
defaultAppForm.dataset.usingReRank = !!findInputValueByKey(
node.inputs,
NodeInputKeyEnum.datasetSearchUsingReRank
);
defaultAppForm.dataset.rerankModel = findInputValueByKey(
node.inputs,
NodeInputKeyEnum.datasetSearchRerankModel
);
defaultAppForm.dataset.rerankWeight = findInputValueByKey(
node.inputs,
NodeInputKeyEnum.datasetSearchRerankWeight
);
// Query extension
defaultAppForm.dataset.datasetSearchUsingExtensionQuery = findInputValueByKey(
node.inputs,
NodeInputKeyEnum.datasetSearchUsingExtensionQuery

@@ -1,8 +1,11 @@
import type {
AIChatItemValueItemType,
ChatItemType,
ChatItemValueItemType,
RuntimeUserPromptType,
UserChatItemType
SystemChatItemValueItemType,
UserChatItemType,
UserChatItemValueItemType
} from '../../core/chat/type.d';
import { ChatFileTypeEnum, ChatItemValueTypeEnum, ChatRoleEnum } from '../../core/chat/constants';
import type {

@@ -174,137 +177,24 @@ export const GPTMessages2Chats = (
): ChatItemType[] => {
const chatMessages = messages
.map((item) => {
const value: ChatItemType['value'] = [];
const obj = GPT2Chat[item.role];

if (
obj === ChatRoleEnum.System &&
item.role === ChatCompletionRequestMessageRoleEnum.System
) {
if (Array.isArray(item.content)) {
item.content.forEach((item) => [
value.push({
type: ChatItemValueTypeEnum.text,
text: {
content: item.text
}
})
]);
} else {
value.push({
type: ChatItemValueTypeEnum.text,
text: {
content: item.content
}
});
}
} else if (
obj === ChatRoleEnum.Human &&
item.role === ChatCompletionRequestMessageRoleEnum.User
) {
if (typeof item.content === 'string') {
value.push({
type: ChatItemValueTypeEnum.text,
text: {
content: item.content
}
});
} else if (Array.isArray(item.content)) {
item.content.forEach((item) => {
if (item.type === 'text') {
const value = (() => {
if (
obj === ChatRoleEnum.System &&
item.role === ChatCompletionRequestMessageRoleEnum.System
) {
const value: SystemChatItemValueItemType[] = [];

if (Array.isArray(item.content)) {
item.content.forEach((item) => [
value.push({
type: ChatItemValueTypeEnum.text,
text: {
content: item.text
}
});
} else if (item.type === 'image_url') {
value.push({
//@ts-ignore
type: ChatItemValueTypeEnum.file,
file: {
type: ChatFileTypeEnum.image,
name: '',
url: item.image_url.url
}
});
} else if (item.type === 'file_url') {
value.push({
// @ts-ignore
type: ChatItemValueTypeEnum.file,
file: {
type: ChatFileTypeEnum.file,
name: item.name,
url: item.url
}
});
}
});
}
} else if (
obj === ChatRoleEnum.AI &&
item.role === ChatCompletionRequestMessageRoleEnum.Assistant
) {
if (item.tool_calls && reserveTool) {
// save tool calls
const toolCalls = item.tool_calls as ChatCompletionMessageToolCall[];
value.push({
//@ts-ignore
type: ChatItemValueTypeEnum.tool,
tools: toolCalls.map((tool) => {
let toolResponse =
messages.find(
(msg) =>
msg.role === ChatCompletionRequestMessageRoleEnum.Tool &&
msg.tool_call_id === tool.id
)?.content || '';
toolResponse =
typeof toolResponse === 'string' ? toolResponse : JSON.stringify(toolResponse);

return {
id: tool.id,
toolName: tool.toolName || '',
toolAvatar: tool.toolAvatar || '',
functionName: tool.function.name,
params: tool.function.arguments,
response: toolResponse as string
};
})
});
} else if (item.function_call && reserveTool) {
const functionCall = item.function_call as ChatCompletionMessageFunctionCall;
const functionResponse = messages.find(
(msg) =>
msg.role === ChatCompletionRequestMessageRoleEnum.Function &&
msg.name === item.function_call?.name
) as ChatCompletionFunctionMessageParam;

if (functionResponse) {
value.push({
//@ts-ignore
type: ChatItemValueTypeEnum.tool,
tools: [
{
id: functionCall.id || '',
toolName: functionCall.toolName || '',
toolAvatar: functionCall.toolAvatar || '',
functionName: functionCall.name,
params: functionCall.arguments,
response: functionResponse.content || ''
}
]
});
}
} else if (item.interactive) {
value.push({
//@ts-ignore
type: ChatItemValueTypeEnum.interactive,
interactive: item.interactive
});
} else if (typeof item.content === 'string') {
const lastValue = value[value.length - 1];
if (lastValue && lastValue.type === ChatItemValueTypeEnum.text && lastValue.text) {
lastValue.text.content += item.content;
})
]);
} else {
value.push({
type: ChatItemValueTypeEnum.text,
@@ -313,8 +203,145 @@ export const GPTMessages2Chats = (
}
});
}
return value;
} else if (
obj === ChatRoleEnum.Human &&
item.role === ChatCompletionRequestMessageRoleEnum.User
) {
const value: UserChatItemValueItemType[] = [];

if (typeof item.content === 'string') {
value.push({
type: ChatItemValueTypeEnum.text,
text: {
content: item.content
}
});
} else if (Array.isArray(item.content)) {
item.content.forEach((item) => {
if (item.type === 'text') {
value.push({
type: ChatItemValueTypeEnum.text,
text: {
content: item.text
}
});
} else if (item.type === 'image_url') {
value.push({
//@ts-ignore
type: ChatItemValueTypeEnum.file,
file: {
type: ChatFileTypeEnum.image,
name: '',
url: item.image_url.url
}
});
} else if (item.type === 'file_url') {
value.push({
// @ts-ignore
type: ChatItemValueTypeEnum.file,
file: {
type: ChatFileTypeEnum.file,
name: item.name,
url: item.url
}
});
}
});
}
return value;
} else if (
obj === ChatRoleEnum.AI &&
item.role === ChatCompletionRequestMessageRoleEnum.Assistant
) {
const value: AIChatItemValueItemType[] = [];

if (typeof item.reasoning_text === 'string' && item.reasoning_text) {
value.push({
type: ChatItemValueTypeEnum.reasoning,
reasoning: {
content: item.reasoning_text
}
});
}
if (item.tool_calls && reserveTool) {
// save tool calls
const toolCalls = item.tool_calls as ChatCompletionMessageToolCall[];
value.push({
//@ts-ignore
type: ChatItemValueTypeEnum.tool,
tools: toolCalls.map((tool) => {
let toolResponse =
messages.find(
(msg) =>
msg.role === ChatCompletionRequestMessageRoleEnum.Tool &&
msg.tool_call_id === tool.id
)?.content || '';
toolResponse =
typeof toolResponse === 'string' ? toolResponse : JSON.stringify(toolResponse);

return {
id: tool.id,
toolName: tool.toolName || '',
toolAvatar: tool.toolAvatar || '',
functionName: tool.function.name,
params: tool.function.arguments,
response: toolResponse as string
};
})
});
}
if (item.function_call && reserveTool) {
const functionCall = item.function_call as ChatCompletionMessageFunctionCall;
const functionResponse = messages.find(
(msg) =>
msg.role === ChatCompletionRequestMessageRoleEnum.Function &&
msg.name === item.function_call?.name
) as ChatCompletionFunctionMessageParam;

if (functionResponse) {
value.push({
//@ts-ignore
type: ChatItemValueTypeEnum.tool,
tools: [
{
id: functionCall.id || '',
toolName: functionCall.toolName || '',
toolAvatar: functionCall.toolAvatar || '',
functionName: functionCall.name,
params: functionCall.arguments,
response: functionResponse.content || ''
}
]
});
}
}
if (item.interactive) {
value.push({
//@ts-ignore
type: ChatItemValueTypeEnum.interactive,
interactive: item.interactive
});
}
if (typeof item.content === 'string' && item.content) {
const lastValue = value[value.length - 1];
if (lastValue && lastValue.type === ChatItemValueTypeEnum.text && lastValue.text) {
lastValue.text.content += item.content;
} else {
value.push({
type: ChatItemValueTypeEnum.text,
text: {
content: item.content
}
});
}
}

return value;
}
}

return [];
})();

return {
dataId: item.dataId,

2
packages/global/core/chat/type.d.ts
vendored
@@ -77,6 +77,7 @@ export type AIChatItemValueItemType = {
| ChatItemValueTypeEnum.reasoning
| ChatItemValueTypeEnum.tool
| ChatItemValueTypeEnum.interactive;

text?: {
content: string;
};
@@ -133,6 +134,7 @@ export type ChatItemType = (UserChatItemType | SystemChatItemType | AIChatItemTy

// Frontend type
export type ChatSiteItemType = (UserChatItemType | SystemChatItemType | AIChatItemType) & {
_id?: string;
dataId: string;
status: `${ChatStatusEnum}`;
moduleName?: string;

19
packages/global/core/dataset/api.d.ts
vendored
@@ -1,5 +1,5 @@
import { DatasetDataIndexItemType, DatasetSchemaType } from './type';
import { TrainingModeEnum, DatasetCollectionTypeEnum } from './constants';
import { DatasetCollectionTypeEnum, DatasetCollectionDataProcessModeEnum } from './constants';
import type { LLMModelItemType } from '../ai/model.d';
import { ParentIdType } from 'common/parentFolder/type';

@@ -10,9 +10,11 @@ export type DatasetUpdateBody = {
name?: string;
avatar?: string;
intro?: string;
agentModel?: LLMModelItemType;
status?: DatasetSchemaType['status'];

agentModel?: string;
vlmModel?: string;

websiteConfig?: DatasetSchemaType['websiteConfig'];
externalReadUrl?: DatasetSchemaType['externalReadUrl'];
defaultPermission?: DatasetSchemaType['defaultPermission'];
@@ -27,7 +29,10 @@ export type DatasetUpdateBody = {
/* ================= collection ===================== */
export type DatasetCollectionChunkMetadataType = {
parentId?: string;
trainingType?: TrainingModeEnum;
customPdfParse?: boolean;
trainingType?: DatasetCollectionDataProcessModeEnum;
imageIndex?: boolean;
autoIndexes?: boolean;
chunkSize?: number;
chunkSplitter?: string;
qaPrompt?: string;
@@ -131,9 +136,15 @@ export type PostWebsiteSyncParams = {
export type PushDatasetDataProps = {
collectionId: string;
data: PushDatasetDataChunkProps[];
trainingMode: TrainingModeEnum;
trainingType?: DatasetCollectionDataProcessModeEnum;
autoIndexes?: boolean;
imageIndex?: boolean;
prompt?: string;

billId?: string;

// Abandon
trainingMode?: DatasetCollectionDataProcessModeEnum;
};
export type PushDatasetDataResponse = {
insertLen: number;

@@ -1,4 +1,4 @@
import { DatasetCollectionTypeEnum, TrainingModeEnum, TrainingTypeMap } from '../constants';
import { DatasetCollectionTypeEnum } from '../constants';
import { DatasetCollectionSchemaType } from '../type';

export const getCollectionSourceData = (collection?: DatasetCollectionSchemaType) => {
@@ -16,9 +16,3 @@ export const getCollectionSourceData = (collection?: DatasetCollectionSchemaType
export const checkCollectionIsFolder = (type: DatasetCollectionTypeEnum) => {
return type === DatasetCollectionTypeEnum.folder || type === DatasetCollectionTypeEnum.virtual;
};

export const getTrainingTypeLabel = (type?: TrainingModeEnum) => {
if (!type) return '';
if (!TrainingTypeMap[type]) return '';
return TrainingTypeMap[type].label;
};

@@ -109,6 +109,26 @@ export const DatasetCollectionSyncResultMap = {
}
};

export enum DatasetCollectionDataProcessModeEnum {
chunk = 'chunk',
qa = 'qa',
auto = 'auto' // abandon
}
export const DatasetCollectionDataProcessModeMap = {
[DatasetCollectionDataProcessModeEnum.chunk]: {
label: i18nT('common:core.dataset.training.Chunk mode'),
tooltip: i18nT('common:core.dataset.import.Chunk Split Tip')
},
[DatasetCollectionDataProcessModeEnum.qa]: {
label: i18nT('common:core.dataset.training.QA mode'),
tooltip: i18nT('common:core.dataset.import.QA Import Tip')
},
[DatasetCollectionDataProcessModeEnum.auto]: {
label: i18nT('common:core.dataset.training.Auto mode'),
tooltip: i18nT('common:core.dataset.training.Auto mode Tip')
}
};

/* ------------ data -------------- */

/* ------------ training -------------- */
@@ -124,28 +144,11 @@ export enum ImportDataSourceEnum {

export enum TrainingModeEnum {
chunk = 'chunk',
qa = 'qa',
auto = 'auto',
qa = 'qa'
image = 'image'
}

export const TrainingTypeMap = {
[TrainingModeEnum.chunk]: {
label: i18nT('common:core.dataset.training.Chunk mode'),
tooltip: i18nT('common:core.dataset.import.Chunk Split Tip'),
openSource: true
},
[TrainingModeEnum.auto]: {
label: i18nT('common:core.dataset.training.Auto mode'),
tooltip: i18nT('common:core.dataset.training.Auto mode Tip'),
openSource: false
},
[TrainingModeEnum.qa]: {
label: i18nT('common:core.dataset.training.QA mode'),
tooltip: i18nT('common:core.dataset.import.QA Import Tip'),
openSource: true
}
};

/* ------------ search -------------- */
export enum DatasetSearchModeEnum {
embedding = 'embedding',
@@ -182,7 +185,7 @@ export enum SearchScoreTypeEnum {
}
export const SearchScoreTypeMap = {
[SearchScoreTypeEnum.embedding]: {
label: i18nT('common:core.dataset.search.score.embedding'),
label: i18nT('common:core.dataset.search.mode.embedding'),
desc: i18nT('common:core.dataset.search.score.embedding desc'),
showScore: true
},

25
packages/global/core/dataset/controller.d.ts
vendored
@@ -20,9 +20,22 @@ export type UpdateDatasetDataProps = {
})[];
};

export type PatchIndexesProps = {
type: 'create' | 'update' | 'delete' | 'unChange';
index: Omit<DatasetDataIndexItemType, 'dataId'> & {
dataId?: string;
};
};
export type PatchIndexesProps =
| {
type: 'create';
index: Omit<DatasetDataIndexItemType, 'dataId'> & {
dataId?: string;
};
}
| {
type: 'update';
index: DatasetDataIndexItemType;
}
| {
type: 'delete';
index: DatasetDataIndexItemType;
}
| {
type: 'unChange';
index: DatasetDataIndexItemType;
};

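`PatchIndexesProps` is now a discriminated union, so TypeScript can make `dataId` optional only for the `create` operation and require it everywhere else. A hedged sketch of how the narrowing plays out (field values are illustrative):

```ts
const createPatch: PatchIndexesProps = {
  type: 'create',
  index: { type: 'custom', text: 'new index text' } // dataId may be omitted only here
};
const deletePatch: PatchIndexesProps = {
  type: 'delete',
  index: { type: 'custom', text: 'old index text', dataId: 'pg-data-id' } // dataId required
};
```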
42
packages/global/core/dataset/data/constants.ts
Normal file
@@ -0,0 +1,42 @@
import { i18nT } from '../../../../web/i18n/utils';

export enum DatasetDataIndexTypeEnum {
default = 'default',
custom = 'custom',
summary = 'summary',
question = 'question',
image = 'image'
}

export const DatasetDataIndexMap: Record<
`${DatasetDataIndexTypeEnum}`,
{
label: any;
color: string;
}
> = {
[DatasetDataIndexTypeEnum.default]: {
label: i18nT('common:data_index_default'),
color: 'gray'
},
[DatasetDataIndexTypeEnum.custom]: {
label: i18nT('common:data_index_custom'),
color: 'blue'
},
[DatasetDataIndexTypeEnum.summary]: {
label: i18nT('common:data_index_summary'),
color: 'green'
},
[DatasetDataIndexTypeEnum.question]: {
label: i18nT('common:data_index_question'),
color: 'red'
},
[DatasetDataIndexTypeEnum.image]: {
label: i18nT('common:data_index_image'),
color: 'purple'
}
};
export const defaultDatasetIndexData = DatasetDataIndexMap[DatasetDataIndexTypeEnum.custom];
export const getDatasetIndexMapData = (type: `${DatasetDataIndexTypeEnum}`) => {
return DatasetDataIndexMap[type] || defaultDatasetIndexData;
};
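`getDatasetIndexMapData` gives each index type a badge label and color, falling back to the `custom` entry for unknown values. A quick sketch:

```ts
getDatasetIndexMapData('summary').color;        // 'green'
getDatasetIndexMapData('unknown' as any).color; // 'blue' (falls back to the custom entry)
```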
20
packages/global/core/dataset/training/type.d.ts
vendored
Normal file
@@ -0,0 +1,20 @@
import { PushDatasetDataChunkProps } from '../api';
import { TrainingModeEnum } from '../constants';

export type PushDataToTrainingQueueProps = {
teamId: string;
tmbId: string;
datasetId: string;
collectionId: string;

mode?: TrainingModeEnum;
data: PushDatasetDataChunkProps[];
prompt?: string;

agentModel: string;
vectorModel: string;
vlmModel?: string;

billId?: string;
session?: ClientSession;
};

52
packages/global/core/dataset/type.d.ts
vendored
@@ -2,6 +2,7 @@ import type { LLMModelItemType, EmbeddingModelItemType } from '../../core/ai/mod
import { PermissionTypeEnum } from '../../support/permission/constant';
import { PushDatasetDataChunkProps } from './api';
import {
DatasetCollectionDataProcessModeEnum,
DatasetCollectionTypeEnum,
DatasetStatusEnum,
DatasetTypeEnum,
@@ -12,6 +13,7 @@ import { DatasetPermission } from '../../support/permission/dataset/controller';
import { Permission } from '../../support/permission/controller';
import { APIFileServer, FeishuServer, YuqueServer } from './apiDataset';
import { SourceMemberType } from 'support/user/type';
import { DatasetDataIndexTypeEnum } from './data/constants';

export type DatasetSchemaType = {
_id: string;
@@ -23,11 +25,14 @@ export type DatasetSchemaType = {

avatar: string;
name: string;
vectorModel: string;
agentModel: string;
intro: string;
type: `${DatasetTypeEnum}`;
status: `${DatasetStatusEnum}`;

vectorModel: string;
agentModel: string;
vlmModel?: string;

websiteConfig?: {
url: string;
selector: string;
@@ -52,26 +57,22 @@ export type DatasetCollectionSchemaType = {
parentId?: string;
name: string;
type: DatasetCollectionTypeEnum;
createTime: Date;
updateTime: Date;
forbid?: boolean;

trainingType: TrainingModeEnum;
chunkSize: number;
chunkSplitter?: string;
qaPrompt?: string;
ocrParse?: boolean;

tags?: string[];

createTime: Date;
updateTime: Date;

// Status
forbid?: boolean;
nextSyncTime?: Date;

// Collection metadata
fileId?: string; // local file id
rawLink?: string; // link url
externalFileId?: string; //external file id
apiFileId?: string; // api file id
externalFileUrl?: string; // external import url

nextSyncTime?: Date;

rawTextLength?: number;
hashRawText?: string;
metadata?: {
@@ -80,6 +81,16 @@ export type DatasetCollectionSchemaType = {

[key: string]: any;
};

// Parse settings
customPdfParse?: boolean;
// Chunk settings
autoIndexes?: boolean;
imageIndex?: boolean;
trainingType: DatasetCollectionDataProcessModeEnum;
chunkSize: number;
chunkSplitter?: string;
qaPrompt?: string;
};

export type DatasetCollectionTagsSchemaType = {
@@ -90,7 +101,7 @@ export type DatasetCollectionTagsSchemaType = {
};

export type DatasetDataIndexItemType = {
defaultIndex: boolean;
type: `${DatasetDataIndexTypeEnum}`;
dataId: string; // pg data id
text: string;
};
@@ -101,12 +112,15 @@ export type DatasetDataSchemaType = {
tmbId: string;
datasetId: string;
collectionId: string;
datasetId: string;
collectionId: string;
chunkIndex: number;
updateTime: Date;
q: string; // large chunks or question
a: string; // answer or custom content
history?: {
q: string;
a: string;
updateTime: Date;
}[];
forbid?: boolean;
fullTextToken: string;
indexes: DatasetDataIndexItemType[];
@@ -141,6 +155,7 @@ export type DatasetTrainingSchemaType = {
chunkIndex: number;
weight: number;
indexes: Omit<DatasetDataIndexItemType, 'dataId'>[];
retryCount: number;
};

export type CollectionWithDatasetType = DatasetCollectionSchemaType & {
@@ -169,9 +184,10 @@ export type DatasetListItemType = {
sourceMember?: SourceMemberType;
};

export type DatasetItemType = Omit<DatasetSchemaType, 'vectorModel' | 'agentModel'> & {
export type DatasetItemType = Omit<DatasetSchemaType, 'vectorModel' | 'agentModel' | 'vlmModel'> & {
vectorModel: EmbeddingModelItemType;
agentModel: LLMModelItemType;
vlmModel?: LLMModelItemType;
permission: DatasetPermission;
};

@@ -1,6 +1,7 @@
import { TrainingModeEnum, DatasetCollectionTypeEnum } from './constants';
import { getFileIcon } from '../../common/file/icon';
import { strIsLink } from '../../common/string/tools';
import { DatasetDataIndexTypeEnum } from './data/constants';

export function getCollectionIcon(
type: DatasetCollectionTypeEnum = DatasetCollectionTypeEnum.file,
@@ -38,14 +39,23 @@ export function getSourceNameIcon({
}

/* get dataset data default index */
export function getDefaultIndex(props?: { q?: string; a?: string; dataId?: string }) {
const { q = '', a, dataId } = props || {};
const qaStr = `${q}\n${a}`.trim();
return {
defaultIndex: true,
text: a ? qaStr : q,
dataId
};
export function getDefaultIndex(props?: { q?: string; a?: string }) {
const { q = '', a } = props || {};

return [
{
text: q,
type: DatasetDataIndexTypeEnum.default
},
...(a
? [
{
text: a,
type: DatasetDataIndexTypeEnum.default
}
]
: [])
];
}

export const predictDataLimitLength = (mode: TrainingModeEnum, data: any[]) => {

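`getDefaultIndex` now returns an array of typed indexes, one per Q and A, instead of a single concatenated entry. A quick sketch of the new shape:

```ts
getDefaultIndex({ q: 'What is FastGPT?', a: 'An LLM application platform.' });
// -> [
//   { text: 'What is FastGPT?', type: 'default' },
//   { text: 'An LLM application platform.', type: 'default' }
// ]
```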
7
packages/global/core/workflow/api.d.ts
vendored
@@ -1,7 +1,12 @@
import { EmbeddingModelItemType } from '../ai/model.d';
import { NodeInputKeyEnum } from './constants';

export type SelectedDatasetType = { datasetId: string }[];
export type SelectedDatasetType = {
datasetId: string;
avatar: string;
name: string;
vectorModel: EmbeddingModelItemType;
}[];

export type HttpBodyType<T = Record<string, any>> = {
// [NodeInputKeyEnum.addInputParam]: Record<string, any>;

@@ -154,7 +154,12 @@ export enum NodeInputKeyEnum {
datasetSimilarity = 'similarity',
datasetMaxTokens = 'limit',
datasetSearchMode = 'searchMode',
datasetSearchEmbeddingWeight = 'embeddingWeight',

datasetSearchUsingReRank = 'usingReRank',
datasetSearchRerankWeight = 'rerankWeight',
datasetSearchRerankModel = 'rerankModel',

datasetSearchUsingExtensionQuery = 'datasetSearchUsingExtensionQuery',
datasetSearchExtensionModel = 'datasetSearchExtensionModel',
datasetSearchExtensionBg = 'datasetSearchExtensionBg',

@@ -133,6 +133,9 @@ export type DispatchNodeResponseType = {
similarity?: number;
limit?: number;
searchMode?: `${DatasetSearchModeEnum}`;
embeddingWeight?: number;
rerankModel?: string;
rerankWeight?: number;
searchUsingReRank?: boolean;
queryExtensionResult?: {
model: string;

@@ -55,7 +55,7 @@ export const AiChatModule: FlowNodeTemplateType = {
showStatus: true,
isTool: true,
courseUrl: '/docs/guide/workbench/workflow/ai_chat/',
version: '4813',
version: '490',
inputs: [
Input_Template_SettingAiModel,
// --- settings modal

@@ -4,7 +4,10 @@ export type ContextExtractAgentItemType = {
valueType:
| WorkflowIOValueTypeEnum.string
| WorkflowIOValueTypeEnum.number
| WorkflowIOValueTypeEnum.boolean;
| WorkflowIOValueTypeEnum.boolean
| WorkflowIOValueTypeEnum.arrayString
| WorkflowIOValueTypeEnum.arrayNumber
| WorkflowIOValueTypeEnum.arrayBoolean;
desc: string;
key: string;
required: boolean;

@@ -64,6 +64,14 @@ export const DatasetSearchModule: FlowNodeTemplateType = {
valueType: WorkflowIOValueTypeEnum.string,
value: DatasetSearchModeEnum.embedding
},
{
key: NodeInputKeyEnum.datasetSearchEmbeddingWeight,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
valueType: WorkflowIOValueTypeEnum.number,
value: 0.5
},
// Rerank
{
key: NodeInputKeyEnum.datasetSearchUsingReRank,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
@@ -71,6 +79,20 @@ export const DatasetSearchModule: FlowNodeTemplateType = {
valueType: WorkflowIOValueTypeEnum.boolean,
value: false
},
{
key: NodeInputKeyEnum.datasetSearchRerankModel,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
valueType: WorkflowIOValueTypeEnum.string
},
{
key: NodeInputKeyEnum.datasetSearchRerankWeight,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
valueType: WorkflowIOValueTypeEnum.number,
value: 0.5
},
// Query Extension
{
key: NodeInputKeyEnum.datasetSearchUsingExtensionQuery,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
@@ -91,6 +113,7 @@ export const DatasetSearchModule: FlowNodeTemplateType = {
valueType: WorkflowIOValueTypeEnum.string,
value: ''
},

{
key: NodeInputKeyEnum.authTmbId,
renderTypeList: [FlowNodeInputTypeEnum.hidden],

@@ -58,6 +58,13 @@ export const ToolModule: FlowNodeTemplateType = {
valueType: WorkflowIOValueTypeEnum.boolean,
value: true
},
{
key: NodeInputKeyEnum.aiChatReasoning,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
valueType: WorkflowIOValueTypeEnum.boolean,
value: true
},
{
key: NodeInputKeyEnum.aiChatTopP,
renderTypeList: [FlowNodeInputTypeEnum.hidden],

@@ -3,14 +3,14 @@
"version": "1.0.0",
"dependencies": {
"@apidevtools/swagger-parser": "^10.1.0",
"axios": "^1.5.1",
"axios": "^1.8.2",
"cron-parser": "^4.9.0",
"dayjs": "^1.11.7",
"encoding": "^0.1.13",
"js-yaml": "^4.1.0",
"jschardet": "3.1.1",
"nanoid": "^4.0.1",
"next": "14.2.5",
"nanoid": "^5.1.3",
"next": "14.2.21",
"openai": "4.61.0",
"openapi-types": "^12.1.3",
"json5": "^2.2.3",

3
packages/global/support/outLink/type.d.ts
vendored
@@ -63,6 +63,8 @@ export type OutLinkSchema<T extends OutlinkAppType = undefined> = {
responseDetail: boolean;
// whether to hide the node status
showNodeStatus?: boolean;
// whether to show the full text reader
// showFullText?: boolean;
// whether to show the complete quote
showRawSource?: boolean;

@@ -89,6 +91,7 @@ export type OutLinkEditType<T = undefined> = {
name: string;
responseDetail?: OutLinkSchema<T>['responseDetail'];
showNodeStatus?: OutLinkSchema<T>['showNodeStatus'];
// showFullText?: OutLinkSchema<T>['showFullText'];
showRawSource?: OutLinkSchema<T>['showRawSource'];
// response when request
immediateResponse?: string;

@@ -14,29 +14,28 @@ export const TeamMemberRoleMap = {
};

export enum TeamMemberStatusEnum {
waiting = 'waiting',
active = 'active',
reject = 'reject',
leave = 'leave'
leave = 'leave',
forbidden = 'forbidden'
}

export const TeamMemberStatusMap = {
[TeamMemberStatusEnum.waiting]: {
label: 'user.team.member.waiting',
color: 'orange.600'
},
[TeamMemberStatusEnum.active]: {
label: 'user.team.member.active',
color: 'green.600'
},
[TeamMemberStatusEnum.reject]: {
label: 'user.team.member.reject',
color: 'red.600'
},
[TeamMemberStatusEnum.leave]: {
label: 'user.team.member.leave',
color: 'red.600'
},
[TeamMemberStatusEnum.forbidden]: {
label: 'user.team.member.forbidden',
color: 'red.600'
}
};

export const notLeaveStatus = { $ne: TeamMemberStatusEnum.leave };
export const notLeaveStatus = {
$not: {
$in: [TeamMemberStatusEnum.leave, TeamMemberStatusEnum.forbidden]
}
};

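The broadened `notLeaveStatus` filter now excludes `forbidden` members as well as departed ones. A hedged usage sketch (the model name is hypothetical):

```ts
// Matches members whose status is neither 'leave' nor 'forbidden'.
const activeMembers = await MongoTeamMember.find({ teamId, status: notLeaveStatus });
```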
@@ -40,11 +40,6 @@ export type UpdateInviteProps = {
status: TeamMemberSchema['status'];
};

export type UpdateStatusProps = {
tmbId: string;
status: TeamMemberSchema['status'];
};

export type InviteMemberResponse = Record<
'invite' | 'inValid' | 'inTeam',
{ username: string; userId: string }[]

@@ -10,7 +10,8 @@ export enum UsageSourceEnum {
wecom = 'wecom',
feishu = 'feishu',
dingtalk = 'dingtalk',
official_account = 'official_account'
official_account = 'official_account',
pdfParse = 'pdfParse'
}

export const UsageSourceMap = {
@@ -43,5 +44,8 @@ export const UsageSourceMap = {
},
[UsageSourceEnum.dingtalk]: {
label: i18nT('account_usage:dingtalk')
},
[UsageSourceEnum.pdfParse]: {
label: i18nT('account_usage:pdf_parse')
}
};

@@ -7,6 +7,7 @@ export type UsageListItemCountType = {
outputTokens?: number;
charsLength?: number;
duration?: number;
pages?: number;

// deprecated
tokens?: number;

@@ -5,7 +5,7 @@
"dependencies": {
"cheerio": "1.0.0-rc.12",
"@types/pg": "^8.6.6",
"axios": "^1.5.1",
"axios": "^1.8.2",
"duck-duck-scrape": "^2.2.5",
"echarts": "5.4.1",
"expr-eval": "^2.0.2",

@@ -52,7 +52,9 @@ export async function uploadFile({
const stats = await fsp.stat(path);
if (!stats.isFile()) return Promise.reject(`${path} is not a file`);

const readStream = fs.createReadStream(path);
const readStream = fs.createReadStream(path, {
highWaterMark: 256 * 1024
});

// Add default metadata
metadata.teamId = teamId;
@@ -62,9 +64,27 @@ export async function uploadFile({
// create a gridfs bucket
const bucket = getGridBucket(bucketName);

const fileSize = stats.size;
const chunkSizeBytes = (() => {
// Ideal chunk size: file size ÷ target chunk count (10)
const idealChunkSize = Math.ceil(fileSize / 10);

// Ensure the chunk size is at least 512KB
const minChunkSize = 512 * 1024; // 512KB

// Take the larger of the ideal and minimum chunk sizes
let chunkSize = Math.max(idealChunkSize, minChunkSize);

// Round the chunk size up to the nearest multiple of 64KB to keep it tidy
chunkSize = Math.ceil(chunkSize / (64 * 1024)) * (64 * 1024);

return chunkSize;
})();

const stream = bucket.openUploadStream(filename, {
metadata,
contentType
contentType,
chunkSizeBytes
});

// save to gridfs
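A worked example of the chunk-size arithmetic above, under the assumption of a 100 MB upload:

```ts
// fileSize = 100 * 1024 * 1024 = 104857600 bytes
// idealChunkSize = Math.ceil(104857600 / 10) = 10485760 (10 MB)
// Math.max(10485760, 524288)                 = 10485760 (above the 512 KB floor)
// Math.ceil(10485760 / 65536) * 65536        = 10485760 (already a 64 KB multiple)
// => the file is stored as ten 10 MB GridFS chunks.
```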
@@ -186,20 +206,25 @@ export async function getDownloadStream({

export const readFileContentFromMongo = async ({
teamId,
tmbId,
bucketName,
fileId,
isQAImport = false
isQAImport = false,
customPdfParse = false
}: {
teamId: string;
tmbId: string;
bucketName: `${BucketNameEnum}`;
fileId: string;
isQAImport?: boolean;
customPdfParse?: boolean;
}): Promise<{
rawText: string;
filename: string;
}> => {
const bufferId = `${fileId}-${customPdfParse}`;
// read buffer
const fileBuffer = await MongoRawTextBuffer.findOne({ sourceId: fileId }, undefined, {
const fileBuffer = await MongoRawTextBuffer.findOne({ sourceId: bufferId }, undefined, {
...readFromSecondary
}).lean();
if (fileBuffer) {
@@ -227,9 +252,11 @@ export const readFileContentFromMongo = async ({

// Get raw text
const { rawText } = await readRawContentByFileBuffer({
customPdfParse,
extension,
isQAImport,
teamId,
tmbId,
buffer: fileBuffers,
encoding,
metadata: {
@@ -240,7 +267,7 @@ export const readFileContentFromMongo = async ({
// < 14M
if (fileBuffers.length < 14 * 1024 * 1024 && rawText.trim()) {
MongoRawTextBuffer.create({
sourceId: fileId,
sourceId: bufferId,
rawText,
metadata: {
filename: file.filename
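The raw-text cache is now keyed by file id plus parse mode, so the same PDF parsed with and without the custom parser gets separate cache entries. An illustrative sketch of the two keys one file can produce:

```ts
const keyDefault = `${fileId}-false`; // parsed by the system worker
const keyCustom = `${fileId}-true`;   // parsed by the custom PDF service
```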
@@ -3,15 +3,13 @@ import { PassThrough } from 'stream';

export const gridFsStream2Buffer = (stream: NodeJS.ReadableStream) => {
return new Promise<Buffer>((resolve, reject) => {
const chunks: Buffer[] = [];
let totalLength = 0;
const chunks: Uint8Array[] = [];

stream.on('data', (chunk) => {
chunks.push(chunk);
totalLength += chunk.length;
});
stream.on('end', () => {
const resultBuffer = Buffer.concat(chunks, totalLength); // concatenate in one pass
const resultBuffer = Buffer.concat(chunks); // concatenate in one pass
resolve(resultBuffer);
});
stream.on('error', (err) => {
@@ -21,25 +19,26 @@ export const gridFsStream2Buffer = (stream: NodeJS.ReadableStream) => {
};

export const stream2Encoding = async (stream: NodeJS.ReadableStream) => {
const start = Date.now();
const copyStream = stream.pipe(new PassThrough());

/* get encoding */
const buffer = await (() => {
return new Promise<Buffer>((resolve, reject) => {
let tmpBuffer: Buffer = Buffer.from([]);
const chunks: Uint8Array[] = [];
let totalLength = 0;

stream.on('data', (chunk) => {
if (tmpBuffer.length < 200) {
tmpBuffer = Buffer.concat([tmpBuffer, chunk]);
if (totalLength < 200) {
chunks.push(chunk);
totalLength += chunk.length;

if (tmpBuffer.length >= 200) {
resolve(tmpBuffer);
if (totalLength >= 200) {
resolve(Buffer.concat(chunks));
}
}
});
stream.on('end', () => {
resolve(tmpBuffer);
resolve(Buffer.concat(chunks));
});
stream.on('error', (err) => {
reject(err);

@@ -6,6 +6,7 @@ import { guessBase64ImageType } from '../utils';
import { readFromSecondary } from '../../mongo/utils';
import { addHours } from 'date-fns';
import { imageFileType } from '@fastgpt/global/common/file/constants';
import { retryFn } from '@fastgpt/global/common/system/utils';

export const maxImgSize = 1024 * 1024 * 12;
const base64MimeRegex = /data:image\/([^\)]+);base64/;
@@ -40,13 +41,15 @@ export async function uploadMongoImg({
return Promise.reject(`Invalid image file type: ${mime}`);
}

const { _id } = await MongoImage.create({
teamId,
binary,
metadata: Object.assign({ mime }, metadata),
shareId,
expiredTime: forever ? undefined : addHours(new Date(), 1)
});
const { _id } = await retryFn(() =>
MongoImage.create({
teamId,
binary,
metadata: Object.assign({ mime }, metadata),
shareId,
expiredTime: forever ? undefined : addHours(new Date(), 1)
})
);

return `${process.env.NEXT_PUBLIC_BASE_URL || ''}${imageBaseUrl}${String(_id)}.${extension}`;
}

34
packages/service/common/file/image/utils.ts
Normal file
@@ -0,0 +1,34 @@
import axios from 'axios';
import { addLog } from '../../system/log';
import { serverRequestBaseUrl } from '../../api/serverRequest';
import { getFileContentTypeFromHeader, guessBase64ImageType } from '../utils';
import { retryFn } from '@fastgpt/global/common/system/utils';

export const getImageBase64 = async (url: string) => {
addLog.debug(`Load image to base64: ${url}`);

try {
const response = await retryFn(() =>
axios.get(url, {
baseURL: serverRequestBaseUrl,
responseType: 'arraybuffer',
proxy: false
})
);

const base64 = Buffer.from(response.data, 'binary').toString('base64');
const imageType =
getFileContentTypeFromHeader(response.headers['content-type']) ||
guessBase64ImageType(base64);

return {
completeBase64: `data:${imageType};base64,${base64}`,
base64,
mime: imageType
};
} catch (error) {
addLog.debug(`Load image to base64 failed: ${url}`);
console.log(error);
return Promise.reject(error);
}
};
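A quick usage sketch of the new `getImageBase64` helper (the URL is illustrative): it fetches the image with retries and returns both the raw base64 and a ready-to-embed data URI.

```ts
const { completeBase64, mime } = await getImageBase64('https://example.com/figure.png');
// mime           -> 'image/png'
// completeBase64 -> 'data:image/png;base64,iVBORw0KGgo...'
```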
@@ -1,18 +1,24 @@
import { uploadMongoImg } from '../image/controller';
import FormData from 'form-data';

import { WorkerNameEnum, runWorker } from '../../../worker/utils';
import fs from 'fs';
import type { ReadFileResponse } from '../../../worker/readFile/type';
import type { ImageType, ReadFileResponse } from '../../../worker/readFile/type';
import axios from 'axios';
import { addLog } from '../../system/log';
import { batchRun } from '@fastgpt/global/common/fn/utils';
import { matchMdImgTextAndUpload } from '@fastgpt/global/common/string/markdown';
import { batchRun } from '@fastgpt/global/common/system/utils';
import { htmlTable2Md, matchMdImg } from '@fastgpt/global/common/string/markdown';
import { createPdfParseUsage } from '../../../support/wallet/usage/controller';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { delay } from '@fastgpt/global/common/system/utils';
import { getNanoid } from '@fastgpt/global/common/string/tools';
import { getImageBase64 } from '../image/utils';

export type readRawTextByLocalFileParams = {
teamId: string;
tmbId: string;
path: string;
encoding: string;
customPdfParse?: boolean;
metadata?: Record<string, any>;
};
export const readRawTextByLocalFile = async (params: readRawTextByLocalFileParams) => {
@@ -22,46 +28,51 @@ export const readRawTextByLocalFile = async (params: readRawTextByLocalFileParam

const buffer = await fs.promises.readFile(path);

const { rawText } = await readRawContentByFileBuffer({
return readRawContentByFileBuffer({
extension,
isQAImport: false,
customPdfParse: params.customPdfParse,
teamId: params.teamId,
tmbId: params.tmbId,
encoding: params.encoding,
buffer,
metadata: params.metadata
});

return {
rawText
};
};

export const readRawContentByFileBuffer = async ({
extension,
isQAImport,
teamId,
tmbId,

extension,
buffer,
encoding,
metadata
metadata,
customPdfParse = false,
isQAImport = false
}: {
isQAImport?: boolean;
extension: string;
teamId: string;
tmbId: string;

extension: string;
buffer: Buffer;
encoding: string;
metadata?: Record<string, any>;
}) => {
// Custom read file service
const customReadfileUrl = process.env.CUSTOM_READ_FILE_URL;
const customReadFileExtension = process.env.CUSTOM_READ_FILE_EXTENSION || '';
const ocrParse = process.env.CUSTOM_READ_FILE_OCR || 'false';
const readFileFromCustomService = async (): Promise<ReadFileResponse | undefined> => {
if (
!customReadfileUrl ||
!customReadFileExtension ||
!customReadFileExtension.includes(extension)
)
return;

customPdfParse?: boolean;
isQAImport: boolean;
}): Promise<ReadFileResponse> => {
const systemParse = () =>
runWorker<ReadFileResponse>(WorkerNameEnum.readFile, {
extension,
encoding,
buffer,
teamId
});
const parsePdfFromCustomService = async (): Promise<ReadFileResponse> => {
const url = global.systemEnv.customPdfParse?.url;
const token = global.systemEnv.customPdfParse?.key;
if (!url) return systemParse();

const start = Date.now();
addLog.info('Parsing files from an external service');
@@ -70,27 +81,32 @@ export const readRawContentByFileBuffer = async ({
|
||||
data.append('file', buffer, {
|
||||
filename: `file.${extension}`
|
||||
});
|
||||
data.append('extension', extension);
|
||||
data.append('ocr', ocrParse);
|
||||
const { data: response } = await axios.post<{
|
||||
success: boolean;
|
||||
message: string;
|
||||
data: {
|
||||
page: number;
|
||||
markdown: string;
|
||||
duration: number;
|
||||
};
|
||||
}>(customReadfileUrl, data, {
|
||||
pages: number;
|
||||
markdown: string;
|
||||
error?: Object | string;
|
||||
}>(url, data, {
|
||||
timeout: 600000,
|
||||
headers: {
|
||||
...data.getHeaders()
|
||||
...data.getHeaders(),
|
||||
Authorization: token ? `Bearer ${token}` : undefined
|
||||
}
|
||||
});
|
||||
|
||||
if (response.error) {
|
||||
return Promise.reject(response.error);
|
||||
}
|
||||
|
||||
addLog.info(`Custom file parsing is complete, time: ${Date.now() - start}ms`);
|
||||
|
||||
const rawText = response.data.markdown;
|
||||
const { text, imageList } = matchMdImgTextAndUpload(rawText);
|
||||
const rawText = response.markdown;
|
||||
const { text, imageList } = matchMdImg(rawText);
|
||||
|
||||
createPdfParseUsage({
|
||||
teamId,
|
||||
tmbId,
|
||||
pages: response.pages
|
||||
});
|
||||
|
||||
return {
|
||||
rawText: text,
|
||||
@@ -98,15 +114,203 @@ export const readRawContentByFileBuffer = async ({
|
||||
imageList
|
||||
};
|
||||
};
|
||||
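Pieced together from the hunk above, the contract for a self-hosted parser is small: accept a multipart POST with a `file` field, optionally check `Authorization: Bearer <customPdfParse.key>`, and answer JSON `{ pages, markdown }` on success or `{ error }` on failure. A minimal compatible endpoint sketch, using Express and multer; the route path, env var name, and convertPdfToMarkdown helper are all invented for illustration:

import express from 'express';
import multer from 'multer';

// Hypothetical converter: swap in whatever PDF-to-markdown engine you actually run.
declare function convertPdfToMarkdown(file: Buffer): Promise<{ markdown: string; pages: number }>;

const app = express();
const upload = multer(); // default memory storage: req.file.buffer holds the PDF bytes

app.post('/v1/parse/file', upload.single('file'), async (req, res) => {
  // FastGPT sends a bearer token when customPdfParse.key is configured
  if (process.env.PARSE_KEY && req.headers.authorization !== `Bearer ${process.env.PARSE_KEY}`) {
    return res.json({ error: 'unauthorized' });
  }
  try {
    const { markdown, pages } = await convertPdfToMarkdown(req.file!.buffer);
    res.json({ pages, markdown });
  } catch (err) {
    // Surfaced by FastGPT via Promise.reject(response.error)
    res.json({ error: String(err) });
  }
});

app.listen(7231);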
  const parsePdfFromDoc2x = async (): Promise<ReadFileResponse> => {
    const doc2xKey = global.systemEnv.customPdfParse?.doc2xKey;
    if (!doc2xKey) return systemParse();

  let { rawText, formatText, imageList } =
    (await readFileFromCustomService()) ||
    (await runWorker<ReadFileResponse>(WorkerNameEnum.readFile, {
      extension,
      encoding,
      buffer,
      teamId
    }));
    const parseTextImage = async (text: string) => {
      // Extract image links and convert to base64
      const imageList: { id: string; url: string }[] = [];
      let processedText = text.replace(/!\[.*?\]\((http[^)]+)\)/g, (match, url) => {
        const id = `IMAGE_${getNanoid()}_IMAGE`;
        imageList.push({
          id,
          url
        });
        return `![](${id})`;
      });

      // Get base64 from image url
      let resultImageList: ImageType[] = [];
      await batchRun(
        imageList,
        async (item) => {
          try {
            const { base64, mime } = await getImageBase64(item.url);
            resultImageList.push({
              uuid: item.id,
              mime,
              base64
            });
          } catch (error) {
            processedText = processedText.replace(item.id, item.url);
            addLog.warn(`Failed to get image from ${item.url}: ${getErrText(error)}`);
          }
        },
        5
      );

      return {
        text: processedText,
        imageList: resultImageList
      };
    };
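The placeholder round-trip in parseTextImage is easier to see with concrete values; a small illustration, if the helper were callable on its own (the URL and id are invented, real ids come from getNanoid):

const input = 'Intro ![](https://cdn.example.com/fig1.png) outro';
const { text, imageList } = await parseTextImage(input);
// text      === 'Intro ![](IMAGE_<nanoid>_IMAGE) outro'
// imageList === [{ uuid: 'IMAGE_<nanoid>_IMAGE', mime: 'image/png', base64: '...' }]
// batchRun fetches at most 5 URLs concurrently; on fetch failure the marker is
// swapped back to the original URL, so the text never references a lost image.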
    let startTime = Date.now();

    // 1. Get pre-upload URL first
    const { data: preupload_data } = await axios
      .post<{ code: string; data: { uid: string; url: string } }>(
        'https://v2.doc2x.noedgeai.com/api/v2/parse/preupload',
        null,
        {
          headers: {
            Authorization: `Bearer ${doc2xKey}`
          }
        }
      )
      .catch((error) => {
        return Promise.reject(
          `[Pre-upload Error] Failed to get pre-upload URL: ${getErrText(error)}`
        );
      });
    if (preupload_data?.code !== 'success') {
      return Promise.reject(`Failed to get pre-upload URL: ${JSON.stringify(preupload_data)}`);
    }

    const upload_url = preupload_data.data.url;
    const uid = preupload_data.data.uid;

    // 2. Upload file to pre-signed URL with binary stream
    const blob = new Blob([buffer], { type: 'application/pdf' });
    const response = await axios
      .put(upload_url, blob, {
        headers: {
          'Content-Type': 'application/pdf'
        }
      })
      .catch((error) => {
        return Promise.reject(`[Upload Error] Failed to upload file: ${getErrText(error)}`);
      });
    if (response.status !== 200) {
      return Promise.reject(`Upload failed with status ${response.status}: ${response.statusText}`);
    }

    await delay(5000);
    addLog.debug(`Uploaded file to Doc2x, uid: ${uid}`);
    // 3. Get the result by uid
    const checkResult = async (retry = 30) => {
      if (retry <= 0) {
        return Promise.reject(
          `[Parse Timeout Error] Failed to get result (uid: ${uid}): Process timeout`
        );
      }

      try {
        const { data: result_data } = await axios
          .get<{
            code: string;
            data: {
              progress: number;
              status: 'processing' | 'failed' | 'success';
              result: {
                pages: {
                  md: string;
                }[];
              };
            };
          }>(`https://v2.doc2x.noedgeai.com/api/v2/parse/status?uid=${uid}`, {
            headers: {
              Authorization: `Bearer ${doc2xKey}`
            }
          })
          .catch((error) => {
            return Promise.reject(
              `[Parse Status Error] Failed to get parse status: ${getErrText(error)}`
            );
          });

        // Error
        if (!['ok', 'success'].includes(result_data.code)) {
          return Promise.reject(
            `Failed to get result (uid: ${uid}): ${JSON.stringify(result_data)}`
          );
        }

        // Process
        if (['ready', 'processing'].includes(result_data.data.status)) {
          addLog.debug(`Waiting for the result, uid: ${uid}`);
          await delay(5000);
          return checkResult(retry - 1);
        }

        // Finish
        if (result_data.data.status === 'success') {
          const result = result_data.data.result.pages
            .map((page) => page.md)
            .join('')
            // Do some post-processing
            .replace(/\\[\(\)]/g, '$')
            .replace(/\\[\[\]]/g, '$$')
            .replace(/<img\s+src="([^"]+)"(?:\s*\?[^>]*)?(?:\s*\/>|>)/g, '![img]($1)')
            .replace(/<!-- Media -->/g, '')
            .replace(/<!-- Footnote -->/g, '')
            .replace(/\$(.+?)\s+\\tag\{(.+?)\}\$/g, '$$$1 \\qquad \\qquad ($2)$$')
            .replace(/\\text\{([^}]*?)(\b\w+)_(\w+\b)([^}]*?)\}/g, '\\text{$1$2\\_$3$4}');

          const { text, imageList } = await parseTextImage(htmlTable2Md(result));

          return {
            pages: result_data.data.result.pages.length,
            text,
            imageList
          };
        }
        return checkResult(retry - 1);
      } catch (error) {
        if (retry > 1) {
          await delay(100);
          return checkResult(retry - 1);
        }
        return Promise.reject(error);
      }
    };

    const { pages, text, imageList } = await checkResult();

    createPdfParseUsage({
      teamId,
      tmbId,
      pages
    });

    addLog.info(`Doc2x parse success, time: ${Date.now() - startTime}ms`);
    return {
      rawText: text,
      formatText: text,
      imageList
    };
  };
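What the post-processing chain actually does is clearer on a sample page; the input below is invented, and the `<img>` rewrite assumes the markdown-image replacement shown above:

const page = 'Area \\(x^2\\), see <img src="https://example.com/p1.png"/> <!-- Media -->';
// /\\[\(\)]/ → '$'  : turns \( \) inline-TeX delimiters into '$', giving 'Area $x^2$'
// <img ...>         : becomes ![img](https://example.com/p1.png), so parseTextImage can capture it
// <!-- Media --> and <!-- Footnote --> comments are stripped entirely
// \tag{...} equations are rewritten so the tag renders as a right-hand label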
  // Custom read file service
  const pdfParseFn = async (): Promise<ReadFileResponse> => {
    if (!customPdfParse) return systemParse();
    if (global.systemEnv.customPdfParse?.url) return parsePdfFromCustomService();
    if (global.systemEnv.customPdfParse?.doc2xKey) return parsePdfFromDoc2x();

    return systemParse();
  };

  const start = Date.now();
  addLog.debug(`Start parse file`, { extension });

  let { rawText, formatText, imageList } = await (async () => {
    if (extension === 'pdf') {
      return await pdfParseFn();
    }
    return await systemParse();
  })();

  addLog.debug(`Parse file success, time: ${Date.now() - start}ms. Uploading file image.`);

  // markdown data format
  if (imageList) {
@@ -116,14 +320,14 @@ export const readRawContentByFileBuffer = async ({
        return await uploadMongoImg({
          base64Img: `data:${item.mime};base64,${item.base64}`,
          teamId,
          // expiredTime: addHours(new Date(), 1),
          metadata: {
            ...metadata,
            mime: item.mime
          }
        });
      } catch (error) {
        return '';
        addLog.warn('Upload file image error', { error });
        return 'Upload image error';
      }
    })();
    rawText = rawText.replace(item.uuid, src);
@@ -142,5 +346,7 @@ export const readRawContentByFileBuffer = async ({
    }
  }

  return { rawText };
  addLog.debug(`Upload file image success, time: ${Date.now() - start}ms`);

  return { rawText, formatText, imageList };
};
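Putting the pieces together, a caller reads bytes from disk and hands them to readRawContentByFileBuffer; the team/member ids below are invented placeholders for what FastGPT derives from the authenticated request:

import fs from 'fs';

const buffer = await fs.promises.readFile('./docs/manual.pdf');
const { rawText, formatText, imageList } = await readRawContentByFileBuffer({
  extension: 'pdf',
  buffer,
  encoding: 'utf-8',
  teamId: 'team_123',
  tmbId: 'tmb_456',
  customPdfParse: true, // route PDFs through the external parser or Doc2x when configured
  isQAImport: false
});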
@@ -19,7 +19,7 @@ export async function connectMongo(): Promise<Mongoose> {
  // Remove existing listeners to prevent duplicates
  connectionMongo.connection.removeAllListeners('error');
  connectionMongo.connection.removeAllListeners('disconnected');
  connectionMongo.set('strictQuery', false);
  connectionMongo.set('strictQuery', 'throw');

  connectionMongo.connection.on('error', async (error) => {
    console.log('mongo error', error);
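With strictQuery set to 'throw', Mongoose rejects filters that reference paths missing from the schema, instead of passing the unknown path through to MongoDB (the old `false` setting), where it usually just matches nothing. A small illustration with an invented schema:

const Kitten = mongoose.model('Kitten', new mongoose.Schema({ name: String }));

// 'colour' is not a schema path:
// strictQuery: false   -> filter sent to MongoDB as-is, silently returns []
// strictQuery: 'throw' -> Mongoose throws before the query runs, surfacing the typo
await Kitten.find({ colour: 'ginger' });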
@@ -10,6 +10,11 @@ export const SERVICE_LOCAL_HOST =
export const initFastGPTConfig = (config?: FastGPTConfigFileType) => {
  if (!config) return;

  // Special config computed
  config.feConfigs.showCustomPdfParse =
    !!config.systemEnv.customPdfParse?.url || !!config.systemEnv.customPdfParse?.doc2xKey;
  config.feConfigs.customPdfParsePrice = config.systemEnv.customPdfParse?.price || 0;

  global.feConfigs = config.feConfigs;
  global.systemEnv = config.systemEnv;
  global.subPlans = config.subPlans;
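Inferred from how the fields are read across these hunks (url/key for the self-hosted parser, doc2xKey for the Doc2x path, price for billing), the systemEnv.customPdfParse block has roughly this shape; a sketch, not the authoritative type:

type CustomPdfParseConfig = {
  url?: string;      // self-hosted parse endpoint used by parsePdfFromCustomService
  key?: string;      // bearer token sent to that endpoint
  doc2xKey?: string; // enables the Doc2x path instead
  price?: number;    // surfaced to the UI as customPdfParsePrice
};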
@@ -30,10 +30,10 @@ export const isInternalAddress = (url: string): boolean => {
    return true;
  }

  // For non-metadata URLs, check if it's a domain name
  // For IP addresses, check if they are internal
  const ipv4Pattern = /^(\d{1,3}\.){3}\d{1,3}$/;
  if (!ipv4Pattern.test(hostname)) {
    return true;
    return false; // Not an IP address, so it's a domain name - consider it external by default
  }

  // ... existing IP validation code ...
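The flip from `return true` to `return false` changes how non-IP hostnames are classified; a quick illustration of the new behaviour (URLs invented):

isInternalAddress('http://example.com/webhook'); // false: domain names now count as external
isInternalAddress('http://10.0.0.8/webhook');    // matches the IPv4 pattern, so it falls
                                                 // through to the range checks not shown here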
@@ -38,6 +38,27 @@ export class PgVectorCtrl {
      await PgClient.query(
        `CREATE INDEX CONCURRENTLY IF NOT EXISTS create_time_index ON ${DatasetVectorTableName} USING btree(createtime);`
      );
      // ~100k rows
      // await PgClient.query(`
      //   ALTER TABLE modeldata SET (
      //     autovacuum_vacuum_scale_factor = 0.1,
      //     autovacuum_analyze_scale_factor = 0.05,
      //     autovacuum_vacuum_threshold = 50,
      //     autovacuum_analyze_threshold = 50,
      //     autovacuum_vacuum_cost_delay = 20,
      //     autovacuum_vacuum_cost_limit = 200
      //   );`);

      // ~1M rows
      // await PgClient.query(`
      //   ALTER TABLE modeldata SET (
      //     autovacuum_vacuum_scale_factor = 0.01,
      //     autovacuum_analyze_scale_factor = 0.02,
      //     autovacuum_vacuum_threshold = 1000,
      //     autovacuum_analyze_threshold = 1000,
      //     autovacuum_vacuum_cost_delay = 10,
      //     autovacuum_vacuum_cost_limit = 2000
      //   );`)

      addLog.info('init pg successful');
    } catch (error) {
@@ -164,34 +185,22 @@ export class PgVectorCtrl {
  }
    try {
      // const explan: any = await PgClient.query(
      //   `BEGIN;
      //     SET LOCAL hnsw.ef_search = ${global.systemEnv?.pgHNSWEfSearch || 100};
      //     EXPLAIN ANALYZE select id, collection_id, vector <#> '[${vector}]' AS score
      //       from ${DatasetVectorTableName}
      //       where team_id='${teamId}'
      //         AND dataset_id IN (${datasetIds.map((id) => `'${String(id)}'`).join(',')})
      //         ${forbidCollectionSql}
      //       order by score limit ${limit};
      //   COMMIT;`
      // );
      // console.log(explan[2].rows);

      const results: any = await PgClient.query(
        `
        BEGIN;
        `BEGIN;
          SET LOCAL hnsw.ef_search = ${global.systemEnv?.pgHNSWEfSearch || 100};
          select id, collection_id, vector <#> '[${vector}]' AS score
            from ${DatasetVectorTableName}
            where team_id='${teamId}'
              AND dataset_id IN (${datasetIds.map((id) => `'${String(id)}'`).join(',')})
              ${filterCollectionIdSql}
              ${forbidCollectionSql}
            order by score limit ${limit};
          SET LOCAL hnsw.iterative_scan = relaxed_order;
          WITH relaxed_results AS MATERIALIZED (
            select id, collection_id, vector <#> '[${vector}]' AS score
              from ${DatasetVectorTableName}
              where team_id='${teamId}'
                AND dataset_id IN (${datasetIds.map((id) => `'${String(id)}'`).join(',')})
                ${filterCollectionIdSql}
                ${forbidCollectionSql}
              order by score limit ${limit}
          ) SELECT id, collection_id, score FROM relaxed_results ORDER BY score;
        COMMIT;`
      );

      const rows = results?.[2]?.rows as PgSearchRawType[];
      const rows = results?.[3]?.rows as PgSearchRawType[];

      return {
        results: rows.map((item) => ({
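When node-postgres receives a multi-statement string it returns one result object per statement, so the added `SET LOCAL hnsw.iterative_scan` shifts the position of the SELECT's rows; a sketch of the indexing in the new transaction:

// [0] BEGIN
// [1] SET LOCAL hnsw.ef_search = ...
// [2] SET LOCAL hnsw.iterative_scan = relaxed_order   <- newly inserted statement
// [3] WITH relaxed_results ... SELECT ...             <- the rows we actually want
// [4] COMMIT
const rows = results?.[3]?.rows; // hence the bump from results[2] to results[3]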
@@ -43,13 +43,13 @@ export async function text2Speech({
  const readableStream = response.body as unknown as NodeJS.ReadableStream;
  readableStream.pipe(res);

  let bufferStore = Buffer.from([]);
  const chunks: Uint8Array[] = [];

  readableStream.on('data', (chunk) => {
    bufferStore = Buffer.concat([bufferStore, chunk]);
    chunks.push(chunk);
  });
  readableStream.on('end', () => {
    onSuccess({ model, buffer: bufferStore });
    onSuccess({ model, buffer: Buffer.concat(chunks) });
  });
  readableStream.on('error', (e) => {
    onError(e);
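Collecting chunks and concatenating once is the standard fix for quadratic buffer growth: the old `Buffer.concat([bufferStore, chunk])` allocates a fresh buffer and re-copies everything received so far on every 'data' event, so n chunks cost O(n^2) byte copies, versus a single O(n) copy at the end. The same pattern in isolation, with stream and done as placeholders:

const chunks: Uint8Array[] = [];
stream.on('data', (chunk: Uint8Array) => chunks.push(chunk)); // O(1) per event, no copying
stream.on('end', () => done(Buffer.concat(chunks)));          // one copy at the end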
@@ -6,10 +6,12 @@ import { getSTTModel } from '../model';

export const aiTranscriptions = async ({
  model,
  fileStream
  fileStream,
  headers
}: {
  model: string;
  fileStream: fs.ReadStream;
  headers?: Record<string, string>;
}) => {
  const data = new FormData();
  data.append('model', model);
@@ -30,7 +32,8 @@ export const aiTranscriptions = async ({
      Authorization: modelData.requestAuth
        ? `Bearer ${modelData.requestAuth}`
        : aiAxiosConfig.authorization,
      ...data.getHeaders()
      ...data.getHeaders(),
      ...headers
    },
    data: data
  });
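The new optional headers parameter lets callers forward per-request metadata to the STT endpoint; a usage sketch, with the header name and file path invented:

const result = await aiTranscriptions({
  model: 'whisper-1',
  fileStream: fs.createReadStream('./audio/meeting.mp3'),
  headers: { 'X-Request-Id': 'req_789' } // spread after FormData's own headers, so it wins on conflicts
});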
@@ -76,6 +76,10 @@ export const createChatCompletion = async ({
    timeout: formatTimeout
  });

  addLog.debug(`Start create chat completion`, {
    model: body.model
  });

  const response = await ai.chat.completions.create(body, {
    ...options,
    ...(modelConstantsData.requestUrl ? { path: modelConstantsData.requestUrl } : {}),