Compare commits: v4.9.9...test-openG (24 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 63028dacb2 |  |
|  | b4ecfb0b79 |  |
|  | 331b851a78 |  |
|  | 50d235c42a |  |
|  | 9838593451 |  |
|  | c25cd48e72 |  |
|  | 874300a56a |  |
|  | 1dea2b71b4 |  |
|  | a8673344b1 |  |
|  | 9709ae7a4f |  |
|  | fae76e887a |  |
|  | 9af92d1eae |  |
|  | 6a6719e93d |  |
|  | 50481f4ca8 |  |
|  | 88bd3aaa9e |  |
|  | dd3c251603 |  |
|  | aa55f059d4 |  |
|  | 89c9a02650 |  |
|  | 0f3bfa280a |  |
|  | 593ebfd269 |  |
|  | f6dc2204f5 |  |
|  | d44c338059 |  |
|  | 1dac2b70ec |  |
|  | 9fef3e15fb |  |
.vscode/settings.json (vendored) · 2 changes

@@ -21,7 +21,7 @@
   "i18n-ally.namespace": true,
   "i18n-ally.pathMatcher": "{locale}/{namespaces}.json",
   "i18n-ally.extract.targetPickingStrategy": "most-similar-by-key",
-  "i18n-ally.translate.engines": ["google"],
+  "i18n-ally.translate.engines": ["deepl","google"],
   "[typescript]": {
     "editor.defaultFormatter": "esbenp.prettier-vscode"
   },
@@ -132,15 +132,15 @@ services:
   # fastgpt
   sandbox:
     container_name: sandbox
-    image: ghcr.io/labring/fastgpt-sandbox:v4.9.8 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.8 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt-sandbox:v4.9.10 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.10 # Alibaba Cloud
     networks:
       - fastgpt
     restart: always
   fastgpt-mcp-server:
     container_name: fastgpt-mcp-server
-    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.8 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.8 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.10 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.10 # Alibaba Cloud
     ports:
       - 3005:3000
     networks:

@@ -150,8 +150,8 @@ services:
       - FASTGPT_ENDPOINT=http://fastgpt:3000
   fastgpt:
     container_name: fastgpt
-    image: ghcr.io/labring/fastgpt:v4.9.8 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.8 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt:v4.9.10 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.10 # Alibaba Cloud
     ports:
       - 3000:3000
     networks:
@@ -109,15 +109,15 @@ services:
   # fastgpt
   sandbox:
     container_name: sandbox
-    image: ghcr.io/labring/fastgpt-sandbox:v4.9.8 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.8 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt-sandbox:v4.9.10 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.10 # Alibaba Cloud
     networks:
       - fastgpt
     restart: always
   fastgpt-mcp-server:
     container_name: fastgpt-mcp-server
-    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.8 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.8 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.10 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.10 # Alibaba Cloud
     ports:
       - 3005:3000
     networks:

@@ -127,8 +127,8 @@ services:
       - FASTGPT_ENDPOINT=http://fastgpt:3000
   fastgpt:
     container_name: fastgpt
-    image: ghcr.io/labring/fastgpt:v4.9.8 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.8 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt:v4.9.10 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.10 # Alibaba Cloud
     ports:
       - 3000:3000
     networks:
deploy/docker/docker-compose-opengauss.yml · 218 lines · new file

@@ -0,0 +1,218 @@
# The database's default account and password only take effect on the first run
# If you change the credentials, remember to update both the database and the project's connection parameters — not just one of them
# This compose file is only for quick start and testing. For production, be sure to change the credentials and tune the knowledge-base parameters, shared memory, etc.
# If dockerhub and git are unreachable, you can use Alibaba Cloud (Alibaba Cloud has no arm images)

version: '3.3'
services:
  # db
  gs:
    image: opengauss/opengauss:7.0.0-RC1 # docker hub
    container_name: gs
    restart: always
    # ports: # do not expose in production
    #   - 5432:5432
    networks:
      - fastgpt
    environment:
      # These settings only take effect on the first run. Changing them and restarting the image has no effect; delete the persisted data and restart for changes to apply
      - GS_USER=username
      - GS_PASSWORD=password
      - GS_DB=postgres
    volumes:
      - ./opengauss/data:/var/lib/opengauss/data
    healthcheck:
      test: ['CMD-SHELL', 'netstat -lntp | grep tcp6 > /dev/null 2>&1']
      interval: 10s
      timeout: 10s
      retries: 10
  mongo:
    image: mongo:5.0.18 # dockerhub
    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/mongo:5.0.18 # Alibaba Cloud
    # image: mongo:4.4.29 # use when the CPU does not support AVX
    container_name: mongo
    restart: always
    # ports:
    #   - 27017:27017
    networks:
      - fastgpt
    command: mongod --keyFile /data/mongodb.key --replSet rs0
    environment:
      - MONGO_INITDB_ROOT_USERNAME=myusername
      - MONGO_INITDB_ROOT_PASSWORD=mypassword
    volumes:
      - ./mongo/data:/data/db
    entrypoint:
      - bash
      - -c
      - |
        openssl rand -base64 128 > /data/mongodb.key
        chmod 400 /data/mongodb.key
        chown 999:999 /data/mongodb.key
        echo 'const isInited = rs.status().ok === 1
        if(!isInited){
          rs.initiate({
            _id: "rs0",
            members: [
              { _id: 0, host: "mongo:27017" }
            ]
          })
        }' > /data/initReplicaSet.js
        # Start the MongoDB service
        exec docker-entrypoint.sh "$$@" &

        # Wait for the MongoDB service to come up
        until mongo -u myusername -p mypassword --authenticationDatabase admin --eval "print('waited for connection')"; do
          echo "Waiting for MongoDB to start..."
          sleep 2
        done

        # Run the replica-set initialization script
        mongo -u myusername -p mypassword --authenticationDatabase admin /data/initReplicaSet.js

        # Wait for the MongoDB process started by docker-entrypoint.sh
        wait $$!

  redis:
    image: redis:7.2-alpine
    container_name: redis
    # ports:
    #   - 6379:6379
    networks:
      - fastgpt
    restart: always
    command: |
      redis-server --requirepass mypassword --loglevel warning --maxclients 10000 --appendonly yes --save 60 10 --maxmemory 4gb --maxmemory-policy noeviction
    healthcheck:
      test: ['CMD', 'redis-cli', '-a', 'mypassword', 'ping']
      interval: 10s
      timeout: 3s
      retries: 3
      start_period: 30s
    volumes:
      - ./redis/data:/data

  # fastgpt
  sandbox:
    container_name: sandbox
    image: ghcr.io/labring/fastgpt-sandbox:v4.9.7-fix2 # git
    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.7-fix2 # Alibaba Cloud
    networks:
      - fastgpt
    restart: always
  fastgpt-mcp-server:
    container_name: fastgpt-mcp-server
    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.7-fix2 # git
    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.7-fix2 # Alibaba Cloud
    ports:
      - 3005:3000
    networks:
      - fastgpt
    restart: always
    environment:
      - FASTGPT_ENDPOINT=http://fastgpt:3000
  fastgpt:
    container_name: fastgpt
    image: ghcr.io/labring/fastgpt:v4.9.7-fix2 # git
    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.7-fix2 # Alibaba Cloud
    # image: swr.cn-north-4.myhuaweicloud.com/ddn-k8s/ghcr.io/labring/fastgpt:v4.8.4-linuxarm64 # openGauss performs better on arm
    ports:
      - 3000:3000
    networks:
      - fastgpt
    depends_on:
      - mongo
      - gs
      - sandbox
    restart: always
    environment:
      # Externally reachable frontend address, used to auto-complete file resource paths, e.g. https://fastgpt.cn (localhost is not allowed). May be left empty; if empty, images sent to the model use a relative path instead of a full URL, and the model may forge the Host.
      - FE_DOMAIN=
      # root password; the username is root. To change the root password, edit this environment variable and restart.
      - DEFAULT_ROOT_PSW=1234
      # AI Proxy address; when set, it takes precedence
      - AIPROXY_API_ENDPOINT=http://aiproxy:3000
      # AI Proxy Admin Token; must match the ADMIN_KEY environment variable in AI Proxy
      - AIPROXY_API_TOKEN=aiproxy
      # Maximum number of database connections
      - DB_MAX_LINK=30
      # Login credential secret
      - TOKEN_KEY=any
      # root key, commonly used for initialization requests during upgrades
      - ROOT_KEY=root_key
      # File-read encryption
      - FILE_TOKEN_KEY=filetoken
      # MongoDB connection parameters. Username myusername, password mypassword.
      - MONGODB_URI=mongodb://myusername:mypassword@mongo:27017/fastgpt?authSource=admin
      # openGauss connection parameters
      - OPENGAUSS_URL=opengauss://gaussdb:Huawei12%23%24@gs:9999/test
      # Redis connection parameters
      - REDIS_URL=redis://default:mypassword@redis:6379
      # sandbox address
      - SANDBOX_URL=http://sandbox:3000
      # Log level: debug, info, warn, error
      - LOG_LEVEL=info
      - STORE_LOG_LEVEL=warn
      # Maximum workflow run count
      - WORKFLOW_MAX_RUN_TIMES=1000
      # Batch-execution node: maximum input length
      - WORKFLOW_MAX_LOOP_TIMES=100
      # Custom CORS; when unset, all origins are allowed (separate multiple domains with commas)
      - ALLOWED_ORIGINS=
      # Whether to enable IP rate limiting, off by default
      - USE_IP_LIMIT=false
      # Days before chat files expire
      - CHAT_FILE_EXPIRE_TIME=7
    volumes:
      - ./config.json:/app/data/config.json

  # AI Proxy
  aiproxy:
    image: ghcr.io/labring/aiproxy:v0.1.7
    # image: registry.cn-hangzhou.aliyuncs.com/labring/aiproxy:v0.1.7 # Alibaba Cloud
    container_name: aiproxy
    restart: unless-stopped
    depends_on:
      aiproxy_pg:
        condition: service_healthy
    networks:
      - fastgpt
    environment:
      # Matches AIPROXY_API_TOKEN in fastgpt
      - ADMIN_KEY=aiproxy
      # Retention time for error-log details (hours)
      - LOG_DETAIL_STORAGE_HOURS=1
      # Database connection string
      - SQL_DSN=postgres://postgres:aiproxy@aiproxy_pg:5432/aiproxy
      # Maximum retries
      - RETRY_TIMES=3
      # Billing not needed
      - BILLING_ENABLED=false
      # Strict model checking not needed
      - DISABLE_MODEL_CONFIG=true
    healthcheck:
      test: ['CMD', 'curl', '-f', 'http://localhost:3000/api/status']
      interval: 5s
      timeout: 5s
      retries: 10
  aiproxy_pg:
    image: pgvector/pgvector:0.8.0-pg15 # docker hub
    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/pgvector:v0.8.0-pg15 # Alibaba Cloud
    restart: unless-stopped
    container_name: aiproxy_pg
    volumes:
      - ./aiproxy_pg:/var/lib/postgresql/data
    networks:
      - fastgpt
    environment:
      TZ: Asia/Shanghai
      POSTGRES_USER: postgres
      POSTGRES_DB: aiproxy
      POSTGRES_PASSWORD: aiproxy
    healthcheck:
      test: ['CMD', 'pg_isready', '-U', 'postgres', '-d', 'aiproxy']
      interval: 5s
      timeout: 5s
      retries: 10
networks:
  fastgpt:
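As a quick smoke test for the stack above, the same OPENGAUSS_URL that the fastgpt container consumes can be probed from Node with the `pg` driver — the opengauss controller later in this diff builds its Pool from this exact string. A minimal sketch; the value in the comment is the demo credential from the compose file:

```ts
// Minimal connectivity probe for the gs service above, reusing OPENGAUSS_URL.
// Assumes the 'pg' driver accepts the connection string as-is, as the
// opengauss/controller.ts added later in this diff does.
import { Pool } from 'pg';

const pool = new Pool({
  connectionString: process.env.OPENGAUSS_URL // e.g. opengauss://gaussdb:Huawei12%23%24@gs:9999/test
});

const { rows } = await pool.query('SELECT 1 AS ok');
console.log(rows[0].ok); // prints 1 once the gs container passes its healthcheck
await pool.end();
```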
@@ -23,7 +23,7 @@ services:
     volumes:
       - ./pg/data:/var/lib/postgresql/data
     healthcheck:
-      test: ['CMD', 'pg_isready', '-U', 'postgres', '-d', 'aiproxy']
+      test: ['CMD', 'pg_isready', '-U', 'postgres', '-d', 'postgres']
       interval: 5s
       timeout: 5s
       retries: 10
@@ -96,15 +96,15 @@ services:
   # fastgpt
   sandbox:
     container_name: sandbox
-    image: ghcr.io/labring/fastgpt-sandbox:v4.9.8 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.8 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt-sandbox:v4.9.10 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.10 # Alibaba Cloud
     networks:
       - fastgpt
     restart: always
   fastgpt-mcp-server:
     container_name: fastgpt-mcp-server
-    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.8 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.8 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.10 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.10 # Alibaba Cloud
     ports:
       - 3005:3000
     networks:

@@ -114,8 +114,8 @@ services:
       - FASTGPT_ENDPOINT=http://fastgpt:3000
   fastgpt:
     container_name: fastgpt
-    image: ghcr.io/labring/fastgpt:v4.9.8 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.8 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt:v4.9.10 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.10 # Alibaba Cloud
     ports:
       - 3000:3000
     networks:
@@ -72,15 +72,15 @@ services:

   sandbox:
     container_name: sandbox
-    image: ghcr.io/labring/fastgpt-sandbox:v4.9.8 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.8 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt-sandbox:v4.9.10 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.10 # Alibaba Cloud
     networks:
       - fastgpt
     restart: always
   fastgpt-mcp-server:
     container_name: fastgpt-mcp-server
-    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.8 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.8 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.10 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.10 # Alibaba Cloud
     ports:
       - 3005:3000
     networks:

@@ -90,8 +90,8 @@ services:
       - FASTGPT_ENDPOINT=http://fastgpt:3000
   fastgpt:
     container_name: fastgpt
-    image: ghcr.io/labring/fastgpt:v4.9.8 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.8 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt:v4.9.10 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.10 # Alibaba Cloud
     ports:
       - 3000:3000
     networks:
@@ -959,10 +959,16 @@ curl --location --request POST 'http://localhost:3000/api/core/chat/getHistories
{{< markdownify >}}

{{% alert icon=" " context="success" %}}
Currently, only conversations created by the creator of the current API key can be retrieved.

- appId - application Id
- offset - offset, i.e. the record to start from
- pageSize - number of records
- source - conversation source. source=api fetches conversations created through the API (conversations from the web page are not included)
- startCreateTime - creation-time range start (optional)
- endCreateTime - creation-time range end (optional)
- startUpdateTime - update-time range start (optional)
- endUpdateTime - update-time range end (optional)
{{% /alert %}}

{{< /markdownify >}}
docSite/content/zh-cn/docs/development/upgrading/4910.md · 50 lines · new file

@@ -0,0 +1,50 @@
---
title: 'V4.9.10'
description: 'FastGPT V4.9.10 release notes'
icon: 'upgrade'
draft: false
toc: true
weight: 790
---

## Upgrade guide

Important: this update rebuilds the full-text index. While the rebuild runs, full-text search returns empty results; on a 4c16g machine, about 7 million full-text index entries take roughly 25 minutes. For a seamless upgrade, you must set up your own table-sync pipeline.

### 1. Back up your data

### 2. Update image tags

- Update the FastGPT image tag: v4.9.10
- Update the FastGPT commercial image tag: v4.9.10
- mcp_server: no update needed
- Sandbox: no update needed
- AIProxy: no update needed

## 🚀 New

1. PG now supports the `systemEnv.hnswMaxScanTuples` parameter, raising the total amount of data covered by iterative search.
2. Knowledge-base preprocessing adds a "chunking condition" parameter, which can skip chunking in certain cases.
3. Knowledge-base preprocessing adds a "paragraph-first" mode with a configurable maximum paragraph depth. The original "length-first" mode no longer embeds paragraph-first logic.
4. Workflow connections are now single-in and single-out, making it quick to append the next node.
5. Feishu and Yuque knowledge bases are now available in the open-source edition.
6. Presets for the latest gemini and claude models.

## ⚙️ Improvements

1. Larger default timeout for LLM stream calls.
2. Polished several confirmation interactions.
3. Renamed the knowledge base's "table dataset" to "backup import", and added export and import of knowledge-base indexes.
4. Workflow knowledge-base citation limit: when the workflow has no related AI node, the control becomes pure manual input, with a cap of 10,000,000.
5. Voice input: mobile detection now accurately identifies phones rather than just small screens.
6. Improved the context-truncation algorithm to always keep at least one group of Human messages.

## 🐛 Fixes

1. Incorrect score ordering in full-text search across multiple knowledge bases.
2. finish_reason captured from stream responses could be incorrect.
3. Tool-call mode did not save reasoning output.
4. The knowledge-base indexSize parameter did not take effect.
5. With workflows nested two levels deep, preview citations and context were incorrect.
6. An extra leading space appeared when converting xlsx to Markdown.
7. Base64 images were not additionally converted and saved when reading Markdown files.
docSite/content/zh-cn/docs/development/upgrading/4911.md · 21 lines · new file

@@ -0,0 +1,21 @@
---
title: 'V4.9.11 (in progress)'
description: 'FastGPT V4.9.11 release notes'
icon: 'upgrade'
draft: false
toc: true
weight: 789
---


## 🚀 New

1. Workflows: sub-workflow version control can be set to "keep latest version", removing the need for manual updates.

## ⚙️ Improvements


## 🐛 Fixes

1. Workflows: global system tools declared by an administrator could not be version-managed.
@@ -1,5 +1,5 @@
 ---
-title: 'V4.9.9 (in progress)'
+title: 'V4.9.9'
 description: 'FastGPT V4.9.9 release notes'
 icon: 'upgrade'
 draft: false

@@ -7,12 +7,28 @@ toc: true
weight: 791
---

## Upgrade guide

### 1. Back up your data

### 2. Commercial users: replace the License

Commercial users can contact the FastGPT support team for a License replacement plan. After replacement, the system can be upgraded directly; the admin console will prompt for the new License.

### 3. Update image tags

- Update the FastGPT image tag: v4.9.9
- Update the FastGPT commercial image tag: v4.9.9
- mcp_server: no update needed
- Sandbox: no update needed
- AIProxy: no update needed

## 🚀 New

1. Switched to SessionId in place of JWT for login authentication, allowing a cap on the maximum number of logged-in clients.
2. New commercial License management model.
3. Official-account calls now record chat errors for easier troubleshooting.
4. API knowledge bases support BasePath selection; an additional API endpoint must be implemented, see the [API knowledge base introduction](/docs/guide/knowledge_base/api_dataset/#4-获取文件详细信息用于获取文件信息)

## ⚙️ Improvements

@@ -24,3 +40,4 @@ weight: 791

1. Could not properly fetch an app's save/publish history.
2. Permission issue when members create MCP tools.
3. Source-citation display passed a wrong ID, triggering a "no permission to operate this file" prompt.
4. Front-end data error in answer annotations.
@@ -28,7 +28,6 @@ FastGPT Commercial Edition is an enhanced version of the open-source FastGPT, adding some exclusive…
| App publishing security settings | ❌ | ✅ | ✅ |
| Content moderation | ❌ | ✅ | ✅ |
| Web site sync | ❌ | ✅ | ✅ |
| Mainstream document-library integration (currently: Yuque, Feishu) | ❌ | ✅ | ✅ |
| Enhanced training mode | ❌ | ✅ | ✅ |
| Quick third-party integration (Feishu, official accounts) | ❌ | ✅ | ✅ |
| Admin console | ❌ | ✅ | Not needed |
env.d.ts (vendored) · 1 change

@@ -15,6 +15,7 @@ declare global {
   MONGODB_LOG_URI?: string;
   PG_URL: string;
   OCEANBASE_URL: string;
+  OPENGAUSS_URL: string;
   MILVUS_ADDRESS: string;
   MILVUS_TOKEN: string;
   SANDBOX_URL: string;
@@ -27,7 +27,7 @@ const datasetErr = [
   },
   {
     statusText: DatasetErrEnum.unExist,
-    message: 'core.dataset.error.unExistDataset'
+    message: i18nT('common:core.dataset.error.unExistDataset')
   },
   {
     statusText: DatasetErrEnum.unExistCollection,
@@ -7,6 +7,10 @@ export const CUSTOM_SPLIT_SIGN = '-----CUSTOM_SPLIT_SIGN-----';
 type SplitProps = {
   text: string;
   chunkSize: number;
+
+  paragraphChunkDeep?: number; // Paragraph deep
+  paragraphChunkMinSize?: number; // Paragraph min size, if too small, it will merge
+
   maxSize?: number;
   overlapRatio?: number;
   customReg?: string[];

@@ -108,6 +112,8 @@ const commonSplit = (props: SplitProps): SplitResponse => {
   let {
     text = '',
     chunkSize,
+    paragraphChunkDeep = 5,
+    paragraphChunkMinSize = 100,
     maxSize = defaultMaxChunkSize,
     overlapRatio = 0.15,
     customReg = []

@@ -123,7 +129,7 @@ const commonSplit = (props: SplitProps): SplitResponse => {
   text = text.replace(/(```[\s\S]*?```|~~~[\s\S]*?~~~)/g, function (match) {
     return match.replace(/\n/g, codeBlockMarker);
   });
-  // 2. Table handling - extract tables separately and merge their headers
+  // 2. Markdown table handling - extract tables separately and merge their headers
   const tableReg =
     /(\n\|(?:(?:[^\n|]+\|){1,})\n\|(?:[:\-\s]+\|){1,}\n(?:\|(?:[^\n|]+\|)*\n?)*)(?:\n|$)/g;
   const tableDataList = text.match(tableReg);

@@ -143,25 +149,40 @@ const commonSplit = (props: SplitProps): SplitResponse => {
   text = text.replace(/(\r?\n|\r){3,}/g, '\n\n\n');

   // The larger maxLen is, the next sentence is less likely to trigger splitting
-  const markdownIndex = 4;
-  const forbidOverlapIndex = 8;
+  const customRegLen = customReg.length;
+  const markdownIndex = paragraphChunkDeep - 1;
+  const forbidOverlapIndex = customRegLen + markdownIndex + 4;
+
+  const markdownHeaderRules = ((deep?: number): { reg: RegExp; maxLen: number }[] => {
+    if (!deep || deep === 0) return [];
+
+    const maxDeep = Math.min(deep, 8); // Maximum 8 levels
+    const rules: { reg: RegExp; maxLen: number }[] = [];
+
+    for (let i = 1; i <= maxDeep; i++) {
+      const hashSymbols = '#'.repeat(i);
+      rules.push({
+        reg: new RegExp(`^(${hashSymbols}\\s[^\\n]+\\n)`, 'gm'),
+        maxLen: chunkSize
+      });
+    }
+
+    return rules;
+  })(paragraphChunkDeep);

   const stepReges: { reg: RegExp | string; maxLen: number }[] = [
     ...customReg.map((text) => ({
       reg: text.replaceAll('\\n', '\n'),
       maxLen: chunkSize
     })),
-    { reg: /^(#\s[^\n]+\n)/gm, maxLen: chunkSize },
-    { reg: /^(##\s[^\n]+\n)/gm, maxLen: chunkSize },
-    { reg: /^(###\s[^\n]+\n)/gm, maxLen: chunkSize },
-    { reg: /^(####\s[^\n]+\n)/gm, maxLen: chunkSize },
-    { reg: /^(#####\s[^\n]+\n)/gm, maxLen: chunkSize },
+    ...markdownHeaderRules,

     { reg: /([\n](```[\s\S]*?```|~~~[\s\S]*?~~~))/g, maxLen: maxSize }, // code block
-    // keep HTML Table tags as intact as possible
     {
       reg: /(\n\|(?:(?:[^\n|]+\|){1,})\n\|(?:[:\-\s]+\|){1,}\n(?:\|(?:[^\n|]+\|)*\n)*)/g,
-      maxLen: Math.min(chunkSize * 1.5, maxSize)
-    }, // keep tables as intact as possible
+      maxLen: chunkSize
+    }, // keep Markdown tables as intact as possible
     { reg: /(\n{2,})/g, maxLen: chunkSize },
     { reg: /([\n])/g, maxLen: chunkSize },
     // ------ There's no overlap on the top

@@ -172,12 +193,10 @@ const commonSplit = (props: SplitProps): SplitResponse => {
     { reg: /([,]|,\s)/g, maxLen: chunkSize }
   ];

-  const customRegLen = customReg.length;
   const checkIsCustomStep = (step: number) => step < customRegLen;
   const checkIsMarkdownSplit = (step: number) =>
     step >= customRegLen && step <= markdownIndex + customRegLen;
-
-  const checkForbidOverlap = (step: number) => step <= forbidOverlapIndex + customRegLen;
+  const checkForbidOverlap = (step: number) => step <= forbidOverlapIndex;

   // if use markdown title split, Separate record title
   const getSplitTexts = ({ text, step }: { text: string; step: number }) => {

@@ -301,6 +320,7 @@ const commonSplit = (props: SplitProps): SplitResponse => {
   const splitTexts = getSplitTexts({ text, step });

   const chunks: string[] = [];
+
   for (let i = 0; i < splitTexts.length; i++) {
     const item = splitTexts[i];

@@ -443,7 +463,6 @@ const commonSplit = (props: SplitProps): SplitResponse => {
  */
 export const splitText2Chunks = (props: SplitProps): SplitResponse => {
   let { text = '' } = props;
-  const start = Date.now();
   const splitWithCustomSign = text.split(CUSTOM_SPLIT_SIGN);

   const splitResult = splitWithCustomSign.map((item) => {
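For orientation, a usage sketch of the reworked splitter: paragraphChunkDeep now generates the header rules instead of the fixed #–##### list. The import path and the exact shape of SplitResponse are assumptions; the option names come from the SplitProps hunk above.

```ts
// Hypothetical usage of splitText2Chunks with the new paragraph controls.
import { splitText2Chunks } from './textSplitter'; // path is an assumption

const md = [
  '# Guide',
  '',
  'Intro paragraph.',
  '',
  '## Install',
  '',
  'Step-by-step text that is long enough to form its own chunk.'
].join('\n');

const res = splitText2Chunks({
  text: md,
  chunkSize: 512,
  paragraphChunkDeep: 3, // generate rules for #, ##, ### (capped at 8 internally)
  paragraphChunkMinSize: 100 // paragraphs below this size are merged
});
// res.chunks — assuming SplitResponse exposes the collected string chunks
```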
@@ -130,9 +130,11 @@ export type SystemEnvType = {
   vectorMaxProcess: number;
   qaMaxProcess: number;
   vlmMaxProcess: number;
-  hnswEfSearch: number;
   tokenWorkers: number; // token count max worker
+
+  hnswEfSearch: number;
+  hnswMaxScanTuples: number;

   oneapiUrl?: string;
   chatApiKey?: string;
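The V4.9.10 notes above mention `systemEnv.hnswMaxScanTuples`; with the type extended here, the relevant systemEnv block would look roughly like this. All values are illustrative assumptions, except hnswEfSearch's 100, which appears as the fallback in the recall SQL later in this diff.

```ts
// Illustrative systemEnv value matching SystemEnvType above; numbers are assumptions.
const systemEnv = {
  vectorMaxProcess: 10,
  qaMaxProcess: 10,
  vlmMaxProcess: 10,
  tokenWorkers: 30, // token count max worker
  hnswEfSearch: 100, // default used by embRecall when unset
  hnswMaxScanTuples: 100000 // new: caps tuples scanned during iterative HNSW search
};
```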
@@ -60,5 +60,3 @@ export enum AppTemplateTypeEnum {
   // special type
   contribute = 'contribute'
 }
-
-export const defaultDatasetMaxTokens = 16000;
@@ -10,6 +10,8 @@ import { AppTypeEnum } from './constants';
 import { AppErrEnum } from '../../common/error/code/app';
 import { PluginErrEnum } from '../../common/error/code/plugin';
 import { i18nT } from '../../../web/i18n/utils';
+import appErrList from '../../common/error/code/app';
+import pluginErrList from '../../common/error/code/plugin';

 export const getDefaultAppForm = (): AppSimpleEditFormType => {
   return {

@@ -190,17 +192,10 @@ export const getAppType = (config?: WorkflowTemplateBasicType | AppSimpleEditFor
   return '';
 };

-export const formatToolError = (error?: string) => {
-  const unExistError: Array<string> = [
-    AppErrEnum.unAuthApp,
-    AppErrEnum.unExist,
-    PluginErrEnum.unAuth,
-    PluginErrEnum.unExist
-  ];
+export const formatToolError = (error?: any) => {
   if (!error || typeof error !== 'string') return;

-  if (error && unExistError.includes(error)) {
-    return i18nT('app:un_auth');
-  } else {
-    return error;
-  }
+  const errorText = appErrList[error]?.message || pluginErrList[error]?.message;
+
+  return errorText || error;
 };
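The rewrite above replaces the hard-coded unExistError allowlist with a lookup in the full error maps. Its observable behavior, sketched directly from the new body:

```ts
// Behavior sketch for the reworked formatToolError (logic from the diff above).
formatToolError(undefined); // -> undefined (non-string input short-circuits)
formatToolError(AppErrEnum.unExist); // -> localized message from appErrList
formatToolError('some opaque error'); // -> 'some opaque error' (no map entry, passed through)
```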
packages/global/core/chat/type.d.ts (vendored) · 1 change

@@ -26,6 +26,7 @@ export type ChatSchema = {
   teamId: string;
   tmbId: string;
   appId: string;
+  createTime: Date;
   updateTime: Date;
   title: string;
   customTitle: string;
packages/global/core/dataset/api.d.ts (vendored) · 38 changes

@@ -1,9 +1,11 @@
-import type { DatasetDataIndexItemType, DatasetSchemaType } from './type';
+import type { ChunkSettingsType, DatasetDataIndexItemType, DatasetSchemaType } from './type';
 import type {
   DatasetCollectionTypeEnum,
   DatasetCollectionDataProcessModeEnum,
   ChunkSettingModeEnum,
-  DataChunkSplitModeEnum
+  DataChunkSplitModeEnum,
+  ChunkTriggerConfigTypeEnum,
+  ParagraphChunkAIModeEnum
 } from './constants';
 import type { LLMModelItemType } from '../ai/model.d';
 import type { ParentIdType } from 'common/parentFolder/type';

@@ -32,26 +34,16 @@ export type DatasetUpdateBody = {
 };

 /* ================= collection ===================== */
-export type DatasetCollectionChunkMetadataType = {
+// Input + store params
+type DatasetCollectionStoreDataType = ChunkSettingsType & {
   parentId?: string;
-  customPdfParse?: boolean;
-  trainingType?: DatasetCollectionDataProcessModeEnum;
-  imageIndex?: boolean;
-  autoIndexes?: boolean;
-
-  chunkSettingMode?: ChunkSettingModeEnum;
-  chunkSplitMode?: DataChunkSplitModeEnum;
-
-  chunkSize?: number;
-  indexSize?: number;
-
-  chunkSplitter?: string;
-  qaPrompt?: string;
   metadata?: Record<string, any>;
+
+  customPdfParse?: boolean;
 };

 // create collection params
-export type CreateDatasetCollectionParams = DatasetCollectionChunkMetadataType & {
+export type CreateDatasetCollectionParams = DatasetCollectionStoreDataType & {
   datasetId: string;
   name: string;
   type: DatasetCollectionTypeEnum;

@@ -72,7 +64,7 @@ export type CreateDatasetCollectionParams = DatasetCollectionChunkMetadataType &
   nextSyncTime?: Date;
 };

-export type ApiCreateDatasetCollectionParams = DatasetCollectionChunkMetadataType & {
+export type ApiCreateDatasetCollectionParams = DatasetCollectionStoreDataType & {
   datasetId: string;
   tags?: string[];
 };

@@ -90,7 +82,7 @@ export type ApiDatasetCreateDatasetCollectionParams = ApiCreateDatasetCollection
 export type FileIdCreateDatasetCollectionParams = ApiCreateDatasetCollectionParams & {
   fileId: string;
 };
-export type reTrainingDatasetFileCollectionParams = DatasetCollectionChunkMetadataType & {
+export type reTrainingDatasetFileCollectionParams = DatasetCollectionStoreDataType & {
   datasetId: string;
   collectionId: string;
 };

@@ -132,6 +124,13 @@ export type PgSearchRawType = {
   collection_id: string;
   score: number;
 };
+
+export type GsSearchRawType = {
+  id: string;
+  collection_id: string;
+  score: number;
+};

 export type PushDatasetDataChunkProps = {
   q: string; // embedding content
   a?: string; // bonus content

@@ -147,6 +146,7 @@ export type PushDatasetDataProps = {
   collectionId: string;
   data: PushDatasetDataChunkProps[];
   trainingType?: DatasetCollectionDataProcessModeEnum;
+  indexSize?: number;
   autoIndexes?: boolean;
   imageIndex?: boolean;
   prompt?: string;
@@ -120,6 +120,8 @@ export const DatasetCollectionSyncResultMap = {
 export enum DatasetCollectionDataProcessModeEnum {
   chunk = 'chunk',
   qa = 'qa',
+  backup = 'backup',
+
   auto = 'auto' // abandon
 }
 export const DatasetCollectionDataProcessModeMap = {

@@ -131,21 +133,35 @@ export const DatasetCollectionDataProcessModeMap = {
     label: i18nT('common:core.dataset.training.QA mode'),
     tooltip: i18nT('common:core.dataset.import.QA Import Tip')
   },
+  [DatasetCollectionDataProcessModeEnum.backup]: {
+    label: i18nT('dataset:backup_mode'),
+    tooltip: i18nT('dataset:backup_mode')
+  },
   [DatasetCollectionDataProcessModeEnum.auto]: {
     label: i18nT('common:core.dataset.training.Auto mode'),
     tooltip: i18nT('common:core.dataset.training.Auto mode Tip')
   }
 };

+export enum ChunkTriggerConfigTypeEnum {
+  minSize = 'minSize',
+  forceChunk = 'forceChunk',
+  maxSize = 'maxSize'
+}
 export enum ChunkSettingModeEnum {
   auto = 'auto',
   custom = 'custom'
 }

 export enum DataChunkSplitModeEnum {
+  paragraph = 'paragraph',
   size = 'size',
   char = 'char'
 }
+export enum ParagraphChunkAIModeEnum {
+  auto = 'auto',
+  force = 'force'
+}

 /* ------------ data -------------- */

@@ -154,7 +170,6 @@ export enum ImportDataSourceEnum {
   fileLocal = 'fileLocal',
   fileLink = 'fileLink',
   fileCustom = 'fileCustom',
-  csvTable = 'csvTable',
   externalFile = 'externalFile',
   apiDataset = 'apiDataset',
   reTraining = 'reTraining'
@@ -32,7 +32,7 @@ export const DatasetDataIndexMap: Record<
     color: 'red'
   },
   [DatasetDataIndexTypeEnum.image]: {
-    label: i18nT('common:data_index_image'),
+    label: i18nT('dataset:data_index_image'),
     color: 'purple'
   }
 };
@@ -118,9 +118,8 @@ export const computeChunkSize = (params: {
     return getLLMMaxChunkSize(params.llmModel);
   }

-  return Math.min(params.chunkSize || chunkAutoChunkSize, getLLMMaxChunkSize(params.llmModel));
+  return Math.min(params.chunkSize ?? chunkAutoChunkSize, getLLMMaxChunkSize(params.llmModel));
 };

 export const computeChunkSplitter = (params: {
   chunkSettingMode?: ChunkSettingModeEnum;
   chunkSplitMode?: DataChunkSplitModeEnum;

@@ -129,8 +128,21 @@ export const computeChunkSplitter = (params: {
   if (params.chunkSettingMode === ChunkSettingModeEnum.auto) {
     return undefined;
   }
-  if (params.chunkSplitMode === DataChunkSplitModeEnum.size) {
+  if (params.chunkSplitMode !== DataChunkSplitModeEnum.char) {
     return undefined;
   }
   return params.chunkSplitter;
 };
+export const computeParagraphChunkDeep = (params: {
+  chunkSettingMode?: ChunkSettingModeEnum;
+  chunkSplitMode?: DataChunkSplitModeEnum;
+  paragraphChunkDeep?: number;
+}) => {
+  if (params.chunkSettingMode === ChunkSettingModeEnum.auto) {
+    return 5;
+  }
+  if (params.chunkSplitMode === DataChunkSplitModeEnum.paragraph) {
+    return params.paragraphChunkDeep;
+  }
+  return 0;
+};
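The new computeParagraphChunkDeep encodes a small decision table; a sketch of the three outcomes, using the enums from this package:

```ts
// Decision table implemented by computeParagraphChunkDeep (function shown above).
computeParagraphChunkDeep({ chunkSettingMode: ChunkSettingModeEnum.auto }); // -> 5
computeParagraphChunkDeep({
  chunkSettingMode: ChunkSettingModeEnum.custom,
  chunkSplitMode: DataChunkSplitModeEnum.paragraph,
  paragraphChunkDeep: 4
}); // -> 4 (user-provided depth)
computeParagraphChunkDeep({
  chunkSettingMode: ChunkSettingModeEnum.custom,
  chunkSplitMode: DataChunkSplitModeEnum.size
}); // -> 0 (no header-based splitting)
```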
packages/global/core/dataset/type.d.ts (vendored) · 48 changes
@@ -8,26 +8,42 @@ import type {
   DatasetStatusEnum,
   DatasetTypeEnum,
   SearchScoreTypeEnum,
-  TrainingModeEnum
+  TrainingModeEnum,
+  ChunkSettingModeEnum,
+  ChunkTriggerConfigTypeEnum
 } from './constants';
 import type { DatasetPermission } from '../../support/permission/dataset/controller';
 import { Permission } from '../../support/permission/controller';
 import type { APIFileServer, FeishuServer, YuqueServer } from './apiDataset';
 import type { SourceMemberType } from 'support/user/type';
 import type { DatasetDataIndexTypeEnum } from './data/constants';
-import type { ChunkSettingModeEnum } from './constants';

 export type ChunkSettingsType = {
-  trainingType: DatasetCollectionDataProcessModeEnum;
-  autoIndexes?: boolean;
+  trainingType?: DatasetCollectionDataProcessModeEnum;
+
+  // Chunk trigger
+  chunkTriggerType?: ChunkTriggerConfigTypeEnum;
+  chunkTriggerMinSize?: number; // maxSize from agent model, not store
+
+  // Data enhance
+  dataEnhanceCollectionName?: boolean; // Auto add collection name to data
+
+  // Index enhance
   imageIndex?: boolean;
+  autoIndexes?: boolean;

-  chunkSettingMode?: ChunkSettingModeEnum;
+  // Chunk setting
+  chunkSettingMode?: ChunkSettingModeEnum; // system params / custom params
   chunkSplitMode?: DataChunkSplitModeEnum;

-  chunkSize?: number;
+  // Paragraph split
+  paragraphChunkAIMode?: ParagraphChunkAIModeEnum;
+  paragraphChunkDeep?: number; // Paragraph deep
+  paragraphChunkMinSize?: number; // Paragraph min size, if too small, it will merge
+  // Size split
+  chunkSize?: number; // chunk/qa chunk size, Paragraph max chunk size.
+  // Char split
+  chunkSplitter?: string; // chunk/qa chunk splitter
   indexSize?: number;
-  chunkSplitter?: string;

   qaPrompt?: string;
 };
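An illustrative value for the reshaped ChunkSettingsType, grouping the new trigger and paragraph fields. The field set comes from the type above; every concrete number is an assumption.

```ts
// Hypothetical ChunkSettingsType value; values are illustrative assumptions.
const chunkSettings: ChunkSettingsType = {
  trainingType: DatasetCollectionDataProcessModeEnum.chunk,
  chunkTriggerType: ChunkTriggerConfigTypeEnum.minSize,
  chunkTriggerMinSize: 1000, // below this size, skip chunking entirely
  imageIndex: false,
  autoIndexes: false,
  chunkSettingMode: ChunkSettingModeEnum.custom,
  chunkSplitMode: DataChunkSplitModeEnum.paragraph,
  paragraphChunkAIMode: ParagraphChunkAIModeEnum.auto,
  paragraphChunkDeep: 4, // split on headers up to ####
  paragraphChunkMinSize: 100,
  chunkSize: 512, // paragraph max chunk size
  indexSize: 512
};
```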
@@ -66,7 +82,7 @@ export type DatasetSchemaType = {
   defaultPermission?: number;
 };

-export type DatasetCollectionSchemaType = {
+export type DatasetCollectionSchemaType = ChunkSettingsType & {
   _id: string;
   teamId: string;
   tmbId: string;

@@ -101,18 +117,7 @@ export type DatasetCollectionSchemaType = {

   // Parse settings
   customPdfParse?: boolean;
-  // Chunk settings
-  autoIndexes?: boolean;
-  imageIndex?: boolean;
-  trainingType: DatasetCollectionDataProcessModeEnum;
-
-  chunkSettingMode?: ChunkSettingModeEnum;
-  chunkSplitMode?: DataChunkSplitModeEnum;
-
-  chunkSize?: number;
-  indexSize?: number;
-  chunkSplitter?: string;
-  qaPrompt?: string;
 };

 export type DatasetCollectionTagsSchemaType = {

@@ -175,6 +180,7 @@ export type DatasetTrainingSchemaType = {
   q: string;
   a: string;
   chunkIndex: number;
+  indexSize?: number;
   weight: number;
   indexes: Omit<DatasetDataIndexItemType, 'dataId'>[];
   retryCount: number;
packages/global/core/workflow/type/node.d.ts (vendored) · 1 change
@@ -59,7 +59,6 @@ export type FlowNodeCommonType = {
 };

 export type PluginDataType = {
-  version?: string;
   diagram?: string;
   userGuide?: string;
   courseUrl?: string;
@@ -1,6 +1,5 @@
 {
   "author": "",
-  "version": "4816",
   "name": "钉钉 webhook",
   "avatar": "plugins/dingding",
   "intro": "向钉钉机器人发起 webhook 请求。",

@@ -1,6 +1,5 @@
 {
   "author": "Menghuan1918",
-  "version": "488",
   "name": "PDF识别",
   "avatar": "plugins/doc2x",
   "intro": "将PDF文件发送至Doc2X进行解析,返回结构化的LaTeX公式的文本(markdown),支持传入String类型的URL或者流程输出中的文件链接变量",

@@ -1,6 +1,5 @@
 {
   "author": "Menghuan1918",
-  "version": "488",
   "name": "Doc2X服务",
   "avatar": "plugins/doc2x",
   "intro": "将传入的图片或PDF文件发送至Doc2X进行解析,返回带LaTeX公式的markdown格式的文本。",

@@ -1,6 +1,5 @@
 {
   "author": "",
-  "version": "4816",
   "name": "企业微信 webhook",
   "avatar": "plugins/qiwei",
   "intro": "向企业微信机器人发起 webhook 请求。只能内部群使用。",

@@ -1,6 +1,5 @@
 {
   "author": "",
-  "version": "4811",
   "name": "Bing搜索",
   "avatar": "core/workflow/template/bing",
   "intro": "在Bing中搜索。",

@@ -1,6 +1,5 @@
 {
   "author": "silencezhang",
-  "version": "4811",
   "name": "数据库连接",
   "avatar": "core/workflow/template/datasource",
   "intro": "可连接常用数据库,并执行sql",

@@ -1,6 +1,5 @@
 {
   "author": "collin",
-  "version": "4817",
   "name": "流程等待",
   "avatar": "core/workflow/template/sleep",
   "intro": "让工作流等待指定时间后运行",

@@ -1,6 +1,5 @@
 {
   "author": "silencezhang",
-  "version": "4817",
   "name": "基础图表",
   "avatar": "core/workflow/template/baseChart",
   "intro": "根据数据生成图表,可根据chartType生成柱状图,折线图,饼图",

@@ -1,6 +1,5 @@
 {
   "author": "silencezhang",
-  "version": "486",
   "name": "BI图表功能",
   "avatar": "core/workflow/template/BI",
   "intro": "BI图表功能,可以生成一些常用的图表,如饼图,柱状图,折线图等",

@@ -1,6 +1,5 @@
 {
   "author": "",
-  "version": "486",
   "name": "DuckDuckGo 网络搜索",
   "avatar": "core/workflow/template/duckduckgo",
   "intro": "使用 DuckDuckGo 进行网络搜索",

@@ -1,6 +1,5 @@
 {
   "author": "",
-  "version": "486",
   "name": "DuckDuckGo 图片搜索",
   "avatar": "core/workflow/template/duckduckgo",
   "intro": "使用 DuckDuckGo 进行图片搜索",

@@ -1,6 +1,5 @@
 {
   "author": "",
-  "version": "486",
   "name": "DuckDuckGo 新闻检索",
   "avatar": "core/workflow/template/duckduckgo",
   "intro": "使用 DuckDuckGo 进行新闻检索",

@@ -1,6 +1,5 @@
 {
   "author": "",
-  "version": "486",
   "name": "DuckDuckGo 视频搜索",
   "avatar": "core/workflow/template/duckduckgo",
   "intro": "使用 DuckDuckGo 进行视频搜索",

@@ -1,6 +1,5 @@
 {
   "author": "",
-  "version": "486",
   "name": "DuckDuckGo服务",
   "avatar": "core/workflow/template/duckduckgo",
   "intro": "DuckDuckGo 服务,包含网络搜索、图片搜索、新闻搜索等。",

@@ -1,6 +1,5 @@
 {
   "author": "",
-  "version": "488",
   "name": "飞书 webhook",
   "avatar": "core/app/templates/plugin-feishu",
   "intro": "向飞书机器人发起 webhook 请求。",

@@ -1,6 +1,5 @@
 {
   "author": "",
-  "version": "486",
   "name": "网页内容抓取",
   "avatar": "core/workflow/template/fetchUrl",
   "intro": "可获取一个网页链接内容,并以 Markdown 格式输出,仅支持获取静态网站。",

@@ -1,6 +1,5 @@
 {
   "author": "",
-  "version": "481",
   "templateType": "tools",
   "name": "获取当前时间",
   "avatar": "core/workflow/template/getTime",

@@ -1,6 +1,5 @@
 {
   "author": "",
-  "version": "4811",
   "name": "Google搜索",
   "avatar": "core/workflow/template/google",
   "intro": "在google中搜索。",

@@ -1,6 +1,5 @@
 {
   "author": "",
-  "version": "486",
   "name": "数学公式执行",
   "avatar": "core/workflow/template/mathCall",
   "intro": "用于执行数学表达式的工具,通过 js 的 expr-eval 库运行表达式并返回结果。",

@@ -1,6 +1,5 @@
 {
   "author": "",
-  "version": "4816",
   "name": "Search XNG 搜索",
   "avatar": "core/workflow/template/searxng",
   "intro": "使用 Search XNG 服务进行搜索。",

@@ -1,6 +1,5 @@
 {
   "author": "cloudpense",
-  "version": "1.0.0",
   "name": "Email 邮件发送",
   "avatar": "plugins/email",
   "intro": "通过SMTP协议发送电子邮件(nodemailer)",

@@ -1,6 +1,5 @@
 {
   "author": "",
-  "version": "489",
   "name": "文本加工",
   "avatar": "/imgs/workflow/textEditor.svg",
   "intro": "可对固定或传入的文本进行加工后输出,非字符串类型数据最终会转成字符串类型。",

@@ -1,6 +1,5 @@
 {
   "author": "",
-  "version": "4811",
   "name": "Wiki搜索",
   "avatar": "core/workflow/template/wiki",
   "intro": "在Wiki中查询释义。",
packages/service/common/api/type.d.ts (vendored) · 18 changes

@@ -6,12 +6,6 @@ import type {
 } from '../../core/dataset/search/controller';
 import type { AuthOpenApiLimitProps } from '../../support/openapi/auth';
 import type { CreateUsageProps, ConcatUsageProps } from '@fastgpt/global/support/wallet/usage/api';
-import type {
-  GetProApiDatasetFileContentParams,
-  GetProApiDatasetFileDetailParams,
-  GetProApiDatasetFileListParams,
-  GetProApiDatasetFilePreviewUrlParams
-} from '../../core/dataset/apiDataset/proApi';

 declare global {
   var textCensorHandler: (params: { text: string }) => Promise<{ code: number; message?: string }>;

@@ -19,16 +13,4 @@ declare global {
   var authOpenApiHandler: (data: AuthOpenApiLimitProps) => Promise<any>;
   var createUsageHandler: (data: CreateUsageProps) => any;
   var concatUsageHandler: (data: ConcatUsageProps) => any;
-
-  // API dataset
-  var getProApiDatasetFileList: (data: GetProApiDatasetFileListParams) => Promise<APIFileItem[]>;
-  var getProApiDatasetFileContent: (
-    data: GetProApiDatasetFileContentParams
-  ) => Promise<ApiFileReadContentResponse>;
-  var getProApiDatasetFilePreviewUrl: (
-    data: GetProApiDatasetFilePreviewUrlParams
-  ) => Promise<string>;
-  var getProApiDatasetFileDetail: (
-    data: GetProApiDatasetFileDetailParams
-  ) => Promise<ApiDatasetDetailResponse>;
 }
@@ -210,15 +210,15 @@ export const readFileContentFromMongo = async ({
   tmbId,
   bucketName,
   fileId,
-  isQAImport = false,
-  customPdfParse = false
+  customPdfParse = false,
+  getFormatText
 }: {
   teamId: string;
   tmbId: string;
   bucketName: `${BucketNameEnum}`;
   fileId: string;
-  isQAImport?: boolean;
   customPdfParse?: boolean;
+  getFormatText?: boolean; // convert all data types to markdown format where possible
 }): Promise<{
   rawText: string;
   filename: string;

@@ -254,8 +254,8 @@ export const readFileContentFromMongo = async ({
   // Get raw text
   const { rawText } = await readRawContentByFileBuffer({
     customPdfParse,
+    getFormatText,
     extension,
-    isQAImport,
     teamId,
     tmbId,
     buffer: fileBuffers,

@@ -16,6 +16,7 @@ export type readRawTextByLocalFileParams = {
   path: string;
   encoding: string;
   customPdfParse?: boolean;
+  getFormatText?: boolean;
   metadata?: Record<string, any>;
 };
 export const readRawTextByLocalFile = async (params: readRawTextByLocalFileParams) => {

@@ -27,8 +28,8 @@ export const readRawTextByLocalFile = async (params: readRawTextByLocalFileParam

   return readRawContentByFileBuffer({
     extension,
-    isQAImport: false,
     customPdfParse: params.customPdfParse,
+    getFormatText: params.getFormatText,
     teamId: params.teamId,
     tmbId: params.tmbId,
     encoding: params.encoding,

@@ -46,7 +47,7 @@ export const readRawContentByFileBuffer = async ({
   encoding,
   metadata,
   customPdfParse = false,
-  isQAImport = false
+  getFormatText = true
 }: {
   teamId: string;
   tmbId: string;

@@ -57,8 +58,10 @@ export const readRawContentByFileBuffer = async ({
   metadata?: Record<string, any>;

   customPdfParse?: boolean;
-  isQAImport: boolean;
-}): Promise<ReadFileResponse> => {
+  getFormatText?: boolean;
+}): Promise<{
+  rawText: string;
+}> => {
   const systemParse = () =>
     runWorker<ReadFileResponse>(WorkerNameEnum.readFile, {
       extension,

@@ -176,16 +179,7 @@ export const readRawContentByFileBuffer = async ({
     });
   }

-  if (['csv', 'xlsx'].includes(extension)) {
-    // qa data
-    if (isQAImport) {
-      rawText = rawText || '';
-    } else {
-      rawText = formatText || rawText;
-    }
-  }
-
   addLog.debug(`Upload file success, time: ${Date.now() - start}ms`);

-  return { rawText, formatText, imageList };
+  return { rawText: getFormatText ? formatText || rawText : rawText };
 };
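A usage sketch for the new getFormatText flag, which replaces isQAImport across these readers. The ids and path are illustrative, and fields beyond those visible in the hunks above are assumptions:

```ts
// Hypothetical call to readRawTextByLocalFile with the new flag.
const { rawText } = await readRawTextByLocalFile({
  teamId: 'team-1', // illustrative ids
  tmbId: 'tmb-1',
  path: '/tmp/report.xlsx',
  encoding: 'utf-8',
  getFormatText: true // prefer the markdown rendering (formatText) over the raw text
});
```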
@@ -10,6 +10,7 @@ let jieba: Jieba | undefined;
 })();

 const stopWords = new Set([
+  '\n',
   '--',
   '?',
   '“',

@@ -1519,8 +1520,7 @@ const stopWords = new Set([
 ]);

 export async function jiebaSplit({ text }: { text: string }) {
-  text = text.replace(/[#*`_~>[\](){}|]/g, '').replace(/\S*https?\S*/gi, '');
-
+  text = text.replace(/[#*`_~>[\](){}|]|\S*https?\S*/g, '').trim();
   const tokens = (await jieba!.cutAsync(text, true)) as string[];

   return (
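The two replace passes in jiebaSplit are now merged into a single regex plus a trim; a quick check of what the combined pattern strips before tokenization:

```ts
// The merged cleanup regex from jiebaSplit above, applied to a sample string.
const sample = '# Title https://example.com *bold* text';
const cleaned = sample.replace(/[#*`_~>[\](){}|]|\S*https?\S*/g, '').trim();
// cleaned === 'Title  bold text' — markdown tokens and URL-bearing tokens removed
```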
@@ -3,5 +3,6 @@ export const DatasetVectorTableName = 'modeldata';

 export const PG_ADDRESS = process.env.PG_URL;
 export const OCEANBASE_ADDRESS = process.env.OCEANBASE_URL;
+export const OPENGAUSS_ADDRESS = process.env.OPENGAUSS_URL;
 export const MILVUS_ADDRESS = process.env.MILVUS_ADDRESS;
 export const MILVUS_TOKEN = process.env.MILVUS_TOKEN;
@@ -1,10 +1,11 @@
 /* vector crud */
 import { PgVectorCtrl } from './pg';
 import { ObVectorCtrl } from './oceanbase';
+import { GsVectorCtrl } from './opengauss';
 import { getVectorsByText } from '../../core/ai/embedding';
 import { type DelDatasetVectorCtrlProps, type InsertVectorProps } from './controller.d';
 import { type EmbeddingModelItemType } from '@fastgpt/global/core/ai/model.d';
-import { MILVUS_ADDRESS, PG_ADDRESS, OCEANBASE_ADDRESS } from './constants';
+import { MILVUS_ADDRESS, PG_ADDRESS, OCEANBASE_ADDRESS, OPENGAUSS_ADDRESS } from './constants';
 import { MilvusCtrl } from './milvus';
 import { setRedisCache, getRedisCache, delRedisCache, CacheKeyEnum } from '../redis/cache';
 import { throttle } from 'lodash';

@@ -14,6 +15,7 @@ const getVectorObj = () => {
   if (PG_ADDRESS) return new PgVectorCtrl();
   if (OCEANBASE_ADDRESS) return new ObVectorCtrl();
   if (MILVUS_ADDRESS) return new MilvusCtrl();
+  if (OPENGAUSS_ADDRESS) return new GsVectorCtrl();

   return new PgVectorCtrl();
 };
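Since each *_ADDRESS constant is read from process.env at module load, getVectorObj makes vector-backend selection a deploy-time switch. A sketch of the precedence implied above:

```ts
// Precedence sketch for getVectorObj (logic shown above):
//   PG_URL > OCEANBASE_URL > MILVUS_ADDRESS > OPENGAUSS_URL > PgVectorCtrl fallback.
// An openGauss deployment therefore sets only OPENGAUSS_URL, e.g. in docker-compose:
//   OPENGAUSS_URL=opengauss://gaussdb:Huawei12%23%24@gs:9999/test
const vectorCtrl = getVectorObj(); // -> GsVectorCtrl when only OPENGAUSS_URL is set
```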
packages/service/common/vectorDB/opengauss/controller.ts · 188 lines · new file

@@ -0,0 +1,188 @@
import { delay } from '@fastgpt/global/common/system/utils';
import { addLog } from '../../system/log';
import { Pool } from 'pg';
import type { QueryResultRow } from 'pg';
import { OPENGAUSS_ADDRESS } from '../constants';

export const connectGs = async (): Promise<Pool> => {
  if (global.gsClient) {
    return global.gsClient;
  }

  global.gsClient = new Pool({
    connectionString: OPENGAUSS_ADDRESS,
    max: Number(process.env.DB_MAX_LINK || 20),
    min: 10,
    keepAlive: true,
    idleTimeoutMillis: 600000,
    connectionTimeoutMillis: 20000,
    query_timeout: 30000,
    statement_timeout: 40000,
    idle_in_transaction_session_timeout: 60000
  });

  global.gsClient.on('error', async (err) => {
    addLog.error(`openGauss error`, err);
    global.gsClient?.end();
    global.gsClient = null;

    await delay(1000);
    addLog.info(`Retry connect openGauss`);
    connectGs();
  });

  try {
    await global.gsClient.connect();
    console.log('openGauss connected');
    return global.gsClient;
  } catch (error) {
    addLog.error(`openGauss connect error`, error);
    global.gsClient?.end();
    global.gsClient = null;

    await delay(1000);
    addLog.info(`Retry connect openGauss`);

    return connectGs();
  }
};

type WhereProps = (string | [string, string | number])[];
type GetProps = {
  fields?: string[];
  where?: WhereProps;
  order?: { field: string; mode: 'DESC' | 'ASC' | string }[];
  limit?: number;
  offset?: number;
};

type DeleteProps = {
  where: WhereProps;
};

type ValuesProps = { key: string; value?: string | number }[];
type UpdateProps = {
  values: ValuesProps;
  where: WhereProps;
};
type InsertProps = {
  values: ValuesProps[];
};

class GsClass {
  private getWhereStr(where?: WhereProps) {
    return where
      ? `WHERE ${where
          .map((item) => {
            if (typeof item === 'string') {
              return item;
            }
            const val = typeof item[1] === 'number' ? item[1] : `'${String(item[1])}'`;
            return `${item[0]}=${val}`;
          })
          .join(' ')}`
      : '';
  }
  private getUpdateValStr(values: ValuesProps) {
    return values
      .map((item) => {
        const val =
          typeof item.value === 'number'
            ? item.value
            : `'${String(item.value).replace(/\'/g, '"')}'`;

        return `${item.key}=${val}`;
      })
      .join(',');
  }
  private getInsertValStr(values: ValuesProps[]) {
    return values
      .map(
        (items) =>
          `(${items
            .map((item) =>
              typeof item.value === 'number'
                ? item.value
                : `'${String(item.value).replace(/\'/g, '"')}'`
            )
            .join(',')})`
      )
      .join(',');
  }
  async select<T extends QueryResultRow = any>(table: string, props: GetProps) {
    const sql = `SELECT ${
      !props.fields || props.fields?.length === 0 ? '*' : props.fields?.join(',')
    }
      FROM ${table}
      ${this.getWhereStr(props.where)}
      ${
        props.order
          ? `ORDER BY ${props.order.map((item) => `${item.field} ${item.mode}`).join(',')}`
          : ''
      }
      LIMIT ${props.limit || 10} OFFSET ${props.offset || 0}
    `;

    const gs = await connectGs();
    return gs.query<T>(sql);
  }
  async count(table: string, props: GetProps) {
    const sql = `SELECT COUNT(${props?.fields?.[0] || '*'})
      FROM ${table}
      ${this.getWhereStr(props.where)}
    `;

    const gs = await connectGs();
    return gs.query(sql).then((res) => Number(res.rows[0]?.count || 0));
  }
  async delete(table: string, props: DeleteProps) {
    const sql = `DELETE FROM ${table} ${this.getWhereStr(props.where)}`;
    const gs = await connectGs();
    return gs.query(sql);
  }
  async update(table: string, props: UpdateProps) {
    if (props.values.length === 0) {
      return {
        rowCount: 0
      };
    }

    const sql = `UPDATE ${table} SET ${this.getUpdateValStr(props.values)} ${this.getWhereStr(
      props.where
    )}`;
    const gs = await connectGs();
    return gs.query(sql);
  }
  async insert(table: string, props: InsertProps) {
    if (props.values.length === 0) {
      return {
        rowCount: 0,
        rows: []
      };
    }

    const fields = props.values[0].map((item) => item.key).join(',');
    const sql = `INSERT INTO ${table} (${fields}) VALUES ${this.getInsertValStr(
      props.values
    )} RETURNING id`;

    const gs = await connectGs();
    return gs.query<{ id: string }>(sql);
  }
  async query<T extends QueryResultRow = any>(sql: string) {
    const gs = await connectGs();
    const start = Date.now();
    return gs.query<T>(sql).then((res) => {
      const time = Date.now() - start;

      if (time > 300) {
        addLog.warn(`gs query time: ${time}ms, sql: ${sql}`);
      }

      return res;
    });
  }
}

export const GsClient = new GsClass();
export const Gs = global.gsClient;
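A usage sketch for the GsClient helper: WhereProps interleaves column/value pairs with raw connector strings, which getWhereStr joins with spaces. Table and filter values are illustrative:

```ts
// Hypothetical query through GsClient (class defined above).
const { rows } = await GsClient.select<{ id: string; collection_id: string }>(
  DatasetVectorTableName,
  {
    fields: ['id', 'collection_id'],
    where: [['team_id', 'team-1'], 'AND', ['dataset_id', 'ds-1']], // -> WHERE team_id='team-1' AND dataset_id='ds-1'
    order: [{ field: 'createtime', mode: 'DESC' }],
    limit: 20,
    offset: 0
  }
);
```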
packages/service/common/vectorDB/opengauss/index.ts · 253 lines · new file

@@ -0,0 +1,253 @@
|
||||
/* pg vector crud */
|
||||
import { DatasetVectorTableName } from '../constants';
|
||||
import { delay } from '@fastgpt/global/common/system/utils';
|
||||
import { GsClient, connectGs } from './controller';
|
||||
import { GsSearchRawType } from '@fastgpt/global/core/dataset/api';
|
||||
import type {
|
||||
DelDatasetVectorCtrlProps,
|
||||
EmbeddingRecallCtrlProps,
|
||||
EmbeddingRecallResponse,
|
||||
InsertVectorControllerProps
|
||||
} from '../controller.d';
|
||||
import dayjs from 'dayjs';
|
||||
import { addLog } from '../../system/log';
|
||||
|
||||
export class GsVectorCtrl {
|
||||
constructor() {}
|
||||
init = async () => {
|
||||
try {
|
||||
await connectGs();
|
||||
await GsClient.query(`
|
||||
CREATE EXTENSION IF NOT EXISTS vector;
|
||||
CREATE TABLE IF NOT EXISTS ${DatasetVectorTableName} (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
vector VECTOR(1536) NOT NULL,
|
||||
team_id VARCHAR(50) NOT NULL,
|
||||
dataset_id VARCHAR(50) NOT NULL,
|
||||
collection_id VARCHAR(50) NOT NULL,
|
||||
createtime TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
`);
|
||||
|
||||
await GsClient.query(
|
||||
`CREATE INDEX CONCURRENTLY IF NOT EXISTS vector_index ON ${DatasetVectorTableName} USING hnsw (vector vector_ip_ops) WITH (m = 32, ef_construction = 128);`
|
||||
);
|
||||
await GsClient.query(
|
||||
`CREATE INDEX CONCURRENTLY IF NOT EXISTS team_dataset_collection_index ON ${DatasetVectorTableName} USING btree(team_id, dataset_id, collection_id);`
|
||||
);
|
||||
await GsClient.query(
|
||||
`CREATE INDEX CONCURRENTLY IF NOT EXISTS create_time_index ON ${DatasetVectorTableName} USING btree(createtime);`
|
||||
);
|
||||
|
||||
addLog.info('init pg successful');
|
||||
} catch (error) {
|
||||
addLog.error('init pg error', error);
|
||||
}
|
||||
};
|
||||
insert = async (props: InsertVectorControllerProps): Promise<{ insertId: string }> => {
|
||||
const { teamId, datasetId, collectionId, vector, retry = 3 } = props;
|
||||
|
||||
try {
|
||||
const { rowCount, rows } = await GsClient.insert(DatasetVectorTableName, {
|
||||
values: [
|
||||
[
|
||||
{ key: 'vector', value: `[${vector}]` },
|
||||
{ key: 'team_id', value: String(teamId) },
|
||||
{ key: 'dataset_id', value: String(datasetId) },
|
||||
{ key: 'collection_id', value: String(collectionId) }
|
||||
]
|
||||
]
|
||||
});
|
||||
|
||||
if (rowCount === 0) {
|
||||
return Promise.reject('insertDatasetData: no insert');
|
||||
}
|
||||
|
||||
return {
|
||||
insertId: rows[0].id
|
||||
};
|
||||
} catch (error) {
|
||||
if (retry <= 0) {
|
||||
return Promise.reject(error);
|
||||
}
|
||||
await delay(500);
|
||||
return this.insert({
|
||||
...props,
|
||||
retry: retry - 1
|
||||
});
|
||||
}
|
||||
};
|
||||
delete = async (props: DelDatasetVectorCtrlProps): Promise<any> => {
|
||||
const { teamId, retry = 2 } = props;
|
||||
|
||||
const teamIdWhere = `team_id='${String(teamId)}' AND`;
|
||||
|
||||
const where = await (() => {
|
||||
if ('id' in props && props.id) return `${teamIdWhere} id=${props.id}`;
|
||||
|
||||
if ('datasetIds' in props && props.datasetIds) {
|
||||
const datasetIdWhere = `dataset_id IN (${props.datasetIds
|
||||
.map((id) => `'${String(id)}'`)
|
||||
.join(',')})`;
|
||||
|
||||
if ('collectionIds' in props && props.collectionIds) {
|
||||
return `${teamIdWhere} ${datasetIdWhere} AND collection_id IN (${props.collectionIds
|
||||
.map((id) => `'${String(id)}'`)
|
||||
.join(',')})`;
|
||||
}
|
||||
|
||||
return `${teamIdWhere} ${datasetIdWhere}`;
|
||||
}
|
||||
|
||||
if ('idList' in props && Array.isArray(props.idList)) {
|
||||
if (props.idList.length === 0) return;
|
||||
return `${teamIdWhere} id IN (${props.idList.map((id) => String(id)).join(',')})`;
|
||||
}
|
||||
return Promise.reject('deleteDatasetData: no where');
|
||||
})();
|
||||
|
||||
if (!where) return;
|
||||
|
||||
try {
|
||||
await GsClient.delete(DatasetVectorTableName, {
|
||||
where: [where]
|
||||
});
|
||||
} catch (error) {
|
||||
if (retry <= 0) {
|
||||
return Promise.reject(error);
|
||||
}
|
||||
await delay(500);
|
||||
return this.delete({
|
||||
...props,
|
||||
retry: retry - 1
|
||||
});
|
||||
}
|
||||
};
embRecall = async (props: EmbeddingRecallCtrlProps): Promise<EmbeddingRecallResponse> => {
  const {
    teamId,
    datasetIds,
    vector,
    limit,
    forbidCollectionIdList,
    filterCollectionIdList,
    retry = 2
  } = props;

  // Get forbid collection
  const formatForbidCollectionIdList = (() => {
    if (!filterCollectionIdList) return forbidCollectionIdList;
    const list = forbidCollectionIdList
      .map((id) => String(id))
      .filter((id) => !filterCollectionIdList.includes(id));
    return list;
  })();
  const forbidCollectionSql =
    formatForbidCollectionIdList.length > 0
      ? `AND collection_id NOT IN (${formatForbidCollectionIdList.map((id) => `'${id}'`).join(',')})`
      : '';

  // Filter by collectionId
  const formatFilterCollectionId = (() => {
    if (!filterCollectionIdList) return;

    return filterCollectionIdList
      .map((id) => String(id))
      .filter((id) => !forbidCollectionIdList.includes(id));
  })();
  const filterCollectionIdSql = formatFilterCollectionId
    ? `AND collection_id IN (${formatFilterCollectionId.map((id) => `'${id}'`).join(',')})`
    : '';
  // Empty data
  if (formatFilterCollectionId && formatFilterCollectionId.length === 0) {
    return { results: [] };
  }

  try {
    const results: any = await GsClient.query(
      `BEGIN;
      SET ob_hnsw_ef_search = ${global.systemEnv?.hnswEfSearch || 100};
      SELECT id, collection_id, inner_product(vector, [${vector}]) AS score
        FROM ${DatasetVectorTableName}
        WHERE team_id='${teamId}'
          AND dataset_id IN (${datasetIds.map((id) => `'${String(id)}'`).join(',')})
          ${filterCollectionIdSql}
          ${forbidCollectionSql}
        ORDER BY score desc APPROXIMATE LIMIT ${limit};
      COMMIT;`
    );
    const rows = results?.[3]?.rows as GsSearchRawType[];

    if (!Array.isArray(rows)) {
      return {
        results: []
      };
    }

    return {
      results: rows.map((item) => ({
        id: String(item.id),
        collectionId: item.collection_id,
        score: item.score * -1
      }))
    };
  } catch (error) {
    if (retry <= 0) {
      return Promise.reject(error);
    }
    return this.embRecall({
      ...props,
      retry: retry - 1
    });
  }
};
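Editor's note: a hedged usage sketch of the recall path above; the ids and the embedding variable are illustrative. Note that the method flips the sign of the raw inner_product score before returning it, so callers see the negated value:

const { results } = await vectorCtrl.embRecall({
  teamId,
  datasetIds: ['ds1'],
  vector: queryEmbedding, // number[] of length 1536
  limit: 30,
  forbidCollectionIdList: [],
  filterCollectionIdList: undefined
});
// results: [{ id, collectionId, score }], score = -1 * inner_product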
getVectorDataByTime = async (start: Date, end: Date) => {
  const { rows } = await GsClient.query<{
    id: string;
    team_id: string;
    dataset_id: string;
  }>(`SELECT id, team_id, dataset_id
    FROM ${DatasetVectorTableName}
    WHERE createtime BETWEEN '${dayjs(start).format('YYYY-MM-DD HH:mm:ss')}' AND '${dayjs(
      end
    ).format('YYYY-MM-DD HH:mm:ss')}';
  `);

  return rows.map((item) => ({
    id: String(item.id),
    teamId: item.team_id,
    datasetId: item.dataset_id
  }));
};
getVectorCountByTeamId = async (teamId: string) => {
  const total = await GsClient.count(DatasetVectorTableName, {
    where: [['team_id', String(teamId)]]
  });

  return total;
};
getVectorCountByDatasetId = async (teamId: string, datasetId: string) => {
  const total = await GsClient.count(DatasetVectorTableName, {
    where: [['team_id', String(teamId)], 'and', ['dataset_id', String(datasetId)]]
  });

  return total;
};
getVectorCountByCollectionId = async (
  teamId: string,
  datasetId: string,
  collectionId: string
) => {
  const total = await GsClient.count(DatasetVectorTableName, {
    where: [
      ['team_id', String(teamId)],
      'and',
      ['dataset_id', String(datasetId)],
      'and',
      ['collection_id', String(collectionId)]
    ]
  });

  return total;
};
}
@@ -188,6 +188,7 @@ export class PgVectorCtrl {
    const results: any = await PgClient.query(
      `BEGIN;
        SET LOCAL hnsw.ef_search = ${global.systemEnv?.hnswEfSearch || 100};
        SET LOCAL hnsw.max_scan_tuples = ${global.systemEnv?.hnswMaxScanTuples || 100000};
        SET LOCAL hnsw.iterative_scan = relaxed_order;
        WITH relaxed_results AS MATERIALIZED (
          select id, collection_id, vector <#> '[${vector}]' AS score
@@ -199,7 +200,7 @@ export class PgVectorCtrl {
        ) SELECT id, collection_id, score FROM relaxed_results ORDER BY score;
      COMMIT;`
    );
    const rows = results?.[3]?.rows as PgSearchRawType[];
    const rows = results?.[results.length - 2]?.rows as PgSearchRawType[];

    if (!Array.isArray(rows)) {
      return {

1 packages/service/common/vectorDB/type.d.ts vendored
@@ -6,6 +6,7 @@ declare global {
  var pgClient: Pool | null;
  var obClient: MysqlPool | null;
  var milvusClient: MilvusClient | null;
  var gsClient: Pool | null;
}

export type EmbeddingRecallItemType = {

@@ -78,7 +78,7 @@ export const createChatCompletion = async ({
  }
  body.model = modelConstantsData.model;

  const formatTimeout = timeout ? timeout : body.stream ? 60000 : 600000;
  const formatTimeout = timeout ? timeout : 600000;
  const ai = getAIApi({
    userKey,
    timeout: formatTimeout
@@ -1,6 +1,54 @@
{
  "provider": "Claude",
  "list": [
    {
      "model": "claude-sonnet-4-20250514",
      "name": "claude-sonnet-4-20250514",
      "maxContext": 200000,
      "maxResponse": 8000,
      "quoteMaxToken": 100000,
      "maxTemperature": 1,
      "showTopP": true,
      "showStopSign": true,
      "vision": true,
      "toolChoice": true,
      "functionCall": false,
      "defaultSystemChatPrompt": "",
      "datasetProcess": true,
      "usedInClassify": true,
      "customCQPrompt": "",
      "usedInExtractFields": true,
      "usedInQueryExtension": true,
      "customExtractPrompt": "",
      "usedInToolCall": true,
      "defaultConfig": {},
      "fieldMap": {},
      "type": "llm"
    },
    {
      "model": "claude-opus-4-20250514",
      "name": "claude-opus-4-20250514",
      "maxContext": 200000,
      "maxResponse": 4096,
      "quoteMaxToken": 100000,
      "maxTemperature": 1,
      "showTopP": true,
      "showStopSign": true,
      "vision": true,
      "toolChoice": true,
      "functionCall": false,
      "defaultSystemChatPrompt": "",
      "datasetProcess": true,
      "usedInClassify": true,
      "customCQPrompt": "",
      "usedInExtractFields": true,
      "usedInQueryExtension": true,
      "customExtractPrompt": "",
      "usedInToolCall": true,
      "defaultConfig": {},
      "fieldMap": {},
      "type": "llm"
    },
    {
      "model": "claude-3-7-sonnet-20250219",
      "name": "claude-3-7-sonnet-20250219",

@@ -25,6 +25,30 @@
      "showTopP": true,
      "showStopSign": true
    },
    {
      "model": "gemini-2.5-flash-preview-04-17",
      "name": "gemini-2.5-flash-preview-04-17",
      "maxContext": 1000000,
      "maxResponse": 8000,
      "quoteMaxToken": 60000,
      "maxTemperature": 1,
      "vision": true,
      "toolChoice": true,
      "functionCall": false,
      "defaultSystemChatPrompt": "",
      "datasetProcess": true,
      "usedInClassify": true,
      "customCQPrompt": "",
      "usedInExtractFields": true,
      "usedInQueryExtension": true,
      "customExtractPrompt": "",
      "usedInToolCall": true,
      "defaultConfig": {},
      "fieldMap": {},
      "type": "llm",
      "showTopP": true,
      "showStopSign": true
    },
    {
      "model": "gemini-2.0-flash",
      "name": "gemini-2.0-flash",
@@ -18,15 +18,17 @@ import json5 from 'json5';
 */
export const computedMaxToken = ({
  maxToken,
  model
  model,
  min
}: {
  maxToken?: number;
  model: LLMModelItemType;
  min?: number;
}) => {
  if (maxToken === undefined) return;

  maxToken = Math.min(maxToken, model.maxResponse);
  return maxToken;
  return Math.max(maxToken, min || 0);
};
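Editor's note: a quick worked example of the new `min` floor (values illustrative; assume `model.maxResponse` is 4096):

computedMaxToken({ maxToken: 8000, model });           // -> 4096 (capped at maxResponse)
computedMaxToken({ maxToken: 100, model, min: 2000 }); // -> 2000 (floored by min)
computedMaxToken({ model });                           // -> undefined (unchanged behavior)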
// FastGPT temperature range: [0,10], ai temperature:[0,2],{0,1]……
@@ -178,7 +180,7 @@ export const llmStreamResponseToAnswerText = async (
    }
  }
  return {
    text: parseReasoningContent(answer)[1],
    text: removeDatasetCiteText(parseReasoningContent(answer)[1], false),
    usage,
    toolCalls
  };
@@ -192,8 +194,9 @@ export const llmUnStreamResponseToAnswerText = async (
}> => {
  const answer = response.choices?.[0]?.message?.content || '';
  const toolCalls = response.choices?.[0]?.message?.tool_calls;

  return {
    text: answer,
    text: removeDatasetCiteText(parseReasoningContent(answer)[1], false),
    usage: response.usage,
    toolCalls
  };
@@ -240,6 +243,12 @@ export const parseLLMStreamResponse = () => {
  let citeBuffer = '';
  const maxCiteBufferLength = 32; // "[Object](CITE)" is 32 characters long in total

  // Buffer
  let buffer_finishReason: CompletionFinishReason = null;
  let buffer_usage: CompletionUsage = getLLMDefaultUsage();
  let buffer_reasoningContent = '';
  let buffer_content = '';

  /*
    parseThinkTag - only controls whether <think></think> is parsed proactively; if the API has already parsed it, it is not parsed again.
    retainDatasetCite -
@@ -257,6 +266,7 @@ export const parseLLMStreamResponse = () => {
    };
    finish_reason?: CompletionFinishReason;
  }[];
  usage?: CompletionUsage;
};
parseThinkTag?: boolean;
retainDatasetCite?: boolean;
@@ -266,72 +276,71 @@ export const parseLLMStreamResponse = () => {
  responseContent: string;
  finishReason: CompletionFinishReason;
} => {
  const finishReason = part.choices?.[0]?.finish_reason || null;
  const content = part.choices?.[0]?.delta?.content || '';
  // @ts-ignore
  const reasoningContent = part.choices?.[0]?.delta?.reasoning_content || '';
  const isStreamEnd = !!finishReason;
  const data = (() => {
    buffer_usage = part.usage || buffer_usage;

    const finishReason = part.choices?.[0]?.finish_reason || null;
    buffer_finishReason = finishReason || buffer_finishReason;

    if (!content) {
      return {
        reasoningContent: '',
        content: ''
      };
    }
    const content = part.choices?.[0]?.delta?.content || '';
    // @ts-ignore
    const reasoningContent = part.choices?.[0]?.delta?.reasoning_content || '';
    const isStreamEnd = !!buffer_finishReason;

    // If we are not inside a think tag, or reasoningContent is present (already parsed by the API), return reasoningContent and content
    if (isInThinkTag === false) {
      return {
        reasoningContent: '',
        content
      };
    }

    // Parse think
    const { reasoningContent: parsedThinkReasoningContent, content: parsedThinkContent } =
      (() => {
        if (reasoningContent || !parseThinkTag) {
          isInThinkTag = false;
          return { reasoningContent, content };
        }

        // Detect whether the data starts with a think tag
        if (isInThinkTag === undefined) {
          // Parse content think and answer
          startTagBuffer += content;
          // Too little content so far; hold off on parsing
          if (startTagBuffer.length < thinkStartChars.length) {
            if (isStreamEnd) {
              const tmpContent = startTagBuffer;
              startTagBuffer = '';
              return {
                reasoningContent: '',
                content: tmpContent
              };
            }
            return {
              reasoningContent: '',
              content: ''
            };
          }

          if (startTagBuffer.startsWith(thinkStartChars)) {
            isInThinkTag = true;
            return {
              reasoningContent: startTagBuffer.slice(thinkStartChars.length),
              content: ''
            };
          }

          // If the think tag was not matched, assume we are not inside one and return the buffered content as content
          isInThinkTag = false;
          return {
            reasoningContent: '',
            content: startTagBuffer
          };
        }

        // Confirmed think-tag content: start returning think content while watching for </think>
        /*
          Strategy for detecting </think>:
          Buffer everything that might be part of </think> until a complete </think> tag is seen or the buffer exceeds the tag length.
          The returned content covers these cases:
@@ -342,124 +351,145 @@ export const parseLLMStreamResponse = () => {
          </think>abc - full end-tag match
          k>abc - partial end-tag match
        */
        // endTagBuffer records content suspected to be part of the end tag
        if (endTagBuffer) {
          endTagBuffer += content;
          if (endTagBuffer.includes(thinkEndChars)) {
            isInThinkTag = false;
            const answer = endTagBuffer.slice(thinkEndChars.length);
            return {
              reasoningContent: '',
              content: answer
            };
          } else if (endTagBuffer.length >= thinkEndChars.length) {
            // The buffer has grown past the end-tag length without matching </think>; the guess failed and we are still in the think phase.
            const tmp = endTagBuffer;
            endTagBuffer = '';
            return {
              reasoningContent: tmp,
              content: ''
            };
          }
          return {
            reasoningContent: '',
            content: ''
          };
        } else if (content.includes(thinkEndChars)) {
          // The content fully matches </think>; finish immediately
          isInThinkTag = false;
          const [think, answer] = content.split(thinkEndChars);
          return {
            reasoningContent: think,
            content: answer
          };
        } else {
          // No buffer and no full </think> match; start probing for a partial </think>.
          for (let i = 1; i < thinkEndChars.length; i++) {
            const partialEndTag = thinkEndChars.slice(0, i);
            // Partial end-tag match
            if (content.endsWith(partialEndTag)) {
              const think = content.slice(0, -partialEndTag.length);
              endTagBuffer += partialEndTag;
              return {
                reasoningContent: think,
                content: ''
              };
            }
          }
        }

        // No end-tag match at all; still in the think phase.
        return {
          reasoningContent: content,
          content: ''
        };
      })();

    // Buffer strings containing "[" and flush them once maxCiteBufferLength is exceeded
    const parseCite = (text: string) => {
      // On stream end, return all remaining content
      if (isStreamEnd) {
        const content = citeBuffer + text;
        return {
          content: removeDatasetCiteText(content, false)
        };
      }

      // New content contains "["; initialize the buffer
      if (text.includes('[')) {
        const index = text.indexOf('[');
        const beforeContent = citeBuffer + text.slice(0, index);
        citeBuffer = text.slice(index);

        // beforeContent may be a plain string or one containing "["
        return {
          content: removeDatasetCiteText(beforeContent, false)
        };
      }
      // Inside the cite buffer; check whether the flush condition is met
      else if (citeBuffer) {
        citeBuffer += text;

        // Check whether the buffer has reached the full quote length or the stream has ended
        if (citeBuffer.length >= maxCiteBufferLength) {
          const content = removeDatasetCiteText(citeBuffer, false);
          citeBuffer = '';

          return {
            content
          };
        } else {
          // Return nothing for now
          return { content: '' };
        }
      }

      return {
        content: text
      };
    };
    const { content: pasedCiteContent } = parseCite(parsedThinkContent);

    // Parse datset cite
    if (retainDatasetCite) {
      return {
        reasoningContent: parsedThinkReasoningContent,
        content: parsedThinkContent,
        responseContent: parsedThinkContent,
        finishReason
        responseContent: pasedCiteContent,
        finishReason: buffer_finishReason
      };
    }
  })();

  buffer_reasoningContent += data.reasoningContent;
  buffer_content += data.content;

  return data;
};

const getResponseData = () => {
  return {
    reasoningContent: parsedThinkReasoningContent,
    content: parsedThinkContent,
    responseContent: pasedCiteContent,
    finishReason
    finish_reason: buffer_finishReason,
    usage: buffer_usage,
    reasoningContent: buffer_reasoningContent,
    content: buffer_content
  };
};

const updateFinishReason = (finishReason: CompletionFinishReason) => {
  buffer_finishReason = finishReason;
};

return {
  parsePart
  parsePart,
  getResponseData,
  updateFinishReason
};
};
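Editor's note: a hedged sketch of driving the refactored parser. The call shape is inferred from this hunk rather than confirmed, and `stream` and `pushToClient` are illustrative stand-ins:

const { parsePart, getResponseData, updateFinishReason } = parseLLMStreamResponse();

for await (const part of stream) {
  // Each SSE delta goes through parsePart; responseContent has cite markers handled.
  const { responseContent } = parsePart({ part, parseThinkTag: true, retainDatasetCite: false });
  if (responseContent) pushToClient(responseContent);
}
// The new buffers make the totals available in one call after the stream closes:
const { content, reasoningContent, usage, finish_reason } = getResponseData();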

@@ -11,40 +11,6 @@ export const beforeUpdateAppFormat = <T extends AppSchema['modules'] | undefined
  nodes: T;
  isPlugin: boolean;
}) => {
  if (nodes) {
    // Check dataset maxTokens
    if (isPlugin) {
      let maxTokens = 16000;

      nodes.forEach((item) => {
        if (
          item.flowNodeType === FlowNodeTypeEnum.chatNode ||
          item.flowNodeType === FlowNodeTypeEnum.tools
        ) {
          const model =
            item.inputs.find((item) => item.key === NodeInputKeyEnum.aiModel)?.value || '';
          const chatModel = getLLMModel(model);
          const quoteMaxToken = chatModel.quoteMaxToken || 16000;

          maxTokens = Math.max(maxTokens, quoteMaxToken);
        }
      });

      nodes.forEach((item) => {
        if (item.flowNodeType === FlowNodeTypeEnum.datasetSearchNode) {
          item.inputs.forEach((input) => {
            if (input.key === NodeInputKeyEnum.datasetMaxTokens) {
              const val = input.value as number;
              if (val > maxTokens) {
                input.value = maxTokens;
              }
            }
          });
        }
      });
    }
  }

  return {
    nodes
  };
@@ -30,8 +30,7 @@ import { Types } from 'mongoose';
  community: community-id
  commercial: commercial-id
*/

export async function splitCombinePluginId(id: string) {
export function splitCombineToolId(id: string) {
  const splitRes = id.split('-');
  if (splitRes.length === 1) {
    // app id
@@ -42,7 +41,7 @@ export async function splitCombinePluginId(id: string) {
  }

  const [source, pluginId] = id.split('-') as [PluginSourceEnum, string];
  if (!source || !pluginId) return Promise.reject('pluginId not found');
  if (!source || !pluginId) throw new Error('pluginId not found');

  return { source, pluginId: id };
}
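Editor's note: the renamed function is now synchronous and throws on malformed ids, so callers no longer need to await it. A hedged sketch (the id is illustrative):

try {
  const { source, pluginId } = splitCombineToolId('community-duckduckgo');
  // source === PluginSourceEnum.community; pluginId keeps the full combined id
} catch (err) {
  // Error('pluginId not found')
}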

@@ -54,7 +53,7 @@ const getSystemPluginTemplateById = async (
  versionId?: string
): Promise<ChildAppType> => {
  const item = getSystemPluginTemplates().find((plugin) => plugin.id === pluginId);
  if (!item) return Promise.reject(PluginErrEnum.unAuth);
  if (!item) return Promise.reject(PluginErrEnum.unExist);

  const plugin = cloneDeep(item);

@@ -64,10 +63,10 @@ const getSystemPluginTemplateById = async (
    { pluginId: plugin.id, 'customConfig.associatedPluginId': plugin.associatedPluginId },
    'associatedPluginId'
  ).lean();
  if (!systemPlugin) return Promise.reject(PluginErrEnum.unAuth);
  if (!systemPlugin) return Promise.reject(PluginErrEnum.unExist);

  const app = await MongoApp.findById(plugin.associatedPluginId).lean();
  if (!app) return Promise.reject(PluginErrEnum.unAuth);
  if (!app) return Promise.reject(PluginErrEnum.unExist);

  const version = versionId
    ? await getAppVersionById({
@@ -77,6 +76,12 @@ const getSystemPluginTemplateById = async (
      })
    : await getAppLatestVersion(plugin.associatedPluginId, app);
  if (!version.versionId) return Promise.reject('App version not found');
  const isLatest = version.versionId
    ? await checkIsLatestVersion({
        appId: plugin.associatedPluginId,
        versionId: version.versionId
      })
    : true;

  return {
    ...plugin,
@@ -85,12 +90,19 @@ const getSystemPluginTemplateById = async (
      edges: version.edges,
      chatConfig: version.chatConfig
    },
    version: versionId || String(version.versionId),
    version: versionId ? version?.versionId : '',
    versionLabel: version?.versionName,
    isLatestVersion: isLatest,
    teamId: String(app.teamId),
    tmbId: String(app.tmbId)
  };
}
return plugin;

return {
  ...plugin,
  version: undefined,
  isLatestVersion: true
};
};

/* Format plugin to workflow preview node data */
@@ -102,11 +114,11 @@ export async function getChildAppPreviewNode({
  versionId?: string;
}): Promise<FlowNodeTemplateType> {
  const app: ChildAppType = await (async () => {
    const { source, pluginId } = await splitCombinePluginId(appId);
    const { source, pluginId } = splitCombineToolId(appId);

    if (source === PluginSourceEnum.personal) {
      const item = await MongoApp.findById(appId).lean();
      if (!item) return Promise.reject('plugin not found');
      if (!item) return Promise.reject(PluginErrEnum.unExist);

      const version = await getAppVersionById({ appId, versionId, app: item });

@@ -132,8 +144,8 @@ export async function getChildAppPreviewNode({
      },
      templateType: FlowNodeTemplateTypeEnum.teamApp,

      version: version.versionId,
      versionLabel: version?.versionName || '',
      version: versionId ? version?.versionId : '',
      versionLabel: version?.versionName,
      isLatestVersion: isLatest,

      originCost: 0,
@@ -142,7 +154,7 @@ export async function getChildAppPreviewNode({
      pluginOrder: 0
    };
  } else {
    return getSystemPluginTemplateById(pluginId);
    return getSystemPluginTemplateById(pluginId, versionId);
  }
})();

@@ -216,12 +228,12 @@ export async function getChildAppRuntimeById(
  id: string,
  versionId?: string
): Promise<PluginRuntimeType> {
  const app: ChildAppType = await (async () => {
    const { source, pluginId } = await splitCombinePluginId(id);
  const app = await (async () => {
    const { source, pluginId } = splitCombineToolId(id);

    if (source === PluginSourceEnum.personal) {
      const item = await MongoApp.findById(id).lean();
      if (!item) return Promise.reject('plugin not found');
      if (!item) return Promise.reject(PluginErrEnum.unExist);

      const version = await getAppVersionById({
        appId: id,
@@ -244,8 +256,6 @@ export async function getChildAppRuntimeById(
      },
      templateType: FlowNodeTemplateTypeEnum.teamApp,

      // Not used
      version: item?.pluginData?.nodeVersion,
      originCost: 0,
      currentCost: 0,
      hasTokenFee: false,

@@ -1,6 +1,6 @@
import { type ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { type PluginRuntimeType } from '@fastgpt/global/core/plugin/type';
import { splitCombinePluginId } from './controller';
import { splitCombineToolId } from './controller';
import { PluginSourceEnum } from '@fastgpt/global/core/plugin/constants';

/*
@@ -20,7 +20,7 @@ export const computedPluginUsage = async ({
  childrenUsage: ChatNodeUsageType[];
  error?: boolean;
}) => {
  const { source } = await splitCombinePluginId(plugin.id);
  const { source } = splitCombineToolId(plugin.id);
  const childrenUsages = childrenUsage.reduce((sum, item) => sum + (item.totalPoints || 0), 0);

  if (source !== PluginSourceEnum.personal) {
@@ -1,14 +1,13 @@
import { MongoDataset } from '../dataset/schema';
import { getEmbeddingModel } from '../ai/model';
import {
  AppNodeFlowNodeTypeMap,
  FlowNodeTypeEnum
} from '@fastgpt/global/core/workflow/node/constant';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import type { StoreNodeItemType } from '@fastgpt/global/core/workflow/type/node';
import { MongoAppVersion } from './version/schema';
import { checkIsLatestVersion } from './version/controller';
import { Types } from '../../common/mongo';
import { getChildAppPreviewNode, splitCombineToolId } from './plugin/controller';
import { PluginSourceEnum } from '@fastgpt/global/core/plugin/constants';
import { authAppByTmbId } from '../../support/permission/app/auth';
import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant';
import { getErrText } from '@fastgpt/global/common/error/utils';

export async function listAppDatasetDataByTeamIdAndDatasetIds({
  teamId,
@@ -33,53 +32,58 @@ export async function listAppDatasetDataByTeamIdAndDatasetIds({
export async function rewriteAppWorkflowToDetail({
  nodes,
  teamId,
  isRoot
  isRoot,
  ownerTmbId
}: {
  nodes: StoreNodeItemType[];
  teamId: string;
  isRoot: boolean;
  ownerTmbId: string;
}) {
  const datasetIdSet = new Set<string>();

  // Add node(App Type) versionlabel and latest sign
  const appNodes = nodes.filter((node) => AppNodeFlowNodeTypeMap[node.flowNodeType]);
  const versionIds = appNodes
    .filter((node) => node.version && Types.ObjectId.isValid(node.version))
    .map((node) => node.version);
  /* Add node(App Type) versionlabel and latest sign ==== */
  await Promise.all(
    nodes.map(async (node) => {
      if (!node.pluginId) return;
      const { source } = splitCombineToolId(node.pluginId);

  if (versionIds.length > 0) {
    const versionDataList = await MongoAppVersion.find(
      {
        _id: { $in: versionIds }
      },
      '_id versionName appId time'
    ).lean();
      try {
        const [preview] = await Promise.all([
          getChildAppPreviewNode({
            appId: node.pluginId,
            versionId: node.version
          }),
          ...(source === PluginSourceEnum.personal
            ? [
                authAppByTmbId({
                  tmbId: ownerTmbId,
                  appId: node.pluginId,
                  per: ReadPermissionVal
                })
              ]
            : [])
        ]);

    const versionMap: Record<string, any> = {};

    const isLatestChecks = await Promise.all(
      versionDataList.map(async (version) => {
        const isLatest = await checkIsLatestVersion({
          appId: version.appId,
          versionId: version._id
        });

        return { versionId: String(version._id), isLatest };
      })
    );
    const isLatestMap = new Map(isLatestChecks.map((item) => [item.versionId, item.isLatest]));
    versionDataList.forEach((version) => {
      versionMap[String(version._id)] = version;
    });
    appNodes.forEach((node) => {
      if (!node.version) return;
      const versionData = versionMap[String(node.version)];
      if (versionData) {
        node.versionLabel = versionData.versionName;
        node.isLatestVersion = isLatestMap.get(String(node.version)) || false;
        node.pluginData = {
          diagram: preview.diagram,
          userGuide: preview.userGuide,
          courseUrl: preview.courseUrl,
          name: preview.name,
          avatar: preview.avatar
        };
        node.versionLabel = preview.versionLabel;
        node.isLatestVersion = preview.isLatestVersion;
        node.version = preview.version;
      } catch (error) {
        node.pluginData = {
          error: getErrText(error)
        };
      }
    });
  }
    })
  );

  /* Add node(App Type) versionlabel and latest sign ==== */

  // Get all dataset ids from nodes
  nodes.forEach((node) => {
@@ -68,6 +68,9 @@ export const checkIsLatestVersion = async ({
  appId: string;
  versionId: string;
}) => {
  if (!Types.ObjectId.isValid(versionId)) {
    return false;
  }
  const version = await MongoAppVersion.findOne(
    {
      appId,

@@ -34,6 +34,10 @@ const ChatSchema = new Schema({
    ref: AppCollectionName,
    required: true
  },
  createTime: {
    type: Date,
    default: () => new Date()
  },
  updateTime: {
    type: Date,
    default: () => new Date()

@@ -65,8 +65,8 @@ export const filterGPTMessageByMaxContext = async ({
  if (lastMessage.role === ChatCompletionRequestMessageRoleEnum.User) {
    const tokens = await countGptMessagesTokens([lastMessage, ...tmpChats]);
    maxContext -= tokens;
    // This round exceeds the token budget as a whole; drop it
    if (maxContext < 0) {
    // This round exceeds the token budget as a whole; drop it. But keep at least one round.
    if (maxContext < 0 && chats.length > 0) {
      break;
    }
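Editor's note: an illustrative walk-through of the changed guard (the numbers are made up):

// maxContext = 1000; the newest user round alone costs 1500 tokens.
// Before: maxContext < 0 broke immediately, leaving the history empty.
// After:  chats.length === 0 on the first round, so that round is still kept;
//         only later rounds that overflow the budget are dropped.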

@@ -146,7 +146,8 @@ export const useApiDatasetRequest = ({ apiServer }: { apiServer: APIFileServer }
  tmbId,
  url: previewUrl,
  relatedId: apiFileId,
  customPdfParse
  customPdfParse,
  getFormatText: true
});
return {
  title,

27 packages/service/core/dataset/apiDataset/index.ts Normal file
@@ -0,0 +1,27 @@
import type {
  APIFileServer,
  YuqueServer,
  FeishuServer
} from '@fastgpt/global/core/dataset/apiDataset';
import { useApiDatasetRequest } from './api';
import { useYuqueDatasetRequest } from '../yuqueDataset/api';
import { useFeishuDatasetRequest } from '../feishuDataset/api';

export const getApiDatasetRequest = async (data: {
  apiServer?: APIFileServer;
  yuqueServer?: YuqueServer;
  feishuServer?: FeishuServer;
}) => {
  const { apiServer, yuqueServer, feishuServer } = data;

  if (apiServer) {
    return useApiDatasetRequest({ apiServer });
  }
  if (yuqueServer) {
    return useYuqueDatasetRequest({ yuqueServer });
  }
  if (feishuServer) {
    return useFeishuDatasetRequest({ feishuServer });
  }
  return Promise.reject('Can not find api dataset server');
};
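Editor's note: a hedged usage sketch of the new dispatcher; exactly one server config is expected, and the argument shape mirrors the call sites elsewhere in this diff (the variables are illustrative):

const api = await getApiDatasetRequest({ feishuServer });
const { title, rawText } = await api.getFileContent({
  teamId,
  tmbId,
  apiFileId,
  customPdfParse: false
});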

@@ -1,30 +0,0 @@
import { type ParentIdType } from '@fastgpt/global/common/parentFolder/type';
import { type FeishuServer, type YuqueServer } from '@fastgpt/global/core/dataset/apiDataset';

export enum ProApiDatasetOperationTypeEnum {
  LIST = 'list',
  READ = 'read',
  CONTENT = 'content',
  DETAIL = 'detail'
}

export type ProApiDatasetCommonParams = {
  feishuServer?: FeishuServer;
  yuqueServer?: YuqueServer;
};

export type GetProApiDatasetFileListParams = ProApiDatasetCommonParams & {
  parentId?: ParentIdType;
};

export type GetProApiDatasetFileContentParams = ProApiDatasetCommonParams & {
  apiFileId: string;
};

export type GetProApiDatasetFilePreviewUrlParams = ProApiDatasetCommonParams & {
  apiFileId: string;
};

export type GetProApiDatasetFileDetailParams = ProApiDatasetCommonParams & {
  apiFileId: string;
};
@@ -34,15 +34,17 @@ import { getTrainingModeByCollection } from './utils';
import {
  computeChunkSize,
  computeChunkSplitter,
  computeParagraphChunkDeep,
  getLLMMaxChunkSize
} from '@fastgpt/global/core/dataset/training/utils';
import { DatasetDataIndexTypeEnum } from '@fastgpt/global/core/dataset/data/constants';

export const createCollectionAndInsertData = async ({
  dataset,
  rawText,
  relatedId,
  createCollectionParams,
  isQAImport = false,
  backupParse = false,
  billId,
  session
}: {
@@ -50,8 +52,8 @@ export const createCollectionAndInsertData = async ({
  rawText: string;
  relatedId?: string;
  createCollectionParams: CreateOneCollectionParams;
  backupParse?: boolean;

  isQAImport?: boolean;
  billId?: string;
  session?: ClientSession;
}) => {
@@ -73,15 +75,30 @@ export const createCollectionAndInsertData = async ({
    llmModel: getLLMModel(dataset.agentModel)
  });
  const chunkSplitter = computeChunkSplitter(createCollectionParams);
  const paragraphChunkDeep = computeParagraphChunkDeep(createCollectionParams);

  if (trainingType === DatasetCollectionDataProcessModeEnum.qa) {
    delete createCollectionParams.chunkTriggerType;
    delete createCollectionParams.chunkTriggerMinSize;
    delete createCollectionParams.dataEnhanceCollectionName;
    delete createCollectionParams.imageIndex;
    delete createCollectionParams.autoIndexes;
    delete createCollectionParams.indexSize;
    delete createCollectionParams.qaPrompt;
  }

  // 1. split chunks
  const chunks = rawText2Chunks({
    rawText,
    chunkTriggerType: createCollectionParams.chunkTriggerType,
    chunkTriggerMinSize: createCollectionParams.chunkTriggerMinSize,
    chunkSize,
    paragraphChunkDeep,
    paragraphChunkMinSize: createCollectionParams.paragraphChunkMinSize,
    maxSize: getLLMMaxChunkSize(getLLMModel(dataset.agentModel)),
    overlapRatio: trainingType === DatasetCollectionDataProcessModeEnum.chunk ? 0.2 : 0,
    customReg: chunkSplitter ? [chunkSplitter] : [],
    isQAImport
    backupParse
  });

  // 2. auth limit
@@ -102,6 +119,7 @@ export const createCollectionAndInsertData = async ({
  const { _id: collectionId } = await createOneCollection({
    ...createCollectionParams,
    trainingType,
    paragraphChunkDeep,
    chunkSize,
    chunkSplitter,

@@ -157,6 +175,10 @@ export const createCollectionAndInsertData = async ({
    billId: traingBillId,
    data: chunks.map((item, index) => ({
      ...item,
      indexes: item.indexes?.map((text) => ({
        type: DatasetDataIndexTypeEnum.custom,
        text
      })),
      chunkIndex: index
    })),
    session
@@ -198,46 +220,19 @@ export type CreateOneCollectionParams = CreateDatasetCollectionParams & {
  tmbId: string;
  session?: ClientSession;
};
export async function createOneCollection({
  teamId,
  tmbId,
  name,
  parentId,
  datasetId,
  type,
export async function createOneCollection({ session, ...props }: CreateOneCollectionParams) {
  const {
    teamId,
    parentId,
    datasetId,
    tags,

  createTime,
  updateTime,

  hashRawText,
  rawTextLength,
  metadata = {},
  tags,

  nextSyncTime,

  fileId,
  rawLink,
  externalFileId,
  externalFileUrl,
  apiFileId,

  // Parse settings
  customPdfParse,
  imageIndex,
  autoIndexes,

  // Chunk settings
  trainingType,
  chunkSettingMode,
  chunkSplitMode,
  chunkSize,
  indexSize,
  chunkSplitter,
  qaPrompt,

  session
}: CreateOneCollectionParams) {
    fileId,
    rawLink,
    externalFileId,
    externalFileUrl,
    apiFileId
  } = props;
  // Create collection tags
  const collectionTags = await createOrGetCollectionTags({ tags, teamId, datasetId, session });

@@ -245,41 +240,18 @@ export async function createOneCollection({
  const [collection] = await MongoDatasetCollection.create(
    [
      {
        ...props,
        teamId,
        tmbId,
        parentId: parentId || null,
        datasetId,
        name,
        type,

        rawTextLength,
        hashRawText,
        tags: collectionTags,
        metadata,

        createTime,
        updateTime,
        nextSyncTime,

        ...(fileId ? { fileId } : {}),
        ...(rawLink ? { rawLink } : {}),
        ...(externalFileId ? { externalFileId } : {}),
        ...(externalFileUrl ? { externalFileUrl } : {}),
        ...(apiFileId ? { apiFileId } : {}),

        // Parse settings
        customPdfParse,
        imageIndex,
        autoIndexes,

        // Chunk settings
        trainingType,
        chunkSettingMode,
        chunkSplitMode,
        chunkSize,
        indexSize,
        chunkSplitter,
        qaPrompt
        ...(apiFileId ? { apiFileId } : {})
      }
    ],
    { session, ordered: true }

@@ -34,9 +34,9 @@ const DatasetDataTextSchema = new Schema({

try {
  DatasetDataTextSchema.index(
    { teamId: 1, datasetId: 1, fullTextToken: 'text' },
    { teamId: 1, fullTextToken: 'text' },
    {
      name: 'teamId_1_datasetId_1_fullTextToken_text',
      name: 'teamId_1_fullTextToken_text',
      default_language: 'none'
    }
  );
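Editor's note: because both the index definition and its name change here, existing deployments presumably need the old index dropped before the new one can build. A hedged mongoose sketch (the migration step is an assumption; only the index names are taken from the lines above):

// Assumption: migration is manual on already-deployed databases.
await MongoDatasetDataText.collection.dropIndex('teamId_1_datasetId_1_fullTextToken_text');
await MongoDatasetDataText.syncIndexes();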

208 packages/service/core/dataset/feishuDataset/api.ts Normal file
@@ -0,0 +1,208 @@
import type {
  APIFileItem,
  ApiFileReadContentResponse,
  ApiDatasetDetailResponse,
  FeishuServer
} from '@fastgpt/global/core/dataset/apiDataset';
import { type ParentIdType } from '@fastgpt/global/common/parentFolder/type';
import axios, { type Method } from 'axios';
import { addLog } from '../../../common/system/log';

type ResponseDataType = {
  success: boolean;
  message: string;
  data: any;
};

type FeishuFileListResponse = {
  files: {
    token: string;
    parent_token: string;
    name: string;
    type: string;
    modified_time: number;
    created_time: number;
    url: string;
    owner_id: string;
  }[];
  has_more: boolean;
  next_page_token: string;
};

const feishuBaseUrl = process.env.FEISHU_BASE_URL || 'https://open.feishu.cn';

export const useFeishuDatasetRequest = ({ feishuServer }: { feishuServer: FeishuServer }) => {
  const instance = axios.create({
    baseURL: feishuBaseUrl,
    timeout: 60000
  });

  // Add a request interceptor
  instance.interceptors.request.use(async (config) => {
    if (!config.headers.Authorization) {
      const { data } = await axios.post<{ tenant_access_token: string }>(
        `${feishuBaseUrl}/open-apis/auth/v3/tenant_access_token/internal`,
        {
          app_id: feishuServer.appId,
          app_secret: feishuServer.appSecret
        }
      );

      config.headers['Authorization'] = `Bearer ${data.tenant_access_token}`;
      config.headers['Content-Type'] = 'application/json; charset=utf-8';
    }
    return config;
  });

  /**
   * Response data check
   */
  const checkRes = (data: ResponseDataType) => {
    if (data === undefined) {
      addLog.info('yuque dataset data is empty');
      return Promise.reject('服务器异常');
    }
    return data.data;
  };
  const responseError = (err: any) => {
    console.log('error->', '请求错误', err);

    if (!err) {
      return Promise.reject({ message: '未知错误' });
    }
    if (typeof err === 'string') {
      return Promise.reject({ message: err });
    }
    if (typeof err.message === 'string') {
      return Promise.reject({ message: err.message });
    }
    if (typeof err.data === 'string') {
      return Promise.reject({ message: err.data });
    }
    if (err?.response?.data) {
      return Promise.reject(err?.response?.data);
    }
    return Promise.reject(err);
  };

  const request = <T>(url: string, data: any, method: Method): Promise<T> => {
    /* Strip undefined fields */
    for (const key in data) {
      if (data[key] === undefined) {
        delete data[key];
      }
    }

    return instance
      .request({
        url,
        method,
        data: ['POST', 'PUT'].includes(method) ? data : undefined,
        params: !['POST', 'PUT'].includes(method) ? data : undefined
      })
      .then((res) => checkRes(res.data))
      .catch((err) => responseError(err));
  };

  const listFiles = async ({ parentId }: { parentId?: ParentIdType }): Promise<APIFileItem[]> => {
    const fetchFiles = async (pageToken?: string): Promise<FeishuFileListResponse['files']> => {
      const data = await request<FeishuFileListResponse>(
        `/open-apis/drive/v1/files`,
        {
          folder_token: parentId || feishuServer.folderToken,
          page_size: 200,
          page_token: pageToken
        },
        'GET'
      );

      if (data.has_more) {
        const nextFiles = await fetchFiles(data.next_page_token);
        return [...data.files, ...nextFiles];
      }

      return data.files;
    };

    const allFiles = await fetchFiles();

    return allFiles
      .filter((file) => ['folder', 'docx'].includes(file.type))
      .map((file) => ({
        id: file.token,
        parentId: file.parent_token,
        name: file.name,
        type: file.type === 'folder' ? ('folder' as const) : ('file' as const),
        hasChild: file.type === 'folder',
        updateTime: new Date(file.modified_time * 1000),
        createTime: new Date(file.created_time * 1000)
      }));
  };

  const getFileContent = async ({
    apiFileId
  }: {
    apiFileId: string;
  }): Promise<ApiFileReadContentResponse> => {
    const [{ content }, { document }] = await Promise.all([
      request<{ content: string }>(
        `/open-apis/docx/v1/documents/${apiFileId}/raw_content`,
        {},
        'GET'
      ),
      request<{ document: { title: string } }>(
        `/open-apis/docx/v1/documents/${apiFileId}`,
        {},
        'GET'
      )
    ]);

    return {
      title: document?.title,
      rawText: content
    };
  };

  const getFilePreviewUrl = async ({ apiFileId }: { apiFileId: string }): Promise<string> => {
    const { metas } = await request<{ metas: { url: string }[] }>(
      `/open-apis/drive/v1/metas/batch_query`,
      {
        request_docs: [
          {
            doc_token: apiFileId,
            doc_type: 'docx'
          }
        ],
        with_url: true
      },
      'POST'
    );

    return metas[0].url;
  };

  const getFileDetail = async ({
    apiFileId
  }: {
    apiFileId: string;
  }): Promise<ApiDatasetDetailResponse> => {
    const { document } = await request<{ document: { title: string } }>(
      `/open-apis/docx/v1/documents/${apiFileId}`,
      {},
      'GET'
    );

    return {
      name: document?.title,
      parentId: null,
      id: apiFileId
    };
  };

  return {
    getFileContent,
    listFiles,
    getFilePreviewUrl,
    getFileDetail
  };
};
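Editor's note: a hedged usage sketch of the new Feishu client (credentials are illustrative; FEISHU_BASE_URL falls back to https://open.feishu.cn as shown above):

const feishu = useFeishuDatasetRequest({
  feishuServer: { appId: 'cli_xxx', appSecret: '***', folderToken: 'fldcn_xxx' }
});
const files = await feishu.listFiles({}); // recursively pages /open-apis/drive/v1/files
const { title, rawText } = await feishu.getFileContent({ apiFileId: files[0].id });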

@@ -1,8 +1,10 @@
import { BucketNameEnum } from '@fastgpt/global/common/file/constants';
import { DatasetSourceReadTypeEnum } from '@fastgpt/global/core/dataset/constants';
import {
  ChunkTriggerConfigTypeEnum,
  DatasetSourceReadTypeEnum
} from '@fastgpt/global/core/dataset/constants';
import { readFileContentFromMongo } from '../../common/file/gridfs/controller';
import { urlsFetch } from '../../common/string/cheerio';
import { parseCsvTable2Chunks } from './training/utils';
import { type TextSplitProps, splitText2Chunks } from '@fastgpt/global/common/string/textSplitter';
import axios from 'axios';
import { readRawContentByFileBuffer } from '../../common/file/read/utils';
@@ -12,19 +14,22 @@ import {
  type FeishuServer,
  type YuqueServer
} from '@fastgpt/global/core/dataset/apiDataset';
import { useApiDatasetRequest } from './apiDataset/api';
import { getApiDatasetRequest } from './apiDataset';
import Papa from 'papaparse';

export const readFileRawTextByUrl = async ({
  teamId,
  tmbId,
  url,
  customPdfParse,
  getFormatText,
  relatedId
}: {
  teamId: string;
  tmbId: string;
  url: string;
  customPdfParse?: boolean;
  getFormatText?: boolean;
  relatedId: string; // externalFileId / apiFileId
}) => {
  const response = await axios({
@@ -38,7 +43,7 @@ export const readFileRawTextByUrl = async ({

  const { rawText } = await readRawContentByFileBuffer({
    customPdfParse,
    isQAImport: false,
    getFormatText,
    extension,
    teamId,
    tmbId,
@@ -62,21 +67,21 @@ export const readDatasetSourceRawText = async ({
  tmbId,
  type,
  sourceId,
  isQAImport,
  selector,
  externalFileId,
  apiServer,
  feishuServer,
  yuqueServer,
  customPdfParse
  customPdfParse,
  getFormatText
}: {
  teamId: string;
  tmbId: string;
  type: DatasetSourceReadTypeEnum;
  sourceId: string;
  customPdfParse?: boolean;
  getFormatText?: boolean;

  isQAImport?: boolean; // csv data
  selector?: string; // link selector
  externalFileId?: string; // external file dataset
  apiServer?: APIFileServer; // api dataset
@@ -92,8 +97,8 @@ export const readDatasetSourceRawText = async ({
    tmbId,
    bucketName: BucketNameEnum.dataset,
    fileId: sourceId,
    isQAImport,
    customPdfParse
    customPdfParse,
    getFormatText
  });
  return {
    title: filename,
@@ -161,38 +166,82 @@ export const readApiServerFileContent = async ({
  title?: string;
  rawText: string;
}> => {
  if (apiServer) {
    return useApiDatasetRequest({ apiServer }).getFileContent({
      teamId,
      tmbId,
      apiFileId,
      customPdfParse
    });
  }

  if (feishuServer || yuqueServer) {
    return global.getProApiDatasetFileContent({
      feishuServer,
      yuqueServer,
      apiFileId
    });
  }

  return Promise.reject('No apiServer or feishuServer or yuqueServer');
  return (
    await getApiDatasetRequest({
      apiServer,
      yuqueServer,
      feishuServer
    })
  ).getFileContent({
    teamId,
    tmbId,
    apiFileId,
    customPdfParse
  });
};

export const rawText2Chunks = ({
  rawText,
  isQAImport,
  chunkTriggerType = ChunkTriggerConfigTypeEnum.minSize,
  chunkTriggerMinSize = 1000,
  backupParse,
  chunkSize = 512,
  ...splitProps
}: {
  rawText: string;
  isQAImport?: boolean;
} & TextSplitProps) => {
  if (isQAImport) {
    const { chunks } = parseCsvTable2Chunks(rawText);
    return chunks;

  chunkTriggerType?: ChunkTriggerConfigTypeEnum;
  chunkTriggerMinSize?: number; // maxSize from agent model, not store

  backupParse?: boolean;
  tableParse?: boolean;
} & TextSplitProps): {
  q: string;
  a: string;
  indexes?: string[];
}[] => {
  const parseDatasetBackup2Chunks = (rawText: string) => {
    const csvArr = Papa.parse(rawText).data as string[][];
    console.log(rawText, csvArr);

    const chunks = csvArr
      .slice(1)
      .map((item) => ({
        q: item[0] || '',
        a: item[1] || '',
        indexes: item.slice(2)
      }))
      .filter((item) => item.q || item.a);

    return {
      chunks
    };
  };

  // Chunk condition
  // 1. maxSize condition: chunking only triggers once the text exceeds the max size (default: 0.7 * the model's max)
  if (chunkTriggerType === ChunkTriggerConfigTypeEnum.maxSize) {
    const textLength = rawText.trim().length;
    const maxSize = splitProps.maxSize ? splitProps.maxSize * 0.7 : 16000;
    if (textLength < maxSize) {
      return [
        {
          q: rawText,
          a: ''
        }
      ];
    }
  }
  // 2. minSize condition: chunking only triggers once the text exceeds the (manually set) minimum
  if (chunkTriggerType !== ChunkTriggerConfigTypeEnum.forceChunk) {
    const textLength = rawText.trim().length;
    if (textLength < chunkTriggerMinSize) {
      return [{ q: rawText, a: '' }];
    }
  }

  if (backupParse) {
    return parseDatasetBackup2Chunks(rawText).chunks;
  }

  const { chunks } = splitText2Chunks({
@@ -203,6 +252,7 @@ export const rawText2Chunks = ({

  return chunks.map((item) => ({
    q: item,
    a: ''
    a: '',
    indexes: []
  }));
};
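Editor's note: worked examples of the chunk-trigger logic above (inputs are illustrative):

// Default minSize trigger: text under 1000 chars comes back as a single chunk.
rawText2Chunks({ rawText: 'short note', chunkSize: 512 });
// -> [{ q: 'short note', a: '' }]

// maxSize trigger: splitting only happens past ~0.7 * maxSize characters.
rawText2Chunks({
  rawText: longDoc, // assume longDoc.length > 11200
  chunkTriggerType: ChunkTriggerConfigTypeEnum.maxSize,
  maxSize: 16000
});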

@@ -1,10 +1,12 @@
import { getMongoModel, Schema } from '../../common/mongo';
import {
  ChunkSettingModeEnum,
  ChunkTriggerConfigTypeEnum,
  DataChunkSplitModeEnum,
  DatasetCollectionDataProcessModeEnum,
  DatasetTypeEnum,
  DatasetTypeMap
  DatasetTypeMap,
  ParagraphChunkAIModeEnum
} from '@fastgpt/global/core/dataset/constants';
import {
  TeamCollectionName,
@@ -15,12 +17,22 @@ import type { DatasetSchemaType } from '@fastgpt/global/core/dataset/type.d';
export const DatasetCollectionName = 'datasets';

export const ChunkSettings = {
  imageIndex: Boolean,
  autoIndexes: Boolean,
  trainingType: {
    type: String,
    enum: Object.values(DatasetCollectionDataProcessModeEnum)
  },

  chunkTriggerType: {
    type: String,
    enum: Object.values(ChunkTriggerConfigTypeEnum)
  },
  chunkTriggerMinSize: Number,

  dataEnhanceCollectionName: Boolean,

  imageIndex: Boolean,
  autoIndexes: Boolean,

  chunkSettingMode: {
    type: String,
    enum: Object.values(ChunkSettingModeEnum)
@@ -29,6 +41,12 @@ export const ChunkSettings = {
    type: String,
    enum: Object.values(DataChunkSplitModeEnum)
  },
  paragraphChunkAIMode: {
    type: String,
    enum: Object.values(ParagraphChunkAIModeEnum)
  },
  paragraphChunkDeep: Number,
  paragraphChunkMinSize: Number,
  chunkSize: Number,
  chunkSplitter: String,

@@ -115,9 +133,7 @@ const DatasetSchema = new Schema({

  // abandoned
  autoSync: Boolean,
  externalReadUrl: {
    type: String
  },
  externalReadUrl: String,
  defaultPermission: Number
});
@@ -27,6 +27,7 @@ import { type ChatItemType } from '@fastgpt/global/core/chat/type';
import type { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { datasetSearchQueryExtension } from './utils';
import type { RerankModelItemType } from '@fastgpt/global/core/ai/model.d';
import { addLog } from '../../../common/system/log';

export type SearchDatasetDataProps = {
  histories: ChatItemType[];
@@ -544,123 +545,125 @@ export async function searchDatasetData(
    };
  }

Removed (one aggregation per dataset id):

  const searchResults = (
    await Promise.all(
      datasetIds.map(async (id) => {
        return MongoDatasetDataText.aggregate(
          [
            {
              $match: {
                teamId: new Types.ObjectId(teamId),
                datasetId: new Types.ObjectId(id),
                $text: { $search: await jiebaSplit({ text: query }) },
                ...(filterCollectionIdList
                  ? {
                      collectionId: {
                        $in: filterCollectionIdList.map((id) => new Types.ObjectId(id))
                      }
                    }
                  : {}),
                ...(forbidCollectionIdList && forbidCollectionIdList.length > 0
                  ? {
                      collectionId: {
                        $nin: forbidCollectionIdList.map((id) => new Types.ObjectId(id))
                      }
                    }
                  : {})
              }
            },
            {
              $sort: {
                score: { $meta: 'textScore' }
              }
            },
            {
              $limit: limit
            },
            {
              $project: {
                _id: 1,
                collectionId: 1,
                dataId: 1,
                score: { $meta: 'textScore' }
              }
            }
          ],
          {
            ...readFromSecondary
          }
        );
      })
    )
  ).flat() as (DatasetDataTextSchemaType & { score: number })[];

  // Get data and collections
  const [dataList, collections] = await Promise.all([
    MongoDatasetData.find(
      {
        _id: { $in: searchResults.map((item) => item.dataId) }
      },
      '_id datasetId collectionId updateTime q a chunkIndex indexes',
      { ...readFromSecondary }
    ).lean(),
    MongoDatasetCollection.find(
      {
        _id: { $in: searchResults.map((item) => item.collectionId) }
      },
      '_id name fileId rawLink apiFileId externalFileId externalFileUrl',
      { ...readFromSecondary }
    ).lean()
  ]);

  return {
    fullTextRecallResults: searchResults
      .map((item, index) => {
        const collection = collections.find(
          (col) => String(col._id) === String(item.collectionId)
        );
        if (!collection) {
          console.log('Collection is not found', item);
          return;
        }
        const data = dataList.find((data) => String(data._id) === String(item.dataId));
        if (!data) {
          console.log('Data is not found', item);
          return;
        }

        return {
          id: String(data._id),
          datasetId: String(data.datasetId),
          collectionId: String(data.collectionId),
          updateTime: data.updateTime,
          q: data.q,
          a: data.a,
          chunkIndex: data.chunkIndex,
          indexes: data.indexes,
          ...getCollectionSourceData(collection),
          score: [
            {
              type: SearchScoreTypeEnum.fullText,
              value: item.score || 0,
              index
            }
          ]
        };
      })
      .filter((item) => {
        if (!item) return false;
        return true;
      })
      .map((item, index) => {
        if (!item) return;
        return {
          ...item,
          score: item.score.map((item) => ({ ...item, index }))
        };
      }) as SearchDataResponseItemType[],
    tokenLen: 0
  };

Added (a single aggregation across all dataset ids, wrapped in try/catch):

  try {
    const searchResults = (await MongoDatasetDataText.aggregate(
      [
        {
          $match: {
            teamId: new Types.ObjectId(teamId),
            $text: { $search: await jiebaSplit({ text: query }) },
            datasetId: { $in: datasetIds.map((id) => new Types.ObjectId(id)) },
            ...(filterCollectionIdList
              ? {
                  collectionId: {
                    $in: filterCollectionIdList.map((id) => new Types.ObjectId(id))
                  }
                }
              : {}),
            ...(forbidCollectionIdList && forbidCollectionIdList.length > 0
              ? {
                  collectionId: {
                    $nin: forbidCollectionIdList.map((id) => new Types.ObjectId(id))
                  }
                }
              : {})
          }
        },
        {
          $sort: {
            score: { $meta: 'textScore' }
          }
        },
        {
          $limit: limit
        },
        {
          $project: {
            _id: 1,
            collectionId: 1,
            dataId: 1,
            score: { $meta: 'textScore' }
          }
        }
      ],
      {
        ...readFromSecondary
      }
    )) as (DatasetDataTextSchemaType & { score: number })[];

    // Get data and collections
    const [dataList, collections] = await Promise.all([
      MongoDatasetData.find(
        {
          _id: { $in: searchResults.map((item) => item.dataId) }
        },
        '_id datasetId collectionId updateTime q a chunkIndex indexes',
        { ...readFromSecondary }
      ).lean(),
      MongoDatasetCollection.find(
        {
          _id: { $in: searchResults.map((item) => item.collectionId) }
        },
        '_id name fileId rawLink apiFileId externalFileId externalFileUrl',
        { ...readFromSecondary }
      ).lean()
    ]);

    return {
      fullTextRecallResults: searchResults
        .map((item, index) => {
          const collection = collections.find(
            (col) => String(col._id) === String(item.collectionId)
          );
          if (!collection) {
            console.log('Collection is not found', item);
            return;
          }
          const data = dataList.find((data) => String(data._id) === String(item.dataId));
          if (!data) {
            console.log('Data is not found', item);
            return;
          }

          return {
            id: String(data._id),
            datasetId: String(data.datasetId),
            collectionId: String(data.collectionId),
            updateTime: data.updateTime,
            q: data.q,
            a: data.a,
            chunkIndex: data.chunkIndex,
            indexes: data.indexes,
            ...getCollectionSourceData(collection),
            score: [
              {
                type: SearchScoreTypeEnum.fullText,
                value: item.score || 0,
                index
              }
            ]
          };
        })
        .filter((item) => {
          if (!item) return false;
          return true;
        })
        .map((item, index) => {
          if (!item) return;
          return {
            ...item,
            score: item.score.map((item) => ({ ...item, index }))
          };
        }) as SearchDataResponseItemType[],
      tokenLen: 0
    };
  } catch (error) {
    addLog.error('Full text search error', error);
    return {
      fullTextRecallResults: [],
      tokenLen: 0
    };
  }
};
const multiQueryRecall = async ({
  embeddingLimit,
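The refactor above replaces N per-dataset aggregations with one aggregation that matches all dataset ids via $in, and wraps the whole recall in try/catch so a full-text failure degrades to an empty result instead of failing the search. A condensed sketch of the same pattern; SomeDataTextModel is a hypothetical stand-in for the real collection model:

import { Types } from 'mongoose';

const fullTextRecall = async (datasetIds: string[], tokenizedQuery: string, limit: number) => {
  try {
    // One round trip: match all datasets with $in instead of N parallel aggregations.
    return await SomeDataTextModel.aggregate([
      {
        $match: {
          $text: { $search: tokenizedQuery },
          datasetId: { $in: datasetIds.map((id) => new Types.ObjectId(id)) }
        }
      },
      { $sort: { score: { $meta: 'textScore' } } },
      { $limit: limit }
    ]);
  } catch (error) {
    // Degrade gracefully: return an empty recall set instead of a failed search.
    return [];
  }
};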
@@ -1,6 +1,5 @@
export enum ImportDataSourceEnum {
  fileLocal = 'fileLocal',
  fileLink = 'fileLink',
  fileCustom = 'fileCustom',
  tableLocal = 'tableLocal'
  fileCustom = 'fileCustom'
}
@@ -1,16 +0,0 @@
import Papa from 'papaparse';

export const parseCsvTable2Chunks = (rawText: string) => {
  const csvArr = Papa.parse(rawText).data as string[][];

  const chunks = csvArr
    .map((item) => ({
      q: item[0] || '',
      a: item[1] || ''
    }))
    .filter((item) => item.q || item.a);

  return {
    chunks
  };
};
304 packages/service/core/dataset/yuqueDataset/api.ts Normal file
@@ -0,0 +1,304 @@
import type {
  APIFileItem,
  ApiFileReadContentResponse,
  YuqueServer,
  ApiDatasetDetailResponse
} from '@fastgpt/global/core/dataset/apiDataset';
import axios, { type Method } from 'axios';
import { addLog } from '../../../common/system/log';
import { type ParentIdType } from '@fastgpt/global/common/parentFolder/type';

type ResponseDataType = {
  success: boolean;
  message: string;
  data: any;
};

type YuqueRepoListResponse = {
  id: string;
  name: string;
  title: string;
  book_id: string | null;
  type: string;
  updated_at: Date;
  created_at: Date;
  slug?: string;
}[];

type YuqueTocListResponse = {
  uuid: string;
  type: string;
  title: string;
  url: string;
  slug: string;
  id: string;
  doc_id: string;
  prev_uuid: string;
  sibling_uuid: string;
  child_uuid: string;
  parent_uuid: string;
}[];

const yuqueBaseUrl = process.env.YUQUE_DATASET_BASE_URL || 'https://www.yuque.com';

export const useYuqueDatasetRequest = ({ yuqueServer }: { yuqueServer: YuqueServer }) => {
  const instance = axios.create({
    baseURL: yuqueBaseUrl,
    timeout: 60000, // request timeout
    headers: {
      'X-Auth-Token': yuqueServer.token
    }
  });

  /**
   * Check the response payload
   */
  const checkRes = (data: ResponseDataType) => {
    if (data === undefined) {
      addLog.info('yuque dataset data is empty');
      return Promise.reject('服务器异常');
    }
    return data.data;
  };
  const responseError = (err: any) => {
    console.log('error->', '请求错误', err);

    if (!err) {
      return Promise.reject({ message: '未知错误' });
    }
    if (typeof err === 'string') {
      return Promise.reject({ message: err });
    }
    if (typeof err.message === 'string') {
      return Promise.reject({ message: err.message });
    }
    if (typeof err.data === 'string') {
      return Promise.reject({ message: err.data });
    }
    if (err?.response?.data) {
      return Promise.reject(err?.response?.data);
    }
    return Promise.reject(err);
  };

  const request = <T>(url: string, data: any, method: Method): Promise<T> => {
    /* strip undefined fields */
    for (const key in data) {
      if (data[key] === undefined) {
        delete data[key];
      }
    }

    return instance
      .request({
        url,
        method,
        data: ['POST', 'PUT'].includes(method) ? data : undefined,
        params: !['POST', 'PUT'].includes(method) ? data : undefined
      })
      .then((res) => checkRes(res.data))
      .catch((err) => responseError(err));
  };

  const listFiles = async ({ parentId }: { parentId?: ParentIdType }) => {
    // Auto set baseurl to parentId
    if (!parentId) {
      if (yuqueServer.basePath) parentId = yuqueServer.basePath;
    }

    let files: APIFileItem[] = [];

    if (!parentId) {
      const limit = 100;
      let offset = 0;
      let allData: YuqueRepoListResponse = [];

      while (true) {
        const data = await request<YuqueRepoListResponse>(
          `/api/v2/groups/${yuqueServer.userId}/repos`,
          {
            offset,
            limit
          },
          'GET'
        );

        if (!data || data.length === 0) break;

        allData = [...allData, ...data];
        if (data.length < limit) break;

        offset += limit;
      }

      files = allData.map((item) => {
        return {
          id: item.id,
          name: item.name,
          parentId: null,
          type: 'folder',
          updateTime: item.updated_at,
          createTime: item.created_at,
          hasChild: true,
          slug: item.slug
        };
      });
    } else {
      if (typeof parentId === 'number') {
        const data = await request<YuqueTocListResponse>(
          `/api/v2/repos/${parentId}/toc`,
          {},
          'GET'
        );

        return data
          .filter((item) => !item.parent_uuid && item.type !== 'LINK')
          .map((item) => ({
            id: `${parentId}-${item.id}-${item.uuid}`,
            name: item.title,
            parentId: item.parent_uuid,
            type: item.type === 'TITLE' ? ('folder' as const) : ('file' as const),
            updateTime: new Date(),
            createTime: new Date(),
            uuid: item.uuid,
            slug: item.slug,
            hasChild: !!item.child_uuid
          }));
      } else {
        const [repoId, uuid, parentUuid] = parentId.split(/-(.*?)-(.*)/);
        const data = await request<YuqueTocListResponse>(`/api/v2/repos/${repoId}/toc`, {}, 'GET');

        return data
          .filter((item) => item.parent_uuid === parentUuid)
          .map((item) => ({
            id: `${repoId}-${item.id}-${item.uuid}`,
            name: item.title,
            parentId: item.parent_uuid,
            type: item.type === 'TITLE' ? ('folder' as const) : ('file' as const),
            updateTime: new Date(),
            createTime: new Date(),
            uuid: item.uuid,
            slug: item.slug,
            hasChild: !!item.child_uuid
          }));
      }
    }

    if (!Array.isArray(files)) {
      return Promise.reject('Invalid file list format');
    }
    if (files.some((file) => !file.id || !file.name || typeof file.type === 'undefined')) {
      return Promise.reject('Invalid file data format');
    }
    return files;
  };

  const getFileContent = async ({
    apiFileId
  }: {
    apiFileId: string;
  }): Promise<ApiFileReadContentResponse> => {
    const [parentId, fileId] = apiFileId.split(/-(.*?)-(.*)/);

    const data = await request<{ title: string; body: string }>(
      `/api/v2/repos/${parentId}/docs/${fileId}`,
      {},
      'GET'
    );

    return {
      title: data.title,
      rawText: data.body
    };
  };

  const getFilePreviewUrl = async ({ apiFileId }: { apiFileId: string }) => {
    const [parentId, fileId] = apiFileId.split(/-(.*?)-(.*)/);

    const { slug: parentSlug } = await request<{ slug: string }>(
      `/api/v2/repos/${parentId}`,
      { id: apiFileId },
      'GET'
    );

    const { slug: fileSlug } = await request<{ slug: string }>(
      `/api/v2/repos/${parentId}/docs/${fileId}`,
      {},
      'GET'
    );

    return `${yuqueBaseUrl}/${yuqueServer.userId}/${parentSlug}/${fileSlug}`;
  };

  const getFileDetail = async ({
    apiFileId
  }: {
    apiFileId: string;
  }): Promise<ApiDatasetDetailResponse> => {
    // If the id is numeric, treat it as a repo and fetch the repo list
    if (typeof apiFileId === 'number' || !isNaN(Number(apiFileId))) {
      const limit = 100;
      let offset = 0;
      let allData: YuqueRepoListResponse = [];

      while (true) {
        const data = await request<YuqueRepoListResponse>(
          `/api/v2/groups/${yuqueServer.userId}/repos`,
          {
            offset,
            limit
          },
          'GET'
        );

        if (!data || data.length === 0) break;

        allData = [...allData, ...data];
        if (data.length < limit) break;

        offset += limit;
      }

      const file = allData.find((item) => Number(item.id) === Number(apiFileId));
      if (!file) {
        return Promise.reject('文件不存在');
      }
      return {
        id: file.id,
        name: file.name,
        parentId: null
      };
    } else {
      const [repoId, parentUuid, fileId] = apiFileId.split(/-(.*?)-(.*)/);
      const data = await request<YuqueTocListResponse>(`/api/v2/repos/${repoId}/toc`, {}, 'GET');
      const file = data.find((item) => item.uuid === fileId);
      if (!file) {
        return Promise.reject('文件不存在');
      }
      const parentfile = data.find((item) => item.uuid === file.parent_uuid);
      const parentId = `${repoId}-${parentfile?.id}-${parentfile?.uuid}`;

      // If parent_uuid is empty, the doc sits at the repo root, so return the repo as the parent
      if (file.parent_uuid) {
        return {
          id: file.id,
          name: file.title,
          parentId: parentId
        };
      } else {
        return {
          id: file.id,
          name: file.title,
          parentId: repoId
        };
      }
    }
  };

  return {
    getFileContent,
    listFiles,
    getFilePreviewUrl,
    getFileDetail
  };
};
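A hypothetical caller for the client above. Credential values and the composite id are placeholders; doc ids follow the `${repoId}-${docId}-${uuid}` shape that listFiles builds:

const demo = async () => {
  const { listFiles, getFileContent, getFilePreviewUrl } = useYuqueDatasetRequest({
    yuqueServer: { userId: 'my-yuque-group', token: 'my-token', basePath: '' } as YuqueServer
  });

  const repos = await listFiles({}); // top level: repos are rendered as folders
  const doc = await getFileContent({ apiFileId: '123-456-abcdef' });
  const url = await getFilePreviewUrl({ apiFileId: '123-456-abcdef' });
  console.log(repos.length, doc.title, url);
};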
@@ -223,28 +223,29 @@ const toolChoice = async (props: ActionProps) => {
    }
  ];

  const body = llmCompletionsBodyFormat(
    {
      stream: true,
      model: extractModel.model,
      temperature: 0.01,
      messages: filterMessages,
      tools,
      tool_choice: { type: 'function', function: { name: agentFunName } }
    },
    extractModel
  );
  const { response } = await createChatCompletion({
    body: llmCompletionsBodyFormat(
      {
        stream: true,
        model: extractModel.model,
        temperature: 0.01,
        messages: filterMessages,
        tools,
        tool_choice: { type: 'function', function: { name: agentFunName } }
      },
      extractModel
    ),
    body,
    userKey: externalProvider.openaiAccount
  });
  const { toolCalls, usage } = await formatLLMResponse(response);
  const { text, toolCalls, usage } = await formatLLMResponse(response);

  const arg: Record<string, any> = (() => {
    try {
      return json5.parse(toolCalls?.[0]?.function?.arguments || '');
    } catch (error) {
      console.log(agentFunction.parameters);
      console.log(toolCalls?.[0]?.function);
      console.log('body', body);
      console.log('AI response', text, toolCalls?.[0]?.function);
      console.log('Your model may not support tool_call', error);
      return {};
    }
@@ -1,13 +1,14 @@
import { createChatCompletion } from '../../../../ai/config';
import { filterGPTMessageByMaxContext, loadRequestMessages } from '../../../../chat/utils';
import {
  type ChatCompletion,
  type StreamChatType,
  type ChatCompletionMessageParam,
  type ChatCompletionCreateParams,
  type ChatCompletionMessageFunctionCall,
  type ChatCompletionFunctionMessageParam,
  type ChatCompletionAssistantMessageParam
import type {
  ChatCompletion,
  StreamChatType,
  ChatCompletionMessageParam,
  ChatCompletionCreateParams,
  ChatCompletionMessageFunctionCall,
  ChatCompletionFunctionMessageParam,
  ChatCompletionAssistantMessageParam,
  CompletionFinishReason
} from '@fastgpt/global/core/ai/type.d';
import { type NextApiResponse } from 'next';
import { responseWriteController } from '../../../../../common/response';
@@ -259,14 +260,15 @@ export const runToolWithFunctionCall = async (
    }
  });

  let { answer, functionCalls, inputTokens, outputTokens } = await (async () => {
  let { answer, functionCalls, inputTokens, outputTokens, finish_reason } = await (async () => {
    if (isStreamResponse) {
      if (!res || res.closed) {
        return {
          answer: '',
          functionCalls: [],
          inputTokens: 0,
          outputTokens: 0
          outputTokens: 0,
          finish_reason: 'close' as const
        };
      }
      const result = await streamResponse({
@@ -281,10 +283,12 @@ export const runToolWithFunctionCall = async (
        answer: result.answer,
        functionCalls: result.functionCalls,
        inputTokens: result.usage.prompt_tokens,
        outputTokens: result.usage.completion_tokens
        outputTokens: result.usage.completion_tokens,
        finish_reason: result.finish_reason
      };
    } else {
      const result = aiResponse as ChatCompletion;
      const finish_reason = result.choices?.[0]?.finish_reason as CompletionFinishReason;
      const function_call = result.choices?.[0]?.message?.function_call;
      const usage = result.usage;

@@ -315,7 +319,8 @@ export const runToolWithFunctionCall = async (
        answer,
        functionCalls: toolCalls,
        inputTokens: usage?.prompt_tokens,
        outputTokens: usage?.completion_tokens
        outputTokens: usage?.completion_tokens,
        finish_reason
      };
    }
  })();
@@ -481,7 +486,8 @@ export const runToolWithFunctionCall = async (
      completeMessages,
      assistantResponses: toolNodeAssistants,
      runTimes,
      toolWorkflowInteractiveResponse
      toolWorkflowInteractiveResponse,
      finish_reason
    };
  }

@@ -495,7 +501,8 @@ export const runToolWithFunctionCall = async (
      toolNodeInputTokens,
      toolNodeOutputTokens,
      assistantResponses: toolNodeAssistants,
      runTimes
      runTimes,
      finish_reason
    }
  );
} else {
@@ -523,7 +530,8 @@ export const runToolWithFunctionCall = async (
      : outputTokens,
    completeMessages,
    assistantResponses: [...assistantResponses, ...toolNodeAssistant.value],
    runTimes: (response?.runTimes || 0) + 1
    runTimes: (response?.runTimes || 0) + 1,
    finish_reason
  };
}
};
@@ -546,28 +554,25 @@ async function streamResponse({
    readStream: stream
  });

  let textAnswer = '';
  let functionCalls: ChatCompletionMessageFunctionCall[] = [];
  let functionId = getNanoid();
  let usage = getLLMDefaultUsage();

  const { parsePart } = parseLLMStreamResponse();
  const { parsePart, getResponseData, updateFinishReason } = parseLLMStreamResponse();

  for await (const part of stream) {
    usage = part.usage || usage;
    if (res.closed) {
      stream.controller?.abort();
      updateFinishReason('close');
      break;
    }

    const { content: toolChoiceContent, responseContent } = parsePart({
    const { responseContent } = parsePart({
      part,
      parseThinkTag: false,
      retainDatasetCite
    });

    const responseChoice = part.choices?.[0]?.delta;
    textAnswer += toolChoiceContent;

    if (responseContent) {
      workflowStreamResponse?.({
@@ -577,7 +582,7 @@ async function streamResponse({
        text: responseContent
      })
    });
  } else if (responseChoice.function_call) {
  } else if (responseChoice?.function_call) {
    const functionCall: {
      arguments?: string;
      name?: string;
@@ -640,5 +645,7 @@ async function streamResponse({
    }
  }

  return { answer: textAnswer, functionCalls, usage };
  const { content, finish_reason, usage } = getResponseData();

  return { answer: content, functionCalls, finish_reason, usage };
}
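The recurring change in these hunks is that per-chunk accumulation (textAnswer += ..., finish_reason = ...) moves into parseLLMStreamResponse, which now also exposes getResponseData and updateFinishReason. A sketch of the consuming loop's resulting shape, assembled from the diff rather than quoted from a single file; writeDelta is a hypothetical stand-in for the workflowStreamResponse call:

const { parsePart, getResponseData, updateFinishReason } = parseLLMStreamResponse();

for await (const part of stream) {
  if (res.closed) {
    stream.controller?.abort();
    updateFinishReason('close'); // record why the stream ended early
    break;
  }

  const { responseContent } = parsePart({ part, parseThinkTag: false, retainDatasetCite });
  if (responseContent) {
    writeDelta(responseContent); // forward the delta to the client
  }
}

// Accumulated totals are read once at the end instead of being tracked by hand.
const { content, finish_reason, usage } = getResponseData();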
@@ -220,7 +220,8 @@ export const runToolWithPromptCall = async (

  const max_tokens = computedMaxToken({
    model: toolModel,
    maxToken
    maxToken,
    min: 100
  });
  const filterMessages = await filterGPTMessageByMaxContext({
    messages,
@@ -592,28 +593,22 @@ async function streamResponse({

  let startResponseWrite = false;
  let answer = '';
  let reasoning = '';
  let finish_reason: CompletionFinishReason = null;
  let usage = getLLMDefaultUsage();

  const { parsePart } = parseLLMStreamResponse();
  const { parsePart, getResponseData, updateFinishReason } = parseLLMStreamResponse();

  for await (const part of stream) {
    usage = part.usage || usage;
    if (res.closed) {
      stream.controller?.abort();
      finish_reason = 'close';
      updateFinishReason('close');
      break;
    }

    const { reasoningContent, content, responseContent, finishReason } = parsePart({
    const { reasoningContent, content, responseContent } = parsePart({
      part,
      parseThinkTag: aiChatReasoning,
      retainDatasetCite
    });
    finish_reason = finish_reason || finishReason;
    answer += content;
    reasoning += reasoningContent;

    // Reasoning response
    if (aiChatReasoning && reasoningContent) {
@@ -658,7 +653,9 @@ async function streamResponse({
    }
  }

  return { answer, reasoning, finish_reason, usage };
  const { reasoningContent, content, finish_reason, usage } = getResponseData();

  return { answer: content, reasoning: reasoningContent, finish_reason, usage };
}

const parseAnswer = (
@@ -7,17 +7,13 @@ import {
  type ChatCompletionToolMessageParam,
  type ChatCompletionMessageParam,
  type ChatCompletionTool,
  type ChatCompletionAssistantMessageParam,
  type CompletionFinishReason
} from '@fastgpt/global/core/ai/type';
import { type NextApiResponse } from 'next';
import { responseWriteController } from '../../../../../common/response';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import {
  ChatCompletionRequestMessageRoleEnum,
  getLLMDefaultUsage
} from '@fastgpt/global/core/ai/constants';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { dispatchWorkFlow } from '../../index';
import {
  type DispatchToolModuleProps,
@@ -254,7 +250,8 @@ export const runToolWithToolChoice = async (

  const max_tokens = computedMaxToken({
    model: toolModel,
    maxToken
    maxToken,
    min: 100
  });

  // Filter histories by maxToken
@@ -319,97 +316,101 @@ export const runToolWithToolChoice = async (
    }
  });

Removed:

  let { answer, toolCalls, finish_reason, inputTokens, outputTokens } = await (async () => {
    if (isStreamResponse) {
      if (!res || res.closed) {
        return {
          answer: '',
          toolCalls: [],
          finish_reason: 'close' as const,
          inputTokens: 0,
          outputTokens: 0
        };
      }

      const result = await streamResponse({
        res,
        workflowStreamResponse,
        toolNodes,
        stream: aiResponse,
        aiChatReasoning,
        retainDatasetCite
      });

      return {
        answer: result.answer,
        toolCalls: result.toolCalls,
        finish_reason: result.finish_reason,
        inputTokens: result.usage.prompt_tokens,
        outputTokens: result.usage.completion_tokens
      };
    } else {
      const result = aiResponse as ChatCompletion;
      const finish_reason = result.choices?.[0]?.finish_reason as CompletionFinishReason;
      const calls = result.choices?.[0]?.message?.tool_calls || [];
      const answer = result.choices?.[0]?.message?.content || '';
      // @ts-ignore
      const reasoningContent = result.choices?.[0]?.message?.reasoning_content || '';
      const usage = result.usage;

      if (aiChatReasoning && reasoningContent) {
        workflowStreamResponse?.({
          event: SseResponseEventEnum.fastAnswer,
          data: textAdaptGptResponse({
            reasoning_content: removeDatasetCiteText(reasoningContent, retainDatasetCite)
          })
        });
      }

      // Format toolCalls
      const toolCalls = calls.map((tool) => {
        const toolNode = toolNodes.find((item) => item.nodeId === tool.function?.name);

        // Models without stream support need a tool-call response pushed to the client here
        workflowStreamResponse?.({
          event: SseResponseEventEnum.toolCall,
          data: {
            tool: {
              id: tool.id,
              toolName: toolNode?.name || '',
              toolAvatar: toolNode?.avatar || '',
              functionName: tool.function.name,
              params: tool.function?.arguments ?? '',
              response: ''
            }
          }
        });

        return {
          ...tool,
          toolName: toolNode?.name || '',
          toolAvatar: toolNode?.avatar || ''
        };
      });

      if (answer) {
        workflowStreamResponse?.({
          event: SseResponseEventEnum.fastAnswer,
          data: textAdaptGptResponse({
            text: removeDatasetCiteText(answer, retainDatasetCite)
          })
        });
      }

      return {
        answer,
        toolCalls: toolCalls,
        finish_reason,
        inputTokens: usage?.prompt_tokens,
        outputTokens: usage?.completion_tokens
      };
    }
  })();
  if (!answer && toolCalls.length === 0) {

Added:

  let { reasoningContent, answer, toolCalls, finish_reason, inputTokens, outputTokens } =
    await (async () => {
      if (isStreamResponse) {
        if (!res || res.closed) {
          return {
            reasoningContent: '',
            answer: '',
            toolCalls: [],
            finish_reason: 'close' as const,
            inputTokens: 0,
            outputTokens: 0
          };
        }

        const result = await streamResponse({
          res,
          workflowStreamResponse,
          toolNodes,
          stream: aiResponse,
          aiChatReasoning,
          retainDatasetCite
        });

        return {
          reasoningContent: result.reasoningContent,
          answer: result.answer,
          toolCalls: result.toolCalls,
          finish_reason: result.finish_reason,
          inputTokens: result.usage.prompt_tokens,
          outputTokens: result.usage.completion_tokens
        };
      } else {
        const result = aiResponse as ChatCompletion;
        const finish_reason = result.choices?.[0]?.finish_reason as CompletionFinishReason;
        const calls = result.choices?.[0]?.message?.tool_calls || [];
        const answer = result.choices?.[0]?.message?.content || '';
        // @ts-ignore
        const reasoningContent = result.choices?.[0]?.message?.reasoning_content || '';
        const usage = result.usage;

        if (aiChatReasoning && reasoningContent) {
          workflowStreamResponse?.({
            event: SseResponseEventEnum.fastAnswer,
            data: textAdaptGptResponse({
              reasoning_content: removeDatasetCiteText(reasoningContent, retainDatasetCite)
            })
          });
        }

        // Format toolCalls
        const toolCalls = calls.map((tool) => {
          const toolNode = toolNodes.find((item) => item.nodeId === tool.function?.name);

          // Models without stream support need a tool-call response pushed to the client here
          workflowStreamResponse?.({
            event: SseResponseEventEnum.toolCall,
            data: {
              tool: {
                id: tool.id,
                toolName: toolNode?.name || '',
                toolAvatar: toolNode?.avatar || '',
                functionName: tool.function.name,
                params: tool.function?.arguments ?? '',
                response: ''
              }
            }
          });

          return {
            ...tool,
            toolName: toolNode?.name || '',
            toolAvatar: toolNode?.avatar || ''
          };
        });

        if (answer) {
          workflowStreamResponse?.({
            event: SseResponseEventEnum.fastAnswer,
            data: textAdaptGptResponse({
              text: removeDatasetCiteText(answer, retainDatasetCite)
            })
          });
        }

        return {
          reasoningContent: (reasoningContent as string) || '',
          answer,
          toolCalls: toolCalls,
          finish_reason,
          inputTokens: usage?.prompt_tokens,
          outputTokens: usage?.completion_tokens
        };
      }
    })();
  if (!answer && !reasoningContent && toolCalls.length === 0) {
    return Promise.reject(getEmptyResponseTip());
  }

@@ -501,12 +502,13 @@ export const runToolWithToolChoice = async (

  if (toolCalls.length > 0) {
    // Run the tool, combine its results, and perform another round of AI calls
    const assistantToolMsgParams: ChatCompletionAssistantMessageParam[] = [
      ...(answer
    const assistantToolMsgParams: ChatCompletionMessageParam[] = [
      ...(answer || reasoningContent
        ? [
            {
              role: ChatCompletionRequestMessageRoleEnum.Assistant as 'assistant',
              content: answer
              content: answer,
              reasoning_text: reasoningContent
            }
          ]
        : []),
@@ -627,9 +629,10 @@ export const runToolWithToolChoice = async (
    );
  } else {
    // No tool is invoked, indicating that the process is over
    const gptAssistantResponse: ChatCompletionAssistantMessageParam = {
    const gptAssistantResponse: ChatCompletionMessageParam = {
      role: ChatCompletionRequestMessageRoleEnum.Assistant,
      content: answer
      content: answer,
      reasoning_text: reasoningContent
    };
    const completeMessages = filterMessages.concat(gptAssistantResponse);
    inputTokens = inputTokens || (await countGptMessagesTokens(requestMessages, tools));
@@ -671,34 +674,23 @@ async function streamResponse({
    readStream: stream
  });

  let textAnswer = '';
  let callingTool: { name: string; arguments: string } | null = null;
  let toolCalls: ChatCompletionMessageToolCall[] = [];
  let finish_reason: CompletionFinishReason = null;
  let usage = getLLMDefaultUsage();

  const { parsePart } = parseLLMStreamResponse();
  const { parsePart, getResponseData, updateFinishReason } = parseLLMStreamResponse();

  for await (const part of stream) {
    usage = part.usage || usage;
    if (res.closed) {
      stream.controller?.abort();
      finish_reason = 'close';
      updateFinishReason('close');
      break;
    }

    const {
      reasoningContent,
      content: toolChoiceContent,
      responseContent,
      finishReason
    } = parsePart({
    const { reasoningContent, responseContent } = parsePart({
      part,
      parseThinkTag: true,
      retainDatasetCite
    });
    textAnswer += toolChoiceContent;
    finish_reason = finishReason || finish_reason;

    const responseChoice = part.choices?.[0]?.delta;

@@ -800,5 +792,13 @@ async function streamResponse({
    }
  }

  return { answer: textAnswer, toolCalls: toolCalls.filter(Boolean), finish_reason, usage };
  const { reasoningContent, content, finish_reason, usage } = getResponseData();

  return {
    reasoningContent,
    answer: content,
    toolCalls: toolCalls.filter(Boolean),
    finish_reason,
    usage
  };
}
@@ -556,30 +556,21 @@ async function streamResponse({
    res,
    readStream: stream
  });
  let answer = '';
  let reasoning = '';
  let finish_reason: CompletionFinishReason = null;
  let usage: CompletionUsage = getLLMDefaultUsage();

  const { parsePart } = parseLLMStreamResponse();
  const { parsePart, getResponseData, updateFinishReason } = parseLLMStreamResponse();

  for await (const part of stream) {
    usage = part.usage || usage;

    if (res.closed) {
      stream.controller?.abort();
      finish_reason = 'close';
      updateFinishReason('close');
      break;
    }

    const { reasoningContent, content, responseContent, finishReason } = parsePart({
    const { reasoningContent, responseContent } = parsePart({
      part,
      parseThinkTag,
      retainDatasetCite
    });
    finish_reason = finish_reason || finishReason;
    answer += content;
    reasoning += reasoningContent;

    if (aiChatReasoning && reasoningContent) {
      workflowStreamResponse?.({
@@ -602,5 +593,7 @@ async function streamResponse({
    }
  }

  const { reasoningContent: reasoning, content: answer, finish_reason, usage } = getResponseData();

  return { answer, reasoning, finish_reason, usage };
}
@@ -49,8 +49,6 @@ export const dispatchRunCode = async (props: RunCodeType): Promise<RunCodeRespon
    variables: customVariables
  });

  console.log(runResult);

  if (runResult.success) {
    return {
      [NodeOutputKeyEnum.rawResponse]: runResult.data.codeReturn,
@@ -211,12 +211,12 @@ export const getFileContentFromLinks = async ({
  // Read file
  const { rawText } = await readRawContentByFileBuffer({
    extension,
    isQAImport: false,
    teamId,
    tmbId,
    buffer,
    encoding,
    customPdfParse
    customPdfParse,
    getFormatText: true
  });

  // Add to buffer
@@ -28,7 +28,7 @@
  "lodash": "^4.17.21",
  "mammoth": "^1.6.0",
  "mongoose": "^8.10.1",
  "multer": "1.4.5-lts.1",
  "multer": "2.0.0",
  "mysql2": "^3.11.3",
  "next": "14.2.28",
  "nextjs-cors": "^2.2.0",
@@ -10,7 +10,7 @@ import { AppPermission } from '@fastgpt/global/support/permission/app/controller
import { type PermissionValueType } from '@fastgpt/global/support/permission/type';
import { AppFolderTypeList } from '@fastgpt/global/core/app/constants';
import { type ParentIdType } from '@fastgpt/global/common/parentFolder/type';
import { splitCombinePluginId } from '../../../core/app/plugin/controller';
import { splitCombineToolId } from '../../../core/app/plugin/controller';
import { PluginSourceEnum } from '@fastgpt/global/core/plugin/constants';
import { type AuthModeType, type AuthResponseType } from '../type';
import { AppDefaultPermissionVal } from '@fastgpt/global/support/permission/app/constant';
@@ -24,7 +24,7 @@ export const authPluginByTmbId = async ({
  appId: string;
  per: PermissionValueType;
}) => {
  const { source } = await splitCombinePluginId(appId);
  const { source } = splitCombineToolId(appId);
  if (source === PluginSourceEnum.personal) {
    const { app } = await authAppByTmbId({
      appId,
@@ -1,5 +1,6 @@
import iconv from 'iconv-lite';
import { type ReadRawTextByBuffer, type ReadFileResponse } from '../type';
import { matchMdImg } from '@fastgpt/global/common/string/markdown';

const rawEncodingList = [
  'ascii',
@@ -34,7 +35,10 @@ export const readFileRawText = ({ buffer, encoding }: ReadRawTextByBuffer): Read
    }
  })();

  const { text, imageList } = matchMdImg(content);

  return {
    rawText: content
    rawText: text,
    imageList
  };
};
@@ -28,11 +28,11 @@ export const readXlsxRawText = async ({
      if (!header) return;

      const formatText = `| ${header.join(' | ')} |
| ${header.map(() => '---').join(' | ')} |
${csvArr
  .slice(1)
  .map((row) => `| ${row.map((item) => item.replace(/\n/g, '\\n')).join(' | ')} |`)
  .join('\n')}`;
| ${header.map(() => '---').join(' | ')} |
${csvArr
        .slice(1)
        .map((row) => `| ${row.map((item) => item.replace(/\n/g, '\\n')).join(' | ')} |`)
        .join('\n')}`;

      return formatText;
    })
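To make the template literal concrete: for a sheet whose header row is name, age and one data row Alice, 30, formatText evaluates to the markdown table below (any newlines inside a cell are escaped to \n first):

| name | age |
| --- | --- |
| Alice | 30 |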
@@ -6,10 +6,6 @@ export const getUserFingerprint = async () => {
  console.log(result.visitorId);
};

export const hasHttps = () => {
  return window.location.protocol === 'https:';
};

export const subRoute = process.env.NEXT_PUBLIC_BASE_URL;

export const getWebReqUrl = (url: string = '') => {
@@ -20,3 +16,32 @@ export const getWebReqUrl = (url: string = '') => {
  if (!url.startsWith('/') || url.startsWith(baseUrl)) return url;
  return `${baseUrl}${url}`;
};

export const isMobile = () => {
  // SSR: return false
  if (typeof window === 'undefined') return false;

  // 1. Check User-Agent
  const userAgent = navigator.userAgent.toLowerCase();
  const mobileKeywords = [
    'android',
    'iphone',
    'ipod',
    'ipad',
    'windows phone',
    'blackberry',
    'webos',
    'iemobile',
    'opera mini'
  ];
  const isMobileUA = mobileKeywords.some((keyword) => userAgent.includes(keyword));

  // 2. Check screen width
  const isMobileWidth = window.innerWidth <= 900;

  // 3. Check if touch events are supported (to exclude touch-screen PCs)
  const isTouchDevice = 'ontouchstart' in window || navigator.maxTouchPoints > 0;

  // Treat as mobile when the UA matches, or when the screen is narrow and touch is supported
  return isMobileUA || (isMobileWidth && isTouchDevice);
};
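A short usage sketch for isMobile; the component names are hypothetical. Because the function returns false when window is undefined, it is safe to call during server-side rendering:

import React from 'react';

const ResponsiveHint = () => {
  // Safe on the server: isMobile() short-circuits to false without window.
  const mobile = isMobile();
  return <span>{mobile ? 'Mobile layout' : 'Desktop layout'}</span>;
};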
@@ -2,6 +2,7 @@

export const iconPaths = {
  alignLeft: () => import('./icons/alignLeft.svg'),
  backup: () => import('./icons/backup.svg'),
  book: () => import('./icons/book.svg'),
  change: () => import('./icons/change.svg'),
  chatSend: () => import('./icons/chatSend.svg'),
@@ -229,6 +230,7 @@ export const iconPaths = {
  'core/dataset/tableCollection': () => import('./icons/core/dataset/tableCollection.svg'),
  'core/dataset/tag': () => import('./icons/core/dataset/tag.svg'),
  'core/dataset/websiteDataset': () => import('./icons/core/dataset/websiteDataset.svg'),
  'core/dataset/otherDataset': () => import('./icons/core/dataset/otherDataset.svg'),
  'core/dataset/websiteDatasetColor': () => import('./icons/core/dataset/websiteDatasetColor.svg'),
  'core/dataset/websiteDatasetOutline': () =>
    import('./icons/core/dataset/websiteDatasetOutline.svg'),
@@ -439,6 +441,7 @@ export const iconPaths = {
  point: () => import('./icons/point.svg'),
  preview: () => import('./icons/preview.svg'),
  'price/bg': () => import('./icons/price/bg.svg'),
  'price/pricearrow': () => import('./icons/price/pricearrow.svg'),
  'price/right': () => import('./icons/price/right.svg'),
  save: () => import('./icons/save.svg'),
  sliderTag: () => import('./icons/sliderTag.svg'),
4 packages/web/components/common/Icon/icons/backup.svg Normal file
@@ -0,0 +1,4 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" >
<path fill-rule="evenodd" clip-rule="evenodd" d="M17.9386 2H10.2616C9.73441 1.99998 9.27964 1.99997 8.90507 2.03057C8.50973 2.06287 8.11651 2.13419 7.73813 2.32698C7.17364 2.6146 6.7147 3.07354 6.42708 3.63803C6.23429 4.01641 6.16297 4.40963 6.13067 4.80497C6.10007 5.17955 6.10008 5.63431 6.1001 6.16146V13.8385C6.10008 14.3657 6.10007 14.8205 6.13067 15.195C6.16297 15.5904 6.23429 15.9836 6.42708 16.362C6.7147 16.9265 7.17364 17.3854 7.73813 17.673C8.11651 17.8658 8.50973 17.9371 8.90507 17.9694C9.27961 18 9.73432 18 10.2614 18H17.9386C18.4657 18 18.9206 18 19.2951 17.9694C19.6905 17.9371 20.0837 17.8658 20.4621 17.673C21.0266 17.3854 21.4855 16.9265 21.7731 16.362C21.9659 15.9836 22.0372 15.5904 22.0695 15.195C22.1001 14.8205 22.1001 14.3658 22.1001 13.8387V6.16148C22.1001 5.63439 22.1001 5.17951 22.0695 4.80497C22.0372 4.40963 21.9659 4.01641 21.7731 3.63803C21.4855 3.07354 21.0266 2.6146 20.4621 2.32698C20.0837 2.13419 19.6905 2.06287 19.2951 2.03057C18.9206 1.99997 18.4658 1.99998 17.9386 2ZM15.1001 16H17.9001C18.4767 16 18.8489 15.9992 19.1323 15.9761C19.4039 15.9539 19.5046 15.9162 19.5541 15.891C19.7423 15.7951 19.8952 15.6422 19.9911 15.454C20.0163 15.4045 20.054 15.3038 20.0762 15.0322C20.0993 14.7488 20.1001 14.3766 20.1001 13.8V11H15.1001V16ZM20.1001 9V6.2C20.1001 5.62345 20.0993 5.25117 20.0762 4.96784C20.054 4.69617 20.0163 4.59546 19.9911 4.54601C19.8952 4.35785 19.7423 4.20487 19.5541 4.109C19.5046 4.0838 19.4039 4.04612 19.1323 4.02393C18.8489 4.00078 18.4767 4 17.9001 4H10.3001C9.72355 4 9.35127 4.00078 9.06793 4.02393C8.79627 4.04612 8.69555 4.0838 8.64611 4.109C8.45795 4.20487 8.30497 4.35785 8.20909 4.54601C8.1839 4.59546 8.14622 4.69617 8.12403 4.96784C8.10088 5.25117 8.1001 5.62345 8.1001 6.2V9H20.1001ZM13.1001 11V16H10.3001C9.72355 16 9.35127 15.9992 9.06793 15.9761C8.79627 15.9539 8.69555 15.9162 8.64611 15.891C8.45795 15.7951 8.30497 15.6422 8.20909 15.454C8.1839 15.4045 8.14622 15.3038 8.12403 15.0322C8.10088 14.7488 8.1001 14.3766 8.1001 13.8V11H13.1001Z" />
<path d="M4.1001 7C4.1001 6.44772 3.65238 6 3.1001 6C2.54781 6 2.1001 6.44772 2.1001 7L2.1001 15.9217C2.10009 16.7823 2.10008 17.4887 2.14702 18.0632C2.19567 18.6586 2.29968 19.2 2.55787 19.7068C2.96054 20.497 3.60306 21.1396 4.39334 21.5422C4.90007 21.8004 5.44147 21.9044 6.03691 21.9531C6.61142 22 7.3177 22 8.17835 22H17.1001C17.6524 22 18.1001 21.5523 18.1001 21C18.1001 20.4477 17.6524 20 17.1001 20H8.2201C7.30751 20 6.68322 19.9992 6.19978 19.9597C5.72801 19.9212 5.47911 19.8508 5.30132 19.7602C4.88736 19.5493 4.55081 19.2127 4.33988 18.7988C4.2493 18.621 4.17892 18.3721 4.14038 17.9003C4.10088 17.4169 4.1001 16.7926 4.1001 15.88V7Z" />
</svg>
@@ -1,11 +1,11 @@
<svg viewBox="0 0 20 20" fill="none" xmlns="http://www.w3.org/2000/svg">
<rect width="100%" height="100%" fill="url(#paint0_linear_7967_30275)" />
<path fill-rule="evenodd" clip-rule="evenodd" d="M12.7552 4.8767C12.3073 4.92969 11.9962 5.33456 12.0603 5.78101L13.3235 14.5762C13.3876 15.0226 13.8027 15.3416 14.2506 15.2886L16.1502 15.0639C16.5981 15.0109 16.9093 14.606 16.8451 14.1596L15.582 5.36443C15.5178 4.91798 15.1028 4.59901 14.6548 4.65199L12.7552 4.8767ZM4.0675 5.52248C4.0675 5.07145 4.43314 4.70582 4.88417 4.70582H6.80225C7.25328 4.70582 7.61892 5.07145 7.61892 5.52248V14.4772C7.61892 14.9282 7.25328 15.2938 6.80225 15.2938H4.88417C4.43314 15.2938 4.0675 14.9282 4.0675 14.4772V5.52248ZM8.20321 5.52248C8.20321 5.07145 8.56885 4.70582 9.01988 4.70582H10.938C11.389 4.70582 11.7546 5.07145 11.7546 5.52248V14.4772C11.7546 14.9282 11.389 15.2938 10.938 15.2938H9.01988C8.56885 15.2938 8.20321 14.9282 8.20321 14.4772V5.52248Z" fill="white"/>
<svg viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
<rect width="32" height="32" fill="url(#paint0_linear_20384_1853)"/>
<path d="M22.8185 7.25714C23.3782 6.69749 24.2855 6.69749 24.8452 7.25714C25.4048 7.81678 25.4048 8.72415 24.8452 9.28379L23.8314 10.2976C25.0837 12.4456 24.7895 15.2473 22.9487 17.0881L21.7775 18.2593C21.4078 18.6289 20.8085 18.6289 20.4388 18.2593L13.8431 11.6635C13.4734 11.2938 13.4734 10.6945 13.8431 10.3249L15.0142 9.15369C16.855 7.3129 19.6567 7.01864 21.8047 8.27093L22.8185 7.25714Z" fill="white"/>
<path d="M14.0661 16.4523L15.65 18.0362L16.3429 17.3434C16.7073 16.9789 17.2983 16.9789 17.6628 17.3434L18.3695 18.0501C18.734 18.4146 18.734 19.0056 18.3695 19.3701L17.6767 20.0629L18.1561 20.5422C18.5257 20.9118 18.5257 21.5111 18.1561 21.8807L16.9857 23.051C15.1449 24.8918 12.3432 25.1861 10.1952 23.9338L9.18136 24.9476C8.62172 25.5073 7.71435 25.5073 7.1547 24.9476C6.59506 24.388 6.59506 23.4806 7.1547 22.9209L8.16853 21.9071C6.91623 19.7591 7.21048 16.9574 9.05127 15.1166L10.2217 13.9462C10.5913 13.5767 11.1905 13.5767 11.5601 13.9462L12.0395 14.4256L12.7323 13.7328C13.0968 13.3683 13.6877 13.3683 14.0522 13.7328L14.7589 14.4396C15.1234 14.804 15.1234 15.395 14.7589 15.7595L14.0661 16.4523Z" fill="white"/>
<defs>
<linearGradient id="paint0_linear_7967_30275" x1="1.5" y1="20" x2="20" y2="1.6056e-06" gradientUnits="userSpaceOnUse">
<stop stop-color="#C172FF"/>
<stop offset="1" stop-color="#F19EFF"/>
<linearGradient id="paint0_linear_20384_1853" x1="2.4" y1="32" x2="32" y2="2.56896e-06" gradientUnits="userSpaceOnUse">
<stop stop-color="#00CAD1"/>
<stop offset="1" stop-color="#73E6D8"/>
</linearGradient>
</defs>
</svg>
@@ -0,0 +1,11 @@
<svg viewBox="0 0 20 20" fill="none" xmlns="http://www.w3.org/2000/svg">
<rect width="100%" height="100%" fill="url(#paint0_linear_7967_30275)" />
<path fill-rule="evenodd" clip-rule="evenodd" d="M12.7552 4.8767C12.3073 4.92969 11.9962 5.33456 12.0603 5.78101L13.3235 14.5762C13.3876 15.0226 13.8027 15.3416 14.2506 15.2886L16.1502 15.0639C16.5981 15.0109 16.9093 14.606 16.8451 14.1596L15.582 5.36443C15.5178 4.91798 15.1028 4.59901 14.6548 4.65199L12.7552 4.8767ZM4.0675 5.52248C4.0675 5.07145 4.43314 4.70582 4.88417 4.70582H6.80225C7.25328 4.70582 7.61892 5.07145 7.61892 5.52248V14.4772C7.61892 14.9282 7.25328 15.2938 6.80225 15.2938H4.88417C4.43314 15.2938 4.0675 14.9282 4.0675 14.4772V5.52248ZM8.20321 5.52248C8.20321 5.07145 8.56885 4.70582 9.01988 4.70582H10.938C11.389 4.70582 11.7546 5.07145 11.7546 5.52248V14.4772C11.7546 14.9282 11.389 15.2938 10.938 15.2938H9.01988C8.56885 15.2938 8.20321 14.9282 8.20321 14.4772V5.52248Z" fill="white"/>
<defs>
<linearGradient id="paint0_linear_7967_30275" x1="1.5" y1="20" x2="20" y2="1.6056e-06" gradientUnits="userSpaceOnUse">
<stop stop-color="#C172FF"/>
<stop offset="1" stop-color="#F19EFF"/>
</linearGradient>
</defs>
</svg>
@@ -0,0 +1,3 @@
<svg width="23" height="41" viewBox="0 0 23 41" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M0.487322 0.838902C5.52993 0.392809 14.4127 1.30573 19.3633 8.23636L19.7315 8.77445C23.4021 14.3865 23.0442 21.6048 19.9483 26.9717C17.7504 30.7815 14.1557 33.6822 9.62013 34.4327C9.59005 34.4426 9.55881 34.45 9.52638 34.4541C9.52493 34.4546 9.5225 34.4561 9.51955 34.4571C9.5153 34.4585 9.50623 34.4621 9.49806 34.4649L9.41994 34.4873C9.41814 34.4879 9.41426 34.49 9.40724 34.4922C9.39802 34.4952 9.37728 34.5023 9.35939 34.5078C9.3403 34.5137 9.31328 34.5214 9.2842 34.5284L9.1592 34.5469L8.89748 34.5459L8.79006 34.544C7.42741 34.6867 5.98678 34.6389 4.47853 34.3672C4.53191 34.4008 4.6025 34.4492 4.66799 34.5147C4.69099 34.5376 4.72144 34.5593 4.80959 34.6211C4.8805 34.6708 5.00368 34.7557 5.10646 34.878L5.13967 34.919C5.14802 34.9291 5.15311 34.935 5.15627 34.9385L5.41017 35.1338C5.6673 35.3405 5.90801 35.5576 6.13576 35.7627C6.44566 36.0419 6.73402 36.3007 7.04494 36.5264L7.20998 36.6602C7.36441 36.8001 7.49012 36.9562 7.59279 37.0967L7.68263 37.1787C7.73953 37.2274 7.85194 37.3224 7.94045 37.46L7.96291 37.4883C7.99181 37.5184 8.044 37.5555 8.15529 37.6202C8.27226 37.6881 8.51757 37.8164 8.6924 38.041L8.87306 38.2422C8.93835 38.3118 9.01823 38.4008 9.09572 38.5069C9.42285 38.7501 9.7519 38.9908 10.084 39.2266L10.3828 39.42C10.4872 39.4826 10.5969 39.5441 10.7119 39.6084C10.8236 39.6709 10.9418 39.7372 11.0615 39.8067L11.4199 40.0274L11.4981 40.0918C11.6617 40.257 11.6926 40.5197 11.5586 40.7207C11.4246 40.9213 11.1703 40.9945 10.9551 40.9073L10.8653 40.8594L10.5606 40.6709C10.4541 40.6092 10.3418 40.5476 10.2236 40.4815C10.052 40.3854 9.86755 40.2816 9.68556 40.1641L9.5049 40.042C9.41935 39.9813 9.33432 39.9197 9.24904 39.8584C9.2181 39.8524 9.1871 39.8442 9.15724 39.8321L9.06838 39.7842L8.88869 39.667C8.73114 39.5618 8.55702 39.4327 8.3965 39.2383L8.38967 39.2295C8.00348 38.941 7.62059 38.6476 7.2422 38.3487L6.50783 37.7569C6.00954 37.3479 5.57646 36.9545 5.12013 36.5811L4.65236 36.2149C4.41843 36.0403 4.17408 35.8774 3.92092 35.7198L3.13967 35.2539C3.00588 35.1761 2.86098 35.1009 2.69631 35.0137C2.53656 34.9291 2.36142 34.8357 2.19142 34.7305C1.89714 34.5484 1.58338 34.3126 1.34963 33.9825L1.2549 33.835L1.21291 33.7432C1.21251 33.742 1.21232 33.7405 1.21193 33.7393C1.08214 33.4994 1.16845 33.1982 1.40724 33.0645L3.10842 32.1084C3.68186 31.7899 4.26243 31.477 4.85158 31.1846L5.51271 30.8682C6.1781 30.5614 6.85297 30.2778 7.49806 29.9903L8.07424 29.7295C8.64423 29.4677 9.19873 29.1967 9.72951 28.8877L9.99416 28.7246C10.0791 28.6011 10.2177 28.5164 10.3789 28.5078C10.3824 28.5077 10.3862 28.5088 10.3897 28.5088C10.3915 28.5085 10.3937 28.5073 10.3955 28.5069C10.4079 28.5048 10.4781 28.4925 10.5654 28.5078C10.6116 28.516 10.7084 28.5395 10.8018 28.6182C10.9121 28.7113 10.972 28.8433 10.9785 28.9766C10.9839 29.0882 10.9514 29.1725 10.9336 29.211C10.9138 29.2537 10.8915 29.2851 10.8789 29.3018C10.8541 29.3349 10.8303 29.3577 10.8213 29.3662C10.8008 29.3857 10.7823 29.401 10.7754 29.4063L10.6533 29.4912C10.5861 29.5373 10.5101 29.5851 10.4365 29.6299L10.2324 29.752C9.66368 30.0831 9.07573 30.3708 8.49025 30.6397L7.90529 30.9034C7.23393 31.2026 6.579 31.4778 5.93556 31.7744L5.29592 32.0801C4.80426 32.3242 4.32055 32.583 3.83791 32.8487L3.93849 32.8799L4.09963 32.9278C4.16584 32.9496 4.26199 32.9861 4.35646 33.0479L4.41213 33.0713C4.44909 33.0849 4.49227 33.0981 4.53517 33.1094L4.90236 33.2129C5.0111 33.2435 5.11243 33.2689 5.22365 33.2891L5.49416 33.3506C5.57406 33.3703 5.63482 33.3852 5.6924 33.3946L6.34084 33.5C6.45433 33.5184 6.57269 33.5275 6.70802 33.5371C6.83757 33.5463 6.99208 
33.5564 7.14455 33.5743L7.26662 33.5801C7.31235 33.5802 7.36473 33.5787 7.42677 33.5762C7.53858 33.5718 7.69494 33.5634 7.85256 33.5772H7.94631C7.98429 33.5753 8.02868 33.5718 8.07717 33.5664L8.40431 33.5352L8.48146 33.5293C8.50488 33.5292 8.52576 33.5306 8.54299 33.5323C8.56567 33.5345 8.58567 33.5385 8.59963 33.541C8.64475 33.5413 8.69333 33.5426 8.74318 33.543C13.3027 33.0453 16.9045 30.2471 19.082 26.4727C22.0174 21.3845 22.3308 14.5763 18.8945 9.32132L18.5498 8.81742C13.9144 2.32808 5.50466 1.3989 0.575212 1.835C0.300419 1.85909 0.0577133 1.65566 0.0332203 1.38089C0.00891035 1.10592 0.212384 0.863353 0.487322 0.838902ZM9.49123 34.461C9.49697 34.4606 9.506 34.46 9.51564 34.458C9.51858 34.4574 9.52136 34.4556 9.52345 34.4551C9.51294 34.4564 9.50195 34.4604 9.49123 34.461Z" fill="#DC7E03"/>
</svg>
@@ -3,8 +3,10 @@ import { Box, HStack, Icon, type StackProps } from '@chakra-ui/react';

const LightTip = ({
  text,
  icon = 'common/info',
  ...props
}: {
  icon?: string;
  text: string;
} & StackProps) => {
  return (
@@ -17,7 +19,7 @@ const LightTip = ({
      fontSize={'sm'}
      {...props}
    >
      <Icon name="common/info" w="1rem" />
      <Icon name={icon} w="1rem" />
      <Box>{text}</Box>
    </HStack>
  );
@@ -216,7 +216,7 @@ const MyMenu = ({
    if (offset) return offset;
    if (typeof width === 'number') return [-width / 2, 5];
    return [0, 5];
  }, [offset]);
  }, [offset, width]);

  return (
    <Menu
@@ -11,15 +11,16 @@ import {
  HStack,
  Box,
  Button,
  PopoverArrow
  PopoverArrow,
  Portal
} from '@chakra-ui/react';

const PopoverConfirm = ({
  content,
  showCancel,
  showCancel = true,
  type,
  Trigger,
  placement = 'bottom-start',
  placement = 'auto',
  offset,
  onConfirm,
  confirmText,
@@ -50,7 +51,7 @@ const PopoverConfirm = ({
  };
  if (type && map[type]) return map[type];
  return map.info;
}, [type, t]);
}, [type]);

const firstFieldRef = React.useRef(null);
const { onOpen, onClose, isOpen } = useDisclosure();
@@ -67,7 +68,7 @@ const PopoverConfirm = ({
  onClose={onClose}
  placement={placement}
  offset={offset}
  closeOnBlur={false}
  closeOnBlur={true}
  trigger={'click'}
  openDelay={100}
  closeDelay={100}
@@ -75,6 +76,7 @@ const PopoverConfirm = ({
  lazyBehavior="keepMounted"
  arrowSize={10}
  strategy={'fixed'}
  computePositionOnMount={true}
>
  <PopoverTrigger>{Trigger}</PopoverTrigger>
  <PopoverContent p={4}>
@@ -82,15 +84,25 @@ const PopoverConfirm = ({

  <HStack alignItems={'flex-start'} color={'myGray.700'}>
    <MyIcon name={map.icon as any} w={'1.5rem'} />
    <Box fontSize={'sm'}>{content}</Box>
    <Box fontSize={'sm'} whiteSpace={'pre-wrap'}>
      {content}
    </Box>
  </HStack>
  <HStack mt={1} justifyContent={'flex-end'}>
  <HStack mt={2} justifyContent={'flex-end'}>
    {showCancel && (
      <Button variant={'whiteBase'} size="sm" onClick={onClose}>
        {cancelText || t('common:Cancel')}
      </Button>
    )}
    <Button isLoading={loading} variant={map.variant} size="sm" onClick={onclickConfirm}>
    <Button
      isLoading={loading}
      variant={map.variant}
      size="sm"
      onClick={async (e) => {
        e.stopPropagation();
        await onclickConfirm();
      }}
    >
      {confirmText || t('common:Confirm')}
    </Button>
  </HStack>
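A hypothetical usage of PopoverConfirm with the new defaults (showCancel now defaults to true and placement to 'auto'); deleteItem and the multi-line content are illustrative:

<PopoverConfirm
  type={'delete'}
  content={'Delete this item?\nThis action cannot be undone.'} // pre-wrap keeps the line break
  Trigger={<Button size={'sm'}>Delete</Button>}
  onConfirm={() => deleteItem()}
/>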