Compare commits: v4.9.9-alp ... v4.9.10-de (25 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 9838593451 |  |
|  | c25cd48e72 |  |
|  | 874300a56a |  |
|  | 1dea2b71b4 |  |
|  | a8673344b1 |  |
|  | 9709ae7a4f |  |
|  | fae76e887a |  |
|  | 9af92d1eae |  |
|  | 6a6719e93d |  |
|  | 50481f4ca8 |  |
|  | 88bd3aaa9e |  |
|  | dd3c251603 |  |
|  | aa55f059d4 |  |
|  | 89c9a02650 |  |
|  | 0f3bfa280a |  |
|  | 593ebfd269 |  |
|  | f6dc2204f5 |  |
|  | d44c338059 |  |
|  | 1dac2b70ec |  |
|  | 9fef3e15fb |  |
|  | 2d2d0fffe9 |  |
|  | c6e0b5a1e7 |  |
|  | 932aa28a1f |  |
|  | 9c59bc2c17 |  |
|  | e145f63554 |  |
.vscode/settings.json (vendored, 2 changes)
```diff
@@ -21,7 +21,7 @@
   "i18n-ally.namespace": true,
   "i18n-ally.pathMatcher": "{locale}/{namespaces}.json",
   "i18n-ally.extract.targetPickingStrategy": "most-similar-by-key",
-  "i18n-ally.translate.engines": ["google"],
+  "i18n-ally.translate.engines": ["deepl","google"],
   "[typescript]": {
     "editor.defaultFormatter": "esbenp.prettier-vscode"
   },
```
```diff
@@ -132,15 +132,15 @@ services:
   # fastgpt
   sandbox:
     container_name: sandbox
-    image: ghcr.io/labring/fastgpt-sandbox:v4.9.8 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.8 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt-sandbox:v4.9.10 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.10 # Alibaba Cloud
    networks:
      - fastgpt
    restart: always
  fastgpt-mcp-server:
    container_name: fastgpt-mcp-server
-    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.8 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.8 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.10 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.10 # Alibaba Cloud
    ports:
      - 3005:3000
    networks:
@@ -150,8 +150,8 @@ services:
      - FASTGPT_ENDPOINT=http://fastgpt:3000
  fastgpt:
    container_name: fastgpt
-    image: ghcr.io/labring/fastgpt:v4.9.8 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.8 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt:v4.9.10 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.10 # Alibaba Cloud
    ports:
      - 3000:3000
    networks:
```
```diff
@@ -109,15 +109,15 @@ services:
   # fastgpt
   sandbox:
     container_name: sandbox
-    image: ghcr.io/labring/fastgpt-sandbox:v4.9.8 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.8 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt-sandbox:v4.9.10 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.10 # Alibaba Cloud
    networks:
      - fastgpt
    restart: always
  fastgpt-mcp-server:
    container_name: fastgpt-mcp-server
-    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.8 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.8 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.10 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.10 # Alibaba Cloud
    ports:
      - 3005:3000
    networks:
@@ -127,8 +127,8 @@ services:
      - FASTGPT_ENDPOINT=http://fastgpt:3000
  fastgpt:
    container_name: fastgpt
-    image: ghcr.io/labring/fastgpt:v4.9.8 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.8 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt:v4.9.10 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.10 # Alibaba Cloud
    ports:
      - 3000:3000
    networks:
```
```diff
@@ -23,7 +23,7 @@ services:
    volumes:
      - ./pg/data:/var/lib/postgresql/data
    healthcheck:
-      test: ['CMD', 'pg_isready', '-U', 'postgres', '-d', 'aiproxy']
+      test: ['CMD', 'pg_isready', '-U', 'postgres', '-d', 'postgres']
      interval: 5s
      timeout: 5s
      retries: 10
@@ -96,15 +96,15 @@ services:
  # fastgpt
  sandbox:
    container_name: sandbox
-    image: ghcr.io/labring/fastgpt-sandbox:v4.9.8 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.8 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt-sandbox:v4.9.10 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.10 # Alibaba Cloud
    networks:
      - fastgpt
    restart: always
  fastgpt-mcp-server:
    container_name: fastgpt-mcp-server
-    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.8 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.8 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.10 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.10 # Alibaba Cloud
    ports:
      - 3005:3000
    networks:
@@ -114,8 +114,8 @@ services:
      - FASTGPT_ENDPOINT=http://fastgpt:3000
  fastgpt:
    container_name: fastgpt
-    image: ghcr.io/labring/fastgpt:v4.9.8 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.8 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt:v4.9.10 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.10 # Alibaba Cloud
    ports:
      - 3000:3000
    networks:
```
```diff
@@ -72,15 +72,15 @@ services:

  sandbox:
    container_name: sandbox
-    image: ghcr.io/labring/fastgpt-sandbox:v4.9.8 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.8 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt-sandbox:v4.9.10 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.10 # Alibaba Cloud
    networks:
      - fastgpt
    restart: always
  fastgpt-mcp-server:
    container_name: fastgpt-mcp-server
-    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.8 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.8 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt-mcp_server:v4.9.10 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-mcp_server:v4.9.10 # Alibaba Cloud
    ports:
      - 3005:3000
    networks:
@@ -90,8 +90,8 @@ services:
      - FASTGPT_ENDPOINT=http://fastgpt:3000
  fastgpt:
    container_name: fastgpt
-    image: ghcr.io/labring/fastgpt:v4.9.8 # git
-    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.8 # Alibaba Cloud
+    image: ghcr.io/labring/fastgpt:v4.9.10 # git
+    # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.10 # Alibaba Cloud
    ports:
      - 3000:3000
    networks:
```
BIN docSite/assets/imgs/official_account_faq.png (new binary file, 386 KiB; not shown)
```diff
@@ -959,10 +959,16 @@ curl --location --request POST 'http://localhost:3000/api/core/chat/getHistories
 {{< markdownify >}}

 {{% alert icon=" " context="success" %}}
+Currently, only conversations created by the owner of the current API key can be fetched.
+
 - appId - App id
 - offset - offset, i.e. the index of the first record to fetch
 - pageSize - number of records
 - source - conversation source. source=api returns conversations created through the API (conversations from the web page are not included)
+- startCreateTime - creation-time range start (optional)
+- endCreateTime - creation-time range end (optional)
+- startUpdateTime - update-time range start (optional)
+- endUpdateTime - update-time range end (optional)
 {{% /alert %}}

 {{< /markdownify >}}
```
docSite/content/zh-cn/docs/development/upgrading/4910.md (new file, 50 additions)
```diff
@@ -0,0 +1,50 @@
+---
+title: 'V4.9.10 (in progress)'
+description: 'FastGPT V4.9.10 release notes'
+icon: 'upgrade'
+draft: false
+toc: true
+weight: 790
+---
+
+## Upgrade guide
+
+Important: this update rebuilds the full-text index. While the rebuild runs, full-text search returns empty results; on a 4c16g machine, rebuilding 7 million full-text index entries takes roughly 25 minutes. For a seamless upgrade you need to implement table synchronization yourself.
+
+### 1. Back up your data
+
+### 2. Update image tags
+
+- Update the FastGPT image tag: v4.9.10
+- Update the FastGPT commercial image tag: v4.9.10
+- mcp_server: no update needed
+- Sandbox: no update needed
+- AIProxy: no update needed
+
+## 🚀 New
+
+1. Support the PG `systemEnv.hnswMaxScanTuples` parameter to raise the total amount of data covered by iterative search.
+2. Dataset preprocessing gains a "chunk trigger" condition, so chunking can be skipped in certain cases.
+3. Dataset preprocessing gains a "paragraph-first" mode with a configurable maximum paragraph depth. The former "length-first" mode no longer embeds paragraph-first logic.
+4. Workflows switch to single-direction inputs and outputs, making it quick to append the next node.
+5. Feishu and Yuque datasets are now available in the open-source edition.
+6. Presets for the latest gemini and claude models.
+
+## ⚙️ Improvements
+
+1. Larger default timeout for LLM stream calls.
+2. Polished some confirmation interactions.
+3. Renamed the dataset "table dataset" option to "backup import"; dataset indexes can now be exported and imported.
+4. Workflow dataset citation limit: if the workflow has no related AI node, the input switches to pure manual entry with a cap of 10,000,000.
+5. Voice input: the mobile check now detects actual phones rather than just small screens.
+6. Improved the context-truncation algorithm to always keep at least one Human message group.
+
+## 🐛 Fixes
+
+1. Incorrect score ordering when full-text searching across multiple datasets.
+2. The finish_reason captured from stream responses could be wrong.
+3. Tool-call mode did not save the reasoning output.
+4. The dataset indexSize parameter did not take effect.
+5. With workflows nested two levels deep, preview citations and context were incorrect.
+6. Converting xlsx to Markdown added a leading space.
+7. When reading Markdown files, Base64 images were not converted and saved separately.
```
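The `systemEnv.hnswMaxScanTuples` knob from item 1 surfaces in the PG vector controller diff further down in this compare. A minimal sketch of how the two HNSW session settings end up in SQL; `systemEnv` is assumed to match the `SystemEnvType` fields added below, and the 100 / 100000 fallbacks mirror the defaults used in the controller:

```ts
// Sketch only: mirrors how PgVectorCtrl applies the HNSW settings per search.
declare const systemEnv: { hnswEfSearch?: number; hnswMaxScanTuples?: number } | undefined;

const buildHnswSessionSql = (): string =>
  [
    `SET LOCAL hnsw.ef_search = ${systemEnv?.hnswEfSearch || 100};`,
    `SET LOCAL hnsw.max_scan_tuples = ${systemEnv?.hnswMaxScanTuples || 100000};`,
    `SET LOCAL hnsw.iterative_scan = relaxed_order;` // pgvector iterative scan mode
  ].join('\n');
```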
docSite/content/zh-cn/docs/development/upgrading/499.md

```diff
@@ -1,5 +1,5 @@
 ---
-title: 'V4.9.9 (in progress)'
+title: 'V4.9.9'
 description: 'FastGPT V4.9.9 release notes'
 icon: 'upgrade'
 draft: false
@@ -7,11 +7,28 @@ toc: true
 weight: 791
 ---

 ## Upgrade guide

 ### 1. Back up your data

+### 2. Commercial users: replace the License
+
+Commercial users can contact the FastGPT support team for a License replacement plan. After replacing it, the system can be upgraded directly; the admin console will prompt for the new License.
+
 ### 3. Update image tags

 - Update the FastGPT image tag: v4.9.9
 - Update the FastGPT commercial image tag: v4.9.9
 - mcp_server: no update needed
 - Sandbox: no update needed
 - AIProxy: no update needed

 ## 🚀 New

 1. Switched login auth from JWT to SessionId, which allows capping the number of concurrently logged-in clients.
 2. New commercial License management model.
 3. Official Account calls now record chat errors, making troubleshooting easier.
 4. API datasets support a BasePath selection; an extra API endpoint is required, see the [API dataset guide](/docs/guide/knowledge_base/api_dataset/#4-获取文件详细信息用于获取文件信息)

 ## ⚙️ Improvements

@@ -23,3 +40,4 @@ weight: 791
 1. Could not fetch an app's saved/published history correctly.
 2. Permission issue when members create MCP tools.
 3. Source citation display passed a wrong ID, triggering "no permission to operate this file".
+4. Front-end data error in answer annotation.
```
````diff
@@ -185,3 +185,40 @@ curl --location --request GET '{{baseURL}}/v1/file/read?id=xx' \
 {{< /tabs >}}

+
+### 4. Get file details (used to fetch file info)
+
+{{< tabs tabTotal="2" >}}
+{{< tab tabName="Request example" >}}
+{{< markdownify >}}
+
+id is the file's id.
+
+```bash
+curl --location --request GET '{{baseURL}}/v1/file/detail?id=xx' \
+--header 'Authorization: Bearer {{authorization}}'
+```
+
+{{< /markdownify >}}
+{{< /tab >}}
+
+{{< tab tabName="Response example" >}}
+{{< markdownify >}}
+
+```json
+{
+  "code": 200,
+  "success": true,
+  "message": "",
+  "data": {
+    "id": "docs",
+    "parentId": "",
+    "name": "docs"
+  }
+}
+```
+
+{{< /markdownify >}}
+{{< /tab >}}
+{{< /tabs >}}
````
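For integrators, the detail response maps onto a small type. A sketch restated from the JSON example above (field names come from the example; the wrapper is the documented code/success/message/data envelope):

```ts
// Shape of the /v1/file/detail response documented above.
type FileDetail = {
  id: string;
  name: string;
  parentId: string; // '' when the file sits at the root
};

type FileDetailReply = {
  code: number;
  success: boolean;
  message: string;
  data: FileDetail;
};
```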
|
||||
@@ -28,7 +28,6 @@ FastGPT 商业版是基于 FastGPT 开源版的增强版本,增加了一些独
|
||||
| 应用发布安全配置 | ❌ | ✅ | ✅ |
|
||||
| 内容审核 | ❌ | ✅ | ✅ |
|
||||
| web站点同步 | ❌ | ✅ | ✅ |
|
||||
| 主流文档库接入(目前支持:语雀、飞书) | ❌ | ✅ | ✅ |
|
||||
| 增强训练模式 | ❌ | ✅ | ✅ |
|
||||
| 第三方应用快速接入(飞书、公众号) | ❌ | ✅ | ✅ |
|
||||
| 管理后台 | ❌ | ✅ | 不需要 |
|
||||
|
||||
```diff
@@ -132,7 +132,9 @@ weight: 506
 ### Official Account not responding

 Check the app's conversation logs; if there are logs but the WeChat Official Account gives no response, the IP whitelist did not take effect.
-After adding the whitelist IPs, it usually takes WeChat a few minutes to update.
+After adding the whitelist IPs, it usually takes WeChat a few minutes to update. You can look for error entries in the conversation logs.
+
+![](/imgs/official_account_faq.png)

 ### How to start a new chat
```
```diff
@@ -27,7 +27,7 @@ const datasetErr = [
   },
   {
     statusText: DatasetErrEnum.unExist,
-    message: 'core.dataset.error.unExistDataset'
+    message: i18nT('common:core.dataset.error.unExistDataset')
   },
   {
     statusText: DatasetErrEnum.unExistCollection,
```
````diff
@@ -7,6 +7,10 @@ export const CUSTOM_SPLIT_SIGN = '-----CUSTOM_SPLIT_SIGN-----';
 type SplitProps = {
   text: string;
   chunkSize: number;
+
+  paragraphChunkDeep?: number; // Paragraph deep
+  paragraphChunkMinSize?: number; // Paragraph min size, if too small, it will merge
+
   maxSize?: number;
   overlapRatio?: number;
   customReg?: string[];
@@ -108,6 +112,8 @@ const commonSplit = (props: SplitProps): SplitResponse => {
   let {
     text = '',
     chunkSize,
+    paragraphChunkDeep = 5,
+    paragraphChunkMinSize = 100,
     maxSize = defaultMaxChunkSize,
     overlapRatio = 0.15,
     customReg = []
@@ -123,7 +129,7 @@ const commonSplit = (props: SplitProps): SplitResponse => {
   text = text.replace(/(```[\s\S]*?```|~~~[\s\S]*?~~~)/g, function (match) {
     return match.replace(/\n/g, codeBlockMarker);
   });
-  // 2. Table handling: extract tables separately and merge their headers
+  // 2. Markdown table handling: extract tables separately and merge their headers
   const tableReg =
     /(\n\|(?:(?:[^\n|]+\|){1,})\n\|(?:[:\-\s]+\|){1,}\n(?:\|(?:[^\n|]+\|)*\n?)*)(?:\n|$)/g;
   const tableDataList = text.match(tableReg);
@@ -143,25 +149,40 @@ const commonSplit = (props: SplitProps): SplitResponse => {
   text = text.replace(/(\r?\n|\r){3,}/g, '\n\n\n');

   // The larger maxLen is, the next sentence is less likely to trigger splitting
-  const markdownIndex = 4;
-  const forbidOverlapIndex = 8;
+  const customRegLen = customReg.length;
+  const markdownIndex = paragraphChunkDeep - 1;
+  const forbidOverlapIndex = customRegLen + markdownIndex + 4;
+
+  const markdownHeaderRules = ((deep?: number): { reg: RegExp; maxLen: number }[] => {
+    if (!deep || deep === 0) return [];
+
+    const maxDeep = Math.min(deep, 8); // Maximum 8 levels
+    const rules: { reg: RegExp; maxLen: number }[] = [];
+
+    for (let i = 1; i <= maxDeep; i++) {
+      const hashSymbols = '#'.repeat(i);
+      rules.push({
+        reg: new RegExp(`^(${hashSymbols}\\s[^\\n]+\\n)`, 'gm'),
+        maxLen: chunkSize
+      });
+    }
+
+    return rules;
+  })(paragraphChunkDeep);

   const stepReges: { reg: RegExp | string; maxLen: number }[] = [
     ...customReg.map((text) => ({
       reg: text.replaceAll('\\n', '\n'),
       maxLen: chunkSize
     })),
-    { reg: /^(#\s[^\n]+\n)/gm, maxLen: chunkSize },
-    { reg: /^(##\s[^\n]+\n)/gm, maxLen: chunkSize },
-    { reg: /^(###\s[^\n]+\n)/gm, maxLen: chunkSize },
-    { reg: /^(####\s[^\n]+\n)/gm, maxLen: chunkSize },
-    { reg: /^(#####\s[^\n]+\n)/gm, maxLen: chunkSize },
+    ...markdownHeaderRules,

     { reg: /([\n](```[\s\S]*?```|~~~[\s\S]*?~~~))/g, maxLen: maxSize }, // code block
     // Keep HTML table tags as intact as possible
     {
       reg: /(\n\|(?:(?:[^\n|]+\|){1,})\n\|(?:[:\-\s]+\|){1,}\n(?:\|(?:[^\n|]+\|)*\n)*)/g,
-      maxLen: Math.min(chunkSize * 1.5, maxSize)
-    }, // Table: keep it as intact as possible
+      maxLen: chunkSize
+    }, // Markdown table: keep it as intact as possible
     { reg: /(\n{2,})/g, maxLen: chunkSize },
     { reg: /([\n])/g, maxLen: chunkSize },
     // ------ There's no overlap on the top
@@ -172,12 +193,10 @@ const commonSplit = (props: SplitProps): SplitResponse => {
     { reg: /([,]|,\s)/g, maxLen: chunkSize }
   ];

-  const customRegLen = customReg.length;
   const checkIsCustomStep = (step: number) => step < customRegLen;
   const checkIsMarkdownSplit = (step: number) =>
     step >= customRegLen && step <= markdownIndex + customRegLen;
-
-  const checkForbidOverlap = (step: number) => step <= forbidOverlapIndex + customRegLen;
+  const checkForbidOverlap = (step: number) => step <= forbidOverlapIndex;

   // if use markdown title split, Separate record title
   const getSplitTexts = ({ text, step }: { text: string; step: number }) => {
@@ -301,6 +320,7 @@ const commonSplit = (props: SplitProps): SplitResponse => {
     const splitTexts = getSplitTexts({ text, step });
+
     const chunks: string[] = [];

     for (let i = 0; i < splitTexts.length; i++) {
       const item = splitTexts[i];
@@ -443,7 +463,6 @@ const commonSplit = (props: SplitProps): SplitResponse => {
 */
 export const splitText2Chunks = (props: SplitProps): SplitResponse => {
   let { text = '' } = props;
-  const start = Date.now();
   const splitWithCustomSign = text.split(CUSTOM_SPLIT_SIGN);

   const splitResult = splitWithCustomSign.map((item) => {
````
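The generated rules replace the five hard-coded header regexes: one rule per header level up to `paragraphChunkDeep`, capped at 8 levels. A standalone check of the generation logic shown in the diff (nothing assumed beyond it):

```ts
// For deep = 3 this produces the same patterns the old code hard-coded for #, ##, ###.
const buildMarkdownHeaderRules = (deep: number, chunkSize: number) => {
  const maxDeep = Math.min(deep, 8); // same 8-level cap as the diff
  return Array.from({ length: maxDeep }, (_, i) => ({
    reg: new RegExp(`^(${'#'.repeat(i + 1)}\\s[^\\n]+\\n)`, 'gm'),
    maxLen: chunkSize
  }));
};

console.log(buildMarkdownHeaderRules(3, 512).map((r) => r.reg.source));
// => [ '^(#\\s[^\\n]+\\n)', '^(##\\s[^\\n]+\\n)', '^(###\\s[^\\n]+\\n)' ]
```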
```diff
@@ -130,9 +130,11 @@ export type SystemEnvType = {
   vectorMaxProcess: number;
   qaMaxProcess: number;
   vlmMaxProcess: number;
-  hnswEfSearch: number;
   tokenWorkers: number; // token count max worker
+
+  hnswEfSearch: number;
+  hnswMaxScanTuples: number;

   oneapiUrl?: string;
   chatApiKey?: string;
```
```diff
@@ -60,5 +60,3 @@ export enum AppTemplateTypeEnum {
   // special type
   contribute = 'contribute'
 }
-
-export const defaultDatasetMaxTokens = 16000;
```
packages/global/core/chat/type.d.ts (vendored, 3 changes)
```diff
@@ -26,6 +26,7 @@ export type ChatSchema = {
   teamId: string;
   tmbId: string;
   appId: string;
+  createTime: Date;
   updateTime: Date;
   title: string;
   customTitle: string;
@@ -112,6 +113,7 @@ export type ChatItemSchema = (UserChatItemType | SystemChatItemType | AIChatItem
   appId: string;
   time: Date;
   durationSeconds?: number;
+  errorMsg?: string;
 };

 export type AdminFbkType = {
@@ -143,6 +145,7 @@ export type ChatSiteItemType = (UserChatItemType | SystemChatItemType | AIChatIt
   responseData?: ChatHistoryItemResType[];
   time?: Date;
   durationSeconds?: number;
+  errorMsg?: string;
 } & ChatBoxInputType &
   ResponseTagItemType;
```
packages/global/core/dataset/api.d.ts (vendored, 31 changes)
```diff
@@ -1,9 +1,11 @@
-import type { DatasetDataIndexItemType, DatasetSchemaType } from './type';
+import type { ChunkSettingsType, DatasetDataIndexItemType, DatasetSchemaType } from './type';
 import type {
   DatasetCollectionTypeEnum,
   DatasetCollectionDataProcessModeEnum,
   ChunkSettingModeEnum,
-  DataChunkSplitModeEnum
+  DataChunkSplitModeEnum,
+  ChunkTriggerConfigTypeEnum,
+  ParagraphChunkAIModeEnum
 } from './constants';
 import type { LLMModelItemType } from '../ai/model.d';
 import type { ParentIdType } from 'common/parentFolder/type';
@@ -32,26 +34,16 @@ export type DatasetUpdateBody = {
 };

 /* ================= collection ===================== */
-export type DatasetCollectionChunkMetadataType = {
+// Input + store params
+type DatasetCollectionStoreDataType = ChunkSettingsType & {
   parentId?: string;
-  customPdfParse?: boolean;
-  trainingType?: DatasetCollectionDataProcessModeEnum;
-  imageIndex?: boolean;
-  autoIndexes?: boolean;
-
-  chunkSettingMode?: ChunkSettingModeEnum;
-  chunkSplitMode?: DataChunkSplitModeEnum;
-
-  chunkSize?: number;
-  indexSize?: number;
-
-  chunkSplitter?: string;
-  qaPrompt?: string;
   metadata?: Record<string, any>;
+
+  customPdfParse?: boolean;
 };

 // create collection params
-export type CreateDatasetCollectionParams = DatasetCollectionChunkMetadataType & {
+export type CreateDatasetCollectionParams = DatasetCollectionStoreDataType & {
   datasetId: string;
   name: string;
   type: DatasetCollectionTypeEnum;
@@ -72,7 +64,7 @@ export type CreateDatasetCollectionParams = DatasetCollectionChunkMetadataType &
   nextSyncTime?: Date;
 };

-export type ApiCreateDatasetCollectionParams = DatasetCollectionChunkMetadataType & {
+export type ApiCreateDatasetCollectionParams = DatasetCollectionStoreDataType & {
   datasetId: string;
   tags?: string[];
 };
@@ -90,7 +82,7 @@ export type ApiDatasetCreateDatasetCollectionParams = ApiCreateDatasetCollection
 export type FileIdCreateDatasetCollectionParams = ApiCreateDatasetCollectionParams & {
   fileId: string;
 };
-export type reTrainingDatasetFileCollectionParams = DatasetCollectionChunkMetadataType & {
+export type reTrainingDatasetFileCollectionParams = DatasetCollectionStoreDataType & {
   datasetId: string;
   collectionId: string;
 };
@@ -147,6 +139,7 @@ export type PushDatasetDataProps = {
   collectionId: string;
   data: PushDatasetDataChunkProps[];
   trainingType?: DatasetCollectionDataProcessModeEnum;
+  indexSize?: number;
   autoIndexes?: boolean;
   imageIndex?: boolean;
   prompt?: string;
```
```diff
@@ -120,6 +120,8 @@ export const DatasetCollectionSyncResultMap = {
 export enum DatasetCollectionDataProcessModeEnum {
   chunk = 'chunk',
   qa = 'qa',
+  backup = 'backup',
+
   auto = 'auto' // abandon
 }
 export const DatasetCollectionDataProcessModeMap = {
@@ -131,21 +133,35 @@ export const DatasetCollectionDataProcessModeMap = {
     label: i18nT('common:core.dataset.training.QA mode'),
     tooltip: i18nT('common:core.dataset.import.QA Import Tip')
   },
+  [DatasetCollectionDataProcessModeEnum.backup]: {
+    label: i18nT('dataset:backup_mode'),
+    tooltip: i18nT('dataset:backup_mode')
+  },
   [DatasetCollectionDataProcessModeEnum.auto]: {
     label: i18nT('common:core.dataset.training.Auto mode'),
     tooltip: i18nT('common:core.dataset.training.Auto mode Tip')
   }
 };

+export enum ChunkTriggerConfigTypeEnum {
+  minSize = 'minSize',
+  forceChunk = 'forceChunk',
+  maxSize = 'maxSize'
+}
 export enum ChunkSettingModeEnum {
   auto = 'auto',
   custom = 'custom'
 }

 export enum DataChunkSplitModeEnum {
+  paragraph = 'paragraph',
   size = 'size',
   char = 'char'
 }
+export enum ParagraphChunkAIModeEnum {
+  auto = 'auto',
+  force = 'force'
+}

 /* ------------ data -------------- */

@@ -154,7 +170,6 @@ export enum ImportDataSourceEnum {
   fileLocal = 'fileLocal',
   fileLink = 'fileLink',
   fileCustom = 'fileCustom',
-  csvTable = 'csvTable',
   externalFile = 'externalFile',
   apiDataset = 'apiDataset',
   reTraining = 'reTraining'
```
|
||||
color: 'red'
|
||||
},
|
||||
[DatasetDataIndexTypeEnum.image]: {
|
||||
label: i18nT('common:data_index_image'),
|
||||
label: i18nT('dataset:data_index_image'),
|
||||
color: 'purple'
|
||||
}
|
||||
};
|
||||
|
||||
```diff
@@ -118,9 +118,8 @@ export const computeChunkSize = (params: {
     return getLLMMaxChunkSize(params.llmModel);
   }

-  return Math.min(params.chunkSize || chunkAutoChunkSize, getLLMMaxChunkSize(params.llmModel));
+  return Math.min(params.chunkSize ?? chunkAutoChunkSize, getLLMMaxChunkSize(params.llmModel));
 };

 export const computeChunkSplitter = (params: {
   chunkSettingMode?: ChunkSettingModeEnum;
   chunkSplitMode?: DataChunkSplitModeEnum;
@@ -129,8 +128,21 @@ export const computeChunkSplitter = (params: {
   if (params.chunkSettingMode === ChunkSettingModeEnum.auto) {
     return undefined;
   }
-  if (params.chunkSplitMode === DataChunkSplitModeEnum.size) {
+  if (params.chunkSplitMode !== DataChunkSplitModeEnum.char) {
     return undefined;
   }
   return params.chunkSplitter;
 };
+export const computeParagraphChunkDeep = (params: {
+  chunkSettingMode?: ChunkSettingModeEnum;
+  chunkSplitMode?: DataChunkSplitModeEnum;
+  paragraphChunkDeep?: number;
+}) => {
+  if (params.chunkSettingMode === ChunkSettingModeEnum.auto) {
+    return 5;
+  }
+  if (params.chunkSplitMode === DataChunkSplitModeEnum.paragraph) {
+    return params.paragraphChunkDeep;
+  }
+  return 0;
+};
```
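The inverted condition changes behavior for the new `paragraph` mode: previously only `size` mode dropped the custom splitter; now any mode other than `char` does. A standalone restatement of the two helpers for illustration (string enum values taken from the constants diff above; not the repo's test suite):

```ts
type Params = {
  chunkSettingMode?: 'auto' | 'custom';
  chunkSplitMode?: 'paragraph' | 'size' | 'char';
  chunkSplitter?: string;
  paragraphChunkDeep?: number;
};

const computeChunkSplitter = (p: Params) => {
  if (p.chunkSettingMode === 'auto') return undefined;
  if (p.chunkSplitMode !== 'char') return undefined; // was: === 'size'
  return p.chunkSplitter;
};

const computeParagraphChunkDeep = (p: Params) => {
  if (p.chunkSettingMode === 'auto') return 5;
  if (p.chunkSplitMode === 'paragraph') return p.paragraphChunkDeep;
  return 0;
};

computeChunkSplitter({ chunkSettingMode: 'custom', chunkSplitMode: 'char', chunkSplitter: '---' }); // => '---'
computeChunkSplitter({ chunkSettingMode: 'custom', chunkSplitMode: 'paragraph', chunkSplitter: '---' }); // => undefined: only char mode keeps a splitter now
computeParagraphChunkDeep({ chunkSettingMode: 'auto' }); // => 5
computeParagraphChunkDeep({ chunkSettingMode: 'custom', chunkSplitMode: 'paragraph', paragraphChunkDeep: 3 }); // => 3
```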
packages/global/core/dataset/type.d.ts (vendored, 48 changes)
```diff
@@ -8,26 +8,42 @@ import type {
   DatasetStatusEnum,
   DatasetTypeEnum,
   SearchScoreTypeEnum,
-  TrainingModeEnum
+  TrainingModeEnum,
+  ChunkSettingModeEnum,
+  ChunkTriggerConfigTypeEnum
 } from './constants';
 import type { DatasetPermission } from '../../support/permission/dataset/controller';
 import { Permission } from '../../support/permission/controller';
 import type { APIFileServer, FeishuServer, YuqueServer } from './apiDataset';
 import type { SourceMemberType } from 'support/user/type';
 import type { DatasetDataIndexTypeEnum } from './data/constants';
-import type { ChunkSettingModeEnum } from './constants';

 export type ChunkSettingsType = {
-  trainingType: DatasetCollectionDataProcessModeEnum;
-  autoIndexes?: boolean;
+  trainingType?: DatasetCollectionDataProcessModeEnum;
+
+  // Chunk trigger
+  chunkTriggerType?: ChunkTriggerConfigTypeEnum;
+  chunkTriggerMinSize?: number; // maxSize from agent model, not store
+
+  // Data enhance
+  dataEnhanceCollectionName?: boolean; // Auto add collection name to data
+
+  // Index enhance
   imageIndex?: boolean;
+  autoIndexes?: boolean;

-  chunkSettingMode?: ChunkSettingModeEnum;
+  // Chunk setting
+  chunkSettingMode?: ChunkSettingModeEnum; // system params / custom params
   chunkSplitMode?: DataChunkSplitModeEnum;

-  chunkSize?: number;
+  // Paragraph split
+  paragraphChunkAIMode?: ParagraphChunkAIModeEnum;
+  paragraphChunkDeep?: number; // Paragraph deep
+  paragraphChunkMinSize?: number; // Paragraph min size, if too small, it will merge
+  // Size split
+  chunkSize?: number; // chunk/qa chunk size, Paragraph max chunk size.
+  // Char split
+  chunkSplitter?: string; // chunk/qa chunk splitter
   indexSize?: number;
-  chunkSplitter?: string;

   qaPrompt?: string;
 };

@@ -66,7 +82,7 @@ export type DatasetSchemaType = {
   defaultPermission?: number;
 };

-export type DatasetCollectionSchemaType = {
+export type DatasetCollectionSchemaType = ChunkSettingsType & {
   _id: string;
   teamId: string;
   tmbId: string;
@@ -101,18 +117,7 @@ export type DatasetCollectionSchemaType = {

   // Parse settings
   customPdfParse?: boolean;
-  // Chunk settings
-  autoIndexes?: boolean;
-  imageIndex?: boolean;
-  trainingType: DatasetCollectionDataProcessModeEnum;
-
-  chunkSettingMode?: ChunkSettingModeEnum;
-  chunkSplitMode?: DataChunkSplitModeEnum;
-
-  chunkSize?: number;
-  indexSize?: number;
-  chunkSplitter?: string;
-  qaPrompt?: string;
 };

 export type DatasetCollectionTagsSchemaType = {
@@ -175,6 +180,7 @@ export type DatasetTrainingSchemaType = {
   q: string;
   a: string;
   chunkIndex: number;
+  indexSize?: number;
   weight: number;
   indexes: Omit<DatasetDataIndexItemType, 'dataId'>[];
   retryCount: number;
```
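An illustrative value for the regrouped settings type, to show how the new trigger and paragraph fields combine (field names come from the diff above; the concrete numbers are assumptions, not product defaults):

```ts
const chunkSettings = {
  trainingType: 'chunk',
  // Chunk trigger: skip chunking entirely for small inputs
  chunkTriggerType: 'minSize',
  chunkTriggerMinSize: 1000,
  // Paragraph-first splitting, added in v4.9.10
  chunkSettingMode: 'custom',
  chunkSplitMode: 'paragraph',
  paragraphChunkDeep: 4, // split on headers up to ####
  paragraphChunkMinSize: 100, // merge paragraphs smaller than this
  chunkSize: 512 // also the max paragraph chunk size
};
```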
packages/service/common/api/type.d.ts (vendored, 18 changes)
```diff
@@ -6,12 +6,6 @@ import type {
 } from '../../core/dataset/search/controller';
 import type { AuthOpenApiLimitProps } from '../../support/openapi/auth';
 import type { CreateUsageProps, ConcatUsageProps } from '@fastgpt/global/support/wallet/usage/api';
-import type {
-  GetProApiDatasetFileContentParams,
-  GetProApiDatasetFileDetailParams,
-  GetProApiDatasetFileListParams,
-  GetProApiDatasetFilePreviewUrlParams
-} from '../../core/dataset/apiDataset/proApi';

 declare global {
   var textCensorHandler: (params: { text: string }) => Promise<{ code: number; message?: string }>;
@@ -19,16 +13,4 @@ declare global {
   var authOpenApiHandler: (data: AuthOpenApiLimitProps) => Promise<any>;
   var createUsageHandler: (data: CreateUsageProps) => any;
   var concatUsageHandler: (data: ConcatUsageProps) => any;
-
-  // API dataset
-  var getProApiDatasetFileList: (data: GetProApiDatasetFileListParams) => Promise<APIFileItem[]>;
-  var getProApiDatasetFileContent: (
-    data: GetProApiDatasetFileContentParams
-  ) => Promise<ApiFileReadContentResponse>;
-  var getProApiDatasetFilePreviewUrl: (
-    data: GetProApiDatasetFilePreviewUrlParams
-  ) => Promise<string>;
-  var getProApiDatasetFileDetail: (
-    data: GetProApiDatasetFileDetailParams
-  ) => Promise<ApiDatasetDetailResponse>;
 }
```
```diff
@@ -210,15 +210,15 @@ export const readFileContentFromMongo = async ({
   tmbId,
   bucketName,
   fileId,
-  isQAImport = false,
-  customPdfParse = false
+  customPdfParse = false,
+  getFormatText
 }: {
   teamId: string;
   tmbId: string;
   bucketName: `${BucketNameEnum}`;
   fileId: string;
-  isQAImport?: boolean;
   customPdfParse?: boolean;
+  getFormatText?: boolean; // convert data types to markdown format whenever possible
 }): Promise<{
   rawText: string;
   filename: string;
@@ -254,8 +254,8 @@ export const readFileContentFromMongo = async ({
   // Get raw text
   const { rawText } = await readRawContentByFileBuffer({
     customPdfParse,
+    getFormatText,
     extension,
-    isQAImport,
     teamId,
     tmbId,
     buffer: fileBuffers,
```
```diff
@@ -16,6 +16,7 @@ export type readRawTextByLocalFileParams = {
   path: string;
   encoding: string;
   customPdfParse?: boolean;
+  getFormatText?: boolean;
   metadata?: Record<string, any>;
 };
 export const readRawTextByLocalFile = async (params: readRawTextByLocalFileParams) => {
@@ -27,8 +28,8 @@ export const readRawTextByLocalFile = async (params: readRawTextByLocalFileParam

   return readRawContentByFileBuffer({
     extension,
-    isQAImport: false,
     customPdfParse: params.customPdfParse,
+    getFormatText: params.getFormatText,
     teamId: params.teamId,
     tmbId: params.tmbId,
     encoding: params.encoding,
@@ -46,7 +47,7 @@ export const readRawContentByFileBuffer = async ({
   encoding,
   metadata,
   customPdfParse = false,
-  isQAImport = false
+  getFormatText = true
 }: {
   teamId: string;
   tmbId: string;
@@ -57,8 +58,10 @@ export const readRawContentByFileBuffer = async ({
   metadata?: Record<string, any>;

   customPdfParse?: boolean;
-  isQAImport: boolean;
-}): Promise<ReadFileResponse> => {
+  getFormatText?: boolean;
+}): Promise<{
+  rawText: string;
+}> => {
   const systemParse = () =>
     runWorker<ReadFileResponse>(WorkerNameEnum.readFile, {
       extension,
@@ -176,16 +179,7 @@ export const readRawContentByFileBuffer = async ({
     });
   }

-  if (['csv', 'xlsx'].includes(extension)) {
-    // qa data
-    if (isQAImport) {
-      rawText = rawText || '';
-    } else {
-      rawText = formatText || rawText;
-    }
-  }
-
   addLog.debug(`Upload file success, time: ${Date.now() - start}ms`);

-  return { rawText, formatText, imageList };
+  return { rawText: getFormatText ? formatText || rawText : rawText };
 };
```
```diff
@@ -10,6 +10,7 @@ let jieba: Jieba | undefined;
 })();

 const stopWords = new Set([
+  '\n',
   '--',
   '?',
   '“',
@@ -1519,8 +1520,7 @@ const stopWords = new Set([
 ]);

 export async function jiebaSplit({ text }: { text: string }) {
-  text = text.replace(/[#*`_~>[\](){}|]/g, '').replace(/\S*https?\S*/gi, '');
-
+  text = text.replace(/[#*`_~>[\](){}|]|\S*https?\S*/g, '').trim();
   const tokens = (await jieba!.cutAsync(text, true)) as string[];

   return (
```
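One subtle difference in the merged pattern: the old URL pass used the case-insensitive `gi` flags, while the combined regex uses only `g`, so uppercase scheme spellings are no longer stripped; the result is also trimmed now. A quick illustration:

```ts
// The merged single-pass strip, as in the diff:
const clean = (s: string) => s.replace(/[#*`_~>[\](){}|]|\S*https?\S*/g, '').trim();

clean('`code` https://example.com '); // => 'code'  (markdown chars and URL stripped, then trimmed)
clean('HTTP://EXAMPLE.COM stays');    // => 'HTTP://EXAMPLE.COM stays' (no /i flag: uppercase scheme survives)
```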
```diff
@@ -57,14 +57,19 @@ export const addLog = {

     level === LogLevelEnum.error && console.error(obj);

-    // store
+    // store log
     if (level >= STORE_LOG_LEVEL && connectionMongo.connection.readyState === 1) {
-      // store log
-      getMongoLog().create({
-        text: msg,
-        level,
-        metadata: obj
-      });
+      (async () => {
+        try {
+          await getMongoLog().create({
+            text: msg,
+            level,
+            metadata: obj
+          });
+        } catch (error) {
+          console.error('store log error', error);
+        }
+      })();
     }
   },
   debug(msg: string, obj?: Record<string, any>) {
```
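The write is now fire-and-forget inside an async IIFE with its own try/catch, so a failed Mongo insert surfaces as a console error rather than an unhandled promise rejection, and logging never blocks the caller.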
```diff
@@ -188,6 +188,7 @@ export class PgVectorCtrl {
     const results: any = await PgClient.query(
       `BEGIN;
         SET LOCAL hnsw.ef_search = ${global.systemEnv?.hnswEfSearch || 100};
+        SET LOCAL hnsw.max_scan_tuples = ${global.systemEnv?.hnswMaxScanTuples || 100000};
         SET LOCAL hnsw.iterative_scan = relaxed_order;
         WITH relaxed_results AS MATERIALIZED (
           select id, collection_id, vector <#> '[${vector}]' AS score
@@ -199,7 +200,7 @@ export class PgVectorCtrl {
         ) SELECT id, collection_id, score FROM relaxed_results ORDER BY score;
       COMMIT;`
     );
-    const rows = results?.[3]?.rows as PgSearchRawType[];
+    const rows = results?.[results.length - 2]?.rows as PgSearchRawType[];

     if (!Array.isArray(rows)) {
       return {
```
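Why `results.length - 2`: the PG client returns one result set per statement in this multi-statement transaction, so the added `SET LOCAL` shifted the `SELECT` from index 3 to index 4. Counting back from the end (the statement just before `COMMIT`) keeps the row lookup correct however many `SET LOCAL` statements precede it.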
```diff
@@ -78,7 +78,7 @@ export const createChatCompletion = async ({
   }
   body.model = modelConstantsData.model;

-  const formatTimeout = timeout ? timeout : body.stream ? 60000 : 600000;
+  const formatTimeout = timeout ? timeout : 600000;
   const ai = getAIApi({
     userKey,
     timeout: formatTimeout
```
```diff
@@ -1,6 +1,54 @@
 {
   "provider": "Claude",
   "list": [
+    {
+      "model": "claude-sonnet-4-20250514",
+      "name": "claude-sonnet-4-20250514",
+      "maxContext": 200000,
+      "maxResponse": 8000,
+      "quoteMaxToken": 100000,
+      "maxTemperature": 1,
+      "showTopP": true,
+      "showStopSign": true,
+      "vision": true,
+      "toolChoice": true,
+      "functionCall": false,
+      "defaultSystemChatPrompt": "",
+      "datasetProcess": true,
+      "usedInClassify": true,
+      "customCQPrompt": "",
+      "usedInExtractFields": true,
+      "usedInQueryExtension": true,
+      "customExtractPrompt": "",
+      "usedInToolCall": true,
+      "defaultConfig": {},
+      "fieldMap": {},
+      "type": "llm"
+    },
+    {
+      "model": "claude-opus-4-20250514",
+      "name": "claude-opus-4-20250514",
+      "maxContext": 200000,
+      "maxResponse": 4096,
+      "quoteMaxToken": 100000,
+      "maxTemperature": 1,
+      "showTopP": true,
+      "showStopSign": true,
+      "vision": true,
+      "toolChoice": true,
+      "functionCall": false,
+      "defaultSystemChatPrompt": "",
+      "datasetProcess": true,
+      "usedInClassify": true,
+      "customCQPrompt": "",
+      "usedInExtractFields": true,
+      "usedInQueryExtension": true,
+      "customExtractPrompt": "",
+      "usedInToolCall": true,
+      "defaultConfig": {},
+      "fieldMap": {},
+      "type": "llm"
+    },
     {
       "model": "claude-3-7-sonnet-20250219",
       "name": "claude-3-7-sonnet-20250219",
```
```diff
@@ -25,6 +25,30 @@
       "showTopP": true,
       "showStopSign": true
     },
+    {
+      "model": "gemini-2.5-flash-preview-04-17",
+      "name": "gemini-2.5-flash-preview-04-17",
+      "maxContext": 1000000,
+      "maxResponse": 8000,
+      "quoteMaxToken": 60000,
+      "maxTemperature": 1,
+      "vision": true,
+      "toolChoice": true,
+      "functionCall": false,
+      "defaultSystemChatPrompt": "",
+      "datasetProcess": true,
+      "usedInClassify": true,
+      "customCQPrompt": "",
+      "usedInExtractFields": true,
+      "usedInQueryExtension": true,
+      "customExtractPrompt": "",
+      "usedInToolCall": true,
+      "defaultConfig": {},
+      "fieldMap": {},
+      "type": "llm",
+      "showTopP": true,
+      "showStopSign": true
+    },
     {
       "model": "gemini-2.0-flash",
       "name": "gemini-2.0-flash",
```
```diff
@@ -18,15 +18,17 @@ import json5 from 'json5';
 */
 export const computedMaxToken = ({
   maxToken,
-  model
+  model,
+  min
 }: {
   maxToken?: number;
   model: LLMModelItemType;
+  min?: number;
 }) => {
   if (maxToken === undefined) return;

   maxToken = Math.min(maxToken, model.maxResponse);
-  return maxToken;
+  return Math.max(maxToken, min || 0);
 };

 // FastGPT temperature range: [0,10], ai temperature:[0,2],{0,1]……
@@ -178,7 +180,7 @@ export const llmStreamResponseToAnswerText = async (
     }
   }
   return {
-    text: parseReasoningContent(answer)[1],
+    text: removeDatasetCiteText(parseReasoningContent(answer)[1], false),
     usage,
     toolCalls
   };
@@ -192,8 +194,9 @@ export const llmUnStreamResponseToAnswerText = async (
 }> => {
   const answer = response.choices?.[0]?.message?.content || '';
   const toolCalls = response.choices?.[0]?.message?.tool_calls;
+
   return {
-    text: answer,
+    text: removeDatasetCiteText(parseReasoningContent(answer)[1], false),
     usage: response.usage,
     toolCalls
   };
```
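The new `min` parameter floors the result after the `maxResponse` cap. A standalone restatement of the clamp for illustration (same logic as the diff; the numbers are arbitrary):

```ts
const clampMaxToken = (maxToken: number | undefined, maxResponse: number, min?: number) => {
  if (maxToken === undefined) return;
  return Math.max(Math.min(maxToken, maxResponse), min || 0);
};

clampMaxToken(8000, 4096);      // => 4096: still capped by the model's maxResponse
clampMaxToken(100, 4096, 256);  // => 256: the new `min` floor wins over the cap
clampMaxToken(undefined, 4096); // => undefined: early return unchanged
```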
```diff
@@ -240,6 +243,12 @@ export const parseLLMStreamResponse = () => {
   let citeBuffer = '';
   const maxCiteBufferLength = 32; // [Object](CITE) is 32 characters long
+
+  // Buffer
+  let buffer_finishReason: CompletionFinishReason = null;
+  let buffer_usage: CompletionUsage = getLLMDefaultUsage();
+  let buffer_reasoningContent = '';
+  let buffer_content = '';

   /*
     parseThinkTag - only controls whether we actively parse <think></think>; if the API already parsed it, we do not parse again.
     retainDatasetCite -
@@ -257,6 +266,7 @@ export const parseLLMStreamResponse = () => {
       };
       finish_reason?: CompletionFinishReason;
     }[];
+    usage?: CompletionUsage;
   };
   parseThinkTag?: boolean;
   retainDatasetCite?: boolean;
@@ -266,72 +276,71 @@ export const parseLLMStreamResponse = () => {
     responseContent: string;
     finishReason: CompletionFinishReason;
   } => {
-    const finishReason = part.choices?.[0]?.finish_reason || null;
-    const content = part.choices?.[0]?.delta?.content || '';
-    // @ts-ignore
-    const reasoningContent = part.choices?.[0]?.delta?.reasoning_content || '';
-    const isStreamEnd = !!finishReason;
-
-    // Parse think
-    const { reasoningContent: parsedThinkReasoningContent, content: parsedThinkContent } = (() => {
-      if (reasoningContent || !parseThinkTag) {
-        isInThinkTag = false;
-        return { reasoningContent, content };
-      }
-
-      if (!content) {
-        return {
-          reasoningContent: '',
-          content: ''
-        };
-      }
-
-      // If not inside a think tag, or reasoningContent exists (the API already parsed it), return reasoningContent and content
-      if (isInThinkTag === false) {
-        return {
-          reasoningContent: '',
-          content
-        };
-      }
-
-      // Detect whether the data opens with a think tag
-      if (isInThinkTag === undefined) {
-        // Parse content think and answer
-        startTagBuffer += content;
-        // Too little content; hold off parsing for now
-        if (startTagBuffer.length < thinkStartChars.length) {
-          if (isStreamEnd) {
-            const tmpContent = startTagBuffer;
-            startTagBuffer = '';
-            return {
-              reasoningContent: '',
-              content: tmpContent
-            };
-          }
-          return {
-            reasoningContent: '',
-            content: ''
-          };
-        }
-
-        if (startTagBuffer.startsWith(thinkStartChars)) {
-          isInThinkTag = true;
-          return {
-            reasoningContent: startTagBuffer.slice(thinkStartChars.length),
-            content: ''
-          };
-        }
-
-        // No think tag matched: assume we are not inside one and return the buffered text as content
-        isInThinkTag = false;
-        return {
-          reasoningContent: '',
-          content: startTagBuffer
-        };
-      }
+    const data = (() => {
+      buffer_usage = part.usage || buffer_usage;
+
+      const finishReason = part.choices?.[0]?.finish_reason || null;
+      buffer_finishReason = finishReason || buffer_finishReason;
+
+      const content = part.choices?.[0]?.delta?.content || '';
+      // @ts-ignore
+      const reasoningContent = part.choices?.[0]?.delta?.reasoning_content || '';
+      const isStreamEnd = !!buffer_finishReason;
+
+      // Parse think
+      const { reasoningContent: parsedThinkReasoningContent, content: parsedThinkContent } =
+        (() => {
+          if (reasoningContent || !parseThinkTag) {
+            isInThinkTag = false;
+            return { reasoningContent, content };
+          }
+
+          if (!content) {
+            return {
+              reasoningContent: '',
+              content: ''
+            };
+          }
+
+          // If not inside a think tag, or reasoningContent exists (the API already parsed it), return reasoningContent and content
+          if (isInThinkTag === false) {
+            return {
+              reasoningContent: '',
+              content
+            };
+          }
+
+          // Detect whether the data opens with a think tag
+          if (isInThinkTag === undefined) {
+            // Parse content think and answer
+            startTagBuffer += content;
+            // Too little content; hold off parsing for now
+            if (startTagBuffer.length < thinkStartChars.length) {
+              if (isStreamEnd) {
+                const tmpContent = startTagBuffer;
+                startTagBuffer = '';
+                return {
+                  reasoningContent: '',
+                  content: tmpContent
+                };
+              }
+              return {
+                reasoningContent: '',
+                content: ''
+              };
+            }
+
+            if (startTagBuffer.startsWith(thinkStartChars)) {
+              isInThinkTag = true;
+              return {
+                reasoningContent: startTagBuffer.slice(thinkStartChars.length),
+                content: ''
+              };
+            }
+
+            // No think tag matched: assume we are not inside one and return the buffered text as content
+            isInThinkTag = false;
+            return {
+              reasoningContent: '',
+              content: startTagBuffer
+            };
+          }
@@ -342,124 +351,145 @@ export const parseLLMStreamResponse = () => {
-      // Confirmed think-tag content: start emitting think content while watching for </think> in real time
-      /*
-        </think> detection: store anything that might be the closing tag until the
-        full </think> is seen or the buffer outgrows the tag length.
-        The content return value covers these cases:
-          abc - no closing tag matched
-          abc<th - partial closing-tag match
-          abc</think> - full closing-tag match
-          abc</think>abc - full closing-tag match
-          </think>abc - full closing-tag match
-          k>abc - partial closing-tag match
-      */
-      // endTagBuffer records content suspected to be the closing tag
-      if (endTagBuffer) {
-        endTagBuffer += content;
-        if (endTagBuffer.includes(thinkEndChars)) {
-          isInThinkTag = false;
-          const answer = endTagBuffer.slice(thinkEndChars.length);
-          return {
-            reasoningContent: '',
-            content: answer
-          };
-        } else if (endTagBuffer.length >= thinkEndChars.length) {
-          // The buffer outgrew the closing tag without matching </think>; the guess failed, still in the think phase.
-          const tmp = endTagBuffer;
-          endTagBuffer = '';
-          return {
-            reasoningContent: tmp,
-            content: ''
-          };
-        }
-        return {
-          reasoningContent: '',
-          content: ''
-        };
-      } else if (content.includes(thinkEndChars)) {
-        // Full </think> match in this chunk; finish immediately
-        isInThinkTag = false;
-        const [think, answer] = content.split(thinkEndChars);
-        return {
-          reasoningContent: think,
-          content: answer
-        };
-      } else {
-        // No buffer and no full </think>; start partial closing-tag detection.
-        for (let i = 1; i < thinkEndChars.length; i++) {
-          const partialEndTag = thinkEndChars.slice(0, i);
-          // Partial closing-tag match
-          if (content.endsWith(partialEndTag)) {
-            const think = content.slice(0, -partialEndTag.length);
-            endTagBuffer += partialEndTag;
-            return {
-              reasoningContent: think,
-              content: ''
-            };
-          }
-        }
-      }
-
-      // No closing tag matched at all; still in the think phase.
-      return {
-        reasoningContent: content,
-        content: ''
-      };
-    })();
-
-    // Parse dataset cite
-    if (retainDatasetCite) {
-      return {
-        reasoningContent: parsedThinkReasoningContent,
-        content: parsedThinkContent,
-        responseContent: parsedThinkContent,
-        finishReason
-      };
-    }
-
-    // Buffer strings containing [ and flush them once they exceed maxCiteBufferLength
-    const parseCite = (text: string) => {
-      // At stream end, return all remaining content
-      if (isStreamEnd) {
-        const content = citeBuffer + text;
-        return {
-          content: removeDatasetCiteText(content, false)
-        };
-      }
-
-      // New content contains [; initialize the buffer
-      if (text.includes('[')) {
-        const index = text.indexOf('[');
-        const beforeContent = citeBuffer + text.slice(0, index);
-        citeBuffer = text.slice(index);
-
-        // beforeContent may be a plain string or one containing [
-        return {
-          content: removeDatasetCiteText(beforeContent, false)
-        };
-      }
-      // Inside the cite buffer; check whether the flush condition is met
-      else if (citeBuffer) {
-        citeBuffer += text;
-
-        // Check whether the buffer has reached the full quote length or the stream has ended
-        if (citeBuffer.length >= maxCiteBufferLength) {
-          const content = removeDatasetCiteText(citeBuffer, false);
-          citeBuffer = '';
-
-          return {
-            content
-          };
-        } else {
-          // Return nothing for now
-          return { content: '' };
-        }
-      }
-
-      return {
-        content: text
-      };
-    };
-    const { content: pasedCiteContent } = parseCite(parsedThinkContent);
-
-    return {
-      reasoningContent: parsedThinkReasoningContent,
-      content: parsedThinkContent,
-      responseContent: pasedCiteContent,
-      finishReason
-    };
-  };
+          // Confirmed think-tag content: start emitting think content while watching for </think> in real time
+          /*
+            </think> detection: store anything that might be the closing tag until the
+            full </think> is seen or the buffer outgrows the tag length.
+            The content return value covers these cases:
+              abc - no closing tag matched
+              abc<th - partial closing-tag match
+              abc</think> - full closing-tag match
+              abc</think>abc - full closing-tag match
+              </think>abc - full closing-tag match
+              k>abc - partial closing-tag match
+          */
+          // endTagBuffer records content suspected to be the closing tag
+          if (endTagBuffer) {
+            endTagBuffer += content;
+            if (endTagBuffer.includes(thinkEndChars)) {
+              isInThinkTag = false;
+              const answer = endTagBuffer.slice(thinkEndChars.length);
+              return {
+                reasoningContent: '',
+                content: answer
+              };
+            } else if (endTagBuffer.length >= thinkEndChars.length) {
+              // The buffer outgrew the closing tag without matching </think>; the guess failed, still in the think phase.
+              const tmp = endTagBuffer;
+              endTagBuffer = '';
+              return {
+                reasoningContent: tmp,
+                content: ''
+              };
+            }
+            return {
+              reasoningContent: '',
+              content: ''
+            };
+          } else if (content.includes(thinkEndChars)) {
+            // Full </think> match in this chunk; finish immediately
+            isInThinkTag = false;
+            const [think, answer] = content.split(thinkEndChars);
+            return {
+              reasoningContent: think,
+              content: answer
+            };
+          } else {
+            // No buffer and no full </think>; start partial closing-tag detection.
+            for (let i = 1; i < thinkEndChars.length; i++) {
+              const partialEndTag = thinkEndChars.slice(0, i);
+              // Partial closing-tag match
+              if (content.endsWith(partialEndTag)) {
+                const think = content.slice(0, -partialEndTag.length);
+                endTagBuffer += partialEndTag;
+                return {
+                  reasoningContent: think,
+                  content: ''
+                };
+              }
+            }
+          }
+
+          // No closing tag matched at all; still in the think phase.
+          return {
+            reasoningContent: content,
+            content: ''
+          };
+        })();
+
+      // Parse dataset cite
+      if (retainDatasetCite) {
+        return {
+          reasoningContent: parsedThinkReasoningContent,
+          content: parsedThinkContent,
+          responseContent: parsedThinkContent,
+          finishReason: buffer_finishReason
+        };
+      }
+
+      // Buffer strings containing [ and flush them once they exceed maxCiteBufferLength
+      const parseCite = (text: string) => {
+        // At stream end, return all remaining content
+        if (isStreamEnd) {
+          const content = citeBuffer + text;
+          return {
+            content: removeDatasetCiteText(content, false)
+          };
+        }
+
+        // New content contains [; initialize the buffer
+        if (text.includes('[')) {
+          const index = text.indexOf('[');
+          const beforeContent = citeBuffer + text.slice(0, index);
+          citeBuffer = text.slice(index);
+
+          // beforeContent may be a plain string or one containing [
+          return {
+            content: removeDatasetCiteText(beforeContent, false)
+          };
+        }
+        // Inside the cite buffer; check whether the flush condition is met
+        else if (citeBuffer) {
+          citeBuffer += text;
+
+          // Check whether the buffer has reached the full quote length or the stream has ended
+          if (citeBuffer.length >= maxCiteBufferLength) {
+            const content = removeDatasetCiteText(citeBuffer, false);
+            citeBuffer = '';
+
+            return {
+              content
+            };
+          } else {
+            // Return nothing for now
+            return { content: '' };
+          }
+        }
+
+        return {
+          content: text
+        };
+      };
+      const { content: pasedCiteContent } = parseCite(parsedThinkContent);
+
+      return {
+        reasoningContent: parsedThinkReasoningContent,
+        content: parsedThinkContent,
+        responseContent: pasedCiteContent,
+        finishReason: buffer_finishReason
+      };
+    })();
+
+    buffer_reasoningContent += data.reasoningContent;
+    buffer_content += data.content;
+
+    return data;
+  };
+
+  const getResponseData = () => {
+    return {
+      finish_reason: buffer_finishReason,
+      usage: buffer_usage,
+      reasoningContent: buffer_reasoningContent,
+      content: buffer_content
+    };
+  };
+
+  const updateFinishReason = (finishReason: CompletionFinishReason) => {
+    buffer_finishReason = finishReason;
+  };

   return {
-    parsePart
+    parsePart,
+    getResponseData,
+    updateFinishReason
   };
 };
```
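A minimal consumption sketch of the reworked parser, under the assumption of an OpenAI-style async iterable of chat completion chunks; `parsePart`, `getResponseData`, and `updateFinishReason` are the functions returned in the diff above, and the import path is an assumption:

```ts
import { parseLLMStreamResponse } from '@fastgpt/service/core/ai/utils';

declare const stream: AsyncIterable<any>; // stand-in for the SDK's chunk stream

const { parsePart, getResponseData, updateFinishReason } = parseLLMStreamResponse();

for await (const part of stream) {
  const { reasoningContent, responseContent } = parsePart({
    part,
    parseThinkTag: true,
    retainDatasetCite: false
  });
  if (reasoningContent) process.stdout.write(reasoningContent); // <think> tokens
  if (responseContent) process.stdout.write(responseContent); // visible answer, cites stripped
}

// The new buffers replace per-chunk bookkeeping in callers:
const { content, finish_reason, usage } = getResponseData();
if (!finish_reason) updateFinishReason('stop'); // e.g. when the stream ended without one
```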
```diff
@@ -11,40 +11,6 @@ export const beforeUpdateAppFormat = <T extends AppSchema['modules'] | undefined
   nodes: T;
   isPlugin: boolean;
 }) => {
-  if (nodes) {
-    // Check dataset maxTokens
-    if (isPlugin) {
-      let maxTokens = 16000;
-
-      nodes.forEach((item) => {
-        if (
-          item.flowNodeType === FlowNodeTypeEnum.chatNode ||
-          item.flowNodeType === FlowNodeTypeEnum.tools
-        ) {
-          const model =
-            item.inputs.find((item) => item.key === NodeInputKeyEnum.aiModel)?.value || '';
-          const chatModel = getLLMModel(model);
-          const quoteMaxToken = chatModel.quoteMaxToken || 16000;
-
-          maxTokens = Math.max(maxTokens, quoteMaxToken);
-        }
-      });
-
-      nodes.forEach((item) => {
-        if (item.flowNodeType === FlowNodeTypeEnum.datasetSearchNode) {
-          item.inputs.forEach((input) => {
-            if (input.key === NodeInputKeyEnum.datasetMaxTokens) {
-              const val = input.value as number;
-              if (val > maxTokens) {
-                input.value = maxTokens;
-              }
-            }
-          });
-        }
-      });
-    }
-  }

   return {
     nodes
   };
```
```diff
@@ -61,6 +61,7 @@ const ChatItemSchema = new Schema({
     type: Array,
     default: []
   },
+  errorMsg: String,
   userGoodFeedback: {
     type: String
   },
```
```diff
@@ -34,6 +34,10 @@ const ChatSchema = new Schema({
     ref: AppCollectionName,
     required: true
   },
+  createTime: {
+    type: Date,
+    default: () => new Date()
+  },
   updateTime: {
     type: Date,
     default: () => new Date()
```
```diff
@@ -32,6 +32,7 @@ type Props = {
   content: [UserChatItemType & { dataId?: string }, AIChatItemType & { dataId?: string }];
   metadata?: Record<string, any>;
   durationSeconds: number; //s
+  errorMsg?: string;
 };

 export async function saveChat({
@@ -50,6 +51,7 @@ export async function saveChat({
   outLinkUid,
   content,
   durationSeconds,
+  errorMsg,
   metadata = {}
 }: Props) {
   if (!chatId || chatId === 'NO_RECORD_HISTORIES') return;
@@ -104,7 +106,8 @@ export async function saveChat({
       return {
         ...item,
         [DispatchNodeResponseKeyEnum.nodeResponse]: nodeResponse,
-        durationSeconds
+        durationSeconds,
+        errorMsg
       };
     }
     return item;
```
```diff
@@ -65,8 +65,8 @@ export const filterGPTMessageByMaxContext = async ({
     if (lastMessage.role === ChatCompletionRequestMessageRoleEnum.User) {
       const tokens = await countGptMessagesTokens([lastMessage, ...tmpChats]);
       maxContext -= tokens;
-      // This round's total tokens exceed the budget; drop it
-      if (maxContext < 0) {
+      // This round's total tokens exceed the budget; drop it, but keep at least one group.
+      if (maxContext < 0 && chats.length > 0) {
         break;
       }
```
```diff
@@ -2,7 +2,9 @@ import type {
   APIFileListResponse,
   ApiFileReadContentResponse,
   APIFileReadResponse,
-  APIFileServer
+  ApiDatasetDetailResponse,
+  APIFileServer,
+  APIFileItem
 } from '@fastgpt/global/core/dataset/apiDataset';
 import axios, { type Method } from 'axios';
 import { addLog } from '../../../common/system/log';
@@ -89,7 +91,7 @@ export const useApiDatasetRequest = ({ apiServer }: { apiServer: APIFileServer }
     `/v1/file/list`,
     {
       searchKey,
-      parentId
+      parentId: parentId || apiServer.basePath
     },
     'POST'
   );
@@ -144,7 +146,8 @@ export const useApiDatasetRequest = ({ apiServer }: { apiServer: APIFileServer }
     tmbId,
     url: previewUrl,
     relatedId: apiFileId,
-    customPdfParse
+    customPdfParse,
+    getFormatText: true
   });
   return {
     title,
@@ -164,9 +167,34 @@ export const useApiDatasetRequest = ({ apiServer }: { apiServer: APIFileServer }
     return url;
   };

+  const getFileDetail = async ({
+    apiFileId
+  }: {
+    apiFileId: string;
+  }): Promise<ApiDatasetDetailResponse> => {
+    const fileData = await request<ApiDatasetDetailResponse>(
+      `/v1/file/detail`,
+      {
+        id: apiFileId
+      },
+      'GET'
+    );
+
+    if (fileData) {
+      return {
+        id: fileData.id,
+        name: fileData.name,
+        parentId: fileData.parentId === null ? '' : fileData.parentId
+      };
+    }
+
+    return Promise.reject('File not found');
+  };
+
   return {
     getFileContent,
     listFiles,
-    getFilePreviewUrl
+    getFilePreviewUrl,
+    getFileDetail
   };
 };
```
packages/service/core/dataset/apiDataset/index.ts (new file, 27 additions)
```diff
@@ -0,0 +1,27 @@
+import type {
+  APIFileServer,
+  YuqueServer,
+  FeishuServer
+} from '@fastgpt/global/core/dataset/apiDataset';
+import { useApiDatasetRequest } from './api';
+import { useYuqueDatasetRequest } from '../yuqueDataset/api';
+import { useFeishuDatasetRequest } from '../feishuDataset/api';
+
+export const getApiDatasetRequest = async (data: {
+  apiServer?: APIFileServer;
+  yuqueServer?: YuqueServer;
+  feishuServer?: FeishuServer;
+}) => {
+  const { apiServer, yuqueServer, feishuServer } = data;
+
+  if (apiServer) {
+    return useApiDatasetRequest({ apiServer });
+  }
+  if (yuqueServer) {
+    return useYuqueDatasetRequest({ yuqueServer });
+  }
+  if (feishuServer) {
+    return useFeishuDatasetRequest({ feishuServer });
+  }
+  return Promise.reject('Can not find api dataset server');
+};
```
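Callers now go through one dispatcher instead of branching per provider. A hedged usage sketch; the method names come from the adapter above, while the argument shapes are assumptions:

```ts
// `apiServer` is assumed to be a configured APIFileServer; passing yuqueServer
// or feishuServer instead selects the matching adapter.
const api = await getApiDatasetRequest({ apiServer });
const files = await api.listFiles({ searchKey: '', parentId: null });
const detail = await api.getFileDetail({ apiFileId: files[0].id });
console.log(detail.parentId); // '' for root-level files
```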
```diff
@@ -1,30 +0,0 @@
-import { type ParentIdType } from '@fastgpt/global/common/parentFolder/type';
-import { type FeishuServer, type YuqueServer } from '@fastgpt/global/core/dataset/apiDataset';
-
-export enum ProApiDatasetOperationTypeEnum {
-  LIST = 'list',
-  READ = 'read',
-  CONTENT = 'content',
-  DETAIL = 'detail'
-}
-
-export type ProApiDatasetCommonParams = {
-  feishuServer?: FeishuServer;
-  yuqueServer?: YuqueServer;
-};
-
-export type GetProApiDatasetFileListParams = ProApiDatasetCommonParams & {
-  parentId?: ParentIdType;
-};
-
-export type GetProApiDatasetFileContentParams = ProApiDatasetCommonParams & {
-  apiFileId: string;
-};
-
-export type GetProApiDatasetFilePreviewUrlParams = ProApiDatasetCommonParams & {
-  apiFileId: string;
-};
-
-export type GetProApiDatasetFileDetailParams = ProApiDatasetCommonParams & {
-  apiFileId: string;
-};
```
@@ -34,15 +34,17 @@ import { getTrainingModeByCollection } from './utils';
import {
  computeChunkSize,
  computeChunkSplitter,
  computeParagraphChunkDeep,
  getLLMMaxChunkSize
} from '@fastgpt/global/core/dataset/training/utils';
import { DatasetDataIndexTypeEnum } from '@fastgpt/global/core/dataset/data/constants';

export const createCollectionAndInsertData = async ({
  dataset,
  rawText,
  relatedId,
  createCollectionParams,
  isQAImport = false,
  backupParse = false,
  billId,
  session
}: {
@@ -50,8 +52,8 @@ export const createCollectionAndInsertData = async ({
  rawText: string;
  relatedId?: string;
  createCollectionParams: CreateOneCollectionParams;
  backupParse?: boolean;

  isQAImport?: boolean;
  billId?: string;
  session?: ClientSession;
}) => {
@@ -73,15 +75,30 @@ export const createCollectionAndInsertData = async ({
    llmModel: getLLMModel(dataset.agentModel)
  });
  const chunkSplitter = computeChunkSplitter(createCollectionParams);
  const paragraphChunkDeep = computeParagraphChunkDeep(createCollectionParams);

  if (trainingType === DatasetCollectionDataProcessModeEnum.qa) {
    delete createCollectionParams.chunkTriggerType;
    delete createCollectionParams.chunkTriggerMinSize;
    delete createCollectionParams.dataEnhanceCollectionName;
    delete createCollectionParams.imageIndex;
    delete createCollectionParams.autoIndexes;
    delete createCollectionParams.indexSize;
    delete createCollectionParams.qaPrompt;
  }

  // 1. split chunks
  const chunks = rawText2Chunks({
    rawText,
    chunkTriggerType: createCollectionParams.chunkTriggerType,
    chunkTriggerMinSize: createCollectionParams.chunkTriggerMinSize,
    chunkSize,
    paragraphChunkDeep,
    paragraphChunkMinSize: createCollectionParams.paragraphChunkMinSize,
    maxSize: getLLMMaxChunkSize(getLLMModel(dataset.agentModel)),
    overlapRatio: trainingType === DatasetCollectionDataProcessModeEnum.chunk ? 0.2 : 0,
    customReg: chunkSplitter ? [chunkSplitter] : [],
    isQAImport
    backupParse
  });

  // 2. auth limit
@@ -102,6 +119,7 @@ export const createCollectionAndInsertData = async ({
  const { _id: collectionId } = await createOneCollection({
    ...createCollectionParams,
    trainingType,
    paragraphChunkDeep,
    chunkSize,
    chunkSplitter,

@@ -157,6 +175,10 @@ export const createCollectionAndInsertData = async ({
    billId: traingBillId,
    data: chunks.map((item, index) => ({
      ...item,
      indexes: item.indexes?.map((text) => ({
        type: DatasetDataIndexTypeEnum.custom,
        text
      })),
      chunkIndex: index
    })),
    session
@@ -198,46 +220,19 @@ export type CreateOneCollectionParams = CreateDatasetCollectionParams & {
  tmbId: string;
  session?: ClientSession;
};
export async function createOneCollection({
  teamId,
  tmbId,
  name,
  parentId,
  datasetId,
  type,
export async function createOneCollection({ session, ...props }: CreateOneCollectionParams) {
  const {
    teamId,
    parentId,
    datasetId,
    tags,

    createTime,
    updateTime,

    hashRawText,
    rawTextLength,
    metadata = {},
    tags,

    nextSyncTime,

    fileId,
    rawLink,
    externalFileId,
    externalFileUrl,
    apiFileId,

    // Parse settings
    customPdfParse,
    imageIndex,
    autoIndexes,

    // Chunk settings
    trainingType,
    chunkSettingMode,
    chunkSplitMode,
    chunkSize,
    indexSize,
    chunkSplitter,
    qaPrompt,

    session
  }: CreateOneCollectionParams) {
    fileId,
    rawLink,
    externalFileId,
    externalFileUrl,
    apiFileId
  } = props;
  // Create collection tags
  const collectionTags = await createOrGetCollectionTags({ tags, teamId, datasetId, session });

@@ -245,41 +240,18 @@ export async function createOneCollection
  const [collection] = await MongoDatasetCollection.create(
    [
      {
        ...props,
        teamId,
        tmbId,
        parentId: parentId || null,
        datasetId,
        name,
        type,

        rawTextLength,
        hashRawText,
        tags: collectionTags,
        metadata,

        createTime,
        updateTime,
        nextSyncTime,

        ...(fileId ? { fileId } : {}),
        ...(rawLink ? { rawLink } : {}),
        ...(externalFileId ? { externalFileId } : {}),
        ...(externalFileUrl ? { externalFileUrl } : {}),
        ...(apiFileId ? { apiFileId } : {}),

        // Parse settings
        customPdfParse,
        imageIndex,
        autoIndexes,

        // Chunk settings
        trainingType,
        chunkSettingMode,
        chunkSplitMode,
        chunkSize,
        indexSize,
        chunkSplitter,
        qaPrompt
        ...(apiFileId ? { apiFileId } : {})
      }
    ],
    { session, ordered: true }
@@ -34,9 +34,9 @@ const DatasetDataTextSchema = new Schema({

try {
  DatasetDataTextSchema.index(
    { teamId: 1, datasetId: 1, fullTextToken: 'text' },
    { teamId: 1, fullTextToken: 'text' },
    {
      name: 'teamId_1_datasetId_1_fullTextToken_text',
      name: 'teamId_1_fullTextToken_text',
      default_language: 'none'
    }
  );
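Dropping datasetId from the compound text index means dataset scoping has to happen in the query itself, which the searchDatasetData hunk later in this diff does by filtering datasetId inside $match. A sketch of the resulting query shape; the identifiers follow that hunk:

// teamId, datasetIds, query, limit and jiebaSplit all come from the search hunk below.
const results = await MongoDatasetDataText.aggregate([
  {
    $match: {
      teamId: new Types.ObjectId(teamId),
      $text: { $search: await jiebaSplit({ text: query }) },
      datasetId: { $in: datasetIds.map((id) => new Types.ObjectId(id)) }
    }
  },
  { $sort: { score: { $meta: 'textScore' } } },
  { $limit: limit }
]);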
208  packages/service/core/dataset/feishuDataset/api.ts  Normal file
@@ -0,0 +1,208 @@
import type {
  APIFileItem,
  ApiFileReadContentResponse,
  ApiDatasetDetailResponse,
  FeishuServer
} from '@fastgpt/global/core/dataset/apiDataset';
import { type ParentIdType } from '@fastgpt/global/common/parentFolder/type';
import axios, { type Method } from 'axios';
import { addLog } from '../../../common/system/log';

type ResponseDataType = {
  success: boolean;
  message: string;
  data: any;
};

type FeishuFileListResponse = {
  files: {
    token: string;
    parent_token: string;
    name: string;
    type: string;
    modified_time: number;
    created_time: number;
    url: string;
    owner_id: string;
  }[];
  has_more: boolean;
  next_page_token: string;
};

const feishuBaseUrl = process.env.FEISHU_BASE_URL || 'https://open.feishu.cn';

export const useFeishuDatasetRequest = ({ feishuServer }: { feishuServer: FeishuServer }) => {
  const instance = axios.create({
    baseURL: feishuBaseUrl,
    timeout: 60000
  });

  // Add a request interceptor: fetch a tenant access token when none is attached
  instance.interceptors.request.use(async (config) => {
    if (!config.headers.Authorization) {
      const { data } = await axios.post<{ tenant_access_token: string }>(
        `${feishuBaseUrl}/open-apis/auth/v3/tenant_access_token/internal`,
        {
          app_id: feishuServer.appId,
          app_secret: feishuServer.appSecret
        }
      );

      config.headers['Authorization'] = `Bearer ${data.tenant_access_token}`;
      config.headers['Content-Type'] = 'application/json; charset=utf-8';
    }
    return config;
  });

  /**
   * Response data check
   */
  const checkRes = (data: ResponseDataType) => {
    if (data === undefined) {
      addLog.info('feishu dataset data is empty');
      return Promise.reject('服务器异常');
    }
    return data.data;
  };
  const responseError = (err: any) => {
    console.log('error->', '请求错误', err);

    if (!err) {
      return Promise.reject({ message: '未知错误' });
    }
    if (typeof err === 'string') {
      return Promise.reject({ message: err });
    }
    if (typeof err.message === 'string') {
      return Promise.reject({ message: err.message });
    }
    if (typeof err.data === 'string') {
      return Promise.reject({ message: err.data });
    }
    if (err?.response?.data) {
      return Promise.reject(err?.response?.data);
    }
    return Promise.reject(err);
  };

  const request = <T>(url: string, data: any, method: Method): Promise<T> => {
    /* Strip undefined fields */
    for (const key in data) {
      if (data[key] === undefined) {
        delete data[key];
      }
    }

    return instance
      .request({
        url,
        method,
        data: ['POST', 'PUT'].includes(method) ? data : undefined,
        params: !['POST', 'PUT'].includes(method) ? data : undefined
      })
      .then((res) => checkRes(res.data))
      .catch((err) => responseError(err));
  };

  const listFiles = async ({ parentId }: { parentId?: ParentIdType }): Promise<APIFileItem[]> => {
    const fetchFiles = async (pageToken?: string): Promise<FeishuFileListResponse['files']> => {
      const data = await request<FeishuFileListResponse>(
        `/open-apis/drive/v1/files`,
        {
          folder_token: parentId || feishuServer.folderToken,
          page_size: 200,
          page_token: pageToken
        },
        'GET'
      );

      if (data.has_more) {
        const nextFiles = await fetchFiles(data.next_page_token);
        return [...data.files, ...nextFiles];
      }

      return data.files;
    };

    const allFiles = await fetchFiles();

    return allFiles
      .filter((file) => ['folder', 'docx'].includes(file.type))
      .map((file) => ({
        id: file.token,
        parentId: file.parent_token,
        name: file.name,
        type: file.type === 'folder' ? ('folder' as const) : ('file' as const),
        hasChild: file.type === 'folder',
        updateTime: new Date(file.modified_time * 1000),
        createTime: new Date(file.created_time * 1000)
      }));
  };

  const getFileContent = async ({
    apiFileId
  }: {
    apiFileId: string;
  }): Promise<ApiFileReadContentResponse> => {
    const [{ content }, { document }] = await Promise.all([
      request<{ content: string }>(
        `/open-apis/docx/v1/documents/${apiFileId}/raw_content`,
        {},
        'GET'
      ),
      request<{ document: { title: string } }>(
        `/open-apis/docx/v1/documents/${apiFileId}`,
        {},
        'GET'
      )
    ]);

    return {
      title: document?.title,
      rawText: content
    };
  };

  const getFilePreviewUrl = async ({ apiFileId }: { apiFileId: string }): Promise<string> => {
    const { metas } = await request<{ metas: { url: string }[] }>(
      `/open-apis/drive/v1/metas/batch_query`,
      {
        request_docs: [
          {
            doc_token: apiFileId,
            doc_type: 'docx'
          }
        ],
        with_url: true
      },
      'POST'
    );

    return metas[0].url;
  };

  const getFileDetail = async ({
    apiFileId
  }: {
    apiFileId: string;
  }): Promise<ApiDatasetDetailResponse> => {
    const { document } = await request<{ document: { title: string } }>(
      `/open-apis/docx/v1/documents/${apiFileId}`,
      {},
      'GET'
    );

    return {
      name: document?.title,
      parentId: null,
      id: apiFileId
    };
  };

  return {
    getFileContent,
    listFiles,
    getFilePreviewUrl,
    getFileDetail
  };
};
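A short usage sketch of the new Feishu client; the config values are illustrative, and the shapes follow the file above:

const feishu = useFeishuDatasetRequest({
  feishuServer: { appId: 'cli_xxx', appSecret: 'xxx', folderToken: 'fldbcxxx' } // illustrative values
});
// listFiles pages through /open-apis/drive/v1/files recursively until has_more
// is false, then keeps only folders and docx documents.
const items = await feishu.listFiles({ parentId: undefined });
const { title, rawText } = await feishu.getFileContent({ apiFileId: items[0].id });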
@@ -1,8 +1,10 @@
import { BucketNameEnum } from '@fastgpt/global/common/file/constants';
import { DatasetSourceReadTypeEnum } from '@fastgpt/global/core/dataset/constants';
import {
  ChunkTriggerConfigTypeEnum,
  DatasetSourceReadTypeEnum
} from '@fastgpt/global/core/dataset/constants';
import { readFileContentFromMongo } from '../../common/file/gridfs/controller';
import { urlsFetch } from '../../common/string/cheerio';
import { parseCsvTable2Chunks } from './training/utils';
import { type TextSplitProps, splitText2Chunks } from '@fastgpt/global/common/string/textSplitter';
import axios from 'axios';
import { readRawContentByFileBuffer } from '../../common/file/read/utils';
@@ -12,19 +14,22 @@ import {
  type FeishuServer,
  type YuqueServer
} from '@fastgpt/global/core/dataset/apiDataset';
import { useApiDatasetRequest } from './apiDataset/api';
import { getApiDatasetRequest } from './apiDataset';
import Papa from 'papaparse';

export const readFileRawTextByUrl = async ({
  teamId,
  tmbId,
  url,
  customPdfParse,
  getFormatText,
  relatedId
}: {
  teamId: string;
  tmbId: string;
  url: string;
  customPdfParse?: boolean;
  getFormatText?: boolean;
  relatedId: string; // externalFileId / apiFileId
}) => {
  const response = await axios({
@@ -38,7 +43,7 @@ export const readFileRawTextByUrl = async ({

  const { rawText } = await readRawContentByFileBuffer({
    customPdfParse,
    isQAImport: false,
    getFormatText,
    extension,
    teamId,
    tmbId,
@@ -62,21 +67,21 @@ export const readDatasetSourceRawText = async ({
  tmbId,
  type,
  sourceId,
  isQAImport,
  selector,
  externalFileId,
  apiServer,
  feishuServer,
  yuqueServer,
  customPdfParse
  customPdfParse,
  getFormatText
}: {
  teamId: string;
  tmbId: string;
  type: DatasetSourceReadTypeEnum;
  sourceId: string;
  customPdfParse?: boolean;
  getFormatText?: boolean;

  isQAImport?: boolean; // csv data
  selector?: string; // link selector
  externalFileId?: string; // external file dataset
  apiServer?: APIFileServer; // api dataset
@@ -92,8 +97,8 @@ export const readDatasetSourceRawText = async ({
      tmbId,
      bucketName: BucketNameEnum.dataset,
      fileId: sourceId,
      isQAImport,
      customPdfParse
      customPdfParse,
      getFormatText
    });
    return {
      title: filename,
@@ -161,38 +166,82 @@ export const readApiServerFileContent = async ({
  title?: string;
  rawText: string;
}> => {
  if (apiServer) {
    return useApiDatasetRequest({ apiServer }).getFileContent({
      teamId,
      tmbId,
      apiFileId,
      customPdfParse
    });
  }

  if (feishuServer || yuqueServer) {
    return global.getProApiDatasetFileContent({
      feishuServer,
  return (
    await getApiDatasetRequest({
      apiServer,
      yuqueServer,
      apiFileId
    });
  }

  return Promise.reject('No apiServer or feishuServer or yuqueServer');
      feishuServer
    })
  ).getFileContent({
    teamId,
    tmbId,
    apiFileId,
    customPdfParse
  });
};

export const rawText2Chunks = ({
  rawText,
  isQAImport,
  chunkTriggerType = ChunkTriggerConfigTypeEnum.minSize,
  chunkTriggerMinSize = 1000,
  backupParse,
  chunkSize = 512,
  ...splitProps
}: {
  rawText: string;
  isQAImport?: boolean;
} & TextSplitProps) => {
  if (isQAImport) {
    const { chunks } = parseCsvTable2Chunks(rawText);
    return chunks;

  chunkTriggerType?: ChunkTriggerConfigTypeEnum;
  chunkTriggerMinSize?: number; // maxSize from agent model, not store

  backupParse?: boolean;
  tableParse?: boolean;
} & TextSplitProps): {
  q: string;
  a: string;
  indexes?: string[];
}[] => {
  const parseDatasetBackup2Chunks = (rawText: string) => {
    const csvArr = Papa.parse(rawText).data as string[][];
    console.log(rawText, csvArr);

    const chunks = csvArr
      .slice(1)
      .map((item) => ({
        q: item[0] || '',
        a: item[1] || '',
        indexes: item.slice(2)
      }))
      .filter((item) => item.q || item.a);

    return {
      chunks
    };
  };

  // Chunk condition
  // 1. Max-size trigger: chunking only fires once the text exceeds the max size (defaults to the model max * 0.7)
  if (chunkTriggerType === ChunkTriggerConfigTypeEnum.maxSize) {
    const textLength = rawText.trim().length;
    const maxSize = splitProps.maxSize ? splitProps.maxSize * 0.7 : 16000;
    if (textLength < maxSize) {
      return [
        {
          q: rawText,
          a: ''
        }
      ];
    }
  }
  // 2. Min-size trigger: chunking only fires once the text exceeds the (manually set) minimum
  if (chunkTriggerType !== ChunkTriggerConfigTypeEnum.forceChunk) {
    const textLength = rawText.trim().length;
    if (textLength < chunkTriggerMinSize) {
      return [{ q: rawText, a: '' }];
    }
  }

  if (backupParse) {
    return parseDatasetBackup2Chunks(rawText).chunks;
  }

  const { chunks } = splitText2Chunks({
@@ -203,6 +252,7 @@ export const rawText2Chunks = ({

  return chunks.map((item) => ({
    q: item,
    a: ''
    a: '',
    indexes: []
  }));
};
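The two trigger branches mean short texts bypass splitting entirely: with maxSize triggering, the threshold is splitProps.maxSize * 0.7 (or 16000 when maxSize is unset); otherwise anything under chunkTriggerMinSize comes back whole. A worked example with the defaults above:

// chunkTriggerType defaults to minSize and chunkTriggerMinSize to 1000, so a
// 300-character text is returned as a single chunk instead of being split:
const chunks = rawText2Chunks({ rawText: 'a 300-character text...', chunkSize: 512 });
// => [{ q: 'a 300-character text...', a: '' }]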
@@ -1,10 +1,12 @@
import { getMongoModel, Schema } from '../../common/mongo';
import {
  ChunkSettingModeEnum,
  ChunkTriggerConfigTypeEnum,
  DataChunkSplitModeEnum,
  DatasetCollectionDataProcessModeEnum,
  DatasetTypeEnum,
  DatasetTypeMap
  DatasetTypeMap,
  ParagraphChunkAIModeEnum
} from '@fastgpt/global/core/dataset/constants';
import {
  TeamCollectionName,
@@ -15,12 +17,22 @@ import type { DatasetSchemaType } from '@fastgpt/global/core/dataset/type.d';
export const DatasetCollectionName = 'datasets';

export const ChunkSettings = {
  imageIndex: Boolean,
  autoIndexes: Boolean,
  trainingType: {
    type: String,
    enum: Object.values(DatasetCollectionDataProcessModeEnum)
  },

  chunkTriggerType: {
    type: String,
    enum: Object.values(ChunkTriggerConfigTypeEnum)
  },
  chunkTriggerMinSize: Number,

  dataEnhanceCollectionName: Boolean,

  imageIndex: Boolean,
  autoIndexes: Boolean,

  chunkSettingMode: {
    type: String,
    enum: Object.values(ChunkSettingModeEnum)
@@ -29,6 +41,12 @@ export const ChunkSettings = {
    type: String,
    enum: Object.values(DataChunkSplitModeEnum)
  },
  paragraphChunkAIMode: {
    type: String,
    enum: Object.values(ParagraphChunkAIModeEnum)
  },
  paragraphChunkDeep: Number,
  paragraphChunkMinSize: Number,
  chunkSize: Number,
  chunkSplitter: String,

@@ -115,9 +133,7 @@ const DatasetSchema = new Schema({

  // abandoned
  autoSync: Boolean,
  externalReadUrl: {
    type: String
  },
  externalReadUrl: String,
  defaultPermission: Number
});
@@ -27,6 +27,7 @@ import { type ChatItemType } from '@fastgpt/global/core/chat/type';
import type { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { datasetSearchQueryExtension } from './utils';
import type { RerankModelItemType } from '@fastgpt/global/core/ai/model.d';
import { addLog } from '../../../common/system/log';

export type SearchDatasetDataProps = {
  histories: ChatItemType[];
@@ -544,123 +545,125 @@ export async function searchDatasetData(
    };
  }

  const searchResults = (
    await Promise.all(
      datasetIds.map(async (id) => {
        return MongoDatasetDataText.aggregate(
          [
            {
              $match: {
                teamId: new Types.ObjectId(teamId),
                datasetId: new Types.ObjectId(id),
                $text: { $search: await jiebaSplit({ text: query }) },
                ...(filterCollectionIdList
                  ? {
                      collectionId: {
                        $in: filterCollectionIdList.map((id) => new Types.ObjectId(id))
                      }
                    }
                  : {}),
                ...(forbidCollectionIdList && forbidCollectionIdList.length > 0
                  ? {
                      collectionId: {
                        $nin: forbidCollectionIdList.map((id) => new Types.ObjectId(id))
                      }
                    }
                  : {})
              }
            },
            {
              $sort: {
                score: { $meta: 'textScore' }
              }
            },
            {
              $limit: limit
            },
            {
              $project: {
                _id: 1,
                collectionId: 1,
                dataId: 1,
                score: { $meta: 'textScore' }
              }
            }
          ],
          {
            ...readFromSecondary
  try {
    const searchResults = (await MongoDatasetDataText.aggregate(
      [
        {
          $match: {
            teamId: new Types.ObjectId(teamId),
            $text: { $search: await jiebaSplit({ text: query }) },
            datasetId: { $in: datasetIds.map((id) => new Types.ObjectId(id)) },
            ...(filterCollectionIdList
              ? {
                  collectionId: {
                    $in: filterCollectionIdList.map((id) => new Types.ObjectId(id))
                  }
                }
              : {}),
            ...(forbidCollectionIdList && forbidCollectionIdList.length > 0
              ? {
                  collectionId: {
                    $nin: forbidCollectionIdList.map((id) => new Types.ObjectId(id))
                  }
                }
              : {})
          }
        },
        {
          $sort: {
            score: { $meta: 'textScore' }
          }
        },
        {
          $limit: limit
        },
        {
          $project: {
            _id: 1,
            collectionId: 1,
            dataId: 1,
            score: { $meta: 'textScore' }
          }
        );
      })
    )
  ).flat() as (DatasetDataTextSchemaType & { score: number })[];

  // Get data and collections
  const [dataList, collections] = await Promise.all([
    MongoDatasetData.find(
      {
        _id: { $in: searchResults.map((item) => item.dataId) }
      },
      '_id datasetId collectionId updateTime q a chunkIndex indexes',
      { ...readFromSecondary }
    ).lean(),
    MongoDatasetCollection.find(
      {
        _id: { $in: searchResults.map((item) => item.collectionId) }
      },
      '_id name fileId rawLink apiFileId externalFileId externalFileUrl',
      { ...readFromSecondary }
    ).lean()
  ]);

  return {
    fullTextRecallResults: searchResults
      .map((item, index) => {
        const collection = collections.find(
          (col) => String(col._id) === String(item.collectionId)
        );
        if (!collection) {
          console.log('Collection is not found', item);
          return;
        }
        const data = dataList.find((data) => String(data._id) === String(item.dataId));
        if (!data) {
          console.log('Data is not found', item);
          return;
        }
      ],
      {
        ...readFromSecondary
      }
    )) as (DatasetDataTextSchemaType & { score: number })[];

        return {
          id: String(data._id),
          datasetId: String(data.datasetId),
          collectionId: String(data.collectionId),
          updateTime: data.updateTime,
          q: data.q,
          a: data.a,
          chunkIndex: data.chunkIndex,
          indexes: data.indexes,
          ...getCollectionSourceData(collection),
          score: [
            {
              type: SearchScoreTypeEnum.fullText,
              value: item.score || 0,
              index
            }
          ]
        };
      })
      .filter((item) => {
        if (!item) return false;
        return true;
      })
      .map((item, index) => {
        if (!item) return;
        return {
          ...item,
          score: item.score.map((item) => ({ ...item, index }))
        };
      }) as SearchDataResponseItemType[],
    tokenLen: 0
  };
    // Get data and collections
    const [dataList, collections] = await Promise.all([
      MongoDatasetData.find(
        {
          _id: { $in: searchResults.map((item) => item.dataId) }
        },
        '_id datasetId collectionId updateTime q a chunkIndex indexes',
        { ...readFromSecondary }
      ).lean(),
      MongoDatasetCollection.find(
        {
          _id: { $in: searchResults.map((item) => item.collectionId) }
        },
        '_id name fileId rawLink apiFileId externalFileId externalFileUrl',
        { ...readFromSecondary }
      ).lean()
    ]);

    return {
      fullTextRecallResults: searchResults
        .map((item, index) => {
          const collection = collections.find(
            (col) => String(col._id) === String(item.collectionId)
          );
          if (!collection) {
            console.log('Collection is not found', item);
            return;
          }
          const data = dataList.find((data) => String(data._id) === String(item.dataId));
          if (!data) {
            console.log('Data is not found', item);
            return;
          }

          return {
            id: String(data._id),
            datasetId: String(data.datasetId),
            collectionId: String(data.collectionId),
            updateTime: data.updateTime,
            q: data.q,
            a: data.a,
            chunkIndex: data.chunkIndex,
            indexes: data.indexes,
            ...getCollectionSourceData(collection),
            score: [
              {
                type: SearchScoreTypeEnum.fullText,
                value: item.score || 0,
                index
              }
            ]
          };
        })
        .filter((item) => {
          if (!item) return false;
          return true;
        })
        .map((item, index) => {
          if (!item) return;
          return {
            ...item,
            score: item.score.map((item) => ({ ...item, index }))
          };
        }) as SearchDataResponseItemType[],
      tokenLen: 0
    };
  } catch (error) {
    addLog.error('Full text search error', error);
    return {
      fullTextRecallResults: [],
      tokenLen: 0
    };
  }
};
const multiQueryRecall = async ({
  embeddingLimit,
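The refactor collapses the per-dataset Promise.all into a single aggregate over all datasetIds and wraps it in try/catch, so a full-text failure degrades to an empty recall instead of failing the whole search. A generic sketch of that degradation pattern; the helper name is illustrative, not from the diff:

async function recallWithFallback<T>(step: () => Promise<T>, fallback: T, label: string): Promise<T> {
  try {
    return await step();
  } catch (error) {
    // Log and degrade instead of propagating, mirroring the catch branch above.
    console.error(`${label} recall failed, returning fallback`, error);
    return fallback;
  }
}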
@@ -1,6 +1,5 @@
export enum ImportDataSourceEnum {
  fileLocal = 'fileLocal',
  fileLink = 'fileLink',
  fileCustom = 'fileCustom',
  tableLocal = 'tableLocal'
  fileCustom = 'fileCustom'
}
@@ -1,16 +0,0 @@
import Papa from 'papaparse';

export const parseCsvTable2Chunks = (rawText: string) => {
  const csvArr = Papa.parse(rawText).data as string[][];

  const chunks = csvArr
    .map((item) => ({
      q: item[0] || '',
      a: item[1] || ''
    }))
    .filter((item) => item.q || item.a);

  return {
    chunks
  };
};

304  packages/service/core/dataset/yuqueDataset/api.ts  Normal file
@@ -0,0 +1,304 @@
import type {
  APIFileItem,
  ApiFileReadContentResponse,
  YuqueServer,
  ApiDatasetDetailResponse
} from '@fastgpt/global/core/dataset/apiDataset';
import axios, { type Method } from 'axios';
import { addLog } from '../../../common/system/log';
import { type ParentIdType } from '@fastgpt/global/common/parentFolder/type';

type ResponseDataType = {
  success: boolean;
  message: string;
  data: any;
};

type YuqueRepoListResponse = {
  id: string;
  name: string;
  title: string;
  book_id: string | null;
  type: string;
  updated_at: Date;
  created_at: Date;
  slug?: string;
}[];

type YuqueTocListResponse = {
  uuid: string;
  type: string;
  title: string;
  url: string;
  slug: string;
  id: string;
  doc_id: string;
  prev_uuid: string;
  sibling_uuid: string;
  child_uuid: string;
  parent_uuid: string;
}[];

const yuqueBaseUrl = process.env.YUQUE_DATASET_BASE_URL || 'https://www.yuque.com';

export const useYuqueDatasetRequest = ({ yuqueServer }: { yuqueServer: YuqueServer }) => {
  const instance = axios.create({
    baseURL: yuqueBaseUrl,
    timeout: 60000, // request timeout
    headers: {
      'X-Auth-Token': yuqueServer.token
    }
  });

  /**
   * Response data check
   */
  const checkRes = (data: ResponseDataType) => {
    if (data === undefined) {
      addLog.info('yuque dataset data is empty');
      return Promise.reject('服务器异常');
    }
    return data.data;
  };
  const responseError = (err: any) => {
    console.log('error->', '请求错误', err);

    if (!err) {
      return Promise.reject({ message: '未知错误' });
    }
    if (typeof err === 'string') {
      return Promise.reject({ message: err });
    }
    if (typeof err.message === 'string') {
      return Promise.reject({ message: err.message });
    }
    if (typeof err.data === 'string') {
      return Promise.reject({ message: err.data });
    }
    if (err?.response?.data) {
      return Promise.reject(err?.response?.data);
    }
    return Promise.reject(err);
  };

  const request = <T>(url: string, data: any, method: Method): Promise<T> => {
    /* Strip undefined fields */
    for (const key in data) {
      if (data[key] === undefined) {
        delete data[key];
      }
    }

    return instance
      .request({
        url,
        method,
        data: ['POST', 'PUT'].includes(method) ? data : undefined,
        params: !['POST', 'PUT'].includes(method) ? data : undefined
      })
      .then((res) => checkRes(res.data))
      .catch((err) => responseError(err));
  };

  const listFiles = async ({ parentId }: { parentId?: ParentIdType }) => {
    // Auto set baseurl to parentId
    if (!parentId) {
      if (yuqueServer.basePath) parentId = yuqueServer.basePath;
    }

    let files: APIFileItem[] = [];

    if (!parentId) {
      const limit = 100;
      let offset = 0;
      let allData: YuqueRepoListResponse = [];

      while (true) {
        const data = await request<YuqueRepoListResponse>(
          `/api/v2/groups/${yuqueServer.userId}/repos`,
          {
            offset,
            limit
          },
          'GET'
        );

        if (!data || data.length === 0) break;

        allData = [...allData, ...data];
        if (data.length < limit) break;

        offset += limit;
      }

      files = allData.map((item) => {
        return {
          id: item.id,
          name: item.name,
          parentId: null,
          type: 'folder',
          updateTime: item.updated_at,
          createTime: item.created_at,
          hasChild: true,
          slug: item.slug
        };
      });
    } else {
      if (typeof parentId === 'number') {
        const data = await request<YuqueTocListResponse>(
          `/api/v2/repos/${parentId}/toc`,
          {},
          'GET'
        );

        return data
          .filter((item) => !item.parent_uuid && item.type !== 'LINK')
          .map((item) => ({
            id: `${parentId}-${item.id}-${item.uuid}`,
            name: item.title,
            parentId: item.parent_uuid,
            type: item.type === 'TITLE' ? ('folder' as const) : ('file' as const),
            updateTime: new Date(),
            createTime: new Date(),
            uuid: item.uuid,
            slug: item.slug,
            hasChild: !!item.child_uuid
          }));
      } else {
        const [repoId, uuid, parentUuid] = parentId.split(/-(.*?)-(.*)/);
        const data = await request<YuqueTocListResponse>(`/api/v2/repos/${repoId}/toc`, {}, 'GET');

        return data
          .filter((item) => item.parent_uuid === parentUuid)
          .map((item) => ({
            id: `${repoId}-${item.id}-${item.uuid}`,
            name: item.title,
            parentId: item.parent_uuid,
            type: item.type === 'TITLE' ? ('folder' as const) : ('file' as const),
            updateTime: new Date(),
            createTime: new Date(),
            uuid: item.uuid,
            slug: item.slug,
            hasChild: !!item.child_uuid
          }));
      }
    }

    if (!Array.isArray(files)) {
      return Promise.reject('Invalid file list format');
    }
    if (files.some((file) => !file.id || !file.name || typeof file.type === 'undefined')) {
      return Promise.reject('Invalid file data format');
    }
    return files;
  };

  const getFileContent = async ({
    apiFileId
  }: {
    apiFileId: string;
  }): Promise<ApiFileReadContentResponse> => {
    const [parentId, fileId] = apiFileId.split(/-(.*?)-(.*)/);

    const data = await request<{ title: string; body: string }>(
      `/api/v2/repos/${parentId}/docs/${fileId}`,
      {},
      'GET'
    );

    return {
      title: data.title,
      rawText: data.body
    };
  };

  const getFilePreviewUrl = async ({ apiFileId }: { apiFileId: string }) => {
    const [parentId, fileId] = apiFileId.split(/-(.*?)-(.*)/);

    const { slug: parentSlug } = await request<{ slug: string }>(
      `/api/v2/repos/${parentId}`,
      { id: apiFileId },
      'GET'
    );

    const { slug: fileSlug } = await request<{ slug: string }>(
      `/api/v2/repos/${parentId}/docs/${fileId}`,
      {},
      'GET'
    );

    return `${yuqueBaseUrl}/${yuqueServer.userId}/${parentSlug}/${fileSlug}`;
  };

  const getFileDetail = async ({
    apiFileId
  }: {
    apiFileId: string;
  }): Promise<ApiDatasetDetailResponse> => {
    // If the id is numeric, treat it as a repo and fetch the repo list
    if (typeof apiFileId === 'number' || !isNaN(Number(apiFileId))) {
      const limit = 100;
      let offset = 0;
      let allData: YuqueRepoListResponse = [];

      while (true) {
        const data = await request<YuqueRepoListResponse>(
          `/api/v2/groups/${yuqueServer.userId}/repos`,
          {
            offset,
            limit
          },
          'GET'
        );

        if (!data || data.length === 0) break;

        allData = [...allData, ...data];
        if (data.length < limit) break;

        offset += limit;
      }

      const file = allData.find((item) => Number(item.id) === Number(apiFileId));
      if (!file) {
        return Promise.reject('文件不存在');
      }
      return {
        id: file.id,
        name: file.name,
        parentId: null
      };
    } else {
      const [repoId, parentUuid, fileId] = apiFileId.split(/-(.*?)-(.*)/);
      const data = await request<YuqueTocListResponse>(`/api/v2/repos/${repoId}/toc`, {}, 'GET');
      const file = data.find((item) => item.uuid === fileId);
      if (!file) {
        return Promise.reject('文件不存在');
      }
      const parentfile = data.find((item) => item.uuid === file.parent_uuid);
      const parentId = `${repoId}-${parentfile?.id}-${parentfile?.uuid}`;

      // If parent_uuid is empty, the doc sits at the repo root, so return the repo as the parent
      if (file.parent_uuid) {
        return {
          id: file.id,
          name: file.title,
          parentId: parentId
        };
      } else {
        return {
          id: file.id,
          name: file.title,
          parentId: repoId
        };
      }
    }
  };

  return {
    getFileContent,
    listFiles,
    getFilePreviewUrl,
    getFileDetail
  };
};
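The composite file id used throughout this client packs the repo id, the toc entry id and its uuid into one string, and split(/-(.*?)-(.*)/) recovers the parts: with a capturing-group regex, String.prototype.split returns the leading text followed by each captured group. A worked example with an illustrative id:

const apiFileId = '123456-7890-abcd-ef01'; // illustrative composite id
const [repoId, docId, uuid] = apiFileId.split(/-(.*?)-(.*)/);
// repoId === '123456', docId === '7890', uuid === 'abcd-ef01'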
@@ -223,28 +223,29 @@ const toolChoice = async (props: ActionProps) => {
    }
  ];

  const body = llmCompletionsBodyFormat(
    {
      stream: true,
      model: extractModel.model,
      temperature: 0.01,
      messages: filterMessages,
      tools,
      tool_choice: { type: 'function', function: { name: agentFunName } }
    },
    extractModel
  );
  const { response } = await createChatCompletion({
    body: llmCompletionsBodyFormat(
      {
        stream: true,
        model: extractModel.model,
        temperature: 0.01,
        messages: filterMessages,
        tools,
        tool_choice: { type: 'function', function: { name: agentFunName } }
      },
      extractModel
    ),
    body,
    userKey: externalProvider.openaiAccount
  });
  const { toolCalls, usage } = await formatLLMResponse(response);
  const { text, toolCalls, usage } = await formatLLMResponse(response);

  const arg: Record<string, any> = (() => {
    try {
      return json5.parse(toolCalls?.[0]?.function?.arguments || '');
    } catch (error) {
      console.log(agentFunction.parameters);
      console.log(toolCalls?.[0]?.function);
      console.log('body', body);
      console.log('AI response', text, toolCalls?.[0]?.function);
      console.log('Your model may not support tool_call', error);
      return {};
    }
@@ -1,13 +1,14 @@
import { createChatCompletion } from '../../../../ai/config';
import { filterGPTMessageByMaxContext, loadRequestMessages } from '../../../../chat/utils';
import {
  type ChatCompletion,
  type StreamChatType,
  type ChatCompletionMessageParam,
  type ChatCompletionCreateParams,
  type ChatCompletionMessageFunctionCall,
  type ChatCompletionFunctionMessageParam,
  type ChatCompletionAssistantMessageParam
import type {
  ChatCompletion,
  StreamChatType,
  ChatCompletionMessageParam,
  ChatCompletionCreateParams,
  ChatCompletionMessageFunctionCall,
  ChatCompletionFunctionMessageParam,
  ChatCompletionAssistantMessageParam,
  CompletionFinishReason
} from '@fastgpt/global/core/ai/type.d';
import { type NextApiResponse } from 'next';
import { responseWriteController } from '../../../../../common/response';
@@ -259,14 +260,15 @@ export const runToolWithFunctionCall = async (
    }
  });

  let { answer, functionCalls, inputTokens, outputTokens } = await (async () => {
  let { answer, functionCalls, inputTokens, outputTokens, finish_reason } = await (async () => {
    if (isStreamResponse) {
      if (!res || res.closed) {
        return {
          answer: '',
          functionCalls: [],
          inputTokens: 0,
          outputTokens: 0
          outputTokens: 0,
          finish_reason: 'close' as const
        };
      }
      const result = await streamResponse({
@@ -281,10 +283,12 @@ export const runToolWithFunctionCall = async (
        answer: result.answer,
        functionCalls: result.functionCalls,
        inputTokens: result.usage.prompt_tokens,
        outputTokens: result.usage.completion_tokens
        outputTokens: result.usage.completion_tokens,
        finish_reason: result.finish_reason
      };
    } else {
      const result = aiResponse as ChatCompletion;
      const finish_reason = result.choices?.[0]?.finish_reason as CompletionFinishReason;
      const function_call = result.choices?.[0]?.message?.function_call;
      const usage = result.usage;

@@ -315,7 +319,8 @@ export const runToolWithFunctionCall = async (
        answer,
        functionCalls: toolCalls,
        inputTokens: usage?.prompt_tokens,
        outputTokens: usage?.completion_tokens
        outputTokens: usage?.completion_tokens,
        finish_reason
      };
    }
  })();
@@ -481,7 +486,8 @@ export const runToolWithFunctionCall = async (
      completeMessages,
      assistantResponses: toolNodeAssistants,
      runTimes,
      toolWorkflowInteractiveResponse
      toolWorkflowInteractiveResponse,
      finish_reason
    };
  }

@@ -495,7 +501,8 @@ export const runToolWithFunctionCall = async (
      toolNodeInputTokens,
      toolNodeOutputTokens,
      assistantResponses: toolNodeAssistants,
      runTimes
      runTimes,
      finish_reason
    }
  );
} else {
@@ -523,7 +530,8 @@ export const runToolWithFunctionCall = async (
      : outputTokens,
    completeMessages,
    assistantResponses: [...assistantResponses, ...toolNodeAssistant.value],
    runTimes: (response?.runTimes || 0) + 1
    runTimes: (response?.runTimes || 0) + 1,
    finish_reason
  };
}
};
@@ -546,28 +554,25 @@ async function streamResponse({
    readStream: stream
  });

  let textAnswer = '';
  let functionCalls: ChatCompletionMessageFunctionCall[] = [];
  let functionId = getNanoid();
  let usage = getLLMDefaultUsage();

  const { parsePart } = parseLLMStreamResponse();
  const { parsePart, getResponseData, updateFinishReason } = parseLLMStreamResponse();

  for await (const part of stream) {
    usage = part.usage || usage;
    if (res.closed) {
      stream.controller?.abort();
      updateFinishReason('close');
      break;
    }

    const { content: toolChoiceContent, responseContent } = parsePart({
    const { responseContent } = parsePart({
      part,
      parseThinkTag: false,
      retainDatasetCite
    });

    const responseChoice = part.choices?.[0]?.delta;
    textAnswer += toolChoiceContent;

    if (responseContent) {
      workflowStreamResponse?.({
@@ -577,7 +582,7 @@ async function streamResponse({
          text: responseContent
        })
      });
    } else if (responseChoice.function_call) {
    } else if (responseChoice?.function_call) {
      const functionCall: {
        arguments?: string;
        name?: string;
@@ -640,5 +645,7 @@ async function streamResponse({
    }
  }

  return { answer: textAnswer, functionCalls, usage };
  const { content, finish_reason, usage } = getResponseData();

  return { answer: content, functionCalls, finish_reason, usage };
}
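Several hunks in this compare swap manual accumulators for parseLLMStreamResponse's getResponseData/updateFinishReason. The helper's own source is not part of this compare view; an assumed sketch of its shape, inferred purely from the call sites above:

// Assumed interface, inferred from usage in this diff — not the actual source.
type StreamUsage = { prompt_tokens: number; completion_tokens: number };

interface LLMStreamParser {
  // Feed one SSE chunk; returns the incremental pieces for forwarding.
  parsePart(args: { part: unknown; parseThinkTag: boolean; retainDatasetCite?: boolean }): {
    reasoningContent: string;
    content: string;
    responseContent: string;
  };
  // Force a finish reason, e.g. when the client connection closes.
  updateFinishReason(reason: string): void;
  // Read back the accumulated answer, reasoning, finish reason and usage.
  getResponseData(): {
    reasoningContent: string;
    content: string;
    finish_reason: string | null;
    usage: StreamUsage;
  };
}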
@@ -220,7 +220,8 @@ export const runToolWithPromptCall = async (

  const max_tokens = computedMaxToken({
    model: toolModel,
    maxToken
    maxToken,
    min: 100
  });
  const filterMessages = await filterGPTMessageByMaxContext({
    messages,
@@ -592,28 +593,22 @@ async function streamResponse({

  let startResponseWrite = false;
  let answer = '';
  let reasoning = '';
  let finish_reason: CompletionFinishReason = null;
  let usage = getLLMDefaultUsage();

  const { parsePart } = parseLLMStreamResponse();
  const { parsePart, getResponseData, updateFinishReason } = parseLLMStreamResponse();

  for await (const part of stream) {
    usage = part.usage || usage;
    if (res.closed) {
      stream.controller?.abort();
      finish_reason = 'close';
      updateFinishReason('close');
      break;
    }

    const { reasoningContent, content, responseContent, finishReason } = parsePart({
    const { reasoningContent, content, responseContent } = parsePart({
      part,
      parseThinkTag: aiChatReasoning,
      retainDatasetCite
    });
    finish_reason = finish_reason || finishReason;
    answer += content;
    reasoning += reasoningContent;

    // Reasoning response
    if (aiChatReasoning && reasoningContent) {
@@ -658,7 +653,9 @@ async function streamResponse({
    }
  }

  return { answer, reasoning, finish_reason, usage };
  const { reasoningContent, content, finish_reason, usage } = getResponseData();

  return { answer: content, reasoning: reasoningContent, finish_reason, usage };
}

const parseAnswer = (
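Both tool runners now pass min: 100 to computedMaxToken. The helper's body is not shown in this compare view, so the following is an assumption about what the floor plausibly does: keep a shrinking token budget from collapsing into an unusably small (or non-positive) max_tokens.

// Hedged sketch only — the real computedMaxToken may differ.
const computedMaxTokenSketch = ({
  maxToken,
  modelMaxResponse,
  min
}: {
  maxToken?: number;
  modelMaxResponse: number;
  min?: number;
}) => {
  if (maxToken === undefined) return undefined;
  // Clamp to the model limit, but never below the floor.
  return Math.max(Math.min(maxToken, modelMaxResponse), min ?? 0);
};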
@@ -7,17 +7,13 @@ import {
  type ChatCompletionToolMessageParam,
  type ChatCompletionMessageParam,
  type ChatCompletionTool,
  type ChatCompletionAssistantMessageParam,
  type CompletionFinishReason
} from '@fastgpt/global/core/ai/type';
import { type NextApiResponse } from 'next';
import { responseWriteController } from '../../../../../common/response';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import {
  ChatCompletionRequestMessageRoleEnum,
  getLLMDefaultUsage
} from '@fastgpt/global/core/ai/constants';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { dispatchWorkFlow } from '../../index';
import {
  type DispatchToolModuleProps,
@@ -254,7 +250,8 @@ export const runToolWithToolChoice = async (

  const max_tokens = computedMaxToken({
    model: toolModel,
    maxToken
    maxToken,
    min: 100
  });

  // Filter histories by maxToken
@@ -319,97 +316,101 @@ export const runToolWithToolChoice = async (
    }
  });

  let { answer, toolCalls, finish_reason, inputTokens, outputTokens } = await (async () => {
    if (isStreamResponse) {
      if (!res || res.closed) {
        return {
          answer: '',
          toolCalls: [],
          finish_reason: 'close' as const,
          inputTokens: 0,
          outputTokens: 0
        };
      }
  let { reasoningContent, answer, toolCalls, finish_reason, inputTokens, outputTokens } =
    await (async () => {
      if (isStreamResponse) {
        if (!res || res.closed) {
          return {
            reasoningContent: '',
            answer: '',
            toolCalls: [],
            finish_reason: 'close' as const,
            inputTokens: 0,
            outputTokens: 0
          };
        }

      const result = await streamResponse({
        res,
        workflowStreamResponse,
        toolNodes,
        stream: aiResponse,
        aiChatReasoning,
        retainDatasetCite
      });

      return {
        answer: result.answer,
        toolCalls: result.toolCalls,
        finish_reason: result.finish_reason,
        inputTokens: result.usage.prompt_tokens,
        outputTokens: result.usage.completion_tokens
      };
    } else {
      const result = aiResponse as ChatCompletion;
      const finish_reason = result.choices?.[0]?.finish_reason as CompletionFinishReason;
      const calls = result.choices?.[0]?.message?.tool_calls || [];
      const answer = result.choices?.[0]?.message?.content || '';
      // @ts-ignore
      const reasoningContent = result.choices?.[0]?.message?.reasoning_content || '';
      const usage = result.usage;

      if (aiChatReasoning && reasoningContent) {
        workflowStreamResponse?.({
          event: SseResponseEventEnum.fastAnswer,
          data: textAdaptGptResponse({
            reasoning_content: removeDatasetCiteText(reasoningContent, retainDatasetCite)
          })
        const result = await streamResponse({
          res,
          workflowStreamResponse,
          toolNodes,
          stream: aiResponse,
          aiChatReasoning,
          retainDatasetCite
        });
      }

      // Format toolCalls
      const toolCalls = calls.map((tool) => {
        const toolNode = toolNodes.find((item) => item.nodeId === tool.function?.name);
        return {
          reasoningContent: result.reasoningContent,
          answer: result.answer,
          toolCalls: result.toolCalls,
          finish_reason: result.finish_reason,
          inputTokens: result.usage.prompt_tokens,
          outputTokens: result.usage.completion_tokens
        };
      } else {
        const result = aiResponse as ChatCompletion;
        const finish_reason = result.choices?.[0]?.finish_reason as CompletionFinishReason;
        const calls = result.choices?.[0]?.message?.tool_calls || [];
        const answer = result.choices?.[0]?.message?.content || '';
        // @ts-ignore
        const reasoningContent = result.choices?.[0]?.message?.reasoning_content || '';
        const usage = result.usage;

        // Models without stream support need a synthesized response sent to the client here
        workflowStreamResponse?.({
          event: SseResponseEventEnum.toolCall,
          data: {
            tool: {
              id: tool.id,
              toolName: toolNode?.name || '',
              toolAvatar: toolNode?.avatar || '',
              functionName: tool.function.name,
              params: tool.function?.arguments ?? '',
              response: ''
        if (aiChatReasoning && reasoningContent) {
          workflowStreamResponse?.({
            event: SseResponseEventEnum.fastAnswer,
            data: textAdaptGptResponse({
              reasoning_content: removeDatasetCiteText(reasoningContent, retainDatasetCite)
            })
          });
        }

        // Format toolCalls
        const toolCalls = calls.map((tool) => {
          const toolNode = toolNodes.find((item) => item.nodeId === tool.function?.name);

          // Models without stream support need a synthesized response sent to the client here
          workflowStreamResponse?.({
            event: SseResponseEventEnum.toolCall,
            data: {
              tool: {
                id: tool.id,
                toolName: toolNode?.name || '',
                toolAvatar: toolNode?.avatar || '',
                functionName: tool.function.name,
                params: tool.function?.arguments ?? '',
                response: ''
              }
            }
          });

          return {
            ...tool,
            toolName: toolNode?.name || '',
            toolAvatar: toolNode?.avatar || ''
          };
        });

        if (answer) {
          workflowStreamResponse?.({
            event: SseResponseEventEnum.fastAnswer,
            data: textAdaptGptResponse({
              text: removeDatasetCiteText(answer, retainDatasetCite)
            })
          });
        }

        return {
          ...tool,
          toolName: toolNode?.name || '',
          toolAvatar: toolNode?.avatar || ''
          reasoningContent: (reasoningContent as string) || '',
          answer,
          toolCalls: toolCalls,
          finish_reason,
          inputTokens: usage?.prompt_tokens,
          outputTokens: usage?.completion_tokens
        };
      });

      if (answer) {
        workflowStreamResponse?.({
          event: SseResponseEventEnum.fastAnswer,
          data: textAdaptGptResponse({
            text: removeDatasetCiteText(answer, retainDatasetCite)
          })
        });
      }

      return {
        answer,
        toolCalls: toolCalls,
        finish_reason,
        inputTokens: usage?.prompt_tokens,
        outputTokens: usage?.completion_tokens
      };
    }
  })();
  if (!answer && toolCalls.length === 0) {
    })();
  if (!answer && !reasoningContent && toolCalls.length === 0) {
    return Promise.reject(getEmptyResponseTip());
  }

@@ -501,12 +502,13 @@ export const runToolWithToolChoice = async (

  if (toolCalls.length > 0) {
    // Run the tool, combine its results, and perform another round of AI calls
    const assistantToolMsgParams: ChatCompletionAssistantMessageParam[] = [
      ...(answer
    const assistantToolMsgParams: ChatCompletionMessageParam[] = [
      ...(answer || reasoningContent
        ? [
            {
              role: ChatCompletionRequestMessageRoleEnum.Assistant as 'assistant',
              content: answer
              content: answer,
              reasoning_text: reasoningContent
            }
          ]
        : []),
@@ -627,9 +629,10 @@ export const runToolWithToolChoice = async (
    );
  } else {
    // No tool is invoked, indicating that the process is over
    const gptAssistantResponse: ChatCompletionAssistantMessageParam = {
    const gptAssistantResponse: ChatCompletionMessageParam = {
      role: ChatCompletionRequestMessageRoleEnum.Assistant,
      content: answer
      content: answer,
      reasoning_text: reasoningContent
    };
    const completeMessages = filterMessages.concat(gptAssistantResponse);
    inputTokens = inputTokens || (await countGptMessagesTokens(requestMessages, tools));
@@ -671,34 +674,23 @@ async function streamResponse({
    readStream: stream
  });

  let textAnswer = '';
  let callingTool: { name: string; arguments: string } | null = null;
  let toolCalls: ChatCompletionMessageToolCall[] = [];
  let finish_reason: CompletionFinishReason = null;
  let usage = getLLMDefaultUsage();

  const { parsePart } = parseLLMStreamResponse();
  const { parsePart, getResponseData, updateFinishReason } = parseLLMStreamResponse();

  for await (const part of stream) {
    usage = part.usage || usage;
    if (res.closed) {
      stream.controller?.abort();
      finish_reason = 'close';
      updateFinishReason('close');
      break;
    }

    const {
      reasoningContent,
      content: toolChoiceContent,
      responseContent,
      finishReason
    } = parsePart({
    const { reasoningContent, responseContent } = parsePart({
      part,
      parseThinkTag: true,
      retainDatasetCite
    });
    textAnswer += toolChoiceContent;
    finish_reason = finishReason || finish_reason;

    const responseChoice = part.choices?.[0]?.delta;

@@ -800,5 +792,13 @@ async function streamResponse({
    }
  }

  return { answer: textAnswer, toolCalls: toolCalls.filter(Boolean), finish_reason, usage };
  const { reasoningContent, content, finish_reason, usage } = getResponseData();

  return {
    reasoningContent,
    answer: content,
    toolCalls: toolCalls.filter(Boolean),
    finish_reason,
    usage
  };
}
@@ -556,30 +556,21 @@ async function streamResponse({
    res,
    readStream: stream
  });
  let answer = '';
  let reasoning = '';
  let finish_reason: CompletionFinishReason = null;
  let usage: CompletionUsage = getLLMDefaultUsage();

  const { parsePart } = parseLLMStreamResponse();
  const { parsePart, getResponseData, updateFinishReason } = parseLLMStreamResponse();

  for await (const part of stream) {
    usage = part.usage || usage;

    if (res.closed) {
      stream.controller?.abort();
      finish_reason = 'close';
      updateFinishReason('close');
      break;
    }

    const { reasoningContent, content, responseContent, finishReason } = parsePart({
    const { reasoningContent, responseContent } = parsePart({
      part,
      parseThinkTag,
      retainDatasetCite
    });
    finish_reason = finish_reason || finishReason;
    answer += content;
    reasoning += reasoningContent;

    if (aiChatReasoning && reasoningContent) {
      workflowStreamResponse?.({
@@ -602,5 +593,7 @@ async function streamResponse({
    }
  }

  const { reasoningContent: reasoning, content: answer, finish_reason, usage } = getResponseData();

  return { answer, reasoning, finish_reason, usage };
}
@@ -49,8 +49,6 @@ export const dispatchRunCode = async (props: RunCodeType): Promise<RunCodeRespon
    variables: customVariables
  });

  console.log(runResult);

  if (runResult.success) {
    return {
      [NodeOutputKeyEnum.rawResponse]: runResult.data.codeReturn,
@@ -211,12 +211,12 @@ export const getFileContentFromLinks = async ({
      // Read file
      const { rawText } = await readRawContentByFileBuffer({
        extension,
        isQAImport: false,
        teamId,
        tmbId,
        buffer,
        encoding,
        customPdfParse
        customPdfParse,
        getFormatText: true
      });

      // Add to buffer
@@ -28,7 +28,7 @@
    "lodash": "^4.17.21",
    "mammoth": "^1.6.0",
    "mongoose": "^8.10.1",
    "multer": "1.4.5-lts.1",
    "multer": "2.0.0",
    "mysql2": "^3.11.3",
    "next": "14.2.28",
    "nextjs-cors": "^2.2.0",
@@ -1,5 +1,6 @@
import iconv from 'iconv-lite';
import { type ReadRawTextByBuffer, type ReadFileResponse } from '../type';
import { matchMdImg } from '@fastgpt/global/common/string/markdown';

const rawEncodingList = [
  'ascii',
@@ -34,7 +35,10 @@ export const readFileRawText = ({ buffer, encoding }: ReadRawTextByBuffer): Read
    }
  })();

  const { text, imageList } = matchMdImg(content);

  return {
    rawText: content
    rawText: text,
    imageList
  };
};
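A rough sketch of the behavior the call site implies for matchMdImg; the real implementation lives in @fastgpt/global and is not shown in this compare view, so treat the details as an assumption:

// Assumed behavior only: split markdown image references out of the text and
// hand them back as a separate list.
const matchMdImgSketch = (content: string) => {
  const imageList: { url: string }[] = [];
  const text = content.replace(/!\[[^\]]*\]\((\S+?)\)/g, (_match, url: string) => {
    imageList.push({ url });
    return '';
  });
  return { text, imageList };
};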
@@ -28,11 +28,11 @@ export const readXlsxRawText = async ({
      if (!header) return;

      const formatText = `| ${header.join(' | ')} |
| ${header.map(() => '---').join(' | ')} |
${csvArr
  .slice(1)
  .map((row) => `| ${row.map((item) => item.replace(/\n/g, '\\n')).join(' | ')} |`)
  .join('\n')}`;
| ${header.map(() => '---').join(' | ')} |
${csvArr
  .slice(1)
  .map((row) => `| ${row.map((item) => item.replace(/\n/g, '\\n')).join(' | ')} |`)
  .join('\n')}`;

      return formatText;
    })
@@ -6,10 +6,6 @@ export const getUserFingerprint = async () => {
  console.log(result.visitorId);
};

-export const hasHttps = () => {
-  return window.location.protocol === 'https:';
-};
-
export const subRoute = process.env.NEXT_PUBLIC_BASE_URL;

export const getWebReqUrl = (url: string = '') => {
@@ -20,3 +16,32 @@ export const getWebReqUrl = (url: string = '') => {
  if (!url.startsWith('/') || url.startsWith(baseUrl)) return url;
  return `${baseUrl}${url}`;
};
+
+export const isMobile = () => {
+  // SSR return false
+  if (typeof window === 'undefined') return false;
+
+  // 1. Check User-Agent
+  const userAgent = navigator.userAgent.toLowerCase();
+  const mobileKeywords = [
+    'android',
+    'iphone',
+    'ipod',
+    'ipad',
+    'windows phone',
+    'blackberry',
+    'webos',
+    'iemobile',
+    'opera mini'
+  ];
+  const isMobileUA = mobileKeywords.some((keyword) => userAgent.includes(keyword));
+
+  // 2. Check screen width
+  const isMobileWidth = window.innerWidth <= 900;
+
+  // 3. Check if touch events are supported (exclude touch screen PCs)
+  const isTouchDevice = 'ontouchstart' in window || navigator.maxTouchPoints > 0;
+
+  // If any of the following conditions are met, it is considered a mobile device
+  return isMobileUA || (isMobileWidth && isTouchDevice);
+};
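The new isMobile() combines three signals: a User-Agent keyword match, a viewport-width cutoff (<= 900px), and touch support, so a narrow desktop window alone is not treated as mobile. A possible consumption pattern in a React component (the hook below is illustrative, and the import path is an assumption):

```ts
import { useEffect, useState } from 'react';
// Assumed import path; adjust to wherever isMobile lives in your tree.
import { isMobile } from '../common/system/utils';

// Illustrative hook: isMobile() is SSR-safe (returns false without window),
// but deferring to an effect keeps the server and first client render identical.
export const useIsMobile = () => {
  const [mobile, setMobile] = useState(false);

  useEffect(() => {
    setMobile(isMobile());
  }, []);

  return mobile;
};
```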
@@ -2,6 +2,7 @@

export const iconPaths = {
  alignLeft: () => import('./icons/alignLeft.svg'),
+ backup: () => import('./icons/backup.svg'),
  book: () => import('./icons/book.svg'),
  change: () => import('./icons/change.svg'),
  chatSend: () => import('./icons/chatSend.svg'),
@@ -229,6 +230,7 @@ export const iconPaths = {
  'core/dataset/tableCollection': () => import('./icons/core/dataset/tableCollection.svg'),
  'core/dataset/tag': () => import('./icons/core/dataset/tag.svg'),
  'core/dataset/websiteDataset': () => import('./icons/core/dataset/websiteDataset.svg'),
+ 'core/dataset/otherDataset': () => import('./icons/core/dataset/otherDataset.svg'),
  'core/dataset/websiteDatasetColor': () => import('./icons/core/dataset/websiteDatasetColor.svg'),
  'core/dataset/websiteDatasetOutline': () =>
    import('./icons/core/dataset/websiteDatasetOutline.svg'),
@@ -439,6 +441,7 @@ export const iconPaths = {
  point: () => import('./icons/point.svg'),
  preview: () => import('./icons/preview.svg'),
  'price/bg': () => import('./icons/price/bg.svg'),
+ 'price/pricearrow': () => import('./icons/price/pricearrow.svg'),
  'price/right': () => import('./icons/price/right.svg'),
  save: () => import('./icons/save.svg'),
  sliderTag: () => import('./icons/sliderTag.svg'),
4
packages/web/components/common/Icon/icons/backup.svg
Normal file
@@ -0,0 +1,4 @@
+<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" >
+<path fill-rule="evenodd" clip-rule="evenodd" d="M17.9386 2H10.2616C9.73441 1.99998 9.27964 1.99997 8.90507 2.03057C8.50973 2.06287 8.11651 2.13419 7.73813 2.32698C7.17364 2.6146 6.7147 3.07354 6.42708 3.63803C6.23429 4.01641 6.16297 4.40963 6.13067 4.80497C6.10007 5.17955 6.10008 5.63431 6.1001 6.16146V13.8385C6.10008 14.3657 6.10007 14.8205 6.13067 15.195C6.16297 15.5904 6.23429 15.9836 6.42708 16.362C6.7147 16.9265 7.17364 17.3854 7.73813 17.673C8.11651 17.8658 8.50973 17.9371 8.90507 17.9694C9.27961 18 9.73432 18 10.2614 18H17.9386C18.4657 18 18.9206 18 19.2951 17.9694C19.6905 17.9371 20.0837 17.8658 20.4621 17.673C21.0266 17.3854 21.4855 16.9265 21.7731 16.362C21.9659 15.9836 22.0372 15.5904 22.0695 15.195C22.1001 14.8205 22.1001 14.3658 22.1001 13.8387V6.16148C22.1001 5.63439 22.1001 5.17951 22.0695 4.80497C22.0372 4.40963 21.9659 4.01641 21.7731 3.63803C21.4855 3.07354 21.0266 2.6146 20.4621 2.32698C20.0837 2.13419 19.6905 2.06287 19.2951 2.03057C18.9206 1.99997 18.4658 1.99998 17.9386 2ZM15.1001 16H17.9001C18.4767 16 18.8489 15.9992 19.1323 15.9761C19.4039 15.9539 19.5046 15.9162 19.5541 15.891C19.7423 15.7951 19.8952 15.6422 19.9911 15.454C20.0163 15.4045 20.054 15.3038 20.0762 15.0322C20.0993 14.7488 20.1001 14.3766 20.1001 13.8V11H15.1001V16ZM20.1001 9V6.2C20.1001 5.62345 20.0993 5.25117 20.0762 4.96784C20.054 4.69617 20.0163 4.59546 19.9911 4.54601C19.8952 4.35785 19.7423 4.20487 19.5541 4.109C19.5046 4.0838 19.4039 4.04612 19.1323 4.02393C18.8489 4.00078 18.4767 4 17.9001 4H10.3001C9.72355 4 9.35127 4.00078 9.06793 4.02393C8.79627 4.04612 8.69555 4.0838 8.64611 4.109C8.45795 4.20487 8.30497 4.35785 8.20909 4.54601C8.1839 4.59546 8.14622 4.69617 8.12403 4.96784C8.10088 5.25117 8.1001 5.62345 8.1001 6.2V9H20.1001ZM13.1001 11V16H10.3001C9.72355 16 9.35127 15.9992 9.06793 15.9761C8.79627 15.9539 8.69555 15.9162 8.64611 15.891C8.45795 15.7951 8.30497 15.6422 8.20909 15.454C8.1839 15.4045 8.14622 15.3038 8.12403 15.0322C8.10088 14.7488 8.1001 14.3766 8.1001 13.8V11H13.1001Z" />
+<path d="M4.1001 7C4.1001 6.44772 3.65238 6 3.1001 6C2.54781 6 2.1001 6.44772 2.1001 7L2.1001 15.9217C2.10009 16.7823 2.10008 17.4887 2.14702 18.0632C2.19567 18.6586 2.29968 19.2 2.55787 19.7068C2.96054 20.497 3.60306 21.1396 4.39334 21.5422C4.90007 21.8004 5.44147 21.9044 6.03691 21.9531C6.61142 22 7.3177 22 8.17835 22H17.1001C17.6524 22 18.1001 21.5523 18.1001 21C18.1001 20.4477 17.6524 20 17.1001 20H8.2201C7.30751 20 6.68322 19.9992 6.19978 19.9597C5.72801 19.9212 5.47911 19.8508 5.30132 19.7602C4.88736 19.5493 4.55081 19.2127 4.33988 18.7988C4.2493 18.621 4.17892 18.3721 4.14038 17.9003C4.10088 17.4169 4.1001 16.7926 4.1001 15.88V7Z" />
+</svg>
@@ -1,11 +1,11 @@
-<svg viewBox="0 0 20 20" fill="none" xmlns="http://www.w3.org/2000/svg">
-<rect width="100%" height="100%" fill="url(#paint0_linear_7967_30275)" />
-<path fill-rule="evenodd" clip-rule="evenodd" d="M12.7552 4.8767C12.3073 4.92969 11.9962 5.33456 12.0603 5.78101L13.3235 14.5762C13.3876 15.0226 13.8027 15.3416 14.2506 15.2886L16.1502 15.0639C16.5981 15.0109 16.9093 14.606 16.8451 14.1596L15.582 5.36443C15.5178 4.91798 15.1028 4.59901 14.6548 4.65199L12.7552 4.8767ZM4.0675 5.52248C4.0675 5.07145 4.43314 4.70582 4.88417 4.70582H6.80225C7.25328 4.70582 7.61892 5.07145 7.61892 5.52248V14.4772C7.61892 14.9282 7.25328 15.2938 6.80225 15.2938H4.88417C4.43314 15.2938 4.0675 14.9282 4.0675 14.4772V5.52248ZM8.20321 5.52248C8.20321 5.07145 8.56885 4.70582 9.01988 4.70582H10.938C11.389 4.70582 11.7546 5.07145 11.7546 5.52248V14.4772C11.7546 14.9282 11.389 15.2938 10.938 15.2938H9.01988C8.56885 15.2938 8.20321 14.9282 8.20321 14.4772V5.52248Z" fill="white"/>
+<svg viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
+<rect width="32" height="32" fill="url(#paint0_linear_20384_1853)"/>
+<path d="M22.8185 7.25714C23.3782 6.69749 24.2855 6.69749 24.8452 7.25714C25.4048 7.81678 25.4048 8.72415 24.8452 9.28379L23.8314 10.2976C25.0837 12.4456 24.7895 15.2473 22.9487 17.0881L21.7775 18.2593C21.4078 18.6289 20.8085 18.6289 20.4388 18.2593L13.8431 11.6635C13.4734 11.2938 13.4734 10.6945 13.8431 10.3249L15.0142 9.15369C16.855 7.3129 19.6567 7.01864 21.8047 8.27093L22.8185 7.25714Z" fill="white"/>
+<path d="M14.0661 16.4523L15.65 18.0362L16.3429 17.3434C16.7073 16.9789 17.2983 16.9789 17.6628 17.3434L18.3695 18.0501C18.734 18.4146 18.734 19.0056 18.3695 19.3701L17.6767 20.0629L18.1561 20.5422C18.5257 20.9118 18.5257 21.5111 18.1561 21.8807L16.9857 23.051C15.1449 24.8918 12.3432 25.1861 10.1952 23.9338L9.18136 24.9476C8.62172 25.5073 7.71435 25.5073 7.1547 24.9476C6.59506 24.388 6.59506 23.4806 7.1547 22.9209L8.16853 21.9071C6.91623 19.7591 7.21048 16.9574 9.05127 15.1166L10.2217 13.9462C10.5913 13.5767 11.1905 13.5767 11.5601 13.9462L12.0395 14.4256L12.7323 13.7328C13.0968 13.3683 13.6877 13.3683 14.0522 13.7328L14.7589 14.4396C15.1234 14.804 15.1234 15.395 14.7589 15.7595L14.0661 16.4523Z" fill="white"/>
<defs>
-<linearGradient id="paint0_linear_7967_30275" x1="1.5" y1="20" x2="20" y2="1.6056e-06" gradientUnits="userSpaceOnUse">
-<stop stop-color="#C172FF"/>
-<stop offset="1" stop-color="#F19EFF"/>
+<linearGradient id="paint0_linear_20384_1853" x1="2.4" y1="32" x2="32" y2="2.56896e-06" gradientUnits="userSpaceOnUse">
+<stop stop-color="#00CAD1"/>
+<stop offset="1" stop-color="#73E6D8"/>
</linearGradient>
</defs>
</svg>
@@ -0,0 +1,11 @@
+<svg viewBox="0 0 20 20" fill="none" xmlns="http://www.w3.org/2000/svg">
+<rect width="100%" height="100%" fill="url(#paint0_linear_7967_30275)" />
+<path fill-rule="evenodd" clip-rule="evenodd" d="M12.7552 4.8767C12.3073 4.92969 11.9962 5.33456 12.0603 5.78101L13.3235 14.5762C13.3876 15.0226 13.8027 15.3416 14.2506 15.2886L16.1502 15.0639C16.5981 15.0109 16.9093 14.606 16.8451 14.1596L15.582 5.36443C15.5178 4.91798 15.1028 4.59901 14.6548 4.65199L12.7552 4.8767ZM4.0675 5.52248C4.0675 5.07145 4.43314 4.70582 4.88417 4.70582H6.80225C7.25328 4.70582 7.61892 5.07145 7.61892 5.52248V14.4772C7.61892 14.9282 7.25328 15.2938 6.80225 15.2938H4.88417C4.43314 15.2938 4.0675 14.9282 4.0675 14.4772V5.52248ZM8.20321 5.52248C8.20321 5.07145 8.56885 4.70582 9.01988 4.70582H10.938C11.389 4.70582 11.7546 5.07145 11.7546 5.52248V14.4772C11.7546 14.9282 11.389 15.2938 10.938 15.2938H9.01988C8.56885 15.2938 8.20321 14.9282 8.20321 14.4772V5.52248Z" fill="white"/>
+<defs>
+<linearGradient id="paint0_linear_7967_30275" x1="1.5" y1="20" x2="20" y2="1.6056e-06" gradientUnits="userSpaceOnUse">
+<stop stop-color="#C172FF"/>
+<stop offset="1" stop-color="#F19EFF"/>
+</linearGradient>
+</defs>
+</svg>
@@ -0,0 +1,3 @@
+<svg width="23" height="41" viewBox="0 0 23 41" fill="none" xmlns="http://www.w3.org/2000/svg">
+<path d="M0.487322 0.838902C5.52993 0.392809 14.4127 1.30573 19.3633 8.23636L19.7315 8.77445C23.4021 14.3865 23.0442 21.6048 19.9483 26.9717C17.7504 30.7815 14.1557 33.6822 9.62013 34.4327C9.59005 34.4426 9.55881 34.45 9.52638 34.4541C9.52493 34.4546 9.5225 34.4561 9.51955 34.4571C9.5153 34.4585 9.50623 34.4621 9.49806 34.4649L9.41994 34.4873C9.41814 34.4879 9.41426 34.49 9.40724 34.4922C9.39802 34.4952 9.37728 34.5023 9.35939 34.5078C9.3403 34.5137 9.31328 34.5214 9.2842 34.5284L9.1592 34.5469L8.89748 34.5459L8.79006 34.544C7.42741 34.6867 5.98678 34.6389 4.47853 34.3672C4.53191 34.4008 4.6025 34.4492 4.66799 34.5147C4.69099 34.5376 4.72144 34.5593 4.80959 34.6211C4.8805 34.6708 5.00368 34.7557 5.10646 34.878L5.13967 34.919C5.14802 34.9291 5.15311 34.935 5.15627 34.9385L5.41017 35.1338C5.6673 35.3405 5.90801 35.5576 6.13576 35.7627C6.44566 36.0419 6.73402 36.3007 7.04494 36.5264L7.20998 36.6602C7.36441 36.8001 7.49012 36.9562 7.59279 37.0967L7.68263 37.1787C7.73953 37.2274 7.85194 37.3224 7.94045 37.46L7.96291 37.4883C7.99181 37.5184 8.044 37.5555 8.15529 37.6202C8.27226 37.6881 8.51757 37.8164 8.6924 38.041L8.87306 38.2422C8.93835 38.3118 9.01823 38.4008 9.09572 38.5069C9.42285 38.7501 9.7519 38.9908 10.084 39.2266L10.3828 39.42C10.4872 39.4826 10.5969 39.5441 10.7119 39.6084C10.8236 39.6709 10.9418 39.7372 11.0615 39.8067L11.4199 40.0274L11.4981 40.0918C11.6617 40.257 11.6926 40.5197 11.5586 40.7207C11.4246 40.9213 11.1703 40.9945 10.9551 40.9073L10.8653 40.8594L10.5606 40.6709C10.4541 40.6092 10.3418 40.5476 10.2236 40.4815C10.052 40.3854 9.86755 40.2816 9.68556 40.1641L9.5049 40.042C9.41935 39.9813 9.33432 39.9197 9.24904 39.8584C9.2181 39.8524 9.1871 39.8442 9.15724 39.8321L9.06838 39.7842L8.88869 39.667C8.73114 39.5618 8.55702 39.4327 8.3965 39.2383L8.38967 39.2295C8.00348 38.941 7.62059 38.6476 7.2422 38.3487L6.50783 37.7569C6.00954 37.3479 5.57646 36.9545 5.12013 36.5811L4.65236 36.2149C4.41843 36.0403 4.17408 35.8774 3.92092 35.7198L3.13967 35.2539C3.00588 35.1761 2.86098 35.1009 2.69631 35.0137C2.53656 34.9291 2.36142 34.8357 2.19142 34.7305C1.89714 34.5484 1.58338 34.3126 1.34963 33.9825L1.2549 33.835L1.21291 33.7432C1.21251 33.742 1.21232 33.7405 1.21193 33.7393C1.08214 33.4994 1.16845 33.1982 1.40724 33.0645L3.10842 32.1084C3.68186 31.7899 4.26243 31.477 4.85158 31.1846L5.51271 30.8682C6.1781 30.5614 6.85297 30.2778 7.49806 29.9903L8.07424 29.7295C8.64423 29.4677 9.19873 29.1967 9.72951 28.8877L9.99416 28.7246C10.0791 28.6011 10.2177 28.5164 10.3789 28.5078C10.3824 28.5077 10.3862 28.5088 10.3897 28.5088C10.3915 28.5085 10.3937 28.5073 10.3955 28.5069C10.4079 28.5048 10.4781 28.4925 10.5654 28.5078C10.6116 28.516 10.7084 28.5395 10.8018 28.6182C10.9121 28.7113 10.972 28.8433 10.9785 28.9766C10.9839 29.0882 10.9514 29.1725 10.9336 29.211C10.9138 29.2537 10.8915 29.2851 10.8789 29.3018C10.8541 29.3349 10.8303 29.3577 10.8213 29.3662C10.8008 29.3857 10.7823 29.401 10.7754 29.4063L10.6533 29.4912C10.5861 29.5373 10.5101 29.5851 10.4365 29.6299L10.2324 29.752C9.66368 30.0831 9.07573 30.3708 8.49025 30.6397L7.90529 30.9034C7.23393 31.2026 6.579 31.4778 5.93556 31.7744L5.29592 32.0801C4.80426 32.3242 4.32055 32.583 3.83791 32.8487L3.93849 32.8799L4.09963 32.9278C4.16584 32.9496 4.26199 32.9861 4.35646 33.0479L4.41213 33.0713C4.44909 33.0849 4.49227 33.0981 4.53517 33.1094L4.90236 33.2129C5.0111 33.2435 5.11243 33.2689 5.22365 33.2891L5.49416 33.3506C5.57406 33.3703 5.63482 33.3852 5.6924 33.3946L6.34084 33.5C6.45433 33.5184 6.57269 33.5275 6.70802 33.5371C6.83757 33.5463 6.99208 33.5564 7.14455 33.5743L7.26662 33.5801C7.31235 33.5802 7.36473 33.5787 7.42677 33.5762C7.53858 33.5718 7.69494 33.5634 7.85256 33.5772H7.94631C7.98429 33.5753 8.02868 33.5718 8.07717 33.5664L8.40431 33.5352L8.48146 33.5293C8.50488 33.5292 8.52576 33.5306 8.54299 33.5323C8.56567 33.5345 8.58567 33.5385 8.59963 33.541C8.64475 33.5413 8.69333 33.5426 8.74318 33.543C13.3027 33.0453 16.9045 30.2471 19.082 26.4727C22.0174 21.3845 22.3308 14.5763 18.8945 9.32132L18.5498 8.81742C13.9144 2.32808 5.50466 1.3989 0.575212 1.835C0.300419 1.85909 0.0577133 1.65566 0.0332203 1.38089C0.00891035 1.10592 0.212384 0.863353 0.487322 0.838902ZM9.49123 34.461C9.49697 34.4606 9.506 34.46 9.51564 34.458C9.51858 34.4574 9.52136 34.4556 9.52345 34.4551C9.51294 34.4564 9.50195 34.4604 9.49123 34.461Z" fill="#DC7E03"/>
+</svg>
@@ -3,8 +3,10 @@ import { Box, HStack, Icon, type StackProps } from '@chakra-ui/react';

const LightTip = ({
  text,
+ icon = 'common/info',
  ...props
}: {
+ icon?: string;
  text: string;
} & StackProps) => {
  return (
@@ -17,7 +19,7 @@ const LightTip = ({
      fontSize={'sm'}
      {...props}
    >
-     <Icon name="common/info" w="1rem" />
+     <Icon name={icon} w="1rem" />
      <Box>{text}</Box>
    </HStack>
  );
@@ -216,7 +216,7 @@ const MyMenu = ({
    if (offset) return offset;
    if (typeof width === 'number') return [-width / 2, 5];
    return [0, 5];
- }, [offset]);
+ }, [offset, width]);

  return (
    <Menu
@@ -11,15 +11,16 @@ import {
  HStack,
  Box,
  Button,
- PopoverArrow
+ PopoverArrow,
+ Portal
} from '@chakra-ui/react';

const PopoverConfirm = ({
  content,
- showCancel,
+ showCancel = true,
  type,
  Trigger,
- placement = 'bottom-start',
+ placement = 'auto',
  offset,
  onConfirm,
  confirmText,
@@ -50,7 +51,7 @@ const PopoverConfirm = ({
  };
  if (type && map[type]) return map[type];
  return map.info;
- }, [type, t]);
+ }, [type]);

  const firstFieldRef = React.useRef(null);
  const { onOpen, onClose, isOpen } = useDisclosure();
@@ -67,7 +68,7 @@ const PopoverConfirm = ({
  onClose={onClose}
  placement={placement}
  offset={offset}
- closeOnBlur={false}
+ closeOnBlur={true}
  trigger={'click'}
  openDelay={100}
  closeDelay={100}
@@ -75,6 +76,7 @@ const PopoverConfirm = ({
  lazyBehavior="keepMounted"
  arrowSize={10}
  strategy={'fixed'}
+ computePositionOnMount={true}
>
  <PopoverTrigger>{Trigger}</PopoverTrigger>
  <PopoverContent p={4}>
@@ -82,15 +84,25 @@ const PopoverConfirm = ({

  <HStack alignItems={'flex-start'} color={'myGray.700'}>
    <MyIcon name={map.icon as any} w={'1.5rem'} />
-   <Box fontSize={'sm'}>{content}</Box>
+   <Box fontSize={'sm'} whiteSpace={'pre-wrap'}>
+     {content}
+   </Box>
  </HStack>
- <HStack mt={1} justifyContent={'flex-end'}>
+ <HStack mt={2} justifyContent={'flex-end'}>
    {showCancel && (
      <Button variant={'whiteBase'} size="sm" onClick={onClose}>
        {cancelText || t('common:Cancel')}
      </Button>
    )}
-   <Button isLoading={loading} variant={map.variant} size="sm" onClick={onclickConfirm}>
+   <Button
+     isLoading={loading}
+     variant={map.variant}
+     size="sm"
+     onClick={async (e) => {
+       e.stopPropagation();
+       await onclickConfirm();
+     }}
+   >
      {confirmText || t('common:Confirm')}
    </Button>
  </HStack>
@@ -57,6 +57,7 @@ const MyPopover = ({
  closeDelay={100}
  isLazy
  lazyBehavior="keepMounted"
+ autoFocus={false}
>
  <PopoverTrigger>{Trigger}</PopoverTrigger>
  <PopoverContent {...props}>
@@ -54,7 +54,7 @@ const RadioGroup = <T = any,>({ list, value, onChange, ...props }: Props<T>) =>
        />
      </Flex>
    </Box>
-   <HStack spacing={1} color={'myGray.900'} whiteSpace={'nowrap'} fontSize={'sm'}>
+   <HStack spacing={0.5} color={'myGray.900'} whiteSpace={'nowrap'} fontSize={'sm'}>
      <Box>{typeof item.title === 'string' ? t(item.title as any) : item.title}</Box>
      {!!item.tooltip && <QuestionTip label={item.tooltip} color={'myGray.600'} />}
    </HStack>
@@ -1,5 +1,5 @@
import React, { forwardRef } from 'react';
-import { Flex, Box, type BoxProps } from '@chakra-ui/react';
+import { Flex, Box, type BoxProps, HStack } from '@chakra-ui/react';
import MyIcon from '../Icon';

type Props<T = string> = Omit<BoxProps, 'onChange'> & {
@@ -10,9 +10,22 @@ type Props<T = string> = Omit<BoxProps, 'onChange'> & {
  }[];
  value: T;
  onChange: (e: T) => void;
+ iconSize?: string;
+ labelSize?: string;
+ iconGap?: number;
};

-const FillRowTabs = ({ list, value, onChange, py = '7px', px = '12px', ...props }: Props) => {
+const FillRowTabs = ({
+  list,
+  value,
+  onChange,
+  py = '2.5',
+  px = '4',
+  iconSize = '18px',
+  labelSize = 'sm',
+  iconGap = 2,
+  ...props
+}: Props) => {
  return (
    <Box
      display={'inline-flex'}
@@ -28,7 +41,7 @@ const FillRowTabs = ({ list, value, onChange, py = '7px', px = '12px', ...props
      {...props}
    >
      {list.map((item) => (
-       <Flex
+       <HStack
          key={item.value}
          flex={'1 0 0'}
          alignItems={'center'}
@@ -39,6 +52,7 @@ const FillRowTabs = ({ list, value, onChange, py = '7px', px = '12px', ...props
          py={py}
          userSelect={'none'}
          whiteSpace={'noWrap'}
+         gap={iconGap}
          {...(value === item.value
            ? {
                bg: 'white',
@@ -53,9 +67,9 @@ const FillRowTabs = ({ list, value, onChange, py = '7px', px = '12px', ...props
            onClick: () => onChange(item.value)
          })}
        >
-         {item.icon && <MyIcon name={item.icon as any} mr={1.5} w={'18px'} />}
-         <Box>{item.label}</Box>
-       </Flex>
+         {item.icon && <MyIcon name={item.icon as any} w={iconSize} />}
+         <Box fontSize={labelSize}>{item.label}</Box>
+       </HStack>
      ))}
    </Box>
  );
@@ -1,8 +1,6 @@
import { useTranslation } from 'next-i18next';
import { useToast } from './useToast';
import { useCallback } from 'react';
-import { hasHttps } from '../common/system/utils';
-import { isProduction } from '@fastgpt/global/common/system/constants';
import MyModal from '../components/common/MyModal';
import React from 'react';
import { Box, ModalBody } from '@chakra-ui/react';
@@ -26,7 +24,7 @@ export const useCopyData = () => {
      data = data.trim();

      try {
-       if ((hasHttps() || !isProduction) && navigator.clipboard) {
+       if (navigator.clipboard && window.isSecureContext) {
          await navigator.clipboard.writeText(data);
          if (title) {
            toast({
@@ -36,13 +34,35 @@ export const useCopyData = () => {
            });
          }
        } else {
-         throw new Error('');
+         let textArea = document.createElement('textarea');
+         textArea.value = data;
+         // Keep the textarea out of the viewport and invisible
+         textArea.style.position = 'absolute';
+         // @ts-ignore
+         textArea.style.opacity = 0;
+         textArea.style.left = '-999999px';
+         textArea.style.top = '-999999px';
+         document.body.appendChild(textArea);
+         textArea.focus();
+         textArea.select();
+         await new Promise((res, rej) => {
+           document.execCommand('copy') ? res('') : rej();
+           textArea.remove();
+         }).then(() => {
+           if (title) {
+             toast({
+               title,
+               status: 'success',
+               duration
+             });
+           }
+         });
        }
      } catch (error) {
        setCopyContent(data);
      }
    },
-   [t, toast]
+   [setCopyContent, t, toast]
  );

  return {
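The useCopyData hunks above swap the old hasHttps()/isProduction gate for the standard window.isSecureContext check, and fall back to a hidden textarea with document.execCommand('copy') when the async Clipboard API is unavailable (for example on plain-HTTP deployments). The same pattern as a standalone function, for reference (the function name is illustrative, not from the diff):

```ts
// Copy text via the Clipboard API in secure contexts; otherwise fall back to
// the legacy hidden-textarea + execCommand('copy') technique.
export async function copyText(data: string): Promise<boolean> {
  if (navigator.clipboard && window.isSecureContext) {
    await navigator.clipboard.writeText(data);
    return true;
  }

  const textArea = document.createElement('textarea');
  textArea.value = data;
  // Keep the textarea out of the viewport and invisible
  textArea.style.position = 'absolute';
  textArea.style.opacity = '0';
  textArea.style.left = '-999999px';
  textArea.style.top = '-999999px';
  document.body.appendChild(textArea);
  textArea.focus();
  textArea.select();

  try {
    // Deprecated, but still the only option outside secure contexts
    return document.execCommand('copy');
  } finally {
    textArea.remove();
  }
}
```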
@@ -66,6 +66,7 @@
  "model.tool_choice_tip": "If the model supports tool calling, turn on this switch",
  "model.used_in_classify": "Used for problem classification",
  "model.used_in_extract_fields": "for text extraction",
  "model.used_in_query_extension": "For problem optimization",
  "model.used_in_tool_call": "Used for tool call nodes",
  "model.vision": "Vision model",
  "model.vision_tag": "Vision",
@@ -39,8 +39,6 @@
  "new_password": "New Password",
  "notification_receiving": "Notify",
  "old_password": "Old Password",
- "openai_account_configuration": "OpenAI account configuration",
- "openai_account_setting_exception": "Setting OpenAI account exception",
  "package_and_usage": "Plans",
  "package_details": "Details",
  "package_expiry_time": "Expired",
@@ -52,8 +50,10 @@
  "password_update_success": "Password changed successfully",
  "pending_usage": "To be used",
  "phone_label": "Phone number",
+ "please_bind_contact": "Please bind the contact information",
  "please_bind_notification_receiving_path": "Please bind the notification receiving method first",
  "purchase_extra_package": "Upgrade",
+ "redeem_coupon": "Redeem coupon",
  "reminder_create_bound_notification_account": "Remind the creator to bind the notification account",
  "reset_password": "reset password",
  "resource_usage": "Usages",
@@ -75,6 +75,5 @@
  "user_team_team_name": "Team",
  "verification_code": "Verification code",
  "you_can_convert": "you can redeem",
- "yuan": "Yuan",
- "redeem_coupon": "Redeem coupon"
+ "yuan": "Yuan"
}
@@ -8,8 +8,9 @@
  "assign_permission": "Permission change",
  "change_department_name": "Department Editor",
  "change_member_name": "Member name change",
  "confirm_delete_from_org": "Confirm to move {{username}} out of the department?",
  "confirm_delete_from_team": "Confirm to move {{username}} out of the team?",
  "confirm_delete_group": "Confirm to delete group?",
  "confirm_delete_member": "Confirm to delete member?",
  "confirm_delete_org": "Confirm to delete organization?",
  "confirm_forbidden": "Confirm forbidden",
  "confirm_leave_team": "Confirmed to leave the team? \nAfter exiting, all your resources in the team are transferred to the team owner.",
@@ -21,6 +22,8 @@
  "create_sub_org": "Create sub-organization",
  "delete": "delete",
  "delete_department": "Delete sub-department",
  "delete_from_org": "Move out of department",
  "delete_from_team": "Move out of the team",
  "delete_group": "Delete a group",
  "delete_org": "Delete organization",
  "edit_info": "Edit information",
@@ -28,6 +31,7 @@
  "edit_member_tip": "Name",
  "edit_org_info": "Edit organization information",
  "expires": "Expiration time",
  "export_members": "Export members",
  "forbid_hint": "After forbidden, this invitation link will become invalid. This action is irreversible. Are you sure you want to deactivate?",
  "forbid_success": "Forbid success",
  "forbidden": "Forbidden",
@@ -44,8 +48,10 @@
  "invite_member": "Invite members",
  "invited": "Invited",
  "join_team": "Join the team",
  "join_update_time": "Join/Update Time",
  "kick_out_team": "Remove members",
  "label_sync": "Tag sync",
  "leave": "Resigned",
  "leave_team_failed": "Leaving the team exception",
  "log_assign_permission": "[{{name}}] Updated the permissions of [{{objectName}}]: [Application creation: [{{appCreate}}], Knowledge Base: [{{datasetCreate}}], API Key: [{{apiKeyCreate}}], Management: [{{manage}}]]",
  "log_change_department": "【{{name}}】Updated department【{{departmentName}}】",
@@ -70,6 +76,7 @@
  "member_group": "Belonging to member group",
  "move_member": "Move member",
  "move_org": "Move organization",
  "notification_recieve": "Team notification reception",
  "operation_log": "log",
  "org": "organization",
  "org_description": "Organization description",
@@ -84,13 +91,22 @@
  "permission_datasetCreate_Tip": "Can create knowledge bases in the root directory (creation permissions in folders are controlled by the folder)",
  "permission_manage": "Admin",
  "permission_manage_tip": "Can manage members, create groups, manage all groups, and assign permissions to groups and members",
  "please_bind_contact": "Please bind the contact information",
  "recover_team_member": "Member Recovery",
  "relocate_department": "Department Mobile",
  "remark": "remark",
  "remove_tip": "Confirm to remove {{username}} from the team?",
  "restore_tip": "Confirm to join the team {{username}}? \nOnly the availability and related permissions of this member account are restored, and the resources under the account cannot be restored.",
  "restore_tip_title": "Recovery confirmation",
  "retain_admin_permissions": "Keep administrator rights",
  "search_log": "Search log",
  "search_member": "Search for members",
  "search_member_group_name": "Search member/group name",
  "search_org": "Search Department",
  "set_name_avatar": "Team avatar",
  "sync_immediately": "Synchronize now",
  "sync_member_failed": "Synchronization of members failed",
  "sync_member_success": "Synchronize members successfully",
  "total_team_members": "{{amount}} members in total",
  "transfer_ownership": "transfer owner",
  "unlimited": "Unlimited",
@@ -1,13 +1,17 @@
{
  "configured": "Configured",
  "error.no_permission": "Please contact the administrator to configure",
  "get_usage_failed": "Failed to get usage",
  "laf_account": "laf account",
  "no_intro": "No explanation yet",
  "not_configured": "Not configured",
  "open_api_notice": "You can fill in the relevant key of OpenAI/OneAPI. \nIf you fill in this content, the online platform using [AI Dialogue], [Problem Classification] and [Content Extraction] will use the Key you filled in, and there will be no charge. \nPlease pay attention to whether your Key has permission to access the corresponding model. \nGPT models can choose FastAI.",
+ "openai_account_configuration": "OpenAI/OneAPI account",
+ "openai_account_setting_exception": "Setting up an exception to OpenAI account",
  "request_address_notice": "Request address, default is openai official. \nThe forwarding address can be filled in, but \\\"v1\\\" is not automatically completed.",
  "third_party_account": "Third-party account",
+ "third_party_account.configured": "Configured",
+ "third_party_account.not_configured": "Not configured",
  "third_party_account_desc": "The administrator can configure third-party accounts or variables here, and the account will be used by all team members.",
  "unavailable": "Get usage exception",
  "usage": "Usage",
@@ -13,8 +13,10 @@
  "embedding_index": "Embedding",
  "every_day": "Day",
  "every_month": "Moon",
  "every_week": "weekly",
+ "export_confirm": "Export confirmation",
+ "export_confirm_tip": "There are currently {{total}} usage records in total. Are you sure to export?",
  "export_success": "Export successfully",
  "export_title": "Time,Members,Type,Project name,AI points",
  "feishu": "Feishu",
  "generation_time": "Generation time",
@@ -1,7 +1,11 @@
{
  "Click_to_delete_this_field": "Click to delete this field",
  "Filed_is_deprecated": "This field is deprecated",
  "MCP_tools_debug": "debug",
  "MCP_tools_detail": "check the details",
  "MCP_tools_list": "Tool list",
  "MCP_tools_list_is_empty": "MCP tool not resolved",
  "MCP_tools_list_with_number": "Tool list: {{total}}",
  "MCP_tools_parse_failed": "Failed to parse MCP address",
  "MCP_tools_url": "MCP Address",
  "MCP_tools_url_is_empty": "The MCP address cannot be empty",
@@ -130,6 +134,7 @@
  "response_format": "Response format",
  "saved_success": "Saved successfully! \nTo use this version externally, click Save and Publish",
  "search_app": "Search apps",
  "search_tool": "Search Tools",
  "setting_app": "Workflow",
  "setting_plugin": "Workflow",
  "show_top_p_tip": "An alternative method of temperature sampling, called Nucleus sampling, the model considers the results of tokens with TOP_P probability mass quality. \nTherefore, 0.1 means that only tokens containing the highest probability quality are considered. \nThe default is 1.",
@@ -165,6 +170,7 @@
  "template_market_description": "Explore more features in the template market, with configuration tutorials and usage guides to help you understand and get started with various applications.",
  "template_market_empty_data": "No suitable templates found",
  "time_zone": "Time Zone",
  "tool_detail": "Tool details",
  "tool_input_param_tip": "This plugin requires configuration of related information to run properly.",
  "tools_no_description": "This tool has not been introduced ~",
  "transition_to_workflow": "Convert to Workflow",
@@ -175,6 +181,7 @@
  "tts_close": "Close",
  "type.All": "All",
  "type.Create http plugin tip": "Batch create plugins through OpenAPI Schema, compatible with GPTs format.",
  "type.Create mcp tools tip": "Automatically parse and batch create callable MCP tools by entering the MCP address",
  "type.Create one plugin tip": "Customizable input and output workflows, usually used to encapsulate reusable workflows.",
  "type.Create plugin bot": "Create Plugin",
  "type.Create simple bot": "Create Simple App",
@@ -186,6 +193,8 @@
  "type.Import from json tip": "Create applications directly through JSON configuration files",
  "type.Import from json_error": "Failed to get workflow data, please check the URL or manually paste the JSON data",
  "type.Import from json_loading": "Workflow data is being retrieved, please wait...",
  "type.MCP tools": "MCP Toolset",
  "type.MCP_tools_url": "MCP Address",
  "type.Plugin": "Plugin",
  "type.Simple bot": "Simple App",
  "type.Workflow bot": "Workflow",
@@ -35,6 +35,7 @@
  "delete_all_input_guide_confirm": "Are you sure you want to clear the input guide lexicon?",
+ "download_chunks": "Download data",
  "empty_directory": "This directory is empty~",
  "error_message": "error message",
  "file_amount_over": "Exceeded maximum file quantity {{max}}",
  "file_input": "File input",
  "file_input_tip": "You can obtain the link to the corresponding file through the \"File Link\" of the [Plug-in Start] node",
@@ -186,7 +186,6 @@
  "commercial_function_tip": "Please Upgrade to the Commercial Version to Use This Feature: https://doc.fastgpt.cn/docs/commercial/intro/",
  "comon.Continue_Adding": "Continue Adding",
  "compliance.chat": "The content is generated by third-party AI and cannot be guaranteed to be true and accurate. It is for reference only.",
- "compliance.compliance.dataset": "Please ensure that your content strictly complies with relevant laws and regulations and avoid containing any illegal or infringing content. \nPlease be careful when uploading materials that may contain sensitive information.",
  "compliance.dataset": "Please ensure that your content strictly complies with relevant laws and regulations and avoid containing any illegal or infringing content. \nPlease be careful when uploading materials that may contain sensitive information.",
  "confirm_choice": "Confirm Choice",
  "confirm_move": "Move Here",
@@ -431,7 +430,6 @@
  "core.dataset.Read Dataset": "View Dataset Details",
  "core.dataset.Set Website Config": "Start Configuring",
  "core.dataset.Start export": "Export Started",
  "core.dataset.Table collection": "Table Dataset",
  "core.dataset.Text collection": "Text Dataset",
  "core.dataset.apiFile": "API File",
  "core.dataset.collection.Click top config website": "Click to Configure Website",
@@ -476,6 +474,7 @@
  "core.dataset.error.unAuthDatasetData": "Unauthorized to Operate This Data",
  "core.dataset.error.unAuthDatasetFile": "Unauthorized to Operate This File",
  "core.dataset.error.unCreateCollection": "Unauthorized to Operate This Data",
+ "core.dataset.error.unExistDataset": "The knowledge base does not exist",
  "core.dataset.error.unLinkCollection": "Not a Web Link Collection",
  "core.dataset.externalFile": "External File Library",
  "core.dataset.file": "File",
@@ -529,7 +528,6 @@
  "core.dataset.search.mode.fullTextRecall desc": "Use traditional full-text search, suitable for finding some keywords and subject-predicate special data",
  "core.dataset.search.mode.mixedRecall": "Mixed Search",
  "core.dataset.search.mode.mixedRecall desc": "Use a combination of vector search and full-text search results, sorted using the RRF algorithm.",
  "core.dataset.search.score.embedding": "Semantic Search",
  "core.dataset.search.score.embedding desc": "Get scores by calculating the distance between vectors, ranging from 0 to 1.",
  "core.dataset.search.score.fullText": "Full Text Search",
  "core.dataset.search.score.fullText desc": "Calculate the score of the same keywords, ranging from 0 to infinity.",
@@ -751,7 +749,6 @@
  "custom_title": "Custom Title",
  "data_index_custom": "Custom index",
  "data_index_default": "Default index",
  "data_index_image": "Image Index",
  "data_index_question": "Inferred question index",
  "data_index_summary": "Summary Index",
  "data_not_found": "Data can't be found",
@@ -923,6 +920,7 @@
  "not_open": "Not Open",
+ "not_permission": "The current subscription package does not support team operation logs",
  "not_support": "Not Supported",
  "not_support_wechat_image": "This is a WeChat picture",
  "not_yet_introduced": "No Introduction Yet",
  "open_folder": "Open Folder",
  "option": "Option",
@@ -940,6 +938,7 @@
  "pay_corporate_payment": "Payment to the public",
  "pay_money": "Amount payable",
  "pay_success": "Payment successfully",
  "pay_year_tip": "Pay 10 months, enjoy 1 year!",
  "permission.Collaborator": "Collaborator",
  "permission.Default permission": "Default Permission",
  "permission.Manage": "Manage",
@@ -1134,7 +1133,7 @@
  "support.wallet.subscription.AI points usage tip": "Each time the AI model is called, a certain amount of AI points will be consumed. For specific calculation standards, please refer to the 'Pricing' above.",
  "support.wallet.subscription.Ai points": "AI Points Calculation Standards",
  "support.wallet.subscription.Current plan": "Current Package",
- "support.wallet.subscription.Extra ai points": "Extra AI Points",
+ "support.wallet.subscription.Extra ai points": "AI points",
  "support.wallet.subscription.Extra dataset size": "Extra Dataset Capacity",
  "support.wallet.subscription.Extra plan": "Extra Resource Pack",
  "support.wallet.subscription.Extra plan tip": "When the standard package is not enough, you can purchase extra resource packs to continue using",
@@ -1143,11 +1142,11 @@
  "support.wallet.subscription.Next plan": "Future Package",
  "support.wallet.subscription.Stand plan level": "Subscription Package",
  "support.wallet.subscription.Sub plan": "Subscription Package",
- "support.wallet.subscription.Sub plan tip": "Free to use {{title}} or upgrade to a higher package",
+ "support.wallet.subscription.Sub plan tip": "Free to use [{{title}}] or upgrade to a higher package",
  "support.wallet.subscription.Team plan and usage": "Package and Usage",
  "support.wallet.subscription.Training weight": "Training Priority: {{weight}}",
  "support.wallet.subscription.Update extra ai points": "Extra AI Points",
- "support.wallet.subscription.Update extra dataset size": "Extra Storage",
+ "support.wallet.subscription.Update extra dataset size": "Storage",
  "support.wallet.subscription.Upgrade plan": "Upgrade Package",
  "support.wallet.subscription.ai_model": "AI Language Model",
  "support.wallet.subscription.function.History store": "{{amount}} Days of Chat History Retention",
@@ -1156,9 +1155,9 @@
  "support.wallet.subscription.function.Max dataset size": "{{amount}} Dataset Indexes",
  "support.wallet.subscription.function.Max members": "{{amount}} Team Members",
  "support.wallet.subscription.function.Points": "{{amount}} AI Points",
- "support.wallet.subscription.mode.Month": "Monthly",
+ "support.wallet.subscription.mode.Month": "Month",
  "support.wallet.subscription.mode.Period": "Subscription Period",
- "support.wallet.subscription.mode.Year": "Yearly",
+ "support.wallet.subscription.mode.Year": "Year",
  "support.wallet.subscription.mode.Year sale": "Two Months Free",
  "support.wallet.subscription.point": "Points",
  "support.wallet.subscription.standardSubLevel.custom": "Custom",
@@ -1167,7 +1166,7 @@
  "support.wallet.subscription.standardSubLevel.experience": "Experience",
  "support.wallet.subscription.standardSubLevel.experience_desc": "Unlock the full functionality of FastGPT",
  "support.wallet.subscription.standardSubLevel.free": "Free",
- "support.wallet.subscription.standardSubLevel.free desc": "Basic functions can be used for free every month. If the system is not logged in for 30 consecutive days, the Dataset will be automatically cleared.",
+ "support.wallet.subscription.standardSubLevel.free desc": "Free trial of core features. \nIf you haven't logged in for 30 days, the knowledge base will be cleared.",
  "support.wallet.subscription.standardSubLevel.team": "Team",
  "support.wallet.subscription.standardSubLevel.team_desc": "Suitable for small teams to build Dataset applications and provide external services",
  "support.wallet.subscription.status.active": "Active",
@@ -1269,8 +1268,6 @@
  "user.reset_password_tip": "The initial password is not set/the password has not been modified for a long time, please reset the password",
  "user.team.Balance": "Team Balance",
  "user.team.Check Team": "Switch",
  "user.team.Confirm Invite": "Confirm Invite",
  "user.team.Create Team": "Create New Team",
  "user.team.Leave Team": "Leave Team",
  "user.team.Leave Team Failed": "Failed to Leave Team",
  "user.team.Member": "Member",
@@ -1281,13 +1278,10 @@
  "user.team.Processing invitations Tips": "You have {{amount}} team invitations to process",
  "user.team.Remove Member Confirm Tip": "Confirm to remove {{username}} from the team?",
  "user.team.Select Team": "Select Team",
  "user.team.Set Name": "Name the Team",
  "user.team.Switch Team Failed": "Failed to Switch Team",
  "user.team.Tags Async": "Save",
  "user.team.Team Name": "Team Name",
  "user.team.Team Tags Async": "Tag Sync",
  "user.team.Team Tags Async Success": "Link Error Successful, Tag Information Updated",
  "user.team.Update Team": "Update Team Information",
  "user.team.invite.Accepted": "Joined Team",
  "user.team.invite.Deal Width Footer Tip": "It will automatically close after processing",
  "user.team.invite.Reject": "Invitation Rejected",
@@ -7,22 +7,35 @@
  "auto_indexes": "Automatically generate supplementary indexes",
  "auto_indexes_tips": "Additional index generation is performed through large models to improve semantic richness and improve retrieval accuracy.",
  "auto_training_queue": "Enhanced index queueing",
+ "backup_collection": "Backup data",
+ "backup_data_parse": "Backup data is being parsed",
+ "backup_data_uploading": "Backup data is being uploaded: {{num}}%",
+ "backup_dataset": "Backup import",
+ "backup_dataset_success": "The backup was created successfully",
+ "backup_dataset_tip": "You can reimport the downloaded csv file when exporting the knowledge base.",
+ "backup_mode": "Backup import",
  "chunk_max_tokens": "max_tokens",
  "chunk_process_params": "Block processing parameters",
  "chunk_size": "Block size",
+ "chunk_trigger": "Blocking conditions",
+ "chunk_trigger_force_chunk": "Forced chunking",
+ "chunk_trigger_max_size": "The original text length is less than the maximum context 70% of the file processing model",
+ "chunk_trigger_min_size": "The original text is greater than",
+ "chunk_trigger_tips": "Block storage is triggered when certain conditions are met, otherwise the original text will be stored in full directly",
  "close_auto_sync": "Are you sure you want to turn off automatic sync?",
  "collection.Create update time": "Creation/Update Time",
- "collection.Training type": "Training",
+ "collection.training_type": "Chunk type",
  "collection_data_count": "Data amount",
  "collection_metadata_custom_pdf_parse": "PDF enhancement analysis",
+ "collection_metadata_image_parse": "Image tagging",
  "collection_not_support_retraining": "This collection type does not support retuning parameters",
  "collection_not_support_sync": "This collection does not support synchronization",
  "collection_sync": "Sync data",
  "collection_sync_confirm_tip": "Confirm to start synchronizing data? \nThe system will pull the latest data for comparison. If the contents are different, a new collection will be created and the old collection will be deleted. Please confirm!",
  "collection_tags": "Collection Tags",
  "common_dataset": "General Dataset",
- "common_dataset_desc": "Build a Dataset by importing files, web links, or manual input.",
+ "common_dataset_desc": "Building a knowledge base by importing files, web page links, or manual entry",
  "condition": "condition",
  "config_sync_schedule": "Configure scheduled synchronization",
  "confirm_to_rebuild_embedding_tip": "Are you sure you want to switch the index for the Dataset?\nSwitching the index is a significant operation that requires re-indexing all data in your Dataset, which may take a long time. Please ensure your account has sufficient remaining points.\n\nAdditionally, you need to update the applications that use this Dataset to avoid conflicts with other indexed model Datasets.",
  "core.dataset.import.Adjust parameters": "Adjust parameters",
@@ -31,6 +44,7 @@
  "custom_split_sign_tip": "Allows you to chunk according to custom delimiters. \nUsually used for processed data, using specific separators for precise chunking. \nYou can use the | symbol to represent multiple splitters, such as: \".|.\" to represent a period in Chinese and English.\n\nTry to avoid using special symbols related to regular, such as: * () [] {}, etc.",
  "data_amount": "{{dataAmount}} Datas, {{indexAmount}} Indexes",
  "data_error_amount": "{{errorAmount}} Group training exception",
+ "data_index_image": "Image index",
  "data_index_num": "Index {{index}}",
  "data_process_params": "Params",
  "data_process_setting": "Processing config",
@@ -57,8 +71,9 @@
  "enhanced_indexes": "Index enhancement",
  "error.collectionNotFound": "Collection not found~",
  "external_file": "External File Library",
- "external_file_dataset_desc": "Import files from an external file library to build a Dataset. The files will not be stored again.",
+ "external_file_dataset_desc": "You can use external file library to build a knowledge library through the API",
  "external_id": "File Reading ID",
+ "external_other_dataset_desc": "Customize API, Feishu, Yuque and other external documents as knowledge bases",
  "external_read_url": "External Preview URL",
  "external_read_url_tip": "Configure the reading URL of your file library for user authentication. Use the {{fileId}} variable to refer to the external file ID.",
  "external_url": "File Access URL",
@@ -92,14 +107,18 @@
  "is_open_schedule": "Enable scheduled synchronization",
  "keep_image": "Keep the picture",
  "loading": "Loading...",
  "max_chunk_size": "Maximum chunk size",
  "move.hint": "After moving, the selected knowledge base/folder will inherit the permission settings of the new folder, and the original permission settings will become invalid.",
  "noChildren": "No subdirectories",
  "noSelectedFolder": "No selected folder",
  "noSelectedId": "No selected ID",
  "noValidId": "No valid ID",
  "open_auto_sync": "After scheduled synchronization is turned on, the system will try to synchronize the collection from time to time every day. During the collection synchronization period, the collection data will not be searched.",
  "other_dataset": "Third-party knowledge base",
  "paragraph_max_deep": "Maximum paragraph depth",
  "paragraph_split": "Partition by paragraph",
  "paragraph_split_tip": "Priority is given to chunking according to the Makdown title paragraph. If the chunking is too long, then chunking is done according to the length.",
  "params_config": "Config",
  "params_setting": "Parameter settings",
  "pdf_enhance_parse": "PDF enhancement analysis",
  "pdf_enhance_parse_price": "{{price}} points/page",
  "pdf_enhance_parse_tips": "Calling PDF recognition model for parsing, you can convert it into Markdown and retain pictures in the document. At the same time, you can also identify scanned documents, which will take a long time to identify them.",
@@ -115,6 +134,7 @@
  "process.Get QA": "Q&A extraction",
  "process.Image_Index": "Image index generation",
  "process.Is_Ready": "Ready",
  "process.Is_Ready_Count": "{{count}} Group is ready",
  "process.Parsing": "Parsing",
  "process.Vectorizing": "Index vectorization",
  "process.Waiting": "Queue",
@@ -143,6 +163,7 @@
  "sync_collection_failed": "Synchronization collection error, please check whether the source file can be accessed normally",
  "sync_schedule": "Timing synchronization",
  "sync_schedule_tip": "Only existing collections will be synchronized. \nIncludes linked collections and all collections in the API knowledge base. \nThe system will poll for updates every day, and the specific update time cannot be determined.",
  "table_model_tip": "Store each row of data as a chunk",
  "tag.Add_new_tag": "add_new Tag",
  "tag.Edit_tag": "Edit Tag",
  "tag.add": "Create",
@@ -162,7 +183,7 @@
  "vector_model_max_tokens_tip": "Each chunk of data has a maximum length of 3000 tokens",
  "vllm_model": "Image understanding model",
  "website_dataset": "Website Sync",
- "website_dataset_desc": "Website sync allows you to build a Dataset directly using a web link.",
+ "website_dataset_desc": "Build knowledge base by crawling web page data in batches",
  "website_info": "Website Information",
  "yuque_dataset": "Yuque Dataset",
  "yuque_dataset_config": "Yuque Dataset Config",
@@ -16,5 +16,6 @@
  "register": "Register",
  "root_password_placeholder": "The root user password is the value of the environment variable DEFAULT_ROOT_PSW",
  "terms": "Terms",
- "use_root_login": "Log in as root user"
+ "use_root_login": "Log in as root user",
+ "wecom": "Enterprise WeChat"
}
@@ -1,8 +1,10 @@
{
+ "Array_element": "Array element",
+ "Array_element_index": "Index",
  "Click": "Click ",
  "Code": "Code",
  "Confirm_sync_node": "It will be updated to the latest node configuration and fields that do not exist in the template will be deleted (including all custom fields).\n\nIf the fields are complex, it is recommended that you copy a node first and then update the original node to facilitate parameter copying.",
  "Drag": "Drag ",
  "Node.Open_Node_Course": "Open node course",
  "Node_variables": "Node variables",
  "Quote_prompt_setting": "Quote prompt",
@@ -175,6 +177,9 @@
  "text_content_extraction": "Text Extract",
  "text_to_extract": "Text to Extract",
  "these_variables_will_be_input_parameters_for_code_execution": "These variables will be input parameters for code execution",
  "tool.tool_result": "Tool operation results",
  "to_add_node": "to add",
  "to_connect_node": "to connect",
  "tool_call_termination": "Stop ToolCall",
  "tool_custom_field": "Custom Tool",
  "tool_field": " Tool Field Parameter Configuration",
@@ -35,6 +35,7 @@
  "delete_all_input_guide_confirm": "确定要清空输入引导词库吗?",
+ "download_chunks": "下载数据",
  "empty_directory": "这个目录已经没东西可选了~",
  "error_message": "错误信息",
  "file_amount_over": "超出最大文件数量 {{max}}",
  "file_input": "系统文件",
  "file_input_tip": "可通过【插件开始】节点的“文件链接”获取对应文件的链接",
@@ -430,7 +430,6 @@
  "core.dataset.Read Dataset": "查看知识库详情",
  "core.dataset.Set Website Config": "开始配置",
  "core.dataset.Start export": "已开始导出",
  "core.dataset.Table collection": "表格数据集",
  "core.dataset.Text collection": "文本数据集",
  "core.dataset.apiFile": "API 文件",
  "core.dataset.collection.Click top config website": "点击配置网站",
@@ -475,7 +474,7 @@
  "core.dataset.error.unAuthDatasetData": "无权操作该数据",
  "core.dataset.error.unAuthDatasetFile": "无权操作该文件",
  "core.dataset.error.unCreateCollection": "无权操作该数据",
- "core.dataset.error.unExistDataset": "数据集不存在",
+ "core.dataset.error.unExistDataset": "知识库不存在",
  "core.dataset.error.unLinkCollection": "不是网络链接集合",
  "core.dataset.externalFile": "外部文件库",
  "core.dataset.file": "文件",
@@ -555,7 +554,7 @@
  "core.dataset.training.Agent queue": "QA 训练排队",
  "core.dataset.training.Auto mode": "补充索引",
  "core.dataset.training.Auto mode Tip": "通过子索引以及调用模型生成相关问题与摘要,来增加数据块的语义丰富度,更利于检索。需要消耗更多的存储空间和增加 AI 调用次数。",
- "core.dataset.training.Chunk mode": "直接分块",
+ "core.dataset.training.Chunk mode": "分块存储",
  "core.dataset.training.Full": "预计 20 分钟以上",
  "core.dataset.training.Leisure": "空闲",
  "core.dataset.training.QA mode": "问答对提取",
@@ -750,7 +749,6 @@
  "custom_title": "自定义标题",
  "data_index_custom": "自定义索引",
  "data_index_default": "默认索引",
  "data_index_image": "图片索引",
  "data_index_question": "推测问题索引",
  "data_index_summary": "摘要索引",
  "data_not_found": "数据找不到了",
@@ -922,6 +920,7 @@
  "not_open": "未开启",
+ "not_permission": "当前订阅套餐不支持团队操作日志",
  "not_support": "不支持",
  "not_support_wechat_image": "这是一张微信图片",
  "not_yet_introduced": "暂无介绍",
  "open_folder": "打开文件夹",
  "option": "选项",
@@ -939,6 +938,7 @@
  "pay_corporate_payment": "对公支付",
  "pay_money": "应付金额",
  "pay_success": "支付成功",
  "pay_year_tip": "支付 10 个月,畅享 1 年!",
  "permission.Collaborator": "协作者",
  "permission.Default permission": "默认权限",
  "permission.Manage": "管理",
@@ -1142,7 +1142,7 @@
  "support.wallet.subscription.Next plan": "未来套餐",
  "support.wallet.subscription.Stand plan level": "订阅套餐",
  "support.wallet.subscription.Sub plan": "订阅套餐",
- "support.wallet.subscription.Sub plan tip": "免费使用 {{title}} 或升级更高的套餐",
+ "support.wallet.subscription.Sub plan tip": "免费使用【{{title}}】或升级更高的套餐",
  "support.wallet.subscription.Team plan and usage": "套餐与用量",
  "support.wallet.subscription.Training weight": "训练优先级:{{weight}}",
  "support.wallet.subscription.Update extra ai points": "额外 AI 积分",
@@ -1166,7 +1166,7 @@
  "support.wallet.subscription.standardSubLevel.experience": "体验版",
  "support.wallet.subscription.standardSubLevel.experience_desc": "可解锁 FastGPT 完整功能",
  "support.wallet.subscription.standardSubLevel.free": "免费版",
- "support.wallet.subscription.standardSubLevel.free desc": "每月均可免费使用基础功能,连续 30 天未登录系统,将会自动清除知识库",
+ "support.wallet.subscription.standardSubLevel.free desc": "核心功能免费试用。30 天未登录,将会清空知识库。",
  "support.wallet.subscription.standardSubLevel.team": "团队版",
  "support.wallet.subscription.standardSubLevel.team_desc": "适合小团队构建知识库应用并提供对外服务",
  "support.wallet.subscription.status.active": "生效中",
@@ -7,22 +7,35 @@
"auto_indexes": "自动生成补充索引",
"auto_indexes_tips": "通过大模型进行额外索引生成,提高语义丰富度,提高检索的精度。",
"auto_training_queue": "增强索引排队",
"backup_collection": "备份数据",
"backup_data_parse": "备份数据解析中",
"backup_data_uploading": "备份数据上传中: {{num}}%",
"backup_dataset": "备份导入",
"backup_dataset_success": "备份创建成功",
"backup_dataset_tip": "可以将导出知识库时,下载的 csv 文件重新导入。",
"backup_mode": "备份导入",
"chunk_max_tokens": "分块上限",
"chunk_process_params": "分块处理参数",
"chunk_size": "分块大小",
"chunk_trigger": "分块条件",
"chunk_trigger_force_chunk": "强制分块",
"chunk_trigger_max_size": "原文长度小于文件处理模型最大上下文70%",
"chunk_trigger_min_size": "原文长度大于",
"chunk_trigger_tips": "当满足一定条件时才触发分块存储,否则会直接完整存储原文",
"close_auto_sync": "确认关闭自动同步功能?",
"collection.Create update time": "创建/更新时间",
"collection.Training type": "训练模式",
"collection.training_type": "处理模式",
"collection_data_count": "数据量",
"collection_metadata_custom_pdf_parse": "PDF增强解析",
"collection_metadata_image_parse": "图片标注",
"collection_not_support_retraining": "该集合类型不支持重新调整参数",
"collection_not_support_sync": "该集合不支持同步",
"collection_sync": "立即同步",
"collection_sync_confirm_tip": "确认开始同步数据?系统将会拉取最新数据进行比较,如果内容不相同,则会创建一个新的集合并删除旧的集合,请确认!",
"collection_tags": "集合标签",
"common_dataset": "通用知识库",
"common_dataset_desc": "可通过导入文件、网页链接或手动录入形式构建知识库",
"common_dataset_desc": "通过导入文件、网页链接或手动录入形式构建知识库",
"condition": "条件",
"config_sync_schedule": "配置定时同步",
"confirm_to_rebuild_embedding_tip": "确认为知识库切换索引?\n切换索引是一个非常重量的操作,需要对您知识库内所有数据进行重新索引,时间可能较长,请确保账号内剩余积分充足。\n\n此外,你还需要注意修改选择该知识库的应用,避免它们与其他索引模型知识库混用。",
"core.dataset.import.Adjust parameters": "调整参数",
@@ -31,6 +44,7 @@
"custom_split_sign_tip": "允许你根据自定义的分隔符进行分块。通常用于已处理好的数据,使用特定的分隔符来精确分块。可以使用 | 符号表示多个分割符,例如:“。|.” 表示中英文句号。\n尽量避免使用正则相关特殊符号,例如: * () [] {} 等。",
"data_amount": "{{dataAmount}} 组数据, {{indexAmount}} 组索引",
"data_error_amount": "{{errorAmount}} 组训练异常",
"data_index_image": "图片索引",
"data_index_num": "索引 {{index}}",
"data_process_params": "处理参数",
"data_process_setting": "数据处理配置",
@@ -57,8 +71,9 @@
"enhanced_indexes": "索引增强",
"error.collectionNotFound": "集合找不到了~",
"external_file": "外部文件库",
"external_file_dataset_desc": "可以从外部文件库导入文件构建知识库,文件不会进行二次存储",
"external_file_dataset_desc": "可以通过 API,使用外部文件库构建知识库",
"external_id": "文件阅读 ID",
"external_other_dataset_desc": "自定义API、飞书、语雀等外部文档作为知识库",
"external_read_url": "外部预览地址",
"external_read_url_tip": "可以配置你文件库的阅读地址。便于对用户进行阅读鉴权操作。目前可以使用 {{fileId}} 变量来指代外部文件 ID。",
"external_url": "文件访问 URL",
@@ -92,14 +107,18 @@
"is_open_schedule": "启用定时同步",
"keep_image": "保留图片",
"loading": "加载中...",
"max_chunk_size": "最大分块大小",
"move.hint": "移动后,所选知识库/文件夹将继承新文件夹的权限设置,原先的权限设置失效。",
"noChildren": "无子目录",
"noSelectedFolder": "没有选择文件夹",
"noSelectedId": "没有选择 ID",
"noValidId": "没有有效的 ID",
"open_auto_sync": "开启定时同步后,系统将会每天不定时尝试同步集合,集合同步期间,会出现无法搜索到该集合数据现象。",
"other_dataset": "第三方知识库",
"paragraph_max_deep": "最大段落深度",
"paragraph_split": "按段落分块",
"paragraph_split_tip": "优先按 Makdown 标题段落进行分块,如果分块过长,再按长度进行二次分块",
|
||||
"params_config": "配置",
|
||||
"params_setting": "参数设置",
|
||||
"pdf_enhance_parse": "PDF增强解析",
|
||||
"pdf_enhance_parse_price": "{{price}}积分/页",
|
||||
"pdf_enhance_parse_tips": "调用 PDF 识别模型进行解析,可以将其转换成 Markdown 并保留文档中的图片,同时也可以对扫描件进行识别,识别时间较长。",
|
||||
@@ -163,7 +182,7 @@
|
||||
"vector_model_max_tokens_tip": "每个分块数据,最大长度为 3000 tokens",
|
||||
"vllm_model": "图片理解模型",
|
||||
"website_dataset": "Web 站点同步",
|
||||
"website_dataset_desc": "Web 站点同步允许你直接使用一个网页链接构建知识库",
|
||||
"website_dataset_desc": "通过爬虫,批量爬取网页数据构建知识库",
|
||||
"website_info": "网站信息",
|
||||
"yuque_dataset": "语雀知识库",
|
||||
"yuque_dataset_config": "配置语雀知识库",
|
||||
|
||||
@@ -88,6 +88,7 @@
"team.add_writer": "添加可写成员",
"team.avatar_and_name": "头像 & 名称",
"team.belong_to_group": "所属群组",
"team.collaborator.added": "已添加",
"team.group.avatar": "群头像",
"team.group.create": "创建群组",
"team.group.create_failed": "创建群组失败",
@@ -99,9 +100,10 @@
"team.group.keep_admin": "保留管理员权限",
"team.group.manage_member": "管理成员",
"team.group.manage_tip": "可以管理成员、创建群组、管理所有群组、为群组和成员分配权限",
"team.group.permission_tip": "单独配置权限的成员,将遵循个人权限配置,不再受群组权限影响。\n若成员在多个权限组,则该成员的权限取并集。",
"team.group.members": "成员",
"team.group.name": "群组名称",
"team.group.permission.write": "",
"team.group.permission_tip": "单独配置权限的成员,将遵循个人权限配置,不再受群组权限影响。\n若成员在多个权限组,则该成员的权限取并集。",
"team.group.role.admin": "管理员",
"team.group.role.member": "成员",
"team.group.role.owner": "所有者",
@@ -111,6 +113,5 @@
"team.manage_collaborators": "管理协作者",
"team.no_collaborators": "暂无协作者",
"team.org.org": "部门",
"team.write_role_member": "可写权限",
"team.collaborator.added": "已添加"
"team.write_role_member": "可写权限"
}

@@ -1,8 +1,10 @@
{
"Array_element": "数组元素",
"Array_element_index": "下标",
"Click": "点击",
"Code": "代码",
"Confirm_sync_node": "将会更新至最新的节点配置,不存在模板中的字段将会被删除(包括所有自定义字段)。\n如果字段较为复杂,建议您先复制一份节点,再更新原来的节点,便于参数复制。",
"Drag": "拖拽",
"Node.Open_Node_Course": "查看节点教程",
"Node_variables": "节点变量",
"Quote_prompt_setting": "引用提示词配置",
@@ -175,6 +177,8 @@
"text_content_extraction": "文本内容提取",
"text_to_extract": "需要提取的文本",
"these_variables_will_be_input_parameters_for_code_execution": "这些变量会作为代码的运行的输入参数",
"to_add_node": "添加节点",
"to_connect_node": "连接节点",
"tool.tool_result": "工具运行结果",
"tool_call_termination": "工具调用终止",
"tool_custom_field": "自定义工具变量",

@@ -15,6 +15,7 @@
"model.active": "啟用",
"model.alias": "別名",
"model.alias_tip": "模型在系統中展示的名字,方便使用者理解",
"model.censor": "啟用敏感校驗",
"model.censor_tip": "如果需要進行敏感校驗,則開啟該開關",
"model.charsPointsPrice": "模型綜合價格",
"model.charsPointsPrice_tip": "將模型輸入和輸出合併起來進行 Token 計費,語言模型如果單獨設定了輸入和輸出計費,則按輸入和輸出分別計算",
@@ -65,6 +66,7 @@
"model.tool_choice_tip": "如果該模型支援工具呼叫,則開啟該開關",
"model.used_in_classify": "用於問題分類",
"model.used_in_extract_fields": "用於文字擷取",
"model.used_in_query_extension": "用於問題優化",
"model.used_in_tool_call": "用於工具呼叫節點",
"model.vision": "支援圖片識別",
"model.vision_tag": "視覺",

@@ -39,8 +39,6 @@
"new_password": "新密碼",
"notification_receiving": "通知接收",
"old_password": "舊密碼",
"openai_account_configuration": "OpenAI 帳號設定",
"openai_account_setting_exception": "設定 OpenAI 帳號異常",
"package_and_usage": "套餐與用量",
"package_details": "套餐詳細資訊",
"package_expiry_time": "套餐到期時間",
@@ -52,8 +50,10 @@
"password_update_success": "修改密碼成功",
"pending_usage": "待使用",
"phone_label": "手機號",
"please_bind_contact": "請綁定聯繫方式",
"please_bind_notification_receiving_path": "請先綁定通知接收途徑",
"purchase_extra_package": "購買額外套餐",
"redeem_coupon": "兌換代碼",
"reminder_create_bound_notification_account": "提醒建立者綁定通知帳號",
"reset_password": "重置密碼",
"resource_usage": "資源用量",
@@ -75,6 +75,5 @@
"user_team_team_name": "團隊",
"verification_code": "驗證碼",
"you_can_convert": "您可以兌換",
"yuan": "元",
"redeem_coupon": "兌換代碼"
"yuan": "元"
}

@@ -18,6 +18,8 @@
"create_channel": "新增管道",
"default_url": "預設地址",
"detail": "詳細資訊",
"duration": "耗時",
"edit": "編輯",
"edit_channel": "管道設定",
"enable_channel": "啟用",
"forbid_channel": "停用",

@@ -8,8 +8,9 @@
"assign_permission": "權限變更",
"change_department_name": "部門編輯",
"change_member_name": "成員改名",
"confirm_delete_from_org": "確認將 {{username}} 移出部門?",
"confirm_delete_from_team": "確認將 {{username}} 移出團隊?",
"confirm_delete_group": "確認刪除群組?",
"confirm_delete_member": "確認刪除成員?",
"confirm_delete_org": "確認刪除該部門?",
"confirm_forbidden": "確認停用",
"confirm_leave_team": "確認離開該團隊? \n結束後,您在該團隊所有的資源轉讓給團隊所有者。",
@@ -21,6 +22,8 @@
"create_sub_org": "建立子部門",
"delete": "刪除",
"delete_department": "刪除子部門",
"delete_from_org": "移出部門",
"delete_from_team": "移出團隊",
"delete_group": "刪除群組",
"delete_org": "刪除部門",
"edit_info": "編輯訊息",
@@ -28,6 +31,7 @@
"edit_member_tip": "成員名",
"edit_org_info": "編輯部門資訊",
"expires": "過期時間",
"export_members": "導出成員",
"forbid_hint": "停用後,該邀請連結將失效。該操作不可撤銷,是否確認停用?",
"forbid_success": "停用成功",
"forbidden": "停用",
@@ -44,8 +48,10 @@
"invite_member": "邀請成員",
"invited": "已邀請",
"join_team": "加入團隊",
"join_update_time": "加入/更新時間",
"kick_out_team": "移除成員",
"label_sync": "標籤同步",
"leave": "已離職",
"leave_team_failed": "離開團隊異常",
"log_assign_permission": "【{{name}}】更新了【{{objectName}}】的權限:[應用創建:【{{appCreate}}】, 知識庫:【{{datasetCreate}}】, API密鑰:【{{apiKeyCreate}}】, 管理:【{{manage}}】]",
"log_change_department": "【{{name}}】更新了部門【{{departmentName}}】",
@@ -70,6 +76,7 @@
"member_group": "所屬成員組",
"move_member": "移動成員",
"move_org": "行動部門",
"notification_recieve": "團隊通知接收",
"operation_log": "紀錄",
"org": "組織",
"org_description": "介紹",
@@ -84,13 +91,22 @@
"permission_datasetCreate_Tip": "可以在根目錄建立知識庫,(資料夾下的建立權限由資料夾控制)",
"permission_manage": "管理員",
"permission_manage_tip": "可以管理成員、建立群組、管理所有群組、為群組和成員分配權限",
"please_bind_contact": "請綁定聯繫方式",
"recover_team_member": "成員恢復",
"relocate_department": "部門移動",
"remark": "備註",
"remove_tip": "確認將 {{username}} 移出團隊?",
"restore_tip": "確認將 {{username}} 加入團隊嗎?\n僅恢復該成員賬號可用性及相關權限,無法恢復賬號下資源。",
"restore_tip_title": "恢復確認",
"retain_admin_permissions": "保留管理員權限",
"search_log": "搜索日誌",
"search_member": "搜索成員",
"search_member_group_name": "搜尋成員/群組名稱",
"search_org": "搜索部門",
"set_name_avatar": "團隊頭像",
"sync_immediately": "立即同步",
"sync_member_failed": "同步成員失敗",
"sync_member_success": "同步成員成功",
"total_team_members": "共 {{amount}} 名成員",
"transfer_ownership": "轉讓所有者",
"unlimited": "無限制",

@@ -1,13 +1,17 @@
{
"configured": "已設定",
"error.no_permission": "請聯絡管理員設定",
"get_usage_failed": "獲取使用量失敗",
"laf_account": "af 帳號",
|
||||
"no_intro": "暫無說明",
|
||||
"not_configured": "未設定",
|
||||
"open_api_notice": "可以填寫 OpenAI/OneAPI 的相關金鑰。\n如果你填寫了該內容,在線上平臺使用【AI 對話】、【問題分類】和【內容提取】將會走你填寫的 Key,不會計費用。\n請注意你的 Key 是否有存取對應模型的權限。 \nGPT 模型可以選擇 FastAI。",
|
||||
"openai_account_configuration": "OpenAI/OneAPI 帳號",
|
||||
"openai_account_setting_exception": "設置 OpenAI 賬號異常",
|
||||
"request_address_notice": "請求地址,預設為 openai 官方。可填中轉位址,未自動補全 \"v1\"",
|
||||
"third_party_account": "第三方帳號",
|
||||
"third_party_account": "第三方賬號",
|
||||
"third_party_account.configured": "已配置",
|
||||
"third_party_account.not_configured": "未配置",
|
||||
"third_party_account_desc": "管理員可以在這裡設定第三方帳號或變數,該帳號將被團隊所有人使用",
|
||||
"unavailable": "取得使用量異常",
|
||||
"usage": "使用量:",
|
||||
|
||||
@@ -13,8 +13,10 @@
"embedding_index": "索引生成",
"every_day": "天",
"every_month": "月",
"every_week": "每週",
"export_confirm": "匯出確認",
"export_confirm_tip": "目前共 {{total}} 筆使用記錄,確認匯出?",
"export_success": "導出成功",
"export_title": "時間,成員,類型,項目名,AI 積分消耗",
"feishu": "飛書",
"generation_time": "生成時間",

@@ -1,7 +1,11 @@
{
"Click_to_delete_this_field": "點擊刪除該字段",
"Filed_is_deprecated": "該字段已棄用",
"MCP_tools_debug": "偵錯",
"MCP_tools_detail": "查看詳情",
"MCP_tools_list": "工具列表",
"MCP_tools_list_is_empty": "未解析到 MCP 工具",
"MCP_tools_list_with_number": "工具列表: {{total}}",
"MCP_tools_parse_failed": "解析 MCP 地址失敗",
"MCP_tools_url": "MCP 地址",
"MCP_tools_url_is_empty": "MCP 地址不能為空",
@@ -130,6 +134,7 @@
"response_format": "回覆格式",
"saved_success": "儲存成功!\n如需在外部使用該版本,請點選“儲存並發布”",
"search_app": "搜尋應用程式",
"search_tool": "搜索工具",
"setting_app": "應用程式設定",
"setting_plugin": "外掛設定",
"show_top_p_tip": "用溫度取樣的替代方法,稱為 Nucleus 取樣,該模型考慮了具有 TOP_P 機率質量質量的令牌的結果。\n因此,0.1 表示僅考慮包含最高機率質量的令牌。\n預設為 1。",
|
||||
@@ -165,6 +170,7 @@
"template_market_description": "在範本市集探索更多玩法,設定教學與使用指引,帶您理解並上手各種應用程式",
"template_market_empty_data": "找不到合適的範本",
"time_zone": "時區",
"tool_detail": "工具詳情",
"tool_input_param_tip": "這個外掛正常執行需要設定相關資訊",
"tools_no_description": "這個工具沒有介紹~",
"transition_to_workflow": "轉換成工作流程",
@@ -175,6 +181,7 @@
"tts_close": "關閉",
"type.All": "全部",
"type.Create http plugin tip": "透過 OpenAPI Schema 批次建立外掛,相容 GPTs 格式",
"type.Create mcp tools tip": "通過輸入 MCP 地址,自動解析並批量創建可調用的 MCP 工具",
"type.Create one plugin tip": "可以自訂輸入和輸出的工作流程,通常用於封裝重複使用的工作流程",
"type.Create plugin bot": "建立外掛",
"type.Create simple bot": "建立簡易應用程式",
@@ -186,6 +193,8 @@
"type.Import from json tip": "透過 JSON 設定文件,直接建立應用",
"type.Import from json_error": "獲取工作流數據失敗,請檢查URL或手動粘貼JSON數據",
"type.Import from json_loading": "正在獲取工作流數據,請稍候...",
"type.MCP tools": "MCP 工具集",
"type.MCP_tools_url": "MCP 地址",
"type.Plugin": "外掛",
"type.Simple bot": "簡易應用程式",
"type.Workflow bot": "工作流程",

@@ -26,6 +26,8 @@
"content_empty": "無內容",
"contextual": "{{num}} 筆上下文",
"contextual_preview": "上下文預覽 {{num}} 筆",
"core.chat.moveCancel": "上滑取消",
"core.chat.shortSpeak": "說話時間太短",
"csv_input_lexicon_tip": "僅支援 CSV 批次匯入,點選下載範本",
"custom_input_guide_url": "自訂詞彙庫網址",
"data_source": "來源知識庫:{{name}}",
@@ -33,6 +35,7 @@
"delete_all_input_guide_confirm": "確定要清除輸入導引詞彙庫嗎?",
"download_chunks": "下載資料",
"empty_directory": "此目錄中已無項目可選~",
"error_message": "錯誤訊息",
"file_amount_over": "超出檔案數量上限 {{max}}",
"file_input": "檔案輸入",
"file_input_tip": "可透過「外掛程式啟動」節點的「檔案連結」取得對應檔案的連結",
@@ -47,7 +50,6 @@
"items": "筆",
"llm_tokens": "LLM tokens",
"module_runtime_and": "模組執行總時間",
"moveCancel": "上滑取消",
"multiple_AI_conversations": "多組 AI 對話",
"new_input_guide_lexicon": "新增詞彙庫",
"no_workflow_response": "無工作流程資料",
@@ -56,6 +58,7 @@
"plugins_output": "外掛程式輸出",
"press_to_speak": "按住說話",
"query_extension_IO_tokens": "問題最佳化輸入/輸出 Tokens",
"query_extension_result": "問題優化結果",
"question_tip": "由上至下,各個模組的回應順序",
"read_raw_source": "開啟原文",
"reasoning_text": "思考過程",
@@ -72,7 +75,6 @@
"select_file": "上傳檔案",
"select_file_img": "上傳檔案 / 圖片",
"select_img": "上傳圖片",
"shortSpeak ": "說話時間太短",
"source_cronJob": "定時執行",
"stream_output": "串流輸出",
"to_dataset": "前往知識庫",

@@ -114,6 +114,7 @@
"click_to_resume": "點選繼續",
"code_editor": "程式碼編輯器",
"code_error.account_error": "帳號名稱或密碼錯誤",
"code_error.account_exist": "賬號已註冊",
"code_error.account_not_found": "使用者未註冊",
"code_error.app_error.invalid_app_type": "無效的應用程式類型",
"code_error.app_error.invalid_owner": "非法的應用程式擁有者",
@@ -185,7 +186,6 @@
"commercial_function_tip": "請升級為商業版後使用此功能:https://doc.fastgpt.cn/docs/commercial/intro/",
"comon.Continue_Adding": "繼續新增",
"compliance.chat": "內容由第三方 AI 產生,無法保證其真實性與準確性,僅供參考。",
"compliance.compliance.dataset": "請確保您的內容嚴格遵守相關法律法規,避免包含任何違法或侵權的內容。\n在上傳可能涉及敏感資訊的資料時請務必謹慎。",
"compliance.dataset": "請確保您的內容嚴格遵守相關法律法規,避免包含任何違法或侵權的內容。\n在上傳可能涉及敏感資訊的資料時請務必謹慎。",
"confirm_choice": "確認選擇",
"confirm_move": "移動至此",
@@ -430,7 +430,6 @@
"core.dataset.Read Dataset": "檢視知識庫詳細資料",
"core.dataset.Set Website Config": "開始設定",
"core.dataset.Start export": "已開始匯出",
"core.dataset.Table collection": "表格資料集",
"core.dataset.Text collection": "文字資料集",
"core.dataset.apiFile": "API 檔案",
"core.dataset.collection.Click top config website": "點選設定網站",
@@ -475,6 +474,7 @@
"core.dataset.error.unAuthDatasetData": "無權操作此資料",
"core.dataset.error.unAuthDatasetFile": "無權操作此檔案",
"core.dataset.error.unCreateCollection": "無權操作此資料",
"core.dataset.error.unExistDataset": "知識庫不存在",
"core.dataset.error.unLinkCollection": "不是網路連結集合",
"core.dataset.externalFile": "外部檔案庫",
"core.dataset.file": "檔案",
@@ -528,7 +528,6 @@
"core.dataset.search.mode.fullTextRecall desc": "使用傳統的全文檢索,適合尋找特定關鍵字和主謂語的特殊資料",
"core.dataset.search.mode.mixedRecall": "混合檢索",
"core.dataset.search.mode.mixedRecall desc": "使用向量檢索與全文檢索的綜合結果,並使用 RRF 演算法進行排序。",
"core.dataset.search.score.embedding": "語意檢索",
"core.dataset.search.score.embedding desc": "透過計算向量之間的距離取得分數,範圍為 0 到 1。",
"core.dataset.search.score.fullText": "全文檢索",
"core.dataset.search.score.fullText desc": "計算相同關鍵字的分數,範圍為 0 到無限大。",
@@ -555,7 +554,7 @@
"core.dataset.training.Agent queue": "問答訓練排隊中",
"core.dataset.training.Auto mode": "補充索引",
"core.dataset.training.Auto mode Tip": "透過子索引以及呼叫模型產生相關問題與摘要,來增加資料區塊的語意豐富度,更有利於檢索。需要消耗更多的儲存空間並增加 AI 呼叫次數。",
"core.dataset.training.Chunk mode": "直接分塊",
"core.dataset.training.Chunk mode": "分塊存儲",
"core.dataset.training.Full": "預計 20 分鐘以上",
"core.dataset.training.Leisure": "閒置",
"core.dataset.training.QA mode": "問答對提取",
@@ -750,7 +749,6 @@
"custom_title": "自訂標題",
"data_index_custom": "自定義索引",
"data_index_default": "預設索引",
"data_index_image": "圖片索引",
"data_index_question": "推測問題索引",
"data_index_summary": "摘要索引",
"data_not_found": "數據找不到了",
@@ -760,7 +758,6 @@
"dataset.Create Folder": "建立資料夾",
"dataset.Create manual collection": "建立手動資料集",
"dataset.Delete Dataset Error": "刪除知識庫錯誤",
"dataset.Edit API Service": "編輯 API 檔案介面",
"dataset.Edit Folder": "編輯資料夾",
"dataset.Edit Info": "編輯資訊",
"dataset.Export": "匯出",
@@ -878,6 +875,7 @@
"model.provider": "模型提供者",
"model.search_name_placeholder": "根據模型名搜尋",
"model.type.chat": "語言模型",
"model.type.embedding": "索引模型",
"model.type.reRank": "重排模型",
"model.type.stt": "語音辨識",
"model.type.tts": "語音合成",
@@ -922,6 +920,7 @@
"not_open": "未開啟",
"not_permission": "當前訂閱套餐不支持團隊操作日誌",
"not_support": "不支援",
"not_support_wechat_image": "這是一張微信圖片",
"not_yet_introduced": "暫無介紹",
"open_folder": "開啟資料夾",
"option": "選項",
@@ -939,6 +938,7 @@
"pay_corporate_payment": "對公支付",
"pay_money": "應付金額",
"pay_success": "支付成功",
"pay_year_tip": "支付 10 個月,暢享 1 年!",
"permission.Collaborator": "協作者",
"permission.Default permission": "預設權限",
"permission.Manage": "管理",
@@ -1142,7 +1142,7 @@
"support.wallet.subscription.Next plan": "未來方案",
"support.wallet.subscription.Stand plan level": "訂閱方案",
"support.wallet.subscription.Sub plan": "訂閱方案",
"support.wallet.subscription.Sub plan tip": "免費使用 {{title}} 或升級更進階的方案",
"support.wallet.subscription.Sub plan tip": "免費使用【{{title}}】或升級更進階的方案",
"support.wallet.subscription.Team plan and usage": "方案與使用量",
"support.wallet.subscription.Training weight": "訓練優先權:{{weight}}",
"support.wallet.subscription.Update extra ai points": "額外 AI 點數",
@@ -1166,7 +1166,7 @@
"support.wallet.subscription.standardSubLevel.experience": "體驗版",
"support.wallet.subscription.standardSubLevel.experience_desc": "可解鎖 FastGPT 完整功能",
"support.wallet.subscription.standardSubLevel.free": "免費版",
"support.wallet.subscription.standardSubLevel.free desc": "每月可免費使用基本功能。若連續 30 天未登入系統,系統將自動清除知識庫",
"support.wallet.subscription.standardSubLevel.free desc": "核心功能免費試用。 \n30 天未登錄,將會清空知識庫。",
"support.wallet.subscription.standardSubLevel.team": "團隊版",
"support.wallet.subscription.standardSubLevel.team_desc": "適合小團隊建構知識庫應用並提供對外服務",
"support.wallet.subscription.status.active": "使用中",
@@ -1268,8 +1268,6 @@
"user.reset_password_tip": "未設置初始密碼/長時間未修改密碼,請重置密碼",
"user.team.Balance": "團隊餘額",
"user.team.Check Team": "切換",
"user.team.Confirm Invite": "確認邀請",
"user.team.Create Team": "建立新團隊",
"user.team.Leave Team": "離開團隊",
"user.team.Leave Team Failed": "離開團隊失敗",
"user.team.Member": "成員",
@@ -1280,13 +1278,10 @@
"user.team.Processing invitations Tips": "您有 {{amount}} 個需要處理的團隊邀請",
"user.team.Remove Member Confirm Tip": "確認將 {{username}} 移出團隊?",
"user.team.Select Team": "選擇團隊",
"user.team.Set Name": "為團隊命名",
"user.team.Switch Team Failed": "切換團隊失敗",
"user.team.Tags Async": "儲存",
"user.team.Team Name": "團隊名稱",
"user.team.Team Tags Async": "標籤同步",
"user.team.Team Tags Async Success": "連結錯誤修正成功,標籤資訊已更新",
"user.team.Update Team": "更新團隊資訊",
"user.team.invite.Accepted": "已加入團隊",
"user.team.invite.Deal Width Footer Tip": "處理完會自動關閉",
"user.team.invite.Reject": "已拒絕邀請",

@@ -7,22 +7,33 @@
"auto_indexes": "自動生成補充索引",
"auto_indexes_tips": "透過大模型進行額外索引生成,提高語義豐富度,提高檢索的精度。",
"auto_training_queue": "增強索引排隊",
"backup_collection": "備份數據",
"backup_data_uploading": "備份數據上傳中: {{num}}%",
"backup_dataset": "備份導入",
"backup_dataset_success": "備份創建成功",
"backup_dataset_tip": "可以將導出知識庫時,下載的 csv 文件重新導入。",
"backup_mode": "備份導入",
"chunk_max_tokens": "分塊上限",
"chunk_process_params": "分塊處理參數",
"chunk_size": "分塊大小",
"chunk_trigger": "分塊條件",
"chunk_trigger_force_chunk": "強制分塊",
"chunk_trigger_max_size": "原文長度小於文件處理模型最大上下文 70%",
"chunk_trigger_min_size": "原文長度大於",
"close_auto_sync": "確認關閉自動同步功能?",
"collection.Create update time": "建立/更新時間",
"collection.Training type": "分段模式",
"collection.training_type": "處理模式",
"collection_data_count": "資料量",
"collection_metadata_custom_pdf_parse": "PDF 增強解析",
"collection_metadata_image_parse": "圖片標註",
"collection_not_support_retraining": "此集合類型不支援重新調整參數",
"collection_not_support_sync": "該集合不支援同步",
"collection_sync": "立即同步",
"collection_sync_confirm_tip": "確認開始同步資料?\n系統將會拉取最新資料進行比較,如果內容不相同,則會建立一個新的集合並刪除舊的集合,請確認!",
"collection_tags": "集合標籤",
"common_dataset": "通用資料集",
"common_dataset_desc": "可透過匯入檔案、網頁連結或手動輸入的方式建立資料集",
"common_dataset_desc": "通過導入文件、網頁鏈接或手動錄入形式構建知識庫",
"condition": "條件",
"config_sync_schedule": "設定定時同步",
"confirm_to_rebuild_embedding_tip": "確定要為資料集切換索引嗎?\n切換索引是一個重要的操作,需要對您資料集內所有資料重新建立索引,可能需要較長時間,請確保帳號內剩餘點數充足。\n\n此外,您還需要注意修改使用此資料集的應用程式,避免與其他索引模型資料集混用。",
"core.dataset.import.Adjust parameters": "調整參數",
@@ -31,6 +42,7 @@
"custom_split_sign_tip": "允許你根據自定義的分隔符進行分塊。\n通常用於已處理好的資料,使用特定的分隔符來精確分塊。\n可以使用 | 符號表示多個分割符,例如:“。|.”表示中英文句號。\n\n盡量避免使用正則相關特殊符號,例如:* () [] {} 等。",
"data_amount": "{{dataAmount}} 組資料,{{indexAmount}} 組索引",
"data_error_amount": "{{errorAmount}} 組訓練異常",
"data_index_image": "圖片索引",
"data_index_num": "索引 {{index}}",
"data_process_params": "處理參數",
"data_process_setting": "資料處理設定",
@@ -57,8 +69,9 @@
"enhanced_indexes": "索引增強",
"error.collectionNotFound": "找不到集合",
"external_file": "外部檔案庫",
"external_file_dataset_desc": "可以從外部檔案庫匯入檔案建立資料集,檔案不會進行二次儲存",
"external_file_dataset_desc": "可以通過 API,使用外部文件庫構建知識庫",
"external_id": "檔案讀取識別碼",
"external_other_dataset_desc": "自定義API、飛書、語雀等外部文檔作為知識庫",
"external_read_url": "外部預覽網址",
"external_read_url_tip": "可以設定您檔案庫的讀取網址,方便對使用者進行讀取權限驗證。目前可使用 {{fileId}} 變數來代表外部檔案識別碼。",
"external_url": "檔案存取網址",
@@ -92,14 +105,18 @@
"is_open_schedule": "啟用定時同步",
"keep_image": "保留圖片",
"loading": "加載中...",
"max_chunk_size": "最大分塊大小",
"move.hint": "移動後,所選資料集/資料夾將繼承新資料夾的權限設定,原先的權限設定將失效。",
"noChildren": "無子目錄",
"noSelectedFolder": "沒有選擇文件夾",
"noSelectedId": "沒有選擇 ID",
"noValidId": "沒有有效的 ID",
"open_auto_sync": "開啟定時同步後,系統將每天不定時嘗試同步集合,集合同步期間,會出現無法搜尋到該集合資料現象。",
"other_dataset": "第三方知識庫",
"paragraph_max_deep": "最大段落深度",
"paragraph_split": "按段落分塊",
"paragraph_split_tip": "優先按 Makdown 標題段落進行分塊,如果分塊過長,再按長度進行二次分塊",
|
||||
"params_config": "設定",
|
||||
"params_setting": "參數設定",
|
||||
"pdf_enhance_parse": "PDF 增強解析",
|
||||
"pdf_enhance_parse_price": "{{price}}積分/頁",
|
||||
"pdf_enhance_parse_tips": "呼叫 PDF 識別模型進行解析,可以將其轉換成 Markdown 並保留文件中的圖片,同時也可以對掃描件進行識別,識別時間較長。",
|
||||
@@ -115,6 +132,7 @@
|
||||
"process.Get QA": "問答對提取",
|
||||
"process.Image_Index": "圖片索引生成",
|
||||
"process.Is_Ready": "已就緒",
|
||||
"process.Is_Ready_Count": "{{count}} 組已就緒",
|
||||
"process.Parsing": "內容解析中",
|
||||
"process.Vectorizing": "索引向量化",
|
||||
"process.Waiting": "排隊中",
|
||||
@@ -162,7 +180,7 @@
|
||||
"vector_model_max_tokens_tip": "每個分塊資料,最大長度為 3000 tokens",
|
||||
"vllm_model": "圖片理解模型",
|
||||
"website_dataset": "網站同步",
|
||||
"website_dataset_desc": "網站同步功能讓您可以直接使用網頁連結建立資料集",
|
||||
"website_dataset_desc": "通過爬蟲,批量爬取網頁數據構建知識庫",
|
||||
"website_info": "網站資訊",
|
||||
"yuque_dataset": "語雀知識庫",
|
||||
"yuque_dataset_config": "設定語雀知識庫",
|
||||
|
||||
@@ -16,5 +16,6 @@
"register": "註冊帳號",
"root_password_placeholder": "root 使用者密碼為環境變數 DEFAULT_ROOT_PSW 的值",
"terms": "服務條款",
"use_root_login": "使用 root 使用者登入"
"use_root_login": "使用 root 使用者登入",
"wecom": "企業微信"
}

@@ -1,8 +1,10 @@
{
"Array_element": "陣列元素",
"Array_element_index": "索引",
"Click": "點擊",
"Code": "程式碼",
"Confirm_sync_node": "將會更新至最新的節點設定,不存在於範本中的欄位將會被刪除(包含所有自訂欄位)。\n如果欄位比較複雜,建議您先複製一份節點,再更新原來的節點,以便複製參數。",
"Drag": "拖拽",
"Node.Open_Node_Course": "開啟節點教學課程",
"Node_variables": "節點變數",
"Quote_prompt_setting": "引用提示詞設定",
@@ -175,6 +177,9 @@
"text_content_extraction": "文字內容擷取",
"text_to_extract": "要擷取的文字",
"these_variables_will_be_input_parameters_for_code_execution": "這些變數會作為程式碼執行的輸入參數",
"tool.tool_result": "工具運行結果",
"to_add_node": "添加節點",
"to_connect_node": "連接節點",
"tool_call_termination": "工具呼叫終止",
"tool_custom_field": "自訂工具變數",
"tool_field": "工具參數設定",

Some files were not shown because too many files have changed in this diff.