Compare commits

...

32 Commits

Author SHA1 Message Date
Archer
3ba9c21828 4.8.9 test fix (#2291)
* perf: read file icon

* perf:icon

* fix: i18n

* perf: hide pro api

* perf: upload expired time

* perf: upload file frequency limit

* perf: upload file ux

* perf: input file tip

* perf: qa custom chunk size

* feat: dataset openapi

* fix: auth dataset list

* fix: openapi doc

* perf: zero temperature change to 0.01

* perf: read file prompt

* perf: read file prompt

* perf: free plan tip

* feat: cron job usage
2024-08-08 10:07:24 +08:00
Coder-Wjt
7b388b287a Adjust the parameters of the gpt-4o-mini model; add a chunkSize setting input for custom QA mode (#2139)
* Adjust the parameters of the gpt-4o-mini model; add a chunkSize setting input for custom QA mode

* Adjust the parameters of the gpt-4o-mini model
2024-08-07 18:05:41 +08:00
shilin
e9ba00d38f fix(dataset): vertically center the type label at the bottom-right corner of the dataset list (#2287) 2024-08-07 18:01:13 +08:00
Archer
a109c59cc6 Move app owner (#2280)
* feat: app owner change (#2271)

* feat(app): changeOwner api

* feat: changeOwner api

* feat: ChangeOwnerModal

* feat: update change owner api

* chore: move change owner api into pro version
feat(fe): change owner modal

* feat: add change owner button and modal to InfoModal

* change owner ux

* feat: doc

* perf: remove info change owner btn

---------

Co-authored-by: Finley Ge <32237950+FinleyGe@users.noreply.github.com>
2024-08-06 22:13:16 +08:00
Archer
91bc573571 Update 482.md (#2283) 2024-08-06 20:16:30 +08:00
Hwting
2712ef21ff docs: Repair and supplement document content xinference.md (#2278)
Repair and supplement document content
2024-08-06 19:59:34 +08:00
Archer
593af8f7dc Update README.md (#2274) 2024-08-06 12:13:58 +08:00
Archer
96ebec9809 Fix navbar (#2273)
* fix: phone navbar cannot scroll; fix: file upload process error

* perf: select repeat file
2024-08-06 11:49:52 +08:00
shilin
f35ba8e5a7 feat(chatbot-extension): a Chrome extension that can be used to chat with AI on any website (#2235)
* feat(chatbot-extension): a Chrome extension that can be used to chat with AI on any website

* fix: plugin supports voice input
feat: chatbot supports switching

* fix: automatically hide the bot list after switching chatbots
2024-08-06 10:53:01 +08:00
Archer
e36d9d794f File input (#2270)
* doc

* feat: file upload config

* perf: chat box file params

* feat: markdown show file

* feat: chat file store and clear

* perf: read file contentType

* feat: llm vision config

* feat: file url output

* perf: plugin error text

* perf: image load

* feat: ai chat document

* perf: file block ui

* feat: read file node

* feat: file read response field

* feat: simple mode support read files

* feat: tool call

* feat: read file histories

* perf: select file

* perf: select file config

* i18n

* i18n

* fix: ts; feat: tool response preview result
2024-08-06 10:00:22 +08:00
papapatrick
10dcdb5491 I18n Translations (#2267)
* rebase

* i18n-1

* add error info i18n

* fix

* fix

* refactor: remove error.json

* delete useI18n
2024-08-05 18:42:21 +08:00
heheer
025d405fe2 feat: allow adding tags when creating collections via api (#2268)
* feat: allow adding tags when creating collections via api

* fix
2024-08-05 18:33:58 +08:00
Archer
fe71efbbd2 Collection tag (#2266)
* feat: collection metadata filter (#2211)

* feat: add dataset collection tags (#2231)

* dataset page

* workflow page

* move

* fix

* add plus filter

* fix

* fix

* fix

* perf: collection tag code

* fix: collection tags (#2249)

* fix

* fix

* fix tags of dataset page

* fix tags of workflow page

* doc

* add comments

* fix: collection tags (#2264)

* fix: metadata filter

* feat: search filter

---------

Co-authored-by: heheer <1239331448@qq.com>
Co-authored-by: heheer <heheer@sealos.io>
2024-08-05 12:08:46 +08:00
Archer
56f6e69bc7 System inform (#2263)
* feat: Bind Notification Pipe (#2229)

* chore: account page add bind notification modal

* feat: timerlock schema and type

* feat(fe): bind notification method modal

* chore: fe adjust

* feat: clean useless code

* fix: cron lock

* chore: adjust the code

* chore: rename api

* chore: remove unused code

* chore: fe adjust

* perf: bind inform ux

* fix: time ts

* chore: notification (#2251)

* perf: send message code

* perf: sub schema index

* fix: timezone plugin

* fix: format

---------

Co-authored-by: Finley Ge <32237950+FinleyGe@users.noreply.github.com>
2024-08-05 00:29:14 +08:00
heheer
998e7833e8 fix: workflow plugin module default input value (#2257) 2024-08-03 18:16:50 +08:00
heheer
110bf9391f fix: get plugin input variables from history (#2255) 2024-08-03 12:31:31 +08:00
heheer
48f2c95bb9 fix: add delete all input guide lexicon button (#2241)
* fix: add delete all input guide lexicon button

* fix

* fix permission
2024-08-03 12:29:15 +08:00
Archer
9f37e56173 Chat perf test (#2252)
* perf: optimize chat init api (#2233)

* perf: optimize the chat/init api

* perf: add optimizations for the team and share APIs

* perf: refine API code style

* perf: refine API code style

* perf: polish details

* perf: add auth field

* perf: more elegant implementation 🥳

* fix: Fix the bug in debugging Tag (#2250)

* fix: debug Tag not displayed

* perf

* perf: optimize code

* fix: return a new object

* fix: show tag error

---------

Co-authored-by: papapatrick <109422393+Patrickill@users.noreply.github.com>
2024-08-03 10:44:31 +08:00
lolocoo
af1cff6230 fix(appList): after deleting an app, returning to chat and selecting the last conversation's app reported that the app does not exist (#2236) 2024-08-02 10:14:39 +08:00
Archer
3760726354 Update 488.md (#2234) 2024-08-01 12:24:38 +08:00
Archer
6ddf0b23fc Update 488.md (#2232) 2024-08-01 10:06:44 +08:00
Archer
f680bdf54d yml version (#2230) 2024-07-31 17:04:48 +08:00
Archer
834d36981f fix: response modal (#2228) 2024-07-31 16:11:04 +08:00
Archer
2fdca621e6 perf: remove unuse code (#2227) 2024-07-31 15:43:58 +08:00
heheer
8c7f4a3a30 fix: add check for reset (#2226) 2024-07-31 15:42:59 +08:00
Leo Liu Junyi
41da52d6ed Fix Switch state inconsistency for Scheduled Trigger (#2221)
* Fix Switch display being out of sync when scheduled close is set

* Refine the implementation
2024-07-31 12:07:31 +08:00
Archer
446d9cb1b9 fix: oneapi image (#2222) 2024-07-31 12:04:11 +08:00
Rick
48a5853550 fix: the helm release failed due to version handle (#2199)
Co-authored-by: rick <LinuxSuRen@users.noreply.github.com>
2024-07-31 10:19:42 +08:00
Archer
c129874d59 fix: dataset params default error (#2210) 2024-07-30 17:58:40 +08:00
papapatrick
cc6fa4207a style: polish styles (#2203) 2024-07-30 11:47:26 +08:00
papapatrick
e27abe1f6b perf: optimize the run-detail modal (#2192)
* perf: optimize the run-detail modal

* style: adjust styles

* style: polish modal styles && add rounded corners to the app switcher

* fix: fix build errors
2024-07-29 18:36:13 +08:00
Zhedong Cen
5cecef5836 Update README.md (#2197) 2024-07-29 17:28:50 +08:00
344 changed files with 8939 additions and 2616 deletions

View File

@@ -24,11 +24,6 @@ jobs:
export APP_VERSION=${{ steps.vars.outputs.tag }}
export HELM_VERSION=${{ steps.vars.outputs.tag }}
export HELM_REPO=ghcr.io/${{ github.repository_owner }}
if [[ ! "$line" =~ ^v ]]
then
unset APP_VERSION
unset HELM_VERSION
fi
helm dependency update files/helm/fastgpt
helm package files/helm/fastgpt --version ${HELM_VERSION}-helm --app-version ${APP_VERSION} -d bin
helm push bin/fastgpt-${HELM_VERSION}-helm.tgz oci://${HELM_REPO}

View File

@@ -21,6 +21,7 @@
"i18n-ally.namespace": true,
"i18n-ally.pathMatcher": "{locale}/{namespaces}.json",
"i18n-ally.extract.targetPickingStrategy": "most-similar-by-key",
"i18n-ally.translate.engines": ["deepl", "google"],
"[typescript]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
}

View File

@@ -106,7 +106,7 @@ https://github.com/labring/FastGPT/assets/15308462/7d3a38df-eb0e-4388-9250-2409b
Because a database must be deployed, wait 2–4 minutes after deployment before the site is reachable. The default uses the lowest configuration, so the first visit may be a bit slow. For a tutorial, see: [Deploy FastGPT on Sealos](https://doc.fastgpt.in/docs/development/sealos/)
* [Quickly start local development](https://doc.fastgpt.in/docs/development/intro/)
* [Start local development](https://doc.fastgpt.in/docs/development/intro/)
* [Deploy FastGPT](https://doc.fastgpt.in/docs/development/sealos)
* [System configuration file guide](https://doc.fastgpt.in/docs/development/configuration/)
* [Multi-model configuration](https://doc.fastgpt.in/docs/development/one-api/)
@@ -122,7 +122,7 @@ https://github.com/labring/FastGPT/assets/15308462/7d3a38df-eb0e-4388-9250-2409b
Scan the QR code to join the Feishu topic group (newly opened; the WeChat group is being phased out)
![](https://oss.laf.run/otnvvf-imgs/1719505774252.jpg)
![](https://oss.laf.run/otnvvf-imgs/飞书20240806-114214.png)
<a href="#readme">
<img src="https://img.shields.io/badge/-返回顶部-7d09f1.svg" alt="#" align="right">

BIN
bin/fastgpt-v1.0.0-helm.tgz Normal file

Binary file not shown.

Binary file not shown.

After

Width:  |  Height:  |  Size: 294 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 83 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 126 KiB

View File

@@ -4,7 +4,7 @@ description: "FastGPT 对话问题引导"
icon: "code"
draft: false
toc: true
weight: 350
weight: 108
---
![](/imgs/questionGuide.png)

View File

@@ -0,0 +1,50 @@
---
title: "知识库集合标签"
description: "FastGPT 知识库集合标签使用说明"
icon: "developer_guide"
draft: false
toc: true
weight: 108
---
Dataset collection tags are a feature exclusive to the FastGPT commercial edition. They let you tag and categorize the data collections in a dataset, so dataset data can be managed more efficiently.
Going further, you can add a collection filter when searching the dataset during Q&A, for more precise search results.
| | | |
| --------------------- | --------------------- | --------------------- |
| ![](/imgs/collection-tags-1.png) | ![](/imgs/collection-tags-2.png) | ![](/imgs/collection-tags-3.png) |
## Basic tag operations
On the dataset detail page you can manage tags. The available operations are:
- Create a tag
- Rename a tag
- Delete a tag
- Assign one tag to multiple data collections
- Add multiple tags to one data collection
You can also use tags to filter data collections.
## Dataset search: collection filter
When searching a dataset, you can use tags to get more precise results by filling in the "collection filter" field. An example:
```json
{
  "tags": {
    "$and": ["tag 1","tag 2"],
    "$or": ["when $and tags are set, $and takes effect and $or is ignored"]
  },
  "createTime": {
    "$gte": "YYYY-MM-DD HH:mm format; the collection's creation time must be later than this",
    "$lte": "YYYY-MM-DD HH:mm format; the collection's creation time must be earlier than this; can be combined with $gte"
  }
}
```
Two points to note when filling this in:
- A tag value can be a `string` tag name or `null`; `null` stands for data collections with no tags set.
- Tag filtering supports two condition types, `$and` and `$or`; when both are set, only `$and` takes effect.
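For illustration, a minimal TypeScript sketch (not from the original docs) of building this filter object; the field shapes mirror the JSON format above, and the `null` entry follows the note about untagged collections:
```ts
// Sketch only: shape assumed from the documented JSON format above.
type CollectionFilter = {
  tags?: { $and?: (string | null)[]; $or?: (string | null)[] };
  createTime?: { $gte?: string; $lte?: string };
};

const filter: CollectionFilter = {
  // Matches collections tagged "tag 1" or with no tags at all ($or only,
  // since $or is ignored whenever $and is present).
  tags: { $or: ['tag 1', null] },
  // Only collections created on or after this time.
  createTime: { $gte: '2024-08-01 00:00' }
};

// The stringified object is what goes into the "collection filter" field.
console.log(JSON.stringify(filter));
```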

View File

@@ -124,53 +124,36 @@ curl --location --request POST 'https://<oneapi_url>/v1/chat/completions' \
## Connecting a local model to FastGPT
Modify FastGPT's `config.json` configuration file, where chatModels (chat models, used for conversations), cqModels (question classification models, used to classify questions), and extractModels (content extraction models, used for tool selection) are defined. Add the qwen-chat model to chatModels, cqModels, and extractModels respectively:
Modify the llmModels section of FastGPT's `config.json` configuration file to add the qwen-chat model:
```json
{
"chatModels": [
...
{
"model": "qwen-chat",
"name": "Qwen",
"maxContext": 2048,
"maxResponse": 2048,
"quoteMaxToken": 2000,
"maxTemperature": 1,
"vision": false,
"defaultSystemChatPrompt": ""
}
...
...
"llmModels": [
{
"model": "qwen-chat", // 模型名(对应OneAPI中渠道的模型名)
"name": "Qwen", // 模型别名
"avatar": "/imgs/model/Qwen.svg", // 模型的logo
"maxContext": 125000, // 最大上下文
"maxResponse": 4000, // 最大回复
"quoteMaxToken": 120000, // 最大引用内容
"maxTemperature": 1.2, // 最大温度
"charsPointsPrice": 0, // n积分/1k token商业版
"censor": false, // 是否开启敏感校验(商业版)
"vision": true, // 是否支持图片输入
"datasetProcess": true, // 是否设置为知识库处理模型QA务必保证至少有一个为true否则知识库会报错
"usedInClassify": true, // 是否用于问题分类务必保证至少有一个为true
"usedInExtractFields": true, // 是否用于内容提取务必保证至少有一个为true
"usedInToolCall": true, // 是否用于工具调用务必保证至少有一个为true
"usedInQueryExtension": true, // 是否用于问题优化务必保证至少有一个为true
"toolChoice": true, // 是否支持工具选择分类内容提取工具调用会用到。目前只有gpt支持
"functionCall": false, // 是否支持函数调用(分类,内容提取,工具调用会用到。会优先使用 toolChoice如果为false则使用 functionCall如果仍为 false则使用提示词模式
"customCQPrompt": "", // 自定义文本分类提示词(不支持工具和函数调用的模型
"customExtractPrompt": "", // 自定义内容提取提示词
"defaultSystemChatPrompt": "", // 对话默认携带的系统提示词
"defaultConfig": {} // 请求API时挟带一些默认配置比如 GLM4 的 top_p
}
],
"cqModels": [
...
{
"model": "qwen-chat",
"name": "Qwen",
"maxContext": 2048,
"maxResponse": 2048,
"inputPrice": 0,
"outputPrice": 0,
"toolChoice": true,
"functionPrompt": ""
}
...
],
"extractModels": [
...
{
"model": "qwen-chat",
"name": "Qwen",
"maxContext": 2048,
"maxResponse": 2048,
"inputPrice": 0,
"outputPrice": 0,
"toolChoice": true,
"functionPrompt": ""
}
...
]
}
...
```
Then restart FastGPT, and you can select the Qwen model in the app configuration to chat:

View File

@@ -531,6 +531,8 @@ data is the collection ID.
{{< tab tabName="请求示例" >}}
{{< markdownify >}}
When uploading via code, note that Chinese filenames need to be encoded, otherwise they are prone to garbling.
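For illustration, a minimal Node.js sketch (not from the original docs) of encoding the filename before upload; it assumes the `form-data` and `node-fetch` packages, and the `data` payload shape is an assumption that follows the curl example below:
```ts
import FormData from 'form-data';
import fs from 'fs';
import fetch from 'node-fetch';

const filename = '测试文档.pdf';
const form = new FormData();
// Encode the Chinese filename so it survives the multipart request intact.
form.append('file', fs.createReadStream(`./${filename}`), {
  filename: encodeURIComponent(filename)
});
// Remaining fields and endpoint follow the curl example below (assumption).
form.append('data', JSON.stringify({ datasetId: 'xxx' }));

fetch('http://localhost:3000/api/core/dataset/collection/create/localFile', {
  method: 'POST',
  headers: { Authorization: 'Bearer {{authorization}}', ...form.getHeaders() },
  body: form
});
```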
```bash
curl --location --request POST 'http://localhost:3000/api/core/dataset/collection/create/localFile' \
--header 'Authorization: Bearer {{authorization}}' \
@@ -1173,7 +1175,7 @@ curl --location --request PUT 'http://localhost:3000/api/core/dataset/data/updat
--header 'Authorization: Bearer {{authorization}}' \
--header 'Content-Type: application/json' \
--data-raw '{
"id":"65abd4b29d1448617cba61db",
"dataId":"65abd4b29d1448617cba61db",
"q":"测试111",
"a":"sss",
"indexes":[
@@ -1196,7 +1198,7 @@ curl --location --request PUT 'http://localhost:3000/api/core/dataset/data/updat
{{< markdownify >}}
{{% alert icon=" " context="success" %}}
- id: the data ID
- dataId: the data ID
- q: primary data (optional)
- a: auxiliary data (optional)
- indexes: custom indexes (optional); for the type, see `Batch add data to a collection`. If custom indexes were set at creation,

View File

@@ -10,7 +10,7 @@ weight: 822
## Sealos upgrade notes
1. In App Management, create a new app with the image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.8.1
2. No public network address is needed
2. No public network address is needed; the port number is 3000
3. After deployment, copy the app's internal network address
4. Click Change on the `FastGPT` app, edit the environment variables, and add the following environment variable:
@@ -31,4 +31,4 @@ SANDBOX_URL=internal network address
1. New - more complete type hints for the JS code-run node (to be improved further)
2. New - the content extraction node supports data type selection
3. Fix - newly added website sync could not be used
4. Fix - scheduled tasks could not accept input
4. Fix - scheduled tasks could not accept input

View File

@@ -1,5 +1,5 @@
---
title: 'V4.8.8'
title: 'V4.8.8 (initialization required)'
description: 'FastGPT V4.8.8 更新说明'
icon: 'upgrade'
draft: false
@@ -13,7 +13,7 @@ weight: 816
### 2. Update images
- change the fastgpt image tag to v4.8.8
- change the fastgpt image tag to v4.8.8-fix2
- change the commercial-edition image tag to v4.8.8
### 3. Run initialization

View File

@@ -0,0 +1,51 @@
---
title: 'V4.8.9 (in progress)'
description: 'FastGPT V4.8.9 release notes'
icon: 'upgrade'
draft: false
toc: true
weight: 816
---
## Upgrade guide
### 1. Back up your database
### 2. Update images
### 3. Run initialization
From any terminal, send one HTTP request, replacing {{rootkey}} with the `rootkey` environment variable and {{host}} with the **FastGPT commercial-edition domain**.
```bash
curl --location --request POST 'https://{{host}}/api/admin/init/489' \
--header 'rootkey: {{rootkey}}' \
--header 'Content-Type: application/json'
```
This initializes the multi-tenant notification settings
-------
## V4.8.9 release notes
1. New - file upload configuration: whether images can be uploaded is now decided by system configuration instead of depending on the vision model.
2. New - the AI chat node and tool calls support an "enable image recognition" option; when enabled, images uploaded in the chat box and image links in the "user question" are fetched automatically.
3. New - document parsing node.
4. Commercial edition - team notification account binding, used to receive important messages.
5. Commercial edition - dataset collection tags, for tag-based dataset management.
6. Commercial edition - the dataset search node supports filtering by tags and creation time.
7. Commercial edition - transfer of app owner permission.
8. New - delete all chat input guide entries.
9. New - QA splitting supports a custom chunk size, and mitigates the issue where overly large chunks made gpt-4o-mini generate very little content during splitting.
10. Optimization - lazy-load chat box messages to reduce network transfer.
11. Optimization - clear the selected-file cache, so the same file can be selected again.
12. Fix - dataset file upload progress could not reach 100% on unstable networks or with many files.
13. Fix - after deleting an app, returning to chat and selecting the deleted app from the last conversation reported that the app does not exist.
14. Fix - default values of plugin dynamic variables were not displayed correctly.
15. Fix - tool call temperature and max response values did not take effect.
16. Fix - in function-call mode, GPT models require the content parameter in the assistant role. (Does not affect most models; ToolChoice mode is now used almost everywhere and FC mode is deprecated.)
17. Fix - dataset file upload progress updates could be abnormal.
18. Fix - while a dataset is rebuilding, the page kept jumping back to the first page.
19. Fix - dataset list OpenAPI authentication issue.

View File

@@ -121,8 +121,8 @@ services:
restart: always
fastgpt:
container_name: fastgpt
image: ghcr.io/labring/fastgpt:v4.8.8 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.8 # Alibaba Cloud
image: ghcr.io/labring/fastgpt:v4.8.8-fix2 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.8-fix2 # Alibaba Cloud
ports:
- 3000:3000
networks:
@@ -179,7 +179,7 @@ services:
- ./mysql:/var/lib/mysql
oneapi:
container_name: oneapi
image: ghcr.io/songquanpeng/one-api:0.6.7
image: ghcr.io/songquanpeng/one-api:v0.6.7
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/one-api:v0.6.6 # Alibaba Cloud
ports:
- 3001:3000

View File

@@ -79,8 +79,8 @@ services:
restart: always
fastgpt:
container_name: fastgpt
image: ghcr.io/labring/fastgpt:v4.8.8 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.8 # Alibaba Cloud
image: ghcr.io/labring/fastgpt:v4.8.8-fix2 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.8-fix2 # Alibaba Cloud
ports:
- 3000:3000
networks:
@@ -136,7 +136,7 @@ services:
- ./mysql:/var/lib/mysql
oneapi:
container_name: oneapi
image: ghcr.io/songquanpeng/one-api:0.6.7
image: ghcr.io/songquanpeng/one-api:v0.6.7
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/one-api:v0.6.6 # Alibaba Cloud
ports:
- 3001:3000

View File

@@ -60,8 +60,8 @@ services:
restart: always
fastgpt:
container_name: fastgpt
image: ghcr.io/labring/fastgpt:v4.8.8 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.8 # Alibaba Cloud
image: ghcr.io/labring/fastgpt:v4.8.8-fix2 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.8-fix2 # Alibaba Cloud
ports:
- 3000:3000
networks:
@@ -117,7 +117,7 @@ services:
- ./mysql:/var/lib/mysql
oneapi:
container_name: oneapi
image: ghcr.io/songquanpeng/one-api:0.6.7
image: ghcr.io/songquanpeng/one-api:v0.6.7
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/one-api:v0.6.6 # Alibaba Cloud
ports:
- 3001:3000

View File

@@ -1,18 +1,28 @@
import { ErrType } from '../errorCode';
import { i18nT } from '../../../../web/i18n/utils';
/* dataset: 502000 */
export enum AppErrEnum {
unExist = 'appUnExist',
unAuthApp = 'unAuthApp'
unAuthApp = 'unAuthApp',
invalidOwner = 'invalidOwner',
invalidAppType = 'invalidAppType'
}
const appErrList = [
{
statusText: AppErrEnum.unExist,
message: '应用不存在'
message: i18nT('common:code_error.app_error.not_exist')
},
{
statusText: AppErrEnum.unAuthApp,
message: '无权操作该应用'
message: i18nT('common:code_error.app_error.un_auth_app')
},
{
statusText: AppErrEnum.invalidOwner,
message: i18nT('common:code_error.app_error.invalid_owner')
},
{
statusText: AppErrEnum.invalidAppType,
message: i18nT('common:code_error.app_error.invalid_app_type')
}
];
export default appErrList.reduce((acc, cur, index) => {

View File

@@ -1,5 +1,5 @@
import { ErrType } from '../errorCode';
import { i18nT } from '../../../../web/i18n/utils';
/* dataset: 504000 */
export enum ChatErrEnum {
unAuthChat = 'unAuthChat'
@@ -7,7 +7,7 @@ export enum ChatErrEnum {
const errList = [
{
statusText: ChatErrEnum.unAuthChat,
message: '无权操作该对话记录'
message: i18nT('common:code_error.chat_error.un_auth')
}
];
export default errList.reduce((acc, cur, index) => {

View File

@@ -1,25 +1,27 @@
import { ErrType } from '../errorCode';
import { i18nT } from '../../../../web/i18n/utils';
/* dataset: 506000 */
export enum OpenApiErrEnum {
unExist = 'openapiUnExist',
unAuth = 'openapiUnAuth',
exceedLimit = 'openapiExceedLimit'
}
const errList = [
{
statusText: OpenApiErrEnum.unExist,
message: 'Api Key 不存在'
message: i18nT('common:code_error.openapi_error.api_key_not_exist')
},
{
statusText: OpenApiErrEnum.unAuth,
message: '无权操作该 Api Key'
message: i18nT('common:code_error.openapi_error.un_auth')
},
{
statusText: OpenApiErrEnum.exceedLimit,
message: '最多 10 组 API 密钥'
message: i18nT('common:code_error.openapi_error.exceed_limit')
}
];
export default errList.reduce((acc, cur, index) => {
return {
...acc,

View File

@@ -1,32 +1,33 @@
import { ErrType } from '../errorCode';
import { i18nT } from '../../../../web/i18n/utils';
/* dataset: 505000 */
export enum OutLinkErrEnum {
unExist = 'outlinkUnExist',
unAuthLink = 'unAuthLink',
linkUnInvalid = 'linkUnInvalid',
unAuthUser = 'unAuthUser'
}
const errList = [
{
statusText: OutLinkErrEnum.unExist,
message: '分享链接不存在'
message: i18nT('common:code_error.outlink_error.link_not_exist')
},
{
statusText: OutLinkErrEnum.unAuthLink,
message: '分享链接无效'
message: i18nT('common:code_error.outlink_error.invalid_link')
},
{
code: 501,
statusText: OutLinkErrEnum.linkUnInvalid,
message: '分享链接无效'
message: i18nT('common:code_error.outlink_error.invalid_link') // reuse the same error message
},
{
statusText: OutLinkErrEnum.unAuthUser,
message: '身份校验失败'
message: i18nT('common:code_error.outlink_error.un_auth_user')
}
];
export default errList.reduce((acc, cur, index) => {
return {
...acc,

View File

@@ -1,20 +1,22 @@
import { ErrType } from '../errorCode';
import { i18nT } from '../../../../web/i18n/utils';
/* dataset: 508000 */
export enum PluginErrEnum {
unExist = 'pluginUnExist',
unAuth = 'pluginUnAuth'
}
const errList = [
{
statusText: PluginErrEnum.unExist,
message: '插件不存在'
message: i18nT('common:code_error.plugin_error.not_exist')
},
{
statusText: PluginErrEnum.unAuth,
message: '无权操作该插件'
message: i18nT('common:code_error.plugin_error.un_auth')
}
];
export default errList.reduce((acc, cur, index) => {
return {
...acc,

View File

@@ -1,15 +1,17 @@
import { ErrType } from '../errorCode';
import { i18nT } from '../../../../web/i18n/utils';
/* dataset: 509000 */
export enum SystemErrEnum {
communityVersionNumLimit = 'communityVersionNumLimit'
}
const systemErr = [
{
statusText: SystemErrEnum.communityVersionNumLimit,
message: '超出开源版数量限制,请升级商业版: https://fastgpt.in'
message: i18nT('common:code_error.system_error.community_version_num_limit')
}
];
export default systemErr.reduce((acc, cur, index) => {
return {
...acc,

View File

@@ -1,5 +1,5 @@
import { ErrType } from '../errorCode';
import { i18nT } from '../../../../web/i18n/utils';
/* team: 500000 */
export enum TeamErrEnum {
teamOverSize = 'teamOverSize',
@@ -12,17 +12,43 @@ export enum TeamErrEnum {
websiteSyncNotEnough = 'websiteSyncNotEnough',
reRankNotEnough = 'reRankNotEnough'
}
const teamErr = [
{ statusText: TeamErrEnum.teamOverSize, message: 'error.team.overSize' },
{ statusText: TeamErrEnum.unAuthTeam, message: '无权操作该团队' },
{ statusText: TeamErrEnum.aiPointsNotEnough, message: '' },
{ statusText: TeamErrEnum.datasetSizeNotEnough, message: '知识库容量不足,请先扩容~' },
{ statusText: TeamErrEnum.datasetAmountNotEnough, message: '知识库数量已达上限~' },
{ statusText: TeamErrEnum.appAmountNotEnough, message: '应用数量已达上限~' },
{ statusText: TeamErrEnum.pluginAmountNotEnough, message: '插件数量已达上限~' },
{ statusText: TeamErrEnum.websiteSyncNotEnough, message: '无权使用Web站点同步~' },
{ statusText: TeamErrEnum.reRankNotEnough, message: '无权使用检索重排~' }
{
statusText: TeamErrEnum.teamOverSize,
message: i18nT('common:code_error.team_error.over_size')
},
{ statusText: TeamErrEnum.unAuthTeam, message: i18nT('common:code_error.team_error.un_auth') },
{
statusText: TeamErrEnum.aiPointsNotEnough,
message: i18nT('common:code_error.team_error.ai_points_not_enough')
}, // to be defined or left empty
{
statusText: TeamErrEnum.datasetSizeNotEnough,
message: i18nT('common:code_error.team_error.dataset_size_not_enough')
},
{
statusText: TeamErrEnum.datasetAmountNotEnough,
message: i18nT('common:code_error.team_error.dataset_amount_not_enough')
},
{
statusText: TeamErrEnum.appAmountNotEnough,
message: i18nT('common:code_error.team_error.app_amount_not_enough')
},
{
statusText: TeamErrEnum.pluginAmountNotEnough,
message: i18nT('common:code_error.team_error.plugin_amount_not_enough')
},
{
statusText: TeamErrEnum.websiteSyncNotEnough,
message: i18nT('common:code_error.team_error.website_sync_not_enough')
},
{
statusText: TeamErrEnum.reRankNotEnough,
message: i18nT('common:code_error.team_error.re_rank_not_enough')
}
];
export default teamErr.reduce((acc, cur, index) => {
return {
...acc,

View File

@@ -1,5 +1,5 @@
import { ErrType } from '../errorCode';
import { i18nT } from '../../../../web/i18n/utils';
/* team: 503000 */
export enum UserErrEnum {
unAuthUser = 'unAuthUser',
@@ -8,10 +8,22 @@ export enum UserErrEnum {
balanceNotEnough = 'balanceNotEnough'
}
const errList = [
{ statusText: UserErrEnum.unAuthUser, message: '找不到该用户' },
{ statusText: UserErrEnum.binVisitor, message: '您的身份校验未通过' },
{ statusText: UserErrEnum.binVisitor, message: '您当前身份为游客,无权操作' },
{ statusText: UserErrEnum.balanceNotEnough, message: '账号余额不足~' }
{
statusText: UserErrEnum.unAuthUser,
message: i18nT('common:code_error.user_error.un_auth_user')
},
{
statusText: UserErrEnum.binVisitor,
message: i18nT('common:code_error.user_error.bin_visitor')
}, // identity verification failed
{
statusText: UserErrEnum.binVisitor,
message: i18nT('common:code_error.user_error.bin_visitor_guest')
}, // guest identity
{
statusText: UserErrEnum.balanceNotEnough,
message: i18nT('common:code_error.user_error.balance_not_enough')
}
];
export default errList.reduce((acc, cur, index) => {
return {

View File

@@ -8,24 +8,25 @@ import teamErr from './code/team';
import userErr from './code/user';
import commonErr from './code/common';
import SystemErrEnum from './code/system';
import { i18nT } from '../../../web/i18n/utils';
export const ERROR_CODE: { [key: number]: string } = {
400: '请求失败',
401: '无权访问',
403: '紧张访问',
404: '请求不存在',
405: '请求方法错误',
406: '请求的格式错误',
410: '资源已删除',
422: '验证错误',
500: '服务器发生错误',
502: '网关错误',
503: '服务器暂时过载或维护',
504: '网关超时'
400: i18nT('common:code_error.error_code.400'),
401: i18nT('common:code_error.error_code.401'),
403: i18nT('common:code_error.error_code.403'),
404: i18nT('common:code_error.error_code.404'),
405: i18nT('common:code_error.error_code.405'),
406: i18nT('common:code_error.error_code.406'),
410: i18nT('common:code_error.error_code.410'),
422: i18nT('common:code_error.error_code.422'),
500: i18nT('common:code_error.error_code.500'),
502: i18nT('common:code_error.error_code.502'),
503: i18nT('common:code_error.error_code.503'),
504: i18nT('common:code_error.error_code.504')
};
export const TOKEN_ERROR_CODE: Record<number, string> = {
403: '登录状态无效,请重新登录'
403: i18nT('common:code_error.token_error_code.403')
};
export const proxyError: Record<string, boolean> = {
@@ -63,32 +64,31 @@ export const ERROR_RESPONSE: Record<
[ERROR_ENUM.unAuthorization]: {
code: 403,
statusText: ERROR_ENUM.unAuthorization,
message: '凭证错误',
message: i18nT('common:code_error.error_message.403'),
data: null
},
[ERROR_ENUM.insufficientQuota]: {
code: 510,
statusText: ERROR_ENUM.insufficientQuota,
message: '账号余额不足',
message: i18nT('common:code_error.error_message.510'),
data: null
},
[ERROR_ENUM.unAuthModel]: {
code: 511,
statusText: ERROR_ENUM.unAuthModel,
message: '无权操作该模型',
message: i18nT('common:code_error.error_message.511'),
data: null
},
[ERROR_ENUM.unAuthFile]: {
code: 513,
statusText: ERROR_ENUM.unAuthFile,
message: '无权阅读该文件',
message: i18nT('common:code_error.error_message.513'),
data: null
},
[ERROR_ENUM.unAuthApiKey]: {
code: 514,
statusText: ERROR_ENUM.unAuthApiKey,
message: 'Api Key 不合法',
message: i18nT('common:code_error.error_message.514'),
data: null
},
...appErr,

View File

@@ -1,6 +1,6 @@
import { replaceSensitiveText } from '../string/tools';
export const getErrText = (err: any, def = '') => {
export const getErrText = (err: any, def = ''): any => {
const msg: string =
typeof err === 'string'
? err

View File

@@ -1,11 +1,21 @@
import { i18nT } from '../../../web/i18n/utils';
/* mongo fs bucket */
export enum BucketNameEnum {
dataset = 'dataset'
dataset = 'dataset',
chat = 'chat'
}
export const bucketNameMap = {
[BucketNameEnum.dataset]: {
label: 'file.bucket.dataset'
label: i18nT('file:bucket_file'),
previewExpireMinutes: 30 // 30 minutes
},
[BucketNameEnum.chat]: {
label: i18nT('file:bucket_chat'),
previewExpireMinutes: 7 * 24 * 60 // 7 days
}
};
export const ReadFileBaseUrl = '/api/common/file/read';
export const documentFileType = '.txt, .docx, .csv, .xlsx, .pdf, .md, .html, .pptx';

View File

@@ -0,0 +1,5 @@
export type AuthFrequencyLimitProps = {
eventId: string;
maxAmount: number;
expiredTime: Date;
};

View File

@@ -1,5 +1,10 @@
import dayjs from 'dayjs';
import cronParser from 'cron-parser';
import utc from 'dayjs/plugin/utc';
import timezone from 'dayjs/plugin/timezone';
dayjs.extend(utc);
dayjs.extend(timezone);
export const formatTime2YMDHMW = (time?: Date) => dayjs(time).format('YYYY-MM-DD HH:mm:ss dddd');
export const formatTime2YMDHM = (time?: Date) =>

View File

@@ -91,3 +91,10 @@ export const sliceJsonStr = (str: string) => {
return jsonStr;
};
export const sliceStrStartEnd = (str: string, start: number, end: number) => {
const overSize = str.length > start + end;
const startContent = str.slice(0, start);
const endContent = overSize ? str.slice(-end) : '';
return startContent + (overSize ? ` ...... ` : '') + endContent;
};
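For illustration, a quick sketch (not part of the diff) of what `sliceStrStartEnd` returns:
```ts
// Keeps the first `start` and last `end` characters, inserting a marker
// when the string is longer than start + end.
sliceStrStartEnd('abcdefghij', 3, 3); // => 'abc ...... hij'
sliceStrStartEnd('abc', 3, 3); // => 'abc' (short strings pass through unchanged)
```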

View File

@@ -0,0 +1,9 @@
export type RequireAtLeastOne<T, Keys extends keyof T = keyof T> = Omit<T, Keys> &
{
[K in Keys]-?: Required<Pick<T, K>> & Partial<Omit<T, K>>;
}[Keys];
export type RequireOnlyOne<T, Keys extends keyof T = keyof T> = Omit<T, Keys> &
{
[K in Keys]-?: Required<Pick<T, K>> & Partial<Record<Exclude<Keys, K>, undefined>>;
}[Keys];
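For illustration, a usage sketch (not part of the diff) of how these utility types behave:
```ts
type Contact = { email?: string; phone?: string };

// At least one of email/phone must be present.
const atLeastOne: RequireAtLeastOne<Contact, 'email' | 'phone'> = { email: 'a@b.c' };

// Exactly one key may be set; the others must remain undefined.
const onlyOne: RequireOnlyOne<Contact, 'email' | 'phone'> = { phone: '123' };
// { email: 'a@b.c', phone: '123' } would be a type error for RequireOnlyOne.
```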

View File

@@ -119,3 +119,10 @@ export const Prompt_QuotePromptList: PromptTemplateItem[] = [
问题:"""{{question}}"""`
}
];
// Document quote prompt
export const Prompt_DocumentQuote = `将 <Quote></Quote> 中的内容作为你的知识:
<Quote>
{{quote}}
</Quote>
`;

View File

@@ -1,9 +1,9 @@
export const Prompt_AgentQA = {
description: `<Context></Context> 标记中是一段文本,学习和分析它,并整理学习成果:
- 提出问题并给出每个问题的答案。
- 答案需详细完整,尽可能保留原文描述。
- 答案需详细完整,尽可能保留原文描述,可以适当扩展答案描述
- 答案可以包含普通文字、链接、代码、表格、公示、媒体链接等 Markdown 元素。
- 最多提出 30 个问题。
- 最多提出 50 个问题。
`,
fixedText: `请按以下格式整理学习成果:
<Context>

View File

@@ -2,23 +2,46 @@ import openai from 'openai';
import type {
ChatCompletionMessageToolCall,
ChatCompletionChunk,
ChatCompletionMessageParam,
ChatCompletionMessageParam as SdkChatCompletionMessageParam,
ChatCompletionToolMessageParam,
ChatCompletionAssistantMessageParam
ChatCompletionAssistantMessageParam,
ChatCompletionContentPart as SdkChatCompletionContentPart,
ChatCompletionUserMessageParam as SdkChatCompletionUserMessageParam
} from 'openai/resources';
import { ChatMessageTypeEnum } from './constants';
export * from 'openai/resources';
export type ChatCompletionMessageParam = ChatCompletionMessageParam & {
// Extension of ChatCompletionMessageParam, Add file url type
export type ChatCompletionContentPartFile = {
type: 'file_url';
name: string;
url: string;
};
// Rewrite ChatCompletionContentPart, Add file type
export type ChatCompletionContentPart =
| SdkChatCompletionContentPart
| ChatCompletionContentPartFile;
type CustomChatCompletionUserMessageParam = {
content: string | Array<ChatCompletionContentPart>;
role: 'user';
name?: string;
};
export type ChatCompletionMessageParam = (
| Exclude<SdkChatCompletionMessageParam, SdkChatCompletionUserMessageParam>
| CustomChatCompletionUserMessageParam
) & {
dataId?: string;
};
export type SdkChatCompletionMessageParam = SdkChatCompletionMessageParam;
/* ToolChoice and functionCall extension */
export type ChatCompletionToolMessageParam = ChatCompletionToolMessageParam & { name: string };
export type ChatCompletionAssistantToolParam = {
role: 'assistant';
tool_calls: ChatCompletionMessageToolCall[];
};
export type ChatCompletionMessageToolCall = ChatCompletionMessageToolCall & {
toolName?: string;
toolAvatar?: string;
@@ -28,13 +51,16 @@ export type ChatCompletionMessageFunctionCall = ChatCompletionAssistantMessagePa
toolName?: string;
toolAvatar?: string;
};
// Stream response
export type StreamChatType = Stream<ChatCompletionChunk>;
export default openai;
export * from 'openai';
// Other
export type PromptTemplateItem = {
title: string;
desc: string;
value: string;
};
export default openai;
export * from 'openai';

View File

@@ -1,4 +1,4 @@
import { AppTTSConfigType, AppWhisperConfigType } from './type';
import { AppTTSConfigType, AppFileSelectConfigType, AppWhisperConfigType } from './type';
export enum AppTypeEnum {
folder = 'folder',
@@ -23,3 +23,9 @@ export const defaultChatInputGuideConfig = {
textList: [],
customUrl: ''
};
export const defaultAppSelectFileConfig: AppFileSelectConfigType = {
canSelectFile: false,
canSelectImg: false,
maxFiles: 10
};

View File

@@ -5,6 +5,20 @@ import { FlowNodeTypeEnum } from '../../workflow/node/constant';
export const getPluginInputsFromStoreNodes = (nodes: StoreNodeItemType[]) => {
return nodes.find((node) => node.flowNodeType === FlowNodeTypeEnum.pluginInput)?.inputs || [];
};
export const getPluginRunContent = (e: { pluginInputs: FlowNodeInputItemType[] }) => {
return JSON.stringify(e);
export const getPluginRunContent = ({
pluginInputs,
variables
}: {
pluginInputs: FlowNodeInputItemType[];
variables: Record<string, any>;
}) => {
const pluginInputsWithValue = pluginInputs.map((input) => {
const { key } = input;
const value = variables?.hasOwnProperty(key) ? variables[key] : input.defaultValue;
return {
...input,
value
};
});
return JSON.stringify(pluginInputsWithValue);
};
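For illustration, a sketch (not part of the diff) of the new behavior: a value from `variables` overrides the input's `defaultValue`, and the enriched inputs are serialized to JSON:
```ts
// Minimal FlowNodeInputItemType-shaped object, cast for brevity.
const content = getPluginRunContent({
  pluginInputs: [{ key: 'city', defaultValue: 'Beijing' } as any],
  variables: { city: 'Shanghai' }
});
// content === '[{"key":"city","defaultValue":"Beijing","value":"Shanghai"}]'
```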

View File

@@ -1,7 +1,7 @@
import type { FlowNodeTemplateType, StoreNodeItemType } from '../workflow/type/node';
import { AppTypeEnum } from './constants';
import { PermissionTypeEnum } from '../../support/permission/constant';
import { VariableInputEnum } from '../workflow/constants';
import { NodeInputKeyEnum, VariableInputEnum } from '../workflow/constants';
import { SelectedDatasetType } from '../workflow/api';
import { DatasetSearchModeEnum } from '../dataset/constants';
import { TeamTagSchema as TeamTagsSchemaType } from '@fastgpt/global/support/user/team/type.d';
@@ -91,6 +91,7 @@ export type AppChatConfigType = {
whisperConfig?: AppWhisperConfigType;
scheduledTriggerConfig?: AppScheduledTriggerConfigType;
chatInputGuide?: ChatInputGuideConfigType;
fileSelectConfig?: AppFileSelectConfigType;
};
export type SettingAIDataType = {
model: string;
@@ -98,6 +99,7 @@ export type SettingAIDataType = {
maxToken: number;
isResponseAnswerText?: boolean;
maxHistories?: number;
[NodeInputKeyEnum.aiChatVision]?: boolean; // Is open vision mode
};
// variable
@@ -134,3 +136,9 @@ export type AppScheduledTriggerConfigType = {
timezone: string;
defaultPrompt: string;
};
// File
export type AppFileSelectConfigType = {
canSelectFile: boolean;
canSelectImg: boolean;
maxFiles: number;
};

View File

@@ -56,16 +56,21 @@ export const chats2GPTMessages = ({
text: item.text?.content || ''
};
}
if (
item.type === ChatItemValueTypeEnum.file &&
item.file?.type === ChatFileTypeEnum.image
) {
return {
type: 'image_url',
image_url: {
if (item.type === ChatItemValueTypeEnum.file) {
if (item.file?.type === ChatFileTypeEnum.image) {
return {
type: 'image_url',
image_url: {
url: item.file?.url || ''
}
};
} else if (item.file?.type === ChatFileTypeEnum.file) {
return {
type: 'file_url',
name: item.file?.name || '',
url: item.file?.url || ''
}
};
};
}
}
})
.filter(Boolean) as ChatCompletionContentPart[];
@@ -175,6 +180,16 @@ export const GPTMessages2Chats = (
url: item.image_url.url
}
});
} else if (item.type === 'file_url') {
value.push({
// @ts-ignore
type: ChatItemValueTypeEnum.file,
file: {
type: ChatFileTypeEnum.file,
name: item.name,
url: item.url
}
});
}
});
}

View File

@@ -106,17 +106,26 @@ export type AdminFbkType = {
};
/* --------- chat item ---------- */
export type ChatItemType = (UserChatItemType | SystemChatItemType | AIChatItemType) & {
dataId?: string;
export type ResponseTagItemType = {
totalRunningTime?: number;
totalQuoteList?: SearchDataResponseItemType[];
llmModuleAccount?: number;
historyPreviewLength?: number;
};
export type ChatItemType = (UserChatItemType | SystemChatItemType | AIChatItemType) & {
dataId?: string;
} & ResponseTagItemType;
// Frontend type
export type ChatSiteItemType = (UserChatItemType | SystemChatItemType | AIChatItemType) & {
dataId: string;
status: `${ChatStatusEnum}`;
moduleName?: string;
ttsBuffer?: Uint8Array;
responseData?: ChatHistoryItemResType[];
} & ChatBoxInputType;
} & ChatBoxInputType &
ResponseTagItemType;
/* --------- team chat --------- */
export type ChatAppListSchema = {

View File

@@ -2,6 +2,7 @@ import { DispatchNodeResponseType } from '../workflow/runtime/type';
import { FlowNodeTypeEnum } from '../workflow/node/constant';
import { ChatItemValueTypeEnum, ChatRoleEnum } from './constants';
import { ChatHistoryItemResType, ChatItemType, UserChatItemValueItemType } from './type.d';
import { sliceStrStartEnd } from '../../common/string/tools';
// Concat 2 -> 1, and sort by role
export const concatHistories = (histories1: ChatItemType[], histories2: ChatItemType[]) => {
@@ -25,6 +26,7 @@ export const getChatTitleFromChatMessage = (message?: ChatItemType, defaultValue
return defaultValue;
};
// Keep the first n and last n characters
export const getHistoryPreview = (
completeMessages: ChatItemType[]
): {
@@ -32,30 +34,44 @@ export const getHistoryPreview = (
value: string;
}[] => {
return completeMessages.map((item, i) => {
if (item.obj === ChatRoleEnum.System || i >= completeMessages.length - 2) {
return {
obj: item.obj,
value: item.value?.[0]?.text?.content || ''
};
}
const n = item.obj === ChatRoleEnum.System || i >= completeMessages.length - 2 ? 80 : 40;
const content = item.value
.map((item) => {
if (item.text?.content) {
const content =
item.text.content.length > 20
? `${item.text.content.slice(0, 20)}...`
: item.text.content;
return content;
}
return '';
})
.filter(Boolean)
.join('\n');
// Get message text content
const rawText = (() => {
if (item.obj === ChatRoleEnum.System) {
return item.value?.map((item) => item.text?.content).join('') || '';
} else if (item.obj === ChatRoleEnum.Human) {
return (
item.value
?.map((item) => {
if (item?.text?.content) return item?.text?.content;
if (item.file?.type === 'image') return 'Input an image';
return '';
})
.filter(Boolean)
.join('\n') || ''
);
} else if (item.obj === ChatRoleEnum.AI) {
return (
item.value
?.map((item) => {
return (
item.text?.content || item?.tools?.map((item) => item.toolName).join(',') || ''
);
})
.join('') || ''
);
}
return '';
})();
const startContent = rawText.slice(0, n);
const endContent = rawText.length > 2 * n ? rawText.slice(-n) : '';
const content = startContent + (rawText.length > n ? ` ...... ` : '') + endContent;
return {
obj: item.obj,
value: content
value: sliceStrStartEnd(content, 80, 80)
};
});
};

View File

@@ -74,6 +74,23 @@ export type ExternalFileCreateDatasetCollectionParams = ApiCreateDatasetCollecti
filename?: string;
};
/* ================= tag ===================== */
export type CreateDatasetCollectionTagParams = {
datasetId: string;
tag: string;
};
export type AddTagsToCollectionsParams = {
originCollectionIds: string[];
collectionIds: string[];
datasetId: string;
tag: string;
};
export type UpdateDatasetCollectionTagParams = {
datasetId: string;
tagId: string;
tag: string;
};
/* ================= data ===================== */
export type PgSearchRawType = {
id: string;

View File

@@ -69,6 +69,13 @@ export type DatasetCollectionSchemaType = {
};
};
export type DatasetCollectionTagsSchemaType = {
_id: string;
teamId: string;
datasetId: string;
tag: string;
};
export type DatasetDataIndexItemType = {
defaultIndex: boolean;
dataId: string; // pg data id
@@ -144,6 +151,17 @@ export type DatasetItemType = Omit<DatasetSchemaType, 'vectorModel' | 'agentMode
permission: DatasetPermission;
};
/* ================= tag ===================== */
export type DatasetTagType = {
_id: string;
tag: string;
};
export type TagUsageType = {
tagId: string;
collections: string[];
};
/* ================= collection ===================== */
export type DatasetCollectionItemType = CollectionWithDatasetType & {
sourceName: string;

View File

@@ -75,6 +75,8 @@ export enum NodeInputKeyEnum {
aiChatQuoteTemplate = 'quoteTemplate',
aiChatQuotePrompt = 'quotePrompt',
aiChatDatasetQuote = 'quoteQA',
aiChatVision = 'aiChatVision',
stringQuoteText = 'stringQuoteText',
// dataset
datasetSelectList = 'datasets',
@@ -85,6 +87,7 @@ export enum NodeInputKeyEnum {
datasetSearchUsingExtensionQuery = 'datasetSearchUsingExtensionQuery',
datasetSearchExtensionModel = 'datasetSearchExtensionModel',
datasetSearchExtensionBg = 'datasetSearchExtensionBg',
collectionFilterMatch = 'collectionFilterMatch',
// concat dataset
datasetQuoteList = 'system_datasetQuoteList',
@@ -117,7 +120,10 @@ export enum NodeInputKeyEnum {
// code
code = 'code',
codeType = 'codeType' // js|py
codeType = 'codeType', // js|py
// read files
fileUrlList = 'fileUrlList'
}
export enum NodeOutputKeyEnum {
@@ -132,6 +138,9 @@ export enum NodeOutputKeyEnum {
addOutputParam = 'system_addOutputParam',
rawResponse = 'system_rawResponse',
// start
userFiles = 'userFiles',
// dataset
datasetQuoteQA = 'quoteQA',

View File

@@ -117,7 +117,8 @@ export enum FlowNodeTypeEnum {
variableUpdate = 'variableUpdate',
code = 'code',
textEditor = 'textEditor',
customFeedback = 'customFeedback'
customFeedback = 'customFeedback',
readFiles = 'readFiles'
}
// node IO value type

View File

@@ -16,10 +16,12 @@ import { UserModelSchema } from '../../../support/user/type';
import { AppDetailType, AppSchema } from '../../app/type';
import { RuntimeNodeItemType } from '../runtime/type';
import { RuntimeEdgeItemType } from './edge';
import { ReadFileNodeResponse } from '../template/system/readFiles/type';
/* workflow props */
export type ChatDispatchProps = {
res?: NextApiResponse;
requestOrigin?: string;
mode: 'test' | 'chat' | 'debug';
teamId: string;
tmbId: string;
@@ -30,6 +32,7 @@ export type ChatDispatchProps = {
histories: ChatItemType[];
variables: Record<string, any>; // global variable
query: UserChatItemValueItemType[]; // trigger query
chatConfig: AppSchema['chatConfig'];
stream: boolean;
detail: boolean; // response detail
maxRunTimes: number;
@@ -146,6 +149,10 @@ export type DispatchNodeResponseType = {
// plugin
pluginOutput?: Record<string, any>;
// read files
readFilesResult?: string;
readFiles?: ReadFileNodeResponse;
};
export type DispatchNodeResultType<T> = {
@@ -166,4 +173,6 @@ export type AIChatNodeProps = {
[NodeInputKeyEnum.aiChatIsResponseText]: boolean;
[NodeInputKeyEnum.aiChatQuoteTemplate]?: string;
[NodeInputKeyEnum.aiChatQuotePrompt]?: string;
[NodeInputKeyEnum.aiChatVision]?: boolean;
[NodeInputKeyEnum.stringQuoteText]?: string;
};

View File

@@ -25,6 +25,7 @@ import { VariableUpdateNode } from './system/variableUpdate';
import { CodeNode } from './system/sandbox';
import { TextEditorNode } from './system/textEditor';
import { CustomFeedbackNode } from './system/customFeedback';
import { ReadFilesNodes } from './system/readFiles';
const systemNodes: FlowNodeTemplateType[] = [
AiChatModule,
@@ -36,6 +37,7 @@ const systemNodes: FlowNodeTemplateType[] = [
StopToolNode,
ClassifyQuestionModule,
ContextExtractModule,
ReadFilesNodes,
HttpNode468,
AiQueryExtension,
LafModule,

View File

@@ -3,6 +3,7 @@ import { FlowNodeInputTypeEnum } from '../node/constant';
import { WorkflowIOValueTypeEnum } from '../constants';
import { chatNodeSystemPromptTip } from './tip';
import { FlowNodeInputItemType } from '../type/io';
import { i18nT } from '../../../../web/i18n/utils';
export const Input_Template_History: FlowNodeInputItemType = {
key: NodeInputKeyEnum.history,
@@ -64,3 +65,11 @@ export const Input_Template_Dataset_Quote: FlowNodeInputItemType = {
description: '',
valueType: WorkflowIOValueTypeEnum.datasetQuote
};
export const Input_Template_Text_Quote: FlowNodeInputItemType = {
key: NodeInputKeyEnum.stringQuoteText,
renderTypeList: [FlowNodeInputTypeEnum.reference, FlowNodeInputTypeEnum.textarea],
label: i18nT('app:document_quote'),
debugLabel: i18nT('app:document_quote'),
description: i18nT('app:document_quote_tip'),
valueType: WorkflowIOValueTypeEnum.string
};

View File

@@ -15,10 +15,12 @@ import {
Input_Template_Dataset_Quote,
Input_Template_History,
Input_Template_System_Prompt,
Input_Template_UserChatInput
Input_Template_UserChatInput,
Input_Template_Text_Quote
} from '../input';
import { chatNodeSystemPromptTip } from '../tip';
import { getHandleConfig } from '../utils';
import { i18nT } from '../../../../../web/i18n/utils';
export const AiChatModule: FlowNodeTemplateType = {
id: FlowNodeTypeEnum.chatNode,
@@ -27,8 +29,8 @@ export const AiChatModule: FlowNodeTemplateType = {
sourceHandle: getHandleConfig(true, true, true, true),
targetHandle: getHandleConfig(true, true, true, true),
avatar: 'core/workflow/template/aiChat',
name: 'AI 对话',
intro: 'AI 大模型对话',
name: i18nT('workflow:template.ai_chat'),
intro: i18nT('workflow:template.ai_chat_intro'),
showStatus: true,
isTool: true,
version: '481',
@@ -40,20 +42,14 @@ export const AiChatModule: FlowNodeTemplateType = {
renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
label: '',
value: 0,
valueType: WorkflowIOValueTypeEnum.number,
min: 0,
max: 10,
step: 1
valueType: WorkflowIOValueTypeEnum.number
},
{
key: NodeInputKeyEnum.aiChatMaxToken,
renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
label: '',
value: 2000,
valueType: WorkflowIOValueTypeEnum.number,
min: 100,
max: 4000,
step: 50
valueType: WorkflowIOValueTypeEnum.number
},
{
key: NodeInputKeyEnum.aiChatIsResponseText,
@@ -74,6 +70,13 @@ export const AiChatModule: FlowNodeTemplateType = {
label: '',
valueType: WorkflowIOValueTypeEnum.string
},
{
key: NodeInputKeyEnum.aiChatVision,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
valueType: WorkflowIOValueTypeEnum.boolean,
value: true
},
// settings modal ---
{
...Input_Template_System_Prompt,
@@ -82,8 +85,9 @@ export const AiChatModule: FlowNodeTemplateType = {
placeholder: chatNodeSystemPromptTip
},
Input_Template_History,
{ ...Input_Template_UserChatInput, toolDescription: '用户问题' },
Input_Template_Dataset_Quote
Input_Template_Dataset_Quote,
Input_Template_Text_Quote,
{ ...Input_Template_UserChatInput, toolDescription: '用户问题' }
],
outputs: [
{

View File

@@ -13,9 +13,9 @@ import {
import { Input_Template_UserChatInput } from '../input';
import { DatasetSearchModeEnum } from '../../../dataset/constants';
import { getHandleConfig } from '../utils';
import { i18nT } from '../../../../../web/i18n/utils';
export const Dataset_SEARCH_DESC =
'调用“语义检索”和“全文检索”能力,从“知识库”中查找可能与问题相关的参考内容';
export const Dataset_SEARCH_DESC = i18nT('workflow:template.dataset_search_intro');
export const DatasetSearchModule: FlowNodeTemplateType = {
id: FlowNodeTypeEnum.datasetSearchNode,
@@ -24,7 +24,7 @@ export const DatasetSearchModule: FlowNodeTemplateType = {
sourceHandle: getHandleConfig(true, true, true, true),
targetHandle: getHandleConfig(true, true, true, true),
avatar: 'core/workflow/template/datasetSearch',
name: '知识库搜索',
name: i18nT('workflow:template.dataset_search'),
intro: Dataset_SEARCH_DESC,
showStatus: true,
isTool: true,
@@ -90,6 +90,25 @@ export const DatasetSearchModule: FlowNodeTemplateType = {
{
...Input_Template_UserChatInput,
toolDescription: '需要检索的内容'
},
{
key: NodeInputKeyEnum.collectionFilterMatch,
renderTypeList: [FlowNodeInputTypeEnum.JSONEditor, FlowNodeInputTypeEnum.reference],
label: '集合元数据过滤',
valueType: WorkflowIOValueTypeEnum.object,
isPro: true,
description: `目前支持标签和创建时间过滤,需按照以下格式填写:
{
"tags": {
"$and": ["标签 1","标签 2"],
"$or": ["有 $and 标签时and 生效or 不生效"]
},
"createTime": {
"$gte": "YYYY-MM-DD HH:mm 格式即可,集合的创建时间大于该时间",
"$lte": "YYYY-MM-DD HH:mm 格式即可,集合的创建时间小于该时间,可和 $gte 共同使用"
}
}
`
}
],
outputs: [

View File

@@ -0,0 +1,48 @@
import { i18nT } from '../../../../../../web/i18n/utils';
import {
FlowNodeTemplateTypeEnum,
NodeInputKeyEnum,
NodeOutputKeyEnum,
WorkflowIOValueTypeEnum
} from '../../../constants';
import {
FlowNodeInputTypeEnum,
FlowNodeOutputTypeEnum,
FlowNodeTypeEnum
} from '../../../node/constant';
import { FlowNodeTemplateType } from '../../../type/node';
import { getHandleConfig } from '../../utils';
export const ReadFilesNodes: FlowNodeTemplateType = {
id: FlowNodeTypeEnum.readFiles,
templateType: FlowNodeTemplateTypeEnum.tools,
flowNodeType: FlowNodeTypeEnum.readFiles,
sourceHandle: getHandleConfig(true, true, true, true),
targetHandle: getHandleConfig(true, true, true, true),
avatar: 'core/workflow/template/readFiles',
name: i18nT('app:workflow.read_files'),
intro: i18nT('app:workflow.read_files_tip'),
showStatus: true,
version: '489',
isTool: true,
inputs: [
{
key: NodeInputKeyEnum.fileUrlList,
renderTypeList: [FlowNodeInputTypeEnum.reference],
valueType: WorkflowIOValueTypeEnum.arrayString,
label: i18nT('app:workflow.file_url'),
required: true,
value: []
}
],
outputs: [
{
id: NodeOutputKeyEnum.text,
key: NodeOutputKeyEnum.text,
label: i18nT('app:workflow.read_files_result'),
description: i18nT('app:workflow.read_files_result_desc'),
valueType: WorkflowIOValueTypeEnum.string,
type: FlowNodeOutputTypeEnum.static
}
]
};

View File

@@ -0,0 +1,4 @@
export type ReadFileNodeResponse = {
url: string;
name: string;
}[];

View File

@@ -2,6 +2,7 @@ import { FlowNodeTypeEnum } from '../../node/constant';
import { FlowNodeTemplateType } from '../../type/node.d';
import { FlowNodeTemplateTypeEnum } from '../../constants';
import { getHandleConfig } from '../utils';
import { i18nT } from '../../../../../web/i18n/utils';
export const SystemConfigNode: FlowNodeTemplateType = {
id: FlowNodeTypeEnum.systemConfig,
@@ -10,8 +11,8 @@ export const SystemConfigNode: FlowNodeTemplateType = {
sourceHandle: getHandleConfig(false, false, false, false),
targetHandle: getHandleConfig(false, false, false, false),
avatar: 'core/workflow/template/systemConfig',
name: '系统配置',
intro: '可以配置应用的系统参数。',
name: i18nT('workflow:template.system_config'),
intro: '',
unique: true,
forbidDelete: true,
version: '481',

View File

@@ -19,6 +19,7 @@ import {
import { chatNodeSystemPromptTip } from '../tip';
import { LLMModelTypeEnum } from '../../../ai/constants';
import { getHandleConfig } from '../utils';
import { i18nT } from '../../../../../web/i18n/utils';
export const ToolModule: FlowNodeTemplateType = {
id: FlowNodeTypeEnum.tools,
@@ -27,8 +28,8 @@ export const ToolModule: FlowNodeTemplateType = {
sourceHandle: getHandleConfig(true, true, false, true),
targetHandle: getHandleConfig(true, true, false, true),
avatar: 'core/workflow/template/toolCall',
name: '工具调用',
intro: '通过AI模型自动选择一个或多个功能块进行调用也可以对插件进行调用。',
name: i18nT('workflow:template.tool_call'),
intro: i18nT('workflow:template.tool_call_intro'),
showStatus: true,
version: '481',
inputs: [
@@ -41,21 +42,23 @@ export const ToolModule: FlowNodeTemplateType = {
renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
label: '',
value: 0,
valueType: WorkflowIOValueTypeEnum.number,
min: 0,
max: 10,
step: 1
valueType: WorkflowIOValueTypeEnum.number
},
{
key: NodeInputKeyEnum.aiChatMaxToken,
renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
label: '',
value: 2000,
valueType: WorkflowIOValueTypeEnum.number,
min: 100,
max: 4000,
step: 50
valueType: WorkflowIOValueTypeEnum.number
},
{
key: NodeInputKeyEnum.aiChatVision,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
valueType: WorkflowIOValueTypeEnum.boolean,
value: true
},
{
...Input_Template_System_Prompt,
label: 'core.ai.Prompt',

View File

@@ -7,6 +7,17 @@ import {
} from '../../constants';
import { getHandleConfig } from '../utils';
import { Input_Template_UserChatInput } from '../input';
import { i18nT } from '../../../../../web/i18n/utils';
import { FlowNodeOutputItemType } from '../../type/io';
export const userFilesInput: FlowNodeOutputItemType = {
id: NodeOutputKeyEnum.userFiles,
key: NodeOutputKeyEnum.userFiles,
label: i18nT('app:workflow.user_file_input'),
description: i18nT('app:workflow.user_file_input_desc'),
type: FlowNodeOutputTypeEnum.static,
valueType: WorkflowIOValueTypeEnum.arrayString
};
export const WorkflowStart: FlowNodeTemplateType = {
id: FlowNodeTypeEnum.workflowStart,
@@ -15,7 +26,7 @@ export const WorkflowStart: FlowNodeTemplateType = {
sourceHandle: getHandleConfig(false, true, false, false),
targetHandle: getHandleConfig(false, false, false, false),
avatar: 'core/workflow/template/workflowStart',
name: '流程开始',
name: i18nT('workflow:template.workflow_start'),
intro: '',
forbidDelete: true,
unique: true,
@@ -25,7 +36,7 @@ export const WorkflowStart: FlowNodeTemplateType = {
{
id: NodeOutputKeyEnum.userChatInput,
key: NodeOutputKeyEnum.userChatInput,
label: 'core.module.input.label.user question',
label: i18nT('common:core.module.input.label.user question'),
type: FlowNodeOutputTypeEnum.static,
valueType: WorkflowIOValueTypeEnum.string
}

View File

@@ -52,6 +52,7 @@ export type FlowNodeInputItemType = InputComponentPropsType & {
// render components params
canEdit?: boolean; // dynamic inputs
isPro?: boolean; // Pro version field
};
export type FlowNodeOutputItemType = {

View File

@@ -82,6 +82,8 @@ export const splitGuideModule = (guideModules?: StoreNodeItemType) => {
chatInputGuide
};
};
// Get app chat config: db > nodes
export const getAppChatConfig = ({
chatConfig,
systemConfigNode,
@@ -125,6 +127,7 @@ export const getAppChatConfig = ({
export const getOrInitModuleInputValue = (input: FlowNodeInputItemType) => {
if (input.value !== undefined || !input.valueType) return input.value;
if (input.defaultValue !== undefined) return input.defaultValue;
const map: Record<string, any> = {
[WorkflowIOValueTypeEnum.boolean]: false,

View File

@@ -1,11 +1,13 @@
export enum UserAuthTypeEnum {
register = 'register',
findPassword = 'findPassword',
wxLogin = 'wxLogin'
wxLogin = 'wxLogin',
bindNotification = 'bindNotification'
}
export const userAuthTypeMap = {
[UserAuthTypeEnum.register]: 'register',
[UserAuthTypeEnum.findPassword]: 'findPassword',
[UserAuthTypeEnum.wxLogin]: 'wxLogin'
[UserAuthTypeEnum.wxLogin]: 'wxLogin',
[UserAuthTypeEnum.bindNotification]: 'bindNotification'
};

View File

@@ -15,3 +15,14 @@ export const InformLevelMap = {
label: '紧急'
}
};
export enum SendInformTemplateCodeEnum {
EXPIRE_SOON = 'EXPIRE_SOON',
EXPIRED = 'EXPIRED',
FREE_CLEAN = 'FREE_CLEAN',
REGISTER = 'REGISTER',
RESET_PASSWORD = 'RESET_PASSWORD',
BIND_NOTIFICATION = 'BIND_NOTIFICATION',
LACK_OF_POINTS = 'LACK_OF_POINTS',
CUSTOM = 'CUSTOM'
}

View File

@@ -1,13 +1,16 @@
import { InformLevelEnum } from './constants';
import { InformLevelEnum, SendInformTemplateCodeEnum } from './constants';
export type SendInformProps = {
title: string;
content: string;
level: `${InformLevelEnum}`;
templateCode: `${SendInformTemplateCodeEnum}`;
templateParam: Record<string, any>;
customLockMinutes?: number; // custom lock minutes
};
export type SendInform2UserProps = SendInformProps & {
tmbId: string;
teamId: string;
};
export type SendInform2User = SendInformProps & {
type: `${InformTypeEnum}`;
tmbId: string;

View File

@@ -18,7 +18,9 @@ export type TeamSchema = {
};
lafAccount: LafAccountType;
defaultPermission: PermissionValueType;
notificationAccount?: string;
};
export type tagsType = {
label: string;
key: string;
@@ -63,6 +65,7 @@ export type TeamTmbItemType = {
role: `${TeamMemberRoleEnum}`;
status: `${TeamMemberStatusEnum}`;
lafAccount?: LafAccountType;
notificationAccount?: string;
permission: TeamPermission;
};
@@ -72,7 +75,6 @@ export type TeamMemberItemType = {
teamId: string;
memberName: string;
avatar: string;
// TODO: this should be deprecated.
role: `${TeamMemberRoleEnum}`;
status: `${TeamMemberStatusEnum}`;
permission: TeamPermission;

View File

@@ -1,12 +1,10 @@
import { TeamPermission } from '../permission/user/controller';
import { UserStatusEnum } from './constant';
import { TeamTmbItemType } from './team/type';
export type UserModelSchema = {
_id: string;
username: string;
email?: string;
phonePrefix?: number;
phone?: string;
password: string;
avatar: string;
promotionRate: number;
@@ -31,4 +29,6 @@ export type UserType = {
openaiAccount: UserModelSchema['openaiAccount'];
team: TeamTmbItemType;
standardInfo?: standardInfoType;
notificationAccount?: string;
permission: TeamPermission;
};

View File

@@ -1,8 +1,11 @@
import { i18nT } from '../../../../web/i18n/utils';
export enum UsageSourceEnum {
fastgpt = 'fastgpt',
api = 'api',
shareLink = 'shareLink',
training = 'training'
training = 'training',
cronJob = 'cronJob'
}
export const UsageSourceMap = {
@@ -17,5 +20,8 @@ export const UsageSourceMap = {
},
[UsageSourceEnum.training]: {
label: 'dataset.Training Name'
},
[UsageSourceEnum.cronJob]: {
label: i18nT('common:cron_job_run_app')
}
};

View File

@@ -1,6 +1,7 @@
import { search, SafeSearchType } from 'duck-duck-scrape';
import { delay } from '@fastgpt/global/common/system/utils';
import { addLog } from '@fastgpt/service/common/system/log';
import { getErrText } from '@fastgpt/global/common/error/utils';
type Props = {
query: string;
@@ -35,7 +36,7 @@ const main = async (props: Props, retry = 3): Response => {
if (retry <= 0) {
addLog.warn('DuckDuckGo error', { error });
return {
result: 'Failed to fetch data'
result: getErrText(error, 'Failed to fetch data from DuckDuckGo')
};
}

View File

@@ -1,6 +1,7 @@
import { searchImages, SafeSearchType } from 'duck-duck-scrape';
import { delay } from '@fastgpt/global/common/system/utils';
import { addLog } from '@fastgpt/service/common/system/log';
import { getErrText } from '@fastgpt/global/common/error/utils';
type Props = {
query: string;
@@ -33,7 +34,7 @@ const main = async (props: Props, retry = 3): Response => {
if (retry <= 0) {
addLog.warn('DuckDuckGo error', { error });
return {
result: 'Failed to fetch data'
result: getErrText(error, 'Failed to fetch data from DuckDuckGo')
};
}

View File

@@ -1,6 +1,7 @@
import { searchNews, SafeSearchType } from 'duck-duck-scrape';
import { delay } from '@fastgpt/global/common/system/utils';
import { addLog } from '@fastgpt/service/common/system/log';
import { getErrText } from '@fastgpt/global/common/error/utils';
type Props = {
query: string;
@@ -34,7 +35,7 @@ const main = async (props: Props, retry = 3): Response => {
if (retry <= 0) {
addLog.warn('DuckDuckGo error', { error });
return {
result: 'Failed to fetch data'
result: getErrText(error, 'Failed to fetch data from DuckDuckGo')
};
}

View File

@@ -1,6 +1,7 @@
import { searchVideos, SafeSearchType } from 'duck-duck-scrape';
import { delay } from '@fastgpt/global/common/system/utils';
import { addLog } from '@fastgpt/service/common/system/log';
import { getErrText } from '@fastgpt/global/common/error/utils';
type Props = {
query: string;
@@ -34,7 +35,7 @@ const main = async (props: Props, retry = 3): Response => {
if (retry <= 0) {
addLog.warn('DuckDuckGo error', { error });
return {
result: 'Failed to fetch data'
result: getErrText(error, 'Failed to fetch data from DuckDuckGo')
};
}

View File

@@ -1,6 +1,6 @@
{
"author": "",
"version": "486",
"version": "489",
"name": "文本加工",
"avatar": "/imgs/workflow/textEditor.svg",
"intro": "可对固定或传入的文本进行加工后输出,非字符串类型数据最终会转成字符串类型。",

View File

@@ -1,5 +1,5 @@
import { connectionMongo, getMongoModel, type Model } from '../../mongo';
const { Schema, model, models } = connectionMongo;
import { connectionMongo, getMongoModel } from '../../mongo';
const { Schema } = connectionMongo;
import { RawTextBufferSchemaType } from './type';
export const collectionName = 'buffer_rawtexts';

View File

@@ -3,16 +3,19 @@ import { BucketNameEnum } from '@fastgpt/global/common/file/constants';
import fsp from 'fs/promises';
import fs from 'fs';
import { DatasetFileSchema } from '@fastgpt/global/core/dataset/type';
import { MongoFileSchema } from './schema';
import { MongoChatFileSchema, MongoDatasetFileSchema } from './schema';
import { detectFileEncoding } from '@fastgpt/global/common/file/tools';
import { CommonErrEnum } from '@fastgpt/global/common/error/code/common';
import { MongoRawTextBuffer } from '../../buffer/rawText/schema';
import { readRawContentByFileBuffer } from '../read/utils';
import { gridFsStream2Buffer, stream2Encoding } from './utils';
import { addLog } from '../../system/log';
import { readFromSecondary } from '../../mongo/utils';
export function getGFSCollection(bucket: `${BucketNameEnum}`) {
MongoFileSchema;
MongoDatasetFileSchema;
MongoChatFileSchema;
return connectionMongo.connection.db.collection(`${bucket}.files`);
}
export function getGridBucket(bucket: `${BucketNameEnum}`) {
@@ -49,6 +52,7 @@ export async function uploadFile({
const { stream: readStream, encoding } = await stream2Encoding(fs.createReadStream(path));
// Add default metadata
metadata.teamId = teamId;
metadata.tmbId = tmbId;
metadata.encoding = encoding;
@@ -103,7 +107,9 @@ export async function delFileByFileIdList({
try {
const bucket = getGridBucket(bucketName);
await Promise.all(fileIdList.map((id) => bucket.delete(new Types.ObjectId(id))));
for await (const fileId of fileIdList) {
await bucket.delete(new Types.ObjectId(fileId));
}
} catch (error) {
if (retry > 0) {
return delFileByFileIdList({ bucketName, fileIdList, retry: retry - 1 });
@@ -138,7 +144,9 @@ export const readFileContentFromMongo = async ({
filename: string;
}> => {
// read buffer
const fileBuffer = await MongoRawTextBuffer.findOne({ sourceId: fileId }).lean();
const fileBuffer = await MongoRawTextBuffer.findOne({ sourceId: fileId }, undefined, {
...readFromSecondary
}).lean();
if (fileBuffer) {
return {
rawText: fileBuffer.rawText,

View File
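readFileContentFromMongo above now spreads readFromSecondary into the buffer lookup, so the hot raw-text cache read can be served by a replica. The helper itself is not shown in this diff; assuming it only sets a Mongo read preference, its shape would be roughly:

```ts
// Hypothetical sketch of readFromSecondary (the real helper lives in
// ../../mongo/utils); the exact option value is an assumption.
export const readFromSecondary = {
  readPreference: 'secondaryPreferred' as const // use a replica when available
};

// Spread into the query options so the lookup avoids the primary:
// MongoRawTextBuffer.findOne({ sourceId: fileId }, undefined, { ...readFromSecondary }).lean();
```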

@@ -1,13 +1,17 @@
import { connectionMongo, getMongoModel, type Model } from '../../mongo';
const { Schema, model, models } = connectionMongo;
const { Schema } = connectionMongo;
const FileSchema = new Schema({});
const DatasetFileSchema = new Schema({});
const ChatFileSchema = new Schema({});
try {
FileSchema.index({ 'metadata.teamId': 1 });
FileSchema.index({ 'metadata.uploadDate': -1 });
DatasetFileSchema.index({ uploadDate: -1 });
ChatFileSchema.index({ uploadDate: -1 });
ChatFileSchema.index({ 'metadata.chatId': 1 });
} catch (error) {
console.log(error);
}
export const MongoFileSchema = getMongoModel('dataset.files', FileSchema);
export const MongoDatasetFileSchema = getMongoModel('dataset.files', DatasetFileSchema);
export const MongoChatFileSchema = getMongoModel('chat.files', ChatFileSchema);

View File

@@ -45,7 +45,7 @@ export const stream2Encoding = async (stream: NodeJS.ReadableStream) => {
})();
const enc = detectFileEncoding(buffer);
console.log('Get encoding time', Date.now() - start, enc);
return {
encoding: enc,
stream: copyStream

View File

@@ -3,7 +3,6 @@ import multer from 'multer';
import path from 'path';
import { BucketNameEnum, bucketNameMap } from '@fastgpt/global/common/file/constants';
import { getNanoid } from '@fastgpt/global/common/string/tools';
import { tmpFileDirPath } from './constants';
type FileType = {
fieldname: string;

View File

@@ -8,28 +8,6 @@ import fs from 'fs';
import { detectFileEncoding } from '@fastgpt/global/common/file/tools';
import type { ReadFileResponse } from '../../../worker/readFile/type';
// match md img text and upload to db
export const matchMdImgTextAndUpload = ({
teamId,
md,
metadata
}: {
md: string;
teamId: string;
metadata?: Record<string, any>;
}) =>
markdownProcess({
rawText: md,
uploadImgController: (base64Img) =>
uploadMongoImg({
type: MongoImageTypeEnum.collectionImage,
base64Img,
teamId,
metadata,
expiredTime: addHours(new Date(), 2)
})
});
export type readRawTextByLocalFileParams = {
teamId: string;
path: string;
@@ -72,6 +50,28 @@ export const readRawContentByFileBuffer = async ({
encoding: string;
metadata?: Record<string, any>;
}) => {
// Upload image in markdown
const matchMdImgTextAndUpload = ({
teamId,
md,
metadata
}: {
md: string;
teamId: string;
metadata?: Record<string, any>;
}) =>
markdownProcess({
rawText: md,
uploadImgController: (base64Img) =>
uploadMongoImg({
type: MongoImageTypeEnum.collectionImage,
base64Img,
teamId,
metadata,
expiredTime: addHours(new Date(), 1)
})
});
let { rawText, formatText } = await runWorker<ReadFileResponse>(WorkerNameEnum.readFile, {
extension,
encoding,

View File

@@ -18,7 +18,17 @@ export const guessBase64ImageType = (str: string) => {
i: 'image/png',
R: 'image/gif',
U: 'image/webp',
Q: 'image/bmp'
Q: 'image/bmp',
P: 'image/svg+xml',
T: 'image/tiff',
J: 'image/jp2',
S: 'image/x-tga',
I: 'image/ief',
V: 'image/vnd.microsoft.icon',
W: 'image/vnd.wap.wbmp',
X: 'image/x-xbitmap',
Z: 'image/x-xpixmap',
Y: 'image/x-xwindowdump'
};
const defaultType = 'image/jpeg';
@@ -30,6 +40,11 @@ export const guessBase64ImageType = (str: string) => {
return imageTypeMap[firstChar] || defaultType;
};
export const getFileContentTypeFromHeader = (header: string): string | undefined => {
const contentType = header.split(';')[0];
return contentType;
};
export const clearDirFiles = (dirPath: string) => {
if (!fs.existsSync(dirPath)) {
return;

View File
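The expanded map still works by peeking at the first character of the base64 payload, which is fixed by the file's magic bytes: a PNG begins with 0x89 'PNG', whose base64 encoding starts with "iVBOR", hence the 'i' entry. A quick check of that reasoning, plus the new header helper:

```ts
// PNG magic bytes 0x89 0x50 0x4E 0x47 encode to base64 starting with "iVBOR"
const pngBase64 = Buffer.from([0x89, 0x50, 0x4e, 0x47]).toString('base64');
console.log(pngBase64.slice(0, 5)); // "iVBOR" -> first char "i" -> image/png

// getFileContentTypeFromHeader drops any parameters after the media type:
// getFileContentTypeFromHeader('image/png; charset=binary') === 'image/png'
```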

@@ -4,14 +4,21 @@ export enum TimerIdEnum {
checkInvalidVector = 'checkInvalidVector',
clearExpiredSubPlan = 'clearExpiredSubPlan',
updateStandardPlan = 'updateStandardPlan',
scheduleTriggerApp = 'scheduleTriggerApp'
scheduleTriggerApp = 'scheduleTriggerApp',
notification = 'notification'
}
export const timerIdMap = {
[TimerIdEnum.checkInValidDatasetFiles]: 'checkInValidDatasetFiles',
[TimerIdEnum.checkInvalidDatasetData]: 'checkInvalidDatasetData',
[TimerIdEnum.checkInvalidVector]: 'checkInvalidVector',
[TimerIdEnum.clearExpiredSubPlan]: 'clearExpiredSubPlan',
[TimerIdEnum.updateStandardPlan]: 'updateStandardPlan',
[TimerIdEnum.scheduleTriggerApp]: 'scheduleTriggerApp'
};
export enum LockNotificationEnum {
NotificationExpire = 'notification_expire',
NotificationFreeClean = 'notification_free_clean',
NotificationLackOfPoints = 'notification_lack_of_points'
}
export type LockType = `${LockNotificationEnum}`;
// add a new type enum example:
// export enum ExampleLockEnum {
// ExampleLockType1 = 'example_lock_type1'
// }
//
// export type LockType = `${LockNotificationEnum}` | `${ExampleLockEnum}`

View File

@@ -1,6 +1,5 @@
import { connectionMongo, getMongoModel, type Model } from '../../mongo';
import { timerIdMap } from './constants';
const { Schema, model, models } = connectionMongo;
import { connectionMongo, getMongoModel } from '../../mongo';
const { Schema } = connectionMongo;
import { TimerLockSchemaType } from './type.d';
export const collectionName = 'systemtimerlocks';
@@ -9,8 +8,7 @@ const TimerLockSchema = new Schema({
timerId: {
type: String,
required: true,
unique: true,
enum: Object.keys(timerIdMap)
unique: true
},
expiredTime: {
type: Date,

View File

@@ -1,4 +1,3 @@
import { TimerIdEnum } from './constants';
import { MongoTimerLock } from './schema';
import { addMinutes } from 'date-fns';
@@ -9,7 +8,7 @@ export const checkTimerLock = async ({
timerId,
lockMinuted
}: {
timerId: TimerIdEnum;
timerId: string;
lockMinuted: number;
}) => {
try {

View File
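With the enum constraint dropped from the schema and timerId widened to a plain string, callers can register new cron ids (such as the notification timer above) without a schema change; the unique timerId index plus the expiredTime window is what keeps two instances from running the same job. A usage sketch, assuming checkTimerLock resolves truthy only for the instance that wins the lock (import paths assumed):

```ts
import { checkTimerLock } from './utils'; // path assumed
import { TimerIdEnum } from './constants';

const runNotificationCron = async () => {
  // Only one instance acquires the lock for the next 59 minutes.
  const hasLock = await checkTimerLock({
    timerId: TimerIdEnum.notification,
    lockMinuted: 59
  });
  if (!hasLock) return; // another instance holds the lock

  // ...send expire / free-clean / lack-of-points notifications here
};
```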

@@ -26,9 +26,7 @@ export type EmbeddingRecallProps = {
datasetIds: string[];
forbidCollectionIdList: string[];
// forbidEmbIndexIdList: string[];
// similarity?: number;
// efSearch?: number;
filterCollectionIdList?: string[];
};
export type EmbeddingRecallCtrlProps = EmbeddingRecallProps & {
vector: number[];

View File

@@ -213,19 +213,50 @@ export class MilvusCtrl {
};
embRecall = async (props: EmbeddingRecallCtrlProps): Promise<EmbeddingRecallResponse> => {
const client = await this.getClient();
const { teamId, datasetIds, vector, limit, forbidCollectionIdList, retry = 2 } = props;
const {
teamId,
datasetIds,
vector,
limit,
forbidCollectionIdList,
filterCollectionIdList,
retry = 2
} = props;
// Forbid collection
const formatForbidCollectionIdList = (() => {
if (!filterCollectionIdList) return forbidCollectionIdList;
const list = forbidCollectionIdList
.map((id) => String(id))
.filter((id) => !filterCollectionIdList.includes(id));
return list;
})();
const forbidColQuery =
forbidCollectionIdList.length > 0
? `and (collectionId not in [${forbidCollectionIdList.map((id) => `"${String(id)}"`).join(',')}])`
formatForbidCollectionIdList.length > 0
? `and (collectionId not in [${formatForbidCollectionIdList.map((id) => `"${id}"`).join(',')}])`
: '';
// filter collection id
const formatFilterCollectionId = (() => {
if (!filterCollectionIdList) return;
return filterCollectionIdList
.map((id) => String(id))
.filter((id) => !forbidCollectionIdList.includes(id));
})();
const collectionIdQuery = formatFilterCollectionId
? `and (collectionId in [${formatFilterCollectionId.map((id) => `"${id}"`)}])`
: ``;
// Empty data
if (formatFilterCollectionId && formatFilterCollectionId.length === 0) {
return { results: [] };
}
try {
const { results } = await client.search({
collection_name: DatasetVectorTableName,
data: vector,
limit,
filter: `(teamId == "${teamId}") and (datasetId in [${datasetIds.map((id) => `"${String(id)}"`).join(',')}]) ${forbidColQuery}`,
filter: `(teamId == "${teamId}") and (datasetId in [${datasetIds.map((id) => `"${id}"`).join(',')}]) ${collectionIdQuery} ${forbidColQuery}`,
output_fields: ['collectionId']
});

View File

@@ -119,14 +119,44 @@ export class PgVectorCtrl {
}
};
embRecall = async (props: EmbeddingRecallCtrlProps): Promise<EmbeddingRecallResponse> => {
const { teamId, datasetIds, vector, limit, forbidCollectionIdList, retry = 2 } = props;
const {
teamId,
datasetIds,
vector,
limit,
forbidCollectionIdList,
filterCollectionIdList,
retry = 2
} = props;
// Get forbid collection
const formatForbidCollectionIdList = (() => {
if (!filterCollectionIdList) return forbidCollectionIdList;
const list = forbidCollectionIdList
.map((id) => String(id))
.filter((id) => !filterCollectionIdList.includes(id));
return list;
})();
const forbidCollectionSql =
forbidCollectionIdList.length > 0
? `AND collection_id NOT IN (${forbidCollectionIdList.map((id) => `'${String(id)}'`).join(',')})`
: 'AND collection_id IS NOT NULL';
// const forbidDataSql =
// forbidEmbIndexIdList.length > 0 ? `AND id NOT IN (${forbidEmbIndexIdList.join(',')})` : '';
formatForbidCollectionIdList.length > 0
? `AND collection_id NOT IN (${formatForbidCollectionIdList.map((id) => `'${id}'`).join(',')})`
: '';
// Filter by collectionId
const formatFilterCollectionId = (() => {
if (!filterCollectionIdList) return;
return filterCollectionIdList
.map((id) => String(id))
.filter((id) => !forbidCollectionIdList.includes(id));
})();
const filterCollectionIdSql = formatFilterCollectionId
? `AND collection_id IN (${formatFilterCollectionId.map((id) => `'${id}'`).join(',')})`
: '';
// Empty data
if (formatFilterCollectionId && formatFilterCollectionId.length === 0) {
return { results: [] };
}
try {
// const explan: any = await PgClient.query(
@@ -150,6 +180,7 @@ export class PgVectorCtrl {
from ${DatasetVectorTableName}
where team_id='${teamId}'
AND dataset_id IN (${datasetIds.map((id) => `'${String(id)}'`).join(',')})
${filterCollectionIdSql}
${forbidCollectionSql}
order by score limit ${limit};
COMMIT;`

View File
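Both vector controllers (Milvus above, PG here) apply the same reconciliation before building the query: requested ids are removed from the forbid list, forbidden ids are removed from the requested list, and an explicitly requested filter that reconciles to empty short-circuits to zero results without hitting the store. A standalone sketch of that set arithmetic:

```ts
// Illustrative reimplementation of the shared forbid/filter reconciliation
const reconcileCollectionIds = (
  forbidCollectionIdList: string[],
  filterCollectionIdList?: string[]
) => {
  // No explicit filter: the forbid list applies unchanged
  if (!filterCollectionIdList) {
    return { forbid: forbidCollectionIdList.map(String), filter: undefined };
  }
  // Remove requested ids from the forbid list
  const forbid = forbidCollectionIdList
    .map(String)
    .filter((id) => !filterCollectionIdList.includes(id));
  // Remove forbidden ids from the requested list
  const filter = filterCollectionIdList
    .map(String)
    .filter((id) => !forbidCollectionIdList.includes(id));
  return { forbid, filter };
};

// reconcileCollectionIds(['a', 'b'], ['b', 'c']) -> { forbid: ['a'], filter: ['c'] }
// A defined-but-empty filter makes embRecall return { results: [] } immediately.
```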

@@ -1,6 +1,7 @@
import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type.d';
import { getAIApi } from '../config';
import { countGptMessagesTokens } from '../../../common/string/tiktoken/index';
import { loadRequestMessages } from '../../chat/utils';
export const Prompt_QuestionGuide = `你是一个AI智能助手可以回答和解决我的问题。请结合前面的对话记录帮我生成 3 个问题引导我继续提问。问题的长度应小于20个字符按 JSON 格式返回: ["问题1", "问题2", "问题3"]`;
@@ -25,7 +26,10 @@ export async function createQuestionGuide({
model: model,
temperature: 0.1,
max_tokens: 200,
messages: concatMessages,
messages: await loadRequestMessages({
messages: concatMessages,
useVision: false
}),
stream: false
});

View File

@@ -0,0 +1,39 @@
import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { countGptMessagesTokens } from '../../common/string/tiktoken';
export const computedMaxToken = async ({
maxToken,
model,
filterMessages = []
}: {
maxToken: number;
model: LLMModelItemType;
filterMessages: ChatCompletionMessageParam[];
}) => {
maxToken = Math.min(maxToken, model.maxResponse);
const tokensLimit = model.maxContext;
/* count response max token */
const promptsToken = await countGptMessagesTokens(filterMessages);
maxToken = promptsToken + maxToken > tokensLimit ? tokensLimit - promptsToken : maxToken;
if (maxToken <= 0) {
maxToken = 200;
}
return maxToken;
};
// FastGPT temperature range: [0,10]; model temperature ranges vary: [0,2], (0,1], etc.
export const computedTemperature = ({
model,
temperature
}: {
model: LLMModelItemType;
temperature: number;
}) => {
temperature = +(model.maxTemperature * (temperature / 10)).toFixed(2);
temperature = Math.max(temperature, 0.01);
return temperature;
};
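computedTemperature maps FastGPT's 0-10 slider onto the model's own range and clamps the floor at 0.01, consistent with the "zero temperature change to 0.01" fixes elsewhere in this diff; computedMaxToken caps the response budget by what the prompt already consumes. A quick worked example with an assumed model config:

```ts
// Assumed model config, for illustration only
const model = { maxTemperature: 2, maxContext: 16000, maxResponse: 4000 } as any;

// Slider value 7 of 10 on a maxTemperature=2 model: 2 * (7 / 10) = 1.4
computedTemperature({ model, temperature: 7 }); // 1.4
// Slider value 0 maps to 0 and is clamped up to the 0.01 floor
computedTemperature({ model, temperature: 0 }); // 0.01

// computedMaxToken: with a 15,000-token prompt and maxToken = 4000,
// 15000 + 4000 > 16000, so the budget shrinks to 16000 - 15000 = 1000;
// if the prompt alone exceeds the context window, it falls back to 200.
```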

View File

@@ -17,7 +17,8 @@ export const chatConfigType = {
ttsConfig: Object,
whisperConfig: Object,
scheduledTriggerConfig: Object,
chatInputGuide: Object
chatInputGuide: Object,
fileSelectConfig: Object
};
// schema

View File

@@ -2,6 +2,9 @@ import type { ChatItemType, ChatItemValueItemType } from '@fastgpt/global/core/c
import { MongoChatItem } from './chatItemSchema';
import { addLog } from '../../common/system/log';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
import { delFileByFileIdList, getGFSCollection } from '../../common/file/gridfs/controller';
import { BucketNameEnum } from '@fastgpt/global/common/file/constants';
import { MongoChat } from './chatSchema';
export async function getChatItems({
appId,
@@ -75,3 +78,40 @@ export const addCustomFeedbacks = async ({
addLog.error('addCustomFeedbacks error', error);
}
};
/*
Delete chat files
1. ChatId: delete a single chat's files
2. AppId: delete all the app's chat files
*/
export const deleteChatFiles = async ({
chatIdList,
appId
}: {
chatIdList?: string[];
appId?: string;
}) => {
if (!appId && !chatIdList) return Promise.reject('appId or chatIdList is required');
const appChatIdList = await (async () => {
if (appId) {
const appChatIdList = await MongoChat.find({ appId }, { chatId: 1 });
return appChatIdList.map((item) => String(item.chatId));
} else if (chatIdList) {
return chatIdList;
}
return [];
})();
const collection = getGFSCollection(BucketNameEnum.chat);
const where = {
'metadata.chatId': { $in: appChatIdList }
};
const files = await collection.find(where, { projection: { _id: 1 } }).toArray();
await delFileByFileIdList({
bucketName: BucketNameEnum.chat,
fileIdList: files.map((item) => String(item._id))
});
};
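A usage sketch of the two granularities (ids are placeholders):

```ts
// Delete the uploaded files of specific chats
await deleteChatFiles({ chatIdList: ['chat-id-1', 'chat-id-2'] });

// Delete every chat file belonging to an app
// (chatIds are resolved through MongoChat first)
await deleteChatFiles({ appId: 'app-id' });

// Passing neither argument rejects with 'appId or chatIdList is required'
```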

View File

@@ -1,13 +1,13 @@
import { countGptMessagesTokens } from '../../common/string/tiktoken/index';
import type {
ChatCompletionContentPart,
ChatCompletionMessageParam
ChatCompletionMessageParam,
SdkChatCompletionMessageParam
} from '@fastgpt/global/core/ai/type.d';
import axios from 'axios';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { guessBase64ImageType } from '../../common/file/utils';
import { getFileContentTypeFromHeader, guessBase64ImageType } from '../../common/file/utils';
import { serverRequestBaseUrl } from '../../common/api/serverRequest';
import { cloneDeep } from 'lodash';
/* slice chat context by tokens */
const filterEmptyMessages = (messages: ChatCompletionMessageParam[]) => {
@@ -96,89 +96,183 @@ export const filterGPTMessageByMaxTokens = async ({
return filterEmptyMessages([...systemPrompts, ...chats]);
};
export const formatGPTMessagesInRequestBefore = (messages: ChatCompletionMessageParam[]) => {
return messages
.map((item) => {
if (!item.content) return;
if (typeof item.content === 'string') {
return {
...item,
content: item.content.trim()
};
}
// array
if (item.content.length === 0) return;
if (item.content.length === 1 && item.content[0].type === 'text') {
return {
...item,
content: item.content[0].text
};
}
return item;
})
.filter(Boolean) as ChatCompletionMessageParam[];
};
/* Load user chat content.
Img: to base 64
/*
Format request messages
1. If useVision is false, retain only the text parts.
2. Remove file_url parts.
3. If useVision is true, parse image URLs out of the question and load local-URL images as base64.
*/
export const loadChatImgToBase64 = async (content: string | ChatCompletionContentPart[]) => {
if (typeof content === 'string') {
return content;
}
export const loadRequestMessages = async ({
messages,
useVision = true,
origin
}: {
messages: ChatCompletionMessageParam[];
useVision?: boolean;
origin?: string;
}) => {
// Split question text and image
function parseStringWithImages(input: string): ChatCompletionContentPart[] {
if (!useVision) {
return [{ type: 'text', text: input || '' }];
}
return Promise.all(
content.map(async (item) => {
if (item.type === 'text') return item;
// Regex that matches image URLs
const imageRegex = /(https?:\/\/.*\.(?:png|jpe?g|gif|webp|bmp|tiff?|svg|ico|heic|avif))/i;
if (!item.image_url.url) return item;
const result: { type: 'text' | 'image'; value: string }[] = [];
let lastIndex = 0;
let match;
/*
1. From db: fetch it from the db
2. From web: leave unchanged
*/
if (item.image_url.url.startsWith('/')) {
const response = await axios.get(item.image_url.url, {
baseURL: serverRequestBaseUrl,
responseType: 'arraybuffer'
});
const base64 = Buffer.from(response.data).toString('base64');
let imageType = response.headers['content-type'];
if (imageType === undefined) {
imageType = guessBase64ImageType(base64);
}
return {
...item,
image_url: {
...item.image_url,
url: `data:${imageType};base64,${base64}`
}
};
// Find all matches with the regex
while ((match = imageRegex.exec(input.slice(lastIndex))) !== null) {
const textBefore = input.slice(lastIndex, lastIndex + match.index);
// If there is text before the image URL, push the text part first
if (textBefore) {
result.push({ type: 'text', value: textBefore });
}
return item;
})
);
};
export const loadRequestMessages = async (messages: ChatCompletionMessageParam[]) => {
// Push the image URL
result.push({ type: 'image', value: match[0] });
lastIndex += match.index + match[0].length;
}
// Push any remaining trailing text
if (lastIndex < input.length) {
result.push({ type: 'text', value: input.slice(lastIndex) });
}
return result
.map((item) => {
if (item.type === 'text') {
return { type: 'text', text: item.value };
}
if (item.type === 'image') {
return {
type: 'image_url',
image_url: {
url: item.value
}
};
}
return { type: 'text', text: item.value };
})
.filter(Boolean) as ChatCompletionContentPart[];
}
// Load image
const parseUserContent = async (content: string | ChatCompletionContentPart[]) => {
if (typeof content === 'string') {
return parseStringWithImages(content);
}
const result = await Promise.all(
content.map(async (item) => {
if (item.type === 'text') return parseStringWithImages(item.text);
if (item.type === 'file_url') return;
if (!item.image_url.url) return item;
// Remove url origin
const imgUrl = (() => {
if (origin && item.image_url.url.startsWith(origin)) {
return item.image_url.url.replace(origin, '');
}
return item.image_url.url;
})();
/* Load local image */
if (imgUrl.startsWith('/')) {
const response = await axios.get(imgUrl, {
baseURL: serverRequestBaseUrl,
responseType: 'arraybuffer'
});
const base64 = Buffer.from(response.data, 'binary').toString('base64');
const imageType =
getFileContentTypeFromHeader(response.headers['content-type']) ||
guessBase64ImageType(base64);
return {
...item,
image_url: {
...item.image_url,
url: `data:${imageType};base64,${base64}`
}
};
}
return item;
})
);
return result.flat().filter(Boolean);
};
// format GPT messages, concat text messages
const clearInvalidMessages = (messages: ChatCompletionMessageParam[]) => {
return messages
.map((item) => {
if (item.role === ChatCompletionRequestMessageRoleEnum.System && !item.content) {
return;
}
if (item.role === ChatCompletionRequestMessageRoleEnum.User) {
if (!item.content) return;
if (typeof item.content === 'string') {
return {
...item,
content: item.content.trim()
};
}
// array
if (item.content.length === 0) return;
if (item.content.length === 1 && item.content[0].type === 'text') {
return {
...item,
content: item.content[0].text
};
}
}
return item;
})
.filter(Boolean) as ChatCompletionMessageParam[];
};
if (messages.length === 0) {
return Promise.reject('core.chat.error.Messages empty');
}
const loadMessages = await Promise.all(
messages.map(async (item) => {
// filter messages file
// Filter file parts out of messages
const filterMessages = messages.map((item) => {
// If useVision=false, only retain text.
if (
item.role === ChatCompletionRequestMessageRoleEnum.User &&
Array.isArray(item.content) &&
!useVision
) {
return {
...item,
content: item.content.filter((item) => item.type === 'text')
};
}
return item;
});
const loadMessages = (await Promise.all(
filterMessages.map(async (item) => {
if (item.role === ChatCompletionRequestMessageRoleEnum.User) {
return {
...item,
content: await loadChatImgToBase64(item.content)
content: await parseUserContent(item.content)
};
} else {
return item;
}
})
);
)) as ChatCompletionMessageParam[];
return loadMessages;
return clearInvalidMessages(loadMessages) as SdkChatCompletionMessageParam[];
};
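The practical effect: with useVision enabled, a plain-text question containing an image link is split into a multimodal content array, and '/'-prefixed local URLs are inlined as base64 data URIs by parseUserContent. A sketch of the transformation (the URL is a placeholder):

```ts
const messages = [
  {
    role: 'user',
    content: 'What is in this picture? https://example.com/cat.png Thanks!'
  }
] as any;

const loaded = await loadRequestMessages({ messages, useVision: true });
// loaded[0].content becomes roughly:
// [
//   { type: 'text', text: 'What is in this picture? ' },
//   { type: 'image_url', image_url: { url: 'https://example.com/cat.png' } },
//   { type: 'text', text: ' Thanks!' }
// ]
// With useVision: false, the same input stays a single text message.
```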

View File

@@ -1,7 +1,4 @@
import {
TrainingModeEnum,
DatasetCollectionTypeEnum
} from '@fastgpt/global/core/dataset/constants';
import { TrainingModeEnum } from '@fastgpt/global/core/dataset/constants';
import type { CreateDatasetCollectionParams } from '@fastgpt/global/core/dataset/api.d';
import { MongoDatasetCollection } from './schema';
import {
@@ -15,6 +12,7 @@ import { deleteDatasetDataVector } from '../../../common/vectorStore/controller'
import { delFileByFileIdList } from '../../../common/file/gridfs/controller';
import { BucketNameEnum } from '@fastgpt/global/common/file/constants';
import { ClientSession } from '../../../common/mongo';
import { createOrGetCollectionTags } from './utils';
export async function createOneCollection({
teamId,
@@ -39,6 +37,7 @@ export async function createOneCollection({
rawTextLength,
metadata = {},
session,
tags,
...props
}: CreateDatasetCollectionParams & {
teamId: string;
@@ -46,6 +45,7 @@ export async function createOneCollection({
[key: string]: any;
session?: ClientSession;
}) {
const collectionTags = await createOrGetCollectionTags({ tags, teamId, datasetId, session });
const [collection] = await MongoDatasetCollection.create(
[
{
@@ -69,7 +69,8 @@ export async function createOneCollection({
rawTextLength,
hashRawText,
metadata
metadata,
tags: collectionTags
}
],
{ session }

View File

@@ -106,8 +106,10 @@ try {
updateTime: -1
});
// get forbid
// DatasetCollectionSchema.index({ teamId: 1, datasetId: 1, forbid: 1 });
// Tag filter
DatasetCollectionSchema.index({ teamId: 1, datasetId: 1, tags: 1 });
// create time filter
DatasetCollectionSchema.index({ teamId: 1, datasetId: 1, createTime: 1 });
} catch (error) {
console.log(error);
}

View File

@@ -11,6 +11,7 @@ import {
import { hashStr } from '@fastgpt/global/common/string/tools';
import { ClientSession } from '../../../common/mongo';
import { PushDatasetDataResponse } from '@fastgpt/global/core/dataset/api';
import { MongoDatasetCollectionTags } from '../tag/schema';
/**
* get all collection by top collectionId
@@ -200,3 +201,36 @@ export const reloadCollectionChunks = async ({
insertLen: result.length
};
};
export const createOrGetCollectionTags = async ({
tags = [],
datasetId,
teamId,
session
}: {
tags?: string[];
datasetId: string;
teamId: string;
session?: ClientSession;
}): Promise<string[]> => {
if (!tags.length) return [];
const existingTags = await MongoDatasetCollectionTags.find({
teamId,
datasetId,
$expr: { $in: ['$tag', tags] }
});
const existingTagContents = existingTags.map((tag) => tag.tag);
const newTagContents = tags.filter((tag) => !existingTagContents.includes(tag));
const newTags = await MongoDatasetCollectionTags.insertMany(
newTagContents.map((tagContent) => ({
teamId,
datasetId,
tag: tagContent
})),
{ session }
);
return [...existingTags.map((tag) => tag._id), ...newTags.map((tag) => tag._id)];
};
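This helper backs the new "add tags when creating collections via api" feature: incoming tag strings are resolved against existing tag documents, and only unseen ones are inserted, so repeated API calls with the same tags keep pointing at the same tag ids. Usage sketch (ids and tag values are placeholders):

```ts
// Resolve ['ai', 'docs'] to tag ObjectIds, inserting 'docs' if it is new
const tagIds = await createOrGetCollectionTags({
  teamId: 'team-id',
  datasetId: 'dataset-id',
  tags: ['ai', 'docs']
});
// tagIds is then stored on the collection document's `tags` field

// An empty tag list returns [] without touching the database
```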

View File

@@ -20,6 +20,9 @@ import { hashStr } from '@fastgpt/global/common/string/tools';
import { jiebaSplit } from '../../../common/string/jieba';
import { getCollectionSourceData } from '@fastgpt/global/core/dataset/collection/utils';
import { Types } from '../../../common/mongo';
import json5 from 'json5';
import { MongoDatasetCollectionTags } from '../tag/schema';
import { readFromSecondary } from '../../../common/mongo/utils';
type SearchDatasetDataProps = {
teamId: string;
@@ -31,6 +34,20 @@ type SearchDatasetDataProps = {
usingReRank?: boolean;
reRankQuery: string;
queries: string[];
/*
{
tags: {
$and: ["str1","str2"],
$or: ["str1","str2",null], // null matches collections without tags
},
createTime: {
$gte: 'xx',
$lte: 'xxx'
}
}
*/
collectionFilterMatch?: string;
};
export async function searchDatasetData(props: SearchDatasetDataProps) {
@@ -43,7 +60,8 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
limit: maxTokens,
searchMode = DatasetSearchModeEnum.embedding,
usingReRank = false,
datasetIds = []
datasetIds = [],
collectionFilterMatch
} = props;
/* init params */
@@ -87,14 +105,148 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
forbidCollectionIdList: collections.map((item) => String(item._id))
};
};
/*
Collection metadata filter
Tag filter:
1. $and takes effect first.
2. $and cannot mix string tags with null; otherwise an empty array is returned.
*/
const filterCollectionByMetadata = async (): Promise<string[] | undefined> => {
if (!collectionFilterMatch || !global.feConfigs.isPlus) return;
let tagCollectionIdList: string[] | undefined = undefined;
let createTimeCollectionIdList: string[] | undefined = undefined;
try {
const jsonMatch = json5.parse(collectionFilterMatch);
// Tag
let andTags = jsonMatch?.tags?.$and as (string | null)[] | undefined;
let orTags = jsonMatch?.tags?.$or as (string | null)[] | undefined;
// get andTagIds
if (andTags && andTags.length > 0) {
// Deduplicate tags
andTags = Array.from(new Set(andTags));
if (andTags.includes(null) && andTags.some((tag) => typeof tag === 'string')) {
return [];
}
if (andTags.every((tag) => typeof tag === 'string')) {
// Get tagId by tag string
const andTagIdList = await MongoDatasetCollectionTags.find(
{
teamId,
datasetId: { $in: datasetIds },
tag: { $in: andTags }
},
'_id',
{
...readFromSecondary
}
).lean();
// If any requested tag does not exist, no collection can match
if (andTagIdList.length !== andTags.length) return [];
// Get collectionId by tagId
const collections = await MongoDatasetCollection.find(
{
teamId,
datasetId: { $in: datasetIds },
tags: { $all: andTagIdList.map((item) => String(item._id)) }
},
'_id',
{
...readFromSecondary
}
).lean();
tagCollectionIdList = collections.map((item) => String(item._id));
} else if (andTags.every((tag) => tag === null)) {
const collections = await MongoDatasetCollection.find(
{
teamId,
datasetId: { $in: datasetIds },
$or: [{ tags: { $size: 0 } }, { tags: { $exists: false } }]
},
'_id',
{
...readFromSecondary
}
).lean();
tagCollectionIdList = collections.map((item) => String(item._id));
}
} else if (orTags && orTags.length > 0) {
// Get tagId by tag string
const orTagArray = await MongoDatasetCollectionTags.find(
{
teamId,
datasetId: { $in: datasetIds },
tag: { $in: orTags.filter((tag) => tag !== null) }
},
'_id',
{ ...readFromSecondary }
).lean();
const orTagIds = orTagArray.map((item) => String(item._id));
// Get collections by tagId
const collections = await MongoDatasetCollection.find(
{
teamId,
datasetId: { $in: datasetIds },
$or: [
{ tags: { $in: orTagIds } },
...(orTags.includes(null) ? [{ tags: { $size: 0 } }] : [])
]
},
'_id',
{ ...readFromSecondary }
).lean();
tagCollectionIdList = collections.map((item) => String(item._id));
}
// time
const getCreateTime = jsonMatch?.createTime?.$gte as string | undefined;
const lteCreateTime = jsonMatch?.createTime?.$lte as string | undefined;
if (getCreateTime || lteCreateTime) {
const collections = await MongoDatasetCollection.find(
{
teamId,
datasetId: { $in: datasetIds },
createTime: {
...(getCreateTime && { $gte: new Date(getCreateTime) }),
...(lteCreateTime && {
$lte: new Date(lteCreateTime)
})
}
},
'_id'
);
createTimeCollectionIdList = collections.map((item) => String(item._id));
}
// Intersect tag and time results
if (tagCollectionIdList && createTimeCollectionIdList) {
return tagCollectionIdList.filter((id) => createTimeCollectionIdList!.includes(id));
} else if (tagCollectionIdList) {
return tagCollectionIdList;
} else if (createTimeCollectionIdList) {
return createTimeCollectionIdList;
}
} catch (error) {}
};
const embeddingRecall = async ({
query,
limit,
forbidCollectionIdList
forbidCollectionIdList,
filterCollectionIdList
}: {
query: string;
limit: number;
forbidCollectionIdList: string[];
filterCollectionIdList?: string[];
}) => {
const { vectors, tokens } = await getVectorsByText({
model: getVectorModel(model),
@@ -107,7 +259,8 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
datasetIds,
vector: vectors[0],
limit,
forbidCollectionIdList
forbidCollectionIdList,
filterCollectionIdList
});
// get q and a
@@ -165,10 +318,12 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
};
const fullTextRecall = async ({
query,
limit
limit,
filterCollectionIdList
}: {
query: string;
limit: number;
filterCollectionIdList?: string[];
}): Promise<{
fullTextRecallResults: SearchDataResponseItemType[];
tokenLen: number;
@@ -188,7 +343,14 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
$match: {
teamId: new Types.ObjectId(teamId),
datasetId: new Types.ObjectId(id),
$text: { $search: jiebaSplit({ text: query }) }
$text: { $search: jiebaSplit({ text: query }) },
...(filterCollectionIdList && filterCollectionIdList.length > 0
? {
collectionId: {
$in: filterCollectionIdList.map((id) => new Types.ObjectId(id))
}
}
: {})
}
},
{
@@ -327,7 +489,10 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
const fullTextRecallResList: SearchDataResponseItemType[][] = [];
let totalTokens = 0;
const { forbidCollectionIdList } = await getForbidData();
const [{ forbidCollectionIdList }, filterCollectionIdList] = await Promise.all([
getForbidData(),
filterCollectionByMetadata()
]);
await Promise.all(
queries.map(async (query) => {
@@ -335,11 +500,13 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
embeddingRecall({
query,
limit: embeddingLimit,
forbidCollectionIdList
forbidCollectionIdList,
filterCollectionIdList
}),
fullTextRecall({
query,
limit: fullTextLimit
limit: fullTextLimit,
filterCollectionIdList
})
]);
totalTokens += tokens;

View File
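Callers pass collectionFilterMatch as a JSON5 string matching the shape documented in the props comment above; $and takes effect first, and null stands for "collections without tags". An example filter (tag values and dates are illustrative):

```ts
import json5 from 'json5';

// Restrict recall to collections tagged both 'faq' and 'v2',
// created during July 2024:
const collectionFilterMatch = json5.stringify({
  tags: { $and: ['faq', 'v2'] },
  createTime: { $gte: '2024-07-01', $lte: '2024-08-01' }
});

// { tags: { $or: [null] } } would instead match only untagged collections,
// and mixing string tags with null under $and yields an empty result set.
```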

@@ -0,0 +1,35 @@
import { TeamCollectionName } from '@fastgpt/global/support/user/team/constant';
import { connectionMongo, getMongoModel, type Model } from '../../../common/mongo';
import { DatasetCollectionName } from '../schema';
import { DatasetCollectionTagsSchemaType } from '@fastgpt/global/core/dataset/type';
const { Schema } = connectionMongo;
export const DatasetCollectionTagsName = 'dataset_collection_tags';
const DatasetCollectionTagsSchema = new Schema({
teamId: {
type: Schema.Types.ObjectId,
ref: TeamCollectionName,
required: true
},
datasetId: {
type: Schema.Types.ObjectId,
ref: DatasetCollectionName,
required: true
},
tag: {
type: String,
required: true
}
});
try {
DatasetCollectionTagsSchema.index({ teamId: 1, datasetId: 1, tag: 1 });
} catch (error) {
console.log(error);
}
export const MongoDatasetCollectionTags = getMongoModel<DatasetCollectionTagsSchemaType>(
DatasetCollectionTagsName,
DatasetCollectionTagsSchema
);

View File

@@ -16,6 +16,7 @@ import { formatModelChars2Points } from '../../../../support/wallet/usage/utils'
import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/type';
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
import { getHandleId } from '@fastgpt/global/core/workflow/utils';
import { loadRequestMessages } from '../../../chat/utils';
type Props = ModuleDispatchProps<{
[NodeInputKeyEnum.aiModel]: string;
@@ -113,6 +114,10 @@ const completions = async ({
]
}
];
const requestMessages = await loadRequestMessages({
messages: chats2GPTMessages({ messages, reserveId: false }),
useVision: false
});
const ai = getAIApi({
userKey: user.openaiAccount,
@@ -122,7 +127,7 @@ const completions = async ({
const data = await ai.chat.completions.create({
model: cqModel.model,
temperature: 0.01,
messages: chats2GPTMessages({ messages, reserveId: false }),
messages: requestMessages,
stream: false
});
const answer = data.choices?.[0].message?.content || '';

View File

@@ -1,5 +1,5 @@
import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
import { filterGPTMessageByMaxTokens } from '../../../chat/utils';
import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../chat/utils';
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import {
countMessagesTokens,
@@ -173,6 +173,10 @@ ${description ? `- ${description}` : ''}
messages: adaptMessages,
maxTokens: extractModel.maxContext
});
const requestMessages = await loadRequestMessages({
messages: filterMessages,
useVision: false
});
const properties: Record<
string,
@@ -200,7 +204,7 @@ ${description ? `- ${description}` : ''}
};
return {
filterMessages,
filterMessages: requestMessages,
agentFunction
};
};
@@ -224,7 +228,7 @@ const toolChoice = async (props: ActionProps) => {
const response = await ai.chat.completions.create({
model: extractModel.model,
temperature: 0,
temperature: 0.01,
messages: filterMessages,
tools,
tool_choice: { type: 'function', function: { name: agentFunName } }
@@ -269,7 +273,7 @@ const functionCall = async (props: ActionProps) => {
const response = await ai.chat.completions.create({
model: extractModel.model,
temperature: 0,
temperature: 0.01,
messages: filterMessages,
function_call: {
name: agentFunName
@@ -338,6 +342,10 @@ Human: ${content}`
]
}
];
const requestMessages = await loadRequestMessages({
messages: chats2GPTMessages({ messages, reserveId: false }),
useVision: false
});
const ai = getAIApi({
userKey: user.openaiAccount,
@@ -346,7 +354,7 @@ Human: ${content}`
const data = await ai.chat.completions.create({
model: extractModel.model,
temperature: 0.01,
messages: chats2GPTMessages({ messages, reserveId: false }),
messages: requestMessages,
stream: false
});
const answer = data.choices?.[0].message?.content || '';

View File

@@ -1,3 +1,5 @@
import { replaceVariable } from '@fastgpt/global/common/string/tools';
export const Prompt_Tool_Call = `<Instruction>
你是一个智能机器人,除了可以回答用户问题外,你还掌握工具的使用能力。有时候,你可以依赖工具的运行结果,来更准确的回答用户。
@@ -32,6 +34,8 @@ TOOL_RESPONSE: """
ANSWER: 0: 今天杭州是晴天,适合去西湖、灵隐寺、千岛湖等地玩。
</Instruction>
------
现在,我们开始吧!下面是你本次可以使用的工具:
"""
@@ -42,3 +46,16 @@ ANSWER: 0: 今天杭州是晴天,适合去西湖、灵隐寺、千岛湖等地
USER: {{question}}
ANSWER: `;
export const getMultiplePrompt = (obj: {
fileCount: number;
imgCount: number;
question: string;
}) => {
const prompt = `Number of session file inputs:
Document:{{fileCount}}
Image:{{imgCount}}
------
{{question}}`;
return replaceVariable(prompt, obj);
};
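replaceVariable fills in the {{...}} placeholders, so a question accompanied by one document and two images is rewritten as:

```ts
getMultiplePrompt({ fileCount: 1, imgCount: 2, question: 'Summarize the attachments' });
// Returns:
// `Number of session file inputs:
// Document:1
// Image:2
// ------
// Summarize the attachments`
```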

View File

@@ -9,7 +9,7 @@ import {
ChatCompletionMessageFunctionCall,
ChatCompletionFunctionMessageParam,
ChatCompletionAssistantMessageParam
} from '@fastgpt/global/core/ai/type';
} from '@fastgpt/global/core/ai/type.d';
import { NextApiResponse } from 'next';
import {
responseWrite,
@@ -24,10 +24,11 @@ import { DispatchToolModuleProps, RunToolResponse, ToolNodeItemType } from './ty
import json5 from 'json5';
import { DispatchFlowResponse } from '../../type';
import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/index';
import { getNanoid } from '@fastgpt/global/common/string/tools';
import { getNanoid, sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { updateToolInputValue } from './utils';
import { computedMaxToken, computedTemperature } from '../../../../ai/utils';
type FunctionRunResponseType = {
toolRunResponse: DispatchFlowResponse;
@@ -42,7 +43,18 @@ export const runToolWithFunctionCall = async (
},
response?: RunToolResponse
): Promise<RunToolResponse> => {
const { toolModel, toolNodes, messages, res, runtimeNodes, detail = false, node, stream } = props;
const {
toolModel,
toolNodes,
messages,
res,
requestOrigin,
runtimeNodes,
detail = false,
node,
stream,
params: { temperature = 0, maxToken = 4000, aiChatVision }
} = props;
const assistantResponses = response?.assistantResponses || [];
const functions: ChatCompletionCreateParams.Function[] = toolNodes.map((item) => {
@@ -72,44 +84,60 @@ export const runToolWithFunctionCall = async (
};
});
const filterMessages = await filterGPTMessageByMaxTokens({
messages,
maxTokens: toolModel.maxContext - 500 // filter token. not response maxToken
});
const formativeMessages = filterMessages.map((item) => {
const filterMessages = (
await filterGPTMessageByMaxTokens({
messages,
maxTokens: toolModel.maxContext - 300 // filter token. not response maxToken
})
).map((item) => {
if (item.role === ChatCompletionRequestMessageRoleEnum.Assistant && item.function_call) {
return {
...item,
function_call: {
name: item.function_call?.name,
arguments: item.function_call?.arguments
}
},
content: ''
};
}
return item;
});
const requestMessages = await loadRequestMessages(formativeMessages);
const [requestMessages, max_tokens] = await Promise.all([
loadRequestMessages({
messages: filterMessages,
useVision: toolModel.vision && aiChatVision,
origin: requestOrigin
}),
computedMaxToken({
model: toolModel,
maxToken,
filterMessages
})
]);
const requestBody: any = {
...toolModel?.defaultConfig,
model: toolModel.model,
temperature: computedTemperature({
model: toolModel,
temperature
}),
max_tokens,
stream,
messages: requestMessages,
functions,
function_call: 'auto'
};
// console.log(JSON.stringify(requestBody, null, 2));
/* Run llm */
const ai = getAIApi({
timeout: 480000
});
const aiResponse = await ai.chat.completions.create(
{
...toolModel?.defaultConfig,
model: toolModel.model,
temperature: 0,
stream,
messages: requestMessages,
functions,
function_call: 'auto'
},
{
headers: {
Accept: 'application/json, text/plain, */*'
}
const aiResponse = await ai.chat.completions.create(requestBody, {
headers: {
Accept: 'application/json, text/plain, */*'
}
);
});
const { answer, functionCalls } = await (async () => {
if (res && stream) {
@@ -198,7 +226,7 @@ export const runToolWithFunctionCall = async (
toolName: '',
toolAvatar: '',
params: '',
response: stringToolResponse
response: sliceStrStartEnd(stringToolResponse, 300, 300)
}
})
});
@@ -222,7 +250,7 @@ export const runToolWithFunctionCall = async (
function_call: functionCall
};
const concatToolMessages = [
...filterMessages,
...requestMessages,
assistantToolMsgParams
] as ChatCompletionMessageParam[];
const tokens = await countGptMessagesTokens(concatToolMessages, undefined, functions);

View File

@@ -8,7 +8,7 @@ import { ModelTypeEnum, getLLMModel } from '../../../../ai/model';
import { filterToolNodeIdByEdges, getHistories } from '../../utils';
import { runToolWithToolChoice } from './toolChoice';
import { DispatchToolModuleProps, ToolNodeItemType } from './type.d';
import { ChatItemType } from '@fastgpt/global/core/chat/type';
import { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import {
GPTMessages2Chats,
@@ -22,12 +22,46 @@ import { getHistoryPreview } from '@fastgpt/global/core/chat/utils';
import { runToolWithFunctionCall } from './functionCall';
import { runToolWithPromptCall } from './promptCall';
import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { Prompt_Tool_Call } from './constants';
import { getMultiplePrompt, Prompt_Tool_Call } from './constants';
import { filterToolResponseToPreview } from './utils';
type Response = DispatchNodeResultType<{
[NodeOutputKeyEnum.answerText]: string;
}>;
/*
Tool call: automatically add a file prompt to the question.
Guide the LLM to call the tool.
*/
export const toolCallMessagesAdapt = ({
userInput
}: {
userInput: UserChatItemValueItemType[];
}) => {
const files = userInput.filter((item) => item.type === 'file');
if (files.length > 0) {
return userInput.map((item) => {
if (item.type === 'text') {
const filesCount = files.filter((file) => file.file?.type === 'file').length;
const imgCount = files.filter((file) => file.file?.type === 'image').length;
const text = item.text?.content || '';
return {
...item,
text: {
content: getMultiplePrompt({ fileCount: filesCount, imgCount, question: text })
}
};
}
return item;
});
}
return userInput;
};
export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<Response> => {
const {
node: { nodeId, name },
@@ -62,16 +96,31 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
const messages: ChatItemType[] = [
...getSystemPrompt(systemPrompt),
...chatHistories,
// Add file input prompt to histories
...chatHistories.map((item) => {
if (item.obj === ChatRoleEnum.Human) {
return {
...item,
value: toolCallMessagesAdapt({
userInput: item.value
})
};
}
return item;
}),
{
obj: ChatRoleEnum.Human,
value: runtimePrompt2ChatsValue({
text: userChatInput,
files: chatValue2RuntimePrompt(query).files
value: toolCallMessagesAdapt({
userInput: runtimePrompt2ChatsValue({
text: userChatInput,
files: chatValue2RuntimePrompt(query).files
})
})
}
];
// console.log(JSON.stringify(messages, null, 2));
const {
dispatchFlowResponse, // tool flow response
totalTokens,
@@ -98,14 +147,24 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
}
const lastMessage = adaptMessages[adaptMessages.length - 1];
if (typeof lastMessage.content !== 'string') {
return Promise.reject('暂时只支持纯文本');
if (typeof lastMessage.content === 'string') {
lastMessage.content = replaceVariable(Prompt_Tool_Call, {
question: lastMessage.content
});
} else if (Array.isArray(lastMessage.content)) {
// array, replace last element
const lastText = lastMessage.content[lastMessage.content.length - 1];
if (lastText.type === 'text') {
lastMessage.content = replaceVariable(Prompt_Tool_Call, {
question: lastText.text
});
} else {
return Promise.reject('Prompt call invalid input');
}
} else {
return Promise.reject('Prompt call invalid input');
}
lastMessage.content = replaceVariable(Prompt_Tool_Call, {
question: userChatInput
});
return runToolWithPromptCall({
...props,
toolNodes,
@@ -132,12 +191,14 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
}, 0);
const flatUsages = dispatchFlowResponse.map((item) => item.flowUsages).flat();
const previewAssistantResponses = filterToolResponseToPreview(assistantResponses);
return {
[NodeOutputKeyEnum.answerText]: assistantResponses
[NodeOutputKeyEnum.answerText]: previewAssistantResponses
.filter((item) => item.text?.content)
.map((item) => item.text?.content || '')
.join(''),
[DispatchNodeResponseKeyEnum.assistantResponses]: assistantResponses,
[DispatchNodeResponseKeyEnum.assistantResponses]: previewAssistantResponses,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: totalPointsUsage,
toolCallTokens: totalTokens,

View File

@@ -20,10 +20,16 @@ import { dispatchWorkFlow } from '../../index';
import { DispatchToolModuleProps, RunToolResponse, ToolNodeItemType } from './type.d';
import json5 from 'json5';
import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/index';
import { getNanoid, replaceVariable, sliceJsonStr } from '@fastgpt/global/common/string/tools';
import {
getNanoid,
replaceVariable,
sliceJsonStr,
sliceStrStartEnd
} from '@fastgpt/global/common/string/tools';
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { updateToolInputValue } from './utils';
import { computedMaxToken, computedTemperature } from '../../../../ai/utils';
type FunctionCallCompletion = {
id: string;
@@ -43,7 +49,18 @@ export const runToolWithPromptCall = async (
},
response?: RunToolResponse
): Promise<RunToolResponse> => {
const { toolModel, toolNodes, messages, res, runtimeNodes, detail = false, node, stream } = props;
const {
toolModel,
toolNodes,
messages,
res,
requestOrigin,
runtimeNodes,
detail = false,
node,
stream,
params: { temperature = 0, maxToken = 4000, aiChatVision }
} = props;
const assistantResponses = response?.assistantResponses || [];
const toolsPrompt = JSON.stringify(
@@ -77,7 +94,7 @@ export const runToolWithPromptCall = async (
const lastMessage = messages[messages.length - 1];
if (typeof lastMessage.content !== 'string') {
return Promise.reject('暂时只支持纯文本');
return Promise.reject('Prompt call invalid input');
}
lastMessage.content = replaceVariable(lastMessage.content, {
toolsPrompt
@@ -87,27 +104,40 @@ export const runToolWithPromptCall = async (
messages,
maxTokens: toolModel.maxContext - 500 // filter token. not response maxToken
});
const requestMessages = await loadRequestMessages(filterMessages);
const [requestMessages, max_tokens] = await Promise.all([
loadRequestMessages({
messages: filterMessages,
useVision: toolModel.vision && aiChatVision,
origin: requestOrigin
}),
computedMaxToken({
model: toolModel,
maxToken,
filterMessages
})
]);
const requestBody = {
...toolModel?.defaultConfig,
model: toolModel.model,
temperature: computedTemperature({
model: toolModel,
temperature
}),
max_tokens,
stream,
messages: requestMessages
};
// console.log(JSON.stringify(filterMessages, null, 2));
// console.log(JSON.stringify(requestBody, null, 2));
/* Run llm */
const ai = getAIApi({
timeout: 480000
});
const aiResponse = await ai.chat.completions.create(
{
...toolModel?.defaultConfig,
model: toolModel.model,
temperature: 0,
stream,
messages: requestMessages
},
{
headers: {
Accept: 'application/json, text/plain, */*'
}
const aiResponse = await ai.chat.completions.create(requestBody, {
headers: {
Accept: 'application/json, text/plain, */*'
}
);
});
const answer = await (async () => {
if (res && stream) {
@@ -225,7 +255,7 @@ export const runToolWithPromptCall = async (
toolName: '',
toolAvatar: '',
params: '',
response: stringToolResponse
response: sliceStrStartEnd(stringToolResponse, 300, 300)
}
})
});
@@ -250,7 +280,7 @@ export const runToolWithPromptCall = async (
function_call: toolJson
};
const concatToolMessages = [
...filterMessages,
...requestMessages,
assistantToolMsgParams
] as ChatCompletionMessageParam[];
const tokens = await countGptMessagesTokens(concatToolMessages, undefined);

View File

@@ -28,6 +28,8 @@ import { countGptMessagesTokens } from '../../../../../common/string/tiktoken/in
import { GPTMessages2Chats } from '@fastgpt/global/core/chat/adapt';
import { AIChatItemType } from '@fastgpt/global/core/chat/type';
import { updateToolInputValue } from './utils';
import { computedMaxToken, computedTemperature } from '../../../../ai/utils';
import { sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
type ToolRunResponseType = {
toolRunResponse: DispatchFlowResponse;
@@ -49,7 +51,18 @@ export const runToolWithToolChoice = async (
},
response?: RunToolResponse
): Promise<RunToolResponse> => {
const { toolModel, toolNodes, messages, res, runtimeNodes, detail = false, node, stream } = props;
const {
toolModel,
toolNodes,
messages,
res,
requestOrigin,
runtimeNodes,
detail = false,
node,
stream,
params: { temperature = 0, maxToken = 4000, aiChatVision }
} = props;
const assistantResponses = response?.assistantResponses || [];
const tools: ChatCompletionTool[] = toolNodes.map((item) => {
@@ -81,12 +94,13 @@ export const runToolWithToolChoice = async (
}
};
});
const filterMessages = await filterGPTMessageByMaxTokens({
messages,
maxTokens: toolModel.maxContext - 300 // filter token. not response maxToken
});
const formativeMessages = filterMessages.map((item) => {
// Filter histories by maxToken
const filterMessages = (
await filterGPTMessageByMaxTokens({
messages,
maxTokens: toolModel.maxContext - 300 // filter token. not response maxToken
})
).map((item) => {
if (item.role === 'assistant' && item.tool_calls) {
return {
...item,
@@ -99,43 +113,43 @@ export const runToolWithToolChoice = async (
}
return item;
});
const requestMessages = await loadRequestMessages(formativeMessages);
// console.log(
// JSON.stringify(
// {
// ...toolModel?.defaultConfig,
// model: toolModel.model,
// temperature: 0,
// stream,
// messages: requestMessages,
// tools,
// tool_choice: 'auto'
// },
// null,
// 2
// )
// );
const [requestMessages, max_tokens] = await Promise.all([
loadRequestMessages({
messages: filterMessages,
useVision: toolModel.vision && aiChatVision,
origin: requestOrigin
}),
computedMaxToken({
model: toolModel,
maxToken,
filterMessages
})
]);
const requestBody: any = {
...toolModel?.defaultConfig,
model: toolModel.model,
temperature: computedTemperature({
model: toolModel,
temperature
}),
max_tokens,
stream,
messages: requestMessages,
tools,
tool_choice: 'auto'
};
// console.log(JSON.stringify(requestBody, null, 2));
/* Run llm */
const ai = getAIApi({
timeout: 480000
});
const aiResponse = await ai.chat.completions.create(
{
...toolModel?.defaultConfig,
model: toolModel.model,
temperature: 0,
stream,
messages: requestMessages,
tools,
tool_choice: 'auto'
},
{
headers: {
Accept: 'application/json, text/plain, */*'
}
const aiResponse = await ai.chat.completions.create(requestBody, {
headers: {
Accept: 'application/json, text/plain, */*'
}
);
});
const { answer, toolCalls } = await (async () => {
if (res && stream) {
@@ -221,7 +235,7 @@ export const runToolWithToolChoice = async (
toolName: '',
toolAvatar: '',
params: '',
response: stringToolResponse
response: sliceStrStartEnd(stringToolResponse, 300, 300)
}
})
});
@@ -243,7 +257,7 @@ export const runToolWithToolChoice = async (
tool_calls: toolCalls
};
const concatToolMessages = [
...filterMessages,
...requestMessages,
assistantToolMsgParams
] as ChatCompletionMessageParam[];
const tokens = await countGptMessagesTokens(concatToolMessages, tools);

View File

@@ -11,9 +11,13 @@ import { AIChatItemValueItemType, ChatItemValueItemType } from '@fastgpt/global/
export type DispatchToolModuleProps = ModuleDispatchProps<{
[NodeInputKeyEnum.history]?: ChatItemType[];
[NodeInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.aiModel]: string;
[NodeInputKeyEnum.aiSystemPrompt]: string;
[NodeInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.aiChatTemperature]: number;
[NodeInputKeyEnum.aiChatMaxToken]: number;
[NodeInputKeyEnum.aiChatVision]?: boolean;
}>;
export type RunToolResponse = {

View File

@@ -1,3 +1,6 @@
import { sliceStrStartEnd } from '@fastgpt/global/common/string/tools';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
import { AIChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { FlowNodeInputItemType } from '@fastgpt/global/core/workflow/type/io';
export const updateToolInputValue = ({
@@ -12,3 +15,22 @@ export const updateToolInputValue = ({
value: params[input.key] ?? input.value
}));
};
export const filterToolResponseToPreview = (response: AIChatItemValueItemType[]) => {
return response.map((item) => {
if (item.type === ChatItemValueTypeEnum.tool) {
const formatTools = item.tools?.map((tool) => {
return {
...tool,
response: sliceStrStartEnd(tool.response, 500, 500)
};
});
return {
...item,
tools: formatTools
};
}
return item;
});
};
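filterToolResponseToPreview trims what the client has to render. Assuming sliceStrStartEnd(str, start, end) keeps the first start and last end characters of an over-long string (its exact ellipsis marker is not shown in this diff), tool responses are previewed at 500+500 characters here and at 300+300 in the node response entries earlier in the diff:

```ts
// Hypothetical sketch of sliceStrStartEnd under the assumption above
const sliceStrStartEnd = (str: string, start: number, end: number) =>
  str.length <= start + end
    ? str
    : `${str.slice(0, start)}...${str.slice(-end)}`;

// A 10,000-char tool response is previewed as its 500 head + 500 tail chars.
```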

Some files were not shown because too many files have changed in this diff.