Compare commits

..

45 Commits

Author SHA1 Message Date
Archer
ca5717936b update doc (#3836) 2025-02-19 22:32:08 +08:00
Archer
6762723b10 perf: query extension and fix filter same embedding result (#3833)
* perf: query extension and fix filter same embedding result

* fix: extract node too long

* perf: ui

* perf: not chatId will auto save

* fix: laf

* fix: member load

* feat: add completions unstream error response

* feat: add completions unstream error response

* update model provider
2025-02-19 22:16:43 +08:00
heheer
8604cbd021 fix source name (#3834) 2025-02-19 20:42:30 +08:00
Finley Ge
206325bc5f chore: team, orgs, search and so on (#3807)
* feat: clb search support username, memberName, contacts

* feat: popup org names

* feat: update team member table

* feat: restore the member

* feat: search user in team member table

* feat: bind contact

* feat: export members

* feat: org tab could delete member

* feat: org table search

* feat: team notification account bind

* feat: permission tab search

* fix: wecom sso

* chore(init): copy notificationAccount to user.contact

* chore: adjust

* fix: ts error

* fix: useConfirm iconColor customization

* perf: fe

* fix: style

* fix: fix team member manage

* perf: enlarge team member pagesize

* perf: initv4822

* fix: pageSize

* perf: initscript
2025-02-19 17:27:19 +08:00
Archer
5fd520c794 perf: gemini config (#3828)
* doc

* doc

* perf: gemini config
2025-02-19 12:00:31 +08:00
Archer
09205e4666 fix: price page init data;perf: usage code;fix: reasoning tokens;fix: workflow basic node cannot upgrade (#3816)
* fix: img read

* fix: price page init data

* perf: ai model avatar

* perf: refresh in change team

* perf: null checker

* perf: usage code

* fix: reasoning tokens

* fix: workflow basic node cannot upgrade

* perf: model refresh

* perf: icon refresh
2025-02-18 20:50:25 +08:00
Finley Ge
ccf28d83b8 fix: app version addSourcemember tmbid could be empty (#3822) 2025-02-18 20:26:49 +08:00
LGiki
420aaad48e chore: fix typo in docs (#3819) 2025-02-18 20:25:51 +08:00
heheer
8ba2339890 download fetch baseurl & node select dnd (#3820) 2025-02-18 20:25:15 +08:00
Archer
e7b8934367 Update 4818.md (#3818) 2025-02-18 14:26:21 +08:00
Finley Ge
3e13397614 fix: refresh memberlist when switching account (#3814) 2025-02-18 13:54:56 +08:00
Archer
b14674cc6f fix: whisper checker;fix: img read (#3813)
* fix: img read

* fix: whisper checker

* perf: dev doc

* perf: dev doc

* remove invalid code
2025-02-18 10:08:25 +08:00
Archer
4d20274a97 feat: think tag parse (#3805) (#3808)
* feat: think tag parse

* remove some model config

* feat: parse think tag test
2025-02-17 20:57:36 +08:00
heheer
4447e40364 fix template market simple app (#3804) 2025-02-17 20:56:46 +08:00
John Chen
23949230ee fix document (#3806)
V2版本“获取集合列表”接口的path区分了大小写,使用/api/core/dataset/collection/listv2会返回404,必须使用大写V
2025-02-17 20:55:34 +08:00
saikidev
cd7a897304 chore: add ppio provider (#3789) 2025-02-14 17:04:43 +08:00
Archer
18aff8b8db update yml version (#3787) 2025-02-14 12:50:54 +08:00
Archer
d2b60ec785 fix: model check circle tip (#3786)
* model config

* feat: normalization embedding

* remove log

* version doc

* version doc

* fix: model check circle tip

* uml
2025-02-14 11:42:14 +08:00
a.e.
1226fe42a1 fix: skip thirdparty sso state verification (#3721) (#3782) 2025-02-14 11:39:34 +08:00
Finley Ge
abd375cdec fix: app/dataset list api return private flag (#3784) 2025-02-14 11:38:48 +08:00
Archer
7aacce8b0b 4.9.0 test (#3779)
* model config

* feat: normalization embedding

* remove log

* version doc

* version doc
2025-02-13 16:27:41 +08:00
heheer
686b09afd1 chatbot not overflow (#3777)
* chatbot not overflow

* add comment
2025-02-13 15:10:22 +08:00
heheer
3cfec37e9d fix embed chatbot default open (#3774) 2025-02-13 13:36:56 +08:00
Archer
d3641c877c perf: unlogin user fetch data (#3775)
* model config

* feat: normalization embedding

* perf: unlogin user fetch data
2025-02-13 13:36:33 +08:00
Archer
1094c65f2b perf: http empty params (#3773)
* model config

* feat: normalization embedding

* perf: http empty params

* doc
2025-02-13 10:35:11 +08:00
Archer
abe082b9ab i18n perf (#3770)
* model config

* feat: normalization embedding

* perf: mark ui

* perf: i18n

* fix: rerank error tip
2025-02-12 16:36:21 +08:00
heheer
132cf69372 optimize dnd drag code (#3768) 2025-02-12 15:25:31 +08:00
heheer
06a8a5e23d fix: simple mode variables dnd (#3767)
* fix: simple mode variables dnd

* optimize dnd drag
2025-02-12 14:36:04 +08:00
heheer
c42deab63b global variable & interactive node dnd (#3764) 2025-02-12 12:27:36 +08:00
Archer
58f715e878 perf: request quantity;perf: share page error circulation;perf: share chat toast (#3763)
* model config

* feat: normalization embedding

* perf: share page error circulation

* perf: request quantity

* perf: share chat toast

* perf: queue
2025-02-12 11:36:29 +08:00
Archer
116936ffa9 更新 share.md (#3757) 2025-02-11 23:54:42 +08:00
heheer
f5d045eece export csv format & log title debounce (#3754) 2025-02-11 17:36:00 +08:00
sbcyk
8ac6494e60 Update chat.md (#3746)
示例代码的json内容少了一个引号
2025-02-11 17:31:30 +08:00
heheer
f002896a24 chat logs filter & export (#3737)
* chat logs filter & export

* export chat detail
2025-02-11 16:32:47 +08:00
Archer
8738c32fb0 4.8.21 feature (#3742)
* model config

* feat: normalization embedding

* adapt unstream reasoning response

* remove select app

* perf: dataset search code

* fix: multiple audio video show

* perf: query extension output

* perf: link check

* perf: faq doc

* fix: ts

* feat: support reasoning text output

* feat: workflow support reasoning output
2025-02-11 13:53:08 +08:00
heheer
896a3f1472 add plugin unexist error tips (#3717)
* add plugin unexist error tips

* throw error when run plugin

* check workflow

* plugin data avoid request twice

* auth owner tmbId

* fix
2025-02-10 15:20:49 +08:00
John Chen
4284b78707 Update configuration.md (#3725)
由于4.8.20版本放弃在config.json中配置模型,在说明文档中,修正二级标题的版本号,并添加注释
2025-02-10 09:13:17 +08:00
Archer
fac5b6b50d 更新 4820.md (#3730) 2025-02-09 10:06:08 +08:00
Archer
51e17a47fa feat: normalization embedding;feat: model top_p param config (#3723)
* edit form force close image select

* model config

* feat: normalization embedding

* perf: add share page title force refresh
2025-02-08 12:16:46 +08:00
Archer
42b2046f96 4.8.21 feature (#3720)
* agent search demo

* edit form force close image select

* feat: llm params and doubao1.5

* perf: model error tip

* fix: template register path

* package
2025-02-08 10:44:33 +08:00
heheer
bb82b515e0 feat: auto adapt outlink chatwindow position (#3707) 2025-02-08 09:49:41 +08:00
clidxhk
fe688cdf2d Update utils.ts (#3699)
本地windows平台开发,加载model列表出现两次盘符导致加载失败,修改代码确保生成的路径不会包含重复的盘符,从而避免 ENOENT 错误。
2025-02-07 09:52:08 +08:00
Archer
0d35326909 fix: yml (#3709) 2025-02-06 16:03:45 +08:00
Archer
d857a391b3 4.8.20 update (#3706)
* fix: rerank auth token

* feat: check null value

* bind notify

* perf: reasoning config

* Adapt mongo 4.x index
2025-02-06 14:34:43 +08:00
Archer
772c1cde77 remove log (#3692) 2025-02-05 11:17:38 +08:00
250 changed files with 5071 additions and 1897 deletions

Binary file not shown.

After

Width:  |  Height:  |  Size: 332 KiB

View File

@@ -13,8 +13,8 @@ weight: 707
下面配置文件示例中包含了系统参数和各个模型配置:
## 4.6.8+ 版本新配置文件示例
## 4.8.20+ 版本新配置文件示例
> 从4.8.20版本开始,模型在页面中进行配置。
```json
{
"feConfigs": {
@@ -27,4 +27,4 @@ weight: 707
"pgHNSWEfSearch": 100 // 向量搜索参数。越大搜索越精确但是速度越慢。设置为100有99%+精度。
}
}
```
```

View File

@@ -11,7 +11,7 @@ weight: 707
1. 基础的网络知识:端口,防火墙……
2. Docker 和 Docker Compose 基础知识
3. 大模型相关接口和参数
3. 大模型相关接口和参数
4. RAG 相关知识:向量模型,向量数据库,向量检索
## 部署架构图
@@ -211,6 +211,8 @@ docker restart oneapi
### 6. 配置模型
务必先配置至少一组模型,否则系统无法正常使用。
[点击查看模型配置教程](/docs/development/modelConfig/intro/)
## FAQ

View File

@@ -9,17 +9,31 @@ images: []
## 一、错误排查方式
遇到问题先按下面方式排查。
可以先找找[Issue](https://github.com/labring/FastGPT/issues),或新提 Issue。私有部署错误,务必提供详细的操作步骤、日志、截图,否则很难排查。
### 获取后端错误
1. `docker ps -a` 查看所有容器运行状态,检查是否全部 running,如有异常,尝试`docker logs 容器名`查看对应日志。
2. 容器都运行正常的,`docker logs 容器名` 查看报错日志
3. 带有`requestId`的,都是 OneAPI 提示错误,大部分都是因为模型接口报错。
4. 无法解决时,可以找找[Issue](https://github.com/labring/FastGPT/issues),或新提 Issue。私有部署错误,务必提供详细的日志,否则很难排查。
### 前端错误
前端报错时,页面会出现崩溃,并提示检查控制台日志。可以打开浏览器控制台,并查看`console`中的 log 日志。还可以点击对应 log 的超链接,会提示到具体错误文件,可以把这些详细错误信息提供,方便排查。
### OneAPI 错误
带有`requestId`的,都是 OneAPI 提示错误,大部分都是因为模型接口报错。可以参考 [OneAPI 常见错误](/docs/development/faq/#三常见的-oneapi-错误)
## 二、通用问题
### 前端页面崩溃
1. 90% 情况是模型配置不正确:确保每类模型都至少有一个启用;检查模型中一些`对象`参数是否异常(数组和对象),如果为空,可以尝试给个空数组或空对象。
2. 少部分是由于浏览器兼容问题,由于项目中包含一些高阶语法,可能低版本浏览器不兼容,可以将具体操作步骤和控制台中错误信息提供 issue。
3. 关闭浏览器翻译功能,如果浏览器开启了翻译,可能会导致页面崩溃。
### 通过sealos部署的话是否没有本地部署的一些限制
![](/imgs/faq1.png)
这是索引模型的长度限制,通过任何方式部署都一样的,但不同索引模型的配置不一样,可以在后台修改参数。
@@ -128,9 +142,13 @@ OneAPI 的 API Key 配置错误,需要修改`OPENAI_API_KEY`环境变量,并
3. ....
### Tiktoken 下载失败
由于 OneAPI 会在启动时从网络下载一个 tiktoken 的依赖,如果网络异常,就会导致启动失败。可以参考[OneAPI 离线部署](https://blog.csdn.net/wanh/article/details/139039216)解决。
## 四、常见模型问题
### 如何检查模型问题
### 如何检查模型可用性问题
1. 私有部署模型,先确认部署的模型是否正常。
2. 通过 CURL 请求,直接测试上游模型是否正常运行(云端模型或私有模型均进行测试)
@@ -403,3 +421,7 @@ curl --location --request POST 'https://oneapi.xxxx/v1/chat/completions' \
"tool_choice": "auto"
}'
```
### 向量检索得分大于 1
由于模型没有归一化导致的。目前仅支持归一化的模型。

View File

@@ -15,8 +15,8 @@ weight: 705
- [Git](http://git-scm.com/)
- [Docker](https://www.docker.com/)(构建镜像)
- [Node.js v18.17 / v20.x](http://nodejs.org)版本尽量一样可以使用nvm管理node版本
- [pnpm](https://pnpm.io/) 版本 8.6.0 (目前官方的开发环境)
- [Node.js v20.14.0](http://nodejs.org)版本尽量一样可以使用nvm管理node版本
- [pnpm](https://pnpm.io/) 推荐版本 9.4.0 (目前官方的开发环境)
- make命令: 根据不同平台,百度安装 (官方是GNU Make 4.3)
## 开始本地开发
@@ -77,8 +77,6 @@ Mongo 数据库需要注意,需要注意在连接地址中增加 `directConnec
可参考项目根目录下的 `dev.md`,第一次编译运行可能会有点慢,需要点耐心哦
```bash
# 给自动化脚本代码执行权限(非 linux 系统, 可以手动执行里面的 postinstall.sh 文件内容)
chmod -R +x ./scripts/
# 代码根目录下执行,会安装根 package、projects 和 packages 内所有依赖
# 如果提示 isolate-vm 安装失败可以参考https://github.com/laverdet/isolated-vm?tab=readme-ov-file#requirements
pnpm i

View File

@@ -43,7 +43,7 @@ weight: 744
{{% alert icon="🤖 " context="success" %}}
注意:
1. 目前语音识别模型和重排模型仅会生效一个,所以配置时候,只需要配置一个即可。
2. 用于知识库文件处理的语言模型,至少需要开启一个,否则知识库会报错
2. 系统至少需要一个语言模型和一个索引模型才能正常使用
{{% /alert %}}
#### 核心配置

View File

@@ -7,6 +7,12 @@ toc: true
weight: 852
---
# 如何获取 AppId
可在应用详情的路径里获取 AppId。
![](/imgs/appid.png)
# 发起对话
{{% alert icon="🤖 " context="success" %}}
@@ -102,8 +108,8 @@ curl --location --request POST 'http://localhost:3000/api/v1/chat/completions' \
{{% alert context="info" %}}
- headers.Authorization: Bearer {{apikey}}
- chatId: string | undefined 。
-`undefined` 时(不传入),不使用 FastGpt 提供的上下文功能,完全通过传入的 messages 构建上下文。 不会将你的记录存储到数据库中,你也无法在记录汇总中查阅到。
-`非空字符串`时,意味着使用 chatId 进行对话,自动从 FastGpt 数据库取历史记录,并使用 messages 数组最后一个内容作为用户问题。请自行确保 chatId 唯一长度小于250通常可以是自己系统的对话框ID。
-`undefined` 时(不传入),不使用 FastGpt 提供的上下文功能,完全通过传入的 messages 构建上下文。
-`非空字符串`时,意味着使用 chatId 进行对话,自动从 FastGpt 数据库取历史记录,并使用 messages 数组最后一个内容作为用户问题,其余 message 会被忽略。请自行确保 chatId 唯一长度小于250通常可以是自己系统的对话框ID。
- messages: 结构与 [GPT接口](https://platform.openai.com/docs/api-reference/chat/object) chat模式一致。
- responseChatItemId: string | undefined 。如果传入,则会将该值作为本次对话的响应消息的 IDFastGPT 会自动将该 ID 存入数据库。请确保,在当前`chatId`下,`responseChatItemId`是唯一的。
- detail: 是否返回中间值(模块状态,响应的完整结果等),`stream模式`下会通过`event`进行区分,`非stream模式`结果保存在`responseData`中。
@@ -672,7 +678,7 @@ curl --location --request POST 'http://localhost:3000/api/core/chat/getHistories
"appId": "appId",
"offset": 0,
"pageSize": 20,
"source: "api"
"source": "api"
}'
```

View File

@@ -735,7 +735,7 @@ data 为集合的 ID。
**4.8.19+**
```bash
curl --location --request POST 'http://localhost:3000/api/core/dataset/collection/listv2' \
curl --location --request POST 'http://localhost:3000/api/core/dataset/collection/listV2' \
--header 'Authorization: Bearer {{authorization}}' \
--header 'Content-Type: application/json' \
--data-raw '{

View File

@@ -1,6 +1,6 @@
---
title: 'Api Key 使用与鉴权'
description: 'FastGPT Api Key 使用与鉴权'
title: 'OpenAPI 介绍'
description: 'FastGPT OpenAPI 介绍'
icon: 'key'
draft: false
toc: true
@@ -27,6 +27,7 @@ FastGPT 的 API Key **有 2 类**,一类是全局通用的 key (无法直接
| --------------------- | --------------------- |
| ![](/imgs/fastgpt-api2.jpg) | ![](/imgs/fastgpt-api1.jpg) |
## 基本配置
OpenAPI 中,所有的接口都通过 Header.Authorization 进行鉴权。

View File

@@ -11,7 +11,7 @@ weight: 860
在 FastGPT V4.6.4 中,我们修改了分享链接的数据读取方式,为每个用户生成一个 localId用于标识用户从云端拉取对话记录。但是这种方式仅能保障用户在同一设备同一浏览器中使用如果切换设备或者清空浏览器缓存则会丢失这些记录。这种方式存在一定的风险因此我们仅允许用户拉取近`30天``20条`记录。
分享链接身份鉴权设计的目的在于,将 FastGPT 的对话框快速、安全的接入到你现有的系统中,仅需 2 个接口即可实现。
分享链接身份鉴权设计的目的在于,将 FastGPT 的对话框快速、安全的接入到你现有的系统中,仅需 2 个接口即可实现。该功能目前只在商业版中提供。
## 使用说明

View File

@@ -60,6 +60,10 @@ FastGPT 使用了 one-api 项目来管理模型池,其可以兼容 OpenAI 、A
### 3. 配置模型
### 4. 配置模型
务必先配置至少一组模型,否则系统无法正常使用。
[点击查看模型配置教程](/docs/development/modelConfig/intro/)
## 收费

View File

@@ -1,5 +1,5 @@
---
title: 'V4.8.18'
title: 'V4.8.18(包含升级脚本)'
description: 'FastGPT V4.8.18 更新说明'
icon: 'upgrade'
draft: false

View File

@@ -17,8 +17,8 @@ weight: 804
### 3. 更新镜像:
- 更新 fastgpt 镜像 tag: v4.8.20
- 更新 fastgpt-pro 商业版镜像 tag: v4.8.20
- 更新 fastgpt 镜像 tag: v4.8.20-fix2
- 更新 fastgpt-pro 商业版镜像 tag: v4.8.20-fix2
- Sandbox 镜像无需更新
### 4. 运行升级脚本
@@ -35,7 +35,7 @@ curl --location --request POST 'https://{{host}}/api/admin/initv4820' \
## 完整更新内容
1. 新增 - 可视化模型参数配置。预设超过 100 个模型配置。同时支持所有类型模型的一键测试。(预计下个版本会完全支持在页面上配置渠道)。
1. 新增 - 可视化模型参数配置,取代原配置文件配置模型。预设超过 100 个模型配置。同时支持所有类型模型的一键测试。(预计下个版本会完全支持在页面上配置渠道)。
2. 新增 - DeepSeek reasoner 模型支持输出思考过程。
3. 新增 - 使用记录导出和仪表盘。
4. 新增 - markdown 语法扩展,支持音视频(代码块 audio 和 video

View File

@@ -0,0 +1,39 @@
---
title: 'V4.8.21'
description: 'FastGPT V4.8.21 更新说明'
icon: 'upgrade'
draft: false
toc: true
weight: 803
---
## 更新指南
### 1. 做好数据库备份
### 2. 更新镜像:
- 更新 fastgpt 镜像 tag: v4.8.21-fix
- 更新 fastgpt-pro 商业版镜像 tag: v4.8.21-fix
- Sandbox 镜像无需更新
## 完整更新内容
1. 新增 - 弃用/已删除的插件提示。
2. 新增 - 对话日志按来源分类、标题检索、导出功能。
3. 新增 - 全局变量支持拖拽排序。
4. 新增 - LLM 模型支持 top_p, response_format, json_schema 参数。
5. 新增 - Doubao1.5 模型预设。阿里 embedding3 预设。
6. 新增 - 向量模型支持归一化配置,以便适配未归一化的向量模型,例如 Doubao 的 embedding 模型。
7. 新增 - AI 对话节点,支持输出思考过程结果,可用于其他节点引用。
8. 优化 - 网站嵌入式聊天窗口,增加窗口位置适配。
9. 优化 - 模型未配置时错误提示。
10. 优化 - 适配非 Stream 模式思考输出。
11. 优化 - 增加 TTS voice 未配置时的空指针保护。
12. 优化 - Markdown 链接解析分割规则,改成严格匹配模式,牺牲兼容多种情况,减少误解析。
13. 优化 - 减少未登录用户的数据获取范围,提高系统隐私性。
14. 修复 - 简易模式,切换到其他非视觉模型时候,会强制关闭图片识别。
15. 修复 - o1,o3 模型,在测试时候字段映射未生效导致报错。
16. 修复 - 公众号对话空指针异常。
17. 修复 - 多个音频/视频文件展示异常。
18. 修复 - 分享链接鉴权报错后无限循环。

View File

@@ -0,0 +1,61 @@
---
title: 'V4.8.22(进行中)'
description: 'FastGPT V4.8.22 更新说明'
icon: 'upgrade'
draft: false
toc: true
weight: 802
---
## 🌟更新指南
### 1. 做好数据库备份
### 2. 更新镜像:
- 更新 fastgpt 镜像 tag: v4.8.22-alpha
- 更新 fastgpt-pro 商业版镜像 tag: v4.8.22-alpha
- Sandbox 镜像无需更新
### 3. 运行升级脚本
仅商业版,并提供 Saas 服务的用户需要运行该升级脚本。
从任意终端,发起 1 个 HTTP 请求。其中 {{rootkey}} 替换成环境变量里的 `rootkey`,{{host}} 替换成**FastGPT 域名**。
```bash
curl --location --request POST 'https://{{host}}/api/admin/initv4822' \
--header 'rootkey: {{rootkey}}' \
--header 'Content-Type: application/json'
```
会迁移联系方式到对应用户表中。
## 🚀 新增内容
1. AI 对话节点解析 `<think></think>` 标签内容作为思考链,便于各类模型进行思考链输出。需主动开启模型输出思考。
2. 对话 API 优化,无论是否传递 chatId,都会保存对话日志。未传递 chatId,则随机生成一个 chatId 来进行存储。
3. ppio 模型提供商
## ⚙️ 优化
1. 模型未配置时提示,减少冲突提示。
2. 使用记录代码。
3. 内容提取节点,字段描述过长时换行。同时修改其输出名用 key而不是 description。
4. 团队管理交互。
5. 对话接口,非流响应,增加报错字段。
## 🐛 修复
1. 思考内容未进入到输出 Tokens.
2. 思考链流输出时,有时与正文顺序偏差。
3. API 调用工作流,如果传递的图片不支持 Head 检测时,图片会被过滤。已增加该类错误检测,避免被错误过滤。
4. 模板市场部分模板错误。
5. 免登录窗口无法正常判断语言识别是否开启。
6. 对话日志导出,未兼容 sub path。
7. 切换团队时未刷新成员列表
8. list 接口在联查 member 时,存在空指针可能性。
9. 工作流基础节点无法升级。
10. 向量检索结果未去重。
11. 用户选择节点无法正常连线。
12. 对话记录保存时source 未正常记录。

View File

@@ -7,7 +7,7 @@ toc: true
weight: 234
---
知识库搜索具体参数说明,以及内部逻辑请移步:[FastGPT知识库搜索方案](/docs/course/data_search/)
知识库搜索具体参数说明,以及内部逻辑请移步:[FastGPT知识库搜索方案](/docs/guide/knowledge_base/rag/)
## 特点
@@ -27,7 +27,7 @@ weight: 234
### 输入 - 搜索参数
[点击查看参数介绍](/docs/course/data_search/#搜索参数)
[点击查看参数介绍](/docs/guide/knowledge_base/dataset_engine/#搜索参数)
### 输出 - 引用内容

View File

@@ -20,7 +20,7 @@ weight: 502
![](/imgs/fastgpt-api1.jpg)
{{% alert icon="🍅" context="success" %}}
Tips: 安全起见,你可以设置一个额度或者过期时间,放置 key 被滥用。
Tips: 安全起见,你可以设置一个额度或者过期时间,防止 key 被滥用。
{{% /alert %}}

View File

@@ -114,15 +114,15 @@ services:
# fastgpt
sandbox:
container_name: sandbox
image: ghcr.io/labring/fastgpt-sandbox:v4.8.20 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.8.20 # 阿里云
image: ghcr.io/labring/fastgpt-sandbox:v4.8.21-fix # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.8.21-fix # 阿里云
networks:
- fastgpt
restart: always
fastgpt:
container_name: fastgpt
image: ghcr.io/labring/fastgpt:v4.8.20 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.20 # 阿里云
image: ghcr.io/labring/fastgpt:v4.8.21-fix # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.21-fix # 阿里云
ports:
- 3000:3000
networks:

View File

@@ -72,15 +72,15 @@ services:
# fastgpt
sandbox:
container_name: sandbox
image: ghcr.io/labring/fastgpt-sandbox:v4.8.20 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.8.20 # 阿里云
image: ghcr.io/labring/fastgpt-sandbox:v4.8.21-fix # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.8.21-fix # 阿里云
networks:
- fastgpt
restart: always
fastgpt:
container_name: fastgpt
image: ghcr.io/labring/fastgpt:v4.8.20 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.20 # 阿里云
image: ghcr.io/labring/fastgpt:v4.8.21-fix # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.21-fix # 阿里云
ports:
- 3000:3000
networks:

View File

@@ -53,15 +53,15 @@ services:
wait $$!
sandbox:
container_name: sandbox
image: ghcr.io/labring/fastgpt-sandbox:v4.8.20 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.8.20 # 阿里云
image: ghcr.io/labring/fastgpt-sandbox:v4.8.21-fix # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.8.21-fix # 阿里云
networks:
- fastgpt
restart: always
fastgpt:
container_name: fastgpt
image: ghcr.io/labring/fastgpt:v4.8.20 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.20 # 阿里云
image: ghcr.io/labring/fastgpt:v4.8.21-fix # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.21-fix # 阿里云
ports:
- 3000:3000
networks:

View File

@@ -7,7 +7,7 @@
"format-code": "prettier --config \"./.prettierrc.js\" --write \"./**/src/**/*.{ts,tsx,scss}\"",
"format-doc": "zhlint --dir ./docSite *.md --fix",
"gen:theme-typings": "chakra-cli tokens packages/web/styles/theme.ts --out node_modules/.pnpm/node_modules/@chakra-ui/styled-system/dist/theming.types.d.ts",
"postinstall": "sh ./scripts/postinstall.sh",
"postinstall": "pnpm gen:theme-typings",
"initIcon": "node ./scripts/icon/init.js",
"previewIcon": "node ./scripts/icon/index.js",
"api:gen": "tsc ./scripts/openapi/index.ts && node ./scripts/openapi/index.js && npx @redocly/cli build-docs ./scripts/openapi/openapi.json -o ./projects/app/public/openapi/index.html",

View File

@@ -16,8 +16,8 @@ export const bucketNameMap = {
}
};
export const ReadFileBaseUrl = `${process.env.FE_DOMAIN || ''}${process.env.NEXT_PUBLIC_BASE_URL || ''}/api/common/file/read`;
export const ReadFileBaseUrl = `${process.env.FILE_DOMAIN || process.env.FE_DOMAIN || ''}${process.env.NEXT_PUBLIC_BASE_URL || ''}/api/common/file/read`;
export const documentFileType = '.txt, .docx, .csv, .xlsx, .pdf, .md, .html, .pptx';
export const imageFileType =
'.jpg, .jpeg, .png, .gif, .bmp, .webp, .svg, .tiff, .tif, .ico, .heic, .heif, .avif';
'.jpg, .jpeg, .png, .gif, .bmp, .webp, .svg, .tiff, .tif, .ico, .heic, .heif, .avif, .raw, .cr2, .nef, .arw, .dng, .psd, .ai, .eps, .emf, .wmf, .jfif, .exif, .pgm, .ppm, .pbm, .jp2, .j2k, .jpf, .jpx, .jpm, .mj2, .xbm, .pcx';

View File

@@ -1,5 +1,5 @@
import { detect } from 'jschardet';
import { documentFileType, imageFileType } from './constants';
import { documentFileType } from './constants';
import { ChatFileTypeEnum } from '../../core/chat/constants';
import { UserChatItemValueItemType } from '../../core/chat/type';
import * as fs from 'fs';
@@ -25,6 +25,7 @@ export const detectFileEncodingByPath = async (path: string) => {
const fd = await fs.promises.open(path, 'r');
try {
// Read file head
// @ts-ignore
const { bytesRead } = await fd.read(buffer, 0, MAX_BYTES, 0);
const actualBuffer = buffer.slice(0, bytesRead);
@@ -37,40 +38,49 @@ export const detectFileEncodingByPath = async (path: string) => {
// Url => user upload file type
export const parseUrlToFileType = (url: string): UserChatItemValueItemType['file'] | undefined => {
if (typeof url !== 'string') return;
const parseUrl = new URL(url, 'https://locaohost:3000');
const filename = (() => {
// Check base64 image
if (url.startsWith('data:image/')) {
const mime = url.split(',')[0].split(':')[1].split(';')[0];
return `image.${mime.split('/')[1]}`;
}
// Old version file url: https://xxx.com/file/read?filename=xxx.pdf
const filenameQuery = parseUrl.searchParams.get('filename');
if (filenameQuery) return filenameQuery;
// Handle base64 image
if (url.startsWith('data:')) {
const matches = url.match(/^data:([^;]+);base64,/);
if (!matches) return;
// Common file https://xxx.com/xxx.pdf?xxxx=xxx
const pathname = parseUrl.pathname;
if (pathname) return pathname.split('/').pop();
})();
const mimeType = matches[1].toLowerCase();
if (!mimeType.startsWith('image/')) return;
if (!filename) return;
const extension = filename.split('.').pop()?.toLowerCase() || '';
if (!extension) return;
if (documentFileType.includes(extension)) {
const extension = mimeType.split('/')[1];
return {
type: ChatFileTypeEnum.file,
name: filename,
type: ChatFileTypeEnum.image,
name: `image.${extension}`,
url
};
}
if (imageFileType.includes(extension)) {
try {
const parseUrl = new URL(url, 'https://localhost:3000');
// Get filename from URL
const filename = parseUrl.searchParams.get('filename') || parseUrl.pathname.split('/').pop();
const extension = filename?.split('.').pop()?.toLowerCase() || '';
// If it's a document type, return as file, otherwise treat as image
if (extension && documentFileType.includes(extension)) {
return {
type: ChatFileTypeEnum.file,
name: filename || 'null',
url
};
}
// Default to image type for non-document files
return {
type: ChatFileTypeEnum.image,
name: filename,
name: filename || 'null.png',
url
};
} catch (error) {
return {
type: ChatFileTypeEnum.image,
name: 'invalid.png',
url
};
}

View File

@@ -26,7 +26,7 @@ export const simpleText = (text = '') => {
};
export const valToStr = (val: any) => {
if (val === undefined) return 'undefined';
if (val === undefined) return '';
if (val === null) return 'null';
if (typeof val === 'object') return JSON.stringify(val);

View File

@@ -26,11 +26,16 @@ type BaseModelItemType = {
export type LLMModelItemType = PriceType &
BaseModelItemType & {
type: ModelTypeEnum.llm;
// Model params
maxContext: number;
maxResponse: number;
quoteMaxToken: number;
maxTemperature?: number;
showTopP?: boolean;
responseFormatList?: string[];
showStopSign?: boolean;
censor?: boolean;
vision?: boolean;
reasoning?: boolean;
@@ -59,6 +64,7 @@ export type EmbeddingModelItemType = PriceType &
maxToken: number; // model max token
weight: number; // training weight
hidden?: boolean; // Disallow creation
normalization?: boolean; // normalization processing
defaultConfig?: Record<string, any>; // post request config
dbConfig?: Record<string, any>; // Custom parameters for storage
queryConfig?: Record<string, any>; // Custom parameters for query

View File

@@ -61,6 +61,9 @@ export const getModelFromList = (
model: string
) => {
const modelData = modelList.find((item) => item.model === model) ?? modelList[0];
if (!modelData) {
throw new Error('No Key model is configured');
}
const provider = getModelProvider(modelData.provider);
return {
...modelData,

View File

@@ -22,6 +22,7 @@ export type ModelProviderIdType =
| 'StepFun'
| 'Yi'
| 'Siliconflow'
| 'PPIO'
| 'Ollama'
| 'BAAI'
| 'FishAudio'
@@ -71,11 +72,6 @@ export const ModelProviderList: ModelProviderType[] = [
name: 'Groq',
avatar: 'model/groq'
},
{
id: 'AliCloud',
name: i18nT('common:model_alicloud'),
avatar: 'model/alicloud'
},
{
id: 'Qwen',
name: i18nT('common:model_qwen'),
@@ -86,6 +82,11 @@ export const ModelProviderList: ModelProviderType[] = [
name: i18nT('common:model_doubao'),
avatar: 'model/doubao'
},
{
id: 'DeepSeek',
name: 'DeepSeek',
avatar: 'model/deepseek'
},
{
id: 'ChatGLM',
name: i18nT('common:model_chatglm'),
@@ -96,11 +97,6 @@ export const ModelProviderList: ModelProviderType[] = [
name: i18nT('common:model_ernie'),
avatar: 'model/ernie'
},
{
id: 'DeepSeek',
name: 'DeepSeek',
avatar: 'model/deepseek'
},
{
id: 'Moonshot',
name: i18nT('common:model_moonshot'),
@@ -162,11 +158,21 @@ export const ModelProviderList: ModelProviderType[] = [
name: i18nT('common:model_moka'),
avatar: 'model/moka'
},
{
id: 'AliCloud',
name: i18nT('common:model_alicloud'),
avatar: 'model/alicloud'
},
{
id: 'Siliconflow',
name: i18nT('common:model_siliconflow'),
avatar: 'model/siliconflow'
},
{
id: 'PPIO',
name: i18nT('common:model_ppio'),
avatar: 'model/ppio'
},
{
id: 'Other',
name: i18nT('common:model_other'),

View File

@@ -1,14 +1,12 @@
import openai from 'openai';
import type {
ChatCompletionMessageToolCall,
ChatCompletionChunk,
ChatCompletionMessageParam as SdkChatCompletionMessageParam,
ChatCompletionToolMessageParam,
ChatCompletionContentPart as SdkChatCompletionContentPart,
ChatCompletionUserMessageParam as SdkChatCompletionUserMessageParam,
ChatCompletionToolMessageParam as SdkChatCompletionToolMessageParam,
ChatCompletionAssistantMessageParam as SdkChatCompletionAssistantMessageParam,
ChatCompletionContentPartText
ChatCompletionAssistantMessageParam as SdkChatCompletionAssistantMessageParam
} from 'openai/resources';
import { ChatMessageTypeEnum } from './constants';
import { WorkflowInteractiveResponseType } from '../workflow/template/system/interactive/type';
@@ -48,6 +46,7 @@ export type ChatCompletionMessageParam = (
| CustomChatCompletionToolMessageParam
| CustomChatCompletionAssistantMessageParam
) & {
reasoning_text?: string;
dataId?: string;
hideInUI?: boolean;
};
@@ -71,7 +70,8 @@ export type ChatCompletionMessageFunctionCall =
};
// Stream response
export type StreamChatType = Stream<ChatCompletionChunk>;
export type StreamChatType = Stream<openai.Chat.Completions.ChatCompletionChunk>;
export type UnStreamChatType = openai.Chat.Completions.ChatCompletion;
export default openai;
export * from 'openai';

View File

@@ -74,13 +74,17 @@ export type AppDetailType = AppSchema & {
export type AppSimpleEditFormType = {
// templateId: string;
aiSettings: {
model: string;
systemPrompt?: string | undefined;
temperature?: number;
maxToken?: number;
isResponseAnswerText: boolean;
[NodeInputKeyEnum.aiModel]: string;
[NodeInputKeyEnum.aiSystemPrompt]?: string | undefined;
[NodeInputKeyEnum.aiChatTemperature]?: number;
[NodeInputKeyEnum.aiChatMaxToken]?: number;
[NodeInputKeyEnum.aiChatIsResponseText]: boolean;
maxHistories: number;
[NodeInputKeyEnum.aiChatReasoning]?: boolean;
[NodeInputKeyEnum.aiChatReasoning]?: boolean; // Is open reasoning mode
[NodeInputKeyEnum.aiChatTopP]?: number;
[NodeInputKeyEnum.aiChatStopSign]?: string;
[NodeInputKeyEnum.aiChatResponseFormat]?: string;
[NodeInputKeyEnum.aiChatJsonSchema]?: string;
};
dataset: {
datasets: SelectedDatasetType;
@@ -119,6 +123,10 @@ export type SettingAIDataType = {
maxHistories?: number;
[NodeInputKeyEnum.aiChatVision]?: boolean; // Is open vision mode
[NodeInputKeyEnum.aiChatReasoning]?: boolean; // Is open reasoning mode
[NodeInputKeyEnum.aiChatTopP]?: number;
[NodeInputKeyEnum.aiChatStopSign]?: string;
[NodeInputKeyEnum.aiChatResponseFormat]?: string;
[NodeInputKeyEnum.aiChatJsonSchema]?: string;
};
// variable

View File

@@ -7,6 +7,8 @@ import { StoreNodeItemType } from '../workflow/type/node';
import { DatasetSearchModeEnum } from '../dataset/constants';
import { WorkflowTemplateBasicType } from '../workflow/type';
import { AppTypeEnum } from './constants';
import { AppErrEnum } from '../../common/error/code/app';
import { PluginErrEnum } from '../../common/error/code/plugin';
export const getDefaultAppForm = (): AppSimpleEditFormType => {
return {
@@ -117,7 +119,8 @@ export const appWorkflow2Form = ({
version: node.version,
inputs: node.inputs,
outputs: node.outputs,
templateType: FlowNodeTemplateTypeEnum.other
templateType: FlowNodeTemplateTypeEnum.other,
pluginData: node.pluginData
});
} else if (node.flowNodeType === FlowNodeTypeEnum.systemConfig) {
defaultAppForm.chatConfig = getAppChatConfig({
@@ -147,3 +150,18 @@ export const getAppType = (config?: WorkflowTemplateBasicType | AppSimpleEditFor
}
return '';
};
export const checkAppUnExistError = (error?: string) => {
const unExistError: Array<string> = [
AppErrEnum.unAuthApp,
AppErrEnum.unExist,
PluginErrEnum.unAuth,
PluginErrEnum.unExist
];
if (!!error && unExistError.includes(error)) {
return error;
} else {
return undefined;
}
};

View File

@@ -46,7 +46,16 @@ export const chats2GPTMessages = ({
messages.forEach((item) => {
const dataId = reserveId ? item.dataId : undefined;
if (item.obj === ChatRoleEnum.Human) {
if (item.obj === ChatRoleEnum.System) {
const content = item.value?.[0]?.text?.content;
if (content) {
results.push({
dataId,
role: ChatCompletionRequestMessageRoleEnum.System,
content
});
}
} else if (item.obj === ChatRoleEnum.Human) {
const value = item.value
.map((item) => {
if (item.type === ChatItemValueTypeEnum.text) {
@@ -80,15 +89,6 @@ export const chats2GPTMessages = ({
role: ChatCompletionRequestMessageRoleEnum.User,
content: simpleUserContentPart(value)
});
} else if (item.obj === ChatRoleEnum.System) {
const content = item.value?.[0]?.text?.content;
if (content) {
results.push({
dataId,
role: ChatCompletionRequestMessageRoleEnum.System,
content
});
}
} else {
const aiResults: ChatCompletionMessageParam[] = [];
@@ -349,7 +349,7 @@ export const chatValue2RuntimePrompt = (value: ChatItemValueItemType[]): Runtime
};
value.forEach((item) => {
if (item.type === 'file' && item.file) {
prompt.files?.push(item.file);
prompt.files.push(item.file);
} else if (item.text) {
prompt.text += item.text.content;
}

View File

@@ -33,8 +33,10 @@ export enum WorkflowIOValueTypeEnum {
dynamic = 'dynamic',
// plugin special type
selectApp = 'selectApp',
selectDataset = 'selectDataset'
selectDataset = 'selectDataset',
// abandon
selectApp = 'selectApp'
}
export const toolValueTypeList = [
@@ -142,6 +144,10 @@ export enum NodeInputKeyEnum {
aiChatVision = 'aiChatVision',
stringQuoteText = 'stringQuoteText',
aiChatReasoning = 'aiChatReasoning',
aiChatTopP = 'aiChatTopP',
aiChatStopSign = 'aiChatStopSign',
aiChatResponseFormat = 'aiChatResponseFormat',
aiChatJsonSchema = 'aiChatJsonSchema',
// dataset
datasetSelectList = 'datasets',
@@ -154,6 +160,10 @@ export enum NodeInputKeyEnum {
datasetSearchExtensionBg = 'datasetSearchExtensionBg',
collectionFilterMatch = 'collectionFilterMatch',
authTmbId = 'authTmbId',
datasetDeepSearch = 'datasetDeepSearch',
datasetDeepSearchModel = 'datasetDeepSearchModel',
datasetDeepSearchMaxTimes = 'datasetDeepSearchMaxTimes',
datasetDeepSearchBg = 'datasetDeepSearchBg',
// concat dataset
datasetQuoteList = 'system_datasetQuoteList',

View File

@@ -140,7 +140,14 @@ export enum FlowNodeTypeEnum {
}
// node IO value type
export const FlowValueTypeMap = {
export const FlowValueTypeMap: Record<
WorkflowIOValueTypeEnum,
{
label: string;
value: WorkflowIOValueTypeEnum;
abandon?: boolean;
}
> = {
[WorkflowIOValueTypeEnum.string]: {
label: 'String',
value: WorkflowIOValueTypeEnum.string
@@ -189,10 +196,6 @@ export const FlowValueTypeMap = {
label: i18nT('common:core.workflow.Dataset quote'),
value: WorkflowIOValueTypeEnum.datasetQuote
},
[WorkflowIOValueTypeEnum.selectApp]: {
label: i18nT('common:plugin.App'),
value: WorkflowIOValueTypeEnum.selectApp
},
[WorkflowIOValueTypeEnum.selectDataset]: {
label: i18nT('common:core.chat.Select dataset'),
value: WorkflowIOValueTypeEnum.selectDataset
@@ -200,6 +203,11 @@ export const FlowValueTypeMap = {
[WorkflowIOValueTypeEnum.dynamic]: {
label: i18nT('common:core.workflow.dynamic_input'),
value: WorkflowIOValueTypeEnum.dynamic
},
[WorkflowIOValueTypeEnum.selectApp]: {
label: 'selectApp',
value: WorkflowIOValueTypeEnum.selectApp,
abandon: true
}
};
@@ -219,3 +227,6 @@ export const datasetQuoteValueDesc = `{
q: string;
a: string
}[]`;
export const datasetSelectValueDesc = `{
datasetId: string;
}[]`;

View File

@@ -123,6 +123,7 @@ export type DispatchNodeResponseType = {
temperature?: number;
maxToken?: number;
quoteList?: SearchDataResponseItemType[];
reasoningText?: string;
historyPreview?: {
obj: `${ChatRoleEnum}`;
value: string;
@@ -133,9 +134,17 @@ export type DispatchNodeResponseType = {
limit?: number;
searchMode?: `${DatasetSearchModeEnum}`;
searchUsingReRank?: boolean;
extensionModel?: string;
extensionResult?: string;
extensionTokens?: number;
queryExtensionResult?: {
model: string;
inputTokens: number;
outputTokens: number;
query: string;
};
deepSearchResult?: {
model: string;
inputTokens: number;
outputTokens: number;
};
// dataset concat
concatLength?: number;
@@ -198,6 +207,11 @@ export type DispatchNodeResponseType = {
// tool params
toolParamsResult?: Record<string, any>;
// abandon
extensionModel?: string;
extensionResult?: string;
extensionTokens?: number;
};
export type DispatchNodeResultType<T = {}> = {
@@ -221,6 +235,10 @@ export type AIChatNodeProps = {
[NodeInputKeyEnum.aiChatIsResponseText]: boolean;
[NodeInputKeyEnum.aiChatVision]?: boolean;
[NodeInputKeyEnum.aiChatReasoning]?: boolean;
[NodeInputKeyEnum.aiChatTopP]?: number;
[NodeInputKeyEnum.aiChatStopSign]?: string;
[NodeInputKeyEnum.aiChatResponseFormat]?: string;
[NodeInputKeyEnum.aiChatJsonSchema]?: string;
[NodeInputKeyEnum.aiChatQuoteRole]?: AiChatQuoteRoleType;
[NodeInputKeyEnum.aiChatQuoteTemplate]?: string;

View File

@@ -10,6 +10,7 @@ import { FlowNodeOutputItemType, ReferenceValueType } from '../type/io';
import { ChatItemType, NodeOutputItemType } from '../../../core/chat/type';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '../../../core/chat/constants';
import { replaceVariable, valToStr } from '../../../common/string/tools';
import { ChatCompletionChunk } from 'openai/resources';
export const getMaxHistoryLimitFromNodes = (nodes: StoreNodeItemType[]): number => {
let limit = 10;
@@ -292,13 +293,12 @@ export const getReferenceVariableValue = ({
export const formatVariableValByType = (val: any, valueType?: WorkflowIOValueTypeEnum) => {
if (!valueType) return val;
if (val === undefined || val === null) return;
// Value type check, If valueType invalid, return undefined
if (valueType.startsWith('array') && !Array.isArray(val)) return undefined;
if (valueType === WorkflowIOValueTypeEnum.boolean) return Boolean(val);
if (valueType === WorkflowIOValueTypeEnum.number) return Number(val);
if (valueType === WorkflowIOValueTypeEnum.string) {
if (val === undefined) return 'undefined';
if (val === null) return 'null';
return typeof val === 'object' ? JSON.stringify(val) : String(val);
}
if (
@@ -420,3 +420,137 @@ export function rewriteNodeOutputByHistories(
};
});
}
// Split an unstream LLM response into [reasoning, answer].
// A <think>...</think> section is treated as the reasoning part; everything
// after the closing tag is the answer. When no tag is present, the whole
// text is returned as the answer with empty reasoning.
export const parseReasoningContent = (text: string): [string, string] => {
  const thinkPattern = /<think>([\s\S]*?)<\/think>/;
  const found = text.match(thinkPattern);

  if (!found) {
    return ['', text];
  }

  const reasoning = found[1].trim();
  // Answer is the remaining text after the closing </think> tag.
  const answer = text.slice(found.index! + found[0].length);
  return [reasoning, answer];
};
// Parse <think></think> tags to think and answer - stream response
export const parseReasoningStreamContent = () => {
  // Tri-state: undefined = not yet decided whether the stream opens with
  // <think>; true = currently inside the think section; false = past it
  // (or there never was one).
  let isInThinkTag: boolean | undefined;

  const startTag = '<think>';
  // Accumulates leading chunks until we can tell whether they begin with <think>.
  let startTagBuffer = '';

  const endTag = '</think>';
  // Accumulates content suspected to be (part of) the closing </think> tag.
  let endTagBuffer = '';

  /*
    parseReasoning only controls whether we actively parse <think></think>;
    if the API has already split reasoning out (delta.reasoning_content),
    that reasoning content is still returned.
  */
  const parsePart = (
    part: {
      choices: {
        delta: {
          content?: string;
          reasoning_content?: string;
        };
      }[];
    },
    parseReasoning = false
  ): [string, string] => {
    const content = part.choices?.[0]?.delta?.content || '';

    // @ts-ignore
    const reasoningContent = part.choices?.[0]?.delta?.reasoning_content || '';
    if (reasoningContent || !parseReasoning) {
      isInThinkTag = false;
      return [reasoningContent, content];
    }

    if (!content) {
      return ['', ''];
    }

    // Already past the think section (or there never was one): all answer.
    if (isInThinkTag === false) {
      return ['', content];
    }

    // Still deciding whether the stream opens with a <think> tag.
    if (isInThinkTag === undefined) {
      // Parse content think and answer
      startTagBuffer += content;
      // Too little content to decide yet; hold back output for now.
      if (startTagBuffer.length < startTag.length) {
        return ['', ''];
      }

      if (startTagBuffer.startsWith(startTag)) {
        isInThinkTag = true;
        return [startTagBuffer.slice(startTag.length), ''];
      }

      // No <think> tag matched: not a think stream; flush the buffer as answer.
      isInThinkTag = false;
      return ['', startTagBuffer];
    }

    // Confirmed think content: emit it while watching for </think> in real time.
    /*
      </think> detection scheme:
      buffer anything that might be part of </think> until either the full
      tag is seen or the buffer grows past the tag's length.
      A content chunk can look like any of:
        abc            - no end-tag match at all
        abc<th         - partial end-tag match
        abc</think>    - full end-tag match
        abc</think>abc - full end-tag match
        </think>abc    - full end-tag match
        k>abc          - partial end-tag match (continuation)
    */
    // endTagBuffer specifically records content suspected to be the end tag.
    if (endTagBuffer) {
      endTagBuffer += content;
      if (endTagBuffer.includes(endTag)) {
        isInThinkTag = false;
        // NOTE(review): this assumes the buffered </think> starts at index 0;
        // if think text precedes </think> inside the buffer, that text is
        // dropped and tag characters can leak into the answer — confirm intended.
        const answer = endTagBuffer.slice(endTag.length);
        return ['', answer];
      } else if (endTagBuffer.length >= endTag.length) {
        // Buffer outgrew the tag without matching </think>: the guess failed,
        // we are still in the think phase; flush the buffer as think text.
        const tmp = endTagBuffer;
        endTagBuffer = '';
        return [tmp, ''];
      }
      return ['', ''];
    } else if (content.includes(endTag)) {
      // Full </think> hit inside this chunk: split and finish the think phase.
      isInThinkTag = false;
      const [think, answer] = content.split(endTag);
      return [think, answer];
    } else {
      // No buffer and no full </think>: check whether the chunk ends with a
      // prefix of the end tag and start buffering if so.
      for (let i = 1; i < endTag.length; i++) {
        const partialEndTag = endTag.slice(0, i);
        // Chunk ends with a prefix of </think>
        if (content.endsWith(partialEndTag)) {
          const think = content.slice(0, -partialEndTag.length);
          endTagBuffer += partialEndTag;
          return [think, ''];
        }
      }
    }

    // No end-tag match at all: still plain think content.
    return [content, ''];
  };

  // Expose any unflushed leading chunks (used when the stream ends before the
  // start-tag decision was made).
  const getStartTagBuffer = () => startTagBuffer;

  return {
    parsePart,
    getStartTagBuffer
  };
};

View File

@@ -63,14 +63,12 @@ export const AiChatModule: FlowNodeTemplateType = {
key: NodeInputKeyEnum.aiChatTemperature,
renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
label: '',
value: undefined,
valueType: WorkflowIOValueTypeEnum.number
},
{
key: NodeInputKeyEnum.aiChatMaxToken,
renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
label: '',
value: undefined,
valueType: WorkflowIOValueTypeEnum.number
},
@@ -98,6 +96,30 @@ export const AiChatModule: FlowNodeTemplateType = {
valueType: WorkflowIOValueTypeEnum.boolean,
value: true
},
{
key: NodeInputKeyEnum.aiChatTopP,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
valueType: WorkflowIOValueTypeEnum.number
},
{
key: NodeInputKeyEnum.aiChatStopSign,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
valueType: WorkflowIOValueTypeEnum.string
},
{
key: NodeInputKeyEnum.aiChatResponseFormat,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
valueType: WorkflowIOValueTypeEnum.string
},
{
key: NodeInputKeyEnum.aiChatJsonSchema,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
valueType: WorkflowIOValueTypeEnum.string
},
// settings modal ---
{
...Input_Template_System_Prompt,
@@ -108,7 +130,6 @@ export const AiChatModule: FlowNodeTemplateType = {
Input_Template_History,
Input_Template_Dataset_Quote,
Input_Template_File_Link_Prompt,
{ ...Input_Template_UserChatInput, toolDescription: i18nT('workflow:user_question') }
],
outputs: [
@@ -130,6 +151,20 @@ export const AiChatModule: FlowNodeTemplateType = {
description: i18nT('common:core.module.output.description.Ai response content'),
valueType: WorkflowIOValueTypeEnum.string,
type: FlowNodeOutputTypeEnum.static
},
{
id: NodeOutputKeyEnum.reasoningText,
key: NodeOutputKeyEnum.reasoningText,
required: false,
label: i18nT('workflow:reasoning_text'),
valueType: WorkflowIOValueTypeEnum.string,
type: FlowNodeOutputTypeEnum.static,
invalid: true,
invalidCondition: ({ inputs, llmModelList }) => {
const model = inputs.find((item) => item.key === NodeInputKeyEnum.aiModel)?.value;
const modelItem = llmModelList.find((item) => item.model === model);
return modelItem?.reasoning !== true;
}
}
]
};

View File

@@ -1,5 +1,6 @@
import {
datasetQuoteValueDesc,
datasetSelectValueDesc,
FlowNodeInputTypeEnum,
FlowNodeOutputTypeEnum,
FlowNodeTypeEnum
@@ -38,7 +39,8 @@ export const DatasetSearchModule: FlowNodeTemplateType = {
label: i18nT('common:core.module.input.label.Select dataset'),
value: [],
valueType: WorkflowIOValueTypeEnum.selectDataset,
required: true
required: true,
valueDesc: datasetSelectValueDesc
},
{
key: NodeInputKeyEnum.datasetSimilarity,

View File

@@ -43,14 +43,12 @@ export const ToolModule: FlowNodeTemplateType = {
key: NodeInputKeyEnum.aiChatTemperature,
renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
label: '',
value: undefined,
valueType: WorkflowIOValueTypeEnum.number
},
{
key: NodeInputKeyEnum.aiChatMaxToken,
renderTypeList: [FlowNodeInputTypeEnum.hidden], // Set in the pop-up window
label: '',
value: undefined,
valueType: WorkflowIOValueTypeEnum.number
},
{
@@ -60,6 +58,30 @@ export const ToolModule: FlowNodeTemplateType = {
valueType: WorkflowIOValueTypeEnum.boolean,
value: true
},
{
key: NodeInputKeyEnum.aiChatTopP,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
valueType: WorkflowIOValueTypeEnum.number
},
{
key: NodeInputKeyEnum.aiChatStopSign,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
valueType: WorkflowIOValueTypeEnum.string
},
{
key: NodeInputKeyEnum.aiChatResponseFormat,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
valueType: WorkflowIOValueTypeEnum.string
},
{
key: NodeInputKeyEnum.aiChatJsonSchema,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
valueType: WorkflowIOValueTypeEnum.string
},
{
...Input_Template_System_Prompt,

View File

@@ -1,3 +1,4 @@
import { LLMModelItemType } from '../../ai/model.d';
import { LLMModelTypeEnum } from '../../ai/constants';
import { WorkflowIOValueTypeEnum, NodeInputKeyEnum, NodeOutputKeyEnum } from '../constants';
import { FlowNodeInputTypeEnum, FlowNodeOutputTypeEnum } from '../node/constant';
@@ -77,6 +78,12 @@ export type FlowNodeOutputItemType = {
defaultValue?: any;
required?: boolean;
invalid?: boolean;
invalidCondition?: (e: {
inputs: FlowNodeInputItemType[];
llmModelList: LLMModelItemType[];
}) => boolean;
// component params
customFieldConfig?: CustomFieldConfigType;
};

View File

@@ -43,6 +43,17 @@ export type FlowNodeCommonType = {
pluginId?: string;
isFolder?: boolean;
// pluginType?: AppTypeEnum;
pluginData?: PluginDataType;
};
export type PluginDataType = {
version: string;
diagram?: string;
userGuide?: string;
courseUrl?: string;
name?: string;
avatar?: string;
error?: string;
};
type HandleType = {

View File

@@ -1,5 +1,9 @@
import { MemberGroupSchemaType, MemberGroupType } from 'support/permission/memberGroup/type';
import { OAuthEnum } from './constant';
import { TrackRegisterParams } from './login/api';
import { TeamMemberStatusEnum } from './team/constant';
import { OrgType } from './team/org/type';
import { TeamMemberItemType } from './team/type';
export type PostLoginProps = {
username: string;
@@ -21,3 +25,9 @@ export type FastLoginProps = {
token: string;
code: string;
};
export type SearchResult = {
members: Omit<TeamMemberItemType, 'teamId' | 'permission'>[];
orgs: Omit<OrgType, 'permission' | 'members'>[];
groups: MemberGroupSchemaType[];
};

View File

@@ -13,6 +13,7 @@ export type CreateTeamProps = {
defaultTeam?: boolean;
memberName?: string;
memberAvatar?: string;
notificationAccount?: string;
};
export type UpdateTeamProps = Omit<ThirdPartyAccountType, 'externalWorkflowVariable'> & {
name?: string;
@@ -39,6 +40,12 @@ export type UpdateInviteProps = {
tmbId: string;
status: TeamMemberSchema['status'];
};
export type UpdateStatusProps = {
tmbId: string;
status: TeamMemberSchema['status'];
};
export type InviteMemberResponse = Record<
'invite' | 'inValid' | 'inTeam',
{ username: string; userId: string }[]

View File

@@ -34,6 +34,7 @@ export type TeamTagSchema = TeamTagItemType & {
_id: string;
teamId: string;
createTime: Date;
updateTime?: Date;
};
export type TeamMemberSchema = {
@@ -41,6 +42,7 @@ export type TeamMemberSchema = {
teamId: string;
userId: string;
createTime: Date;
updateTime?: Date;
name: string;
role: `${TeamMemberRoleEnum}`;
status: `${TeamMemberStatusEnum}`;
@@ -79,6 +81,9 @@ export type TeamMemberItemType = {
role: `${TeamMemberRoleEnum}`;
status: `${TeamMemberStatusEnum}`;
permission: TeamPermission;
contact?: string;
createTime: Date;
updateTime?: Date;
};
export type TeamTagItemType = {

View File

@@ -17,6 +17,7 @@ export type UserModelSchema = {
fastgpt_sem?: {
keyword: string;
};
contact?: string;
};
export type UserType = {
@@ -29,6 +30,7 @@ export type UserType = {
standardInfo?: standardInfoType;
notificationAccount?: string;
permission: TeamPermission;
contact?: string;
};
export type SourceMemberType = {

View File

@@ -0,0 +1,4 @@
/**
 * Serialize tabular data to a CSV string (RFC 4180 quoting).
 *
 * Fields containing a comma, double quote, CR or LF are wrapped in double
 * quotes with embedded quotes doubled — the previous join-only version
 * produced corrupt rows for such values (e.g. a member name containing a
 * comma shifted every following column).
 *
 * @param headers - column titles, emitted as the first row
 * @param data - rows of cell values, joined in order after the header row
 * @returns the CSV document, rows separated by '\n' (no trailing newline)
 */
export const generateCsv = (headers: string[], data: string[][]) => {
  // Quote only when required so simple output stays byte-identical to before.
  const escapeField = (field: string) =>
    /[",\r\n]/.test(field) ? `"${field.replace(/"/g, '""')}"` : field;

  const rows = [headers, ...data].map((row) => row.map(escapeField).join(','));
  return rows.join('\n');
};

View File

@@ -5,6 +5,7 @@ import { ClientSession, Types } from '../../../common/mongo';
import { guessBase64ImageType } from '../utils';
import { readFromSecondary } from '../../mongo/utils';
import { addHours } from 'date-fns';
import { imageFileType } from '@fastgpt/global/common/file/constants';
export const maxImgSize = 1024 * 1024 * 12;
const base64MimeRegex = /data:image\/([^\)]+);base64/;
@@ -25,12 +26,19 @@ export async function uploadMongoImg({
const [base64Mime, base64Data] = base64Img.split(',');
// Check if mime type is valid
if (!base64MimeRegex.test(base64Mime)) {
return Promise.reject('Invalid image mime type');
return Promise.reject('Invalid image base64');
}
const mime = `image/${base64Mime.match(base64MimeRegex)?.[1] ?? 'image/jpeg'}`;
const binary = Buffer.from(base64Data, 'base64');
const extension = mime.split('/')[1];
let extension = mime.split('/')[1];
if (extension.startsWith('x-')) {
extension = extension.substring(2); // Remove 'x-' prefix
}
if (!extension || !imageFileType.includes(`.${extension}`)) {
return Promise.reject(`Invalid image file type: ${mime}`);
}
const { _id } = await MongoImage.create({
teamId,

View File

@@ -63,6 +63,13 @@ export const getMongoModel = <T>(name: string, schema: mongoose.Schema) => {
const model = connectionMongo.model<T>(name, schema);
// Sync index
syncMongoIndex(model);
return model;
};
const syncMongoIndex = async (model: Model<any>) => {
if (process.env.SYNC_INDEX !== '0' && process.env.NODE_ENV !== 'test') {
try {
model.syncIndexes({ background: true });
@@ -70,8 +77,6 @@ export const getMongoModel = <T>(name: string, schema: mongoose.Schema) => {
addLog.error('Create index error', error);
}
}
return model;
};
export const ReadPreference = connectionMongo.mongo.ReadPreference;

View File

@@ -25,7 +25,7 @@ export const countGptMessagesTokens = async (
number
>({
name: WorkerNameEnum.countGptMessagesTokens,
maxReservedThreads: global.systemEnv?.tokenWorkers || 50
maxReservedThreads: global.systemEnv?.tokenWorkers || 30
});
const total = await workerController.run({ messages, tools, functionCall });

View File

@@ -24,7 +24,7 @@ export const aiTranscriptions = async ({
? { url: modelData.requestUrl }
: {
baseURL: aiAxiosConfig.baseUrl,
url: modelData.requestUrl || '/audio/transcriptions'
url: '/audio/transcriptions'
}),
headers: {
Authorization: modelData.requestAuth

View File

@@ -1,7 +1,9 @@
import OpenAI from '@fastgpt/global/core/ai';
import {
ChatCompletionCreateParamsNonStreaming,
ChatCompletionCreateParamsStreaming
ChatCompletionCreateParamsStreaming,
StreamChatType,
UnStreamChatType
} from '@fastgpt/global/core/ai/type';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { addLog } from '../../common/system/log';
@@ -38,29 +40,30 @@ export const getAxiosConfig = (props?: { userKey?: OpenaiAccountType }) => {
};
};
type CompletionsBodyType =
| ChatCompletionCreateParamsNonStreaming
| ChatCompletionCreateParamsStreaming;
type InferResponseType<T extends CompletionsBodyType> =
T extends ChatCompletionCreateParamsStreaming
? OpenAI.Chat.Completions.ChatCompletionChunk
: OpenAI.Chat.Completions.ChatCompletion;
export const createChatCompletion = async <T extends CompletionsBodyType>({
export const createChatCompletion = async ({
body,
userKey,
timeout,
options
}: {
body: T;
body: ChatCompletionCreateParamsNonStreaming | ChatCompletionCreateParamsStreaming;
userKey?: OpenaiAccountType;
timeout?: number;
options?: OpenAI.RequestOptions;
}): Promise<{
response: InferResponseType<T>;
isStreamResponse: boolean;
getEmptyResponseTip: () => string;
}> => {
}): Promise<
{
getEmptyResponseTip: () => string;
} & (
| {
response: StreamChatType;
isStreamResponse: true;
}
| {
response: UnStreamChatType;
isStreamResponse: false;
}
)
> => {
try {
const modelConstantsData = getLLMModel(body.model);
@@ -96,9 +99,17 @@ export const createChatCompletion = async <T extends CompletionsBodyType>({
return i18nT('chat:LLM_model_response_empty');
};
if (isStreamResponse) {
return {
response,
isStreamResponse: true,
getEmptyResponseTip
};
}
return {
response: response as InferResponseType<T>,
isStreamResponse,
response,
isStreamResponse: false,
getEmptyResponseTip
};
} catch (error) {

View File

@@ -8,6 +8,12 @@
"maxResponse": 4000,
"quoteMaxToken": 120000,
"maxTemperature": 0.99,
"showTopP": true,
"responseFormatList": [
"text",
"json_object"
],
"showStopSign": true,
"vision": false,
"toolChoice": true,
"functionCall": false,
@@ -30,6 +36,12 @@
"maxResponse": 4000,
"quoteMaxToken": 120000,
"maxTemperature": 0.99,
"showTopP": true,
"responseFormatList": [
"text",
"json_object"
],
"showStopSign": true,
"vision": false,
"toolChoice": true,
"functionCall": false,
@@ -52,6 +64,12 @@
"maxResponse": 4000,
"quoteMaxToken": 900000,
"maxTemperature": 0.99,
"showTopP": true,
"responseFormatList": [
"text",
"json_object"
],
"showStopSign": true,
"vision": false,
"toolChoice": false,
"functionCall": false,
@@ -74,6 +92,12 @@
"maxResponse": 4000,
"quoteMaxToken": 120000,
"maxTemperature": 0.99,
"showTopP": true,
"responseFormatList": [
"text",
"json_object"
],
"showStopSign": true,
"vision": false,
"toolChoice": true,
"functionCall": false,
@@ -96,6 +120,8 @@
"maxResponse": 1000,
"quoteMaxToken": 6000,
"maxTemperature": 0.99,
"showTopP": true,
"showStopSign": true,
"vision": true,
"toolChoice": false,
"functionCall": false,
@@ -118,6 +144,8 @@
"maxResponse": 1000,
"quoteMaxToken": 6000,
"maxTemperature": 0.99,
"showTopP": true,
"showStopSign": true,
"vision": true,
"toolChoice": false,
"functionCall": false,

View File

@@ -8,6 +8,8 @@
"maxResponse": 8000,
"quoteMaxToken": 100000,
"maxTemperature": 1,
"showTopP": true,
"showStopSign": true,
"vision": false,
"toolChoice": true,
"functionCall": false,
@@ -30,6 +32,8 @@
"maxResponse": 8000,
"quoteMaxToken": 100000,
"maxTemperature": 1,
"showTopP": true,
"showStopSign": true,
"vision": true,
"toolChoice": true,
"functionCall": false,
@@ -52,6 +56,8 @@
"maxResponse": 8000,
"quoteMaxToken": 100000,
"maxTemperature": 1,
"showTopP": true,
"showStopSign": true,
"vision": true,
"toolChoice": true,
"functionCall": false,
@@ -74,6 +80,8 @@
"maxResponse": 4096,
"quoteMaxToken": 100000,
"maxTemperature": 1,
"showTopP": true,
"showStopSign": true,
"vision": true,
"toolChoice": true,
"functionCall": false,

View File

@@ -5,9 +5,12 @@
"model": "deepseek-chat",
"name": "Deepseek-chat",
"maxContext": 64000,
"maxResponse": 4096,
"maxResponse": 8000,
"quoteMaxToken": 60000,
"maxTemperature": 1.5,
"maxTemperature": 1,
"showTopP": true,
"responseFormatList": ["text", "json_object"],
"showStopSign": true,
"vision": false,
"toolChoice": true,
"functionCall": false,
@@ -25,7 +28,7 @@
"model": "deepseek-reasoner",
"name": "Deepseek-reasoner",
"maxContext": 64000,
"maxResponse": 4096,
"maxResponse": 8000,
"quoteMaxToken": 60000,
"maxTemperature": null,
"vision": false,
@@ -42,7 +45,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
}
]
}

View File

@@ -1,6 +1,102 @@
{
"provider": "Doubao",
"list": [
{
"model": "Doubao-1.5-lite-32k",
"name": "Doubao-1.5-lite-32k",
"maxContext": 32000,
"maxResponse": 4000,
"quoteMaxToken": 32000,
"maxTemperature": 1,
"showTopP": true,
"showStopSign": true,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "Doubao-1.5-pro-32k",
"name": "Doubao-1.5-pro-32k",
"maxContext": 32000,
"maxResponse": 4000,
"quoteMaxToken": 32000,
"maxTemperature": 1,
"showTopP": true,
"showStopSign": true,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "Doubao-1.5-pro-256k",
"name": "Doubao-1.5-pro-256k",
"maxContext": 256000,
"maxResponse": 12000,
"quoteMaxToken": 256000,
"maxTemperature": 1,
"showTopP": true,
"showStopSign": true,
"vision": false,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "Doubao-1.5-vision-pro-32k",
"name": "Doubao-1.5-vision-pro-32k",
"maxContext": 32000,
"maxResponse": 4000,
"quoteMaxToken": 32000,
"maxTemperature": 1,
"showTopP": true,
"showStopSign": true,
"vision": true,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
},
{
"model": "Doubao-lite-4k",
"name": "Doubao-lite-4k",
@@ -8,6 +104,8 @@
"maxResponse": 4000,
"quoteMaxToken": 4000,
"maxTemperature": 1,
"showTopP": true,
"showStopSign": true,
"vision": false,
"toolChoice": true,
"functionCall": false,
@@ -30,6 +128,8 @@
"maxResponse": 4000,
"quoteMaxToken": 32000,
"maxTemperature": 1,
"showTopP": true,
"showStopSign": true,
"vision": false,
"toolChoice": true,
"functionCall": false,
@@ -65,7 +165,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "Doubao-vision-lite-32k",
@@ -87,7 +189,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "Doubao-pro-4k",
@@ -109,7 +213,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "Doubao-pro-32k",
@@ -131,7 +237,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "Doubao-pro-128k",
@@ -153,7 +261,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "Doubao-vision-pro-32k",
@@ -175,21 +285,25 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "Doubao-embedding-large",
"name": "Doubao-embedding-large",
"defaultToken": 512,
"maxToken": 4096,
"type": "embedding"
"type": "embedding",
"normalization": true
},
{
"model": "Doubao-embedding",
"name": "Doubao-embedding",
"defaultToken": 512,
"maxToken": 4096,
"type": "embedding"
"type": "embedding",
"normalization": true
}
]
}

View File

@@ -21,7 +21,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "ERNIE-4.0-Turbo-8K",
@@ -43,7 +45,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "ERNIE-Lite-8K",
@@ -65,7 +69,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "ERNIE-Speed-128K",
@@ -87,7 +93,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "Embedding-V1",

View File

@@ -1,6 +1,54 @@
{
"provider": "Gemini",
"list": [
{
"model": "gemini-2.0-flash",
"name": "gemini-2.0-flash",
"maxContext": 1000000,
"maxResponse": 8000,
"quoteMaxToken": 60000,
"maxTemperature": 1,
"vision": true,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "gemini-2.0-pro-exp",
"name": "gemini-2.0-pro-exp",
"maxContext": 2000000,
"maxResponse": 8000,
"quoteMaxToken": 100000,
"maxTemperature": 1,
"vision": true,
"toolChoice": true,
"functionCall": false,
"defaultSystemChatPrompt": "",
"datasetProcess": true,
"usedInClassify": true,
"customCQPrompt": "",
"usedInExtractFields": true,
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "gemini-1.5-flash",
"name": "gemini-1.5-flash",
@@ -21,7 +69,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "gemini-1.5-pro",
@@ -43,7 +93,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "gemini-2.0-flash-exp",
@@ -65,7 +117,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "gemini-2.0-flash-thinking-exp-1219",
@@ -87,7 +141,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "gemini-2.0-flash-thinking-exp-01-21",
@@ -109,7 +165,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "gemini-exp-1206",
@@ -131,7 +189,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "text-embedding-004",

View File

@@ -20,7 +20,9 @@
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "llama-3.3-70b-versatile",
@@ -41,7 +43,9 @@
"customExtractPrompt": "",
"usedInToolCall": true,
"defaultConfig": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
}
]
}

View File

@@ -21,7 +21,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "hunyuan-lite",
@@ -43,7 +45,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "hunyuan-pro",
@@ -65,7 +69,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "hunyuan-standard",
@@ -87,7 +93,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "hunyuan-turbo-vision",
@@ -109,7 +117,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "hunyuan-turbo",
@@ -131,7 +141,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "hunyuan-vision",
@@ -153,7 +165,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "hunyuan-embedding",

View File

@@ -21,7 +21,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "internlm3-8b-instruct",
@@ -43,7 +45,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
}
]
}

View File

@@ -21,7 +21,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "abab6.5s-chat",
@@ -43,7 +45,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "speech-01-turbo",
@@ -237,4 +241,4 @@
"type": "tts"
}
]
}
}

View File

@@ -21,7 +21,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "ministral-8b-latest",
@@ -43,7 +45,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "mistral-large-latest",
@@ -65,7 +69,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "mistral-small-latest",
@@ -87,7 +93,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
}
]
}

View File

@@ -21,7 +21,10 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true,
"responseFormatList": ["text", "json_object"]
},
{
"model": "moonshot-v1-32k",
@@ -43,7 +46,10 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true,
"responseFormatList": ["text", "json_object"]
},
{
"model": "moonshot-v1-128k",
@@ -65,7 +71,10 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true,
"responseFormatList": ["text", "json_object"]
}
]
}

View File

@@ -8,6 +8,13 @@
"maxResponse": 16000,
"quoteMaxToken": 60000,
"maxTemperature": 1.2,
"showTopP": true,
"responseFormatList": [
"text",
"json_object",
"json_schema"
],
"showStopSign": true,
"vision": true,
"toolChoice": true,
"functionCall": true,
@@ -29,6 +36,13 @@
"maxResponse": 4000,
"quoteMaxToken": 60000,
"maxTemperature": 1.2,
"showTopP": true,
"responseFormatList": [
"text",
"json_object",
"json_schema"
],
"showStopSign": true,
"vision": true,
"toolChoice": true,
"functionCall": true,
@@ -68,7 +82,9 @@
"fieldMap": {
"max_tokens": "max_completion_tokens"
},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "o1-mini",
@@ -94,7 +110,9 @@
"fieldMap": {
"max_tokens": "max_completion_tokens"
},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "o1",
@@ -120,7 +138,9 @@
"fieldMap": {
"max_tokens": "max_completion_tokens"
},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "o1-preview",
@@ -146,7 +166,9 @@
"fieldMap": {
"max_tokens": "max_completion_tokens"
},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "gpt-3.5-turbo",
@@ -155,6 +177,8 @@
"maxResponse": 4000,
"quoteMaxToken": 13000,
"maxTemperature": 1.2,
"showTopP": true,
"showStopSign": true,
"vision": false,
"toolChoice": true,
"functionCall": true,
@@ -175,6 +199,8 @@
"maxResponse": 4000,
"quoteMaxToken": 60000,
"maxTemperature": 1.2,
"showTopP": true,
"showStopSign": true,
"vision": true,
"toolChoice": true,
"functionCall": true,
@@ -249,4 +275,4 @@
"type": "stt"
}
]
}
}

View File

@@ -0,0 +1,4 @@
{
"provider": "PPIO",
"list": []
}

View File

@@ -21,7 +21,10 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true,
"responseFormatList": ["text", "json_object"]
},
{
"model": "qwen-plus",
@@ -43,7 +46,10 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true,
"responseFormatList": ["text", "json_object"]
},
{
"model": "qwen-vl-plus",
@@ -63,7 +69,9 @@
"usedInQueryExtension": true,
"customExtractPrompt": "",
"usedInToolCall": true,
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "qwen-max",
@@ -85,7 +93,10 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true,
"responseFormatList": ["text", "json_object"]
},
{
"model": "qwen-vl-max",
@@ -107,7 +118,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "qwen-coder-turbo",
@@ -129,7 +142,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "qwen2.5-7b-instruct",
@@ -151,7 +166,10 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true,
"responseFormatList": ["text", "json_object"]
},
{
"model": "qwen2.5-14b-instruct",
@@ -173,7 +191,10 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true,
"responseFormatList": ["text", "json_object"]
},
{
"model": "qwen2.5-32b-instruct",
@@ -195,7 +216,10 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true,
"responseFormatList": ["text", "json_object"]
},
{
"model": "qwen2.5-72b-instruct",
@@ -217,7 +241,17 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true,
"responseFormatList": ["text", "json_object"]
},
{
"model": "text-embedding-v3",
"name": "text-embedding-v3",
"defaultToken": 512,
"maxToken": 8000,
"type": "embedding"
}
]
}

View File

@@ -21,7 +21,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "Qwen/Qwen2-VL-72B-Instruct",
@@ -42,7 +44,9 @@
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"defaultConfig": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "deepseek-ai/DeepSeek-V2.5",
@@ -64,7 +68,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "BAAI/bge-m3",
@@ -201,4 +207,4 @@
"type": "rerank"
}
]
}
}

View File

@@ -19,7 +19,9 @@
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "generalv3",
@@ -39,7 +41,9 @@
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "pro-128k",
@@ -59,7 +63,9 @@
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "generalv3.5",
@@ -79,7 +85,9 @@
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "max-32k",
@@ -101,7 +109,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "4.0Ultra",
@@ -123,7 +133,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
}
]
}
}

View File

@@ -19,7 +19,9 @@
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "step-1-8k",
@@ -39,7 +41,9 @@
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "step-1-32k",
@@ -59,7 +63,9 @@
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "step-1-128k",
@@ -79,7 +85,9 @@
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "step-1-256k",
@@ -99,7 +107,9 @@
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "step-1o-vision-32k",
@@ -119,7 +129,9 @@
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "step-1v-8k",
@@ -139,7 +151,9 @@
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "step-1v-32k",
@@ -159,7 +173,9 @@
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "step-2-mini",
@@ -179,7 +195,9 @@
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "step-2-16k",
@@ -199,7 +217,9 @@
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "step-2-16k-exp",
@@ -219,7 +239,9 @@
"customCQPrompt": "",
"customExtractPrompt": "",
"defaultSystemChatPrompt": "",
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "step-tts-mini",
@@ -305,4 +327,4 @@
"type": "tts"
}
]
}
}

View File

@@ -21,7 +21,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
},
{
"model": "yi-vision-v2",
@@ -43,7 +45,9 @@
"usedInToolCall": true,
"defaultConfig": {},
"fieldMap": {},
"type": "llm"
"type": "llm",
"showTopP": true,
"showStopSign": true
}
]
}

View File

@@ -11,7 +11,11 @@ import {
ReRankModelItemType
} from '@fastgpt/global/core/ai/model.d';
import { debounce } from 'lodash';
import { ModelProviderType } from '@fastgpt/global/core/ai/provider';
import {
getModelProvider,
ModelProviderIdType,
ModelProviderType
} from '@fastgpt/global/core/ai/provider';
import { findModelFromAlldata } from '../model';
import {
reloadFastGPTConfigBuffer,
@@ -27,7 +31,12 @@ import { delay } from '@fastgpt/global/common/system/utils';
export const loadSystemModels = async (init = false) => {
const getProviderList = () => {
const currentFileUrl = new URL(import.meta.url);
const modelsPath = path.join(path.dirname(currentFileUrl.pathname), 'provider');
const filePath = decodeURIComponent(
process.platform === 'win32'
? currentFileUrl.pathname.substring(1) // Remove leading slash on Windows
: currentFileUrl.pathname
);
const modelsPath = path.join(path.dirname(filePath), 'provider');
return fs.readdirSync(modelsPath) as string[];
};
@@ -91,7 +100,7 @@ export const loadSystemModels = async (init = false) => {
await Promise.all(
providerList.map(async (name) => {
const fileContent = (await import(`./provider/${name}`))?.default as {
provider: ModelProviderType;
provider: ModelProviderIdType;
list: SystemModelItemType[];
};
@@ -101,7 +110,7 @@ export const loadSystemModels = async (init = false) => {
const modelData: any = {
...fileModel,
...dbModel?.metadata,
provider: dbModel?.metadata?.provider || fileContent.provider,
provider: getModelProvider(dbModel?.metadata?.provider || fileContent.provider).id,
type: dbModel?.metadata?.type || fileModel.type,
isCustom: false
};
@@ -143,6 +152,7 @@ export const loadSystemModels = async (init = false) => {
console.error('Load models error', error);
// @ts-ignore
global.systemModelList = undefined;
return Promise.reject(error);
}
};

View File

@@ -32,12 +32,14 @@ export async function getVectorsByText({ model, input, type }: GetVectorProps) {
model: model.model,
input: [input]
},
model.requestUrl && model.requestAuth
model.requestUrl
? {
path: model.requestUrl,
headers: {
Authorization: `Bearer ${model.requestAuth}`
}
headers: model.requestAuth
? {
Authorization: `Bearer ${model.requestAuth}`
}
: undefined
}
: {}
)
@@ -54,7 +56,14 @@ export async function getVectorsByText({ model, input, type }: GetVectorProps) {
const [tokens, vectors] = await Promise.all([
countPromptTokens(input),
Promise.all(res.data.map((item) => unityDimensional(item.embedding)))
Promise.all(
res.data
.map((item) => unityDimensional(item.embedding))
.map((item) => {
if (model.normalization) return normalization(item);
return item;
})
)
]);
return {
@@ -85,3 +94,15 @@ function unityDimensional(vector: number[]) {
return resultVector.concat(zeroVector);
}
// normalization processing
function normalization(vector: number[]) {
if (vector.some((item) => item > 1)) {
// Calculate the Euclidean norm (L2 norm)
const norm = Math.sqrt(vector.reduce((sum, val) => sum + val * val, 0));
// Normalize the vector by dividing each component by the norm
return vector.map((val) => val / norm);
}
return vector;
}

View File

@@ -25,8 +25,11 @@ export function reRankRecall({
if (!model) {
return Promise.reject('no rerank model');
}
if (documents.length === 0) {
return Promise.resolve([]);
}
const { baseUrl, authorization } = getAxiosConfig({});
const { baseUrl, authorization } = getAxiosConfig();
let start = Date.now();
return POST<PostReRankResponse>(
@@ -38,7 +41,7 @@ export function reRankRecall({
},
{
headers: {
Authorization: model.requestAuth ? model.requestAuth : authorization
Authorization: model.requestAuth ? `Bearer ${model.requestAuth}` : authorization
},
timeout: 30000
}

View File

@@ -42,17 +42,27 @@ type CompletionsBodyType =
| ChatCompletionCreateParamsStreaming;
type InferCompletionsBody<T> = T extends { stream: true }
? ChatCompletionCreateParamsStreaming
: ChatCompletionCreateParamsNonStreaming;
: T extends { stream: false }
? ChatCompletionCreateParamsNonStreaming
: ChatCompletionCreateParamsNonStreaming | ChatCompletionCreateParamsStreaming;
export const llmCompletionsBodyFormat = <T extends CompletionsBodyType>(
body: T,
body: T & {
response_format?: any;
json_schema?: string;
stop?: string;
},
model: string | LLMModelItemType
): InferCompletionsBody<T> => {
const modelData = typeof model === 'string' ? getLLMModel(model) : model;
if (!modelData) {
return body as InferCompletionsBody<T>;
return body as unknown as InferCompletionsBody<T>;
}
const response_format = body.response_format;
const json_schema = body.json_schema ?? undefined;
const stop = body.stop ?? undefined;
const requestBody: T = {
...body,
temperature:
@@ -62,7 +72,14 @@ export const llmCompletionsBodyFormat = <T extends CompletionsBodyType>(
temperature: body.temperature
})
: undefined,
...modelData?.defaultConfig
...modelData?.defaultConfig,
response_format: response_format
? {
type: response_format,
json_schema
}
: undefined,
stop: stop?.split('|')
};
// field map
@@ -75,9 +92,7 @@ export const llmCompletionsBodyFormat = <T extends CompletionsBodyType>(
});
}
// console.log(requestBody);
return requestBody as InferCompletionsBody<T>;
return requestBody as unknown as InferCompletionsBody<T>;
};
export const llmStreamResponseToText = async (response: StreamChatType) => {

View File

@@ -1,7 +1,7 @@
import { connectionMongo, getMongoModel } from '../../common/mongo';
const { Schema } = connectionMongo;
import { ChatSchema as ChatType } from '@fastgpt/global/core/chat/type.d';
import { ChatSourceMap } from '@fastgpt/global/core/chat/constants';
import { ChatSourceEnum, ChatSourceMap } from '@fastgpt/global/core/chat/constants';
import {
TeamCollectionName,
TeamMemberCollectionName
@@ -52,8 +52,10 @@ const ChatSchema = new Schema({
},
source: {
type: String,
required: true
required: true,
enum: Object.values(ChatSourceEnum)
},
sourceName: String,
shareId: {
type: String
},
@@ -88,7 +90,7 @@ try {
ChatSchema.index({ appId: 1, chatId: 1 });
// get chat logs;
ChatSchema.index({ teamId: 1, appId: 1, updateTime: -1 });
ChatSchema.index({ teamId: 1, appId: 1, updateTime: -1, sources: 1 });
// get share chat history
ChatSchema.index({ shareId: 1, outLinkUid: 1, updateTime: -1 });

View File

@@ -1,6 +1,10 @@
import type { AIChatItemType, UserChatItemType } from '@fastgpt/global/core/chat/type.d';
import { MongoApp } from '../app/schema';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import {
ChatItemValueTypeEnum,
ChatRoleEnum,
ChatSourceEnum
} from '@fastgpt/global/core/chat/constants';
import { MongoChatItem } from './chatItemSchema';
import { MongoChat } from './chatSchema';
import { addLog } from '../../common/system/log';
@@ -22,7 +26,8 @@ type Props = {
variables?: Record<string, any>;
isUpdateUseTime: boolean;
newTitle: string;
source: string;
source: `${ChatSourceEnum}`;
sourceName?: string;
shareId?: string;
outLinkUid?: string;
content: [UserChatItemType & { dataId?: string }, AIChatItemType & { dataId?: string }];
@@ -40,6 +45,7 @@ export async function saveChat({
isUpdateUseTime,
newTitle,
source,
sourceName,
shareId,
outLinkUid,
content,
@@ -96,6 +102,7 @@ export async function saveChat({
pluginInputs,
title: newTitle,
source,
sourceName,
shareId,
outLinkUid,
metadata: metadataUpdate,

View File

@@ -197,7 +197,11 @@ export const loadRequestMessages = async ({
addLog.info(`Filter invalid image: ${imgUrl}`);
return;
}
} catch (error) {
} catch (error: any) {
if (error?.response?.status === 405) {
return item;
}
addLog.warn(`Filter invalid image: ${imgUrl}`, { error });
return;
}
}

View File

@@ -37,12 +37,7 @@ try {
{ teamId: 1, datasetId: 1, fullTextToken: 'text' },
{
name: 'teamId_1_datasetId_1_fullTextToken_text',
default_language: 'none',
collation: {
locale: 'simple', // 使用简单匹配规则
strength: 2, // 忽略大小写
caseLevel: false // 进一步确保大小写不敏感
}
default_language: 'none'
}
);
DatasetDataTextSchema.index({ dataId: 1 }, { unique: true });

View File

@@ -5,7 +5,7 @@ import {
} from '@fastgpt/global/core/dataset/constants';
import { recallFromVectorStore } from '../../../common/vectorStore/controller';
import { getVectorsByText } from '../../ai/embedding';
import { getEmbeddingModel, getDefaultRerankModel } from '../../ai/model';
import { getEmbeddingModel, getDefaultRerankModel, getLLMModel } from '../../ai/model';
import { MongoDatasetData } from '../data/schema';
import {
DatasetDataTextSchemaType,
@@ -23,18 +23,24 @@ import json5 from 'json5';
import { MongoDatasetCollectionTags } from '../tag/schema';
import { readFromSecondary } from '../../../common/mongo/utils';
import { MongoDatasetDataText } from '../data/dataTextSchema';
import { ChatItemType } from '@fastgpt/global/core/chat/type';
import { POST } from '../../../common/api/plusRequest';
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { datasetSearchQueryExtension } from './utils';
type SearchDatasetDataProps = {
export type SearchDatasetDataProps = {
histories: ChatItemType[];
teamId: string;
model: string;
similarity?: number; // min distance
limit: number; // max Token limit
datasetIds: string[];
searchMode?: `${DatasetSearchModeEnum}`;
usingReRank?: boolean;
reRankQuery: string;
queries: string[];
[NodeInputKeyEnum.datasetSimilarity]?: number; // min distance
[NodeInputKeyEnum.datasetMaxTokens]: number; // max Token limit
[NodeInputKeyEnum.datasetSearchMode]?: `${DatasetSearchModeEnum}`;
[NodeInputKeyEnum.datasetSearchUsingReRank]?: boolean;
/*
{
tags: {
@@ -50,7 +56,96 @@ type SearchDatasetDataProps = {
collectionFilterMatch?: string;
};
export async function searchDatasetData(props: SearchDatasetDataProps) {
export type SearchDatasetDataResponse = {
searchRes: SearchDataResponseItemType[];
tokens: number;
searchMode: `${DatasetSearchModeEnum}`;
limit: number;
similarity: number;
usingReRank: boolean;
usingSimilarityFilter: boolean;
queryExtensionResult?: {
model: string;
inputTokens: number;
outputTokens: number;
query: string;
};
deepSearchResult?: { model: string; inputTokens: number; outputTokens: number };
};
export const datasetDataReRank = async ({
data,
query
}: {
data: SearchDataResponseItemType[];
query: string;
}): Promise<SearchDataResponseItemType[]> => {
const results = await reRankRecall({
query,
documents: data.map((item) => ({
id: item.id,
text: `${item.q}\n${item.a}`
}))
});
if (results.length === 0) {
return Promise.reject('Rerank error');
}
// add new score to data
const mergeResult = results
.map((item, index) => {
const target = data.find((dataItem) => dataItem.id === item.id);
if (!target) return null;
const score = item.score || 0;
return {
...target,
score: [{ type: SearchScoreTypeEnum.reRank, value: score, index }]
};
})
.filter(Boolean) as SearchDataResponseItemType[];
return mergeResult;
};
export const filterDatasetDataByMaxTokens = async (
data: SearchDataResponseItemType[],
maxTokens: number
) => {
const filterMaxTokensResult = await (async () => {
// Count tokens
const tokensScoreFilter = await Promise.all(
data.map(async (item) => ({
...item,
tokens: await countPromptTokens(item.q + item.a)
}))
);
const results: SearchDataResponseItemType[] = [];
let totalTokens = 0;
for await (const item of tokensScoreFilter) {
totalTokens += item.tokens;
if (totalTokens > maxTokens + 500) {
break;
}
results.push(item);
if (totalTokens > maxTokens) {
break;
}
}
return results.length === 0 ? data.slice(0, 1) : results;
})();
return filterMaxTokensResult;
};
export async function searchDatasetData(
props: SearchDatasetDataProps
): Promise<SearchDatasetDataResponse> {
let {
teamId,
reRankQuery,
@@ -288,6 +383,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
).lean()
]);
const set = new Map<string, number>();
const formatResult = results
.map((item, index) => {
const collection = collections.find((col) => String(col._id) === String(item.collectionId));
@@ -303,8 +399,6 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
return;
}
const score = item?.score || 0;
const result: SearchDataResponseItemType = {
id: String(data._id),
updateTime: data.updateTime,
@@ -314,12 +408,24 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
datasetId: String(data.datasetId),
collectionId: String(data.collectionId),
...getCollectionSourceData(collection),
score: [{ type: SearchScoreTypeEnum.embedding, value: score, index }]
score: [{ type: SearchScoreTypeEnum.embedding, value: item?.score || 0, index }]
};
return result;
})
.filter(Boolean) as SearchDataResponseItemType[];
.filter((item) => {
if (!item) return false;
if (set.has(item.id)) return false;
set.set(item.id, 1);
return true;
})
.map((item, index) => {
if (!item) return;
return {
...item,
score: item.score.map((item) => ({ ...item, index }))
};
}) as SearchDataResponseItemType[];
return {
embeddingRecallResults: formatResult,
@@ -455,47 +561,6 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
tokenLen: 0
};
};
const reRankSearchResult = async ({
data,
query
}: {
data: SearchDataResponseItemType[];
query: string;
}): Promise<SearchDataResponseItemType[]> => {
try {
const results = await reRankRecall({
query,
documents: data.map((item) => ({
id: item.id,
text: `${item.q}\n${item.a}`
}))
});
if (results.length === 0) {
usingReRank = false;
return [];
}
// add new score to data
const mergeResult = results
.map((item, index) => {
const target = data.find((dataItem) => dataItem.id === item.id);
if (!target) return null;
const score = item.score || 0;
return {
...target,
score: [{ type: SearchScoreTypeEnum.reRank, value: score, index }]
};
})
.filter(Boolean) as SearchDataResponseItemType[];
return mergeResult;
} catch (error) {
usingReRank = false;
return [];
}
};
const multiQueryRecall = async ({
embeddingLimit,
fullTextLimit
@@ -580,10 +645,15 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
set.add(str);
return true;
});
return reRankSearchResult({
query: reRankQuery,
data: filterSameDataResults
});
try {
return await datasetDataReRank({
query: reRankQuery,
data: filterSameDataResults
});
} catch (error) {
usingReRank = false;
return [];
}
})();
// embedding recall and fullText recall rrf concat
@@ -628,31 +698,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
})();
// token filter
const filterMaxTokensResult = await (async () => {
const tokensScoreFilter = await Promise.all(
scoreFilter.map(async (item) => ({
...item,
tokens: await countPromptTokens(item.q + item.a)
}))
);
const results: SearchDataResponseItemType[] = [];
let totalTokens = 0;
for await (const item of tokensScoreFilter) {
totalTokens += item.tokens;
if (totalTokens > maxTokens + 500) {
break;
}
results.push(item);
if (totalTokens > maxTokens) {
break;
}
}
return results.length === 0 ? scoreFilter.slice(0, 1) : results;
})();
const filterMaxTokensResult = await filterDatasetDataByMaxTokens(scoreFilter, maxTokens);
return {
searchRes: filterMaxTokensResult,
@@ -664,3 +710,54 @@ export async function searchDatasetData(props: SearchDatasetDataProps) {
usingSimilarityFilter
};
}
export type DefaultSearchDatasetDataProps = SearchDatasetDataProps & {
[NodeInputKeyEnum.datasetSearchUsingExtensionQuery]?: boolean;
[NodeInputKeyEnum.datasetSearchExtensionModel]?: string;
[NodeInputKeyEnum.datasetSearchExtensionBg]?: string;
};
export const defaultSearchDatasetData = async ({
datasetSearchUsingExtensionQuery,
datasetSearchExtensionModel,
datasetSearchExtensionBg,
...props
}: DefaultSearchDatasetDataProps): Promise<SearchDatasetDataResponse> => {
const query = props.queries[0];
const extensionModel = datasetSearchUsingExtensionQuery
? getLLMModel(datasetSearchExtensionModel)
: undefined;
const { concatQueries, extensionQueries, rewriteQuery, aiExtensionResult } =
await datasetSearchQueryExtension({
query,
extensionModel,
extensionBg: datasetSearchExtensionBg
});
const result = await searchDatasetData({
...props,
reRankQuery: rewriteQuery,
queries: concatQueries
});
return {
...result,
queryExtensionResult: aiExtensionResult
? {
model: aiExtensionResult.model,
inputTokens: aiExtensionResult.inputTokens,
outputTokens: aiExtensionResult.outputTokens,
query: extensionQueries.join('\n')
}
: undefined
};
};
export type DeepRagSearchProps = SearchDatasetDataProps & {
[NodeInputKeyEnum.datasetDeepSearchModel]?: string;
[NodeInputKeyEnum.datasetDeepSearchMaxTimes]?: number;
[NodeInputKeyEnum.datasetDeepSearchBg]?: string;
};
export const deepRagSearch = (data: DeepRagSearchProps) =>
POST<SearchDatasetDataResponse>('/core/dataset/deepRag', data);

View File

@@ -72,12 +72,15 @@ Human: ${query}
if (result.extensionQueries?.length === 0) return;
return result;
})();
const extensionQueries = filterSamQuery(aiExtensionResult?.extensionQueries || []);
if (aiExtensionResult) {
queries = filterSamQuery(queries.concat(aiExtensionResult.extensionQueries));
queries = filterSamQuery(queries.concat(extensionQueries));
rewriteQuery = queries.join('\n');
}
return {
extensionQueries,
concatQueries: queries,
rewriteQuery,
aiExtensionResult

View File

@@ -1,45 +1,5 @@
import { DatasetTrainingSchemaType } from '@fastgpt/global/core/dataset/type';
import { addLog } from '../../../common/system/log';
import { getErrText } from '@fastgpt/global/common/error/utils';
import { MongoDatasetTraining } from './schema';
import Papa from 'papaparse';
export const checkInvalidChunkAndLock = async ({
err,
errText,
data
}: {
err: any;
errText: string;
data: DatasetTrainingSchemaType;
}) => {
if (err?.response) {
addLog.error(`openai error: ${errText}`, {
status: err.response?.status,
statusText: err.response?.statusText,
data: err.response?.data
});
} else {
addLog.error(getErrText(err, errText), err);
}
if (
err?.message === 'invalid message format' ||
err?.type === 'invalid_request_error' ||
err?.code === 500
) {
addLog.error('Lock training data', err);
try {
await MongoDatasetTraining.findByIdAndUpdate(data._id, {
lockTime: new Date('2998/5/5')
});
} catch (error) {}
return true;
}
return false;
};
export const parseCsvTable2Chunks = (rawText: string) => {
const csvArr = Papa.parse(rawText).data as string[][];

View File

@@ -46,7 +46,15 @@ export const runToolWithFunctionCall = async (
externalProvider,
stream,
workflowStreamResponse,
params: { temperature, maxToken, aiChatVision }
params: {
temperature,
maxToken,
aiChatVision,
aiChatTopP,
aiChatStopSign,
aiChatResponseFormat,
aiChatJsonSchema
}
} = workflowProps;
// Interactive
@@ -204,12 +212,18 @@ export const runToolWithFunctionCall = async (
const requestBody = llmCompletionsBodyFormat(
{
model: toolModel.model,
temperature,
max_tokens,
stream,
messages: requestMessages,
functions,
function_call: 'auto'
function_call: 'auto',
temperature,
max_tokens,
top_p: aiChatTopP,
stop: aiChatStopSign,
response_format: aiChatResponseFormat,
json_schema: aiChatJsonSchema
},
toolModel
);

View File

@@ -334,7 +334,7 @@ const getMultiInput = async ({
return {
documentQuoteText: text,
userFiles: fileLinks.map((url) => parseUrlToFileType(url))
userFiles: fileLinks.map((url) => parseUrlToFileType(url)).filter(Boolean)
};
};

View File

@@ -54,7 +54,15 @@ export const runToolWithPromptCall = async (
externalProvider,
stream,
workflowStreamResponse,
params: { temperature, maxToken, aiChatVision }
params: {
temperature,
maxToken,
aiChatVision,
aiChatTopP,
aiChatStopSign,
aiChatResponseFormat,
aiChatJsonSchema
}
} = workflowProps;
if (interactiveEntryToolParams) {
@@ -215,10 +223,14 @@ export const runToolWithPromptCall = async (
const requestBody = llmCompletionsBodyFormat(
{
model: toolModel.model,
stream,
messages: requestMessages,
temperature,
max_tokens,
stream,
messages: requestMessages
top_p: aiChatTopP,
stop: aiChatStopSign,
response_format: aiChatResponseFormat,
json_schema: aiChatJsonSchema
},
toolModel
);

View File

@@ -93,7 +93,15 @@ export const runToolWithToolChoice = async (
stream,
externalProvider,
workflowStreamResponse,
params: { temperature, maxToken, aiChatVision }
params: {
temperature,
maxToken,
aiChatVision,
aiChatTopP,
aiChatStopSign,
aiChatResponseFormat,
aiChatJsonSchema
}
} = workflowProps;
if (maxRunToolTimes <= 0 && response) {
@@ -263,12 +271,16 @@ export const runToolWithToolChoice = async (
const requestBody = llmCompletionsBodyFormat(
{
model: toolModel.model,
temperature,
max_tokens,
stream,
messages: requestMessages,
tools,
tool_choice: 'auto'
tool_choice: 'auto',
temperature,
max_tokens,
top_p: aiChatTopP,
stop: aiChatStopSign,
response_format: aiChatResponseFormat,
json_schema: aiChatJsonSchema
},
toolModel
);

View File

@@ -16,12 +16,16 @@ export type DispatchToolModuleProps = ModuleDispatchProps<{
[NodeInputKeyEnum.history]?: ChatItemType[];
[NodeInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.fileUrlList]?: string[];
[NodeInputKeyEnum.aiModel]: string;
[NodeInputKeyEnum.aiSystemPrompt]: string;
[NodeInputKeyEnum.aiChatTemperature]: number;
[NodeInputKeyEnum.aiChatMaxToken]: number;
[NodeInputKeyEnum.aiChatVision]?: boolean;
[NodeInputKeyEnum.fileUrlList]?: string[];
[NodeInputKeyEnum.aiChatTopP]?: number;
[NodeInputKeyEnum.aiChatStopSign]?: string;
[NodeInputKeyEnum.aiChatResponseFormat]?: string;
[NodeInputKeyEnum.aiChatJsonSchema]?: string;
}> & {
messages: ChatCompletionMessageParam[];
toolNodes: ToolNodeItemType[];

View File

@@ -3,13 +3,13 @@ import { filterGPTMessageByMaxContext, loadRequestMessages } from '../../../chat
import type { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/core/chat/type.d';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import {
parseReasoningContent,
parseReasoningStreamContent,
textAdaptGptResponse
} from '@fastgpt/global/core/workflow/runtime/utils';
import { createChatCompletion } from '../../../ai/config';
import type {
ChatCompletion,
ChatCompletionMessageParam,
StreamChatType
} from '@fastgpt/global/core/ai/type.d';
import type { ChatCompletionMessageParam, StreamChatType } from '@fastgpt/global/core/ai/type.d';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { postTextCensor } from '../../../../common/api/requestPlusApi';
@@ -51,7 +51,7 @@ import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
export type ChatProps = ModuleDispatchProps<
AIChatNodeProps & {
[NodeInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.userChatInput]?: string;
[NodeInputKeyEnum.history]?: ChatItemType[] | number;
[NodeInputKeyEnum.aiChatDatasetQuote]?: SearchDataResponseItemType[];
}
@@ -81,7 +81,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
maxToken,
history = 6,
quoteQA,
userChatInput,
userChatInput = '',
isResponseAnswerText = true,
systemPrompt = '',
aiChatQuoteRole = 'system',
@@ -89,6 +89,11 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
quotePrompt,
aiChatVision,
aiChatReasoning = true,
aiChatTopP,
aiChatStopSign,
aiChatResponseFormat,
aiChatJsonSchema,
fileUrlList: fileLinks, // node quote file links
stringQuoteText //abandon
}
@@ -100,7 +105,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
return Promise.reject('The chat model is undefined, you need to select a chat model.');
}
stream = stream && isResponseAnswerText;
aiChatVision = modelConstantsData.vision && aiChatVision;
aiChatReasoning = !!aiChatReasoning && !!modelConstantsData.reasoning;
const chatHistories = getHistories(history, histories);
@@ -160,17 +165,21 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
const requestMessages = await loadRequestMessages({
messages: filterMessages,
useVision: modelConstantsData.vision && aiChatVision,
useVision: aiChatVision,
origin: requestOrigin
});
const requestBody = llmCompletionsBodyFormat(
{
model: modelConstantsData.model,
stream,
messages: requestMessages,
temperature,
max_tokens,
stream,
messages: requestMessages
top_p: aiChatTopP,
stop: aiChatStopSign,
response_format: aiChatResponseFormat as any,
json_schema: aiChatJsonSchema
},
modelConstantsData
);
@@ -186,12 +195,19 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
});
const { answerText, reasoningText } = await (async () => {
if (res && isStreamResponse) {
if (isStreamResponse) {
if (!res) {
return {
answerText: '',
reasoningText: ''
};
}
// sse response
const { answer, reasoning } = await streamResponse({
res,
stream: response,
aiChatReasoning,
isResponseAnswerText,
workflowStreamResponse
});
@@ -200,26 +216,49 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
reasoningText: reasoning
};
} else {
const unStreamResponse = response as ChatCompletion;
const answer = unStreamResponse.choices?.[0]?.message?.content || '';
const reasoning = aiChatReasoning
? // @ts-ignore
unStreamResponse.choices?.[0]?.message?.reasoning_content || ''
: '';
const { content, reasoningContent } = (() => {
const content = response.choices?.[0]?.message?.content || '';
// @ts-ignore
const reasoningContent: string = response.choices?.[0]?.message?.reasoning_content || '';
// API already parse reasoning content
if (reasoningContent || !aiChatReasoning) {
return {
content,
reasoningContent
};
}
const [think, answer] = parseReasoningContent(content);
return {
content: answer,
reasoningContent: think
};
})();
// Some models do not support streaming
if (stream) {
// Some models do not support streaming
workflowStreamResponse?.({
event: SseResponseEventEnum.fastAnswer,
data: textAdaptGptResponse({
text: answer,
reasoning_content: reasoning
})
});
if (aiChatReasoning && reasoningContent) {
workflowStreamResponse?.({
event: SseResponseEventEnum.fastAnswer,
data: textAdaptGptResponse({
reasoning_content: reasoningContent
})
});
}
if (isResponseAnswerText && content) {
workflowStreamResponse?.({
event: SseResponseEventEnum.fastAnswer,
data: textAdaptGptResponse({
text: content
})
});
}
}
return {
answerText: answer,
reasoningText: reasoning
answerText: content,
reasoningText: reasoningContent
};
}
})();
@@ -231,7 +270,8 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
const AIMessages: ChatCompletionMessageParam[] = [
{
role: ChatCompletionRequestMessageRoleEnum.Assistant,
content: answerText
content: answerText,
reasoning_text: reasoningText // reasoning_text is only recorded for response, but not for request
}
];
@@ -249,7 +289,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
});
return {
answerText,
answerText: answerText.trim(),
reasoningText,
[DispatchNodeResponseKeyEnum.nodeResponse]: {
totalPoints: externalProvider.openaiAccount?.key ? 0 : totalPoints,
@@ -259,11 +299,8 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
outputTokens: outputTokens,
query: `${userChatInput}`,
maxToken: max_tokens,
historyPreview: getHistoryPreview(
chatCompleteMessages,
10000,
modelConstantsData.vision && aiChatVision
),
reasoningText,
historyPreview: getHistoryPreview(chatCompleteMessages, 10000, aiChatVision),
contextTotalLen: completeMessages.length
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
@@ -371,7 +408,7 @@ async function getMultiInput({
return {
documentQuoteText: text,
userFiles: fileLinks.map((url) => parseUrlToFileType(url))
userFiles: fileLinks.map((url) => parseUrlToFileType(url)).filter(Boolean)
};
}
@@ -470,12 +507,14 @@ async function streamResponse({
res,
stream,
workflowStreamResponse,
aiChatReasoning
aiChatReasoning,
isResponseAnswerText
}: {
res: NextApiResponse;
stream: StreamChatType;
workflowStreamResponse?: WorkflowResponseType;
aiChatReasoning?: boolean;
isResponseAnswerText?: boolean;
}) {
const write = responseWriteController({
res,
@@ -483,28 +522,42 @@ async function streamResponse({
});
let answer = '';
let reasoning = '';
const { parsePart, getStartTagBuffer } = parseReasoningStreamContent();
for await (const part of stream) {
if (res.closed) {
stream.controller?.abort();
break;
}
const content = part.choices?.[0]?.delta?.content || '';
const [reasoningContent, content] = parsePart(part, aiChatReasoning);
answer += content;
const reasoningContent = aiChatReasoning
? part.choices?.[0]?.delta?.reasoning_content || ''
: '';
reasoning += reasoningContent;
workflowStreamResponse?.({
write,
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
text: content,
reasoning_content: reasoningContent
})
});
if (aiChatReasoning && reasoningContent) {
workflowStreamResponse?.({
write,
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
reasoning_content: reasoningContent
})
});
}
if (isResponseAnswerText && content) {
workflowStreamResponse?.({
write,
event: SseResponseEventEnum.answer,
data: textAdaptGptResponse({
text: content
})
});
}
}
// if answer is empty, try to get value from startTagBuffer. (Cause: The response content is too short to exceed the minimum parse length)
if (answer === '') {
answer = getStartTagBuffer();
}
return { answer, reasoning };

View File

@@ -6,13 +6,11 @@ import { formatModelChars2Points } from '../../../../support/wallet/usage/utils'
import type { SelectedDatasetType } from '@fastgpt/global/core/workflow/api.d';
import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
import type { ModuleDispatchProps } from '@fastgpt/global/core/workflow/runtime/type';
import { getLLMModel, getEmbeddingModel } from '../../../ai/model';
import { searchDatasetData } from '../../../dataset/search/controller';
import { getEmbeddingModel } from '../../../ai/model';
import { deepRagSearch, defaultSearchDatasetData } from '../../../dataset/search/controller';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { DatasetSearchModeEnum } from '@fastgpt/global/core/dataset/constants';
import { getHistories } from '../utils';
import { datasetSearchQueryExtension } from '../../../dataset/search/utils';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { checkTeamReRankPermission } from '../../../../support/permission/teamLimit';
import { MongoDataset } from '../../../dataset/schema';
@@ -25,13 +23,19 @@ type DatasetSearchProps = ModuleDispatchProps<{
[NodeInputKeyEnum.datasetSimilarity]: number;
[NodeInputKeyEnum.datasetMaxTokens]: number;
[NodeInputKeyEnum.datasetSearchMode]: `${DatasetSearchModeEnum}`;
[NodeInputKeyEnum.userChatInput]: string;
[NodeInputKeyEnum.userChatInput]?: string;
[NodeInputKeyEnum.datasetSearchUsingReRank]: boolean;
[NodeInputKeyEnum.collectionFilterMatch]: string;
[NodeInputKeyEnum.authTmbId]?: boolean;
[NodeInputKeyEnum.datasetSearchUsingExtensionQuery]: boolean;
[NodeInputKeyEnum.datasetSearchExtensionModel]: string;
[NodeInputKeyEnum.datasetSearchExtensionBg]: string;
[NodeInputKeyEnum.collectionFilterMatch]: string;
[NodeInputKeyEnum.authTmbId]: boolean;
[NodeInputKeyEnum.datasetDeepSearch]?: boolean;
[NodeInputKeyEnum.datasetDeepSearchModel]?: string;
[NodeInputKeyEnum.datasetDeepSearchMaxTimes]?: number;
[NodeInputKeyEnum.datasetDeepSearchBg]?: string;
}>;
export type DatasetSearchResponse = DispatchNodeResultType<{
[NodeOutputKeyEnum.datasetQuoteQA]: SearchDataResponseItemType[];
@@ -51,13 +55,18 @@ export async function dispatchDatasetSearch(
limit = 1500,
usingReRank,
searchMode,
userChatInput,
userChatInput = '',
authTmbId = false,
collectionFilterMatch,
datasetSearchUsingExtensionQuery,
datasetSearchExtensionModel,
datasetSearchExtensionBg,
collectionFilterMatch,
authTmbId = false
datasetDeepSearch,
datasetDeepSearchModel,
datasetDeepSearchMaxTimes,
datasetDeepSearchBg
}
} = props as DatasetSearchProps;
@@ -85,25 +94,12 @@ export async function dispatchDatasetSearch(
return emptyResult;
}
// query extension
const extensionModel = datasetSearchUsingExtensionQuery
? getLLMModel(datasetSearchExtensionModel)
: undefined;
const [{ concatQueries, rewriteQuery, aiExtensionResult }, datasetIds] = await Promise.all([
datasetSearchQueryExtension({
query: userChatInput,
extensionModel,
extensionBg: datasetSearchExtensionBg,
histories: getHistories(6, histories)
}),
authTmbId
? filterDatasetsByTmbId({
datasetIds: datasets.map((item) => item.datasetId),
tmbId
})
: Promise.resolve(datasets.map((item) => item.datasetId))
]);
const datasetIds = authTmbId
? await filterDatasetsByTmbId({
datasetIds: datasets.map((item) => item.datasetId),
tmbId
})
: await Promise.resolve(datasets.map((item) => item.datasetId));
if (datasetIds.length === 0) {
return emptyResult;
@@ -116,15 +112,11 @@ export async function dispatchDatasetSearch(
);
// start search
const {
searchRes,
tokens,
usingSimilarityFilter,
usingReRank: searchUsingReRank
} = await searchDatasetData({
const searchData = {
histories,
teamId,
reRankQuery: `${rewriteQuery}`,
queries: concatQueries,
reRankQuery: userChatInput,
queries: [userChatInput],
model: vectorModel.model,
similarity,
limit,
@@ -132,59 +124,106 @@ export async function dispatchDatasetSearch(
searchMode,
usingReRank: usingReRank && (await checkTeamReRankPermission(teamId)),
collectionFilterMatch
});
};
const {
searchRes,
tokens,
usingSimilarityFilter,
usingReRank: searchUsingReRank,
queryExtensionResult,
deepSearchResult
} = datasetDeepSearch
? await deepRagSearch({
...searchData,
datasetDeepSearchModel,
datasetDeepSearchMaxTimes,
datasetDeepSearchBg
})
: await defaultSearchDatasetData({
...searchData,
datasetSearchUsingExtensionQuery,
datasetSearchExtensionModel,
datasetSearchExtensionBg
});
// count bill results
const nodeDispatchUsages: ChatNodeUsageType[] = [];
// vector
const { totalPoints, modelName } = formatModelChars2Points({
model: vectorModel.model,
inputTokens: tokens,
modelType: ModelTypeEnum.embedding
const { totalPoints: embeddingTotalPoints, modelName: embeddingModelName } =
formatModelChars2Points({
model: vectorModel.model,
inputTokens: tokens,
modelType: ModelTypeEnum.embedding
});
nodeDispatchUsages.push({
totalPoints: embeddingTotalPoints,
moduleName: node.name,
model: embeddingModelName,
inputTokens: tokens
});
// Query extension
const { totalPoints: queryExtensionTotalPoints } = (() => {
if (queryExtensionResult) {
const { totalPoints, modelName } = formatModelChars2Points({
model: queryExtensionResult.model,
inputTokens: queryExtensionResult.inputTokens,
outputTokens: queryExtensionResult.outputTokens,
modelType: ModelTypeEnum.llm
});
nodeDispatchUsages.push({
totalPoints,
moduleName: i18nT('common:core.module.template.Query extension'),
model: modelName,
inputTokens: queryExtensionResult.inputTokens,
outputTokens: queryExtensionResult.outputTokens
});
return {
totalPoints
};
}
return {
totalPoints: 0
};
})();
// Deep search
const { totalPoints: deepSearchTotalPoints } = (() => {
if (deepSearchResult) {
const { totalPoints, modelName } = formatModelChars2Points({
model: deepSearchResult.model,
inputTokens: deepSearchResult.inputTokens,
outputTokens: deepSearchResult.outputTokens,
modelType: ModelTypeEnum.llm
});
nodeDispatchUsages.push({
totalPoints,
moduleName: i18nT('common:deep_rag_search'),
model: modelName,
inputTokens: deepSearchResult.inputTokens,
outputTokens: deepSearchResult.outputTokens
});
return {
totalPoints
};
}
return {
totalPoints: 0
};
})();
const totalPoints = embeddingTotalPoints + queryExtensionTotalPoints + deepSearchTotalPoints;
const responseData: DispatchNodeResponseType & { totalPoints: number } = {
totalPoints,
query: concatQueries.join('\n'),
model: modelName,
query: userChatInput,
model: vectorModel.model,
inputTokens: tokens,
similarity: usingSimilarityFilter ? similarity : undefined,
limit,
searchMode,
searchUsingReRank: searchUsingReRank,
quoteList: searchRes
quoteList: searchRes,
queryExtensionResult,
deepSearchResult
};
const nodeDispatchUsages: ChatNodeUsageType[] = [
{
totalPoints,
moduleName: node.name,
model: modelName,
inputTokens: tokens
}
];
if (aiExtensionResult) {
const { totalPoints, modelName } = formatModelChars2Points({
model: aiExtensionResult.model,
inputTokens: aiExtensionResult.inputTokens,
outputTokens: aiExtensionResult.outputTokens,
modelType: ModelTypeEnum.llm
});
responseData.totalPoints += totalPoints;
responseData.inputTokens = aiExtensionResult.inputTokens;
responseData.outputTokens = aiExtensionResult.outputTokens;
responseData.extensionModel = modelName;
responseData.extensionResult =
aiExtensionResult.extensionQueries?.join('\n') ||
JSON.stringify(aiExtensionResult.extensionQueries);
nodeDispatchUsages.push({
totalPoints,
moduleName: 'core.module.template.Query extension',
model: modelName,
inputTokens: aiExtensionResult.inputTokens,
outputTokens: aiExtensionResult.outputTokens
});
}
return {
quoteQA: searchRes,

View File

@@ -232,9 +232,14 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
chatNodeUsages = chatNodeUsages.concat(nodeDispatchUsages);
}
if (toolResponses !== undefined) {
if (toolResponses !== undefined && toolResponses !== null) {
if (Array.isArray(toolResponses) && toolResponses.length === 0) return;
if (typeof toolResponses === 'object' && Object.keys(toolResponses).length === 0) return;
if (
!Array.isArray(toolResponses) &&
typeof toolResponses === 'object' &&
Object.keys(toolResponses).length === 0
)
return;
toolRunResponse = toolResponses;
}
@@ -243,12 +248,17 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
chatAssistantResponse = chatAssistantResponse.concat(assistantResponses);
} else {
if (reasoningText) {
chatAssistantResponse.push({
type: ChatItemValueTypeEnum.reasoning,
reasoning: {
content: reasoningText
}
});
const isResponseReasoningText = inputs.find(
(item) => item.key === NodeInputKeyEnum.aiChatReasoning
)?.value;
if (isResponseReasoningText) {
chatAssistantResponse.push({
type: ChatItemValueTypeEnum.reasoning,
reasoning: {
content: reasoningText
}
});
}
}
if (answerText) {
// save assistant text response

View File

@@ -53,7 +53,7 @@ export const dispatchRunAppNode = async (props: Props): Promise<Response> => {
const userInputFiles = (() => {
if (fileUrlList) {
return fileUrlList.map((url) => parseUrlToFileType(url));
return fileUrlList.map((url) => parseUrlToFileType(url)).filter(Boolean);
}
// Adapt version 4.8.13 upgrade
return files;

View File

@@ -38,10 +38,10 @@ type HttpRequestProps = ModuleDispatchProps<{
[NodeInputKeyEnum.abandon_httpUrl]: string;
[NodeInputKeyEnum.httpMethod]: string;
[NodeInputKeyEnum.httpReqUrl]: string;
[NodeInputKeyEnum.httpHeaders]: PropsArrType[];
[NodeInputKeyEnum.httpParams]: PropsArrType[];
[NodeInputKeyEnum.httpJsonBody]: string;
[NodeInputKeyEnum.httpFormBody]: PropsArrType[];
[NodeInputKeyEnum.httpHeaders]?: PropsArrType[];
[NodeInputKeyEnum.httpParams]?: PropsArrType[];
[NodeInputKeyEnum.httpJsonBody]?: string;
[NodeInputKeyEnum.httpFormBody]?: PropsArrType[];
[NodeInputKeyEnum.httpContentType]: ContentTypes;
[NodeInputKeyEnum.addInputParam]: Record<string, any>;
[NodeInputKeyEnum.httpTimeout]?: number;
@@ -76,10 +76,10 @@ export const dispatchHttp468Request = async (props: HttpRequestProps): Promise<H
params: {
system_httpMethod: httpMethod = 'POST',
system_httpReqUrl: httpReqUrl,
system_httpHeader: httpHeader,
system_httpHeader: httpHeader = [],
system_httpParams: httpParams = [],
system_httpJsonBody: httpJsonBody,
system_httpFormBody: httpFormBody,
system_httpJsonBody: httpJsonBody = '',
system_httpFormBody: httpFormBody = [],
system_httpContentType: httpContentType = ContentTypes.json,
system_httpTimeout: httpTimeout = 60,
[NodeInputKeyEnum.addInputParam]: dynamicInput,
@@ -398,41 +398,6 @@ async function fetchData({
};
}
// function replaceVariable(text: string, obj: Record<string, any>) {
// for (const [key, value] of Object.entries(obj)) {
// if (value === undefined) {
// text = text.replace(new RegExp(`{{(${key})}}`, 'g'), UNDEFINED_SIGN);
// } else {
// const replacement = JSON.stringify(value);
// const unquotedReplacement =
// replacement.startsWith('"') && replacement.endsWith('"')
// ? replacement.slice(1, -1)
// : replacement;
// text = text.replace(new RegExp(`{{(${key})}}`, 'g'), () => unquotedReplacement);
// }
// }
// return text || '';
// }
// function removeUndefinedSign(obj: Record<string, any>) {
// for (const key in obj) {
// if (obj[key] === UNDEFINED_SIGN) {
// obj[key] = undefined;
// } else if (Array.isArray(obj[key])) {
// obj[key] = obj[key].map((item: any) => {
// if (item === UNDEFINED_SIGN) {
// return undefined;
// } else if (typeof item === 'object') {
// removeUndefinedSign(item);
// }
// return item;
// });
// } else if (typeof obj[key] === 'object') {
// removeUndefinedSign(obj[key]);
// }
// }
// return obj;
// }
// Replace some special response from system plugin
async function replaceSystemPluginResponse({
response,

View File

@@ -142,7 +142,7 @@ export const checkQuoteQAValue = (quoteQA?: SearchDataResponseItemType[]) => {
if (quoteQA.length === 0) {
return [];
}
if (quoteQA.some((item) => !item.q)) {
if (quoteQA.some((item) => typeof item !== 'object' || !item.q)) {
return undefined;
}
return quoteQA;

View File

@@ -46,6 +46,7 @@ export async function getUserDetail({
promotionRate: user.promotionRate,
team: tmb,
notificationAccount: tmb.notificationAccount,
permission: tmb.permission
permission: tmb.permission,
contact: user.contact
};
}

View File

@@ -57,6 +57,7 @@ const UserSchema = new Schema({
},
fastgpt_sem: Object,
sourceDomain: String,
contact: String,
/** @deprecated */
avatar: String

View File

@@ -36,6 +36,9 @@ const TeamMemberSchema = new Schema({
type: Date,
default: () => new Date()
},
updateTime: {
type: Date
},
defaultTeam: {
type: Boolean,
default: false

View File

@@ -86,9 +86,12 @@ export async function addSourceMember<T extends { tmbId: string }>({
}): Promise<Array<T & { sourceMember: SourceMemberType }>> {
if (!Array.isArray(list)) return [];
const tmbIdList = list
.map((item) => (item.tmbId ? String(item.tmbId) : undefined))
.filter(Boolean);
const tmbList = await MongoTeamMember.find(
{
_id: { $in: list.map((item) => String(item.tmbId)) }
_id: { $in: tmbIdList }
},
'tmbId name avatar status',
{

View File

@@ -1,6 +1,114 @@
import { UsageSourceEnum } from '@fastgpt/global/support/wallet/usage/constants';
import { MongoUsage } from './schema';
import { ClientSession } from '../../../common/mongo';
import { ClientSession, Types } from '../../../common/mongo';
import { addLog } from '../../../common/system/log';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { ConcatUsageProps, CreateUsageProps } from '@fastgpt/global/support/wallet/usage/api';
import { i18nT } from '../../../../web/i18n/utils';
import { pushConcatBillTask, pushReduceTeamAiPointsTask } from './utils';
import { POST } from '../../../common/api/plusRequest';
import { FastGPTProUrl } from '../../../common/system/constants';
export async function createUsage(data: CreateUsageProps) {
try {
// In FastGPT server
if (FastGPTProUrl) {
await POST('/support/wallet/usage/createUsage', data);
} else if (global.reduceAiPointsQueue) {
// In FastGPT pro server
await MongoUsage.create(data);
pushReduceTeamAiPointsTask({ teamId: data.teamId, totalPoints: data.totalPoints });
if (data.totalPoints === 0) {
addLog.info('0 totalPoints', data);
}
}
} catch (error) {
addLog.error('createUsage error', error);
}
}
export async function concatUsage(data: ConcatUsageProps) {
try {
// In FastGPT server
if (FastGPTProUrl) {
await POST('/support/wallet/usage/concatUsage', data);
} else if (global.reduceAiPointsQueue) {
const {
teamId,
billId,
totalPoints = 0,
listIndex,
inputTokens = 0,
outputTokens = 0
} = data;
// billId is required and valid
if (!billId || !Types.ObjectId.isValid(billId)) return;
// In FastGPT pro server
pushConcatBillTask([
{
billId,
listIndex,
inputTokens,
outputTokens,
totalPoints
}
]);
pushReduceTeamAiPointsTask({ teamId, totalPoints });
if (data.totalPoints === 0) {
addLog.info('0 totalPoints', data);
}
}
} catch (error) {
addLog.error('concatUsage error', error);
}
}
export const createChatUsage = ({
appName,
appId,
pluginId,
teamId,
tmbId,
source,
flowUsages
}: {
appName: string;
appId?: string;
pluginId?: string;
teamId: string;
tmbId: string;
source: UsageSourceEnum;
flowUsages: ChatNodeUsageType[];
}) => {
const totalPoints = flowUsages.reduce((sum, item) => sum + (item.totalPoints || 0), 0);
createUsage({
teamId,
tmbId,
appName,
appId,
pluginId,
totalPoints,
source,
list: flowUsages.map((item) => ({
moduleName: item.moduleName,
amount: item.totalPoints || 0,
model: item.model,
inputTokens: item.inputTokens,
outputTokens: item.outputTokens
}))
});
addLog.debug(`Create chat usage`, {
source,
teamId,
totalPoints
});
return { totalPoints };
};
export const createTrainingUsage = async ({
teamId,
@@ -29,21 +137,21 @@ export const createTrainingUsage = async ({
totalPoints: 0,
list: [
{
moduleName: 'support.wallet.moduleName.index',
moduleName: i18nT('common:support.wallet.moduleName.index'),
model: vectorModel,
amount: 0,
inputTokens: 0,
outputTokens: 0
},
{
moduleName: 'support.wallet.moduleName.qa',
moduleName: i18nT('common:support.wallet.moduleName.qa'),
model: agentModel,
amount: 0,
inputTokens: 0,
outputTokens: 0
},
{
moduleName: 'core.dataset.training.Auto mode',
moduleName: i18nT('common:core.dataset.training.Auto mode'),
model: agentModel,
amount: 0,
inputTokens: 0,

View File

@@ -0,0 +1,12 @@
export type ConcatBillQueueItemType = {
billId: string;
listIndex?: number;
totalPoints: number;
inputTokens: number;
outputTokens: number;
};
declare global {
var reduceAiPointsQueue: { teamId: string; totalPoints: number }[];
var concatBillQueue: ConcatBillQueueItemType[];
}

View File

@@ -1,5 +1,6 @@
import { findAIModel } from '../../../core/ai/model';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
import { ConcatBillQueueItemType } from './type';
export const formatModelChars2Points = ({
model,
@@ -34,3 +35,20 @@ export const formatModelChars2Points = ({
totalPoints
};
};
export const pushReduceTeamAiPointsTask = ({
teamId,
totalPoints
}: {
teamId: string;
totalPoints: number;
}) => {
global.reduceAiPointsQueue.push({
teamId: String(teamId),
totalPoints
});
};
export const pushConcatBillTask = (data: ConcatBillQueueItemType[]) => {
global.concatBillQueue.push(...data);
};

View File

@@ -72,7 +72,7 @@ parentPort?.on(
};
const total =
messages.reduce((sum, item) => {
messages.reduce((sum, item, index) => {
// Evaluates the text of toolcall and functioncall
const functionCallPrompt = (() => {
let prompt = '';
@@ -100,7 +100,13 @@ parentPort?.on(
.join('');
})();
return sum + countPromptTokens(`${contentPrompt}${functionCallPrompt}`, item.role);
// Only the last message computed reasoning_text
const reasoningText = index === messages.length - 1 ? item.reasoning_text || '' : '';
return (
sum +
countPromptTokens(`${reasoningText}${contentPrompt}${functionCallPrompt}`, item.role)
);
}, 0) +
countToolsTokens(tools) +
countToolsTokens(functionCall);

View File

@@ -56,14 +56,15 @@ export const readPdfFile = async ({ buffer }: ReadRawTextByBuffer): Promise<Read
}
};
// @ts-ignore
const loadingTask = pdfjs.getDocument(buffer.buffer);
const doc = await loadingTask.promise;
// Avoid OOM.
let result = '';
const pageArr = Array.from({ length: doc.numPages }, (_, i) => i + 1);
for await (const pageNo of pageArr) {
result += await readPDFPage(doc, pageNo);
for (let i = 0; i < pageArr.length; i++) {
result += await readPDFPage(doc, i + 1);
}
loadingTask.destroy();

Some files were not shown because too many files have changed in this diff Show More