Compare commits
39 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
58fbf78c6e | ||
|
|
ae149069bf | ||
|
|
e5b8dea9ae | ||
|
|
b48eff4212 | ||
|
|
87e7c23da4 | ||
|
|
e6d53e3daa | ||
|
|
922cb433d3 | ||
|
|
a997f277ad | ||
|
|
1f4e5f6d71 | ||
|
|
ce2e926d76 | ||
|
|
bd79e7701f | ||
|
|
82871be054 | ||
|
|
8ba339e78f | ||
|
|
8e9c030600 | ||
|
|
9b8779ba08 | ||
|
|
bfac393ab1 | ||
|
|
76d20b2b76 | ||
|
|
b4933471cd | ||
|
|
c995bccef8 | ||
|
|
3deac290bf | ||
|
|
b7eb4c15de | ||
|
|
41a8536c16 | ||
|
|
1e618502c7 | ||
|
|
e9297c2c6a | ||
|
|
30a89fe4cb | ||
|
|
c41def5fbb | ||
|
|
b596976a96 | ||
|
|
e71708ee76 | ||
|
|
ddddd998c8 | ||
|
|
181b854342 | ||
|
|
40af63b1dd | ||
|
|
d2a56a2fed | ||
|
|
4b9b0dbbb9 | ||
|
|
c0135f5f21 | ||
|
|
8a4715293e | ||
|
|
69dc927a5a | ||
|
|
d5752ddbaa | ||
|
|
048f5a2d53 | ||
|
|
096afef629 |
BIN
docSite/assets/imgs/dingtalk-bot-1.png
Normal file
|
After Width: | Height: | Size: 83 KiB |
BIN
docSite/assets/imgs/dingtalk-bot-2.png
Normal file
|
After Width: | Height: | Size: 141 KiB |
BIN
docSite/assets/imgs/dingtalk-bot-3.png
Normal file
|
After Width: | Height: | Size: 80 KiB |
BIN
docSite/assets/imgs/dingtalk-bot-4.png
Normal file
|
After Width: | Height: | Size: 313 KiB |
BIN
docSite/assets/imgs/dingtalk-bot-5.png
Normal file
|
After Width: | Height: | Size: 152 KiB |
BIN
docSite/assets/imgs/dingtalk-bot-6.png
Normal file
|
After Width: | Height: | Size: 96 KiB |
BIN
docSite/assets/imgs/dingtalk-bot-7.png
Normal file
|
After Width: | Height: | Size: 93 KiB |
BIN
docSite/assets/imgs/dingtalk-bot-8.png
Normal file
|
After Width: | Height: | Size: 112 KiB |
|
Before Width: | Height: | Size: 156 KiB After Width: | Height: | Size: 104 KiB |
|
Before Width: | Height: | Size: 166 KiB After Width: | Height: | Size: 106 KiB |
|
Before Width: | Height: | Size: 85 KiB After Width: | Height: | Size: 51 KiB |
|
Before Width: | Height: | Size: 113 KiB After Width: | Height: | Size: 91 KiB |
|
Before Width: | Height: | Size: 72 KiB After Width: | Height: | Size: 58 KiB |
BIN
docSite/assets/imgs/image-25.png
Normal file
|
After Width: | Height: | Size: 217 KiB |
BIN
docSite/assets/imgs/image-26.png
Normal file
|
After Width: | Height: | Size: 251 KiB |
BIN
docSite/assets/imgs/image-27.png
Normal file
|
After Width: | Height: | Size: 368 KiB |
BIN
docSite/assets/imgs/image-28.png
Normal file
|
After Width: | Height: | Size: 222 KiB |
BIN
docSite/assets/imgs/image-29.png
Normal file
|
After Width: | Height: | Size: 251 KiB |
BIN
docSite/assets/imgs/image-30.png
Normal file
|
After Width: | Height: | Size: 610 KiB |
BIN
docSite/assets/imgs/image-31.png
Normal file
|
After Width: | Height: | Size: 100 KiB |
BIN
docSite/assets/imgs/image-32.png
Normal file
|
After Width: | Height: | Size: 134 KiB |
BIN
docSite/assets/imgs/image-33.png
Normal file
|
After Width: | Height: | Size: 65 KiB |
BIN
docSite/assets/imgs/image-34.png
Normal file
|
After Width: | Height: | Size: 44 KiB |
BIN
docSite/assets/imgs/image-35.png
Normal file
|
After Width: | Height: | Size: 64 KiB |
BIN
docSite/assets/imgs/image-36.png
Normal file
|
After Width: | Height: | Size: 113 KiB |
BIN
docSite/assets/imgs/image-37.png
Normal file
|
After Width: | Height: | Size: 135 KiB |
BIN
docSite/assets/imgs/image-38.png
Normal file
|
After Width: | Height: | Size: 145 KiB |
BIN
docSite/assets/imgs/image-39.png
Normal file
|
After Width: | Height: | Size: 103 KiB |
BIN
docSite/assets/imgs/image-40.png
Normal file
|
After Width: | Height: | Size: 126 KiB |
BIN
docSite/assets/imgs/image-41.png
Normal file
|
After Width: | Height: | Size: 250 KiB |
BIN
docSite/assets/imgs/image-42.png
Normal file
|
After Width: | Height: | Size: 189 KiB |
BIN
docSite/assets/imgs/image-43.png
Normal file
|
After Width: | Height: | Size: 136 KiB |
BIN
docSite/assets/imgs/image-44.png
Normal file
|
After Width: | Height: | Size: 136 KiB |
BIN
docSite/assets/imgs/image-45.png
Normal file
|
After Width: | Height: | Size: 139 KiB |
BIN
docSite/assets/imgs/image-46.png
Normal file
|
After Width: | Height: | Size: 178 KiB |
BIN
docSite/assets/imgs/image-47.png
Normal file
|
After Width: | Height: | Size: 178 KiB |
BIN
docSite/assets/imgs/image-48.png
Normal file
|
After Width: | Height: | Size: 207 KiB |
BIN
docSite/assets/imgs/image-49.png
Normal file
|
After Width: | Height: | Size: 83 KiB |
BIN
docSite/assets/imgs/image-50.png
Normal file
|
After Width: | Height: | Size: 176 KiB |
BIN
docSite/assets/imgs/image-51.png
Normal file
|
After Width: | Height: | Size: 246 KiB |
BIN
docSite/assets/imgs/image-52.png
Normal file
|
After Width: | Height: | Size: 146 KiB |
BIN
docSite/assets/imgs/image-53.png
Normal file
|
After Width: | Height: | Size: 136 KiB |
BIN
docSite/assets/imgs/image-54.png
Normal file
|
After Width: | Height: | Size: 150 KiB |
BIN
docSite/assets/imgs/searxng_plugin_guide1.png
Normal file
|
After Width: | Height: | Size: 324 KiB |
@@ -21,16 +21,16 @@ weight: 708
|
||||
"lafEnv": "https://laf.dev" // laf环境。 https://laf.run (杭州阿里云) ,或者私有化的laf环境。如果使用 Laf openapi 功能,需要最新版的 laf 。
|
||||
},
|
||||
"systemEnv": {
|
||||
"vectorMaxProcess": 15,
|
||||
"qaMaxProcess": 15,
|
||||
"vectorMaxProcess": 15, // 向量处理线程数量
|
||||
"qaMaxProcess": 15, // 问答拆分线程数量
|
||||
"tokenWorkers": 50, // Token 计算线程保持数,会持续占用内存,不能设置太大。
|
||||
"pgHNSWEfSearch": 100 // 向量搜索参数。越大,搜索越精确,但是速度越慢。设置为100,有99%+精度。
|
||||
},
|
||||
"llmModels": [
|
||||
{
|
||||
"provider": "OpenAI", // 模型提供商,主要用于分类展示,目前已经内置提供商包括:https://github.com/labring/FastGPT/blob/main/packages/global/core/ai/provider.ts, 可 pr 提供新的提供商,或直接填写 Other
|
||||
"model": "gpt-4o-mini", // 模型名(对应OneAPI中渠道的模型名)
|
||||
"name": "gpt-4o-mini", // 模型别名
|
||||
"avatar": "/imgs/model/openai.svg", // 模型的logo
|
||||
"maxContext": 125000, // 最大上下文
|
||||
"maxResponse": 16000, // 最大回复
|
||||
"quoteMaxToken": 120000, // 最大引用内容
|
||||
@@ -38,7 +38,7 @@ weight: 708
|
||||
"charsPointsPrice": 0, // n积分/1k token(商业版)
|
||||
"censor": false, // 是否开启敏感校验(商业版)
|
||||
"vision": true, // 是否支持图片输入
|
||||
"datasetProcess": true, // 是否设置为知识库处理模型(QA),务必保证至少有一个为true,否则知识库会报错
|
||||
"datasetProcess": true, // 是否设置为文本理解模型(QA),务必保证至少有一个为true,否则知识库会报错
|
||||
"usedInClassify": true, // 是否用于问题分类(务必保证至少有一个为true)
|
||||
"usedInExtractFields": true, // 是否用于内容提取(务必保证至少有一个为true)
|
||||
"usedInToolCall": true, // 是否用于工具调用(务必保证至少有一个为true)
|
||||
@@ -48,12 +48,13 @@ weight: 708
|
||||
"customCQPrompt": "", // 自定义文本分类提示词(不支持工具和函数调用的模型
|
||||
"customExtractPrompt": "", // 自定义内容提取提示词
|
||||
"defaultSystemChatPrompt": "", // 对话默认携带的系统提示词
|
||||
"defaultConfig": {} // 请求API时,携带一些默认配置(比如 GLM4 的 top_p)
|
||||
"defaultConfig": {}, // 请求API时,携带一些默认配置(比如 GLM4 的 top_p)
|
||||
"fieldMap": {} // 字段映射(o1 模型需要把 max_tokens 映射为 max_completion_tokens)
|
||||
},
|
||||
{
|
||||
"provider": "OpenAI",
|
||||
"model": "gpt-4o",
|
||||
"name": "gpt-4o",
|
||||
"avatar": "/imgs/model/openai.svg",
|
||||
"maxContext": 125000,
|
||||
"maxResponse": 4000,
|
||||
"quoteMaxToken": 120000,
|
||||
@@ -71,14 +72,15 @@ weight: 708
|
||||
"customCQPrompt": "",
|
||||
"customExtractPrompt": "",
|
||||
"defaultSystemChatPrompt": "",
|
||||
"defaultConfig": {}
|
||||
"defaultConfig": {},
|
||||
"fieldMap": {}
|
||||
},
|
||||
{
|
||||
"provider": "OpenAI",
|
||||
"model": "o1-mini",
|
||||
"name": "o1-mini",
|
||||
"avatar": "/imgs/model/openai.svg",
|
||||
"maxContext": 125000,
|
||||
"maxResponse": 4000,
|
||||
"maxResponse": 65000,
|
||||
"quoteMaxToken": 120000,
|
||||
"maxTemperature": 1.2,
|
||||
"charsPointsPrice": 0,
|
||||
@@ -99,11 +101,11 @@ weight: 708
|
||||
}
|
||||
},
|
||||
{
|
||||
"provider": "OpenAI",
|
||||
"model": "o1-preview",
|
||||
"name": "o1-preview",
|
||||
"avatar": "/imgs/model/openai.svg",
|
||||
"maxContext": 125000,
|
||||
"maxResponse": 4000,
|
||||
"maxResponse": 32000,
|
||||
"quoteMaxToken": 120000,
|
||||
"maxTemperature": 1.2,
|
||||
"charsPointsPrice": 0,
|
||||
@@ -126,21 +128,18 @@ weight: 708
|
||||
],
|
||||
"vectorModels": [
|
||||
{
|
||||
"model": "text-embedding-ada-002", // 模型名(与OneAPI对应)
|
||||
"name": "Embedding-2", // 模型展示名
|
||||
"avatar": "/imgs/model/openai.svg", // logo
|
||||
"charsPointsPrice": 0, // n积分/1k token
|
||||
"defaultToken": 700, // 默认文本分割时候的 token
|
||||
"maxToken": 3000, // 最大 token
|
||||
"weight": 100, // 优先训练权重
|
||||
"defaultConfig": {}, // 自定义额外参数。例如,如果希望使用 embedding3-large 的话,可以传入 dimensions:1024,来返回1024维度的向量。(目前必须小于1536维度)
|
||||
"dbConfig": {}, // 存储时的额外参数(非对称向量模型时候需要用到)
|
||||
"queryConfig": {} // 查询时的额外参数
|
||||
"provider": "OpenAI",
|
||||
"model": "text-embedding-3-small",
|
||||
"name": "text-embedding-3-small",
|
||||
"charsPointsPrice": 0,
|
||||
"defaultToken": 512,
|
||||
"maxToken": 3000,
|
||||
"weight": 100
|
||||
},
|
||||
{
|
||||
"provider": "OpenAI",
|
||||
"model": "text-embedding-3-large",
|
||||
"name": "text-embedding-3-large",
|
||||
"avatar": "/imgs/model/openai.svg",
|
||||
"charsPointsPrice": 0,
|
||||
"defaultToken": 512,
|
||||
"maxToken": 3000,
|
||||
@@ -150,13 +149,16 @@ weight: 708
|
||||
}
|
||||
},
|
||||
{
|
||||
"model": "text-embedding-3-small",
|
||||
"name": "text-embedding-3-small",
|
||||
"avatar": "/imgs/model/openai.svg",
|
||||
"charsPointsPrice": 0,
|
||||
"defaultToken": 512,
|
||||
"maxToken": 3000,
|
||||
"weight": 100
|
||||
"provider": "OpenAI",
|
||||
"model": "text-embedding-ada-002", // 模型名(与OneAPI对应)
|
||||
"name": "Embedding-2", // 模型展示名
|
||||
"charsPointsPrice": 0, // n积分/1k token
|
||||
"defaultToken": 700, // 默认文本分割时候的 token
|
||||
"maxToken": 3000, // 最大 token
|
||||
"weight": 100, // 优先训练权重
|
||||
"defaultConfig": {}, // 自定义额外参数。例如,如果希望使用 embedding3-large 的话,可以传入 dimensions:1024,来返回1024维度的向量。(目前必须小于1536维度)
|
||||
"dbConfig": {}, // 存储时的额外参数(非对称向量模型时候需要用到)
|
||||
"queryConfig": {} // 查询时的额外参数
|
||||
}
|
||||
],
|
||||
"reRankModels": [],
|
||||
@@ -183,25 +185,33 @@ weight: 708
|
||||
}
|
||||
```
|
||||
|
||||
## 关于模型 logo
|
||||
## 模型提供商
|
||||
|
||||
统一放置在项目的`public/imgs/model/xxx`目录中,目前内置了以下几种,如果有需要,可以PR增加。默认logo为 Hugging face 的 logo~
|
||||
为了方便模型分类展示,FastGPT 内置了部分模型提供商的名字和 Logo。如果你期望补充提供商,可[提交 Issue](https://github.com/labring/FastGPT/issues),并提供几个信息:
|
||||
|
||||
1. 厂商官网地址
|
||||
2. 厂商 SVG logo,建议是正方形图片。
|
||||
|
||||
目前已支持的提供商, 复制 "-" 之前的字符串,作为 provider 的值。
|
||||
|
||||
- OpenAI
|
||||
- Claude
|
||||
- Gemini
|
||||
- MistralAI
|
||||
- Qwen - 通义千问
|
||||
- Doubao - 豆包
|
||||
- ChatGLM - 智谱
|
||||
- DeepSeek - 深度求索
|
||||
- Moonshot - 月之暗面
|
||||
- MiniMax
|
||||
- SparkDesk - 讯飞星火
|
||||
- Hunyuan - 腾讯混元
|
||||
- Baichuan - 百川
|
||||
- Yi - 零一万物
|
||||
- Ernie - 文心一言
|
||||
- Ollama
|
||||
- Other - 其他
|
||||
|
||||
- /imgs/model/baichuan.svg - 百川智能
|
||||
- /imgs/model/chatglm.svg - 智谱清言
|
||||
- /imgs/model/claude.svg - claude
|
||||
- /imgs/model/deepseek.svg - deepseek
|
||||
- /imgs/model/doubao.svg - 火山豆包
|
||||
- /imgs/model/ernie.svg - 文心一言
|
||||
- /imgs/model/gemini.svg - gemini
|
||||
- /imgs/model/huggingface.svg - Hugging face【默认logo】
|
||||
- /imgs/model/minimax.svg - minimax
|
||||
- /imgs/model/moonshot.svg - 月之暗面
|
||||
- /imgs/model/openai.svg - OpenAI GPT
|
||||
- /imgs/model/qwen.svg - 通义千问
|
||||
- /imgs/model/sparkDesk.svg - 讯飞星火
|
||||
- /imgs/model/yi.svg - 零一万物
|
||||
-
|
||||
|
||||
## 特殊模型
|
||||
|
||||
|
||||
@@ -866,6 +866,8 @@ curl --location --request DELETE 'http://localhost:3000/api/core/chat/delHistory
|
||||
|
||||
### 清空所有历史记录
|
||||
|
||||
仅会清空通过 API Key 创建的对话历史记录,不会清空在线使用、分享链接等其他来源的对话历史记录。
|
||||
|
||||
{{< tabs tabTotal="3" >}}
|
||||
{{< tab tabName="请求示例" >}}
|
||||
{{< markdownify >}}
|
||||
@@ -1313,6 +1315,83 @@ curl --location --request POST 'http://localhost:3000/api/core/chat/feedback/upd
|
||||
|
||||
## 猜你想问
|
||||
|
||||
**4.8.16 后新版接口**
|
||||
|
||||
新版猜你想问,必须包含 appId 和 chatId 的参数才可以进行使用。会自动根据 chatId 去拉取最近 6 轮对话记录作为上下文来引导回答。
|
||||
|
||||
{{< tabs tabTotal="3" >}}
|
||||
{{< tab tabName="请求示例" >}}
|
||||
{{< markdownify >}}
|
||||
|
||||
```bash
|
||||
curl --location --request POST 'http://localhost:3000/api/core/ai/agent/v2/createQuestionGuide' \
|
||||
--header 'Authorization: Bearer {{apikey}}' \
|
||||
--header 'Content-Type: application/json' \
|
||||
--data-raw '{
|
||||
"appId": "appId",
|
||||
"chatId": "chatId",
|
||||
"questionGuide": {
|
||||
"open": true,
|
||||
"model": "GPT-4o-mini",
|
||||
"customPrompt": "你是一个智能助手,请根据用户的问题生成猜你想问。"
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
{{< /markdownify >}}
|
||||
{{< /tab >}}
|
||||
|
||||
{{< tab tabName="参数说明" >}}
|
||||
{{< markdownify >}}
|
||||
|
||||
{{% alert icon=" " context="success" %}}
|
||||
|
||||
| 参数名 | 类型 | 必填 | 说明 |
|
||||
| --- | --- | --- | --- |
|
||||
| appId | string | ✅ | 应用 Id |
|
||||
| chatId | string | ✅ | 对话 Id |
|
||||
| questionGuide | object | | 自定义配置,不传的话,则会根据 appId,取最新发布版本的配置 |
|
||||
|
||||
```ts
|
||||
type CreateQuestionGuideParams = OutLinkChatAuthProps & {
|
||||
appId: string;
|
||||
chatId: string;
|
||||
questionGuide?: {
|
||||
open: boolean;
|
||||
model?: string;
|
||||
customPrompt?: string;
|
||||
};
|
||||
};
|
||||
```
|
||||
|
||||
{{% /alert %}}
|
||||
|
||||
{{< /markdownify >}}
|
||||
{{< /tab >}}
|
||||
|
||||
{{< tab tabName="响应示例" >}}
|
||||
{{< markdownify >}}
|
||||
|
||||
```json
|
||||
{
|
||||
"code": 200,
|
||||
"statusText": "",
|
||||
"message": "",
|
||||
"data": [
|
||||
"你对AI有什么看法?",
|
||||
"想了解AI的应用吗?",
|
||||
"你希望AI能做什么?"
|
||||
]
|
||||
}
|
||||
```
|
||||
{{< /markdownify >}}
|
||||
{{< /tab >}}
|
||||
{{< /tabs >}}
|
||||
|
||||
---
|
||||
|
||||
**4.8.16 前旧版接口:**
|
||||
|
||||
{{< tabs tabTotal="3" >}}
|
||||
{{< tab tabName="请求示例" >}}
|
||||
{{< markdownify >}}
|
||||
@@ -1369,3 +1448,5 @@ curl --location --request POST 'http://localhost:3000/api/core/ai/agent/createQu
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: '升级到 V4.3(需要初始化)'
|
||||
title: '升级到 V4.3(包含升级脚本)'
|
||||
description: 'FastGPT 从旧版本升级到 V4.3 操作指南'
|
||||
icon: 'upgrade'
|
||||
draft: false
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: '升级到 V4.4(需要初始化)'
|
||||
title: '升级到 V4.4(包含升级脚本)'
|
||||
description: 'FastGPT 从旧版本升级到 V4.4 操作指南'
|
||||
icon: 'upgrade'
|
||||
draft: false
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: '升级到 V4.4.1(需要初始化)'
|
||||
title: '升级到 V4.4.1(包含升级脚本)'
|
||||
description: 'FastGPT 从旧版本升级到 V4.4.1 操作指南'
|
||||
icon: 'upgrade'
|
||||
draft: false
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: '升级到 V4.4.2(需要初始化)'
|
||||
title: '升级到 V4.4.2(包含升级脚本)'
|
||||
description: 'FastGPT 从旧版本升级到 V4.4.2 操作指南'
|
||||
icon: 'upgrade'
|
||||
draft: false
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: 'V4.4.5(需要初始化)'
|
||||
title: 'V4.4.5(包含升级脚本)'
|
||||
description: 'FastGPT V4.4.5 更新'
|
||||
icon: 'upgrade'
|
||||
draft: false
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: 'V4.6(需要初始化)'
|
||||
title: 'V4.6(包含升级脚本)'
|
||||
description: 'FastGPT V4.6 更新'
|
||||
icon: 'upgrade'
|
||||
draft: false
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: 'V4.6.2(需要初始化)'
|
||||
title: 'V4.6.2(包含升级脚本)'
|
||||
description: 'FastGPT V4.6.2'
|
||||
icon: 'upgrade'
|
||||
draft: false
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: 'V4.6.3(需要初始化)'
|
||||
title: 'V4.6.3(包含升级脚本)'
|
||||
description: 'FastGPT V4.6.3'
|
||||
icon: 'upgrade'
|
||||
draft: false
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: 'V4.6.4(需要初始化)'
|
||||
title: 'V4.6.4(包含升级脚本)'
|
||||
description: 'FastGPT V4.6.4'
|
||||
icon: 'upgrade'
|
||||
draft: false
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: 'V4.6.9(需要初始化)'
|
||||
title: 'V4.6.9(包含升级脚本)'
|
||||
description: 'FastGPT V4.6.9更新说明'
|
||||
icon: 'upgrade'
|
||||
draft: false
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: 'V4.7.1(需要初始化)'
|
||||
title: 'V4.7.1(包含升级脚本)'
|
||||
description: 'FastGPT V4.7.1 更新说明'
|
||||
icon: 'upgrade'
|
||||
draft: false
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: 'V4.8.1(需要初始化)'
|
||||
title: 'V4.8.1(包含升级脚本)'
|
||||
description: 'FastGPT V4.8.1 更新说明'
|
||||
icon: 'upgrade'
|
||||
draft: false
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: 'V4.8.10(需要初始化)'
|
||||
title: 'V4.8.10(包含升级脚本)'
|
||||
description: 'FastGPT V4.8.10 更新说明'
|
||||
icon: 'upgrade'
|
||||
draft: false
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: 'V4.8.12(需要初始化)'
|
||||
title: 'V4.8.12(包含升级脚本)'
|
||||
description: 'FastGPT V4.8.12 更新说明'
|
||||
icon: 'upgrade'
|
||||
draft: false
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: 'V4.8.15(初始化)'
|
||||
title: 'V4.8.15(包含升级脚本)'
|
||||
description: 'FastGPT V4.8.15 更新说明'
|
||||
icon: 'upgrade'
|
||||
draft: false
|
||||
@@ -23,23 +23,35 @@ weight: 809
|
||||
|
||||
## 升级指南
|
||||
|
||||
- 更新 FastGPT 镜像 tag: v4.8.15
|
||||
- 更新 FastGPT 商业版镜像 tag: v4.8.15 (fastgpt-pro镜像)
|
||||
- 更新 fastgpt 镜像 tag: v4.8.15-fix3
|
||||
- 更新 fastgpt-pro 商业版镜像 tag: v4.8.15
|
||||
- Sandbox 镜像,可以不更新
|
||||
|
||||
|
||||
## 运行初始化脚本
|
||||
## 运行升级脚本
|
||||
|
||||
从任意终端,发起 1 个 HTTP 请求。其中 {{rootkey}} 替换成环境变量里的 `rootkey`;{{host}} 替换成**FastGPT 域名**。
|
||||
|
||||
```bash
|
||||
curl --location --request POST 'https://{{host}}/admin/initv4815' \
|
||||
curl --location --request POST 'https://{{host}}/api/admin/initv4815' \
|
||||
--header 'rootkey: {{rootkey}}' \
|
||||
--header 'Content-Type: application/json'
|
||||
```
|
||||
|
||||
会重置应用定时执行的字段,把 null 去掉,减少索引大小。
|
||||
|
||||
----
|
||||
|
||||
从任意终端,发起 1 个 HTTP 请求。其中 {{rootkey}} 替换成环境变量里的 `rootkey`;{{host}} 替换成**fastgpt-pro域名**。
|
||||
|
||||
```bash
|
||||
curl --location --request POST 'https://{{host}}/api/admin/init/refreshFreeUser' \
|
||||
--header 'rootkey: {{rootkey}}' \
|
||||
--header 'Content-Type: application/json'
|
||||
```
|
||||
|
||||
重新计算一次免费版用户的时长,之前有版本升级时没有重新计算时间,导致会误发通知。
|
||||
|
||||
|
||||
## 完整更新内容
|
||||
|
||||
@@ -61,4 +73,4 @@ curl --location --request POST 'https://{{host}}/admin/initv4815' \
|
||||
16. 修复 - 语音播放鉴权问题。
|
||||
17. 修复 - 插件应用知识库引用上限始终为 3000
|
||||
18. 修复 - 工作流编辑记录存储上限,去掉本地存储,增加异常离开时,强制自动保存。
|
||||
19. 修复 - 工作流特殊变量替换问题。($开头的字符串无法替换)
|
||||
19. 修复 - 工作流特殊变量替换问题。($开头的字符串无法替换)
|
||||
|
||||
71
docSite/content/zh-cn/docs/development/upgrading/4816.md
Normal file
@@ -0,0 +1,71 @@
|
||||
---
|
||||
title: 'V4.8.16(进行中)'
|
||||
description: 'FastGPT V4.8.16 更新说明'
|
||||
icon: 'upgrade'
|
||||
draft: false
|
||||
toc: true
|
||||
weight: 808
|
||||
---
|
||||
|
||||
## 更新指南
|
||||
|
||||
### 1. 更新镜像:
|
||||
|
||||
- 更新 fastgpt 镜像 tag: v4.8.16-beta
|
||||
- 更新 fastgpt-pro 商业版镜像 tag: v4.8.16-beta
|
||||
- Sandbox 镜像 tag: v4.8.16-beta
|
||||
|
||||
### 2. 更新配置文件
|
||||
|
||||
参考最新的[配置文件](/docs/development/configuration/),更新 `config.json` 或 admin 中模型文件配置。给 LLMModel 和 VectorModel 增加 `provider` 字段,以便进行模型分类。例如:
|
||||
|
||||
```json
|
||||
{
|
||||
"provider": "OpenAI", // 这是新增的
|
||||
"model": "gpt-4o",
|
||||
"name": "gpt-4o",
|
||||
"maxContext": 125000,
|
||||
"maxResponse": 4000,
|
||||
"quoteMaxToken": 120000,
|
||||
"maxTemperature": 1.2,
|
||||
"charsPointsPrice": 0,
|
||||
"censor": false,
|
||||
"vision": true,
|
||||
"datasetProcess": true,
|
||||
"usedInClassify": true,
|
||||
"usedInExtractFields": true,
|
||||
"usedInToolCall": true,
|
||||
"usedInQueryExtension": true,
|
||||
"toolChoice": true,
|
||||
"functionCall": false,
|
||||
"customCQPrompt": "",
|
||||
"customExtractPrompt": "",
|
||||
"defaultSystemChatPrompt": "",
|
||||
"defaultConfig": {},
|
||||
"fieldMap": {}
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
|
||||
## 完整更新内容
|
||||
|
||||
1. 新增 - SearXNG 搜索插件[点击查看教程](/docs/guide/plugins/searxng_plugin_guide/)
|
||||
2. 新增 - 商业版支持 API 知识库和链接集合定时同步。
|
||||
3. 新增 - 猜你想问支持选择模型和自定义提示词。
|
||||
4. 新增 - 钉钉和企微机器人 webhook 插件。
|
||||
5. 新增 - 商业版支持钉钉 SSO 登录配置。[点击查看教程](/docs/guide/admin/sso_dingtalk/)
|
||||
6. 新增 - 商业版支持飞书和语雀知识库导入。[点击查看教程](/docs/guide/knowledge_base/lark_dataset/)
|
||||
7. 新增 - sandbox 新增 createHmac 加密全局方法。
|
||||
8. 新增 - 工作流右键支持全部折叠。
|
||||
9. 优化 - 模型选择器。
|
||||
10. 优化 - SSR 渲染,预判断是移动端还是 pc 端,减少页面抖动。
|
||||
11. 优化 - 工作流/简易模式变量初始化代码,去除监听初始化,避免因渲染顺序不一致导致的失败。
|
||||
12. 优化 - 工作流获取数据类型不一致数据时,增加类型转化,避免 undefined。
|
||||
13. 修复 - 无法自动切换默认语言。增加分享链接,强制执行一次切换默认语言。
|
||||
14. 修复 - 数组选择器自动兼容 4.8.13 以前的数据。
|
||||
15. 修复 - 站点同步知识库,链接同步时未使用选择器。
|
||||
16. 修复 - 简易模式转工作流,没有把系统配置项转化。
|
||||
17. 修复 - 插件独立运行,变量初始值未赋上。
|
||||
18. 修复 - 工作流使用弹窗组件时,关闭弹窗后,有时候会出现页面偏移。
|
||||
19. 修复 - 插件调试时,日志未保存插件输入参数。
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: 'V4.8.4(需要初始化)'
|
||||
title: 'V4.8.4(包含升级脚本)'
|
||||
description: 'FastGPT V4.8.4 更新说明'
|
||||
icon: 'upgrade'
|
||||
draft: false
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: 'V4.8.5(需要初始化)'
|
||||
title: 'V4.8.5(包含升级脚本)'
|
||||
description: 'FastGPT V4.8.5 更新说明'
|
||||
icon: 'upgrade'
|
||||
draft: false
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: 'V4.8.6(需要初始化)'
|
||||
title: 'V4.8.6(包含升级脚本)'
|
||||
description: 'FastGPT V4.8.6 更新说明'
|
||||
icon: 'upgrade'
|
||||
draft: false
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: 'V4.8.8(需要初始化)'
|
||||
title: 'V4.8.8(包含升级脚本)'
|
||||
description: 'FastGPT V4.8.8 更新说明'
|
||||
icon: 'upgrade'
|
||||
draft: false
|
||||
|
||||
@@ -55,14 +55,14 @@ docker-compose up -d
|
||||
|
||||
## 执行升级初始化脚本
|
||||
|
||||
镜像更新完后,可以查看文档中的`版本介绍`,通常需要执行升级脚本的版本都会标明`需要初始化`,打开对应的文档,参考说明执行初始化脚本即可,大部分时候都是需要发送一个`POST`请求。
|
||||
镜像更新完后,可以查看文档中的`版本介绍`,通常需要执行升级脚本的版本都会标明`包含升级脚本`,打开对应的文档,参考说明执行**升级脚本**即可,大部分时候都是需要发送一个`POST`请求。
|
||||
|
||||
|
||||
## QA
|
||||
|
||||
### 为什么需要初始化
|
||||
### 为什么需要执行升级脚本
|
||||
|
||||
数据表出现大幅度变更,无法通过设置默认值,或复杂度较高时,会通过初始化来更新部分数据表字段。
|
||||
数据表出现大幅度变更,无法通过设置默认值,或复杂度较高时,会通过升级脚本来更新部分数据表字段。
|
||||
严格按初始化步骤进行操作,不会造成旧数据丢失。但在初始化过程中,如果数据量大,需要初始化的时间较长,这段时间可能会造成服务无法正常使用。
|
||||
|
||||
### {{host}} 是什么
|
||||
|
||||
9
docSite/content/zh-cn/docs/guide/admin/_index.md
Normal file
@@ -0,0 +1,9 @@
|
||||
---
|
||||
weight: 490
|
||||
title: '商业版后台'
|
||||
description: '商业版后台使用教程'
|
||||
icon: 'chat_bubble'
|
||||
draft: false
|
||||
images: []
|
||||
---
|
||||
<!-- 470 ~ 500 -->
|
||||
44
docSite/content/zh-cn/docs/guide/admin/sso_dingtalk.md
Normal file
@@ -0,0 +1,44 @@
|
||||
---
|
||||
weight: 490
|
||||
title: '钉钉 SSO 配置'
|
||||
description: '钉钉 SSO 登录'
|
||||
icon: 'chat_bubble'
|
||||
draft: false
|
||||
images: []
|
||||
---
|
||||
|
||||
## 1. 注册钉钉应用
|
||||
|
||||
登录 [钉钉开放平台](https://open-dev.dingtalk.com/fe/app?hash=%23%2Fcorp%2Fapp#/corp/app),创建一个应用。
|
||||
|
||||

|
||||
|
||||
## 2. 配置钉钉应用安全设置
|
||||
|
||||
点击进入创建好的应用后,点开`安全设置`,配置出口 IP(服务器 IP),和重定向 URL。重定向 URL 填写逻辑:
|
||||
|
||||
`{{fastgpt 域名}}/login/provider`
|
||||
|
||||

|
||||
|
||||
## 3. 设置钉钉应用权限
|
||||
|
||||
点击进入创建好的应用后,点开`权限设置`,开放两个权限: `个人手机号信息`和`通讯录个人信息读权限`
|
||||
|
||||

|
||||
|
||||
## 4. 发布应用
|
||||
|
||||
点击进入创建好的应用后,点开`版本管理与发布`,随便创建一个新版本即可。
|
||||
|
||||
## 5. 在 FastGPT Admin 配置钉钉应用 id
|
||||
|
||||
名字都是对应上,直接填写即可。
|
||||
|
||||
| | |
|
||||
| --- | --- |
|
||||
| |  |
|
||||
|
||||
## 6. 测试
|
||||
|
||||

|
||||
@@ -11,11 +11,14 @@ weight: 104
|
||||
|
||||
| | | |
|
||||
| --- | --- | --- |
|
||||
|  |  |  |
|
||||
|  |  |  |
|
||||
|
||||
## 返回AI内容(高级编排特有)
|
||||
## 流响应(高级编排 AI 对话 特有)
|
||||
|
||||
这是一个开关,打开的时候,当 AI 对话模块运行时,会将其输出的内容返回到浏览器(API响应);如果关闭,AI 输出的内容不会返回到浏览器,但是生成的内容仍可以通过【AI回复】进行输出。你可以将【AI回复】连接到其他模块中。
|
||||
旧版名字叫做:返回 AI 内容;新版改名:流响应。
|
||||
|
||||
这是一个开关,打开的时候,当 AI 对话模块运行时,会将其输出的内容返回到浏览器(API响应);
|
||||
如果关闭,会强制使用非流模式调用模型,并且 AI 输出的内容不会返回到浏览器,但是生成的内容仍可以通过【AI回复】进行输出。你可以将【AI回复】连接到其他模块中进行二次使用。
|
||||
|
||||
### 最大上下文
|
||||
|
||||
@@ -33,13 +36,25 @@ weight: 104
|
||||
|
||||
最大回复 token 数量。注意,是回复的Tokens!不是上下文 tokens。
|
||||
|
||||
通常,回复上限=min(模型允许的最大回复上限, 最大上下文-已用上下文)
|
||||
|
||||
所以,一般配置模型时,不会把最大上下文配置成模型实际最大上下文,而是预留预定空间给回答,例如 128k 模型,可以配置 max_context=115000
|
||||
|
||||
### 系统提示词
|
||||
|
||||
被放置在上下文数组的最前面,role 为 system,用于引导模型。
|
||||
|
||||
### 最大对话轮数(仅简易模式)
|
||||
|
||||
可以配置模型支持的最大对话轮数,如果模型的超出上下文,系统会自动截断,尽可能保证不超模型上下文。
|
||||
|
||||
所以尽管配置 30 轮对话,实际运行时候,不一定会达到 30 轮。
|
||||
|
||||
## 引用模板 & 引用提示词
|
||||
|
||||
这两个参数与知识库问答场景相关,可以控制知识库相关的提示词。
|
||||
进行知识库搜索后,你可以自定义组织检索结果构成的提示词,这个配置,仅工作流中 AI 对话节点可用。并且,只会在有引用知识库内容时才会生效。
|
||||
|
||||

|
||||
|
||||
### AI 对话消息组成
|
||||
|
||||
|
||||
@@ -0,0 +1,59 @@
|
||||
---
|
||||
title: '飞书知识库'
|
||||
description: 'FastGPT 飞书知识库功能介绍和使用方式'
|
||||
icon: 'language'
|
||||
draft: false
|
||||
toc: true
|
||||
weight: 405
|
||||
---
|
||||
|
||||
| | |
|
||||
| --- | --- |
|
||||
|  |  |
|
||||
|
||||
FastGPT v4.8.16 版本开始,商业版用户支持飞书知识库导入,用户可以通过配置飞书应用的 appId 和 appSecret,并选中一个**文档空间的顶层文件夹**来导入飞书知识库。目前处于测试阶段,部分交互有待优化。
|
||||
|
||||
由于飞书限制,无法直接获取所有文档内容,目前仅可以获取共享空间下文件目录的内容,无法获取个人空间和知识库里的内容。
|
||||
|
||||
|
||||
## 1. 创建飞书应用
|
||||
|
||||
打开 [飞书开放平台](https://open.feishu.cn/?lang=zh-CN),点击**创建应用**,选择**自建应用**,然后填写应用名称。
|
||||
|
||||
## 2. 配置应用权限
|
||||
|
||||
创建应用后,进入应用可以配置相关权限,这里需要增加两个权限:
|
||||
|
||||
1. 获取云空间文件夹下的云文档清单
|
||||
2. 查看新版文档
|
||||
|
||||

|
||||
|
||||
## 3. 获取 appId 和 appSecret
|
||||
|
||||

|
||||
|
||||
## 4. 给 Folder 增加权限
|
||||
|
||||
可参考飞书教程: https://open.feishu.cn/document/server-docs/docs/drive-v1/faq#b02e5bfb
|
||||
|
||||
大致总结为:
|
||||
|
||||
1. 把刚刚创建的应用拉入一个群里
|
||||
2. 给这个群增加目录权限
|
||||
|
||||
如果你的目录已经给全员组增加权限了,则可以跳过上面步骤,直接获取 Folder Token。
|
||||
|
||||

|
||||
|
||||
## 5. 获取 Folder Token
|
||||
|
||||
可以在页面路径上获取 Folder Token,注意不要把问号复制进来。
|
||||
|
||||

|
||||
|
||||
## 6. 创建知识库
|
||||
|
||||
根据 3 和 5 获取到的 3 个参数,创建知识库,选择飞书文件库类型,然后填入对应的参数,点击创建。
|
||||
|
||||

|
||||
@@ -0,0 +1,42 @@
|
||||
---
|
||||
title: '语雀文件库'
|
||||
description: 'FastGPT 语雀文件库功能介绍和使用方式'
|
||||
icon: 'language'
|
||||
draft: false
|
||||
toc: true
|
||||
weight: 405
|
||||
---
|
||||
|
||||
| | |
|
||||
| --- | --- |
|
||||
|  |  |
|
||||
|
||||
FastGPT v4.8.16 版本开始,商业版用户支持语雀文件库导入,用户可以通过配置语雀的 token 和 uid 来导入语雀文档库。目前处于测试阶段,部分交互有待优化。
|
||||
|
||||
## 1. 获取语雀的 token 和 uid
|
||||
|
||||
在语雀首页 - 个人头像 - 设置,可找到对应参数。
|
||||
|
||||

|
||||
|
||||
参考下图获取 Token 和 User ID,注意给 Token 赋值权限:
|
||||
|
||||
| 获取 Token | 增加权限 | 获取 User ID |
|
||||
| --- | --- | --- |
|
||||
|  |  |  |
|
||||
|
||||
## 2. 创建知识库
|
||||
|
||||
使用上一步获取的 token 和 uid,创建知识库,选择语雀文件库类型,然后填入对应的参数,点击创建。
|
||||
|
||||

|
||||
|
||||

|
||||
|
||||
## 3. 导入文档
|
||||
|
||||
创建完知识库后,点击`添加文件`即可导入语雀的文档库,跟随引导即可。
|
||||
|
||||
语雀知识库支持定时同步功能,每天会不定时的扫描一次,如果文档有更新,则会进行同步,也可以进行手动同步。
|
||||
|
||||

|
||||
178
docSite/content/zh-cn/docs/guide/plugins/searxng_plugin_guide.md
Normal file
@@ -0,0 +1,178 @@
|
||||
---
|
||||
title: "SearXNG 搜索插件配置与使用说明"
|
||||
description: "FastGPT SearXNG 搜索插件配置指南"
|
||||
icon: "search"
|
||||
draft: false
|
||||
toc: true
|
||||
weight: 303
|
||||
---
|
||||
|
||||
[SearXNG](https://github.com/searxng/searxng)是一款免费的互联网元搜索引擎,它汇总了来自各种搜索服务和数据库的结果。它不会跟踪或分析用户。用户可以自行部署它进行使用。本文介绍 Searxng 的部署以及接入 FastGPT 插件。
|
||||
|
||||
|
||||
## 1. 部署应用
|
||||
|
||||
这里介绍在 Sealos 中部署 SearXNG 的方法。Docker 部署,可以直接参考 [SearXNG 官方教程](https://github.com/searxng/searxng)。
|
||||
|
||||
点击打开 [Sealos 北京区](https://bja.sealos.run/),点击应用部署,并新建一个应用:
|
||||
|
||||
| 打开应用部署 | 点击新建应用 |
|
||||
| --- | --- |
|
||||
|  |  |
|
||||
|
||||
## 2. 部署配置
|
||||
|
||||
把下面参数,填入配置中:
|
||||
|
||||
* 镜像名: searxng/searxng:latest
|
||||
* CPU: 0.2
|
||||
* 内存: 512M
|
||||
* 容器暴露端口: 8080
|
||||
* 开启公网访问
|
||||
* 点击高级配置,填写环境变量和配置文件
|
||||
|
||||

|
||||
|
||||
**环境变量**
|
||||
|
||||
填下面两个内容,主要是为了减小并发,不然内存占用非常大。
|
||||
|
||||
```
|
||||
UWSGI_WORKERS=4
|
||||
UWSGI_THREADS=4
|
||||
```
|
||||
|
||||
**配置文件**
|
||||
|
||||
新增一个配置文件,文件名:`/etc/searx/settings.yml`
|
||||
文件内容:
|
||||
|
||||
```txt
|
||||
general:
|
||||
debug: false
|
||||
instance_name: "searxng"
|
||||
privacypolicy_url: false
|
||||
donation_url: false
|
||||
contact_url: false
|
||||
enable_metrics: true
|
||||
open_metrics: ''
|
||||
|
||||
brand:
|
||||
new_issue_url: https://github.com/searxng/searxng/issues/new
|
||||
docs_url: https://docs.searxng.org/
|
||||
public_instances: https://searx.space
|
||||
wiki_url: https://github.com/searxng/searxng/wiki
|
||||
issue_url: https://github.com/searxng/searxng/issues
|
||||
|
||||
search:
|
||||
safe_search: 0
|
||||
autocomplete: ""
|
||||
autocomplete_min: 4
|
||||
default_lang: "auto"
|
||||
ban_time_on_fail: 5
|
||||
max_ban_time_on_fail: 120
|
||||
formats:
|
||||
- html
|
||||
|
||||
server:
|
||||
port: 8080
|
||||
bind_address: "0.0.0.0"
|
||||
base_url: false
|
||||
limiter: false
|
||||
public_instance: false
|
||||
secret_key: "example"
|
||||
image_proxy: false
|
||||
http_protocol_version: "1.0"
|
||||
method: "POST"
|
||||
default_http_headers:
|
||||
X-Content-Type-Options: nosniff
|
||||
X-Download-Options: noopen
|
||||
X-Robots-Tag: noindex, nofollow
|
||||
Referrer-Policy: no-referrer
|
||||
|
||||
redis:
|
||||
url: false
|
||||
|
||||
ui:
|
||||
static_path: ""
|
||||
static_use_hash: false
|
||||
templates_path: ""
|
||||
default_theme: simple
|
||||
default_locale: ""
|
||||
query_in_title: false
|
||||
infinite_scroll: false
|
||||
center_alignment: false
|
||||
theme_args:
|
||||
simple_style: auto
|
||||
|
||||
outgoing:
|
||||
request_timeout: 30.0
|
||||
max_request_timeout: 40.0
|
||||
pool_connections: 200
|
||||
pool_maxsize: 50
|
||||
enable_http2: false
|
||||
retries: 5
|
||||
|
||||
engines:
|
||||
|
||||
- name: bing
|
||||
engine: bing
|
||||
shortcut: bi
|
||||
|
||||
doi_resolvers:
|
||||
oadoi.org: 'https://oadoi.org/'
|
||||
doi.org: 'https://doi.org/'
|
||||
doai.io: 'https://dissem.in/'
|
||||
sci-hub.se: 'https://sci-hub.se/'
|
||||
sci-hub.st: 'https://sci-hub.st/'
|
||||
sci-hub.ru: 'https://sci-hub.ru/'
|
||||
|
||||
default_doi_resolver: 'oadoi.org'
|
||||
```
|
||||
|
||||
国内目前只有 Bing 引擎可以正常用,所以上面的配置只配置了 bing 引擎。如果在海外部署,可以使用[Sealos 新加坡可用区](https://cloud.sealos.io/),并配置其他搜索引擎,可以参考[SearXNG 默认配置文件](https://github.com/searxng/searxng/blob/master/searx/settings.yml), 从里面复制一些 engine 配置。例如:
|
||||
|
||||
```
|
||||
- name: duckduckgo
|
||||
engine: duckduckgo
|
||||
shortcut: ddg
|
||||
|
||||
- name: google
|
||||
engine: google
|
||||
shortcut: go
|
||||
```
|
||||
|
||||
## 3. FastGPT 使用
|
||||
|
||||
复制 Sealos 部署后提供的公网地址,填入 FastGPT 的 SearXNG 插件的 URL 中。
|
||||
|
||||
| 复制公网地址| 填入 URL |
|
||||
| --- | --- |
|
||||
|  |  |
|
||||
|
||||
## 返回格式
|
||||
|
||||
* 成功时返回搜索结果数组:
|
||||
|
||||
```Bash
|
||||
{
|
||||
"result": "[{\"title\":\"标题1\",\"link\":\"链接1\",\"snippet\":\"摘要1\"}, ...]"
|
||||
}
|
||||
```
|
||||
|
||||
* 失败时通过 Promise.reject 可能返回错误信息:
|
||||
|
||||
```Bash
|
||||
- "缺少查询参数"
|
||||
- "缺少url"
|
||||
- "Failed to fetch data from Search XNG"
|
||||
```
|
||||
|
||||
一般问题来源于参数缺失与服务部署,如有更多问题可在用户群提问。
|
||||
|
||||
## FAQ
|
||||
|
||||
### 无搜索结果
|
||||
|
||||
1. 先直接打开外网地址,测试是否可以正常搜索。
|
||||
2. 检查是否有超时的搜索引擎,通过 API 调用时不会返回结果。
|
||||
@@ -84,4 +84,18 @@ function main({input}){
|
||||
}
|
||||
```
|
||||
|
||||

|
||||

|
||||
|
||||
### createHmac 加密
|
||||
|
||||
与 node 中 crypto 的 createHmac 方法一致。
|
||||
|
||||
```js
|
||||
function main({secret}){
|
||||
const {sign,timestamp} = createHmac('sha256',secret)
|
||||
|
||||
return {
|
||||
sign,timestamp
|
||||
}
|
||||
}
|
||||
```
|
||||
@@ -0,0 +1,57 @@
|
||||
---
|
||||
title: "接入钉钉机器人教程"
|
||||
description: "FastGPT 接入钉钉机器人教程"
|
||||
icon: "chat"
|
||||
draft: false
|
||||
toc: true
|
||||
weight: 505
|
||||
---
|
||||
|
||||
从 4.8.16 版本起,FastGPT 商业版支持直接接入钉钉机器人,无需额外的 API。
|
||||
|
||||
## 1. 创建钉钉企业内部应用
|
||||
|
||||
1. 在[钉钉开发者后台](https://open-dev.dingtalk.com/fe/app)创建企业内部应用。
|
||||
|
||||

|
||||
|
||||
2. 获取**Client ID**和**Client Secret**。
|
||||
|
||||

|
||||
|
||||
## 2. 为 FastGPT 添加发布渠道
|
||||
|
||||
在 FastGPT 中选择要接入的应用,在**发布渠道**页面,新建一个接入钉钉机器人的发布渠道。
|
||||
|
||||
将前面拿到的 **Client ID** 和 **Client Secret** 填入配置弹窗中。
|
||||
|
||||

|
||||
|
||||
创建完成后,点击**请求地址**按钮,然后复制回调地址。
|
||||
|
||||
## 3. 为应用添加**机器人**应用能力。
|
||||
|
||||
在钉钉开发者后台,点击左侧**添加应用能力**,为刚刚创建的企业内部应用添加 **机器人** 应用能力。
|
||||
|
||||

|
||||
|
||||
## 4. 配置机器人回调地址
|
||||
|
||||
点击左侧**机器人** 应用能力,然后将底部**消息接收模式**设置为**HTTP模式**,消息接收地址填入前面复制的 FastGPT 的回调地址。
|
||||
|
||||

|
||||
|
||||
调试完成后,点击**发布**。
|
||||
|
||||
## 5. 发布应用
|
||||
机器人发布后,还需要在**版本管理与发布**页面发布应用版本。
|
||||
|
||||

|
||||
|
||||
点击**创建新版本**后,设置版本号和版本描述后点击保存发布即可。
|
||||
|
||||

|
||||
|
||||
应用发布后,即可在钉钉企业中使用机器人功能,可对机器人私聊。或者在群组添加机器人后`@机器人`,触发对话。
|
||||
|
||||

|
||||
@@ -114,15 +114,15 @@ services:
|
||||
# fastgpt
|
||||
sandbox:
|
||||
container_name: sandbox
|
||||
image: ghcr.io/labring/fastgpt-sandbox:v4.8.14 # git
|
||||
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.8.14 # 阿里云
|
||||
image: ghcr.io/labring/fastgpt-sandbox:v4.8.15 # git
|
||||
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.8.15 # 阿里云
|
||||
networks:
|
||||
- fastgpt
|
||||
restart: always
|
||||
fastgpt:
|
||||
container_name: fastgpt
|
||||
image: ghcr.io/labring/fastgpt:v4.8.14-milvus-fix # git
|
||||
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.14-milvus-fix # 阿里云
|
||||
image: ghcr.io/labring/fastgpt:v4.8.15-fix2 # git
|
||||
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.15-fix2 # 阿里云
|
||||
ports:
|
||||
- 3000:3000
|
||||
networks:
|
||||
|
||||
@@ -72,15 +72,15 @@ services:
|
||||
# fastgpt
|
||||
sandbox:
|
||||
container_name: sandbox
|
||||
image: ghcr.io/labring/fastgpt-sandbox:v4.8.13 # git
|
||||
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.8.13 # 阿里云
|
||||
image: ghcr.io/labring/fastgpt-sandbox:v4.8.15 # git
|
||||
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.8.15 # 阿里云
|
||||
networks:
|
||||
- fastgpt
|
||||
restart: always
|
||||
fastgpt:
|
||||
container_name: fastgpt
|
||||
image: ghcr.io/labring/fastgpt:v4.8.14 # git
|
||||
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.14 # 阿里云
|
||||
image: ghcr.io/labring/fastgpt:v4.8.15-fix2 # git
|
||||
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.15-fix2 # 阿里云
|
||||
ports:
|
||||
- 3000:3000
|
||||
networks:
|
||||
|
||||
@@ -53,15 +53,15 @@ services:
|
||||
wait $$!
|
||||
sandbox:
|
||||
container_name: sandbox
|
||||
image: ghcr.io/labring/fastgpt-sandbox:v4.8.14 # git
|
||||
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.8.14 # 阿里云
|
||||
image: ghcr.io/labring/fastgpt-sandbox:v4.8.15 # git
|
||||
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.8.15 # 阿里云
|
||||
networks:
|
||||
- fastgpt
|
||||
restart: always
|
||||
fastgpt:
|
||||
container_name: fastgpt
|
||||
image: ghcr.io/labring/fastgpt:v4.8.14-milvus-fix # git
|
||||
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.14-milvus-fix # 阿里云
|
||||
image: ghcr.io/labring/fastgpt:v4.8.15-fix2 # git
|
||||
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.8.15-fix2 # 阿里云
|
||||
ports:
|
||||
- 3000:3000
|
||||
networks:
|
||||
|
||||
@@ -12,7 +12,8 @@ export enum DatasetErrEnum {
|
||||
unLinkCollection = 'unLinkCollection',
|
||||
invalidVectorModelOrQAModel = 'invalidVectorModelOrQAModel',
|
||||
notSupportSync = 'notSupportSync',
|
||||
sameApiCollection = 'sameApiCollection'
|
||||
sameApiCollection = 'sameApiCollection',
|
||||
noApiServer = 'noApiServer'
|
||||
}
|
||||
const datasetErr = [
|
||||
{
|
||||
|
||||
@@ -72,7 +72,7 @@ export const ERROR_RESPONSE: Record<
|
||||
[ERROR_ENUM.tooManyRequest]: {
|
||||
code: 429,
|
||||
statusText: ERROR_ENUM.tooManyRequest,
|
||||
message: 'Too many request',
|
||||
message: i18nT('common:error.too_many_request'),
|
||||
data: null
|
||||
},
|
||||
[ERROR_ENUM.insufficientQuota]: {
|
||||
|
||||
7
packages/global/common/middle/tracks/constants.ts
Normal file
@@ -0,0 +1,7 @@
|
||||
// Event identifiers for usage tracking (used as the `event` field of
// TrackSchemaType). Each member's value mirrors its name so events
// serialize as stable string keys; do not rename existing members —
// they are persisted in stored track records.
export enum TrackEnum {
  login = 'login',
  createApp = 'createApp',
  useAppTemplate = 'useAppTemplate',
  createDataset = 'createDataset',
  appNodes = 'appNodes'
}
|
||||
18
packages/global/common/middle/tracks/type.d.ts
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
import { TrackEnum } from './constants';
|
||||
import { OAuthEnum } from '../../../support/user/constant';
|
||||
import { AppTypeEnum } from '../../../core/app/constants';
|
||||
|
||||
export type PushTrackCommonType = {
|
||||
uid: string;
|
||||
teamId: string;
|
||||
tmbId: string;
|
||||
};
|
||||
|
||||
export type TrackSchemaType = {
|
||||
event: TrackEnum;
|
||||
createTime: Date;
|
||||
uid?: string;
|
||||
teamId?: string;
|
||||
tmbId?: string;
|
||||
data: Record<string, any>;
|
||||
};
|
||||
@@ -29,7 +29,7 @@ export const simpleText = (text = '') => {
|
||||
replace {{variable}} to value
|
||||
*/
|
||||
export function replaceVariable(text: any, obj: Record<string, string | number>) {
|
||||
if (!(typeof text === 'string')) return text;
|
||||
if (typeof text !== 'string') return text;
|
||||
|
||||
for (const key in obj) {
|
||||
const val = obj[key];
|
||||
|
||||
@@ -2,3 +2,5 @@ export const HUMAN_ICON = `/icon/human.svg`;
|
||||
export const LOGO_ICON = `/icon/logo.svg`;
|
||||
export const HUGGING_FACE_ICON = `/imgs/model/huggingface.svg`;
|
||||
export const DEFAULT_TEAM_AVATAR = `/imgs/avatar/defaultTeamAvatar.svg`;
|
||||
|
||||
export const isProduction = process.env.NODE_ENV === 'production';
|
||||
|
||||
@@ -58,11 +58,13 @@ export type FastGPTFeConfigsType = {
|
||||
icon?: string;
|
||||
title?: string;
|
||||
url?: string;
|
||||
autoLogin?: boolean;
|
||||
};
|
||||
oauth?: {
|
||||
github?: string;
|
||||
google?: string;
|
||||
wechat?: string;
|
||||
dingtalk?: string;
|
||||
microsoft?: {
|
||||
clientId?: string;
|
||||
tenantId?: string;
|
||||
|
||||
6
packages/global/core/ai/model.d.ts
vendored
@@ -1,7 +1,10 @@
|
||||
import type { ModelProviderIdType } from './provider';
|
||||
|
||||
export type LLMModelItemType = {
|
||||
provider: ModelProviderIdType;
|
||||
model: string;
|
||||
name: string;
|
||||
avatar?: string;
|
||||
avatar?: string; // model icon, from provider
|
||||
maxContext: number;
|
||||
maxResponse: number;
|
||||
quoteMaxToken: number;
|
||||
@@ -31,6 +34,7 @@ export type LLMModelItemType = {
|
||||
};
|
||||
|
||||
export type VectorModelItemType = {
|
||||
provider: ModelProviderIdType;
|
||||
model: string; // model name
|
||||
name: string; // show name
|
||||
avatar?: string;
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
import type { LLMModelItemType, VectorModelItemType } from './model.d';
|
||||
import { getModelProvider, ModelProviderIdType } from './provider';
|
||||
|
||||
export const defaultQAModels: LLMModelItemType[] = [
|
||||
{
|
||||
provider: 'OpenAI',
|
||||
model: 'gpt-4o-mini',
|
||||
name: 'gpt-4o-mini',
|
||||
maxContext: 16000,
|
||||
@@ -23,6 +25,7 @@ export const defaultQAModels: LLMModelItemType[] = [
|
||||
|
||||
export const defaultVectorModels: VectorModelItemType[] = [
|
||||
{
|
||||
provider: 'OpenAI',
|
||||
model: 'text-embedding-3-small',
|
||||
name: 'Embedding-2',
|
||||
charsPointsPrice: 0,
|
||||
@@ -31,3 +34,15 @@ export const defaultVectorModels: VectorModelItemType[] = [
|
||||
weight: 100
|
||||
}
|
||||
];
|
||||
|
||||
export const getModelFromList = (
|
||||
modelList: { provider: ModelProviderIdType; name: string; model: string }[],
|
||||
model: string
|
||||
) => {
|
||||
const modelData = modelList.find((item) => item.model === model) ?? modelList[0];
|
||||
const provider = getModelProvider(modelData.provider);
|
||||
return {
|
||||
...modelData,
|
||||
avatar: provider.avatar
|
||||
};
|
||||
};
|
||||
|
||||
@@ -65,3 +65,13 @@ export const Prompt_CQJson = `请帮我执行一个“问题分类”任务,
|
||||
问题:"{{question}}"
|
||||
类型ID=
|
||||
`;
|
||||
|
||||
export const PROMPT_QUESTION_GUIDE = `You are an AI assistant tasked with predicting the user's next question based on the conversation history. Your goal is to generate 3 potential questions that will guide the user to continue the conversation. When generating these questions, adhere to the following rules:
|
||||
|
||||
1. Use the same language as the user's last question in the conversation history.
|
||||
2. Keep each question under 20 characters in length.
|
||||
|
||||
Analyze the conversation history provided to you and use it as context to generate relevant and engaging follow-up questions. Your predictions should be logical extensions of the current topic or related areas that the user might be interested in exploring further.
|
||||
|
||||
Remember to maintain consistency in tone and style with the existing conversation while providing diverse options for the user to choose from. Your goal is to keep the conversation flowing naturally and help the user delve deeper into the subject matter or explore related topics.`;
|
||||
export const PROMPT_QUESTION_GUIDE_FOOTER = `Please strictly follow the format rules: \nReturn questions in JSON format: ['Question 1', 'Question 2', 'Question 3']. Your output: `;
|
||||
|
||||
121
packages/global/core/ai/provider.ts
Normal file
@@ -0,0 +1,121 @@
|
||||
import { i18nT } from '../../../web/i18n/utils';
|
||||
|
||||
// Canonical identifiers for supported model providers. 'Other' is the
// fallback id used by getModelProvider when an id is not recognized.
export type ModelProviderIdType =
  | 'OpenAI'
  | 'Claude'
  | 'Gemini'
  | 'MistralAI'
  | 'Qwen'
  | 'Doubao'
  | 'ChatGLM'
  | 'DeepSeek'
  | 'Moonshot'
  | 'MiniMax'
  | 'SparkDesk'
  | 'Hunyuan'
  | 'Baichuan'
  | 'Yi'
  | 'Ernie'
  | 'Ollama'
  | 'Other';

// Display metadata for one provider.
export type ModelProviderType = {
  id: ModelProviderIdType;
  name: string; // display name; may come from i18nT (see ModelProviderList)
  avatar: string; // icon identifier, e.g. 'model/openai'
};
|
||||
|
||||
// Registry of all supported model providers, in display order.
// - `name` is either the literal brand name or an i18n key resolved via
//   i18nT (used for providers whose display name is localized).
// - `avatar` is an icon identifier consumed by the UI.
// NOTE(review): MistralAI and 'Other' both reuse the huggingface avatar —
// confirm there is intentionally no dedicated icon for them.
export const ModelProviderList: ModelProviderType[] = [
  { id: 'OpenAI', name: 'OpenAI', avatar: 'model/openai' },
  { id: 'Claude', name: 'Claude', avatar: 'model/claude' },
  { id: 'Gemini', name: 'Gemini', avatar: 'model/gemini' },
  { id: 'MistralAI', name: 'MistralAI', avatar: 'model/huggingface' },
  { id: 'Qwen', name: i18nT('common:model_qwen'), avatar: 'model/qwen' },
  { id: 'Doubao', name: i18nT('common:model_doubao'), avatar: 'model/doubao' },
  { id: 'ChatGLM', name: i18nT('common:model_chatglm'), avatar: 'model/chatglm' },
  { id: 'DeepSeek', name: 'DeepSeek', avatar: 'model/deepseek' },
  { id: 'Moonshot', name: i18nT('common:model_moonshot'), avatar: 'model/moonshot' },
  { id: 'MiniMax', name: 'MiniMax', avatar: 'model/minimax' },
  { id: 'SparkDesk', name: i18nT('common:model_sparkdesk'), avatar: 'model/sparkDesk' },
  { id: 'Hunyuan', name: i18nT('common:model_hunyuan'), avatar: 'model/hunyuan' },
  { id: 'Baichuan', name: i18nT('common:model_baichuan'), avatar: 'model/baichuan' },
  { id: 'Yi', name: i18nT('common:model_yi'), avatar: 'model/yi' },
  { id: 'Ernie', name: i18nT('common:model_ernie'), avatar: 'model/ernie' },
  { id: 'Ollama', name: 'Ollama', avatar: 'model/ollama' },
  { id: 'Other', name: i18nT('common:model_other'), avatar: 'model/huggingface' }
];
|
||||
export const ModelProviderMap = Object.fromEntries(
|
||||
ModelProviderList.map((item) => [item.id, item])
|
||||
);
|
||||
|
||||
export const getModelProvider = (provider: ModelProviderIdType) => {
|
||||
return ModelProviderMap[provider] ?? ModelProviderMap.Other;
|
||||
};
|
||||
@@ -1,8 +1,10 @@
|
||||
import { PROMPT_QUESTION_GUIDE } from '../ai/prompt/agent';
|
||||
import {
|
||||
AppTTSConfigType,
|
||||
AppFileSelectConfigType,
|
||||
AppWhisperConfigType,
|
||||
AppAutoExecuteConfigType
|
||||
AppAutoExecuteConfigType,
|
||||
AppQGConfigType
|
||||
} from './type';
|
||||
|
||||
export enum AppTypeEnum {
|
||||
@@ -28,6 +30,12 @@ export const defaultWhisperConfig: AppWhisperConfigType = {
|
||||
autoTTSResponse: false
|
||||
};
|
||||
|
||||
export const defaultQGConfig: AppQGConfigType = {
|
||||
open: false,
|
||||
model: 'gpt-4o-mini',
|
||||
customPrompt: PROMPT_QUESTION_GUIDE
|
||||
};
|
||||
|
||||
export const defaultChatInputGuideConfig = {
|
||||
open: false,
|
||||
textList: [],
|
||||
|
||||
10
packages/global/core/app/type.d.ts
vendored
@@ -97,7 +97,7 @@ export type AppChatConfigType = {
|
||||
welcomeText?: string;
|
||||
variables?: VariableItemType[];
|
||||
autoExecute?: AppAutoExecuteConfigType;
|
||||
questionGuide?: boolean;
|
||||
questionGuide?: AppQGConfigType;
|
||||
ttsConfig?: AppTTSConfigType;
|
||||
whisperConfig?: AppWhisperConfigType;
|
||||
scheduledTriggerConfig?: AppScheduledTriggerConfigType;
|
||||
@@ -148,6 +148,14 @@ export type AppWhisperConfigType = {
|
||||
autoSend: boolean;
|
||||
autoTTSResponse: boolean;
|
||||
};
|
||||
|
||||
// question guide
|
||||
export type AppQGConfigType = {
|
||||
open: boolean;
|
||||
model?: string;
|
||||
customPrompt?: string;
|
||||
};
|
||||
|
||||
// question guide text
|
||||
export type ChatInputGuideConfigType = {
|
||||
open: boolean;
|
||||
|
||||
2
packages/global/core/chat/type.d.ts
vendored
@@ -16,6 +16,7 @@ import { DatasetSearchModeEnum } from '../dataset/constants';
|
||||
import { DispatchNodeResponseType } from '../workflow/runtime/type.d';
|
||||
import { ChatBoxInputType } from '../../../../projects/app/src/components/core/chat/ChatContainer/ChatBox/type';
|
||||
import { WorkflowInteractiveResponseType } from '../workflow/template/system/interactive/type';
|
||||
import { FlowNodeInputItemType } from '../workflow/type/io';
|
||||
|
||||
export type ChatSchema = {
|
||||
_id: string;
|
||||
@@ -35,6 +36,7 @@ export type ChatSchema = {
|
||||
variableList?: VariableItemType[];
|
||||
welcomeText?: string;
|
||||
variables: Record<string, any>;
|
||||
pluginInputs?: FlowNodeInputItemType[];
|
||||
metadata?: Record<string, any>;
|
||||
};
|
||||
|
||||
|
||||
7
packages/global/core/dataset/api.d.ts
vendored
@@ -17,6 +17,11 @@ export type DatasetUpdateBody = {
|
||||
externalReadUrl?: DatasetSchemaType['externalReadUrl'];
|
||||
defaultPermission?: DatasetSchemaType['defaultPermission'];
|
||||
apiServer?: DatasetSchemaType['apiServer'];
|
||||
yuqueServer?: DatasetSchemaType['yuqueServer'];
|
||||
feishuServer?: DatasetSchemaType['feishuServer'];
|
||||
|
||||
// sync schedule
|
||||
autoSync?: boolean;
|
||||
};
|
||||
|
||||
/* ================= collection ===================== */
|
||||
@@ -47,6 +52,8 @@ export type CreateDatasetCollectionParams = DatasetCollectionChunkMetadataType &
|
||||
tags?: string[];
|
||||
|
||||
createTime?: Date;
|
||||
updateTime?: Date;
|
||||
nextSyncTime?: Date;
|
||||
};
|
||||
|
||||
export type ApiCreateDatasetCollectionParams = DatasetCollectionChunkMetadataType & {
|
||||
|
||||
11
packages/global/core/dataset/apiDataset.d.ts
vendored
@@ -22,3 +22,14 @@ export type APIFileContentResponse = {
|
||||
export type APIFileReadResponse = {
|
||||
url: string;
|
||||
};
|
||||
|
||||
export type FeishuServer = {
|
||||
appId: string;
|
||||
appSecret: string;
|
||||
folderToken: string;
|
||||
};
|
||||
|
||||
export type YuqueServer = {
|
||||
userId: string;
|
||||
token: string;
|
||||
};
|
||||
|
||||
@@ -6,7 +6,9 @@ export enum DatasetTypeEnum {
|
||||
dataset = 'dataset',
|
||||
websiteDataset = 'websiteDataset', // depp link
|
||||
externalFile = 'externalFile',
|
||||
apiDataset = 'apiDataset'
|
||||
apiDataset = 'apiDataset',
|
||||
feishu = 'feishu',
|
||||
yuque = 'yuque'
|
||||
}
|
||||
export const DatasetTypeMap = {
|
||||
[DatasetTypeEnum.folder]: {
|
||||
@@ -33,6 +35,16 @@ export const DatasetTypeMap = {
|
||||
icon: 'core/dataset/externalDatasetOutline',
|
||||
label: 'api_file',
|
||||
collectionLabel: 'common.File'
|
||||
},
|
||||
[DatasetTypeEnum.feishu]: {
|
||||
icon: 'core/dataset/feishuDatasetOutline',
|
||||
label: 'feishu_dataset',
|
||||
collectionLabel: 'common.File'
|
||||
},
|
||||
[DatasetTypeEnum.yuque]: {
|
||||
icon: 'core/dataset/yuqueDatasetOutline',
|
||||
label: 'yuque_dataset',
|
||||
collectionLabel: 'common.File'
|
||||
}
|
||||
};
|
||||
|
||||
@@ -82,7 +94,8 @@ export const DatasetCollectionTypeMap = {
|
||||
|
||||
export enum DatasetCollectionSyncResultEnum {
|
||||
sameRaw = 'sameRaw',
|
||||
success = 'success'
|
||||
success = 'success',
|
||||
failed = 'failed'
|
||||
}
|
||||
export const DatasetCollectionSyncResultMap = {
|
||||
[DatasetCollectionSyncResultEnum.sameRaw]: {
|
||||
@@ -90,6 +103,9 @@ export const DatasetCollectionSyncResultMap = {
|
||||
},
|
||||
[DatasetCollectionSyncResultEnum.success]: {
|
||||
label: i18nT('common:core.dataset.collection.sync.result.success')
|
||||
},
|
||||
[DatasetCollectionSyncResultEnum.failed]: {
|
||||
label: i18nT('dataset:sync_collection_failed')
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
13
packages/global/core/dataset/type.d.ts
vendored
@@ -10,7 +10,7 @@ import {
|
||||
} from './constants';
|
||||
import { DatasetPermission } from '../../support/permission/dataset/controller';
|
||||
import { Permission } from '../../support/permission/controller';
|
||||
import { APIFileServer } from './apiDataset';
|
||||
import { APIFileServer, FeishuServer, YuqueServer } from './apiDataset';
|
||||
|
||||
export type DatasetSchemaType = {
|
||||
_id: string;
|
||||
@@ -33,9 +33,10 @@ export type DatasetSchemaType = {
|
||||
};
|
||||
inheritPermission: boolean;
|
||||
apiServer?: APIFileServer;
|
||||
feishuServer?: FeishuServer;
|
||||
yuqueServer?: YuqueServer;
|
||||
|
||||
syncSchedule?: { cronString: string; timezone: string };
|
||||
syncNextTime?: Date;
|
||||
autoSync?: boolean;
|
||||
|
||||
// abandon
|
||||
externalReadUrl?: string;
|
||||
@@ -65,11 +66,13 @@ export type DatasetCollectionSchemaType = {
|
||||
fileId?: string; // local file id
|
||||
rawLink?: string; // link url
|
||||
externalFileId?: string; //external file id
|
||||
apiFileId?: string; // api file id
|
||||
externalFileUrl?: string; // external import url
|
||||
|
||||
nextSyncTime?: Date;
|
||||
|
||||
rawTextLength?: number;
|
||||
hashRawText?: string;
|
||||
externalFileUrl?: string; // external import url
|
||||
apiFileId?: string; // api file id
|
||||
metadata?: {
|
||||
webPageSelector?: string;
|
||||
relatedImgId?: string; // The id of the associated image collections
|
||||
|
||||
@@ -251,6 +251,7 @@ export const getReferenceVariableValue = ({
|
||||
return variables[outputId];
|
||||
}
|
||||
|
||||
// 避免 value 刚好就是二个元素的字符串数组
|
||||
const node = nodes.find((node) => node.nodeId === sourceNodeId);
|
||||
if (!node) {
|
||||
return value;
|
||||
@@ -283,9 +284,13 @@ export const formatVariableValByType = (val: any, valueType?: WorkflowIOValueTyp
|
||||
if (!valueType) return val;
|
||||
// Value type check, If valueType invalid, return undefined
|
||||
if (valueType.startsWith('array') && !Array.isArray(val)) return undefined;
|
||||
if (valueType === WorkflowIOValueTypeEnum.boolean && typeof val !== 'boolean') return undefined;
|
||||
if (valueType === WorkflowIOValueTypeEnum.number && typeof val !== 'number') return undefined;
|
||||
if (valueType === WorkflowIOValueTypeEnum.string && typeof val !== 'string') return undefined;
|
||||
if (valueType === WorkflowIOValueTypeEnum.boolean) return Boolean(val);
|
||||
if (valueType === WorkflowIOValueTypeEnum.number) return Number(val);
|
||||
if (valueType === WorkflowIOValueTypeEnum.string) {
|
||||
if (val === undefined) return 'undefined';
|
||||
if (val === null) return 'null';
|
||||
return typeof val === 'object' ? JSON.stringify(val) : String(val);
|
||||
}
|
||||
if (
|
||||
[
|
||||
WorkflowIOValueTypeEnum.object,
|
||||
|
||||
@@ -26,12 +26,14 @@ import type {
|
||||
AppScheduledTriggerConfigType,
|
||||
ChatInputGuideConfigType,
|
||||
AppChatConfigType,
|
||||
AppAutoExecuteConfigType
|
||||
AppAutoExecuteConfigType,
|
||||
AppQGConfigType
|
||||
} from '../app/type';
|
||||
import { EditorVariablePickerType } from '../../../web/components/common/Textarea/PromptEditor/type';
|
||||
import {
|
||||
defaultAutoExecuteConfig,
|
||||
defaultChatInputGuideConfig,
|
||||
defaultQGConfig,
|
||||
defaultTTSConfig,
|
||||
defaultWhisperConfig
|
||||
} from '../app/constants';
|
||||
@@ -76,9 +78,14 @@ export const splitGuideModule = (guideModules?: StoreNodeItemType) => {
|
||||
const variables: VariableItemType[] =
|
||||
guideModules?.inputs.find((item) => item.key === NodeInputKeyEnum.variables)?.value ?? [];
|
||||
|
||||
const questionGuide: boolean =
|
||||
!!guideModules?.inputs?.find((item) => item.key === NodeInputKeyEnum.questionGuide)?.value ??
|
||||
false;
|
||||
// Adapt old version
|
||||
const questionGuideVal = guideModules?.inputs?.find(
|
||||
(item) => item.key === NodeInputKeyEnum.questionGuide
|
||||
)?.value;
|
||||
const questionGuide: AppQGConfigType =
|
||||
typeof questionGuideVal === 'boolean'
|
||||
? { ...defaultQGConfig, open: questionGuideVal }
|
||||
: questionGuideVal ?? defaultQGConfig;
|
||||
|
||||
const ttsConfig: AppTTSConfigType =
|
||||
guideModules?.inputs?.find((item) => item.key === NodeInputKeyEnum.tts)?.value ??
|
||||
|
||||
@@ -3,6 +3,7 @@ export enum PublishChannelEnum {
|
||||
iframe = 'iframe',
|
||||
apikey = 'apikey',
|
||||
feishu = 'feishu',
|
||||
dingtalk = 'dingtalk',
|
||||
wecom = 'wecom',
|
||||
officialAccount = 'official_account'
|
||||
}
|
||||
|
||||
12
packages/global/support/outLink/type.d.ts
vendored
@@ -14,6 +14,11 @@ export interface FeishuAppType {
|
||||
verificationToken?: string;
|
||||
}
|
||||
|
||||
export interface DingtalkAppType {
|
||||
clientId: string;
|
||||
clientSecret: string;
|
||||
}
|
||||
|
||||
export interface WecomAppType {
|
||||
AgentId: string;
|
||||
CorpId: string;
|
||||
@@ -36,7 +41,12 @@ export interface OffiAccountAppType {
|
||||
// because we can not reply anything in 15s. Thus, the wechat server will treat this request as a failed request.
|
||||
}
|
||||
|
||||
export type OutlinkAppType = FeishuAppType | WecomAppType | OffiAccountAppType | undefined;
|
||||
export type OutlinkAppType =
|
||||
| FeishuAppType
|
||||
| WecomAppType
|
||||
| OffiAccountAppType
|
||||
| DingtalkAppType
|
||||
| undefined;
|
||||
|
||||
export type OutLinkSchema<T extends OutlinkAppType = undefined> = {
|
||||
_id: string;
|
||||
|
||||