Compare commits
14 Commits
v4.6.7-alp
...
v4.6.8
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
bf7ee919b6 | ||
|
|
91bcf8c53e | ||
|
|
51bbdf26a3 | ||
|
|
ec8e2512bc | ||
|
|
34602b25df | ||
|
|
fc19c4cf09 | ||
|
|
9188752b41 | ||
|
|
6b40062504 | ||
|
|
72d1503fa3 | ||
|
|
2c6dbe13d9 | ||
|
|
318116627c | ||
|
|
379673cae1 | ||
|
|
aab6ee51eb | ||
|
|
91b7d81c1a |
19
.github/workflows/bot-issues-translator.yml
vendored
@@ -1,19 +0,0 @@
|
||||
name: 'Github Rebot for issues-translator'
|
||||
on:
|
||||
issues:
|
||||
types: [ opened ]
|
||||
issue_comment:
|
||||
types: [ created ]
|
||||
jobs:
|
||||
translate:
|
||||
permissions:
|
||||
issues: write
|
||||
discussions: write
|
||||
pull-requests: write
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: usthe/issues-translate-action@v2.7
|
||||
with:
|
||||
IS_MODIFY_TITLE: true
|
||||
BOT_GITHUB_TOKEN: ${{ secrets.GH_PAT }}
|
||||
CUSTOM_BOT_NOTE: Bot detected the issue body's language is not English, translate it automatically. 👯👭🏻🧑🤝🧑👫🧑🏿🤝🧑🏻👩🏾🤝👨🏿👬🏿
|
||||
4
.github/workflows/docs-image.yml
vendored
@@ -1,4 +1,4 @@
|
||||
name: Build FastGPT docs images and copy image to docker hub
|
||||
name: Build docs images and copy image to docker hub
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
@@ -95,4 +95,4 @@ jobs:
|
||||
env:
|
||||
KUBE_CONFIG: ${{ secrets.KUBE_CONFIG }}
|
||||
with:
|
||||
args: rollout restart deployment fastgpt-docs
|
||||
args: rollout restart deployment fastgpt-docs
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
name: deploy-docs-preview
|
||||
name: preview-docs
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
@@ -74,7 +74,7 @@ jobs:
|
||||
alias-domains: | #Optional
|
||||
fastgpt-staging.vercel.app
|
||||
docsOutput:
|
||||
needs: [ deploy-preview ]
|
||||
needs: [deploy-preview]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
@@ -92,7 +92,7 @@ jobs:
|
||||
with:
|
||||
version: v0.0.6
|
||||
env:
|
||||
GH_TOKEN: "${{ secrets.GH_PAT }}"
|
||||
SEALOS_TYPE: "pr_comment"
|
||||
SEALOS_FILENAME: "report.md"
|
||||
SEALOS_REPLACE_TAG: "DEFAULT_REPLACE_DEPLOY"
|
||||
GH_TOKEN: '${{ secrets.GH_PAT }}'
|
||||
SEALOS_TYPE: 'pr_comment'
|
||||
SEALOS_FILENAME: 'report.md'
|
||||
SEALOS_REPLACE_TAG: 'DEFAULT_REPLACE_DEPLOY'
|
||||
5
.vscode/extensions.json
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"recommendations": [
|
||||
"inlang.vs-code-extension"
|
||||
]
|
||||
}
|
||||
2
.vscode/settings.json
vendored
@@ -2,7 +2,7 @@
|
||||
"editor.formatOnSave": true,
|
||||
"editor.mouseWheelZoom": true,
|
||||
"typescript.tsdk": "node_modules/typescript/lib",
|
||||
"prettier.prettierPath": "./node_modules/prettier",
|
||||
"prettier.prettierPath": "",
|
||||
"i18n-ally.localesPaths": [
|
||||
"projects/app/public/locales",
|
||||
],
|
||||
|
||||
@@ -8,7 +8,7 @@ ARG proxy
|
||||
RUN [ -z "$proxy" ] || sed -i 's/dl-cdn.alpinelinux.org/mirrors.ustc.edu.cn/g' /etc/apk/repositories
|
||||
RUN apk add --no-cache libc6-compat && npm install -g pnpm@8.6.0
|
||||
# if proxy exists, set proxy
|
||||
RUN [ -z "$proxy" ] || pnpm config set registry https://registry.npm.taobao.org
|
||||
RUN [ -z "$proxy" ] || pnpm config set registry https://registry.npmmirror.com
|
||||
|
||||
# copy packages and one project
|
||||
COPY pnpm-lock.yaml pnpm-workspace.yaml ./
|
||||
@@ -28,7 +28,7 @@ ARG proxy
|
||||
RUN [ -z "$proxy" ] || sed -i 's/dl-cdn.alpinelinux.org/mirrors.ustc.edu.cn/g' /etc/apk/repositories
|
||||
RUN apk add --no-cache libc6-compat && npm install -g pnpm@8.6.0
|
||||
# if proxy exists, set proxy
|
||||
RUN [ -z "$proxy" ] || pnpm config set registry https://registry.npm.taobao.org
|
||||
RUN [ -z "$proxy" ] || pnpm config set registry https://registry.npmmirror.com
|
||||
|
||||
COPY ./worker /app/worker
|
||||
RUN cd /app/worker && pnpm i --production --ignore-workspace
|
||||
@@ -78,7 +78,7 @@ COPY --from=builder /app/projects/$name/package.json ./package.json
|
||||
COPY --from=workerDeps /app/worker /app/worker
|
||||
# copy config
|
||||
COPY ./projects/$name/data /app/data
|
||||
|
||||
RUN chown -R nextjs:nodejs /app/data
|
||||
|
||||
ENV NODE_ENV production
|
||||
ENV NEXT_TELEMETRY_DISABLED 1
|
||||
|
||||
14
README.md
@@ -57,7 +57,7 @@ https://github.com/labring/FastGPT/assets/15308462/7d3a38df-eb0e-4388-9250-2409b
|
||||
- [x] 源文件引用追踪
|
||||
- [x] 模块封装,实现多级复用
|
||||
- [x] 混合检索 & 重排
|
||||
- [ ] 自查询规划
|
||||
- [ ] Tool 模块
|
||||
- [ ] 嵌入 [Laf](https://github.com/labring/laf),实现在线编写 HTTP 模块
|
||||
- [ ] 插件封装功能
|
||||
|
||||
@@ -67,10 +67,10 @@ https://github.com/labring/FastGPT/assets/15308462/7d3a38df-eb0e-4388-9250-2409b
|
||||
- [x] 支持知识库单独设置向量模型
|
||||
- [x] 源文件存储
|
||||
- [x] 支持手动输入,直接分段,QA 拆分导入
|
||||
- [x] 支持 pdf、word、txt、md 等常用文件,支持 url 读取、CSV 批量导入
|
||||
- [ ] 支持 HTML、csv、PPT、Excel 导入
|
||||
- [x] 支持 pdf,docx,txt,html,md,csv
|
||||
- [x] 支持 url 读取、CSV 批量导入
|
||||
- [ ] 支持 PPT、Excel 导入
|
||||
- [ ] 支持文件阅读器
|
||||
- [ ] 支持差异性文件同步
|
||||
- [ ] 更多的数据预处理方案
|
||||
|
||||
`3` 应用调试能力
|
||||
@@ -81,8 +81,8 @@ https://github.com/labring/FastGPT/assets/15308462/7d3a38df-eb0e-4388-9250-2409b
|
||||
- [ ] 高级编排 DeBug 模式
|
||||
|
||||
`4` OpenAPI 接口
|
||||
- [x] completions 接口 (对齐 GPT 接口)
|
||||
- [ ] 知识库 CRUD
|
||||
- [x] completions 接口 (chat 模式对齐 GPT 接口)
|
||||
- [x] 知识库 CRUD
|
||||
- [ ] 对话 CRUD
|
||||
|
||||
`5` 运营能力
|
||||
@@ -215,4 +215,4 @@ https://github.com/labring/FastGPT/assets/15308462/7d3a38df-eb0e-4388-9250-2409b
|
||||
1. 允许作为后台服务直接商用,但不允许提供 SaaS 服务。
|
||||
2. 未经商业授权,任何形式的商用服务均需保留相关版权信息。
|
||||
3. 完整请查看 [FastGPT Open Source License](./LICENSE)
|
||||
4. 联系方式:yujinlong@sealos.io,[点击查看商业版定价策略](https://doc.fastgpt.in/docs/commercial)
|
||||
4. 联系方式:yujinlong@sealos.io,[点击查看商业版定价策略](https://doc.fastgpt.in/docs/commercial)
|
||||
|
||||
70
README_en.md
@@ -49,48 +49,52 @@ Cloud: [fastgpt.in](https://fastgpt.in/)
|
||||
|
||||
## 💡 Features
|
||||
|
||||
1. Powerful visual workflows: Effortlessly craft AI applications
|
||||
`1` Application Orchestration Features
|
||||
|
||||
- [x] Simple mode on deck - no need for manual arrangement
|
||||
- [x] User dialogue pre-guidance
|
||||
- [x] Global variables
|
||||
- [x] Knowledge base search
|
||||
- [x] Dialogue via multiple LLM models
|
||||
- [x] Text magic - convert to structured data
|
||||
- [x] Extend with HTTP
|
||||
- [ ] Embed Laf for on-the-fly HTTP module crafting
|
||||
- [x] Directions for the next dialogue steps
|
||||
- [x] Tracking source file references
|
||||
- [ ] Custom file reader
|
||||
- [ ] Modules are packaged into plug-ins to achieve reuse
|
||||
- [x] Offers a straightforward mode, eliminating the need for complex orchestration
|
||||
- [x] Provides clear next-step instructions in dialogues
|
||||
- [x] Facilitates workflow orchestration
|
||||
- [x] Tracks references in source files
|
||||
- [x] Encapsulates modules for enhanced reuse at multiple levels
|
||||
- [x] Combines search and reordering functions
|
||||
- [ ] Includes a tool module
|
||||
- [ ] Integrates [Laf](https://github.com/labring/laf) for online HTTP module creation
|
||||
- [ ] Plugin encapsulation capabilities
|
||||
|
||||
2. Extensive knowledge base preprocessing
|
||||
`2` Knowledge Base Features
|
||||
|
||||
- [x] Reuse and mix multiple knowledge bases
|
||||
- [x] Track chunk modifications and deletions
|
||||
- [x] Supports manual entries, direct segmentation, and QA split imports
|
||||
- [x] Supports URL fetching and batch CSV imports
|
||||
- [x] Supports setting unique vector models for knowledge bases
|
||||
- [x] Store original files
|
||||
- [ ] File learning Agent
|
||||
- [x] Allows for the mixed use of multiple databases
|
||||
- [x] Keeps track of modifications and deletions in data chunks
|
||||
- [x] Enables specific vector models for each knowledge base
|
||||
- [x] Stores original source files
|
||||
- [x] Supports direct input and segment-based QA import
|
||||
- [x] Compatible with a variety of file formats: pdf, docx, txt, html, md, csv
|
||||
- [x] Facilitates URL reading and bulk CSV importing
|
||||
- [ ] Supports PPT and Excel file import
|
||||
- [ ] Features a file reader
|
||||
- [ ] Offers diverse data preprocessing options
|
||||
|
||||
3. Multiple effect testing channels
|
||||
`3` Application Debugging Features
|
||||
|
||||
- [x] Single-point knowledge base search test
|
||||
- [x] Feedback references and ability to modify and delete during dialogue
|
||||
- [x] Complete context presentation
|
||||
- [ ] Complete module intermediate value presentation
|
||||
- [x] Enables targeted search testing within the knowledge base
|
||||
- [x] Allows feedback, editing, and deletion during conversations
|
||||
- [x] Presents the full context of interactions
|
||||
- [x] Displays all intermediate values within modules
|
||||
- [ ] Advanced DeBug mode for orchestration
|
||||
|
||||
4. OpenAPI
|
||||
`4` OpenAPI Interface
|
||||
|
||||
- [x] completions interface (aligned with GPT interface)
|
||||
- [ ] Knowledge base CRUD
|
||||
- [x] The completions interface (aligned with GPT's chat mode interface)
|
||||
- [x] CRUD operations for the knowledge base
|
||||
- [ ] CRUD operations for conversations
|
||||
|
||||
5. Operational functions
|
||||
`5` Operational Features
|
||||
|
||||
- [x] Share without requiring login
|
||||
- [x] Easy embedding with Iframe
|
||||
- [x] Customizable chat window embedding with features like default open, drag-and-drop
|
||||
- [x] Centralizes conversation records for review and annotation
|
||||
|
||||
- [x] Login-free sharing window
|
||||
- [x] One-click embedding with Iframe
|
||||
- [ ] Unified access to dialogue records
|
||||
|
||||
<a href="#readme">
|
||||
<img src="https://img.shields.io/badge/-Back_to_Top-7d09f1.svg" alt="#" align="right">
|
||||
|
||||
|
Before Width: | Height: | Size: 38 KiB After Width: | Height: | Size: 212 KiB |
BIN
docSite/assets/imgs/dataset_search_params2.png
Normal file
|
After Width: | Height: | Size: 92 KiB |
BIN
docSite/assets/imgs/dataset_search_params3.png
Normal file
|
After Width: | Height: | Size: 193 KiB |
BIN
docSite/assets/imgs/dataset_search_process.png
Normal file
|
After Width: | Height: | Size: 428 KiB |
BIN
docSite/assets/imgs/dataset_tree.png
Normal file
|
After Width: | Height: | Size: 100 KiB |
|
Before Width: | Height: | Size: 285 KiB After Width: | Height: | Size: 356 KiB |
|
Before Width: | Height: | Size: 307 KiB After Width: | Height: | Size: 100 KiB |
|
Before Width: | Height: | Size: 301 KiB After Width: | Height: | Size: 160 KiB |
|
Before Width: | Height: | Size: 339 KiB After Width: | Height: | Size: 156 KiB |
|
Before Width: | Height: | Size: 95 KiB After Width: | Height: | Size: 160 KiB |
|
Before Width: | Height: | Size: 78 KiB After Width: | Height: | Size: 58 KiB |
BIN
docSite/assets/imgs/demo-dalle1.png
Normal file
|
After Width: | Height: | Size: 629 KiB |
BIN
docSite/assets/imgs/demo-dalle2.png
Normal file
|
After Width: | Height: | Size: 753 KiB |
|
Before Width: | Height: | Size: 303 KiB After Width: | Height: | Size: 381 KiB |
|
Before Width: | Height: | Size: 336 KiB After Width: | Height: | Size: 198 KiB |
|
Before Width: | Height: | Size: 50 KiB After Width: | Height: | Size: 31 KiB |
|
Before Width: | Height: | Size: 95 KiB After Width: | Height: | Size: 155 KiB |
@@ -1,19 +1,77 @@
|
||||
---
|
||||
title: '知识库搜索参数'
|
||||
description: '知识库搜索原理'
|
||||
title: '知识库搜索介绍'
|
||||
description: '本节会详细介绍 FastGPT 知识库结构设计,理解其 QA 的存储格式和多向量映射,以便更好的构建知识库。同时会介绍每个搜索参数的功能。这篇介绍主要以使用为主,详细原理不多介绍。'
|
||||
icon: 'language'
|
||||
draft: false
|
||||
toc: true
|
||||
weight: 106
|
||||
---
|
||||
|
||||
在知识库搜索的方式上,FastGPT提供了三种方式,分别为“语义检索”“增强语义检索”“混合检索”。
|
||||
## 理解向量
|
||||
|
||||

|
||||
FastGPT 采用了 RAG 中的 Embedding 方案构建知识库,要使用好 FastGPT 需要简单的理解`Embedding`向量是如何工作的及其特点。
|
||||
|
||||
## 搜索模式
|
||||
人类的文字、图片、视频等媒介是无法直接被计算机理解的,要想让计算机理解两段文字是否有相似性、相关性,通常需要将它们转成计算机可以理解的语言,向量是其中的一种方式。
|
||||
|
||||
### 语义检索
|
||||
向量可以简单理解为一个数字数组,两个向量之间可以通过数学公式得出一个`距离`,距离越小代表两个向量的相似度越大。从而映射到文字、图片、视频等媒介上,可以用来判断两个媒介之间的相似度。向量搜索便是利用了这个原理。
|
||||
|
||||
而由于文字是有多种类型,并且拥有成千上万种组合方式,因此在转成向量进行相似度匹配时,很难保障其精确性。在向量方案构建的知识库中,通常使用`topk`召回的方式,也就是查找前`k`个最相似的内容,丢给大模型去做更进一步的`语义判断`、`逻辑推理`和`归纳总结`,从而实现知识库问答。因此,在知识库问答中,向量搜索的环节是最为重要的。
|
||||
|
||||
影响向量搜索精度的因素非常多,主要包括:向量模型的质量、数据的质量(长度,完整性,多样性)、检索器的精度(速度与精度之间的取舍)。与数据质量对应的就是检索词的质量。
|
||||
|
||||
检索器的精度比较容易解决,向量模型的训练略复杂,因此数据和检索词质量优化成了一个重要的环节。
|
||||
|
||||
|
||||
### 提高向量搜索精度的方法
|
||||
|
||||
1. 更好分词分段:当一段话的结构和语义是完整的,并且是单一的,精度也会提高。因此,许多系统都会优化分词器,尽可能的保障每组数据的完整性。
|
||||
2. 精简`index`的内容,减少向量内容的长度:当`index`的内容更少,更准确时,检索精度自然会提高。但与此同时,会牺牲一定的检索范围,适合答案较为严格的场景。
|
||||
3. 丰富`index`的数量,可以为同一个`chunk`内容增加多组`index`。
|
||||
4. 优化检索词:在实际使用过程中,用户的问题通常是模糊的或是缺失的,并不一定是完整清晰的问题。因此优化用户的问题(检索词)很大程度上也可以提高精度。
|
||||
5. 微调向量模型:由于市面上直接使用的向量模型都是通用型模型,在特定领域的检索精度并不高,因此微调向量模型可以很大程度上提高专业领域的检索效果。
|
||||
|
||||
## FastGPT 构建知识库方案
|
||||
|
||||
### 数据存储结构
|
||||
|
||||
在 FastGPT 中,整个知识库由库、集合和数据 3 部分组成。集合可以简单理解为一个`文件`。一个`库`中可以包含多个`集合`,一个`集合`中可以包含多组`数据`。最小的搜索单位是`库`,也就是说,知识库搜索时,是对整个`库`进行搜索,而集合仅是为了对数据进行分类管理,与搜索效果无关。(起码目前还是)
|
||||
|
||||

|
||||
|
||||
### 向量存储结构
|
||||
|
||||
FastGPT 采用了`PostgreSQL`的`PG Vector`插件作为向量检索器,索引为`HNSW`。且`PostgreSQL`仅用于向量检索(该引擎可以替换成其它数据库),`MongoDB`用于其他数据的存取。
|
||||
|
||||
在`MongoDB`的`dataset.datas`表中,会存储向量原数据的信息,同时有一个`indexes`字段,会记录其对应的向量ID,这是一个数组,也就是说,一组向量可以对应多组数据。
|
||||
|
||||
在`PostgreSQL`的表中,设置一个`vector`字段用于存储向量。在检索时,会先召回向量,再根据向量的ID,去`MongoDB`中寻找原数据内容,如果对应了同一组原数据,则进行合并,向量得分取最高得分。
|
||||
|
||||

|
||||
|
||||
### 多向量的目的和使用方式
|
||||
|
||||
在一组向量中,内容的长度和语义的丰富度通常是矛盾的,无法兼得。因此,FastGPT 采用了多向量映射的方式,将一组数据映射到多组向量中,从而保障数据的完整性和语义的丰富度。
|
||||
|
||||
你可以为一组较长的文本,添加多组向量,从而在检索时,只要其中一组向量被检索到,该数据也将被召回。
|
||||
|
||||
### 检索方案
|
||||
|
||||
1. 通过`问题补全`实现指代消除和问题扩展,从而增加连续对话的检索能力以及语义丰富度。
|
||||
2. 通过`Concat query`来提高`Rerank`在连续对话时排序的准确性。
|
||||
3. 通过`RRF`合并方式,综合多个渠道的检索效果。
|
||||
4. 通过`Rerank`来二次排序,提高精度。
|
||||
|
||||

|
||||
|
||||
|
||||
## 搜索参数
|
||||
| | | |
|
||||
| --- |---| --- |
|
||||
||  |  |
|
||||
|
||||
### 搜索模式
|
||||
|
||||
#### 语义检索
|
||||
|
||||
语义检索是通过向量距离,计算用户问题与知识库内容的距离,从而得出“相似度”,当然这并不是语文上的相似度,而是数学上的。
|
||||
|
||||
@@ -27,32 +85,50 @@ weight: 106
|
||||
- 精度不稳定
|
||||
- 受关键词和句子完整度影响
|
||||
|
||||
### 全文检索
|
||||
#### 全文检索
|
||||
|
||||
采用传统的全文检索方式。适合查找关键的主谓语等。
|
||||
|
||||
### 混合检索
|
||||
#### 混合检索
|
||||
|
||||
同时使用向量检索和全文检索,并通过 RRF 公式进行两个搜索结果合并,一般情况下搜索结果会更加丰富准确。
|
||||
|
||||
由于混合检索后的查找范围很大,并且无法直接进行相似度过滤,通常需要利用重排模型进行一次结果重新排序,并利用重排的得分进行过滤。
|
||||
|
||||
|
||||
|
||||
## 结果重排
|
||||
#### 结果重排
|
||||
|
||||
利用`ReRank`模型对搜索结果进行重排,绝大多数情况下,可以有效提高搜索结果的准确率。不过,重排模型与问题的完整度(主谓语齐全)有一些关系,通常会先走问题补全后再进行搜索-重排。重排后可以得到一个`0-1`的得分,代表着搜索内容与问题的相关度,该分数通常比向量的得分更加精确,可以根据得分进行过滤。
|
||||
|
||||
FastGPT 会使用 `RRF` 对重排结果、向量搜索结果、全文检索结果进行合并,得到最终的搜索结果。
|
||||
|
||||
## 引用上限
|
||||
### 搜索过滤
|
||||
|
||||
#### 引用上限
|
||||
|
||||
每次搜索最多引用`n`个`tokens`的内容。
|
||||
|
||||
之所以不采用`top k`,是发现在混合知识库(问答库、文档库)时,不同`chunk`的长度差距很大,会导致`top k`的结果不稳定,因此采用了`tokens`的方式进行引用上限的控制。
|
||||
|
||||
## 最低相关度
|
||||
#### 最低相关度
|
||||
|
||||
一个`0-1`的数值,会过滤掉一些低相关度的搜索结果。
|
||||
|
||||
该值仅在`语义检索`或使用`结果重排`时生效。
|
||||
|
||||
### 问题补全
|
||||
|
||||
#### 背景
|
||||
|
||||
在 RAG 中,我们需要根据输入的问题去数据库里执行 embedding 搜索,查找相关的内容,从而查找到相似的内容(简称知识库搜索)。
|
||||
|
||||
在搜索的过程中,尤其是连续对话的搜索,我们通常会发现后续的问题难以搜索到合适的内容,其中一个原因是知识库搜索只会使用“当前”的问题去执行。看下面的例子:
|
||||
|
||||

|
||||
|
||||
用户在提问“第二点是什么”的时候,只会去知识库里查找“第二点是什么”,压根查不到内容。实际上需要查询的是“QA结构是什么”。因此我们需要引入一个【问题补全】模块,来对用户当前的问题进行补全,从而使得知识库搜索能够搜索到合适的内容。使用补全后效果如下:
|
||||
|
||||

|
||||
|
||||
#### 实现方式
|
||||
|
||||
在进行`数据检索`前,会先让模型进行`指代消除`与`问题扩展`,一方面可以解决指代对象不明确的问题,同时可以扩展问题的语义丰富度。
|
||||
|
||||
@@ -13,153 +13,7 @@ weight: 708
|
||||
|
||||
这个配置文件中包含了系统级参数、AI 对话的模型、function 模型等……
|
||||
|
||||
|
||||
## 旧版本配置文件
|
||||
|
||||
以下配置适合 4.6.6-alpha 之前
|
||||
|
||||
```json
|
||||
{
|
||||
"SystemParams": {
|
||||
"vectorMaxProcess": 15, // 向量生成最大进程,结合数据库性能和 key 来设置
|
||||
"qaMaxProcess": 15, // QA 生成最大进程,结合数据库性能和 key 来设置
|
||||
"pgHNSWEfSearch": 100 // pg vector 索引参数,越大精度高但速度慢
|
||||
},
|
||||
"ChatModels": [ // 对话模型
|
||||
{
|
||||
"model": "gpt-3.5-turbo-1106",
|
||||
"name": "GPT35-1106",
|
||||
"price": 0, // 除以 100000 后等于1个token的价格
|
||||
"maxContext": 16000, // 最大上下文长度
|
||||
"maxResponse": 4000, // 最大回复长度
|
||||
"quoteMaxToken": 2000, // 最大引用内容长度
|
||||
"maxTemperature": 1.2, // 最大温度值
|
||||
"censor": false, // 是否开启敏感词过滤(商业版)
|
||||
"vision": false, // 支持图片输入
|
||||
"defaultSystemChatPrompt": ""
|
||||
},
|
||||
{
|
||||
"model": "gpt-3.5-turbo-16k",
|
||||
"name": "GPT35-16k",
|
||||
"maxContext": 16000,
|
||||
"maxResponse": 16000,
|
||||
"price": 0,
|
||||
"quoteMaxToken": 8000,
|
||||
"maxTemperature": 1.2,
|
||||
"censor": false,
|
||||
"vision": false,
|
||||
"defaultSystemChatPrompt": ""
|
||||
},
|
||||
{
|
||||
"model": "gpt-4",
|
||||
"name": "GPT4-8k",
|
||||
"maxContext": 8000,
|
||||
"maxResponse": 8000,
|
||||
"price": 0,
|
||||
"quoteMaxToken": 4000,
|
||||
"maxTemperature": 1.2,
|
||||
"censor": false,
|
||||
"vision": false,
|
||||
"defaultSystemChatPrompt": ""
|
||||
},
|
||||
{
|
||||
"model": "gpt-4-vision-preview",
|
||||
"name": "GPT4-Vision",
|
||||
"maxContext": 128000,
|
||||
"maxResponse": 4000,
|
||||
"price": 0,
|
||||
"quoteMaxToken": 100000,
|
||||
"maxTemperature": 1.2,
|
||||
"censor": false,
|
||||
"vision": true,
|
||||
"defaultSystemChatPrompt": ""
|
||||
}
|
||||
],
|
||||
"QAModels": [ // QA 生成模型
|
||||
{
|
||||
"model": "gpt-3.5-turbo-16k",
|
||||
"name": "GPT35-16k",
|
||||
"maxContext": 16000,
|
||||
"maxResponse": 16000,
|
||||
"price": 0
|
||||
}
|
||||
],
|
||||
"CQModels": [ // 问题分类模型
|
||||
{
|
||||
"model": "gpt-3.5-turbo-1106",
|
||||
"name": "GPT35-1106",
|
||||
"maxContext": 16000,
|
||||
"maxResponse": 4000,
|
||||
"price": 0,
|
||||
"toolChoice": true, // 是否支持openai的 toolChoice, 不支持的模型需要设置为 false,会走提示词生成
|
||||
"functionPrompt": ""
|
||||
},
|
||||
{
|
||||
"model": "gpt-4",
|
||||
"name": "GPT4-8k",
|
||||
"maxContext": 8000,
|
||||
"maxResponse": 8000,
|
||||
"price": 0,
|
||||
"toolChoice": true,
|
||||
"functionPrompt": ""
|
||||
}
|
||||
],
|
||||
"ExtractModels": [ // 内容提取模型
|
||||
{
|
||||
"model": "gpt-3.5-turbo-1106",
|
||||
"name": "GPT35-1106",
|
||||
"maxContext": 16000,
|
||||
"maxResponse": 4000,
|
||||
"price": 0,
|
||||
"toolChoice": true,
|
||||
"functionPrompt": ""
|
||||
}
|
||||
],
|
||||
"QGModels": [ // 生成下一步指引
|
||||
{
|
||||
"model": "gpt-3.5-turbo-1106",
|
||||
"name": "GPT35-1106",
|
||||
"maxContext": 16000,
|
||||
"maxResponse": 4000,
|
||||
"price": 0
|
||||
}
|
||||
],
|
||||
"VectorModels": [ // 向量模型
|
||||
{
|
||||
"model": "text-embedding-ada-002",
|
||||
"name": "Embedding-2",
|
||||
"price": 0.2,
|
||||
"defaultToken": 700,
|
||||
"maxToken": 3000
|
||||
}
|
||||
],
|
||||
"ReRankModels": [], // 重排模型,暂时填空数组
|
||||
"AudioSpeechModels": [
|
||||
{
|
||||
"model": "tts-1",
|
||||
"name": "OpenAI TTS1",
|
||||
"price": 0,
|
||||
"baseUrl": "",
|
||||
"key": "",
|
||||
"voices": [
|
||||
{ "label": "Alloy", "value": "alloy", "bufferId": "openai-Alloy" },
|
||||
{ "label": "Echo", "value": "echo", "bufferId": "openai-Echo" },
|
||||
{ "label": "Fable", "value": "fable", "bufferId": "openai-Fable" },
|
||||
{ "label": "Onyx", "value": "onyx", "bufferId": "openai-Onyx" },
|
||||
{ "label": "Nova", "value": "nova", "bufferId": "openai-Nova" },
|
||||
{ "label": "Shimmer", "value": "shimmer", "bufferId": "openai-Shimmer" }
|
||||
]
|
||||
}
|
||||
],
|
||||
"WhisperModel": {
|
||||
"model": "whisper-1",
|
||||
"name": "Whisper1",
|
||||
"price": 0
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## 4.6.6-alpha 版本完整配置参数
|
||||
## 4.6.8 以前版本完整配置参数
|
||||
|
||||
**使用时,请务必去除注释!**
|
||||
|
||||
@@ -315,6 +169,134 @@ weight: 708
|
||||
}
|
||||
```
|
||||
|
||||
## 4.6.8 新配置文件
|
||||
|
||||
llm模型全部合并
|
||||
|
||||
```json
|
||||
{
|
||||
"systemEnv": {
|
||||
"openapiPrefix": "fastgpt",
|
||||
"vectorMaxProcess": 15,
|
||||
"qaMaxProcess": 15,
|
||||
"pgHNSWEfSearch": 100
|
||||
},
|
||||
"llmModels": [
|
||||
{
|
||||
"model": "gpt-3.5-turbo-1106", // 模型名
|
||||
"name": "gpt-3.5-turbo", // 别名
|
||||
"maxContext": 16000, // 最大上下文
|
||||
"maxResponse": 4000, // 最大回复
|
||||
"quoteMaxToken": 13000, // 最大引用内容
|
||||
"maxTemperature": 1.2, // 最大温度
|
||||
"inputPrice": 0,
|
||||
"outputPrice": 0,
|
||||
"censor": false,
|
||||
"vision": false, // 是否支持图片输入
|
||||
"datasetProcess": false, // 是否设置为知识库处理模型
|
||||
"toolChoice": true, // 是否支持工具选择
|
||||
"functionCall": false, // 是否支持函数调用
|
||||
"customCQPrompt": "", // 自定义文本分类提示词(不支持工具和函数调用的模型)
|
||||
"customExtractPrompt": "", // 自定义内容提取提示词
|
||||
"defaultSystemChatPrompt": "", // 对话默认携带的系统提示词
|
||||
"defaultConfig":{} // 对话默认配置(比如 GLM4 的 top_p)
|
||||
},
|
||||
{
|
||||
"model": "gpt-3.5-turbo-16k",
|
||||
"name": "gpt-3.5-turbo-16k",
|
||||
"maxContext": 16000,
|
||||
"maxResponse": 16000,
|
||||
"quoteMaxToken": 13000,
|
||||
"maxTemperature": 1.2,
|
||||
"inputPrice": 0,
|
||||
"outputPrice": 0,
|
||||
"censor": false,
|
||||
"vision": false,
|
||||
"datasetProcess": true,
|
||||
"toolChoice": true,
|
||||
"functionCall": false,
|
||||
"customCQPrompt": "",
|
||||
"customExtractPrompt": "",
|
||||
"defaultSystemChatPrompt": "",
|
||||
"defaultConfig":{}
|
||||
},
|
||||
{
|
||||
"model": "gpt-4-0125-preview",
|
||||
"name": "gpt-4-turbo",
|
||||
"maxContext": 125000,
|
||||
"maxResponse": 4000,
|
||||
"quoteMaxToken": 100000,
|
||||
"maxTemperature": 1.2,
|
||||
"inputPrice": 0,
|
||||
"outputPrice": 0,
|
||||
"censor": false,
|
||||
"vision": false,
|
||||
"datasetProcess": false,
|
||||
"toolChoice": true,
|
||||
"functionCall": false,
|
||||
"customCQPrompt": "",
|
||||
"customExtractPrompt": "",
|
||||
"defaultSystemChatPrompt": "",
|
||||
"defaultConfig":{}
|
||||
},
|
||||
{
|
||||
"model": "gpt-4-vision-preview",
|
||||
"name": "gpt-4-vision",
|
||||
"maxContext": 128000,
|
||||
"maxResponse": 4000,
|
||||
"quoteMaxToken": 100000,
|
||||
"maxTemperature": 1.2,
|
||||
"inputPrice": 0,
|
||||
"outputPrice": 0,
|
||||
"censor": false,
|
||||
"vision": false,
|
||||
"datasetProcess": false,
|
||||
"toolChoice": true,
|
||||
"functionCall": false,
|
||||
"customCQPrompt": "",
|
||||
"customExtractPrompt": "",
|
||||
"defaultSystemChatPrompt": "",
|
||||
"defaultConfig":{}
|
||||
}
|
||||
],
|
||||
"vectorModels": [
|
||||
{
|
||||
"model": "text-embedding-ada-002",
|
||||
"name": "Embedding-2",
|
||||
"inputPrice": 0,
|
||||
"outputPrice": 0,
|
||||
"defaultToken": 700,
|
||||
"maxToken": 3000,
|
||||
"weight": 100,
|
||||
"defaultConfig":{} // 默认配置。例如,如果希望使用 embedding3-large 的话,可以传入 dimensions:1024,来返回1024维度的向量。(目前必须小于1536维度)
|
||||
}
|
||||
],
|
||||
"reRankModels": [],
|
||||
"audioSpeechModels": [
|
||||
{
|
||||
"model": "tts-1",
|
||||
"name": "OpenAI TTS1",
|
||||
"inputPrice": 0,
|
||||
"outputPrice": 0,
|
||||
"voices": [
|
||||
{ "label": "Alloy", "value": "alloy", "bufferId": "openai-Alloy" },
|
||||
{ "label": "Echo", "value": "echo", "bufferId": "openai-Echo" },
|
||||
{ "label": "Fable", "value": "fable", "bufferId": "openai-Fable" },
|
||||
{ "label": "Onyx", "value": "onyx", "bufferId": "openai-Onyx" },
|
||||
{ "label": "Nova", "value": "nova", "bufferId": "openai-Nova" },
|
||||
{ "label": "Shimmer", "value": "shimmer", "bufferId": "openai-Shimmer" }
|
||||
]
|
||||
}
|
||||
],
|
||||
"whisperModel": {
|
||||
"model": "whisper-1",
|
||||
"name": "Whisper1",
|
||||
"inputPrice": 0,
|
||||
"outputPrice": 0
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## 特殊模型
|
||||
|
||||
### ReRank 接入
|
||||
|
||||
@@ -59,10 +59,10 @@ Authorization 为 sk-aaabbbcccdddeeefffggghhhiiijjjkkk。model 为刚刚在 One
|
||||
|
||||
## 接入 FastGPT
|
||||
|
||||
修改 config.json 配置文件,在 ChatModels 中加入 chatglm2, 在 VectorModels 中加入 M3E 模型:
|
||||
修改 config.json 配置文件,在 llmModels 中加入 chatglm2, 在 vectorModels 中加入 M3E 模型:
|
||||
|
||||
```json
|
||||
"ChatModels": [
|
||||
"llmModels": [
|
||||
//其他对话模型
|
||||
{
|
||||
"model": "chatglm2",
|
||||
@@ -74,7 +74,7 @@ Authorization 为 sk-aaabbbcccdddeeefffggghhhiiijjjkkk。model 为刚刚在 One
|
||||
"defaultSystemChatPrompt": ""
|
||||
}
|
||||
],
|
||||
"VectorModels": [
|
||||
"vectorModels": [
|
||||
{
|
||||
"model": "text-embedding-ada-002",
|
||||
"name": "Embedding-2",
|
||||
|
||||
@@ -99,10 +99,10 @@ Authorization 为 sk-aaabbbcccdddeeefffggghhhiiijjjkkk。model 为刚刚在 One
|
||||
|
||||
## 接入 FastGPT
|
||||
|
||||
修改 config.json 配置文件,在 ChatModels 中加入 chatglm2 模型:
|
||||
修改 config.json 配置文件,在 llmModels 中加入 chatglm2 模型:
|
||||
|
||||
```json
|
||||
"ChatModels": [
|
||||
"llmModels": [
|
||||
//已有模型
|
||||
{
|
||||
"model": "chatglm2",
|
||||
|
||||
@@ -48,10 +48,10 @@ Authorization 为 sk-key。model 为刚刚在 One API 填写的自定义模型
|
||||
|
||||
## 接入 FastGPT
|
||||
|
||||
修改 config.json 配置文件,在 VectorModels 中加入 M3E 模型:
|
||||
修改 config.json 配置文件,在 vectorModels 中加入 M3E 模型:
|
||||
|
||||
```json
|
||||
"VectorModels": [
|
||||
"vectorModels": [
|
||||
{
|
||||
"model": "text-embedding-ada-002",
|
||||
"name": "Embedding-2",
|
||||
|
||||
@@ -94,18 +94,55 @@ curl -O https://raw.githubusercontent.com/labring/FastGPT/main/files/deploy/fast
|
||||
curl -O https://raw.githubusercontent.com/labring/FastGPT/main/projects/app/data/config.json
|
||||
```
|
||||
|
||||
## 三、修改 docker-compose.yml 的环境变量
|
||||
|
||||
## 三、启动容器
|
||||
修改`docker-compose.yml`中的`OPENAI_BASE_URL`(API 接口的地址,需要加/v1)和`CHAT_API_KEY`(API 接口的凭证)。
|
||||
|
||||
修改`docker-compose.yml`中的`OPENAI_BASE_URL`和`CHAT_API_KEY`即可,对应为 API 的地址(别忘记加/v1)和 key。
|
||||
使用 OneAPI 的话,OPENAI_BASE_URL=OneAPI访问地址/v1;CHAT_API_KEY=令牌
|
||||
|
||||
|
||||
## 四、启动容器
|
||||
|
||||
在 docker-compose.yml 同级目录下执行
|
||||
|
||||
```bash
|
||||
# 在 docker-compose.yml 同级目录下执行
|
||||
# 进入项目目录
|
||||
cd 项目目录
|
||||
# 创建 mongo 密钥
|
||||
openssl rand -base64 756 > ./mongodb.key
|
||||
# 600不行可以用chmod 999
|
||||
chmod 600 ./mongodb.key
|
||||
|
||||
# 启动容器
|
||||
docker-compose pull
|
||||
docker-compose up -d
|
||||
```
|
||||
|
||||
## 四、访问 FastGPT
|
||||
## 五、初始化 Mongo 副本集(4.6.8以前可忽略)
|
||||
|
||||
FastGPT 4.6.8 后使用了 MongoDB 的事务,需要运行在副本集上。副本集没法自动化初始化,需手动操作。
|
||||
|
||||
```bash
|
||||
# 查看 mongo 容器是否正常运行
|
||||
docker ps
|
||||
# 进入容器
|
||||
docker exec -it mongo bash
|
||||
|
||||
# 连接数据库
|
||||
mongo -u myname -p mypassword --authenticationDatabase admin
|
||||
|
||||
# 初始化副本集。如果需要外网访问,mongo:27017 可以改成 ip:27017。但是需要同时修改 FastGPT 连接的参数(MONGODB_URI=mongodb://myname:mypassword@mongo:27017/fastgpt?authSource=admin => MONGODB_URI=mongodb://myname:mypassword@ip:27017/fastgpt?authSource=admin)
|
||||
rs.initiate({
|
||||
_id: "rs0",
|
||||
members: [
|
||||
{ _id: 0, host: "mongo:27017" }
|
||||
]
|
||||
})
|
||||
# 检查状态。如果提示 rs0 状态,则代表运行成功
|
||||
rs.status()
|
||||
```
|
||||
|
||||
## 五、访问 FastGPT
|
||||
|
||||
目前可以通过 `ip:3000` 直接访问(注意防火墙)。登录用户名为 `root`,密码为`docker-compose.yml`环境变量里设置的 `DEFAULT_ROOT_PSW`。
|
||||
|
||||
|
||||
@@ -37,7 +37,7 @@ OneAPI 中没有配置该模型渠道。或者是修改了配置文件中一部
|
||||
|
||||
### Incorrect API key provided: sk-xxxx.You can find your api Key at xxx
|
||||
|
||||
OneAPI 的 API Key 配置错误,需要修改`OPENAI_API_KEY`环境变量,并重启容器(先 stop 然后 rm 掉,最后再 up -d 运行一次)。可以`exec`进入容器,`env`查看环境变量是否生效。
|
||||
OneAPI 的 API Key 配置错误,需要修改`OPENAI_API_KEY`环境变量,并重启容器(先 docker-compose down 然后再 docker-compose up -d 运行一次)。可以`exec`进入容器,`env`查看环境变量是否生效。
|
||||
|
||||
### 其他模型没法进行问题分类/内容提取
|
||||
|
||||
@@ -46,10 +46,25 @@ OneAPI 的 API Key 配置错误,需要修改`OPENAI_API_KEY`环境变量,并
|
||||
### 页面崩溃
|
||||
|
||||
1. 关闭翻译
|
||||
2. 检查配置文件是否正常加载,如果没有正常加载会导致缺失系统信息,在某些操作下会导致空指针。
|
||||
2. 检查配置文件是否正常加载,如果没有正常加载会导致缺失系统信息,在某些操作下会导致空指针。(95%)
|
||||
3. 某些api不兼容问题(较少)
|
||||
|
||||
## 私有部署问题
|
||||
|
||||
### 知识库索引没有进度
|
||||
|
||||
先看日志报错信息。
|
||||
|
||||
1. 可以对话,但是索引没有进度:没有配置向量模型(vectorModels)
|
||||
2. 不能对话,也不能索引:API调用失败。可能是没连上OneAPI或OpenAI
|
||||
3. 有进度,但是非常慢:api key不行,OpenAI的免费号,一分钟只有3次还是60次。一天上限200次。
|
||||
|
||||
## Docker 部署常见问题
|
||||
|
||||
### 首次部署,root用户提示未注册
|
||||
|
||||
没有启动 Mongo 副本集模式。
|
||||
|
||||
### 如何更新?
|
||||
|
||||
1. 查看[更新文档](/docs/development/upgrading/intro/),确认要升级的版本,避免跨版本升级。
|
||||
@@ -76,6 +91,7 @@ OneAPI 的 API Key 配置错误,需要修改`OPENAI_API_KEY`环境变量,并
|
||||
|
||||
1. 挂载目录不正确
|
||||
2. 配置文件不正确,日志中会提示`invalid json`,配置文件需要是标准的 JSON 文件。
|
||||
3. 修改后,没有`docker-compose down`再`docker-compose up -d`,restart是不会重新挂载文件的。
|
||||
|
||||
### 为什么无法连接`本地模型`镜像。
|
||||
|
||||
@@ -102,8 +118,9 @@ PG 数据库没有连接上/初始化失败,可以查看日志。FastGPT 会
|
||||
### Operation `auth_codes.findOne()` buffering timed out after 10000ms
|
||||
|
||||
mongo连接失败,检查
|
||||
1. mongo 服务有没有起来(有些 cpu 不支持 AVX,无法用 mongo5,需要换成 mongo4.x,可以dockerhub找个最新的4.x,修改镜像版本,重新运行)
|
||||
1. mongo 服务有没有起来(有些 cpu 不支持 AVX,无法用 mongo5,需要换成 mongo4.x,可以dockerhub找个最新的4.x,修改镜像版本,重新运行)
|
||||
2. 环境变量(账号密码,注意host和port)
|
||||
3. 副本集启动失败,一直在重启:没挂载mongo key;key没有权限;
|
||||
|
||||
## 本地开发问题
|
||||
|
||||
@@ -48,6 +48,8 @@ git clone git@github.com:<github_username>/FastGPT.git
|
||||
|
||||
第一次开发,需要先部署数据库,建议本地开发可以随便找一台 2C2G 的轻量小数据库实践。数据库部署教程:[Docker 快速部署](/docs/development/docker/)。部署完了,可以本地访问其数据库。
|
||||
|
||||
Mongo 数据库需要修改副本集的`host`,从原来的`mongo:27017`修改为`ip:27017`。
|
||||
|
||||
### 4. 初始配置
|
||||
|
||||
以下文件均在 `projects/app` 路径下。
|
||||
|
||||
@@ -11,6 +11,10 @@ weight: 708
|
||||
* [One API](https://github.com/songquanpeng/one-api) 是一个 OpenAI 接口管理 & 分发系统,可以通过标准的 OpenAI API 格式访问所有的大模型,开箱即用。
|
||||
* FastGPT 可以通过接入 OneAPI 来实现对不同大模型的支持。OneAPI 的部署方法也很简单。
|
||||
|
||||
## FastGPT 与 OneAPI 关系
|
||||
|
||||

|
||||
|
||||
## MySQL 版本
|
||||
|
||||
MySQL 版本支持多实例,高并发。
|
||||
@@ -50,7 +54,14 @@ BATCH_UPDATE_ENABLED=true
|
||||
BATCH_UPDATE_INTERVAL=60
|
||||
```
|
||||
|
||||
## One API使用步骤
|
||||
## One API使用教程
|
||||
|
||||
### 概念
|
||||
|
||||
1. 渠道:
|
||||
1. OneApi 中一个渠道对应一个 `Api Key`,这个 `Api Key` 可以是GPT、微软、ChatGLM、文心一言的。一个`Api Key`通常可以调用同一个厂商的多个模型。
|
||||
2. OneAPI 会根据请求传入的`模型`来决定使用哪一个`Key`,如果一个模型对应了多个`Key`,则会随机调用。
|
||||
2. 令牌:访问 OneAPI 所需的凭证,只需要这`1`个凭证即可访问`OneAPI`上配置的模型。因此`FastGPT`中,只需要配置`OneAPI`的`baseurl`和`令牌`即可。
|
||||
|
||||
### 1. 登录 One API
|
||||
|
||||
@@ -68,7 +79,11 @@ BATCH_UPDATE_INTERVAL=60
|
||||
创建一个令牌
|
||||

|
||||
|
||||
### 3. 修改 FastGPT 的环境变量
|
||||
### 3. 修改账号余额
|
||||
|
||||
OneAPI 默认 root 用户只有 200刀,可以自行修改编辑。
|
||||
|
||||
### 4. 修改 FastGPT 的环境变量
|
||||
|
||||
有了 One API 令牌后,FastGPT 可以通过修改 `baseurl` 和 `key` 去请求到 One API,再由 One API 去请求不同的模型。修改下面两个环境变量:
|
||||
|
||||
@@ -92,21 +107,29 @@ CHAT_API_KEY=sk-xxxxxx
|
||||
可以在 `/projects/app/src/data/config.json` 里找到配置文件(本地开发需要复制成 config.local.json),配置文件中有一项是对话模型配置:
|
||||
|
||||
```json
|
||||
"ChatModels": [
|
||||
"llmModels": [
|
||||
...
|
||||
{
|
||||
"model": "ERNIE-Bot", // 这里的模型需要对应 One API 的模型
|
||||
"name": "文心一言", // 对外展示的名称
|
||||
"maxContext": 8000, // 最大上下文 token,无论什么模型都按 GPT35 的计算。GPT 外的模型需要自行大致计算下这个值。可以调用官方接口去比对 Token 的倍率,然后在这里粗略计算。
|
||||
"maxResponse": 4000, // 最大回复 token
|
||||
// 例如:文心一言的中英文 token 基本是 1:1,而 GPT 的中文 Token 是 2:1,如果文心一言官方最大 Token 是 4000,那么这里就可以填 8000,保险点就填 7000.
|
||||
"quoteMaxToken": 2000, // 引用知识库的最大 Token
|
||||
"maxTemperature": 1, // 最大温度
|
||||
"vision": false, // 是否开启图片识别
|
||||
"defaultSystemChatPrompt": "" // 默认的系统提示词
|
||||
"maxContext": 16000, // 最大上下文
|
||||
"maxResponse": 4000, // 最大回复
|
||||
"quoteMaxToken": 13000, // 最大引用内容
|
||||
"maxTemperature": 1.2, // 最大温度
|
||||
"inputPrice": 0,
|
||||
"outputPrice": 0,
|
||||
"censor": false,
|
||||
"vision": false, // 是否支持图片输入
|
||||
"datasetProcess": false, // 是否设置为知识库处理模型
|
||||
"toolChoice": true, // 是否支持工具选择
|
||||
"functionCall": false, // 是否支持函数调用
|
||||
"customCQPrompt": "", // 自定义文本分类提示词(不支持工具和函数调用的模型)
|
||||
"customExtractPrompt": "", // 自定义内容提取提示词
|
||||
"defaultSystemChatPrompt": "", // 对话默认携带的系统提示词
|
||||
"defaultConfig":{} // 对话默认配置(比如 GLM4 的 top_p)
|
||||
}
|
||||
...
|
||||
],
|
||||
```
|
||||
|
||||
添加完后,重启 FastGPT 即可在选择文心一言模型进行对话。
|
||||
添加完后,重启 FastGPT 即可在选择文心一言模型进行对话。**添加向量模型也是类似操作,增加到 `vectorModels`里。**
|
||||
|
||||
@@ -48,7 +48,7 @@ curl --location --request POST 'https://api.fastgpt.in/api/v1/chat/completions'
|
||||
{{< /markdownify >}}
|
||||
{{< /tab >}}
|
||||
|
||||
{{< tab tabName="detail=true 响应" >}}
|
||||
{{< tab tabName="参数说明" >}}
|
||||
{{< markdownify >}}
|
||||
|
||||
{{% alert context="info" %}}
|
||||
@@ -56,7 +56,7 @@ curl --location --request POST 'https://api.fastgpt.in/api/v1/chat/completions'
|
||||
- chatId: string | undefined 。
|
||||
- 为 `undefined` 时(不传入),不使用 FastGpt 提供的上下文功能,完全通过传入的 messages 构建上下文。 不会将你的记录存储到数据库中,你也无法在记录汇总中查阅到。
|
||||
- 为`非空字符串`时,意味着使用 chatId 进行对话,自动从 FastGpt 数据库取历史记录,并使用 messages 数组最后一个内容作为用户问题。请自行确保 chatId 唯一,长度小于250,通常可以是自己系统的对话框ID。
|
||||
- messages: 结构与 [GPT接口](https://platform.openai.com/docs/api-reference/chat/object) 完全一致。
|
||||
- messages: 结构与 [GPT接口](https://platform.openai.com/docs/api-reference/chat/object) chat模式一致。
|
||||
- detail: 是否返回中间值(模块状态,响应的完整结果等),`stream模式`下会通过`event`进行区分,`非stream模式`结果保存在`responseData`中。
|
||||
- variables: 模块变量,一个对象,会替换模块中,输入框内容里的`{{key}}`
|
||||
{{% /alert %}}
|
||||
|
||||
@@ -11,6 +11,13 @@ weight: 706
|
||||
|
||||

|
||||
|
||||
## 多模型支持
|
||||
|
||||
FastGPT 使用了 one-api 项目来管理模型池,其可以兼容 OpenAI 、Azure 、国内主流模型和本地模型等。
|
||||
|
||||
可参考:[Sealos 快速部署 OneAPI](/docs/development/one-api)
|
||||
|
||||
|
||||
## 一键部署
|
||||
Sealos 的服务器在国外,不需要额外处理网络问题,无需服务器、无需魔法、无需域名,支持高并发 & 动态伸缩。点击以下按钮即可一键部署 👇
|
||||
|
||||
@@ -50,7 +57,7 @@ Sealos 的服务器在国外,不需要额外处理网络问题,无需服务
|
||||
|
||||
### 简介
|
||||
|
||||
FastGPT 商业版共包含了3个应用(fastgpt, fastgpt-plus, fastgpt-admin)和2个数据库,使用多 Api Key 时候需要安装 OneAPI(一个应用和一个数据库),总计4个应用和3个数据库。
|
||||
FastGPT 商业版共包含了2个应用(fastgpt, fastgpt-plus)和2个数据库,使用多 Api Key 时候需要安装 OneAPI(一个应用和一个数据库),总计3个应用和3个数据库。
|
||||
|
||||

|
||||
|
||||
@@ -115,10 +122,7 @@ SYSTEM_FAVICON 可以是一个网络地址
|
||||
|
||||

|
||||
|
||||
### 管理后台
|
||||
|
||||

|
||||
|
||||
### 管理后台(已合并到plus)
|
||||
|
||||
### 商业版镜像配置文件
|
||||
|
||||
|
||||
@@ -20,14 +20,16 @@ curl --location --request POST 'https://{{host}}/api/admin/initv467' \
|
||||
```
|
||||
|
||||
初始化说明:
|
||||
1. 将 images 重新关联到数据集(不初始化也问题不大,就是可能会留下永久脏数据)
|
||||
1. 将 images 重新关联到数据集
|
||||
2. 设置 pg 表的 null 值。
|
||||
|
||||
|
||||
## V4.6.7 更新说明
|
||||
|
||||
1. 修改了知识库UI及新的导入交互方式。
|
||||
2. 优化知识库和对话的数据索引。
|
||||
3. 知识库 openAPI,支持通过 API 操作知识库。(文档待补充)
|
||||
3. 知识库 openAPI,支持通过 [API 操作知识库](/docs/development/openapi/dataset)。
|
||||
4. 新增 - 输入框变量提示。输入 { 号后将会获得可用变量提示。根据社区针对高级编排的反馈,我们计划于 2 月份的版本中,优化变量内容,支持模块的局部变量以及更多全局变量写入。
|
||||
5. 修复 - API 对话时,chatId 冲突问题。
|
||||
6. 修复 - Iframe 嵌入网页可能导致的 window.onLoad 冲突。
|
||||
5. 优化 - 切换团队后会保存记录,下次登录时优先登录该团队。
|
||||
6. 修复 - API 对话时,chatId 冲突问题。
|
||||
7. 修复 - Iframe 嵌入网页可能导致的 window.onLoad 冲突。
|
||||
90
docSite/content/docs/development/upgrading/468.md
Normal file
@@ -0,0 +1,90 @@
|
||||
---
|
||||
title: 'V4.6.8(需要初始化)'
|
||||
description: 'FastGPT V4.6.8更新说明'
|
||||
icon: 'upgrade'
|
||||
draft: false
|
||||
toc: true
|
||||
weight: 828
|
||||
---
|
||||
|
||||
## docker 部署 - 手动更新 Mongo
|
||||
|
||||
1. 修改 docker-compose.yml 的mongo部分,补上`command`和`mongodb.key`
|
||||
|
||||
```yml
|
||||
mongo:
|
||||
image: mongo:5.0.18
|
||||
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/mongo:5.0.18 # 阿里云
|
||||
container_name: mongo
|
||||
ports:
|
||||
- 27017:27017
|
||||
networks:
|
||||
- fastgpt
|
||||
command: mongod --keyFile /data/mongodb.key --replSet rs0
|
||||
environment:
|
||||
# 这里密码不用变。
|
||||
- MONGO_INITDB_ROOT_USERNAME=myname
|
||||
- MONGO_INITDB_ROOT_PASSWORD=mypassword
|
||||
volumes:
|
||||
- ./mongo/data:/data/db
|
||||
- ./mongodb.key:/data/mongodb.key
|
||||
```
|
||||
|
||||
2. 创建 mongo 密钥
|
||||
|
||||
```bash
|
||||
cd 项目目录
|
||||
# 创建 mongo 密钥
|
||||
openssl rand -base64 756 > ./mongodb.key
|
||||
# 600 不行的话,可以改用 chmod 666(chmod 权限位为八进制数字 0-7,999 不是合法取值)
|
||||
chmod 600 ./mongodb.key
|
||||
# 重启 Mongo
|
||||
docker-compose down
|
||||
docker-compose up -d
|
||||
```
|
||||
|
||||
3. 进入容器初始化部分集合
|
||||
|
||||
```bash
|
||||
docker exec -it mongo bash
|
||||
mongo -u myname -p mypassword --authenticationDatabase admin
|
||||
# 初始化副本集。如果需要外网访问,mongo:27017 可以改成 ip:27017。但是需要同时修改 FastGPT 连接的参数(MONGODB_URI=mongodb://myname:mypassword@mongo:27017/fastgpt?authSource=admin => MONGODB_URI=mongodb://myname:mypassword@ip:27017/fastgpt?authSource=admin)
|
||||
rs.initiate({
|
||||
_id: "rs0",
|
||||
members: [
|
||||
{ _id: 0, host: "mongo:27017" }
|
||||
]
|
||||
})
|
||||
# 检查状态。如果提示 rs0 状态,则代表运行成功
|
||||
rs.status()
|
||||
```
|
||||
|
||||
## Sealos 部署 - 无需更新 Mongo
|
||||
|
||||
## 修改配置文件
|
||||
|
||||
去除了重复的模型配置,LLM模型都合并到一个属性中:[点击查看最新的配置文件](/docs/development/configuration/)
|
||||
|
||||
## 商业版初始化
|
||||
|
||||
商业版用户需要执行一个初始化,格式化团队信息。
|
||||
|
||||
发起 1 个 HTTP 请求 ({{rootkey}} 替换成环境变量里的 `rootkey`,{{host}} 替换成自己域名)
|
||||
|
||||
```bash
|
||||
curl --location --request POST 'https://{{host}}/api/init/v468' \
|
||||
--header 'rootkey: {{rootkey}}' \
|
||||
--header 'Content-Type: application/json'
|
||||
```
|
||||
|
||||
会初始化计费系统,内部使用可把免费的存储拉大。
|
||||
|
||||
## V4.6.8 更新说明
|
||||
|
||||
1. 新增 - 知识库搜索合并模块。
|
||||
2. 新增 - 新的 Http 模块,支持更加灵活的参数传入。同时支持了输入输出自动数据类型转化,例如:接口输出的 JSON 类型会自动转成字符串类型,直接给其他模块使用。此外,还补充了一些例子,可在文档中查看。
|
||||
3. 优化 - 内容补全。将内容补全内置到【知识库搜索】中,并实现了一次内容补全,即可完成“指代消除”和“问题扩展”。FastGPT知识库搜索详细流程可查看:[知识库搜索介绍](/docs/course/data_search/)
|
||||
4. 优化 - LLM 模型配置,不再区分对话、分类、提取模型。同时支持模型的默认参数,避免不同模型参数冲突,可通过`defaultConfig`传入默认的配置。
|
||||
5. 优化 - 流响应,参考了`ChatNextWeb`的流,更加丝滑。此外,之前提到的乱码、中断(刷新后又正常)的问题,可能已被修复。
|
||||
6. 修复 - 语音输入文件无法上传。
|
||||
7. 修复 - 对话框重新生成无法使用。
|
||||
517
docSite/content/docs/workflow/examples/dalle3.md
Normal file
@@ -0,0 +1,517 @@
|
||||
---
|
||||
title: 'Dalle3 绘图'
|
||||
description: '使用 HTTP 模块绘制图片'
|
||||
icon: 'image'
|
||||
draft: false
|
||||
toc: true
|
||||
weight: 404
|
||||
---
|
||||
|
||||
| | |
|
||||
| --------------------- | --------------------- |
|
||||
|  |  |
|
||||
|
||||
## OpenAI Dalle3 接口
|
||||
|
||||
先来看下官方接口的参数和响应值:
|
||||
|
||||
Body
|
||||
|
||||
```json
|
||||
{
|
||||
"model": "dall-e-3",
|
||||
"prompt": "A cute baby sea otter",
|
||||
"n": 1,
|
||||
"size": "1024x1024"
|
||||
}
|
||||
```
|
||||
|
||||
Response
|
||||
|
||||
```json
|
||||
{
|
||||
"created": 1589478378,
|
||||
"data": [
|
||||
{
|
||||
"url": "https://..."
|
||||
},
|
||||
{
|
||||
"url": "https://..."
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## 编排思路
|
||||
|
||||
1. 通过 AI 来优化图片绘制的提示词(这一步省略了,自己找提示词即可)
|
||||
2. 通过`HTTP 模块`调用 Dalle3 接口,获取图片的 URL。
|
||||
3. 通过`文本加工`来构建`Markdown`的图片格式。
|
||||
4. 通过`指定回复`来直接输出图片链接。
|
||||
|
||||
### 1. 构建 HTTP 模块
|
||||
|
||||
请求参数直接复制 Dalle3 接口的即可,并修改 prompt 为变量。需要增加一个`Headers.Authorization`。
|
||||
|
||||
Body:
|
||||
|
||||
```json
|
||||
{
|
||||
"model": "dall-e-3",
|
||||
"prompt": "{{prompt}}",
|
||||
"n": 1,
|
||||
"size": "1024x1024"
|
||||
}
|
||||
```
|
||||
|
||||
Headers:
|
||||
|
||||
`Authorization: sk-xxx`
|
||||
|
||||
Response:
|
||||
|
||||
响应值需要根据Dalle3接口的返回值进行获取,我们只绘制了1张图片,所以只需要取第一张图片的URL即可。给 HTTP 模块增加一个`key`为`data[0].url`的输出值。
|
||||
|
||||
### 2. 文本加工 - 构建图片链接
|
||||
|
||||
在`Markdown`语法中``表示插入图片,图片链接由`HTTP模块`输出。
|
||||
|
||||
因此可以增加一个输入来接收`HTTP模块`的图片链接输出,并在`文本内容`中通过变量来引用图片链接,从而得到一个完整的`Markdown`图片格式。
|
||||
|
||||
### 3. 指定回复
|
||||
|
||||
指定回复可以直接输出传入的内容到客户端,因此可以直接输出加工好的`Markdown`图片格式即可。
|
||||
|
||||
## 编排代码
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"moduleId": "userGuide",
|
||||
"name": "core.module.template.User guide",
|
||||
"flowType": "userGuide",
|
||||
"position": {
|
||||
"x": 454.98510354678695,
|
||||
"y": 721.4016845336229
|
||||
},
|
||||
"inputs": [
|
||||
{
|
||||
"key": "welcomeText",
|
||||
"type": "hidden",
|
||||
"valueType": "string",
|
||||
"label": "core.app.Welcome Text",
|
||||
"showTargetInApp": false,
|
||||
"showTargetInPlugin": false,
|
||||
"connected": false
|
||||
},
|
||||
{
|
||||
"key": "variables",
|
||||
"type": "hidden",
|
||||
"valueType": "any",
|
||||
"label": "core.module.Variable",
|
||||
"value": [],
|
||||
"showTargetInApp": false,
|
||||
"showTargetInPlugin": false,
|
||||
"connected": false
|
||||
},
|
||||
{
|
||||
"key": "questionGuide",
|
||||
"valueType": "boolean",
|
||||
"type": "switch",
|
||||
"label": "",
|
||||
"showTargetInApp": false,
|
||||
"showTargetInPlugin": false,
|
||||
"connected": false
|
||||
},
|
||||
{
|
||||
"key": "tts",
|
||||
"type": "hidden",
|
||||
"valueType": "any",
|
||||
"label": "",
|
||||
"showTargetInApp": false,
|
||||
"showTargetInPlugin": false,
|
||||
"connected": false
|
||||
}
|
||||
],
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"moduleId": "userChatInput",
|
||||
"name": "core.module.template.Chat entrance",
|
||||
"flowType": "questionInput",
|
||||
"position": {
|
||||
"x": 597.8136543694757,
|
||||
"y": 1709.9244174501202
|
||||
},
|
||||
"inputs": [
|
||||
{
|
||||
"key": "userChatInput",
|
||||
"type": "systemInput",
|
||||
"valueType": "string",
|
||||
"label": "core.module.input.label.user question",
|
||||
"showTargetInApp": false,
|
||||
"showTargetInPlugin": false,
|
||||
"connected": false
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"key": "userChatInput",
|
||||
"label": "core.module.input.label.user question",
|
||||
"type": "source",
|
||||
"valueType": "string",
|
||||
"targets": [
|
||||
{
|
||||
"moduleId": "mqgfub",
|
||||
"key": "prompt"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"moduleId": "mqgfub",
|
||||
"name": "Dalle3绘图",
|
||||
"flowType": "httpRequest468",
|
||||
"showStatus": true,
|
||||
"position": {
|
||||
"x": 1071.8956245626034,
|
||||
"y": 1236.690825267034
|
||||
},
|
||||
"inputs": [
|
||||
{
|
||||
"key": "switch",
|
||||
"type": "target",
|
||||
"label": "core.module.input.label.switch",
|
||||
"description": "core.module.input.description.Trigger",
|
||||
"valueType": "any",
|
||||
"showTargetInApp": true,
|
||||
"showTargetInPlugin": true,
|
||||
"connected": false
|
||||
},
|
||||
{
|
||||
"key": "system_httpMethod",
|
||||
"type": "custom",
|
||||
"valueType": "string",
|
||||
"label": "",
|
||||
"value": "POST",
|
||||
"required": true,
|
||||
"showTargetInApp": false,
|
||||
"showTargetInPlugin": false,
|
||||
"connected": false
|
||||
},
|
||||
{
|
||||
"key": "system_httpReqUrl",
|
||||
"type": "hidden",
|
||||
"valueType": "string",
|
||||
"label": "",
|
||||
"description": "core.module.input.description.Http Request Url",
|
||||
"placeholder": "https://api.ai.com/getInventory",
|
||||
"required": false,
|
||||
"showTargetInApp": false,
|
||||
"showTargetInPlugin": false,
|
||||
"value": "https://api.openai.com/v1/images/generations",
|
||||
"connected": false
|
||||
},
|
||||
{
|
||||
"key": "system_httpHeader",
|
||||
"type": "custom",
|
||||
"valueType": "any",
|
||||
"value": [
|
||||
{
|
||||
"key": "Authorization",
|
||||
"type": "string",
|
||||
"value": "sk-xxx"
|
||||
}
|
||||
],
|
||||
"label": "",
|
||||
"description": "core.module.input.description.Http Request Header",
|
||||
"placeholder": "core.module.input.description.Http Request Header",
|
||||
"required": false,
|
||||
"showTargetInApp": false,
|
||||
"showTargetInPlugin": false,
|
||||
"connected": false
|
||||
},
|
||||
{
|
||||
"key": "system_httpParams",
|
||||
"type": "hidden",
|
||||
"valueType": "any",
|
||||
"value": [],
|
||||
"label": "",
|
||||
"required": false,
|
||||
"showTargetInApp": false,
|
||||
"showTargetInPlugin": false,
|
||||
"connected": false
|
||||
},
|
||||
{
|
||||
"key": "system_httpJsonBody",
|
||||
"type": "hidden",
|
||||
"valueType": "any",
|
||||
"value": "{\r\n \"model\": \"dall-e-3\",\r\n \"prompt\": \"{{prompt}}\",\r\n \"n\": 1,\r\n \"size\": \"1024x1024\"\r\n }",
|
||||
"label": "",
|
||||
"required": false,
|
||||
"showTargetInApp": false,
|
||||
"showTargetInPlugin": false,
|
||||
"connected": false
|
||||
},
|
||||
{
|
||||
"key": "DYNAMIC_INPUT_KEY",
|
||||
"type": "target",
|
||||
"valueType": "any",
|
||||
"label": "core.module.inputType.dynamicTargetInput",
|
||||
"description": "core.module.input.description.dynamic input",
|
||||
"required": false,
|
||||
"showTargetInApp": false,
|
||||
"showTargetInPlugin": true,
|
||||
"hideInApp": true,
|
||||
"connected": false
|
||||
},
|
||||
{
|
||||
"key": "prompt",
|
||||
"valueType": "string",
|
||||
"label": "prompt",
|
||||
"type": "target",
|
||||
"required": true,
|
||||
"description": "",
|
||||
"edit": true,
|
||||
"editField": {
|
||||
"key": true,
|
||||
"name": true,
|
||||
"description": true,
|
||||
"required": true,
|
||||
"dataType": true
|
||||
},
|
||||
"connected": true
|
||||
},
|
||||
{
|
||||
"key": "system_addInputParam",
|
||||
"type": "addInputParam",
|
||||
"valueType": "any",
|
||||
"label": "",
|
||||
"required": false,
|
||||
"showTargetInApp": false,
|
||||
"showTargetInPlugin": false,
|
||||
"editField": {
|
||||
"key": true,
|
||||
"name": true,
|
||||
"description": true,
|
||||
"required": true,
|
||||
"dataType": true
|
||||
},
|
||||
"defaultEditField": {
|
||||
"label": "",
|
||||
"key": "",
|
||||
"description": "",
|
||||
"inputType": "target",
|
||||
"valueType": "string",
|
||||
"required": true
|
||||
},
|
||||
"connected": false
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"key": "finish",
|
||||
"label": "core.module.output.label.running done",
|
||||
"description": "core.module.output.description.running done",
|
||||
"valueType": "boolean",
|
||||
"type": "source",
|
||||
"targets": []
|
||||
},
|
||||
{
|
||||
"key": "system_addOutputParam",
|
||||
"type": "addOutputParam",
|
||||
"valueType": "any",
|
||||
"label": "",
|
||||
"targets": [],
|
||||
"editField": {
|
||||
"key": true,
|
||||
"name": true,
|
||||
"description": true,
|
||||
"dataType": true
|
||||
},
|
||||
"defaultEditField": {
|
||||
"label": "",
|
||||
"key": "",
|
||||
"description": "",
|
||||
"outputType": "source",
|
||||
"valueType": "string"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "source",
|
||||
"valueType": "string",
|
||||
"key": "data[0].url",
|
||||
"label": "url",
|
||||
"description": "",
|
||||
"edit": true,
|
||||
"editField": {
|
||||
"key": true,
|
||||
"name": true,
|
||||
"description": true,
|
||||
"dataType": true
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"moduleId": "nl6mr9",
|
||||
"key": "url"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"moduleId": "xy76o2",
|
||||
"name": "core.module.template.Assigned reply",
|
||||
"flowType": "answerNode",
|
||||
"position": {
|
||||
"x": 2204.027057268489,
|
||||
"y": 1256.786345213533
|
||||
},
|
||||
"inputs": [
|
||||
{
|
||||
"key": "switch",
|
||||
"type": "target",
|
||||
"label": "core.module.input.label.switch",
|
||||
"description": "core.module.input.description.Trigger",
|
||||
"valueType": "any",
|
||||
"showTargetInApp": true,
|
||||
"showTargetInPlugin": true,
|
||||
"connected": false
|
||||
},
|
||||
{
|
||||
"key": "text",
|
||||
"type": "textarea",
|
||||
"valueType": "any",
|
||||
"label": "core.module.input.label.Response content",
|
||||
"description": "core.module.input.description.Response content",
|
||||
"placeholder": "core.module.input.description.Response content",
|
||||
"showTargetInApp": true,
|
||||
"showTargetInPlugin": true,
|
||||
"connected": true
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"key": "finish",
|
||||
"label": "core.module.output.label.running done",
|
||||
"description": "core.module.output.description.running done",
|
||||
"valueType": "boolean",
|
||||
"type": "source",
|
||||
"targets": []
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"moduleId": "nl6mr9",
|
||||
"name": "core.module.template.textEditor",
|
||||
"flowType": "pluginModule",
|
||||
"showStatus": false,
|
||||
"position": {
|
||||
"x": 1690.1826860670342,
|
||||
"y": 1262.3858719789062
|
||||
},
|
||||
"inputs": [
|
||||
{
|
||||
"key": "pluginId",
|
||||
"type": "hidden",
|
||||
"label": "",
|
||||
"value": "community-textEditor",
|
||||
"valueType": "string",
|
||||
"connected": false,
|
||||
"showTargetInApp": false,
|
||||
"showTargetInPlugin": false
|
||||
},
|
||||
{
|
||||
"key": "switch",
|
||||
"type": "target",
|
||||
"label": "core.module.input.label.switch",
|
||||
"description": "core.module.input.description.Trigger",
|
||||
"valueType": "any",
|
||||
"showTargetInApp": true,
|
||||
"showTargetInPlugin": true,
|
||||
"connected": false
|
||||
},
|
||||
{
|
||||
"key": "textarea",
|
||||
"valueType": "string",
|
||||
"label": "文本内容",
|
||||
"type": "textarea",
|
||||
"required": true,
|
||||
"description": "可以通过 {{key}} 的方式引用传入的变量。变量仅支持字符串或数字。",
|
||||
"edit": false,
|
||||
"editField": {
|
||||
"key": true,
|
||||
"name": true,
|
||||
"description": true,
|
||||
"required": true,
|
||||
"dataType": true,
|
||||
"inputType": true
|
||||
},
|
||||
"connected": false,
|
||||
"placeholder": "可以通过 {{key}} 的方式引用传入的变量。变量仅支持字符串或数字。",
|
||||
"value": ""
|
||||
},
|
||||
{
|
||||
"key": "url",
|
||||
"valueType": "string",
|
||||
"label": "url",
|
||||
"type": "target",
|
||||
"required": true,
|
||||
"description": "",
|
||||
"edit": true,
|
||||
"editField": {
|
||||
"key": true,
|
||||
"name": true,
|
||||
"description": true,
|
||||
"required": true,
|
||||
"dataType": true,
|
||||
"inputType": false
|
||||
},
|
||||
"connected": true
|
||||
},
|
||||
{
|
||||
"key": "DYNAMIC_INPUT_KEY",
|
||||
"valueType": "any",
|
||||
"label": "需要加工的输入",
|
||||
"type": "addInputParam",
|
||||
"required": false,
|
||||
"description": "可动态的添加字符串类型变量,在文本编辑中通过 {{key}} 使用变量。非字符串类型,会自动转成字符串类型。",
|
||||
"edit": false,
|
||||
"editField": {
|
||||
"key": true,
|
||||
"name": true,
|
||||
"description": true,
|
||||
"required": true,
|
||||
"dataType": true,
|
||||
"inputType": false
|
||||
},
|
||||
"defaultEditField": {
|
||||
"label": "",
|
||||
"key": "",
|
||||
"description": "",
|
||||
"inputType": "target",
|
||||
"valueType": "string",
|
||||
"required": true
|
||||
},
|
||||
"connected": false
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"key": "text",
|
||||
"valueType": "string",
|
||||
"label": "core.module.output.label.text",
|
||||
"type": "source",
|
||||
"edit": false,
|
||||
"targets": [
|
||||
{
|
||||
"moduleId": "xy76o2",
|
||||
"key": "text"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
```
|
||||
@@ -7,6 +7,8 @@ toc: true
|
||||
weight: 406
|
||||
---
|
||||
|
||||
**该教程由社区提供,部分模块已经过期,需调整后才能使用。**
|
||||
|
||||

|
||||
|
||||
众所周知 GPT 只是一个语言模型,功能上有很多局限,但只要综合利用高级编排各模块功能,就可以轻松突破原有 GPT 的局限,实现更多功能。
|
||||
|
||||
@@ -30,20 +30,15 @@ FastGPT 从 V4 版本开始采用新的交互方式来构建 AI 应用。使用
|
||||
|
||||
### 模块分类
|
||||
|
||||
从功能上,模块可以分为 3 类:
|
||||
从功能上,模块可以分为 2 类:
|
||||
|
||||
1. **只读模块**:全局变量、用户引导。
|
||||
2. **系统模块**:聊天记录(无输入,直接从数据库取)、用户问题(流程入口)。
|
||||
3. **功能模块**:知识库搜索、AI 对话等剩余模块。(这些模块都有输入和输出,可以自由组合)。
|
||||
1. **系统模块**:用户引导(配置一些对话框信息)、用户问题(流程入口)。
|
||||
2. **功能模块**:知识库搜索、AI 对话等剩余模块。(这些模块都有输入和输出,可以自由组合)。
|
||||
|
||||
### 模块的组成
|
||||
|
||||
每个模块会包含 3 个核心部分:固定参数、外部输入(左边有个圆圈)和输出(右边有个圆圈)。
|
||||
|
||||
+ 对于只读模块,只需要根据提示填写即可,不参与流程运行。
|
||||
+ 对于系统模块,通常只有固定参数和输出,主要需要关注输出到哪个位置。
|
||||
+ 对于功能模块,通常这 3 部分都是重要的,以下图的 AI 对话为例:
|
||||
|
||||

|
||||
|
||||
- 对话模型、温度、回复上限、系统提示词和限定词为固定参数,同时系统提示词和限定词也可以作为外部输入,意味着如果你有输入流向了系统提示词,那么原本填写的内容就会被**覆盖**。
|
||||
@@ -56,6 +51,7 @@ FastGPT 从 V4 版本开始采用新的交互方式来构建 AI 应用。使用
|
||||
|
||||
1. 仅关心**已连接的**外部输入,即左边的圆圈被连接了。
|
||||
2. 当连接内容都有值时触发。
|
||||
3. **可以多个输出连接到一个输入,后续的值会覆盖前面的值。**
|
||||
|
||||
#### 示例 1:
|
||||
|
||||
@@ -86,4 +82,17 @@ FastGPT 从 V4 版本开始采用新的交互方式来构建 AI 应用。使用
|
||||
|
||||
1. 建议从左往右阅读。
|
||||
2. 从 **用户问题** 模块开始。用户问题模块,代表的是用户发送了一段文本,触发任务开始。
|
||||
3. 关注【AI 对话】和【指定回复】模块,这两个模块是输出答案的地方。
|
||||
3. 关注【AI 对话】和【指定回复】模块,这两个模块是输出答案的地方。
|
||||
|
||||
## FAQ
|
||||
|
||||
### 想合并多个输出结果怎么实现?
|
||||
|
||||
1. 文本加工,可以对字符串进行合并。
|
||||
2. 知识库搜索合并,可以合并多个知识库搜索结果
|
||||
3. 其他结果,无法直接合并,可以考虑传入到`HTTP`模块中进行合并,使用 [Laf](https://laf.run/) 可以快速实现一个无服务器HTTP接口。
|
||||
|
||||
### 模块为什么有2个用户问题
|
||||
|
||||
左侧的`用户问题`是指该模块所需的输入。右侧的`用户问题`是为了方便后续的连线,输出的值和传入的用户问题一样。
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: "问题补全"
|
||||
title: "问题补全(已合并到知识库搜索)"
|
||||
description: "问题补全模块介绍和使用"
|
||||
icon: "input"
|
||||
draft: false
|
||||
|
||||
@@ -7,6 +7,8 @@ toc: true
|
||||
weight: 357
|
||||
---
|
||||
|
||||
知识库搜索具体参数说明,以及内部逻辑请移步:[FastGPT知识库搜索方案](/docs/course/data_search/)
|
||||
|
||||
## 特点
|
||||
|
||||
- 可重复添加(复杂编排时防止线太乱,可以更美观)
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: "新 HTTP 模块"
|
||||
title: "HTTP 模块"
|
||||
description: "FastGPT HTTP 模块介绍"
|
||||
icon: "http"
|
||||
draft: false
|
||||
@@ -21,168 +21,172 @@ weight: 355
|
||||
|
||||
HTTP 模块会向对应的地址发送一个 `POST/GET` 请求,携带部分`系统参数`及`自定义参数`,并接收一个 JSON 响应值,字段也是自定义。
|
||||
|
||||
- 你还可以通过 JSON 传入自定义的请求头。
|
||||
- POST 请求中,数据会被放置在 `body` 中。
|
||||
- GET 请求中,数据会被放置在 `query` 中。
|
||||
- 在出入参数中,你都可以通过 xxx.xxx 来代表嵌套的对象。
|
||||
- Params 为路径请求参数,GET请求中用的居多。
|
||||
- Body 为请求体,POST请求中用的居多。
|
||||
- Headers 为请求头,用于传递一些特殊的信息。
|
||||
- 3 种数据中均可以通过 `{{}}` 来引用变量。
|
||||
- 变量来自于`全局变量`、`系统变量`、`局部传入`
|
||||
|
||||
## 参数结构
|
||||
|
||||
### 系统参数说明
|
||||
### 系统变量说明
|
||||
|
||||
你可以将鼠标放置在`请求参数`旁边的问号中,里面会提示你可用的变量。
|
||||
|
||||
- appId: 应用的ID
|
||||
- chatId: 当前对话的ID,测试模式下不存在。
|
||||
- responseChatItemId: 当前对话中,响应的消息ID,测试模式下不存在。
|
||||
- variables: 当前对话的全局变量。
|
||||
- data: 自定义传递的参数。
|
||||
- cTime: 当前时间。
|
||||
- histories: 历史记录(默认最多取10条,无法修改长度)
|
||||
|
||||
### 嵌套对象使用
|
||||
### Params, Headers
|
||||
|
||||
**入参**
|
||||
不多描述,使用方法和Postman, ApiFox 基本一致,目前 Params 和 Headers 未提供语法提示,后续会加入。
|
||||
|
||||
假设我们设计了`3个`输入。
|
||||
可通过 {{key}} 来引入变量。例如:
|
||||
|
||||
- user.name (string)
|
||||
- user.age (number)
|
||||
- type (string)
|
||||
| key | value |
|
||||
| --- | --- |
|
||||
| appId | {{appId}} |
|
||||
| Authorization | Bearer {{token}} |
|
||||
|
||||
最终组成的对象为:
|
||||
### Body
|
||||
|
||||
只有`POST`模式下会生效。
|
||||
|
||||
可以写一个`自定义的 Json`,并通过 {{key}} 来引入变量。例如:
|
||||
|
||||
{{< tabs tabTotal="3" >}}
|
||||
{{< tab tabName="假设有一组变量" >}}
|
||||
{{< markdownify >}}
|
||||
|
||||
```json
|
||||
{
|
||||
"user": {
|
||||
"name": "",
|
||||
"age": ""
|
||||
},
|
||||
"type": ""
|
||||
"string": "字符串",
|
||||
"number": 123,
|
||||
"boolean": true,
|
||||
"array": [1, 2, 3],
|
||||
"obj": {
|
||||
"name": "FastGPT",
|
||||
"url": "https://fastgpt.in"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**出参**
|
||||
{{< /markdownify >}}
|
||||
{{< /tab >}}
|
||||
{{< tab tabName="Http 模块中的Body声明" >}}
|
||||
{{< markdownify >}}
|
||||
|
||||
假设接口的输出结构为:
|
||||
注意,在 Body 中,你如果引用`字符串`,则需要加上`""`,例如:`"{{string}}"`。
|
||||
|
||||
```json
|
||||
{
|
||||
"string": "{{string}}",
|
||||
"token": "Bearer {{string}}",
|
||||
"number": {{number}},
|
||||
"boolean": {{boolean}},
|
||||
"array": [{{number}}, "{{string}}"],
|
||||
"array2": {{array}},
|
||||
"object": {{obj}}
|
||||
}
|
||||
```
|
||||
|
||||
{{< /markdownify >}}
|
||||
{{< /tab >}}
|
||||
{{< tab tabName="最终得到的解析" >}}
|
||||
{{< markdownify >}}
|
||||
|
||||
```json
|
||||
{
|
||||
"string": "字符串",
|
||||
"token": "Bearer 字符串",
|
||||
"number": 123,
|
||||
"boolean": true,
|
||||
"array": [123, "字符串"],
|
||||
"array2": [1, 2, 3],
|
||||
"object": {
|
||||
"name": "FastGPT",
|
||||
"url": "https://fastgpt.in"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
{{< /markdownify >}}
|
||||
{{< /tab >}}
|
||||
{{< /tabs >}}
|
||||
|
||||
### 如何获取返回值
|
||||
|
||||
从图中可以看出,FastGPT可以添加多个返回值,这个返回值并不代表接口的返回值,而是代表`如何解析接口返回值`,可以通过 key 来`提取`接口响应的值。例如:
|
||||
|
||||
{{< tabs tabTotal="2" >}}
|
||||
{{< tab tabName="接口响应格式" >}}
|
||||
{{< markdownify >}}
|
||||
|
||||
```json
|
||||
{
|
||||
"message": "测试",
|
||||
"data":{
|
||||
"name": "name",
|
||||
"age": 10
|
||||
"user": {
|
||||
"name": "xxx",
|
||||
"age": 12
|
||||
},
|
||||
"list": [
|
||||
{
|
||||
"name": "xxx",
|
||||
"age": 50
|
||||
},
|
||||
[{ "test": 22 }]
|
||||
],
|
||||
"psw": "xxx"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
那么,自定出参的`key`可以设置为:
|
||||
|
||||
- message (string)
|
||||
- data.name (string)
|
||||
- data.age (number)
|
||||
|
||||
|
||||
## POST 示例
|
||||
|
||||
**自定义入参**
|
||||
|
||||
- user.name (string)
|
||||
- user.age (number)
|
||||
- type (string)
|
||||
|
||||
**自定义出参**
|
||||
|
||||
- message (string)
|
||||
- data.name (string)
|
||||
- data.age (number)
|
||||
|
||||
那么,这个模块发出的请求则是:
|
||||
|
||||
{{< tabs tabTotal="2" >}}
|
||||
{{< tab tabName="POST 请求示例" >}}
|
||||
{{< markdownify >}}
|
||||
|
||||
```bash
|
||||
curl --location --request POST 'http://xxxx.com' \
|
||||
--header 'Content-Type: application/json' \
|
||||
--data-raw '{
|
||||
"appId": "65782f7ffae5f7854ed4498b",
|
||||
"chatId": "xxxx",
|
||||
"responseChatItemId": "xxxx",
|
||||
"variables": {
|
||||
"cTime": "2023-12-18 13:45:46"
|
||||
},
|
||||
"data": {
|
||||
"user": {
|
||||
"name": "",
|
||||
"age": ""
|
||||
},
|
||||
"type": ""
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
{{< /markdownify >}}
|
||||
{{< /tab >}}
|
||||
|
||||
{{< tab tabName="POST响应" >}}
|
||||
{{< tab tabName="FastGPT 转化后的格式" >}}
|
||||
{{< markdownify >}}
|
||||
|
||||
```json
|
||||
{
|
||||
"message": "message",
|
||||
"data": {
|
||||
"name": "name",
|
||||
"age": 10
|
||||
}
|
||||
"message": "测试",
|
||||
"data.user": { "name": "xxx", "age": 12 },
|
||||
"data.user.name": "xxx",
|
||||
"data.user.age": 12,
|
||||
"data.list": [ { "name": "xxx", "age": 50 }, [{ "test": 22 }] ],
|
||||
"data.list[0]": { "name": "xxx", "age": 50 },
|
||||
"data.list[0].name": "xxx",
|
||||
"data.list[0].age": 50,
|
||||
"data.list[1]": [ { "test": 22 } ],
|
||||
"data.list[1][0]": { "test": 22 },
|
||||
"data.list[1][0].test": 22,
|
||||
"data.psw": "xxx"
|
||||
}
|
||||
```
|
||||
|
||||
{{< /markdownify >}}
|
||||
{{< /tab >}}
|
||||
{{< /tabs >}}
|
||||
|
||||
## GET 示例
|
||||
|
||||
GET 中,不推荐使用嵌套参数,否则会出现奇怪的问题。此外,GET 请求中,FastGPT 会将参数扁平化,不会将自定义参数单独抽到 data 中,同时全局变量也会扁平化,因此需要注意字段 key 是否冲突。
|
||||
你可以配置对应的`key`来从`FastGPT 转化后的格式`获取需要的值,该规则遵守 JS 的对象取值规则。例如:
|
||||
|
||||
**自定义入参**
|
||||
1. 获取`message`的内容,那么你可以配置`message`的`key`为`message`,这样就可以获取到`message`的内容。
|
||||
2. 获取`user的name`,则`key`可以为:`data.user.name`。
|
||||
3. 获取list中第二个元素,则`key`可以为:`data.list[1]`,然后输出类型选择字符串,则会自动获取到`[ { "test": 22 } ]`的`json`字符串。
|
||||
|
||||
- name (string)
|
||||
- age (number)
|
||||
- type (string)
|
||||
### 自动格式化输出
|
||||
|
||||
**自定义出参**
|
||||
FastGPT v4.6.8 后,加入了出参格式化功能,主要以`json`格式化成`字符串`为主。如果你的输出类型选择了`字符串`,则会将`HTTP`对应`key`的值,转成`json`字符串进行输出。因此,未来你可以直接从`HTTP`接口输出内容至`文本加工`中,然后拼接适当的提示词,最终输入给`AI对话`。
|
||||
|
||||
- message (string)
|
||||
- name (string)
|
||||
- age (number)
|
||||
### 动态外部数据
|
||||
|
||||
那么,这个模块发出的请求则是:
|
||||
|
||||
{{< tabs tabTotal="2" >}}
|
||||
{{< tab tabName="GET 请求示例" >}}
|
||||
{{< markdownify >}}
|
||||
|
||||
```bash
|
||||
curl --location --request GET 'http://xxx.com/test?name&age&type&appId=65782f7ffae5f7854ed4498b&chatId=xxxx&responseChatItemId=xxxx&cTime=2023-12-18 13:45:46'
|
||||
```
|
||||
|
||||
{{< /markdownify >}}
|
||||
{{< /tab >}}
|
||||
|
||||
{{< tab tabName="GET 响应" >}}
|
||||
{{< markdownify >}}
|
||||
|
||||
```json
|
||||
{
|
||||
"message": "message",
|
||||
"data": {
|
||||
"name": "name",
|
||||
"age": 10
|
||||
}
|
||||
}
|
||||
```
|
||||
{{< /markdownify >}}
|
||||
{{< /tab >}}
|
||||
{{< /tabs >}}
|
||||
在插件中的`HTTP模块`有一个属性叫`动态外部数据`,这个属性是与`插件输入`中,数据类型为`动态外部数据`的值搭配使用。
|
||||
|
||||
类似于文本加工模块,会有一个不确定长度,不确定key的用户输入,因此这部分数据会被`动态外部数据`接收,它们是一个对象。在 HTTP 模块中,你可以在`Body`中接收到一个`key`为`DYNAMIC_INPUT_KEY`的对象。
|
||||
|
||||
## laf 对接 HTTP 示例
|
||||
|
||||
@@ -198,16 +202,14 @@ const db = cloud.database()
|
||||
|
||||
type RequestType = {
|
||||
appId: string;
|
||||
data: {
|
||||
appointment: string;
|
||||
action: 'post' | 'delete' | 'put' | 'get'
|
||||
}
|
||||
appointment: string;
|
||||
action: 'post' | 'delete' | 'put' | 'get'
|
||||
}
|
||||
|
||||
export default async function (ctx: FunctionContext) {
|
||||
try {
|
||||
// 从 body 中获取参数
|
||||
const { appId, data: { appointment, action } } = ctx.body as RequestType
|
||||
const { appId, appointment, action } = ctx.body as RequestType
|
||||
|
||||
const parseBody = JSON.parse(appointment)
|
||||
if (action === 'get') {
|
||||
|
||||
@@ -22,17 +22,18 @@ services:
|
||||
image: mongo:5.0.18
|
||||
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/mongo:5.0.18 # 阿里云
|
||||
container_name: mongo
|
||||
restart: always
|
||||
ports: # 生产环境建议不要暴露
|
||||
ports:
|
||||
- 27017:27017
|
||||
networks:
|
||||
- fastgpt
|
||||
command: mongod --keyFile /data/mongodb.key --replSet rs0
|
||||
environment:
|
||||
# 这里的配置只有首次运行生效。修改后,重启镜像是不会生效的。需要把持久化数据删除再重启,才有效果
|
||||
- MONGO_INITDB_ROOT_USERNAME=username
|
||||
- MONGO_INITDB_ROOT_PASSWORD=password
|
||||
# 默认的用户名和密码,只有首次运行时生效
|
||||
- MONGO_INITDB_ROOT_USERNAME=myname
|
||||
- MONGO_INITDB_ROOT_PASSWORD=mypassword
|
||||
volumes:
|
||||
- ./mongo/data:/data/db
|
||||
- ./mongodb.key:/data/mongodb.key
|
||||
fastgpt:
|
||||
container_name: fastgpt
|
||||
image: ghcr.io/labring/fastgpt:latest # git
|
||||
@@ -55,63 +56,11 @@ services:
|
||||
- TOKEN_KEY=any
|
||||
- ROOT_KEY=root_key
|
||||
- FILE_TOKEN_KEY=filetoken
|
||||
# mongo 配置,不需要改. 如果连不上,可能需要去掉 ?authSource=admin
|
||||
- MONGODB_URI=mongodb://username:password@mongo:27017/fastgpt?authSource=admin
|
||||
# mongo 配置,不需要改. 用户名myname,密码mypassword。
|
||||
- MONGODB_URI=mongodb://myname:mypassword@mongo:27017/fastgpt?authSource=admin
|
||||
# pg配置. 不需要改
|
||||
- PG_URL=postgresql://username:password@pg:5432/postgres
|
||||
volumes:
|
||||
- ./config.json:/app/data/config.json
|
||||
networks:
|
||||
fastgpt:
|
||||
# host 版本, 不推荐。
|
||||
# version: '3.3'
|
||||
# services:
|
||||
# pg:
|
||||
# image: ankane/pgvector:v0.5.0 # dockerhub
|
||||
# # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/pgvector:v0.5.0 # 阿里云
|
||||
# container_name: pg
|
||||
# restart: always
|
||||
# ports: # 生产环境建议不要暴露
|
||||
# - 5432:5432
|
||||
# environment:
|
||||
# # 这里的配置只有首次运行生效。修改后,重启镜像是不会生效的。需要把持久化数据删除再重启,才有效果
|
||||
# - POSTGRES_USER=username
|
||||
# - POSTGRES_PASSWORD=password
|
||||
# - POSTGRES_DB=postgres
|
||||
# volumes:
|
||||
# - ./pg/data:/var/lib/postgresql/data
|
||||
# mongo:
|
||||
# image: mongo:5.0.18
|
||||
# # image: registry.cn-hangzhou.aliyuncs.com/fastgpt/mongo:5.0.18 # 阿里云
|
||||
# container_name: mongo
|
||||
# restart: always
|
||||
# ports: # 生产环境建议不要暴露
|
||||
# - 27017:27017
|
||||
# environment:
|
||||
# # 这里的配置只有首次运行生效。修改后,重启镜像是不会生效的。需要把持久化数据删除再重启,才有效果
|
||||
# - MONGO_INITDB_ROOT_USERNAME=username
|
||||
# - MONGO_INITDB_ROOT_PASSWORD=password
|
||||
# volumes:
|
||||
# - ./mongo/data:/data/db
|
||||
# - ./mongo/logs:/var/log/mongodb
|
||||
# fastgpt:
|
||||
# # image: ghcr.io/labring/fastgpt:latest # github
|
||||
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:latest # 阿里云
|
||||
# network_mode: host
|
||||
# restart: always
|
||||
# container_name: fastgpt
|
||||
# environment:
|
||||
# # root 密码,用户名为: root
|
||||
# - DEFAULT_ROOT_PSW=1234
|
||||
# # 中转地址,如果是用官方号,不需要管
|
||||
# - OPENAI_BASE_URL=https://api.openai.com/v1
|
||||
# - CHAT_API_KEY=sk-xxxx
|
||||
# - DB_MAX_LINK=5 # database max link
|
||||
# # token加密凭证(随便填,作为登录凭证)
|
||||
# - TOKEN_KEY=any
|
||||
# # root key, 最高权限,可以内部接口互相调用
|
||||
# - ROOT_KEY=root_key
|
||||
# # mongo 配置,不需要改
|
||||
# - MONGODB_URI=mongodb://username:password@0.0.0.0:27017/fastgpt?authSource=admin
|
||||
# # pg配置. 不需要改
|
||||
# - PG_URL=postgresql://username:password@0.0.0.0:5432/postgres
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
"i18next": "^22.5.1",
|
||||
"lint-staged": "^13.2.1",
|
||||
"next-i18next": "^13.3.0",
|
||||
"prettier": "^3.0.3",
|
||||
"prettier": "3.2.4",
|
||||
"react-i18next": "^12.3.1",
|
||||
"zhlint": "^0.7.1"
|
||||
},
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
import { replaceSensitiveLink } from '../string/tools';
|
||||
|
||||
export const getErrText = (err: any, def = '') => {
|
||||
const msg: string = typeof err === 'string' ? err : err?.message || def || '';
|
||||
msg && console.log('error =>', msg);
|
||||
return msg;
|
||||
return replaceSensitiveLink(msg);
|
||||
};
|
||||
|
||||
@@ -1,10 +1,18 @@
|
||||
// The number of days left in the month is calculated as 30 days per month, and less than 1 day is calculated as 1 day
|
||||
export const getMonthRemainingDays = () => {
|
||||
const now = new Date();
|
||||
const year = now.getFullYear();
|
||||
const month = now.getMonth();
|
||||
const date = now.getDate();
|
||||
const days = new Date(year, month + 1, 0).getDate();
|
||||
const remainingDays = days - date;
|
||||
return remainingDays + 1;
|
||||
export const getMonthRemainingDays = (startDate = new Date()) => {
|
||||
const year = startDate.getFullYear();
|
||||
const month = startDate.getMonth();
|
||||
const endDay = new Date(year, month + 1, 0, 0, 0, 0);
|
||||
return calculateDaysBetweenDates(startDate, endDay);
|
||||
};
|
||||
|
||||
export const calculateDaysBetweenDates = (date1: Date, date2: Date) => {
|
||||
const oneDay = 24 * 60 * 60 * 1000;
|
||||
const firstDate = new Date(date1).getTime();
|
||||
const secondDate = new Date(date2).getTime();
|
||||
|
||||
const differenceInTime = Math.abs(secondDate - firstDate);
|
||||
const differenceInDays = Math.floor(differenceInTime / oneDay);
|
||||
|
||||
return differenceInDays;
|
||||
};
|
||||
|
||||
@@ -34,11 +34,6 @@ export function countPromptTokens(
|
||||
const enc = getTikTokenEnc();
|
||||
const text = `${role}\n${prompt}`;
|
||||
|
||||
// too large a text will block the thread
|
||||
if (text.length > 15000) {
|
||||
return text.length * 1.7;
|
||||
}
|
||||
|
||||
try {
|
||||
const encodeText = enc.encode(text);
|
||||
return encodeText.length + role.length; // 补充 role 估算值
|
||||
|
||||
@@ -38,6 +38,12 @@ export function replaceVariable(text: string, obj: Record<string, string | numbe
|
||||
return text || '';
|
||||
}
|
||||
|
||||
/* replace sensitive link */
|
||||
export const replaceSensitiveLink = (text: string) => {
|
||||
const urlRegex = /(?<=https?:\/\/)[^\s]+/g;
|
||||
return text.replace(urlRegex, 'xxx');
|
||||
};
|
||||
|
||||
export const getNanoid = (size = 12) => {
|
||||
return customAlphabet('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890', size)();
|
||||
};
|
||||
|
||||
23
packages/global/common/system/types/index.d.ts
vendored
@@ -1,3 +1,4 @@
|
||||
import { StandSubPlanLevelMapType, SubPlanType } from '../../../support/wallet/sub/type';
|
||||
import type {
|
||||
ChatModelItemType,
|
||||
FunctionModelItemType,
|
||||
@@ -7,16 +8,14 @@ import type {
|
||||
WhisperModelType,
|
||||
ReRankModelItemType
|
||||
} from '../../../core/ai/model.d';
|
||||
import { SubTypeEnum } from '../../../support/wallet/sub/constants';
|
||||
|
||||
/* fastgpt main */
|
||||
export type FastGPTConfigFileType = {
|
||||
feConfigs: FastGPTFeConfigsType;
|
||||
systemEnv: SystemEnvType;
|
||||
chatModels: ChatModelItemType[];
|
||||
qaModels: LLMModelItemType[];
|
||||
cqModels: FunctionModelItemType[];
|
||||
extractModels: FunctionModelItemType[];
|
||||
qgModels: LLMModelItemType[];
|
||||
subPlans?: SubPlanType;
|
||||
llmModels: ChatModelItemType[];
|
||||
vectorModels: VectorModelItemType[];
|
||||
reRankModels: ReRankModelItemType[];
|
||||
audioSpeechModels: AudioSpeechModelType[];
|
||||
@@ -51,10 +50,6 @@ export type FastGPTFeConfigsType = {
|
||||
favicon?: string;
|
||||
customApiDomain?: string;
|
||||
customSharePageDomain?: string;
|
||||
subscription?: {
|
||||
datasetStoreFreeSize?: number;
|
||||
datasetStorePrice?: number;
|
||||
};
|
||||
|
||||
uploadFileMaxSize?: number;
|
||||
};
|
||||
@@ -66,8 +61,8 @@ export type SystemEnvType = {
|
||||
pgHNSWEfSearch: number;
|
||||
};
|
||||
|
||||
declare global {
|
||||
var feConfigs: FastGPTFeConfigsType;
|
||||
var systemEnv: SystemEnvType;
|
||||
var systemInitd: boolean;
|
||||
}
|
||||
// declare global {
|
||||
// var feConfigs: FastGPTFeConfigsType;
|
||||
// var systemEnv: SystemEnvType;
|
||||
// var systemInitd: boolean;
|
||||
// }
|
||||
|
||||
22
packages/global/core/ai/model.d.ts
vendored
@@ -3,20 +3,24 @@ export type LLMModelItemType = {
|
||||
name: string;
|
||||
maxContext: number;
|
||||
maxResponse: number;
|
||||
inputPrice: number;
|
||||
outputPrice: number;
|
||||
};
|
||||
export type ChatModelItemType = LLMModelItemType & {
|
||||
quoteMaxToken: number;
|
||||
maxTemperature: number;
|
||||
|
||||
inputPrice: number;
|
||||
outputPrice: number;
|
||||
|
||||
censor?: boolean;
|
||||
vision?: boolean;
|
||||
defaultSystemChatPrompt?: string;
|
||||
};
|
||||
datasetProcess?: boolean;
|
||||
|
||||
export type FunctionModelItemType = LLMModelItemType & {
|
||||
functionCall: boolean;
|
||||
toolChoice: boolean;
|
||||
functionPrompt: string;
|
||||
|
||||
customCQPrompt: string;
|
||||
customExtractPrompt: string;
|
||||
|
||||
defaultSystemChatPrompt?: string;
|
||||
defaultConfig?: Record<string, any>;
|
||||
};
|
||||
|
||||
export type VectorModelItemType = {
|
||||
@@ -27,6 +31,8 @@ export type VectorModelItemType = {
|
||||
outputPrice: number;
|
||||
maxToken: number;
|
||||
weight: number;
|
||||
hidden?: boolean;
|
||||
defaultConfig?: Record<string, any>;
|
||||
};
|
||||
|
||||
export type ReRankModelItemType = {
|
||||
|
||||
@@ -3,11 +3,22 @@ import type { LLMModelItemType, VectorModelItemType } from './model.d';
|
||||
export const defaultQAModels: LLMModelItemType[] = [
|
||||
{
|
||||
model: 'gpt-3.5-turbo-16k',
|
||||
name: 'GPT35-16k',
|
||||
name: 'gpt-3.5-turbo-16k',
|
||||
maxContext: 16000,
|
||||
maxResponse: 16000,
|
||||
quoteMaxToken: 13000,
|
||||
maxTemperature: 1.2,
|
||||
inputPrice: 0,
|
||||
outputPrice: 0
|
||||
outputPrice: 0,
|
||||
censor: false,
|
||||
vision: false,
|
||||
datasetProcess: true,
|
||||
toolChoice: true,
|
||||
functionCall: false,
|
||||
customCQPrompt: '',
|
||||
customExtractPrompt: '',
|
||||
defaultSystemChatPrompt: '',
|
||||
defaultConfig: {}
|
||||
}
|
||||
];
|
||||
|
||||
|
||||
4
packages/global/core/app/api.d.ts
vendored
@@ -1,4 +1,4 @@
|
||||
import type { ChatModelItemType } from '../ai/model.d';
|
||||
import type { LLMModelItemType } from '../ai/model.d';
|
||||
import { AppTypeEnum } from './constants';
|
||||
import { AppSchema, AppSimpleEditFormType } from './type';
|
||||
|
||||
@@ -22,5 +22,5 @@ export interface AppUpdateParams {
|
||||
export type FormatForm2ModulesProps = {
|
||||
formData: AppSimpleEditFormType;
|
||||
chatModelMaxToken: number;
|
||||
chatModelList: ChatModelItemType[];
|
||||
llmModelList: LLMModelItemType[];
|
||||
};
|
||||
|
||||
19
packages/global/core/app/type.d.ts
vendored
@@ -50,7 +50,7 @@ export type AppDetailType = AppSchema & {
|
||||
// };
|
||||
// Since useform cannot infer enumeration types, all enumeration keys can only be undone manually
|
||||
export type AppSimpleEditFormType = {
|
||||
templateId: string;
|
||||
// templateId: string;
|
||||
aiSettings: {
|
||||
model: string;
|
||||
systemPrompt?: string | undefined;
|
||||
@@ -62,14 +62,14 @@ export type AppSimpleEditFormType = {
|
||||
};
|
||||
dataset: {
|
||||
datasets: SelectedDatasetType;
|
||||
similarity: number;
|
||||
limit: number;
|
||||
searchMode: `${DatasetSearchModeEnum}`;
|
||||
usingReRank: boolean;
|
||||
searchEmptyText: string;
|
||||
};
|
||||
cfr: {
|
||||
background: string;
|
||||
similarity?: number;
|
||||
limit?: number;
|
||||
usingReRank?: boolean;
|
||||
searchEmptyText?: string;
|
||||
datasetSearchUsingExtensionQuery?: boolean;
|
||||
datasetSearchExtensionModel?: string;
|
||||
datasetSearchExtensionBg?: string;
|
||||
};
|
||||
userGuide: {
|
||||
welcomeText: string;
|
||||
@@ -116,9 +116,6 @@ export type AppSimpleEditConfigTemplateType = {
|
||||
usingReRank: boolean;
|
||||
searchEmptyText?: boolean;
|
||||
};
|
||||
cfr?: {
|
||||
background?: boolean;
|
||||
};
|
||||
userGuide?: {
|
||||
welcomeText?: boolean;
|
||||
variables?: boolean;
|
||||
|
||||
@@ -6,9 +6,8 @@ import { getGuideModule, splitGuideModule } from '../module/utils';
|
||||
import { ModuleItemType } from '../module/type.d';
|
||||
import { DatasetSearchModeEnum } from '../dataset/constants';
|
||||
|
||||
export const getDefaultAppForm = (templateId = 'fastgpt-universal'): AppSimpleEditFormType => {
|
||||
export const getDefaultAppForm = (): AppSimpleEditFormType => {
|
||||
return {
|
||||
templateId,
|
||||
aiSettings: {
|
||||
model: 'gpt-3.5-turbo',
|
||||
systemPrompt: '',
|
||||
@@ -18,16 +17,15 @@ export const getDefaultAppForm = (templateId = 'fastgpt-universal'): AppSimpleEd
|
||||
quoteTemplate: '',
|
||||
maxToken: 4000
|
||||
},
|
||||
cfr: {
|
||||
background: ''
|
||||
},
|
||||
dataset: {
|
||||
datasets: [],
|
||||
similarity: 0.4,
|
||||
limit: 1500,
|
||||
searchEmptyText: '',
|
||||
searchMode: DatasetSearchModeEnum.embedding,
|
||||
usingReRank: false
|
||||
usingReRank: false,
|
||||
datasetSearchUsingExtensionQuery: true,
|
||||
datasetSearchExtensionBg: ''
|
||||
},
|
||||
userGuide: {
|
||||
welcomeText: '',
|
||||
@@ -41,14 +39,8 @@ export const getDefaultAppForm = (templateId = 'fastgpt-universal'): AppSimpleEd
|
||||
};
|
||||
|
||||
/* format app modules to edit form */
|
||||
export const appModules2Form = ({
|
||||
templateId,
|
||||
modules
|
||||
}: {
|
||||
modules: ModuleItemType[];
|
||||
templateId: string;
|
||||
}) => {
|
||||
const defaultAppForm = getDefaultAppForm(templateId);
|
||||
export const appModules2Form = ({ modules }: { modules: ModuleItemType[] }) => {
|
||||
const defaultAppForm = getDefaultAppForm();
|
||||
|
||||
const findInputValueByKey = (inputs: FlowNodeInputItemType[], key: string) => {
|
||||
return inputs.find((item) => item.key === key)?.value;
|
||||
@@ -91,7 +83,7 @@ export const appModules2Form = ({
|
||||
);
|
||||
defaultAppForm.dataset.limit = findInputValueByKey(
|
||||
module.inputs,
|
||||
ModuleInputKeyEnum.datasetLimit
|
||||
ModuleInputKeyEnum.datasetMaxTokens
|
||||
);
|
||||
defaultAppForm.dataset.searchMode =
|
||||
findInputValueByKey(module.inputs, ModuleInputKeyEnum.datasetSearchMode) ||
|
||||
@@ -100,6 +92,18 @@ export const appModules2Form = ({
|
||||
module.inputs,
|
||||
ModuleInputKeyEnum.datasetSearchUsingReRank
|
||||
);
|
||||
defaultAppForm.dataset.datasetSearchUsingExtensionQuery = findInputValueByKey(
|
||||
module.inputs,
|
||||
ModuleInputKeyEnum.datasetSearchUsingExtensionQuery
|
||||
);
|
||||
defaultAppForm.dataset.datasetSearchExtensionModel = findInputValueByKey(
|
||||
module.inputs,
|
||||
ModuleInputKeyEnum.datasetSearchExtensionModel
|
||||
);
|
||||
defaultAppForm.dataset.datasetSearchExtensionBg = findInputValueByKey(
|
||||
module.inputs,
|
||||
ModuleInputKeyEnum.datasetSearchExtensionBg
|
||||
);
|
||||
|
||||
// empty text
|
||||
const emptyOutputs =
|
||||
@@ -121,11 +125,6 @@ export const appModules2Form = ({
|
||||
questionGuide: questionGuide,
|
||||
tts: ttsConfig
|
||||
};
|
||||
} else if (module.flowType === FlowNodeTypeEnum.cfr) {
|
||||
const value = module.inputs.find((item) => item.key === ModuleInputKeyEnum.aiSystemPrompt);
|
||||
if (value) {
|
||||
defaultAppForm.cfr.background = value.value;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
|
||||
4
packages/global/core/chat/type.d.ts
vendored
@@ -109,6 +109,8 @@ export type moduleDispatchResType = {
|
||||
limit?: number;
|
||||
searchMode?: `${DatasetSearchModeEnum}`;
|
||||
searchUsingReRank?: boolean;
|
||||
extensionModel?: string;
|
||||
extensionResult?: string;
|
||||
|
||||
// cq
|
||||
cqList?: ClassifyQuestionAgentItemType[];
|
||||
@@ -119,7 +121,9 @@ export type moduleDispatchResType = {
|
||||
extractResult?: Record<string, any>;
|
||||
|
||||
// http
|
||||
params?: Record<string, any>;
|
||||
body?: Record<string, any>;
|
||||
headers?: Record<string, any>;
|
||||
httpResult?: Record<string, any>;
|
||||
|
||||
// plugin output
|
||||
|
||||
95
packages/global/core/dataset/search/utils.ts
Normal file
@@ -0,0 +1,95 @@
|
||||
import { countPromptTokens } from '../../../common/string/tiktoken';
|
||||
import { SearchScoreTypeEnum } from '../constants';
|
||||
import { SearchDataResponseItemType } from '../type';
|
||||
|
||||
/* dataset search result concat */
|
||||
export const datasetSearchResultConcat = (
|
||||
arr: { k: number; list: SearchDataResponseItemType[] }[]
|
||||
): SearchDataResponseItemType[] => {
|
||||
arr = arr.filter((item) => item.list.length > 0);
|
||||
|
||||
if (arr.length === 0) return [];
|
||||
if (arr.length === 1) return arr[0].list;
|
||||
|
||||
const map = new Map<string, SearchDataResponseItemType & { rrfScore: number }>();
|
||||
|
||||
// rrf
|
||||
arr.forEach((item) => {
|
||||
const k = item.k;
|
||||
|
||||
item.list.forEach((data, index) => {
|
||||
const rank = index + 1;
|
||||
const score = 1 / (k + rank);
|
||||
|
||||
const record = map.get(data.id);
|
||||
if (record) {
|
||||
// 合并两个score,有相同type的score,取最大值
|
||||
const concatScore = [...record.score];
|
||||
for (const dataItem of data.score) {
|
||||
const sameScore = concatScore.find((item) => item.type === dataItem.type);
|
||||
if (sameScore) {
|
||||
sameScore.value = Math.max(sameScore.value, dataItem.value);
|
||||
} else {
|
||||
concatScore.push(dataItem);
|
||||
}
|
||||
}
|
||||
|
||||
map.set(data.id, {
|
||||
...record,
|
||||
score: concatScore,
|
||||
rrfScore: record.rrfScore + score
|
||||
});
|
||||
} else {
|
||||
map.set(data.id, {
|
||||
...data,
|
||||
rrfScore: score
|
||||
});
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// sort
|
||||
const mapArray = Array.from(map.values());
|
||||
const results = mapArray.sort((a, b) => b.rrfScore - a.rrfScore);
|
||||
|
||||
return results.map((item, index) => {
|
||||
// if SearchScoreTypeEnum.rrf exist, reset score
|
||||
const rrfScore = item.score.find((item) => item.type === SearchScoreTypeEnum.rrf);
|
||||
if (rrfScore) {
|
||||
rrfScore.value = item.rrfScore;
|
||||
rrfScore.index = index;
|
||||
} else {
|
||||
item.score.push({
|
||||
type: SearchScoreTypeEnum.rrf,
|
||||
value: item.rrfScore,
|
||||
index
|
||||
});
|
||||
}
|
||||
|
||||
// @ts-ignore
|
||||
delete item.rrfScore;
|
||||
return item;
|
||||
});
|
||||
};
|
||||
|
||||
export const filterSearchResultsByMaxChars = (
|
||||
list: SearchDataResponseItemType[],
|
||||
maxTokens: number
|
||||
) => {
|
||||
const results: SearchDataResponseItemType[] = [];
|
||||
let totalTokens = 0;
|
||||
|
||||
for (let i = 0; i < list.length; i++) {
|
||||
const item = list[i];
|
||||
totalTokens += countPromptTokens(item.q + item.a);
|
||||
if (totalTokens > maxTokens + 500) {
|
||||
break;
|
||||
}
|
||||
results.push(item);
|
||||
if (totalTokens > maxTokens) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return results.length === 0 ? list.slice(0, 1) : results;
|
||||
};
|
||||
@@ -35,6 +35,7 @@ export function getSourceNameIcon({
|
||||
return 'file/fill/manual';
|
||||
}
|
||||
|
||||
/* get dataset data default index */
|
||||
export function getDefaultIndex(props?: { q?: string; a?: string; dataId?: string }) {
|
||||
const { q = '', a, dataId } = props || {};
|
||||
const qaStr = `${q}\n${a}`.trim();
|
||||
|
||||
9
packages/global/core/module/api.d.ts
vendored
@@ -1,14 +1,11 @@
|
||||
import { VectorModelItemType } from '../ai/model.d';
|
||||
import { DYNAMIC_INPUT_KEY } from './constants';
|
||||
|
||||
export type SelectedDatasetType = { datasetId: string; vectorModel: VectorModelItemType }[];
|
||||
|
||||
export type HttpBodyType<T = any> = {
|
||||
appId: string;
|
||||
chatId?: string;
|
||||
responseChatItemId?: string;
|
||||
variables: Record<string, any>;
|
||||
data: T;
|
||||
};
|
||||
[DYNAMIC_INPUT_KEY]: Record<string, any>;
|
||||
} & T;
|
||||
export type HttpQueryType = {
|
||||
appId: string;
|
||||
chatId?: string;
|
||||
|
||||
@@ -61,10 +61,12 @@ export enum ModuleInputKeyEnum {
|
||||
// dataset
|
||||
datasetSelectList = 'datasets',
|
||||
datasetSimilarity = 'similarity',
|
||||
datasetLimit = 'limit',
|
||||
datasetMaxTokens = 'limit',
|
||||
datasetSearchMode = 'searchMode',
|
||||
datasetSearchUsingReRank = 'usingReRank',
|
||||
datasetParamsModal = 'datasetParamsModal',
|
||||
datasetSearchUsingExtensionQuery = 'datasetSearchUsingExtensionQuery',
|
||||
datasetSearchExtensionModel = 'datasetSearchExtensionModel',
|
||||
datasetSearchExtensionBg = 'datasetSearchExtensionBg',
|
||||
|
||||
// context extract
|
||||
contextExtractInput = 'content',
|
||||
@@ -72,8 +74,10 @@ export enum ModuleInputKeyEnum {
|
||||
|
||||
// http
|
||||
httpReqUrl = 'system_httpReqUrl',
|
||||
httpHeader = 'system_httpHeader',
|
||||
httpHeaders = 'system_httpHeader',
|
||||
httpMethod = 'system_httpMethod',
|
||||
httpParams = 'system_httpParams',
|
||||
httpJsonBody = 'system_httpJsonBody',
|
||||
abandon_httpUrl = 'url',
|
||||
|
||||
// app
|
||||
|
||||
@@ -45,11 +45,15 @@ export enum FlowNodeTypeEnum {
|
||||
questionInput = 'questionInput',
|
||||
historyNode = 'historyNode',
|
||||
chatNode = 'chatNode',
|
||||
|
||||
datasetSearchNode = 'datasetSearchNode',
|
||||
datasetConcatNode = 'datasetConcatNode',
|
||||
|
||||
answerNode = 'answerNode',
|
||||
classifyQuestion = 'classifyQuestion',
|
||||
contentExtract = 'contentExtract',
|
||||
httpRequest = 'httpRequest',
|
||||
httpRequest468 = 'httpRequest468',
|
||||
runApp = 'app',
|
||||
pluginModule = 'pluginModule',
|
||||
pluginInput = 'pluginInput',
|
||||
|
||||
2
packages/global/core/module/node/type.d.ts
vendored
@@ -106,6 +106,6 @@ export type AIChatModuleProps = {
|
||||
export type DatasetModuleProps = {
|
||||
[ModuleInputKeyEnum.datasetSelectList]: SelectedDatasetType;
|
||||
[ModuleInputKeyEnum.datasetSimilarity]: number;
|
||||
[ModuleInputKeyEnum.datasetLimit]: number;
|
||||
[ModuleInputKeyEnum.datasetMaxTokens]: number;
|
||||
[ModuleInputKeyEnum.datasetStartReRank]: boolean;
|
||||
};
|
||||
|
||||
@@ -7,6 +7,7 @@ export const Input_Template_Switch: FlowNodeInputItemType = {
|
||||
key: ModuleInputKeyEnum.switch,
|
||||
type: FlowNodeInputTypeEnum.target,
|
||||
label: 'core.module.input.label.switch',
|
||||
description: 'core.module.input.description.Trigger',
|
||||
valueType: ModuleIOValueTypeEnum.any,
|
||||
showTargetInApp: true,
|
||||
showTargetInPlugin: true
|
||||
@@ -27,8 +28,8 @@ export const Input_Template_History: FlowNodeInputItemType = {
|
||||
|
||||
export const Input_Template_UserChatInput: FlowNodeInputItemType = {
|
||||
key: ModuleInputKeyEnum.userChatInput,
|
||||
type: FlowNodeInputTypeEnum.hidden,
|
||||
label: 'core.module.input.label.user question',
|
||||
type: FlowNodeInputTypeEnum.custom,
|
||||
label: '',
|
||||
required: true,
|
||||
valueType: ModuleIOValueTypeEnum.string,
|
||||
showTargetInApp: true,
|
||||
@@ -56,3 +57,13 @@ export const Input_Template_DynamicInput: FlowNodeInputItemType = {
|
||||
showTargetInPlugin: true,
|
||||
hideInApp: true
|
||||
};
|
||||
|
||||
export const Input_Template_Dataset_Quote: FlowNodeInputItemType = {
|
||||
key: ModuleInputKeyEnum.aiChatDatasetQuote,
|
||||
type: FlowNodeInputTypeEnum.target,
|
||||
label: '知识库引用',
|
||||
description: 'core.module.Dataset quote.Input description',
|
||||
valueType: ModuleIOValueTypeEnum.datasetQuote,
|
||||
showTargetInApp: true,
|
||||
showTargetInPlugin: true
|
||||
};
|
||||
|
||||
@@ -1,52 +0,0 @@
|
||||
import {
|
||||
FlowNodeInputTypeEnum,
|
||||
FlowNodeOutputTypeEnum,
|
||||
FlowNodeTypeEnum
|
||||
} from '../../../node/constant';
|
||||
import { FlowModuleTemplateType } from '../../../type';
|
||||
import {
|
||||
ModuleIOValueTypeEnum,
|
||||
ModuleInputKeyEnum,
|
||||
ModuleTemplateTypeEnum
|
||||
} from '../../../constants';
|
||||
|
||||
export const HistoryModule: FlowModuleTemplateType = {
|
||||
id: FlowNodeTypeEnum.historyNode,
|
||||
templateType: ModuleTemplateTypeEnum.systemInput,
|
||||
flowType: FlowNodeTypeEnum.historyNode,
|
||||
avatar: '/imgs/module/history.png',
|
||||
name: '聊天记录(弃用)',
|
||||
intro: '聊天记录,该模块已被弃用',
|
||||
inputs: [
|
||||
{
|
||||
key: ModuleInputKeyEnum.historyMaxAmount,
|
||||
type: FlowNodeInputTypeEnum.numberInput,
|
||||
label: '最长记录数',
|
||||
description:
|
||||
'该记录数不代表模型可接收这么多的历史记录,具体可接收多少历史记录,取决于模型的能力,通常建议不要超过20条。',
|
||||
value: 6,
|
||||
valueType: ModuleIOValueTypeEnum.number,
|
||||
min: 0,
|
||||
max: 100,
|
||||
showTargetInApp: false,
|
||||
showTargetInPlugin: false
|
||||
},
|
||||
{
|
||||
key: ModuleInputKeyEnum.history,
|
||||
type: FlowNodeInputTypeEnum.hidden,
|
||||
valueType: ModuleIOValueTypeEnum.chatHistory,
|
||||
label: '聊天记录',
|
||||
showTargetInApp: false,
|
||||
showTargetInPlugin: false
|
||||
}
|
||||
],
|
||||
outputs: [
|
||||
{
|
||||
key: ModuleInputKeyEnum.history,
|
||||
label: '聊天记录',
|
||||
valueType: ModuleIOValueTypeEnum.chatHistory,
|
||||
type: FlowNodeOutputTypeEnum.source,
|
||||
targets: []
|
||||
}
|
||||
]
|
||||
};
|
||||
@@ -2,15 +2,19 @@ import {
|
||||
FlowNodeInputTypeEnum,
|
||||
FlowNodeOutputTypeEnum,
|
||||
FlowNodeTypeEnum
|
||||
} from '../../node/constant';
|
||||
import { FlowModuleTemplateType } from '../../type';
|
||||
import { ModuleIOValueTypeEnum, ModuleInputKeyEnum, ModuleTemplateTypeEnum } from '../../constants';
|
||||
} from '../../../node/constant';
|
||||
import { FlowModuleTemplateType } from '../../../type';
|
||||
import {
|
||||
ModuleIOValueTypeEnum,
|
||||
ModuleInputKeyEnum,
|
||||
ModuleTemplateTypeEnum
|
||||
} from '../../../constants';
|
||||
import {
|
||||
Input_Template_AddInputParam,
|
||||
Input_Template_DynamicInput,
|
||||
Input_Template_Switch
|
||||
} from '../input';
|
||||
import { Output_Template_AddOutput, Output_Template_Finish } from '../output';
|
||||
} from '../../input';
|
||||
import { Output_Template_AddOutput, Output_Template_Finish } from '../../output';
|
||||
|
||||
export const HttpModule: FlowModuleTemplateType = {
|
||||
id: FlowNodeTypeEnum.httpRequest,
|
||||
@@ -18,7 +22,8 @@ export const HttpModule: FlowModuleTemplateType = {
|
||||
flowType: FlowNodeTypeEnum.httpRequest,
|
||||
avatar: '/imgs/module/http.png',
|
||||
name: 'core.module.template.Http request',
|
||||
intro: 'core.module.template.Http request intro',
|
||||
intro:
|
||||
'该Http模块已被弃用,将于2024/3/31 不再提供服务。请尽快删除该模块并重新添加新的 Http 模块。',
|
||||
showStatus: true,
|
||||
inputs: [
|
||||
Input_Template_Switch,
|
||||
@@ -54,9 +59,10 @@ export const HttpModule: FlowModuleTemplateType = {
|
||||
showTargetInPlugin: false
|
||||
},
|
||||
{
|
||||
key: ModuleInputKeyEnum.httpHeader,
|
||||
key: ModuleInputKeyEnum.httpHeaders,
|
||||
type: FlowNodeInputTypeEnum.JSONEditor,
|
||||
valueType: ModuleIOValueTypeEnum.string,
|
||||
value: '',
|
||||
label: 'core.module.input.label.Http Request Header',
|
||||
description: 'core.module.input.description.Http Request Header',
|
||||
placeholder: 'core.module.input.description.Http Request Header',
|
||||
@@ -11,6 +11,7 @@ import {
|
||||
ModuleTemplateTypeEnum
|
||||
} from '../../constants';
|
||||
import {
|
||||
Input_Template_Dataset_Quote,
|
||||
Input_Template_History,
|
||||
Input_Template_Switch,
|
||||
Input_Template_UserChatInput
|
||||
@@ -108,16 +109,8 @@ export const AiChatModule: FlowModuleTemplateType = {
|
||||
showTargetInPlugin: true
|
||||
},
|
||||
Input_Template_History,
|
||||
{
|
||||
key: ModuleInputKeyEnum.aiChatDatasetQuote,
|
||||
type: FlowNodeInputTypeEnum.target,
|
||||
label: 'core.module.input.label.Quote',
|
||||
description: 'core.module.input.description.Quote',
|
||||
valueType: ModuleIOValueTypeEnum.datasetQuote,
|
||||
showTargetInApp: true,
|
||||
showTargetInPlugin: true
|
||||
},
|
||||
Input_Template_UserChatInput
|
||||
Input_Template_UserChatInput,
|
||||
Input_Template_Dataset_Quote
|
||||
],
|
||||
outputs: [
|
||||
Output_Template_UserChatInput,
|
||||
|
||||
@@ -19,11 +19,11 @@ import { Output_Template_UserChatInput } from '../output';
|
||||
|
||||
export const AiCFR: FlowModuleTemplateType = {
|
||||
id: FlowNodeTypeEnum.chatNode,
|
||||
templateType: ModuleTemplateTypeEnum.tools,
|
||||
templateType: ModuleTemplateTypeEnum.other,
|
||||
flowType: FlowNodeTypeEnum.cfr,
|
||||
avatar: '/imgs/module/cfr.svg',
|
||||
name: 'core.module.template.cfr',
|
||||
intro: 'core.module.template.cfr intro',
|
||||
name: 'core.module.template.Query extension',
|
||||
intro: '该模块已合并到知识库搜索参数中,无需单独使用。模块将于2024/3/31弃用,请尽快修改。',
|
||||
showStatus: true,
|
||||
inputs: [
|
||||
Input_Template_Switch,
|
||||
@@ -39,11 +39,11 @@ export const AiCFR: FlowModuleTemplateType = {
|
||||
{
|
||||
key: ModuleInputKeyEnum.aiSystemPrompt,
|
||||
type: FlowNodeInputTypeEnum.textarea,
|
||||
label: 'core.module.input.label.cfr background',
|
||||
label: 'core.module.input.label.Background',
|
||||
max: 300,
|
||||
valueType: ModuleIOValueTypeEnum.string,
|
||||
description: 'core.app.edit.cfr background tip',
|
||||
placeholder: 'core.module.input.placeholder.cfr background',
|
||||
description: 'core.app.edit.Query extension background tip',
|
||||
placeholder: 'core.module.QueryExtension.placeholder',
|
||||
showTargetInApp: true,
|
||||
showTargetInPlugin: true
|
||||
},
|
||||
|
||||
54
packages/global/core/module/template/system/datasetConcat.ts
Normal file
@@ -0,0 +1,54 @@
|
||||
import {
|
||||
FlowNodeInputTypeEnum,
|
||||
FlowNodeOutputTypeEnum,
|
||||
FlowNodeTypeEnum
|
||||
} from '../../node/constant';
|
||||
import { FlowModuleTemplateType } from '../../type.d';
|
||||
import {
|
||||
ModuleIOValueTypeEnum,
|
||||
ModuleInputKeyEnum,
|
||||
ModuleOutputKeyEnum,
|
||||
ModuleTemplateTypeEnum
|
||||
} from '../../constants';
|
||||
import { Input_Template_Dataset_Quote, Input_Template_Switch } from '../input';
|
||||
import { Output_Template_Finish } from '../output';
|
||||
import { getNanoid } from '../../../../common/string/tools';
|
||||
|
||||
export const getOneQuoteInputTemplate = (key = getNanoid()) => ({
|
||||
...Input_Template_Dataset_Quote,
|
||||
key,
|
||||
type: FlowNodeInputTypeEnum.hidden
|
||||
});
|
||||
|
||||
export const DatasetConcatModule: FlowModuleTemplateType = {
|
||||
id: FlowNodeTypeEnum.datasetConcatNode,
|
||||
flowType: FlowNodeTypeEnum.datasetConcatNode,
|
||||
templateType: ModuleTemplateTypeEnum.tools,
|
||||
avatar: '/imgs/module/concat.svg',
|
||||
name: '知识库搜索引用合并',
|
||||
intro: 'core.module.template.Dataset search result concat intro',
|
||||
showStatus: false,
|
||||
inputs: [
|
||||
Input_Template_Switch,
|
||||
{
|
||||
key: ModuleInputKeyEnum.datasetMaxTokens,
|
||||
type: FlowNodeInputTypeEnum.custom,
|
||||
label: '最大 Tokens',
|
||||
value: 1500,
|
||||
valueType: ModuleIOValueTypeEnum.number,
|
||||
showTargetInApp: false,
|
||||
showTargetInPlugin: false
|
||||
},
|
||||
getOneQuoteInputTemplate('defaultQuote')
|
||||
],
|
||||
outputs: [
|
||||
{
|
||||
key: ModuleOutputKeyEnum.datasetQuoteQA,
|
||||
label: 'core.module.Dataset quote.label',
|
||||
type: FlowNodeOutputTypeEnum.source,
|
||||
valueType: ModuleIOValueTypeEnum.datasetQuote,
|
||||
targets: []
|
||||
},
|
||||
Output_Template_Finish
|
||||
]
|
||||
};
|
||||
@@ -37,22 +37,15 @@ export const DatasetSearchModule: FlowModuleTemplateType = {
|
||||
},
|
||||
{
|
||||
key: ModuleInputKeyEnum.datasetSimilarity,
|
||||
type: FlowNodeInputTypeEnum.hidden,
|
||||
type: FlowNodeInputTypeEnum.selectDatasetParamsModal,
|
||||
label: '',
|
||||
value: 0.4,
|
||||
valueType: ModuleIOValueTypeEnum.number,
|
||||
min: 0,
|
||||
max: 1,
|
||||
step: 0.01,
|
||||
markList: [
|
||||
{ label: '0', value: 0 },
|
||||
{ label: '1', value: 1 }
|
||||
],
|
||||
showTargetInApp: false,
|
||||
showTargetInPlugin: false
|
||||
},
|
||||
{
|
||||
key: ModuleInputKeyEnum.datasetLimit,
|
||||
key: ModuleInputKeyEnum.datasetMaxTokens,
|
||||
type: FlowNodeInputTypeEnum.hidden,
|
||||
label: '',
|
||||
value: 1500,
|
||||
@@ -79,13 +72,31 @@ export const DatasetSearchModule: FlowModuleTemplateType = {
|
||||
value: false
|
||||
},
|
||||
{
|
||||
key: ModuleInputKeyEnum.datasetParamsModal,
|
||||
type: FlowNodeInputTypeEnum.selectDatasetParamsModal,
|
||||
key: ModuleInputKeyEnum.datasetSearchUsingExtensionQuery,
|
||||
type: FlowNodeInputTypeEnum.hidden,
|
||||
label: '',
|
||||
valueType: ModuleIOValueTypeEnum.any,
|
||||
valueType: ModuleIOValueTypeEnum.boolean,
|
||||
showTargetInApp: false,
|
||||
showTargetInPlugin: false,
|
||||
value: true
|
||||
},
|
||||
{
|
||||
key: ModuleInputKeyEnum.datasetSearchExtensionModel,
|
||||
type: FlowNodeInputTypeEnum.hidden,
|
||||
label: '',
|
||||
valueType: ModuleIOValueTypeEnum.string,
|
||||
showTargetInApp: false,
|
||||
showTargetInPlugin: false
|
||||
},
|
||||
{
|
||||
key: ModuleInputKeyEnum.datasetSearchExtensionBg,
|
||||
type: FlowNodeInputTypeEnum.hidden,
|
||||
label: '',
|
||||
valueType: ModuleIOValueTypeEnum.string,
|
||||
showTargetInApp: false,
|
||||
showTargetInPlugin: false,
|
||||
value: ''
|
||||
},
|
||||
Input_Template_UserChatInput
|
||||
],
|
||||
outputs: [
|
||||
@@ -106,8 +117,7 @@ export const DatasetSearchModule: FlowModuleTemplateType = {
|
||||
},
|
||||
{
|
||||
key: ModuleOutputKeyEnum.datasetQuoteQA,
|
||||
label: 'core.module.output.label.Quote',
|
||||
description: 'core.module.output.label.Quote intro',
|
||||
label: 'core.module.Dataset quote.label',
|
||||
type: FlowNodeOutputTypeEnum.source,
|
||||
valueType: ModuleIOValueTypeEnum.datasetQuote,
|
||||
targets: []
|
||||
|
||||
122
packages/global/core/module/template/system/http468.ts
Normal file
@@ -0,0 +1,122 @@
|
||||
import {
|
||||
FlowNodeInputTypeEnum,
|
||||
FlowNodeOutputTypeEnum,
|
||||
FlowNodeTypeEnum
|
||||
} from '../../node/constant';
|
||||
import { FlowModuleTemplateType } from '../../type';
|
||||
import {
|
||||
DYNAMIC_INPUT_KEY,
|
||||
ModuleIOValueTypeEnum,
|
||||
ModuleInputKeyEnum,
|
||||
ModuleTemplateTypeEnum
|
||||
} from '../../constants';
|
||||
import {
|
||||
Input_Template_AddInputParam,
|
||||
Input_Template_DynamicInput,
|
||||
Input_Template_Switch
|
||||
} from '../input';
|
||||
import { Output_Template_AddOutput, Output_Template_Finish } from '../output';
|
||||
|
||||
export const HttpModule468: FlowModuleTemplateType = {
|
||||
id: FlowNodeTypeEnum.httpRequest468,
|
||||
templateType: ModuleTemplateTypeEnum.externalCall,
|
||||
flowType: FlowNodeTypeEnum.httpRequest468,
|
||||
avatar: '/imgs/module/http.png',
|
||||
name: 'core.module.template.Http request',
|
||||
intro: 'core.module.template.Http request intro',
|
||||
showStatus: true,
|
||||
inputs: [
|
||||
Input_Template_Switch,
|
||||
{
|
||||
key: ModuleInputKeyEnum.httpMethod,
|
||||
type: FlowNodeInputTypeEnum.custom,
|
||||
valueType: ModuleIOValueTypeEnum.string,
|
||||
label: '',
|
||||
value: 'POST',
|
||||
required: true,
|
||||
showTargetInApp: false,
|
||||
showTargetInPlugin: false
|
||||
},
|
||||
{
|
||||
key: ModuleInputKeyEnum.httpReqUrl,
|
||||
type: FlowNodeInputTypeEnum.hidden,
|
||||
valueType: ModuleIOValueTypeEnum.string,
|
||||
label: '',
|
||||
description: 'core.module.input.description.Http Request Url',
|
||||
placeholder: 'https://api.ai.com/getInventory',
|
||||
required: false,
|
||||
showTargetInApp: false,
|
||||
showTargetInPlugin: false
|
||||
},
|
||||
{
|
||||
key: ModuleInputKeyEnum.httpHeaders,
|
||||
type: FlowNodeInputTypeEnum.custom,
|
||||
valueType: ModuleIOValueTypeEnum.any,
|
||||
value: [],
|
||||
label: '',
|
||||
description: 'core.module.input.description.Http Request Header',
|
||||
placeholder: 'core.module.input.description.Http Request Header',
|
||||
required: false,
|
||||
showTargetInApp: false,
|
||||
showTargetInPlugin: false
|
||||
},
|
||||
{
|
||||
key: ModuleInputKeyEnum.httpParams,
|
||||
type: FlowNodeInputTypeEnum.hidden,
|
||||
valueType: ModuleIOValueTypeEnum.any,
|
||||
value: [],
|
||||
label: '',
|
||||
required: false,
|
||||
showTargetInApp: false,
|
||||
showTargetInPlugin: false
|
||||
},
|
||||
{
|
||||
key: ModuleInputKeyEnum.httpJsonBody,
|
||||
type: FlowNodeInputTypeEnum.hidden,
|
||||
valueType: ModuleIOValueTypeEnum.any,
|
||||
value: '',
|
||||
label: '',
|
||||
required: false,
|
||||
showTargetInApp: false,
|
||||
showTargetInPlugin: false
|
||||
},
|
||||
Input_Template_DynamicInput,
|
||||
{
|
||||
...Input_Template_AddInputParam,
|
||||
editField: {
|
||||
key: true,
|
||||
name: true,
|
||||
description: true,
|
||||
required: true,
|
||||
dataType: true
|
||||
},
|
||||
defaultEditField: {
|
||||
label: '',
|
||||
key: '',
|
||||
description: '',
|
||||
inputType: FlowNodeInputTypeEnum.target,
|
||||
valueType: ModuleIOValueTypeEnum.string,
|
||||
required: true
|
||||
}
|
||||
}
|
||||
],
|
||||
outputs: [
|
||||
Output_Template_Finish,
|
||||
{
|
||||
...Output_Template_AddOutput,
|
||||
editField: {
|
||||
key: true,
|
||||
name: true,
|
||||
description: true,
|
||||
dataType: true
|
||||
},
|
||||
defaultEditField: {
|
||||
label: '',
|
||||
key: '',
|
||||
description: '',
|
||||
outputType: FlowNodeOutputTypeEnum.source,
|
||||
valueType: ModuleIOValueTypeEnum.string
|
||||
}
|
||||
}
|
||||
]
|
||||
};
|
||||
@@ -6,7 +6,6 @@ export const RunPluginModule: FlowModuleTemplateType = {
|
||||
id: FlowNodeTypeEnum.pluginModule,
|
||||
templateType: ModuleTemplateTypeEnum.externalCall,
|
||||
flowType: FlowNodeTypeEnum.pluginModule,
|
||||
avatar: '/imgs/module/custom.png',
|
||||
intro: '',
|
||||
name: '',
|
||||
showStatus: false,
|
||||
|
||||
@@ -15,7 +15,7 @@ export const UserInputModule: FlowModuleTemplateType = {
|
||||
id: FlowNodeTypeEnum.questionInput,
|
||||
templateType: ModuleTemplateTypeEnum.systemInput,
|
||||
flowType: FlowNodeTypeEnum.questionInput,
|
||||
avatar: '/imgs/module/userChatInput.png',
|
||||
avatar: '/imgs/module/userChatInput.svg',
|
||||
name: 'core.module.template.Chat entrance',
|
||||
intro: 'core.module.template.Chat entrance intro',
|
||||
inputs: [
|
||||
|
||||
9
packages/global/core/module/type.d.ts
vendored
@@ -3,9 +3,9 @@ import { ModuleIOValueTypeEnum, ModuleTemplateTypeEnum, VariableInputEnum } from
|
||||
import { FlowNodeInputItemType, FlowNodeOutputItemType } from './node/type';
|
||||
|
||||
export type FlowModuleTemplateType = {
|
||||
id: string;
|
||||
id: string; // module id, unique
|
||||
templateType: `${ModuleTemplateTypeEnum}`;
|
||||
flowType: `${FlowNodeTypeEnum}`; // unique
|
||||
flowType: `${FlowNodeTypeEnum}`; // render node card
|
||||
avatar?: string;
|
||||
name: string;
|
||||
intro: string; // template list intro
|
||||
@@ -85,12 +85,14 @@ export type RunningModuleItemType = {
|
||||
inputs: {
|
||||
key: string;
|
||||
value?: any;
|
||||
valueType?: `${ModuleIOValueTypeEnum}`;
|
||||
}[];
|
||||
outputs: {
|
||||
key: string;
|
||||
answer?: boolean;
|
||||
response?: boolean;
|
||||
value?: any;
|
||||
valueType?: `${ModuleIOValueTypeEnum}`;
|
||||
targets: {
|
||||
moduleId: string;
|
||||
key: string;
|
||||
@@ -115,5 +117,6 @@ export type ChatDispatchProps = {
|
||||
|
||||
export type ModuleDispatchProps<T> = ChatDispatchProps & {
|
||||
outputs: RunningModuleItemType['outputs'];
|
||||
inputs: T;
|
||||
inputs: RunningModuleItemType['inputs'];
|
||||
params: T;
|
||||
};
|
||||
|
||||
@@ -1,8 +1,14 @@
|
||||
import { FlowNodeInputTypeEnum, FlowNodeTypeEnum } from './node/constant';
|
||||
import { ModuleIOValueTypeEnum, ModuleInputKeyEnum, variableMap } from './constants';
|
||||
import {
|
||||
ModuleIOValueTypeEnum,
|
||||
ModuleInputKeyEnum,
|
||||
VariableInputEnum,
|
||||
variableMap
|
||||
} from './constants';
|
||||
import { FlowNodeInputItemType, FlowNodeOutputItemType } from './node/type';
|
||||
import { AppTTSConfigType, ModuleItemType, VariableItemType } from './type';
|
||||
import { Input_Template_Switch } from './template/input';
|
||||
import { EditorVariablePickerType } from '../../../web/components/common/Textarea/PromptEditor/type';
|
||||
|
||||
export const getGuideModule = (modules: ModuleItemType[]) =>
|
||||
modules.find((item) => item.flowType === FlowNodeTypeEnum.userGuide);
|
||||
@@ -68,7 +74,7 @@ export function plugin2ModuleIO(
|
||||
// plugin id
|
||||
key: ModuleInputKeyEnum.pluginId,
|
||||
type: FlowNodeInputTypeEnum.hidden,
|
||||
label: 'pluginId',
|
||||
label: '',
|
||||
value: pluginId,
|
||||
valueType: ModuleIOValueTypeEnum.string,
|
||||
connected: true,
|
||||
@@ -95,11 +101,11 @@ export function plugin2ModuleIO(
|
||||
};
|
||||
}
|
||||
|
||||
export const formatVariablesIcon = (
|
||||
variables: VariableItemType[]
|
||||
): (VariableItemType & { icon: string })[] => {
|
||||
export const formatEditorVariablePickerIcon = (
|
||||
variables: { key: string; label: string; type?: `${VariableInputEnum}` }[]
|
||||
): EditorVariablePickerType[] => {
|
||||
return variables.map((item) => ({
|
||||
...item,
|
||||
icon: variableMap[item.type]?.icon
|
||||
icon: item.type ? variableMap[item.type]?.icon : variableMap['input'].icon
|
||||
}));
|
||||
};
|
||||
|
||||
@@ -7,13 +7,28 @@ export enum BillSourceEnum {
|
||||
api = 'api',
|
||||
shareLink = 'shareLink',
|
||||
training = 'training',
|
||||
datasetExpand = 'datasetExpand'
|
||||
|
||||
standSubPlan = 'standSubPlan',
|
||||
extraDatasetSub = 'extraDatasetSub'
|
||||
}
|
||||
|
||||
export const BillSourceMap: Record<`${BillSourceEnum}`, string> = {
|
||||
[BillSourceEnum.fastgpt]: '在线使用',
|
||||
[BillSourceEnum.api]: 'Api',
|
||||
[BillSourceEnum.shareLink]: '免登录链接',
|
||||
[BillSourceEnum.training]: '数据训练',
|
||||
[BillSourceEnum.datasetExpand]: '知识库扩容'
|
||||
export const BillSourceMap = {
|
||||
[BillSourceEnum.fastgpt]: {
|
||||
label: '在线使用'
|
||||
},
|
||||
[BillSourceEnum.api]: {
|
||||
label: 'Api'
|
||||
},
|
||||
[BillSourceEnum.shareLink]: {
|
||||
label: '免登录链接'
|
||||
},
|
||||
[BillSourceEnum.training]: {
|
||||
label: 'dataset.Training Name'
|
||||
},
|
||||
[BillSourceEnum.standSubPlan]: {
|
||||
label: 'support.wallet.subscription.type.standard'
|
||||
},
|
||||
[BillSourceEnum.extraDatasetSub]: {
|
||||
label: 'support.wallet.subscription.type.extraDatasetSize'
|
||||
}
|
||||
};
|
||||
|
||||
@@ -7,6 +7,9 @@ export type BillListItemCountType = {
|
||||
charsLength?: number;
|
||||
duration?: number;
|
||||
|
||||
// sub
|
||||
datasetSize?: number;
|
||||
|
||||
// abandon
|
||||
tokenLen?: number;
|
||||
};
|
||||
@@ -23,7 +26,7 @@ export type BillSchema = CreateBillProps & {
|
||||
|
||||
export type BillItemType = {
|
||||
id: string;
|
||||
memberName: string;
|
||||
// memberName: string;
|
||||
time: Date;
|
||||
appName: string;
|
||||
source: BillSchema['source'];
|
||||
|
||||
41
packages/global/support/wallet/pay/constants.ts
Normal file
@@ -0,0 +1,41 @@
|
||||
export enum PayTypeEnum {
|
||||
balance = 'balance',
|
||||
subStandard = 'subStandard',
|
||||
subExtraDatasetSize = 'subExtraDatasetSize',
|
||||
subExtraPoints = 'subExtraPoints'
|
||||
}
|
||||
export const payTypeMap = {
|
||||
[PayTypeEnum.balance]: {
|
||||
label: 'support.user.team.pay.type.balance'
|
||||
},
|
||||
[PayTypeEnum.subStandard]: {
|
||||
label: 'support.wallet.subscription.type.standard'
|
||||
},
|
||||
[PayTypeEnum.subExtraDatasetSize]: {
|
||||
label: 'support.wallet.subscription.type.extraDatasetSize'
|
||||
},
|
||||
[PayTypeEnum.subExtraPoints]: {
|
||||
label: 'support.wallet.subscription.type.extraPoints'
|
||||
}
|
||||
};
|
||||
|
||||
export enum PayStatusEnum {
|
||||
SUCCESS = 'SUCCESS',
|
||||
REFUND = 'REFUND',
|
||||
NOTPAY = 'NOTPAY',
|
||||
CLOSED = 'CLOSED'
|
||||
}
|
||||
export const payStatusMap = {
|
||||
[PayStatusEnum.SUCCESS]: {
|
||||
label: 'support.user.team.pay.status.success'
|
||||
},
|
||||
[PayStatusEnum.REFUND]: {
|
||||
label: 'support.user.team.pay.status.refund'
|
||||
},
|
||||
[PayStatusEnum.NOTPAY]: {
|
||||
label: 'support.user.team.pay.status.notpay'
|
||||
},
|
||||
[PayStatusEnum.CLOSED]: {
|
||||
label: 'support.user.team.pay.status.closed'
|
||||
}
|
||||
};
|
||||
10
packages/global/support/wallet/pay/type.d.ts
vendored
@@ -1,10 +1,18 @@
|
||||
import { SubModeEnum, SubTypeEnum } from '../sub/constants';
|
||||
import { PayTypeEnum } from './constants';
|
||||
|
||||
export type PaySchema = {
|
||||
_id: string;
|
||||
userId: string;
|
||||
teamId: string;
|
||||
tmbId: string;
|
||||
createTime: Date;
|
||||
price: number;
|
||||
orderId: string;
|
||||
status: 'SUCCESS' | 'REFUND' | 'NOTPAY' | 'CLOSED';
|
||||
type: `${PayType}`;
|
||||
|
||||
price: number;
|
||||
payWay: 'balance' | 'wx';
|
||||
|
||||
subMetadata: {};
|
||||
};
|
||||
|
||||
34
packages/global/support/wallet/sub/api.d.ts
vendored
@@ -1,4 +1,36 @@
|
||||
import { StandardSubLevelEnum, SubModeEnum } from './constants';
|
||||
import { TeamSubSchema } from './type.d';
|
||||
|
||||
export type SubDatasetSizeParams = {
|
||||
size: number;
|
||||
renew: boolean;
|
||||
};
|
||||
export type StandardSubPlanParams = {
|
||||
level: `${StandardSubLevelEnum}`;
|
||||
mode: `${SubModeEnum}`;
|
||||
};
|
||||
|
||||
export type SubDatasetSizePreviewCheckResponse = {
|
||||
payForNewSub: boolean; // Does this change require payment
|
||||
newSubSize: number; // new sub dataset size
|
||||
alreadySubSize: number; // old sub dataset size
|
||||
payPrice: number; // this change require payment
|
||||
newPlanPrice: number; // the new sub price
|
||||
newSubStartTime: Date;
|
||||
newSubExpiredTime: Date;
|
||||
balanceEnough: boolean; // team balance is enough
|
||||
};
|
||||
export type StandardSubPlanUpdateResponse = {
|
||||
balanceEnough: boolean; // team balance is enough
|
||||
payPrice?: number;
|
||||
planPrice: number;
|
||||
planPointPrice: number;
|
||||
|
||||
currentMode: `${SubModeEnum}`;
|
||||
nextMode: `${SubModeEnum}`;
|
||||
currentSubLevel: `${StandardSubLevelEnum}`;
|
||||
nextSubLevel: `${StandardSubLevelEnum}`;
|
||||
totalPoints: number;
|
||||
surplusPoints: number;
|
||||
planStartTime: Date;
|
||||
planExpiredTime: Date;
|
||||
};
|
||||
|
||||
@@ -1,37 +1,80 @@
|
||||
export enum SubTypeEnum {
|
||||
datasetStore = 'datasetStore'
|
||||
}
|
||||
export const POINTS_SCALE = 10000;
|
||||
|
||||
export enum SubTypeEnum {
|
||||
standard = 'standard',
|
||||
extraDatasetSize = 'extraDatasetSize',
|
||||
extraPoints = 'extraPoints'
|
||||
}
|
||||
export const subTypeMap = {
|
||||
[SubTypeEnum.datasetStore]: {
|
||||
label: 'support.user.team.subscription.type.datasetStore'
|
||||
[SubTypeEnum.standard]: {
|
||||
label: 'support.wallet.subscription.type.standard'
|
||||
},
|
||||
[SubTypeEnum.extraDatasetSize]: {
|
||||
label: 'support.wallet.subscription.type.extraDatasetSize'
|
||||
},
|
||||
[SubTypeEnum.extraPoints]: {
|
||||
label: 'support.wallet.subscription.type.extraPoints'
|
||||
}
|
||||
};
|
||||
|
||||
export enum SubStatusEnum {
|
||||
active = 'active',
|
||||
canceled = 'canceled'
|
||||
}
|
||||
export const subStatusMap = {
|
||||
[SubStatusEnum.active]: {
|
||||
label: 'support.wallet.subscription.status.active'
|
||||
},
|
||||
[SubStatusEnum.canceled]: {
|
||||
label: 'support.wallet.subscription.status.canceled'
|
||||
}
|
||||
};
|
||||
export const subSelectMap = {
|
||||
true: SubStatusEnum.active,
|
||||
false: SubStatusEnum.canceled
|
||||
};
|
||||
|
||||
export enum SubModeEnum {
|
||||
month = 'month',
|
||||
year = 'year'
|
||||
}
|
||||
|
||||
export const subModeMap = {
|
||||
[SubModeEnum.month]: {
|
||||
label: 'support.user.team.subscription.mode.month'
|
||||
label: 'support.wallet.subscription.mode.month',
|
||||
durationMonth: 1
|
||||
},
|
||||
[SubModeEnum.year]: {
|
||||
label: 'support.user.team.subscription.mode.year'
|
||||
label: 'support.wallet.subscription.mode.year',
|
||||
durationMonth: 12
|
||||
}
|
||||
};
|
||||
|
||||
export enum SubStatusEnum {
|
||||
active = 'active',
|
||||
expired = 'expired'
|
||||
export enum StandardSubLevelEnum {
|
||||
free = 'free',
|
||||
experience = 'experience',
|
||||
team = 'team',
|
||||
enterprise = 'enterprise',
|
||||
custom = 'custom'
|
||||
}
|
||||
|
||||
export const subStatusMap = {
|
||||
[SubStatusEnum.active]: {
|
||||
label: 'support.user.team.subscription.status.active'
|
||||
export const standardSubLevelMap = {
|
||||
[StandardSubLevelEnum.free]: {
|
||||
label: 'support.wallet.subscription.standardSubLevel.free',
|
||||
desc: 'support.wallet.subscription.standardSubLevel.free desc'
|
||||
},
|
||||
[SubStatusEnum.expired]: {
|
||||
label: 'support.user.team.subscription.status.expired'
|
||||
[StandardSubLevelEnum.experience]: {
|
||||
label: 'support.wallet.subscription.standardSubLevel.experience',
|
||||
desc: 'support.wallet.subscription.standardSubLevel.experience desc'
|
||||
},
|
||||
[StandardSubLevelEnum.team]: {
|
||||
label: 'support.wallet.subscription.standardSubLevel.team',
|
||||
desc: ''
|
||||
},
|
||||
[StandardSubLevelEnum.enterprise]: {
|
||||
label: 'support.wallet.subscription.standardSubLevel.enterprise',
|
||||
desc: ''
|
||||
},
|
||||
[StandardSubLevelEnum.custom]: {
|
||||
label: 'support.wallet.subscription.standardSubLevel.custom',
|
||||
desc: ''
|
||||
}
|
||||
};
|
||||
|
||||
67
packages/global/support/wallet/sub/type.d.ts
vendored
@@ -1,12 +1,73 @@
|
||||
import { SubModeEnum, SubStatusEnum, SubTypeEnum } from './constants';
|
||||
import { StandardSubLevelEnum, SubModeEnum, SubStatusEnum, SubTypeEnum } from './constants';
|
||||
|
||||
// Content of plan
|
||||
export type TeamStandardSubPlanItemType = {
|
||||
price: number; // read price
|
||||
pointPrice: number; // read price/ one ten thousand
|
||||
maxTeamMember: number;
|
||||
maxAppAmount: number; // max app or plugin amount
|
||||
maxDatasetAmount: number;
|
||||
chatHistoryStoreDuration: number; // n day
|
||||
maxDatasetSize: number;
|
||||
customApiKey: boolean;
|
||||
customCopyright: boolean; // feature
|
||||
websiteSyncInterval: number; // n hours
|
||||
trainingWeight: number; // 1~4
|
||||
reRankWeight: number; // 1~4
|
||||
totalPoints: number; // n ten thousand
|
||||
};
|
||||
|
||||
export type StandSubPlanLevelMapType = Record<
|
||||
`${StandardSubLevelEnum}`,
|
||||
TeamStandardSubPlanItemType
|
||||
>;
|
||||
|
||||
export type SubPlanType = {
|
||||
[SubTypeEnum.standard]: StandSubPlanLevelMapType;
|
||||
[SubTypeEnum.extraDatasetSize]: {
|
||||
price: number;
|
||||
};
|
||||
};
|
||||
|
||||
export type TeamSubSchema = {
|
||||
_id: string;
|
||||
teamId: string;
|
||||
type: `${SubTypeEnum}`;
|
||||
mode: `${SubModeEnum}`;
|
||||
status: `${SubStatusEnum}`;
|
||||
renew: boolean;
|
||||
currentMode: `${SubModeEnum}`;
|
||||
nextMode: `${SubModeEnum}`;
|
||||
startTime: Date;
|
||||
expiredTime: Date;
|
||||
price: number;
|
||||
|
||||
currentSubLevel: `${StandardSubLevelEnum}`;
|
||||
nextSubLevel: `${StandardSubLevelEnum}`;
|
||||
pointPrice: number;
|
||||
totalPoints: number;
|
||||
|
||||
currentExtraDatasetSize: number;
|
||||
nextExtraDatasetSize: number;
|
||||
|
||||
currentExtraPoints: number;
|
||||
nextExtraPoints: number;
|
||||
|
||||
surplusPoints: number;
|
||||
|
||||
// abandon
|
||||
datasetStoreAmount?: number;
|
||||
renew?: boolean;
|
||||
};
|
||||
|
||||
export type FeTeamSubType = {
|
||||
[SubTypeEnum.standard]?: TeamSubSchema;
|
||||
[SubTypeEnum.extraDatasetSize]?: TeamSubSchema;
|
||||
[SubTypeEnum.extraPoints]?: TeamSubSchema;
|
||||
|
||||
standardMaxDatasetSize: number;
|
||||
totalPoints: number;
|
||||
usedPoints: number;
|
||||
|
||||
standardMaxPoints: number;
|
||||
datasetMaxSize: number;
|
||||
usedDatasetSize: number;
|
||||
};
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { UploadImgProps } from '@fastgpt/global/common/file/api';
|
||||
import { imageBaseUrl } from '@fastgpt/global/common/file/image/constants';
|
||||
import { MongoImage } from './schema';
|
||||
import { ClientSession } from '../../../common/mongo';
|
||||
|
||||
export function getMongoImgUrl(id: string) {
|
||||
return `${imageBaseUrl}${id}`;
|
||||
@@ -48,15 +49,20 @@ export async function readMongoImg({ id }: { id: string }) {
|
||||
|
||||
export async function delImgByRelatedId({
|
||||
teamId,
|
||||
relateIds
|
||||
relateIds,
|
||||
session
|
||||
}: {
|
||||
teamId: string;
|
||||
relateIds: string[];
|
||||
session: ClientSession;
|
||||
}) {
|
||||
if (relateIds.length === 0) return;
|
||||
|
||||
return MongoImage.deleteMany({
|
||||
teamId,
|
||||
'metadata.relatedId': { $in: relateIds.map((id) => String(id)) }
|
||||
});
|
||||
return MongoImage.deleteMany(
|
||||
{
|
||||
teamId,
|
||||
'metadata.relatedId': { $in: relateIds.map((id) => String(id)) }
|
||||
},
|
||||
{ session }
|
||||
);
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@ export const removeFilesByPaths = (paths: string[]) => {
|
||||
paths.forEach((path) => {
|
||||
fs.unlink(path, (err) => {
|
||||
if (err) {
|
||||
console.error(err);
|
||||
// console.error(err);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,21 +1,21 @@
|
||||
import mongoose, { connectionMongo } from './index';
|
||||
import { connectionMongo, ClientSession } from './index';
|
||||
|
||||
export async function mongoSessionTask(
|
||||
fn: (session: mongoose.mongo.ClientSession) => Promise<any>
|
||||
) {
|
||||
export const mongoSessionRun = async <T = unknown>(fn: (session: ClientSession) => Promise<T>) => {
|
||||
const session = await connectionMongo.startSession();
|
||||
session.startTransaction();
|
||||
|
||||
try {
|
||||
session.startTransaction();
|
||||
|
||||
await fn(session);
|
||||
const result = await fn(session);
|
||||
|
||||
await session.commitTransaction();
|
||||
await session.endSession();
|
||||
session.endSession();
|
||||
|
||||
return result as T;
|
||||
} catch (error) {
|
||||
console.log(error);
|
||||
|
||||
await session.abortTransaction();
|
||||
await session.endSession();
|
||||
console.error(error);
|
||||
session.endSession();
|
||||
return Promise.reject(error);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
export enum sseResponseEventEnum {
|
||||
error = 'error',
|
||||
answer = 'answer',
|
||||
answer = 'answer', // animation stream
|
||||
response = 'response', // direct response, not animation
|
||||
moduleStatus = 'moduleStatus',
|
||||
appStreamResponse = 'appStreamResponse' // sse response request
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ import { sseResponseEventEnum } from './constant';
|
||||
import { proxyError, ERROR_RESPONSE, ERROR_ENUM } from '@fastgpt/global/common/error/errorCode';
|
||||
import { addLog } from '../system/log';
|
||||
import { clearCookie } from '../../support/permission/controller';
|
||||
import { replaceSensitiveLink } from '@fastgpt/global/common/string/tools';
|
||||
|
||||
export interface ResponseType<T = any> {
|
||||
code: number;
|
||||
@@ -52,7 +53,7 @@ export const jsonRes = <T = any>(
|
||||
res.status(code).json({
|
||||
code,
|
||||
statusText: '',
|
||||
message: message || msg,
|
||||
message: replaceSensitiveLink(message || msg),
|
||||
data: data !== undefined ? data : null
|
||||
});
|
||||
};
|
||||
@@ -90,7 +91,7 @@ export const sseErrRes = (res: NextApiResponse, error: any) => {
|
||||
responseWrite({
|
||||
res,
|
||||
event: sseResponseEventEnum.error,
|
||||
data: JSON.stringify({ message: msg })
|
||||
data: JSON.stringify({ message: replaceSensitiveLink(msg) })
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -49,7 +49,7 @@ export const addLog = {
|
||||
},
|
||||
error(msg: string, error?: any) {
|
||||
this.log('error', msg, {
|
||||
message: error?.message,
|
||||
message: error?.message || error,
|
||||
stack: error?.stack,
|
||||
...(error?.config && {
|
||||
config: {
|
||||
|
||||
7
packages/service/common/system/tools.ts
Normal file
@@ -0,0 +1,7 @@
|
||||
import { isIPv6 } from 'net';
|
||||
|
||||
export const SERVICE_LOCAL_PORT = `${process.env.PORT || 3000}`;
|
||||
export const SERVICE_LOCAL_HOST =
|
||||
process.env.HOSTNAME && isIPv6(process.env.HOSTNAME)
|
||||
? `[${process.env.HOSTNAME}]:${SERVICE_LOCAL_PORT}`
|
||||
: `${process.env.HOSTNAME || 'localhost'}:${SERVICE_LOCAL_PORT}`;
|
||||
@@ -16,4 +16,5 @@ export type InsertVectorProps = {
|
||||
export type EmbeddingRecallProps = {
|
||||
datasetIds: string[];
|
||||
similarity?: number;
|
||||
efSearch?: number;
|
||||
};
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
import { PgVector } from './pg/class';
|
||||
import { getVectorsByText } from '../../core/ai/embedding';
|
||||
import { InsertVectorProps } from './controller.d';
|
||||
import { VectorModelItemType } from '@fastgpt/global/core/ai/model.d';
|
||||
|
||||
const getVectorObj = () => {
|
||||
return new PgVector();
|
||||
@@ -20,7 +21,7 @@ export const insertDatasetDataVector = async ({
|
||||
...props
|
||||
}: InsertVectorProps & {
|
||||
query: string;
|
||||
model: string;
|
||||
model: VectorModelItemType;
|
||||
}) => {
|
||||
const { vectors, charsLength } = await getVectorsByText({
|
||||
model,
|
||||
@@ -43,7 +44,7 @@ export const updateDatasetDataVector = async ({
|
||||
}: InsertVectorProps & {
|
||||
id: string;
|
||||
query: string;
|
||||
model: string;
|
||||
model: VectorModelItemType;
|
||||
}) => {
|
||||
// insert new vector
|
||||
const { charsLength, insertId } = await insertDatasetDataVector(props);
|
||||
|
||||
@@ -121,12 +121,12 @@ export const embeddingRecall = async (
|
||||
): Promise<{
|
||||
results: EmbeddingRecallItemType[];
|
||||
}> => {
|
||||
const { datasetIds, vectors, limit, similarity = 0, retry = 2 } = props;
|
||||
const { datasetIds, vectors, limit, similarity = 0, retry = 2, efSearch = 100 } = props;
|
||||
|
||||
try {
|
||||
const results: any = await PgClient.query(
|
||||
`BEGIN;
|
||||
SET LOCAL hnsw.ef_search = ${global.systemEnv.pgHNSWEfSearch || 100};
|
||||
SET LOCAL hnsw.ef_search = ${efSearch};
|
||||
select id, collection_id, (vector <#> '[${vectors[0]}]') * -1 AS score
|
||||
from ${PgDatasetTableName}
|
||||
where dataset_id IN (${datasetIds.map((id) => `'${String(id)}'`).join(',')})
|
||||
|
||||
@@ -6,10 +6,14 @@ export const baseUrl = process.env.ONEAPI_URL || openaiBaseUrl;
|
||||
|
||||
export const systemAIChatKey = process.env.CHAT_API_KEY || '';
|
||||
|
||||
export const getAIApi = (props?: UserModelSchema['openaiAccount'], timeout = 60000) => {
|
||||
export const getAIApi = (props?: {
|
||||
userKey?: UserModelSchema['openaiAccount'];
|
||||
timeout?: number;
|
||||
}) => {
|
||||
const { userKey, timeout } = props || {};
|
||||
return new OpenAI({
|
||||
apiKey: props?.key || systemAIChatKey,
|
||||
baseURL: props?.baseUrl || baseUrl,
|
||||
apiKey: userKey?.key || systemAIChatKey,
|
||||
baseURL: userKey?.baseUrl || baseUrl,
|
||||
httpAgent: global.httpsAgent,
|
||||
timeout,
|
||||
maxRetries: 2
|
||||
|
||||
@@ -1,15 +1,13 @@
|
||||
import { VectorModelItemType } from '@fastgpt/global/core/ai/model.d';
|
||||
import { getAIApi } from '../config';
|
||||
|
||||
export type GetVectorProps = {
|
||||
model: string;
|
||||
type GetVectorProps = {
|
||||
model: VectorModelItemType;
|
||||
input: string;
|
||||
};
|
||||
|
||||
// text to vector
|
||||
export async function getVectorsByText({
|
||||
model = 'text-embedding-ada-002',
|
||||
input
|
||||
}: GetVectorProps) {
|
||||
export async function getVectorsByText({ model, input }: GetVectorProps) {
|
||||
if (!input) {
|
||||
return Promise.reject({
|
||||
code: 500,
|
||||
@@ -23,7 +21,8 @@ export async function getVectorsByText({
|
||||
// input text to vector
|
||||
const result = await ai.embeddings
|
||||
.create({
|
||||
model,
|
||||
...model.defaultConfig,
|
||||
model: model.model,
|
||||
input: [input]
|
||||
})
|
||||
.then(async (res) => {
|
||||
|
||||
159
packages/service/core/ai/functions/cfr.ts
Normal file
@@ -0,0 +1,159 @@
|
||||
import { replaceVariable } from '@fastgpt/global/common/string/tools';
|
||||
import { getAIApi } from '../config';
|
||||
import { ChatItemType } from '@fastgpt/global/core/chat/type';
|
||||
|
||||
/*
|
||||
cfr: coreference resolution - 指代消除
|
||||
可以根据上下文,完事当前问题指代内容,利于检索。
|
||||
*/
|
||||
|
||||
const defaultPrompt = `请不要回答任何问题。
|
||||
你的任务是结合历史记录,为当前问题,实现代词替换,确保问题描述的对象清晰明确。例如:
|
||||
历史记录:
|
||||
"""
|
||||
Q: 对话背景。
|
||||
A: 关于 FatGPT 的介绍和使用等问题。
|
||||
"""
|
||||
当前问题: 怎么下载
|
||||
输出: FastGPT 怎么下载?
|
||||
----------------
|
||||
历史记录:
|
||||
"""
|
||||
Q: 报错 "no connection"
|
||||
A: FastGPT 报错"no connection"可能是因为……
|
||||
"""
|
||||
当前问题: 怎么解决
|
||||
输出: FastGPT 报错"no connection"如何解决?
|
||||
----------------
|
||||
历史记录:
|
||||
"""
|
||||
Q: 作者是谁?
|
||||
A: FastGPT 的作者是 labring。
|
||||
"""
|
||||
当前问题: 介绍下他
|
||||
输出: 介绍下 FastGPT 的作者 labring。
|
||||
----------------
|
||||
历史记录:
|
||||
"""
|
||||
Q: 作者是谁?
|
||||
A: FastGPT 的作者是 labring。
|
||||
"""
|
||||
当前问题: 我想购买商业版。
|
||||
输出: FastGPT 商业版如何购买?
|
||||
----------------
|
||||
历史记录:
|
||||
"""
|
||||
Q: 对话背景。
|
||||
A: 关于 FatGPT 的介绍和使用等问题。
|
||||
"""
|
||||
当前问题: nh
|
||||
输出: nh
|
||||
----------------
|
||||
历史记录:
|
||||
"""
|
||||
Q: FastGPT 如何收费?
|
||||
A: FastGPT 收费可以参考……
|
||||
"""
|
||||
当前问题: 你知道 laf 么?
|
||||
输出: 你知道 laf 么?
|
||||
----------------
|
||||
历史记录:
|
||||
"""
|
||||
Q: FastGPT 的优势
|
||||
A: 1. 开源
|
||||
2. 简便
|
||||
3. 扩展性强
|
||||
"""
|
||||
当前问题: 介绍下第2点。
|
||||
输出: 介绍下 FastGPT 简便的优势。
|
||||
----------------
|
||||
历史记录:
|
||||
"""
|
||||
Q: 什么是 FastGPT?
|
||||
A: FastGPT 是一个 RAG 平台。
|
||||
Q: 什么是 Sealos?
|
||||
A: Sealos 是一个云操作系统。
|
||||
"""
|
||||
当前问题: 它们有什么关系?
|
||||
输出: FastGPT 和 Sealos 有什么关系?
|
||||
----------------
|
||||
历史记录:
|
||||
"""
|
||||
{{histories}}
|
||||
"""
|
||||
当前问题: {{query}}
|
||||
输出: `;
|
||||
|
||||
export const queryCfr = async ({
|
||||
chatBg,
|
||||
query,
|
||||
histories = [],
|
||||
model
|
||||
}: {
|
||||
chatBg?: string;
|
||||
query: string;
|
||||
histories: ChatItemType[];
|
||||
model: string;
|
||||
}) => {
|
||||
if (histories.length === 0 && !chatBg) {
|
||||
return {
|
||||
rawQuery: query,
|
||||
cfrQuery: query,
|
||||
model,
|
||||
inputTokens: 0,
|
||||
outputTokens: 0
|
||||
};
|
||||
}
|
||||
|
||||
const systemFewShot = chatBg
|
||||
? `Q: 对话背景。
|
||||
A: ${chatBg}
|
||||
`
|
||||
: '';
|
||||
const historyFewShot = histories
|
||||
.map((item) => {
|
||||
const role = item.obj === 'Human' ? 'Q' : 'A';
|
||||
return `${role}: ${item.value}`;
|
||||
})
|
||||
.join('\n');
|
||||
const concatFewShot = `${systemFewShot}${historyFewShot}`.trim();
|
||||
|
||||
const ai = getAIApi({
|
||||
timeout: 480000
|
||||
});
|
||||
|
||||
const result = await ai.chat.completions.create({
|
||||
model: model,
|
||||
temperature: 0.01,
|
||||
max_tokens: 150,
|
||||
messages: [
|
||||
{
|
||||
role: 'user',
|
||||
content: replaceVariable(defaultPrompt, {
|
||||
query: `${query}`,
|
||||
histories: concatFewShot
|
||||
})
|
||||
}
|
||||
],
|
||||
stream: false
|
||||
});
|
||||
|
||||
const answer = result.choices?.[0]?.message?.content || '';
|
||||
if (!answer) {
|
||||
return {
|
||||
rawQuery: query,
|
||||
cfrQuery: query,
|
||||
model,
|
||||
inputTokens: 0,
|
||||
outputTokens: 0
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
rawQuery: query,
|
||||
cfrQuery: answer,
|
||||
model,
|
||||
inputTokens: result.usage?.prompt_tokens || 0,
|
||||
outputTokens: result.usage?.completion_tokens || 0
|
||||
};
|
||||
};
|
||||
@@ -10,10 +10,12 @@ export async function createQuestionGuide({
|
||||
messages: ChatMessageItemType[];
|
||||
model: string;
|
||||
}) {
|
||||
const ai = getAIApi(undefined, 480000);
|
||||
const ai = getAIApi({
|
||||
timeout: 480000
|
||||
});
|
||||
const data = await ai.chat.completions.create({
|
||||
model: model,
|
||||
temperature: 0,
|
||||
temperature: 0.1,
|
||||
max_tokens: 200,
|
||||
messages: [
|
||||
...messages,
|
||||
|
||||