Compare commits


30 Commits

Author SHA1 Message Date
a.e.
4343eecaaf deps: bump pdfjs-dist to 4.10.38 (#4491)
remove deprecated dependency: canvas@2
2025-04-09 23:49:53 +08:00
Archer
c02864facc fix: package plus request (#4492)
* fix plus request (#4476)

* perf: package plus request

* perf: plus request fix

* fix: doc

---------

Co-authored-by: heheer <heheer@sealos.io>
2025-04-09 23:44:14 +08:00
Archer
e4629a5c8c fix: queue (#4485) 2025-04-09 13:43:26 +08:00
a.e.
2dc3cb75fe fix: add backward compatibility patch for regex lookbehind in autolink literals (#4483) 2025-04-09 12:21:10 +08:00
a.e.
431390fe42 fix: Add patch for mdast-util-gfm-autolink-literal@2.0.1 (#4479)
Compatibility patch for Safari versions below 16.4
2025-04-08 18:03:58 +08:00
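
For context on the two compatibility commits above (#4479, #4483): Safari below 16.4 rejects regular expressions containing lookbehind assertions at parse time. The sketch below is a hypothetical illustration of the kind of rewrite such a patch performs — it is not the library's actual regex — replacing a lookbehind with a leading capture group and adjusting the match offset by hand.

```ts
// Hypothetical example only; mdast-util-gfm-autolink-literal's real patterns are more involved.

// Lookbehind version: throws a SyntaxError when parsed by Safari < 16.4.
// const withLookbehind = /(?<=^|\s)www\./;

// Compatible rewrite: capture the boundary explicitly and skip its length afterwards.
const compatible = /(^|\s)(www\.)/;

function findWwwStart(text: string): number {
  const match = compatible.exec(text);
  // Offset past the captured boundary so the index points at "www." itself.
  return match ? match.index + match[1].length : -1;
}

console.log(findWwwStart('see www.example.com')); // 4
```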
Donald Yang
1f5709eda6 Fix the log error when the loop input array is too long (#4478) 2025-04-08 17:03:44 +08:00
Archer
86988e31d9 feat: Add docs deploy to cf (#4475)
* perf: remove loading ui

* feat: config chat file expired time

* 494 doc

* feat: Add docs deploy to cf
2025-04-08 13:50:36 +08:00
Archer
675e8ccedb 494 doc (#4472)
* perf: remove loading ui

* feat: config chat file expired time

* 494 doc

* 494 doc

* 494 doc

* update doc preview action
2025-04-08 13:42:09 +08:00
Archer
9dfafb13bf Action test (#4471)
* perf: remove loading ui

* feat: config chat file expired time

* Update doc

* fix: ts (#4458)

* test

* remove

* 494 doc

* update action
2025-04-08 12:36:00 +08:00
Archer
f642c9603b V4.9.4 feature (#4470)
* Training status (#4424)

* dataset data training state (#4311)

* dataset data training state

* fix

* fix ts

* fix

* fix api format

* fix

* fix

* perf: count training

* format

* fix: dataset training state (#4417)

* fix

* add test

* fix

* fix

* fix test

* fix test

* perf: training count

* count

* loading status

---------

Co-authored-by: heheer <heheer@sealos.io>

* doc

* website sync feature (#4429)

* perf: introduce BullMQ for website sync (#4403)

* perf: introduce BullMQ for website sync

* feat: new redis module

* fix: remove graceful shutdown

* perf: improve UI in dataset detail

- Updated the "change" icon SVG file.
- Modified i18n strings.
- Added new i18n string "immediate_sync".
- Improved UI in dataset detail page, including button icons and
background colors.

* refactor: Add chunkSettings to DatasetSchema

* perf: website sync ux

* env template

* fix: clean up website dataset when updating chunk settings (#4420)

* perf: check setting updated

* perf: worker concurrency

* feat: init script for website sync refactor (#4425)

* website feature doc

---------

Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>

* pro migration (#4388) (#4433)

* pro migration

* reuse customPdfParseType

Co-authored-by: gggaaallleee <91131304+gggaaallleee@users.noreply.github.com>

* perf: remove loading ui

* feat: config chat file expired time

* Redis cache (#4436)

* perf: add Redis cache for vector counting (#4432)

* feat: cache

* perf: get cache key

---------

Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>

* perf: mobile voice input (#4437)

* update:Mobile voice interaction (#4362)

* Add files via upload

* Add files via upload

* Update ollama.md

* Update ollama.md

* Add files via upload

* Update useSpeech.ts

* Update ChatInput.tsx

* Update useSpeech.ts

* Update ChatInput.tsx

* Update useSpeech.ts

* Update constants.ts

* Add files via upload

* Update ChatInput.tsx

* Update useSpeech.ts

* Update useSpeech.ts

* Update useSpeech.ts

* Update ChatInput.tsx

* Add files via upload

* Update common.json

* Update VoiceInput.tsx

* Update ChatInput.tsx

* Update VoiceInput.tsx

* Update useSpeech.ts

* Update useSpeech.ts

* Update common.json

* Update common.json

* Update common.json

* Update VoiceInput.tsx

* Update VoiceInput.tsx

* Update ChatInput.tsx

* Update VoiceInput.tsx

* Update ChatInput.tsx

* Update VoiceInput.tsx

* Update ChatInput.tsx

* Update useSpeech.ts

* Update common.json

* Update chat.json

* Update common.json

* Update chat.json

* Update common.json

* Update chat.json

* Update VoiceInput.tsx

* Update ChatInput.tsx

* Update useSpeech.ts

* Update VoiceInput.tsx

* speech ui

* Improve the voice input component: adjust input-box display logic, fix the voice-input overlay styling, update canvas background opacity, and enhance the interaction experience. (#4435)

* perf: mobile voice input

---------

Co-authored-by: dreamer6680 <1468683855@qq.com>

* Test completion v2 (#4438)

* add v2 completions (#4364)

* add v2 completions

* completion config

* config version

* fix

* frontend

* doc

* fix

* fix: completions v2 api

---------

Co-authored-by: heheer <heheer@sealos.io>

* package

* Test mongo log (#4443)

* feat: mongodb-log (#4426)

* perf: mongo log

* feat: completions stop reasoner

* mongo db log

---------

Co-authored-by: Finley Ge <32237950+FinleyGe@users.noreply.github.com>

* update doc

* Update doc

* fix external var ui (#4444)

* action

* fix: ts (#4458)

* preview doc action

add docs preview permission

update preview action

update action

* update doc (#4460)

* update preview action

* update doc

* remove

* update

* schema

* update mq export;perf: redis cache  (#4465)

* perf: redis cache

* update mq export

* perf: website sync error tip

* add error worker

* website sync ui (#4466)

* Updated the dynamic display of the voice input pop-up (#4469)

* Update VoiceInput.tsx

* Update VoiceInput.tsx

* Update VoiceInput.tsx

* fix: voice input

---------

Co-authored-by: heheer <heheer@sealos.io>
Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>
Co-authored-by: gggaaallleee <91131304+gggaaallleee@users.noreply.github.com>
Co-authored-by: dreamer6680 <1468683855@qq.com>
Co-authored-by: Finley Ge <32237950+FinleyGe@users.noreply.github.com>
2025-04-08 12:05:04 +08:00
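
Among the bullets above, "perf: introduce BullMQ for website sync (#4403)" and the new Redis module are the core of the website-sync rework. As a rough sketch of the queue/worker pattern involved — the queue name, job payload, and connection details here are assumptions, not FastGPT's actual code:

```ts
import { Queue, Worker } from 'bullmq';

const connection = { host: 'localhost', port: 6379, password: 'mypassword' };

// Producer side: enqueue a sync job when a website dataset should be re-crawled.
const websiteSyncQueue = new Queue('websiteSync', { connection });
await websiteSyncQueue.add('sync', { datasetId: 'dataset-id-placeholder' });

// Consumer side: a worker picks jobs up and re-indexes the dataset.
const worker = new Worker(
  'websiteSync',
  async (job) => {
    // crawl the configured URL, split into chunks, and push training data here
    console.log('syncing dataset', job.data.datasetId);
  },
  { connection, concurrency: 2 } // the "worker concurrency" bullet tunes this kind of setting
);
```

Routing syncs through a Redis-backed queue lets jobs survive restarts and caps how many crawls run at once.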
Archer
5839325f77 test (#4456)
* test

* update action

* remove test
2025-04-06 18:45:04 +08:00
Archer
73c997f7c5 Update 491.md (#4453) 2025-04-04 23:35:55 +08:00
Finley Ge
ff92dced98 chore: security update (#4447) 2025-04-03 21:58:32 +08:00
Carson Yang
7a0747947c Enhance GitHub Actions workflows security and permissions (#4445)
Signed-off-by: Carson Yang <yangchuansheng33@gmail.com>
2025-04-03 14:01:17 +08:00
a.e.
5ad383bc6e fix: add connection timeout to database settings (#4434) 2025-04-02 18:11:23 +08:00
heheer
c85b719384 add chunk reader doc (#4422) 2025-04-01 18:55:16 +08:00
Finley Ge
aeedc2fada Update sso.md (#4412) 2025-03-31 22:08:20 +08:00
Archer
be34b69f9b update doc (#4408) 2025-03-31 15:11:36 +08:00
Mr-Chiang
944774ec5f Update: same search issue as the knowledge base search below (#4405) 2025-03-31 13:34:07 +08:00
Mr-Chiang
5b21b4b674 Update: after searching a knowledge base by name returns a folder, clicking the folder does not clear the search box, so the folder's next level cannot be entered; the workbench has the same issue (#4404) 2025-03-31 13:33:49 +08:00
dependabot[bot]
b0f0afabd2 chore(deps): bump axios in /plugins/webcrawler/SPIDER (#4399)
Bumps [axios](https://github.com/axios/axios) from 1.7.9 to 1.8.2.
- [Release notes](https://github.com/axios/axios/releases)
- [Changelog](https://github.com/axios/axios/blob/v1.x/CHANGELOG.md)
- [Commits](https://github.com/axios/axios/compare/v1.7.9...v1.8.2)

---
updated-dependencies:
- dependency-name: axios
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-03-31 11:00:29 +08:00
Finley Ge
d9aea53d13 fix: sso.md (#4401)
should be EXTERNAL_USER_SYSTEM_BASE_URL
2025-03-31 11:00:14 +08:00
Finley Ge
73db92e4ad chore: edit the version number (#4398) 2025-03-31 10:19:42 +08:00
Archer
267cc5702c update doc (#4386)
* version type

* doc

* doc

* version yml
2025-03-28 18:16:59 +08:00
Archer
540f321fc9 Test email plugin (#4387)
* add email plugin (#4343)

* add email plugin

* remove console.log

---------

Co-authored-by: zhengshuai.li <zhengshuai.li@cloudpense.com>

* perf: smtp email

---------

Co-authored-by: lzs2000131 <lzs2000131@163.com>
Co-authored-by: zhengshuai.li <zhengshuai.li@cloudpense.com>
2025-03-28 18:07:55 +08:00
heheer
a37c75159f fix child app update variables (#4385) 2025-03-28 17:36:07 +08:00
Archer
0ed99d8c9a Check debug (#4384)
* feat : Added support for interactive nodes in the debugging interface (#4339)

* feat: add VSCode launch configuration and enhance debug API handler

* feat: refactor debug API handler to streamline workflow processing and enhance interactive chat features

* feat: enhance debug API handler with structured input forms and improved query handling

* feat: enhance debug API handler to support optional query and histories parameters

* feat: simplify query and histories initialization in debug API handler

* feat: add realmode parameter to workflow dispatch and update interactive handling

* feat: add optional query parameter to PostWorkflowDebugProps and remove realmode from ModuleDispatchProps

* feat: add history parameter to PostWorkflowDebugProps and update related components

* feat: remove realmode

* feat: simplify handler parameter destructuring in debug.ts

* feat: remove unused interactive prop from WholeResponseContent component

* feat: refactor onNextNodeDebug to use parameter object for better readability

* feat: Merge selections and next actions to remove unused state management

* feat: add NodeDebugResponse component to enhance debugging

* feat: Simplify the import statements in InteractiveComponents.tsx

* feat: Update the handler function to use default parameters to simplify the code

* feat: Add optional workflowInteractiveResponse field to PostWorkflowDebugResponse type

* feat: Add the workflowInteractiveResponse field in the debugging handler to enhance response capabilities

* feat: Added workflowInteractiveResponse field in FlowNodeItemType to enhance responsiveness

* feat: Refactor NodeDebugResponse to utilize workflowInteractiveResponse for improved interactivity

* feat: Extend UserSelectInteractive and UserInputInteractive types to inherit from InteractiveBasicType

* feat: Refactor NodeDebugResponse to streamline interactive handling and improve code clarity

* feat: refactor interactive debug logic; create a shared hook to simplify user-select and user-input handling

* fix: type error

* feat: refactor AIResponseBox component, simplify user interaction logic and introduce shared form components

* feat: clean up AIResponseBox and form component code, remove redundant comments and unused imports

* fix: type error

* feat: refactor AIResponseBox component, simplify type definitions and improve code structure

* refactor: change the FormItem interface to a type definition

* refactor: change the NodeDebugResponseProps interface to a type definition

* refactor: remove unnecessary entry-node check, simplify debug handling logic

* feat: move the debug interactive components

* refactor: make the InteractiveBasicType properties optional, simplify the data structure

* refactor: optimize type definitions

* refactor: remove unused ChatItemType and UserChatItemValueItemType imports

* refactor: change interface definitions to type aliases

* refactor: update type definitions, use type aliases to simplify the code

* refactor: use type-only imports, refactor AIResponseBox component

* refactor: extract description-box and form-item label components

* refactor: remove extra blank lines

* refactor: remove extra blank lines and comments

* refactor: remove extra blank lines, simplify AIResponseBox component code

* refactor: move FormComponents into InteractiveComponents, simplify code structure

* refactor: remove extra blank lines, simplify NodeDebugResponse component code

* refactor: update import statements, use the type keyword for type imports

* refactor: enable the verbatimModuleSyntax option in tsconfig.json

* Revert "refactor: enable the verbatimModuleSyntax option in tsconfig.json"

This reverts commit 2b335a9938.

* revert: rendertool

* refactor: Remove unused imports and functions to simplify code

* perf: debug interactive

---------

Co-authored-by: Theresa <63280168+sd0ric4@users.noreply.github.com>
2025-03-28 17:09:08 +08:00
Archer
2d3ae7f944 doc (#4381)
* doc

* doc
2025-03-28 13:52:08 +08:00
Archer
565a966d19 Python Sandbox (#4380)
* Python3 Sandbox (#3944)

* update python box (#4251)

* update python box

* Adjust the height of the NodeCode border.

* update python sandbox and add test systemcall bash

* update sandbox

* add VERSION_RELEASE (#4376)

* save empty docx

* fix pythonbox log error

* fix: js template

---------

Co-authored-by: dogfar <37035781+dogfar@users.noreply.github.com>
Co-authored-by: gggaaallleee <91131304+gggaaallleee@users.noreply.github.com>
Co-authored-by: gggaaallleee <1293587368@qq.com>
2025-03-28 13:45:09 +08:00
Shixian Sheng
8323c2d27e Fixed several links (#4377)
* Update bge-rerank.md

* Update bge-rerank.md

* Update chatglm2.md

* Update README.md
2025-03-28 10:59:12 +08:00
247 changed files with 8401 additions and 2498 deletions

View File

@@ -1,4 +1,4 @@
yangchuansheng/fastgpt-imgs:
- source: docSite/assets/imgs/
dest: imgs/
deleteOrphaned: true
deleteOrphaned: true

.github/gh-bot.yml (30 lines deleted)
View File

@@ -1,30 +0,0 @@
version: v1
debug: true
action:
printConfig: false
release:
retry: 15s
actionName: Release
allowOps:
- cuisongliu
bot:
prefix: /
spe: _
allowOps:
- sealos-ci-robot
- sealos-release-robot
email: sealos-ci-robot@sealos.io
username: sealos-ci-robot
repo:
org: false
message:
success: |
🤖 says: Hooray! The action {{.Body}} has been completed successfully. 🎉
format_error: |
🤖 says: ‼️ There is a formatting issue with the action, kindly verify the action's format.
permission_error: |
🤖 says: ‼️ The action doesn't have permission to trigger.
release_error: |
🤖 says: ‼️ Release action failed.
Error details: {{.Error}}

View File

@@ -1,4 +1,4 @@
name: Deploy doc image to vercel
name: Deploy doc image to cf
on:
workflow_dispatch:
@@ -20,6 +20,11 @@ jobs:
# The type of runner that the job will run on
runs-on: ubuntu-22.04
permissions:
contents: write
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
# Job outputs
outputs:
docs: ${{ steps.filter.outputs.docs }}
@@ -58,20 +63,9 @@ jobs:
- name: Build
run: cd docSite && hugo mod get -u github.com/colinwilson/lotusdocs@6d0568e && hugo -v --minify
# Step 5 - Push our generated site to Vercel
- name: Deploy to Vercel
uses: amondnet/vercel-action@v25
id: vercel-action
with:
vercel-token: ${{ secrets.VERCEL_TOKEN }} # Required
vercel-org-id: ${{ secrets.VERCEL_ORG_ID }} #Required
vercel-project-id: ${{ secrets.VERCEL_PROJECT_ID }} #Required
github-comment: false
vercel-args: '--prod --local-config ../vercel.json' # Optional
working-directory: docSite/public
- name: Deploy to GitHub Pages
uses: peaceiris/actions-gh-pages@v3
uses: peaceiris/actions-gh-pages@v4
if: github.ref == 'refs/heads/main'
with:
github_token: ${{ secrets.GH_PAT }}
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: docSite/public

View File

@@ -10,6 +10,13 @@ on:
jobs:
build-fastgpt-docs-images:
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
attestations: write
id-token: write
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -27,7 +34,6 @@ jobs:
with:
# list of Docker images to use as base name for tags
images: |
${{ secrets.DOCKER_HUB_NAME }}/fastgpt-docs
ghcr.io/${{ github.repository_owner }}/fastgpt-docs
registry.cn-hangzhou.aliyuncs.com/${{ secrets.ALI_HUB_USERNAME }}/fastgpt-docs
tags: |
@@ -40,18 +46,12 @@ jobs:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_HUB_NAME }}
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
- name: Login to ghcr.io
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GH_PAT }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Aliyun
uses: docker/login-action@v3
@@ -70,6 +70,7 @@ jobs:
labels: ${{ steps.meta.outputs.labels }}
outputs:
tags: ${{ steps.datetime.outputs.datetime }}
update-docs-image:
needs: build-fastgpt-docs-images
runs-on: ubuntu-20.04

View File

@@ -10,6 +10,12 @@ on:
jobs:
# This workflow contains jobs "deploy-production"
deploy-preview:
permissions:
contents: read
packages: write
attestations: write
id-token: write
pull-requests: write
# The environment this job references
environment:
name: Preview
@@ -32,6 +38,7 @@ jobs:
repository: ${{ github.event.pull_request.head.repo.full_name }}
submodules: recursive # Fetch submodules
fetch-depth: 0 # Fetch all history for .GitInfo and .Lastmod
token: ${{ secrets.GITHUB_TOKEN }}
# Step 2 Detect changes to Docs Content
- name: Detect changes in doc content
@@ -43,10 +50,6 @@ jobs:
- 'docSite/content/docs/**'
base: main
- name: Add cdn for images
run: |
sed -i "s#\](/imgs/#\](https://cdn.jsdelivr.net/gh/yangchuansheng/fastgpt-imgs@main/imgs/#g" $(grep -rl "\](/imgs/" docSite/content/zh-cn/docs)
# Step 3 - Install Hugo (specific version)
- name: Install Hugo
uses: peaceiris/actions-hugo@v2
@@ -58,39 +61,35 @@ jobs:
- name: Build
run: cd docSite && hugo mod get -u github.com/colinwilson/lotusdocs@6d0568e && hugo -v --minify
# Step 5 - Push our generated site to Vercel
- name: Deploy to Vercel
uses: amondnet/vercel-action@v25
id: vercel-action
# Step 5 - Push our generated site to Cloudflare
- name: Deploy to Cloudflare Pages
id: deploy
uses: cloudflare/wrangler-action@v3
with:
vercel-token: ${{ secrets.VERCEL_TOKEN }} # Required
vercel-org-id: ${{ secrets.VERCEL_ORG_ID }} #Required
vercel-project-id: ${{ secrets.VERCEL_PROJECT_ID }} #Required
github-comment: false
vercel-args: '--local-config ../vercel.json' # Optional
working-directory: docSite/public
alias-domains: | #Optional
fastgpt-staging.vercel.app
docsOutput:
needs: [deploy-preview]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
- name: Write md
run: |
echo "# 🤖 Generated by deploy action" > report.md
echo "[👀 Visit Preview](${{ needs.deploy-preview.outputs.url }})" >> report.md
cat report.md
- name: Gh Rebot for Sealos
uses: labring/gh-rebot@v0.0.6
if: ${{ (github.event_name == 'pull_request_target') }}
with:
version: v0.0.6
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
command: pages deploy ./docSite/public --project-name=fastgpt-doc
packageManager: npm
- name: Create deployment status comment
if: always()
env:
GH_TOKEN: '${{ secrets.GH_PAT }}'
SEALOS_TYPE: 'pr_comment'
SEALOS_FILENAME: 'report.md'
SEALOS_REPLACE_TAG: 'DEFAULT_REPLACE_DEPLOY'
JOB_STATUS: ${{ job.status }}
PREVIEW_URL: ${{ steps.deploy.outputs.deployment-url }}
uses: actions/github-script@v6
with:
token: ${{ secrets.GITHUB_TOKEN }}
script: |
const success = process.env.JOB_STATUS === 'success';
const deploymentUrl = `${process.env.PREVIEW_URL}`;
const status = success ? '✅ Success' : '❌ Failed';
console.log(process.env.JOB_STATUS);
const commentBody = `**Deployment Status: ${status}**
${success ? `🔗 Preview URL: ${deploymentUrl}` : ''}`;
await github.rest.issues.createComment({
...context.repo,
issue_number: context.payload.pull_request.number,
body: commentBody
});

View File

@@ -1,6 +1,6 @@
name: Sync images
on:
pull_request_target:
pull_request:
branches:
- main
paths:
@@ -15,13 +15,6 @@ jobs:
sync:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
if: ${{ (github.event_name == 'pull_request_target') }}
with:
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
- name: Checkout
uses: actions/checkout@v3
@@ -32,4 +25,4 @@ jobs:
CONFIG_PATH: .github/sync_imgs.yml
ORIGINAL_MESSAGE: true
SKIP_PR: true
COMMIT_EACH_FILE: false
COMMIT_EACH_FILE: false

View File

@@ -9,6 +9,11 @@ on:
- 'main'
jobs:
build-fastgpt-images:
permissions:
packages: write
contents: read
attestations: write
id-token: write
runs-on: ubuntu-20.04
if: github.repository != 'labring/FastGPT'
steps:
@@ -32,7 +37,7 @@ jobs:
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GH_PAT }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Set DOCKER_REPO_TAGGED based on branch or tag
run: |
echo "DOCKER_REPO_TAGGED=ghcr.io/${{ github.repository_owner }}/fastgpt:latest" >> $GITHUB_ENV

View File

@@ -9,6 +9,11 @@ on:
- 'v*'
jobs:
build-fastgpt-images:
permissions:
packages: write
contents: read
attestations: write
id-token: write
runs-on: ubuntu-20.04
steps:
# install env
@@ -39,7 +44,7 @@ jobs:
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GH_PAT }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Ali Hub
uses: docker/login-action@v2
with:
@@ -91,6 +96,11 @@ jobs:
-t ${Docker_Hub_Latest} \
.
build-fastgpt-images-sub-route:
permissions:
packages: write
contents: read
attestations: write
id-token: write
runs-on: ubuntu-20.04
steps:
# install env
@@ -121,7 +131,7 @@ jobs:
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GH_PAT }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Ali Hub
uses: docker/login-action@v2
with:
@@ -174,6 +184,11 @@ jobs:
-t ${Docker_Hub_Latest} \
.
build-fastgpt-images-sub-route-gchat:
permissions:
packages: write
contents: read
attestations: write
id-token: write
runs-on: ubuntu-20.04
steps:
# install env
@@ -204,7 +219,7 @@ jobs:
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GH_PAT }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Ali Hub
uses: docker/login-action@v2
with:

View File

@@ -5,6 +5,13 @@ on:
jobs:
preview-fastgpt-images:
permissions:
contents: read
packages: write
attestations: write
id-token: write
pull-requests: write
runs-on: ubuntu-20.04
steps:
- name: Checkout
@@ -12,8 +19,9 @@ jobs:
with:
ref: ${{ github.event.pull_request.head.ref }}
repository: ${{ github.event.pull_request.head.repo.full_name }}
submodules: recursive # Fetch submodules
fetch-depth: 0 # Fetch all history for .GitInfo and .Lastmod
token: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
@@ -25,15 +33,18 @@ jobs:
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GH_PAT }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Set DOCKER_REPO_TAGGED based on branch or tag
run: |
echo "DOCKER_REPO_TAGGED=ghcr.io/${{ github.repository_owner }}/fastgpt-pr:${{ github.event.pull_request.head.sha }}" >> $GITHUB_ENV
- name: Build image for PR
env:
DOCKER_REPO_TAGGED: ${{ env.DOCKER_REPO_TAGGED }}
@@ -48,20 +59,13 @@ jobs:
--cache-to=type=local,dest=/tmp/.buildx-cache \
-t ${DOCKER_REPO_TAGGED} \
.
# Add write md step after build
- name: Write md
run: |
echo "# 🤖 Generated by deploy action" > report.md
echo "📦 Preview Image: \`${DOCKER_REPO_TAGGED}\`" >> report.md
cat report.md
- name: Gh Rebot for Sealos
uses: labring/gh-rebot@v0.0.6
if: ${{ (github.event_name == 'pull_request_target') }}
- uses: actions/github-script@v7
with:
version: v0.0.6
env:
GH_TOKEN: '${{ secrets.GH_PAT }}'
SEALOS_TYPE: 'pr_comment'
SEALOS_FILENAME: 'report.md'
SEALOS_REPLACE_TAG: 'DEFAULT_REPLACE_DEPLOY'
github-token: ${{secrets.GITHUB_TOKEN}}
script: |
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: 'Preview Image: `${{ env.DOCKER_REPO_TAGGED }}`'
})

View File

@@ -8,6 +8,11 @@ on:
jobs:
helm:
permissions:
packages: write
contents: read
attestations: write
id-token: write
runs-on: ubuntu-20.04
steps:
- name: Checkout
@@ -20,7 +25,7 @@ jobs:
run: echo "tag=$(git describe --tags)" >> $GITHUB_OUTPUT
- name: Release Helm
run: |
echo ${{ secrets.GH_PAT }} | helm registry login ghcr.io -u ${{ github.repository_owner }} --password-stdin
echo ${{ secrets.GITHUB_TOKEN }} | helm registry login ghcr.io -u ${{ github.repository_owner }} --password-stdin
export APP_VERSION=${{ steps.vars.outputs.tag }}
export HELM_VERSION=${{ steps.vars.outputs.tag }}
export HELM_REPO=ghcr.io/${{ github.repository_owner }}

View File

@@ -8,6 +8,11 @@ on:
- 'v*'
jobs:
build-fastgpt-sandbox-images:
permissions:
packages: write
contents: read
attestations: write
id-token: write
runs-on: ubuntu-20.04
steps:
# install env
@@ -38,7 +43,7 @@ jobs:
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GH_PAT }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Ali Hub
uses: docker/login-action@v2
with:

.vscode/launch.json (new file, 39 lines)
View File

@@ -0,0 +1,39 @@
{
"version": "0.2.0",
"configurations": [
{
"name": "Next.js: debug server-side",
"type": "node-terminal",
"request": "launch",
"command": "pnpm run dev",
"cwd": "${workspaceFolder}/projects/app"
},
{
"name": "Next.js: debug client-side",
"type": "chrome",
"request": "launch",
"url": "http://localhost:3000"
},
{
"name": "Next.js: debug client-side (Edge)",
"type": "msedge",
"request": "launch",
"url": "http://localhost:3000"
},
{
"name": "Next.js: debug full stack",
"type": "node-terminal",
"request": "launch",
"command": "pnpm run dev",
"cwd": "${workspaceFolder}/projects/app",
"skipFiles": ["<node_internals>/**"],
"serverReadyAction": {
"action": "debugWithEdge",
"killOnServerStop": true,
"pattern": "- Local:.+(https?://.+)",
"uriFormat": "%s",
"webRoot": "${workspaceFolder}/projects/app"
}
}
]
}

View File

@@ -110,19 +110,31 @@ services:
# 等待docker-entrypoint.sh脚本执行的MongoDB服务进程
wait $$!
redis:
image: redis:7.2-alpine
container_name: redis
# ports:
# - 6379:6379
networks:
- fastgpt
restart: always
command: |
redis-server --requirepass mypassword --loglevel warning --maxclients 10000 --appendonly yes --save 60 10 --maxmemory 4gb --maxmemory-policy noeviction
volumes:
- ./redis/data:/data
# fastgpt
sandbox:
container_name: sandbox
image: ghcr.io/labring/fastgpt-sandbox:v4.9.1-fix2 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.1-fix2 # 阿里云
image: ghcr.io/labring/fastgpt-sandbox:v4.9.3 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.3 # 阿里云
networks:
- fastgpt
restart: always
fastgpt:
container_name: fastgpt
image: ghcr.io/labring/fastgpt:v4.9.1-fix2 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.1-fix2 # 阿里云
image: ghcr.io/labring/fastgpt:v4.9.3 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.3 # 阿里云
ports:
- 3000:3000
networks:
@@ -157,6 +169,8 @@ services:
# zilliz 连接参数
- MILVUS_ADDRESS=http://milvusStandalone:19530
- MILVUS_TOKEN=none
# Redis 地址
- REDIS_URL=redis://default:mypassword@redis:6379
# sandbox 地址
- SANDBOX_URL=http://sandbox:3000
# 日志等级: debug, info, warn, error

View File

@@ -69,18 +69,31 @@ services:
# 等待docker-entrypoint.sh脚本执行的MongoDB服务进程
wait $$!
redis:
image: redis:7.2-alpine
container_name: redis
# ports:
# - 6379:6379
networks:
- fastgpt
restart: always
command: |
redis-server --requirepass mypassword --loglevel warning --maxclients 10000 --appendonly yes --save 60 10 --maxmemory 4gb --maxmemory-policy noeviction
volumes:
- ./redis/data:/data
# fastgpt
sandbox:
container_name: sandbox
image: ghcr.io/labring/fastgpt-sandbox:v4.9.1-fix2 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.1-fix2 # 阿里云
image: ghcr.io/labring/fastgpt-sandbox:v4.9.3 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.3 # 阿里云
networks:
- fastgpt
restart: always
fastgpt:
container_name: fastgpt
image: ghcr.io/labring/fastgpt:v4.9.1-fix2 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.1-fix2 # 阿里云
image: ghcr.io/labring/fastgpt:v4.9.3 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.3 # 阿里云
ports:
- 3000:3000
networks:
@@ -114,6 +127,8 @@ services:
- MONGODB_URI=mongodb://myusername:mypassword@mongo:27017/fastgpt?authSource=admin
# pg 连接参数
- PG_URL=postgresql://username:password@pg:5432/postgres
# Redis 连接参数
- REDIS_URL=redis://default:mypassword@redis:6379
# sandbox 地址
- SANDBOX_URL=http://sandbox:3000
# 日志等级: debug, info, warn, error
@@ -132,7 +147,7 @@ services:
# AI Proxy
aiproxy:
image: ghcr.io/labring/aiproxy:v0.1.3
image: ghcr.io/labring/aiproxy:v0.1.5
# image: registry.cn-hangzhou.aliyuncs.com/labring/aiproxy:v0.1.3 # 阿里云
container_name: aiproxy
restart: unless-stopped

View File

@@ -51,17 +51,30 @@ services:
# 等待docker-entrypoint.sh脚本执行的MongoDB服务进程
wait $$!
redis:
image: redis:7.2-alpine
container_name: redis
# ports:
# - 6379:6379
networks:
- fastgpt
restart: always
command: |
redis-server --requirepass mypassword --loglevel warning --maxclients 10000 --appendonly yes --save 60 10 --maxmemory 4gb --maxmemory-policy noeviction
volumes:
- ./redis/data:/data
sandbox:
container_name: sandbox
image: ghcr.io/labring/fastgpt-sandbox:v4.9.1-fix2 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.1-fix2 # 阿里云
image: ghcr.io/labring/fastgpt-sandbox:v4.9.3 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt-sandbox:v4.9.3 # 阿里云
networks:
- fastgpt
restart: always
fastgpt:
container_name: fastgpt
image: ghcr.io/labring/fastgpt:v4.9.1-fix2 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.1-fix2 # 阿里云
image: ghcr.io/labring/fastgpt:v4.9.3 # git
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:v4.9.3 # 阿里云
ports:
- 3000:3000
networks:
@@ -92,6 +105,8 @@ services:
- FILE_TOKEN_KEY=filetoken
# MongoDB 连接参数. 用户名myusername,密码mypassword。
- MONGODB_URI=mongodb://myusername:mypassword@mongo:27017/fastgpt?authSource=admin
# Redis 连接参数
- REDIS_URI=redis://default:mypassword@redis:6379
# zilliz 连接参数
- MILVUS_ADDRESS=zilliz_cloud_address
- MILVUS_TOKEN=zilliz_cloud_token

12 binary image files added (44–294 KiB each; previews not shown).

View File

@@ -31,9 +31,9 @@ weight: 920
3 个模型代码分别为:
1. [https://github.com/labring/FastGPT/tree/main/plugins/rerank-bge/bge-reranker-base](https://github.com/labring/FastGPT/tree/main/plugins/rerank-bge/bge-reranker-base)
2. [https://github.com/labring/FastGPT/tree/main/plugins/rerank-bge/bge-reranker-large](https://github.com/labring/FastGPT/tree/main/plugins/rerank-bge/bge-reranker-large)
3. [https://github.com/labring/FastGPT/tree/main/plugins/rerank-bge/bge-reranker-v2-m3](https://github.com/labring/FastGPT/tree/main/plugins/rerank-bge/bge-reranker-v2-m3)
1. [https://github.com/labring/FastGPT/tree/main/plugins/model/rerank-bge/bge-reranker-base](https://github.com/labring/FastGPT/tree/main/plugins/model/rerank-bge/bge-reranker-base)
2. [https://github.com/labring/FastGPT/tree/main/plugins/model/rerank-bge/bge-reranker-large](https://github.com/labring/FastGPT/tree/main/plugins/model/rerank-bge/bge-reranker-large)
3. [https://github.com/labring/FastGPT/tree/main/plugins/model/rerank-bge/bge-reranker-v2-m3](https://github.com/labring/FastGPT/tree/main/plugins/model/rerank-bge/bge-reranker-v2-m3)
### 3. 安装依赖

View File

@@ -46,7 +46,7 @@ ChatGLM2-6B 是开源中英双语对话模型 ChatGLM-6B 的第二代版本,
### 源码部署
1. 根据上面的环境配置配置好环境,具体教程自行 GPT
2. 下载 [python 文件](https://github.com/labring/FastGPT/blob/main/files/models/ChatGLM2/openai_api.py)
2. 下载 [python 文件](https://github.com/labring/FastGPT/blob/main/plugins/model/llm-ChatGLM2/openai_api.py)
3. 在命令行输入命令 `pip install -r requirements.txt`
4. 打开你需要启动的 py 文件,在代码的 `verify_token` 方法中配置 token这里的 token 只是加一层验证,防止接口被人盗用;
5. 执行命令 `python openai_api.py --model_name 16`。这里的数字根据上面的配置进行选择。

View File

@@ -18,12 +18,14 @@ weight: 852
{{% alert icon="🤖 " context="success" %}}
* 该接口的 API Key 需使用`应用特定的 key`,否则会报错。
<!-- * 对话现在有`v1``v2`两个接口可以按需使用v2 自 4.9.4 版本新增v1 接口同时不再维护 -->
* 有些包调用时,`BaseUrl`需要添加`v1`路径有些不需要如果出现404情况可补充`v1`重试。
{{% /alert %}}
## 请求简易应用和工作流
对话接口兼容`GPT`的接口!如果你的项目使用的是标准的`GPT`官方接口,可以直接通过修改`BaseUrl``Authorization`来访问 FastGpt 应用,不过需要注意下面几个规则:
`v1`对话接口兼容`GPT`的接口!如果你的项目使用的是标准的`GPT`官方接口,可以直接通过修改`BaseUrl``Authorization`来访问 FastGpt 应用,不过需要注意下面几个规则:
{{% alert icon="🤖 " context="success" %}}
* 传入的`model``temperature`等参数字段均无效,这些字段由编排决定,不会根据 API 参数改变。
@@ -65,7 +67,7 @@ curl --location --request POST 'http://localhost:3000/api/v1/chat/completions' \
{{< markdownify >}}
*`messages`有部分区别,其他参数一致。
* 目前不支持上文件,需上传到自己的对象存储中,获取对应的文件链接。
* 目前不支持上文件,需上传到自己的对象存储中,获取对应的文件链接。
```bash
curl --location --request POST 'http://localhost:3000/api/v1/chat/completions' \
@@ -116,14 +118,284 @@ curl --location --request POST 'http://localhost:3000/api/v1/chat/completions' \
- variables: 模块变量,一个对象,会替换模块中,输入框内容里的`{{key}}`
{{% /alert %}}
{{< /markdownify >}}
{{< /tab >}}
{{< /tabs >}}
<!-- #### v2
v1,v2 接口请求参数一致,仅请求地址不一样。
{{< tabs tabTotal="3" >}}
{{< tab tabName="基础请求示例" >}}
{{< markdownify >}}
```bash
curl --location --request POST 'http://localhost:3000/api/v2/chat/completions' \
--header 'Authorization: fastgpt-xxxxxx' \
--header 'Content-Type: application/json' \
--data-raw '{
"chatId": "my_chatId",
"stream": false,
"detail": false,
"responseChatItemId": "my_responseChatItemId",
"variables": {
"uid": "asdfadsfasfd2323",
"name": "张三"
},
"messages": [
{
"role": "user",
"content": "你是谁"
}
]
}'
```
{{< /markdownify >}}
{{< /tab >}}
{{< tab tabName="图片/文件请求示例" >}}
{{< markdownify >}}
*`messages`有部分区别,其他参数一致。
* 目前不支持上传文件,需上传到自己的对象存储中,获取对应的文件链接。
```bash
curl --location --request POST 'http://localhost:3000/api/v2/chat/completions' \
--header 'Authorization: Bearer fastgpt-xxxxxx' \
--header 'Content-Type: application/json' \
--data-raw '{
"chatId": "abcd",
"stream": false,
"messages": [
{
"role": "user",
"content": [
{
"type": "text",
"text": "导演是谁"
},
{
"type": "image_url",
"image_url": {
"url": "图片链接"
}
},
{
"type": "file_url",
"name": "文件名",
"url": "文档链接,支持 txt md html word pdf ppt csv excel"
}
]
}
]
}'
```
{{< /markdownify >}}
{{< /tab >}}
{{< tab tabName="参数说明" >}}
{{< markdownify >}}
{{% alert context="info" %}}
- headers.Authorization: Bearer {{apikey}}
- chatId: string | undefined 。
-`undefined` 时(不传入),不使用 FastGpt 提供的上下文功能,完全通过传入的 messages 构建上下文。
-`非空字符串`时,意味着使用 chatId 进行对话,自动从 FastGpt 数据库取历史记录,并使用 messages 数组最后一个内容作为用户问题,其余 message 会被忽略。请自行确保 chatId 唯一长度小于250通常可以是自己系统的对话框ID。
- messages: 结构与 [GPT接口](https://platform.openai.com/docs/api-reference/chat/object) chat模式一致。
- responseChatItemId: string | undefined 。如果传入,则会将该值作为本次对话的响应消息的 IDFastGPT 会自动将该 ID 存入数据库。请确保,在当前`chatId`下,`responseChatItemId`是唯一的。
- detail: 是否返回中间值(模块状态,响应的完整结果等),`stream模式`下会通过`event`进行区分,`非stream模式`结果保存在`responseData`中。
- variables: 模块变量,一个对象,会替换模块中,输入框内容里的`{{key}}`
{{% /alert %}}
{{< /markdownify >}}
{{< /tab >}}
{{< /tabs >}}
#### v1
### 响应
#### v2
v2 接口比起 v1主要变变化在于会在每个节点运行结束后及时返回 response而不是等工作流结束后再统一返回。
{{< tabs tabTotal="5" >}}
{{< tab tabName="detail=false,stream=false 响应" >}}
{{< markdownify >}}
```json
{
"id": "",
"model": "",
"usage": {
"prompt_tokens": 1,
"completion_tokens": 1,
"total_tokens": 1
},
"choices": [
{
"message": {
"role": "assistant",
"content": "我是一个人工智能助手,旨在回答问题和提供信息。如果你有任何问题或者需要帮助,随时问我!"
},
"finish_reason": "stop",
"index": 0
}
]
}
```
{{< /markdownify >}}
{{< /tab >}}
{{< tab tabName="detail=false,stream=true 响应" >}}
{{< markdownify >}}
```bash
data: {"id":"","object":"","created":0,"model":"","choices":[{"delta":{"role":"assistant","content":"你好"},"index":0,"finish_reason":null}]}
data: {"id":"","object":"","created":0,"model":"","choices":[{"delta":{"role":"assistant","content":""},"index":0,"finish_reason":null}]}
data: {"id":"","object":"","created":0,"model":"","choices":[{"delta":{"role":"assistant","content":"今天"},"index":0,"finish_reason":null}]}
data: {"id":"","object":"","created":0,"model":"","choices":[{"delta":{"role":"assistant","content":"过得怎么样?"},"index":0,"finish_reason":null}]}
data: {"id":"","object":"","created":0,"model":"","choices":[{"delta":{"role":"assistant","content":null},"index":0,"finish_reason":"stop"}]}
data: [DONE]
```
{{< /markdownify >}}
{{< /tab >}}
{{< tab tabName="detail=true,stream=false 响应" >}}
{{< markdownify >}}
```json
{
"responseData": [
{
"id": "iSol79OFrBH1I9kC",
"nodeId": "448745",
"moduleName": "common:core.module.template.work_start",
"moduleType": "workflowStart",
"runningTime": 0
},
{
"id": "t1T94WCy6Su3BK4V",
"nodeId": "fjLpE3XPegmoGtbU",
"moduleName": "AI 对话",
"moduleType": "chatNode",
"runningTime": 1.46,
"totalPoints": 0,
"model": "GPT-4o-mini",
"tokens": 64,
"inputTokens": 10,
"outputTokens": 54,
"query": "你是谁",
"reasoningText": "",
"historyPreview": [
{
"obj": "Human",
"value": "你是谁"
},
{
"obj": "AI",
"value": "我是一个人工智能助手,旨在帮助回答问题和提供信息。如果你有任何问题或需要帮助,请告诉我!"
}
],
"contextTotalLen": 2
}
],
"newVariables": {
},
"id": "",
"model": "",
"usage": {
"prompt_tokens": 1,
"completion_tokens": 1,
"total_tokens": 1
},
"choices": [
{
"message": {
"role": "assistant",
"content": "我是一个人工智能助手,旨在帮助回答问题和提供信息。如果你有任何问题或需要帮助,请告诉我!"
},
"finish_reason": "stop",
"index": 0
}
]
}
```
{{< /markdownify >}}
{{< /tab >}}
{{< tab tabName="detail=true,stream=true 响应" >}}
{{< markdownify >}}
```bash
event: flowNodeResponse
data: {"id":"iYv2uA9rCWAtulWo","nodeId":"workflowStartNodeId","moduleName":"流程开始","moduleType":"workflowStart","runningTime":0}
event: flowNodeStatus
data: {"status":"running","name":"AI 对话"}
event: answer
data: {"id":"","object":"","created":0,"model":"","choices":[{"delta":{"role":"assistant","content":"你好"},"index":0,"finish_reason":null}]}
event: answer
data: {"id":"","object":"","created":0,"model":"","choices":[{"delta":{"role":"assistant","content":""},"index":0,"finish_reason":null}]}
event: answer
data: {"id":"","object":"","created":0,"model":"","choices":[{"delta":{"role":"assistant","content":"今天"},"index":0,"finish_reason":null}]}
event: answer
data: {"id":"","object":"","created":0,"model":"","choices":[{"delta":{"role":"assistant","content":"过得怎么样?"},"index":0,"finish_reason":null}]}
event: flowNodeResponse
data: {"id":"pVzLBF7M3Ol4n7s6","nodeId":"ixe20AHN3jy74pKf","moduleName":"AI 对话","moduleType":"chatNode","runningTime":1.48,"totalPoints":0.0042,"model":"Qwen-plus","tokens":28,"inputTokens":8,"outputTokens":20,"query":"你好","reasoningText":"","historyPreview":[{"obj":"Human","value":"你好"},{"obj":"AI","value":"你好!今天过得怎么样?"}],"contextTotalLen":2}
event: answer
data: {"id":"","object":"","created":0,"model":"","choices":[{"delta":{"role":"assistant","content":null},"index":0,"finish_reason":"stop"}]}
event: answer
data: [DONE]
```
{{< /markdownify >}}
{{< /tab >}}
{{< tab tabName="event值" >}}
{{< markdownify >}}
event取值
- answer: 返回给客户端的文本(最终会算作回答)
- fastAnswer: 指定回复返回给客户端的文本(最终会算作回答)
- toolCall: 执行工具
- toolParams: 工具参数
- toolResponse: 工具返回
- flowNodeStatus: 运行到的节点状态
- flowNodeResponse: 单个节点详细响应
- updateVariables: 更新变量
- error: 报错
{{< /markdownify >}}
{{< /tab >}}
{{< /tabs >}}
#### v1 -->
{{< tabs tabTotal="5" >}}
{{< tab tabName="detail=false,stream=false 响应" >}}
{{< markdownify >}}
@@ -648,8 +920,6 @@ event取值
{{< /tab >}}
{{< /tabs >}}
# 对话 CRUD
{{% alert icon="🤖 " context="success" %}}

View File

@@ -39,7 +39,7 @@ curl --location --request POST 'https://{{host}}/api/admin/initv491' \
3. API 知识库支持 PDF 增强解析。
4. 邀请团队成员,改为邀请链接模式。
5. 支持混合检索权重设置。
6. 支持重排模型选择和权重设置,同时调整了知识库搜索权重计算方式,改成 搜索权重 + 重排权重,而不是向量检索权重+全文检索权重+重排权重。
6. 支持重排模型选择和权重设置,同时调整了知识库搜索权重计算方式,改成 搜索权重 + 重排权重,而不是向量检索权重+全文检索权重+重排权重。会对检索结果有一定影响,可以通过调整相关权重来进行数据适配。
## ⚙️ 优化

View File

@@ -1,5 +1,5 @@
---
title: 'V4.9.2(进行中)'
title: 'V4.9.2'
description: 'FastGPT V4.9.2 更新说明'
icon: 'upgrade'
draft: false
@@ -8,20 +8,31 @@ weight: 798
---
## 更新指南
### 配置参数变更
可直接升级v4.9.3v4.9.2存在一个工作流数据类型转化错误。
修改`config.json`文件中`systemEnv.pgHNSWEfSearch`参数名,改成`hnswEfSearch`
商业版用户直接在后台`系统配置-基础配置`中进行变更。
### 1. 做好数据库备份
### SSO 迁移
### 2. SSO 迁移
使用了 SSO 或成员同步的商业版用户,并且是对接`钉钉``企微`的,需要迁移已有的 SSO 相关配置:
参考:[SSO & 外部成员同步](/docs/guide/admin/sso.md)中的配置进行`sso-service`的部署和配置。
参考:[SSO & 外部成员同步](/docs/guide/admin/sso)中的配置进行`sso-service`的部署和配置。
1. 先将原商业版后台中的相关配置项复制备份出来(以企微为例,将 AppId, Secret 等复制出来)再进行镜像升级。
2. 参考上述文档,部署 SSO 服务,配置相关的环境变量
3. 如果原先使用企微组织架构同步的用户,在商业版后台切换团队模式为“同步模式”
3. 如果原先使用企微组织架构同步的用户,升级完镜像后,需要在商业版后台切换团队模式为“同步模式”
### 3. 配置参数变更
修改`config.json`文件中`systemEnv.pgHNSWEfSearch`参数名,改成`hnswEfSearch`
商业版用户升级镜像后,直接在后台`系统配置-基础配置`中进行变更。
### 4. 更新镜像
- 更新 FastGPT 镜像 tag: v4.9.2
- 更新 FastGPT 商业版镜像 tag: v4.9.2
- Sandbox 镜像,可以不更新
- AIProxy 镜像修改为: registry.cn-hangzhou.aliyuncs.com/labring/aiproxy:v0.1.4
## 重要更新
@@ -35,6 +46,8 @@ weight: 798
4. 集合同步时,支持同步修改标题。
5. 团队成员管理重构,抽离主流 IM SSO企微、飞书、钉钉并支持通过自定义 SSO 接入 FastGPT。同时完善与外部系统的成员同步。
6. 支持 `oceanbase` 向量数据库。填写环境变量`OCEANBASE_URL`即可。
7. 基于 mistral-ocr 的 PDF 解析示例。
8. 基于 miner-u 的 PDF 解析示例。
## ⚙️ 优化

View File

@@ -0,0 +1,29 @@
---
title: 'V4.9.3'
description: 'FastGPT V4.9.3 更新说明'
icon: 'upgrade'
draft: false
toc: true
weight: 797
---
## 更新指南
### 1. 做好数据库备份
### 2. 更新镜像
- 更新 FastGPT 镜像 tag: v4.9.3
- 更新 FastGPT 商业版镜像 tag: v4.9.3
- Sandbox 镜像tag: v4.9.3
- AIProxy 镜像tag: v0.1.5
## 🚀 新增内容
1. 工作流 debug 模式支持交互节点。
2. 代码运行支持 Python3 代码。
## 🐛 修复
1. 工作流格式转化异常。

View File

@@ -0,0 +1,66 @@
---
title: 'V4.9.4(进行中)'
description: 'FastGPT V4.9.4 更新说明'
icon: 'upgrade'
draft: false
toc: true
weight: 796
---
## 升级指南
### 1. 做好数据备份
### 1. 安装 Redis
* docker 部署的用户,参考最新的 `docker-compose.yml` 文件增加 Redis 配置。增加一个 redis 容器,并配置`fastgpt`,`fastgpt-pro`的环境变量,增加 `REDIS_URL` 环境变量。
* Sealos 部署的用户,在数据库里新建一个`redis`数据库,并复制`内网地址的 connection` 作为 `redis` 的链接串。然后配置`fastgpt`,`fastgpt-pro`的环境变量,增加 `REDIS_URL` 环境变量。
| | | |
| --- | --- | --- |
| ![](/imgs/sealos-redis1.png) | ![](/imgs/sealos-redis2.png) | ![](/imgs/sealos-redis3.png) |
### 2. 更新镜像 tag
- 更新 FastGPT 镜像 tag: v4.9.4-alpha
- 更新 FastGPT 商业版镜像 tag: v4.9.4-alpha
- Sandbox 无需更新
- AIProxy 无需更新
### 3. 执行升级脚本
该脚本仅需商业版用户执行。
从任意终端,发起 1 个 HTTP 请求。其中 {{rootkey}} 替换成环境变量里的 `rootkey`{{host}} 替换成**FastGPT 域名**。
```bash
curl --location --request POST 'https://{{host}}/api/admin/initv494' \
--header 'rootkey: {{rootkey}}' \
--header 'Content-Type: application/json'
```
**脚本功能**
1. 更新站点同步定时器
## 🚀 新增内容
1. 集合数据训练状态展示
2. SMTP 发送邮件插件
3. BullMQ 消息队列。
4. 利用 redis 进行部分数据缓存。
5. 站点同步支持配置训练参数。
6. AI 对话/工具调用,增加返回模型 finish_reason 字段。
7. 移动端语音输入交互调整
## ⚙️ 优化
1. Admin 模板渲染调整。
2. 支持环境变量配置对话文件过期时间。
3. MongoDB log 库可独立部署。
## 🐛 修复
1. 搜索应用/知识库时,无法点击目录进入下一层。
2. 重新训练时,参数未成功初始化。
3. package/service 部分请求在多 app 中不一致。

View File

@@ -0,0 +1,88 @@
---
title: '知识库引用分块阅读器'
description: 'FastGPT 分块阅读器功能介绍'
icon: 'description'
draft: false
toc: true
weight: 480
---
在企业 AI 应用落地过程中文档知识引用的精确性和透明度一直是用户关注的焦点。FastGPT 4.9.1 版本带来的知识库分块阅读器,巧妙解决了这一痛点,让 AI 引用不再是"黑盒"。
# 为什么需要分块阅读器?
传统的 AI 对话中,当模型引用企业知识库内容时,用户往往只能看到被引用的片段,无法获取完整语境,这给内容验证和深入理解带来了挑战。分块阅读器的出现,让用户可以在对话中直接查看引用内容的完整文档,并精确定位到引用位置,实现了引用的"可解释性"。
## 传统引用体验的局限
以往在知识库中上传文稿后,当我们在工作流中输入问题时,传统的引用方式只会展示引用到的分块,无法确认分块在文章中的上下文:
| 问题 | 引用 |
| --- | --- |
| ![](/imgs/chunkReader1.png) | ![](/imgs/chunkReader2.jpg) |
## FastGPT 分块阅读器:精准定位,无缝阅读
而在 FastGPT 全新的分块式阅读器中,同样的知识库内容和问题,呈现方式发生了质的飞跃
![](/imgs/chunkReader4.jpg)
当 AI 引用知识库内容时,用户只需点击引用链接,即可打开一个浮窗,呈现完整的原文内容,并通过醒目的高亮标记精确显示引用的文本片段。这既保证了回答的可溯源性,又提供了便捷的原文查阅体验。
# 核心功能
## 全文展示与定位
"分块阅读器" 让用户能直观查看AI回答引用的知识来源。
在对话界面中,当 AI 引用了知识库内容,系统会在回复下方展示出处信息。用户只需点击这些引用链接,即可打开一个优雅的浮窗,呈现完整的原文内容,并通过醒目的高亮标记精确显示 AI 引用的文本片段。
这一设计既保证了回答的可溯源性又提供了便捷的原文查阅体验让用户能轻松验证AI回答的准确性和相关上下文。
![](/imgs/chunkReader3.webp)
## 便捷引用导航
分块阅读器右上角设计了简洁实用的导航控制,用户可以通过这对按钮轻松在多个引用间切换浏览。导航区还直观显示当前查看的引用序号及总引用数量(如 "7/10"),帮助用户随时了解浏览进度和引用内容的整体规模。
![](imgs/chunkReader5.jpg)
## 引用质量评分
每条引用内容旁边都配有智能评分标签直观展示该引用在所有知识片段中的相关性排名。用户只需将鼠标悬停在评分标签上即可查看完整的评分详情了解这段引用内容为何被AI选中以及其相关性的具体构成。
![](imgs/chunkReader6.png)
## 文档内容一键导出
分块阅读器贴心配备了内容导出功能,让有效信息不再流失。只要用户拥有相应知识库的阅读权限,便可通过简单点击将引用涉及的全文直接保存到本地设备。
![](imgs/chunkReader7.jpg)
# 进阶特性
## 灵活的可见度控制
FastGPT提供灵活的引用可见度设置让知识共享既开放又安全。以免登录链接为例管理员可精确控制外部访问者能看到的信息范围。
当设置为"仅引用内容可见"时,外部用户点击引用链接将只能查看 AI 引用的特定文本片段,而非完整原文档。如图所示,分块阅读器此时智能调整显示模式,仅呈现相关引用内容。
| | |
| --- | --- |
| ![](/imgs/chunkReader8.png) | ![](/imgs/chunkReader9.jpg) |
## 即时标注优化
在浏览过程中,授权用户可以直接对引用内容进行即时标注和修正,系统会智能处理这些更新而不打断当前的对话体验。所有修改过的内容会通过醒目的"已更新"标签清晰标识,既保证了引用的准确性,又维持了对话历史的完整性。
这一无缝的知识优化流程特别适合团队协作场景让知识库能在实际使用过程中持续进化确保AI回答始终基于最新、最准确的信息源。
## 智能文档性能优化
面对现实业务中可能包含成千上万分块的超长文档FastGPT采用了先进的性能优化策略确保分块阅读器始终保持流畅响应。
系统根据引用相关性排序和数据库索引进行智能加载管理,实现了"按需渲染"机制——根据索引排序和数据库 id只有当用户实际需要查看的内容才会被加载到内存中。这意味着无论是快速跳转到特定引用还是自然滚动浏览文档都能获得丝滑的用户体验不会因为文档体积庞大而出现卡顿或延迟。
这一技术优化使FastGPT能够轻松应对企业级的大规模知识库场景让即使是包含海量信息的专业文档也能高效展示和查阅。

View File

@@ -61,11 +61,12 @@ FastGPT-SSO-Service 是为了聚合不同来源的 SSO 和成员同步接口,
#### 1. 配置环境变量
环境变量中的 `EXTERNAL_USER_SERVICE_BASE_URL` 为内网地址,例如上述例子中的配置,环境变量应该设置为
环境变量中的 `EXTERNAL_USER_SYSTEM_BASE_URL` 为内网地址,例如上述例子中的配置,环境变量应该设置为
```yaml
EXTERNAL_USER_SERVICE_BASE_URL=http://fastgpt-sso:3000
EXTERNAL_USER_SERVICE_AUTH_TOKEN=xxxxx
env:
- EXTERNAL_USER_SYSTEM_BASE_URL=http://fastgpt-sso:3000
- EXTERNAL_USER_SYSTEM_AUTH_TOKEN=xxxxx
```
#### 2. 在商业版后台配置按钮文字,图标等。
@@ -89,7 +90,8 @@ EXTERNAL_USER_SERVICE_AUTH_TOKEN=xxxxx
设置 fastgpt-pro 环境变量则可开启自动成员同步
```bash
SYNC_MEMBER_CRON="0 0 * * *" # Cron 表达式,每天 0 点执行
env:
- "SYNC_MEMBER_CRON=0 0 * * *" # Cron 表达式,每天 0 点执行
```
## 内置的通用协议/IM 配置示例

View File

@@ -7,7 +7,7 @@ toc: true
weight: -10
---
FastGPT 是一个基于 LLM 大语言模型的知识库问答系统,提供开箱即用的数据处理、模型调用等能力同时可以通过 Flow 可视化进行工作流编排,从而实现复杂的问答场景!
FastGPT 是一个AI Agent 构建平台,提供开箱即用的数据处理、模型调用等能力同时可以通过 Flow 可视化进行工作流编排,从而实现复杂的应用场景!
{{% alert icon="🤖 " context="success" %}}
FastGPT 在线使用:[https://tryfastgpt.ai](https://tryfastgpt.ai)

env.d.ts (new file, 40 lines)
View File

@@ -0,0 +1,40 @@
declare global {
namespace NodeJS {
interface ProcessEnv {
LOG_DEPTH: string;
DEFAULT_ROOT_PSW: string;
DB_MAX_LINK: string;
TOKEN_KEY: string;
FILE_TOKEN_KEY: string;
ROOT_KEY: string;
OPENAI_BASE_URL: string;
CHAT_API_KEY: string;
AIPROXY_API_ENDPOINT: string;
AIPROXY_API_TOKEN: string;
MULTIPLE_DATA_TO_BASE64: string;
MONGODB_URI: string;
MONGODB_LOG_URI?: string;
PG_URL: string;
OCEANBASE_URL: string;
MILVUS_ADDRESS: string;
MILVUS_TOKEN: string;
SANDBOX_URL: string;
PRO_URL: string;
FE_DOMAIN: string;
FILE_DOMAIN: string;
NEXT_PUBLIC_BASE_URL: string;
LOG_LEVEL: string;
STORE_LOG_LEVEL: string;
USE_IP_LIMIT: string;
WORKFLOW_MAX_RUN_TIMES: string;
WORKFLOW_MAX_LOOP_TIMES: string;
CHECK_INTERNAL_IP: string;
CHAT_LOG_URL: string;
CHAT_LOG_INTERVAL: string;
CHAT_LOG_SOURCE_ID_PREFIX: string;
ALLOWED_ORIGINS: string;
}
}
}
export {};
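
The global declaration above mainly changes how these variables are typed when read. A minimal sketch of the effect (the chosen variables are illustrative):

```ts
// With the ProcessEnv augmentation in env.d.ts, these reads are typed as string
// instead of string | undefined — the value can still be missing at runtime,
// so defaults remain a good idea.
const mongoUri = process.env.MONGODB_URI;
const logLevel = process.env.LOG_LEVEL || 'info';
```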

View File

@@ -118,11 +118,12 @@ export type SystemEnvType = {
oneapiUrl?: string;
chatApiKey?: string;
customPdfParse?: {
url?: string;
key?: string;
doc2xKey?: string;
price?: number; // n points/1 page
};
customPdfParse?: customPdfParseType;
};
export type customPdfParseType = {
url?: string;
key?: string;
doc2xKey?: string;
price?: number;
};

View File

@@ -1,3 +1,5 @@
import { i18nT } from '../../../web/i18n/utils';
export enum ChatCompletionRequestMessageRoleEnum {
'System' = 'system',
'User' = 'user',
@@ -28,3 +30,13 @@ export enum EmbeddingTypeEnm {
query = 'query',
db = 'db'
}
export const completionFinishReasonMap = {
close: i18nT('chat:completion_finish_close'),
stop: i18nT('chat:completion_finish_stop'),
length: i18nT('chat:completion_finish_length'),
tool_calls: i18nT('chat:completion_finish_tool_calls'),
content_filter: i18nT('chat:completion_finish_content_filter'),
function_call: i18nT('chat:completion_finish_function_call'),
null: i18nT('chat:completion_finish_null')
};

View File

@@ -73,6 +73,15 @@ export type ChatCompletionMessageFunctionCall =
export type StreamChatType = Stream<openai.Chat.Completions.ChatCompletionChunk>;
export type UnStreamChatType = openai.Chat.Completions.ChatCompletion;
export type CompletionFinishReason =
| 'close'
| 'stop'
| 'length'
| 'tool_calls'
| 'content_filter'
| 'function_call'
| null;
export default openai;
export * from 'openai';

View File

@@ -15,7 +15,6 @@ export type DatasetUpdateBody = {
name?: string;
avatar?: string;
intro?: string;
status?: DatasetSchemaType['status'];
agentModel?: string;
vlmModel?: string;
@@ -26,6 +25,7 @@ export type DatasetUpdateBody = {
apiServer?: DatasetSchemaType['apiServer'];
yuqueServer?: DatasetSchemaType['yuqueServer'];
feishuServer?: DatasetSchemaType['feishuServer'];
chunkSettings?: DatasetSchemaType['chunkSettings'];
// sync schedule
autoSync?: boolean;
@@ -141,7 +141,6 @@ export type PushDatasetDataChunkProps = {
export type PostWebsiteSyncParams = {
datasetId: string;
billId: string;
};
export type PushDatasetDataProps = {

View File

@@ -50,7 +50,9 @@ export const DatasetTypeMap = {
export enum DatasetStatusEnum {
active = 'active',
syncing = 'syncing'
syncing = 'syncing',
waiting = 'waiting',
error = 'error'
}
export const DatasetStatusMap = {
[DatasetStatusEnum.active]: {
@@ -58,6 +60,12 @@ export const DatasetStatusMap = {
},
[DatasetStatusEnum.syncing]: {
label: i18nT('common:core.dataset.status.syncing')
},
[DatasetStatusEnum.waiting]: {
label: i18nT('common:core.dataset.status.waiting')
},
[DatasetStatusEnum.error]: {
label: i18nT('dataset:status_error')
}
};

View File

@@ -17,6 +17,20 @@ import { SourceMemberType } from 'support/user/type';
import { DatasetDataIndexTypeEnum } from './data/constants';
import { ChunkSettingModeEnum } from './constants';
export type ChunkSettingsType = {
trainingType: DatasetCollectionDataProcessModeEnum;
autoIndexes?: boolean;
imageIndex?: boolean;
chunkSettingMode?: ChunkSettingModeEnum;
chunkSplitMode?: DataChunkSplitModeEnum;
chunkSize?: number;
indexSize?: number;
chunkSplitter?: string;
qaPrompt?: string;
};
export type DatasetSchemaType = {
_id: string;
parentId?: string;
@@ -29,7 +43,6 @@ export type DatasetSchemaType = {
name: string;
intro: string;
type: `${DatasetTypeEnum}`;
status: `${DatasetStatusEnum}`;
vectorModel: string;
agentModel: string;
@@ -39,14 +52,16 @@ export type DatasetSchemaType = {
url: string;
selector: string;
};
chunkSettings?: ChunkSettingsType;
inheritPermission: boolean;
apiServer?: APIFileServer;
feishuServer?: FeishuServer;
yuqueServer?: YuqueServer;
autoSync?: boolean;
// abandon
autoSync?: boolean;
externalReadUrl?: string;
defaultPermission?: number;
};
@@ -163,6 +178,7 @@ export type DatasetTrainingSchemaType = {
weight: number;
indexes: Omit<DatasetDataIndexItemType, 'dataId'>[];
retryCount: number;
errorMsg?: string;
};
export type CollectionWithDatasetType = DatasetCollectionSchemaType & {
@@ -192,6 +208,8 @@ export type DatasetListItemType = {
};
export type DatasetItemType = Omit<DatasetSchemaType, 'vectorModel' | 'agentModel' | 'vlmModel'> & {
status: `${DatasetStatusEnum}`;
errorMsg?: string;
vectorModel: EmbeddingModelItemType;
agentModel: LLMModelItemType;
vlmModel?: LLMModelItemType;
@@ -216,6 +234,7 @@ export type DatasetCollectionItemType = CollectionWithDatasetType & {
file?: DatasetFileSchema;
permission: DatasetPermission;
indexAmount: number;
errorCount?: number;
};
/* ================= data ===================== */

View File

@@ -5,6 +5,7 @@ export enum SseResponseEventEnum {
answer = 'answer', // animation stream
fastAnswer = 'fastAnswer', // direct answer text, not animation
flowNodeStatus = 'flowNodeStatus', // update node status
flowNodeResponse = 'flowNodeResponse', // node response
toolCall = 'toolCall', // tool start
toolParams = 'toolParams', // tool params return

View File

@@ -22,6 +22,7 @@ import { UserSelectOptionType } from '../template/system/userSelect/type';
import { WorkflowResponseType } from '../../../../service/core/workflow/dispatch/type';
import { AiChatQuoteRoleType } from '../template/system/aiChat/type';
import { LafAccountType, OpenaiAccountType } from '../../../support/user/team/type';
import { CompletionFinishReason } from '../../ai/type';
export type ExternalProviderType = {
openaiAccount?: OpenaiAccountType;
@@ -40,6 +41,7 @@ export type ChatDispatchProps = {
id: string; // May be the id of the system plug-in (cannot be used directly to look up the table)
teamId: string;
tmbId: string; // App tmbId
isChildApp?: boolean;
};
runningUserInfo: {
teamId: string;
@@ -58,6 +60,7 @@ export type ChatDispatchProps = {
isToolCall?: boolean;
workflowStreamResponse?: WorkflowResponseType;
workflowDispatchDeep?: number;
version?: 'v1' | 'v2';
};
export type ModuleDispatchProps<T> = ChatDispatchProps & {
@@ -128,6 +131,7 @@ export type DispatchNodeResponseType = {
obj: `${ChatRoleEnum}`;
value: string;
}[]; // completion context array. history will slice
finishReason?: CompletionFinishReason;
// dataset search
similarity?: number;

View File

@@ -10,7 +10,6 @@ import { FlowNodeOutputItemType, ReferenceValueType } from '../type/io';
import { ChatItemType, NodeOutputItemType } from '../../../core/chat/type';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '../../../core/chat/constants';
import { replaceVariable, valToStr } from '../../../common/string/tools';
import { ChatCompletionChunk } from 'openai/resources';
export const getMaxHistoryLimitFromNodes = (nodes: StoreNodeItemType[]): number => {
let limit = 10;
@@ -67,7 +66,7 @@ export const getLastInteractiveValue = (histories: ChatItemType[]) => {
};
export const initWorkflowEdgeStatus = (
edges: StoreEdgeItemType[],
edges: StoreEdgeItemType[] | RuntimeEdgeItemType[],
histories?: ChatItemType[]
): RuntimeEdgeItemType[] => {
// If there is a history, use the last interactive value

View File

@@ -5,10 +5,36 @@ import { FlowNodeInputTypeEnum } from 'core/workflow/node/constant';
import { WorkflowIOValueTypeEnum } from 'core/workflow/constants';
import type { ChatCompletionMessageParam } from '../../../../ai/type';
type InteractiveBasicType = {
entryNodeIds: string[];
memoryEdges: RuntimeEdgeItemType[];
nodeOutputs: NodeOutputItemType[];
toolParams?: {
entryNodeIds: string[]; // 记录工具中,交互节点的 Id而不是起始工作流的入口
memoryMessages: ChatCompletionMessageParam[]; // 这轮工具中,产生的新的 messages
toolCallId: string; // 记录对应 tool 的id用于后续交互节点可以替换掉 tool 的 response
};
};
type InteractiveNodeType = {
entryNodeIds?: string[];
memoryEdges?: RuntimeEdgeItemType[];
nodeOutputs?: NodeOutputItemType[];
};
export type UserSelectOptionItemType = {
key: string;
value: string;
};
type UserSelectInteractive = InteractiveNodeType & {
type: 'userSelect';
params: {
description: string;
userSelectOptions: UserSelectOptionItemType[];
userSelectedVal?: string;
};
};
export type UserInputFormItemType = {
type: FlowNodeInputTypeEnum;
@@ -28,29 +54,7 @@ export type UserInputFormItemType = {
// select
list?: { label: string; value: string }[];
};
type InteractiveBasicType = {
entryNodeIds: string[];
memoryEdges: RuntimeEdgeItemType[];
nodeOutputs: NodeOutputItemType[];
toolParams?: {
entryNodeIds: string[]; // 记录工具中,交互节点的 Id而不是起始工作流的入口
memoryMessages: ChatCompletionMessageParam[]; // 这轮工具中,产生的新的 messages
toolCallId: string; // 记录对应 tool 的id用于后续交互节点可以替换掉 tool 的 response
};
};
type UserSelectInteractive = {
type: 'userSelect';
params: {
description: string;
userSelectOptions: UserSelectOptionItemType[];
userSelectedVal?: string;
};
};
type UserInputInteractive = {
type UserInputInteractive = InteractiveNodeType & {
type: 'userInput';
params: {
description: string;
@@ -58,6 +62,5 @@ type UserInputInteractive = {
submitted?: boolean;
};
};
export type InteractiveNodeResponseType = UserSelectInteractive | UserInputInteractive;
export type WorkflowInteractiveResponseType = InteractiveBasicType & InteractiveNodeResponseType;

View File

@@ -1,7 +1,23 @@
export const JS_TEMPLATE = `function main({data1, data2}){
return {
result: data1,
data2
}
return {
result: data1,
data2
}
}`;
export const PY_TEMPLATE = `def main(data1, data2):
return {
"result": data1,
"data2": data2
}
`;
export enum SandboxCodeTypeEnum {
js = 'js',
py = 'py'
}
export const SNADBOX_CODE_TEMPLATE = {
[SandboxCodeTypeEnum.js]: JS_TEMPLATE,
[SandboxCodeTypeEnum.py]: PY_TEMPLATE
};

View File

@@ -68,12 +68,14 @@ export const CodeNode: FlowNodeTemplateType = {
key: NodeInputKeyEnum.codeType,
renderTypeList: [FlowNodeInputTypeEnum.hidden],
label: '',
valueType: WorkflowIOValueTypeEnum.string,
value: 'js'
},
{
key: NodeInputKeyEnum.code,
renderTypeList: [FlowNodeInputTypeEnum.custom],
label: '',
valueType: WorkflowIOValueTypeEnum.string,
value: JS_TEMPLATE
}
],

View File

@@ -23,6 +23,7 @@ import { NextApiResponse } from 'next';
import { AppDetailType, AppSchema } from '../../app/type';
import { ParentIdType } from 'common/parentFolder/type';
import { AppTypeEnum } from 'core/app/constants';
import { WorkflowInteractiveResponseType } from '../template/system/interactive/type';
export type FlowNodeCommonType = {
parentNodeId?: string;
@@ -120,6 +121,7 @@ export type FlowNodeItemType = FlowNodeTemplateType & {
showResult?: boolean; // show and hide result modal
response?: ChatHistoryItemResType;
isExpired?: boolean;
workflowInteractiveResponse?: WorkflowInteractiveResponseType;
};
isFolded?: boolean;
};

View File

@@ -10,7 +10,7 @@
"js-yaml": "^4.1.0",
"jschardet": "3.1.1",
"nanoid": "^5.1.3",
"next": "14.2.25",
"next": "14.2.26",
"openai": "4.61.0",
"openapi-types": "^12.1.3",
"json5": "^2.2.3",

View File

@@ -5,6 +5,7 @@
"dependencies": {
"cheerio": "1.0.0-rc.12",
"@types/pg": "^8.6.6",
"@types/nodemailer": "^6.4.17",
"axios": "^1.8.2",
"duck-duck-scrape": "^2.2.5",
"echarts": "5.4.1",
@@ -13,6 +14,7 @@
"mssql": "^11.0.1",
"mysql2": "^3.11.3",
"json5": "^2.2.3",
"nodemailer": "^6.10.0",
"pg": "^8.10.0",
"wikijs": "^6.4.1"
},

View File

@@ -29,7 +29,8 @@ const packagePluginList = [
'databaseConnection',
'Doc2X',
'Doc2X/PDF2text',
'searchXNG'
'searchXNG',
'smtpEmail'
];
export const list = [...staticPluginList, ...packagePluginList];

View File

@@ -34,7 +34,8 @@ const main = async ({
port: parseInt(port, 10),
database: databaseName,
user,
password
password,
connectionTimeoutMillis: 30000
});
await client.connect();
@@ -47,7 +48,8 @@ const main = async ({
port: parseInt(port, 10),
database: databaseName,
user,
password
password,
connectTimeout: 30000
});
const [rows] = await connection.execute(sql);

View File

@@ -0,0 +1,122 @@
import { getErrText } from '@fastgpt/global/common/error/utils';
import nodemailer from 'nodemailer';
interface Props {
// SMTP configuration
smtpHost: string;
smtpPort: string;
SSL: boolean;
smtpUser: string;
smtpPass: string;
fromName?: string;
// Mail parameters
to: string;
subject: string;
content: string;
cc?: string;
bcc?: string;
attachments?: string;
}
interface Response {
success: boolean;
messageId?: string;
error?: string;
}
const validateEmail = (email: string) => {
const regex = /^[^\s@]+@[^\s@]+\.[^\s@]+$/;
return regex.test(email);
};
const validateEmails = (emails: string) => {
return emails.split(',').every((email) => validateEmail(email.trim()));
};
const main = async ({
smtpHost,
smtpPort,
SSL,
smtpUser,
smtpPass,
fromName,
to,
subject,
content,
cc,
bcc,
attachments
}: Props): Promise<Response> => {
try {
// Validate the SMTP configuration
if (!smtpHost || !smtpPort || !smtpUser || !smtpPass) {
throw new Error('Incomplete SMTP configuration');
}
// Validate required parameters
if (!to || !subject || !content) {
throw new Error('Recipient, subject, and content are required');
}
// Validate email address formats
if (!validateEmails(to)) {
throw new Error('Invalid recipient email format');
}
if (cc && !validateEmails(cc)) {
throw new Error('Invalid CC email format');
}
if (bcc && !validateEmails(bcc)) {
throw new Error('Invalid BCC email format');
}
// Create the SMTP transport
const transporter = nodemailer.createTransport({
host: smtpHost,
port: Number(smtpPort),
secure: SSL === true,
auth: {
user: smtpUser,
pass: smtpPass
}
});
let attachmentsArray = [];
try {
attachmentsArray = JSON.parse(attachments || '[]');
} catch (error) {
throw new Error('Attachment format parsing error, please check attachment configuration');
}
// Send the email
const info = await transporter.sendMail({
from: `"${fromName || 'FastGPT'}" <${smtpUser}>`,
to: to
.split(',')
.map((email) => email.trim())
.join(','),
cc: cc
?.split(',')
.map((email) => email.trim())
.join(','),
bcc: bcc
?.split(',')
.map((email) => email.trim())
.join(','),
subject,
html: content,
attachments: attachmentsArray || []
});
return {
success: true,
messageId: info.messageId
};
} catch (error: any) {
return {
success: false,
error: getErrText(error)
};
}
};
export default main;
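A hedged usage sketch of the exported main above. All hosts, credentials and URLs are placeholders; the attachments string must be the JSON array format the plugin config documents (filename plus path):

const demoSend = async () => {
  const result = await main({
    smtpHost: 'smtp.example.com',
    smtpPort: '465',
    SSL: true,
    smtpUser: 'bot@example.com',
    smtpPass: 'app-password-or-auth-code',
    fromName: 'FastGPT',
    to: 'alice@example.com, bob@example.com',
    cc: 'ops@example.com',
    subject: 'Weekly report',
    content: '<p>Hello from FastGPT</p>',
    attachments: '[{"filename":"report.pdf","path":"https://example.com/report.pdf"}]'
  });

  // On success: { success: true, messageId: '<...>' }
  // On failure: { success: false, error: '...' }
  console.log(result);
};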

View File

@@ -0,0 +1,651 @@
{
"author": "cloudpense",
"version": "1.0.0",
"name": "Email 邮件发送",
"avatar": "plugins/email",
"intro": "通过SMTP协议发送电子邮件(nodemailer)",
"showStatus": true,
"weight": 10,
"isTool": true,
"templateType": "tools",
"workflow": {
"nodes": [
{
"nodeId": "pluginInput",
"name": "workflow:template.plugin_start",
"intro": "workflow:intro_plugin_input",
"avatar": "core/workflow/template/workflowStart",
"flowNodeType": "pluginInput",
"showStatus": false,
"position": {
"x": 595.3456736313964,
"y": -323.02524442647456
},
"version": "481",
"inputs": [
{
"renderTypeList": ["input", "reference"],
"selectedTypeIndex": 0,
"valueType": "string",
"canEdit": true,
"key": "smtpHost",
"label": "smtpHost",
"description": "",
"defaultValue": "",
"list": [
{
"label": "",
"value": ""
}
],
"maxFiles": 5,
"canSelectFile": true,
"canSelectImg": true,
"required": true,
"customInputConfig": {
"selectValueTypeList": ["string"]
}
},
{
"renderTypeList": ["input", "reference"],
"selectedTypeIndex": 0,
"valueType": "string",
"canEdit": true,
"key": "smtpPort",
"label": "smtpPort",
"description": "SMTP端口",
"defaultValue": "465",
"list": [
{
"label": "",
"value": ""
}
],
"maxFiles": 5,
"canSelectFile": true,
"canSelectImg": true,
"required": true
},
{
"renderTypeList": ["select", "reference"],
"selectedTypeIndex": 0,
"valueType": "string",
"canEdit": true,
"key": "SSL",
"label": "SSL",
"description": "SSL",
"defaultValue": "true",
"list": [
{
"label": "true",
"value": "true"
},
{
"label": "false",
"value": "false"
}
],
"maxFiles": 5,
"canSelectFile": true,
"canSelectImg": true,
"required": true
},
{
"renderTypeList": ["input", "reference"],
"selectedTypeIndex": 0,
"valueType": "string",
"canEdit": true,
"key": "smtpUser",
"label": "smtpUser",
"description": "SMTP用户名, 邮箱账号",
"defaultValue": "",
"list": [
{
"label": "",
"value": ""
}
],
"maxFiles": 5,
"canSelectFile": true,
"canSelectImg": true,
"required": true
},
{
"renderTypeList": ["input", "reference"],
"selectedTypeIndex": 0,
"valueType": "string",
"canEdit": true,
"key": "smtpPass",
"label": "smtpPass",
"description": "邮箱密码或授权码",
"defaultValue": "",
"list": [
{
"label": "",
"value": ""
}
],
"maxFiles": 5,
"canSelectFile": true,
"canSelectImg": true,
"required": true
},
{
"renderTypeList": ["input", "reference"],
"selectedTypeIndex": 0,
"valueType": "string",
"canEdit": true,
"key": "fromName",
"label": "fromName",
"description": "显示的发件人名称",
"defaultValue": "",
"list": [
{
"label": "",
"value": ""
}
],
"maxFiles": 5,
"canSelectFile": true,
"canSelectImg": true,
"required": true
},
{
"renderTypeList": ["input", "reference"],
"selectedTypeIndex": 0,
"valueType": "string",
"canEdit": true,
"key": "to",
"label": "to",
"description": "请输入收件人邮箱,多个邮箱用逗号分隔",
"defaultValue": "",
"list": [
{
"label": "",
"value": ""
}
],
"maxFiles": 5,
"canSelectFile": true,
"canSelectImg": true,
"required": true,
"toolDescription": "请输入收件人邮箱,多个邮箱用逗号分隔"
},
{
"renderTypeList": ["input", "reference"],
"selectedTypeIndex": 0,
"valueType": "string",
"canEdit": true,
"key": "subject",
"label": "subject",
"description": "请输入邮件主题",
"defaultValue": "",
"list": [
{
"label": "",
"value": ""
}
],
"maxFiles": 5,
"canSelectFile": true,
"canSelectImg": true,
"required": true,
"toolDescription": "请输入邮件主题"
},
{
"renderTypeList": ["input", "reference"],
"selectedTypeIndex": 0,
"valueType": "string",
"canEdit": true,
"key": "content",
"label": "content",
"description": "请输入邮件内容支持HTML格式",
"defaultValue": "",
"list": [
{
"label": "",
"value": ""
}
],
"maxFiles": 5,
"canSelectFile": true,
"canSelectImg": true,
"required": true,
"toolDescription": "请输入邮件内容支持HTML格式"
},
{
"renderTypeList": ["input", "reference"],
"selectedTypeIndex": 0,
"valueType": "string",
"canEdit": true,
"key": "cc",
"label": "cc",
"description": "请输入抄送邮箱,多个邮箱用逗号分隔",
"defaultValue": "",
"list": [
{
"label": "",
"value": ""
}
],
"maxFiles": 5,
"canSelectFile": true,
"canSelectImg": true,
"required": false,
"toolDescription": "请输入抄送邮箱,多个邮箱用逗号分隔"
},
{
"renderTypeList": ["input", "reference"],
"selectedTypeIndex": 0,
"valueType": "string",
"canEdit": true,
"key": "bcc",
"label": "bcc",
"description": "请输入密送邮箱,多个邮箱用逗号分隔",
"defaultValue": "",
"list": [
{
"label": "",
"value": ""
}
],
"maxFiles": 5,
"canSelectFile": true,
"canSelectImg": true,
"required": false,
"toolDescription": "请输入密送邮箱,多个邮箱用逗号分隔"
},
{
"renderTypeList": ["JSONEditor", "reference"],
"selectedTypeIndex": 0,
"valueType": "string",
"canEdit": true,
"key": "attachments",
"label": "attachments",
"description": "必须是json数组格式\n[{\"filename\":\"附件名\",\"path\":\"附件url\"}]",
"defaultValue": "",
"list": [
{
"label": "",
"value": ""
}
],
"maxFiles": 5,
"canSelectFile": true,
"canSelectImg": true,
"required": false,
"customInputConfig": {
"selectValueTypeList": ["arrayObject"]
},
"toolDescription": "必须是json数组格式\n[{\"filename\":\"附件名\",\"path\":\"附件url\"}]",
"maxLength": 0
}
],
"outputs": [
{
"id": "smtpHost",
"valueType": "string",
"key": "smtpHost",
"label": "smtpHost",
"type": "hidden"
},
{
"id": "smtpPort",
"valueType": "string",
"key": "smtpPort",
"label": "smtpPort",
"type": "hidden"
},
{
"id": "SSL",
"valueType": "string",
"key": "SSL",
"label": "SSL",
"type": "hidden"
},
{
"id": "smtpUser",
"valueType": "string",
"key": "smtpUser",
"label": "smtpUser",
"type": "hidden"
},
{
"id": "smtpPass",
"valueType": "string",
"key": "smtpPass",
"label": "smtpPass",
"type": "hidden"
},
{
"id": "fromName",
"valueType": "string",
"key": "fromName",
"label": "fromName",
"type": "hidden"
},
{
"id": "to",
"valueType": "string",
"key": "to",
"label": "to",
"type": "hidden"
},
{
"id": "subject",
"valueType": "string",
"key": "subject",
"label": "subject",
"type": "hidden"
},
{
"id": "content",
"valueType": "string",
"key": "content",
"label": "content",
"type": "hidden"
},
{
"id": "cc",
"valueType": "string",
"key": "cc",
"label": "cc",
"type": "hidden"
},
{
"id": "bcc",
"valueType": "string",
"key": "bcc",
"label": "bcc",
"type": "hidden"
},
{
"id": "attachments",
"valueType": "string",
"key": "attachments",
"label": "attachments",
"type": "hidden"
}
]
},
{
"nodeId": "pluginOutput",
"name": "common:core.module.template.self_output",
"intro": "workflow:intro_custom_plugin_output",
"avatar": "core/workflow/template/pluginOutput",
"flowNodeType": "pluginOutput",
"showStatus": false,
"position": {
"x": 2135.4991928806685,
"y": -98.02524442647456
},
"version": "481",
"inputs": [
{
"renderTypeList": ["reference"],
"valueType": "string",
"canEdit": true,
"key": "发送结果",
"label": "发送结果",
"isToolOutput": true,
"description": "",
"required": true,
"value": ["uOX6ITvPWm9O", "httpRawResponse"]
}
],
"outputs": []
},
{
"nodeId": "pluginConfig",
"name": "common:core.module.template.system_config",
"intro": "",
"avatar": "core/workflow/template/systemConfig",
"flowNodeType": "pluginConfig",
"position": {
"x": 184.66337662472682,
"y": -216.05298493910115
},
"version": "4811",
"inputs": [],
"outputs": []
},
{
"nodeId": "uOX6ITvPWm9O",
"name": "HTTP 请求",
"intro": "可以发出一个 HTTP 请求,实现更为复杂的操作(联网搜索、数据库查询等)",
"avatar": "core/workflow/template/httpRequest",
"flowNodeType": "httpRequest468",
"showStatus": true,
"position": {
"x": 1340.0519095857342,
"y": -393.02524442647456
},
"version": "481",
"inputs": [
{
"key": "system_addInputParam",
"renderTypeList": ["addInputParam"],
"valueType": "dynamic",
"label": "",
"required": false,
"description": "common:core.module.input.description.HTTP Dynamic Input",
"customInputConfig": {
"selectValueTypeList": [
"string",
"number",
"boolean",
"object",
"arrayString",
"arrayNumber",
"arrayBoolean",
"arrayObject",
"arrayAny",
"any",
"chatHistory",
"datasetQuote",
"dynamic",
"selectDataset",
"selectApp"
],
"showDescription": false,
"showDefaultValue": true
},
"valueDesc": "",
"debugLabel": "",
"toolDescription": ""
},
{
"key": "system_httpMethod",
"renderTypeList": ["custom"],
"valueType": "string",
"label": "",
"value": "POST",
"required": true,
"valueDesc": "",
"description": "",
"debugLabel": "",
"toolDescription": ""
},
{
"key": "system_httpTimeout",
"renderTypeList": ["custom"],
"valueType": "number",
"label": "",
"value": 30,
"min": 5,
"max": 600,
"required": true,
"valueDesc": "",
"description": "",
"debugLabel": "",
"toolDescription": ""
},
{
"key": "system_httpReqUrl",
"renderTypeList": ["hidden"],
"valueType": "string",
"label": "",
"description": "common:core.module.input.description.Http Request Url",
"placeholder": "https://api.ai.com/getInventory",
"required": false,
"value": "smtpEmail",
"valueDesc": "",
"debugLabel": "",
"toolDescription": ""
},
{
"key": "system_httpHeader",
"renderTypeList": ["custom"],
"valueType": "any",
"value": [],
"label": "",
"description": "common:core.module.input.description.Http Request Header",
"placeholder": "common:core.module.input.description.Http Request Header",
"required": false,
"valueDesc": "",
"debugLabel": "",
"toolDescription": ""
},
{
"key": "system_httpParams",
"renderTypeList": ["hidden"],
"valueType": "any",
"value": [],
"label": "",
"required": false,
"valueDesc": "",
"description": "",
"debugLabel": "",
"toolDescription": ""
},
{
"key": "system_httpJsonBody",
"renderTypeList": ["hidden"],
"valueType": "any",
"value": "{\n\"smtpHost\": \"{{$pluginInput.smtpHost$}}\",\n\"smtpPort\": \"{{$pluginInput.smtpPort$}}\",\n\"SSL\": {{$pluginInput.SSL$}},\n\"smtpUser\": \"{{$pluginInput.smtpUser$}}\",\n\"smtpPass\": \"{{$pluginInput.smtpPass$}}\",\n\"fromName\": \"{{$pluginInput.fromName$}}\",\n\"to\": \"{{$pluginInput.to$}}\",\n\"subject\": \"{{$pluginInput.subject$}}\",\n\"content\": \"{{$pluginInput.content$}}\",\n\"cc\": \"{{$pluginInput.cc$}}\",\n\"bcc\": \"{{$pluginInput.bcc$}}\",\n\"attachments\":'{{$pluginInput.attachments$}}'\n}",
"label": "",
"required": false,
"valueDesc": "",
"description": "",
"debugLabel": "",
"toolDescription": ""
},
{
"key": "system_httpFormBody",
"renderTypeList": ["hidden"],
"valueType": "any",
"value": [],
"label": "",
"required": false,
"valueDesc": "",
"description": "",
"debugLabel": "",
"toolDescription": ""
},
{
"key": "system_httpContentType",
"renderTypeList": ["hidden"],
"valueType": "string",
"value": "json",
"label": "",
"required": false,
"debugLabel": "",
"toolDescription": ""
}
],
"outputs": [
{
"id": "error",
"key": "error",
"label": "workflow:request_error",
"description": "HTTP请求错误信息成功时返回空",
"valueType": "object",
"type": "static"
},
{
"id": "httpRawResponse",
"key": "httpRawResponse",
"required": true,
"label": "workflow:raw_response",
"description": "HTTP请求的原始响应。只能接受字符串或JSON类型响应数据。",
"valueType": "any",
"type": "static"
},
{
"id": "system_addOutputParam",
"key": "system_addOutputParam",
"type": "dynamic",
"valueType": "dynamic",
"label": "输出字段提取",
"customFieldConfig": {
"selectValueTypeList": [
"string",
"number",
"boolean",
"object",
"arrayString",
"arrayNumber",
"arrayBoolean",
"arrayObject",
"arrayAny",
"any",
"chatHistory",
"datasetQuote",
"dynamic",
"selectApp",
"selectDataset"
],
"showDescription": false,
"showDefaultValue": false
},
"description": "可以通过 JSONPath 语法来提取响应值中的指定字段",
"valueDesc": ""
}
]
}
],
"edges": [
{
"source": "uOX6ITvPWm9O",
"target": "pluginOutput",
"sourceHandle": "uOX6ITvPWm9O-source-right",
"targetHandle": "pluginOutput-target-left"
},
{
"source": "pluginInput",
"target": "uOX6ITvPWm9O",
"sourceHandle": "pluginInput-source-right",
"targetHandle": "uOX6ITvPWm9O-target-left"
}
],
"chatConfig": {
"welcomeText": "",
"variables": [],
"questionGuide": {
"open": false,
"model": "gpt-4o-mini",
"customPrompt": ""
},
"ttsConfig": {
"type": "web"
},
"whisperConfig": {
"open": false,
"autoSend": false,
"autoTTSResponse": false
},
"chatInputGuide": {
"open": false,
"textList": [],
"customUrl": ""
},
"instruction": "通过SMTP协议发送电子邮件",
"autoExecute": {
"open": false,
"defaultPrompt": ""
},
"_id": "67ad649ea4b6b8eefa9d3d0d"
}
}
}

packages/service/common/api/type.d.ts (new file, 29 lines)
View File

@@ -0,0 +1,29 @@
import { FeishuServer, YuqueServer } from '@fastgpt/global/core/dataset/apiDataset';
import {
DeepRagSearchProps,
SearchDatasetDataResponse
} from '../../core/dataset/search/controller';
import { AuthOpenApiLimitProps } from '../../support/openapi/auth';
import { CreateUsageProps, ConcatUsageProps } from '@fastgpt/global/support/wallet/usage/api';
import {
GetProApiDatasetFileContentParams,
GetProApiDatasetFileListParams,
GetProApiDatasetFilePreviewUrlParams
} from '../../core/dataset/apiDataset/proApi';
declare global {
var textCensorHandler: (params: { text: string }) => Promise<{ code: number; message?: string }>;
var deepRagHandler: (data: DeepRagSearchProps) => Promise<SearchDatasetDataResponse>;
var authOpenApiHandler: (data: AuthOpenApiLimitProps) => Promise<any>;
var createUsageHandler: (data: CreateUsageProps) => Promise<void>;
var concatUsageHandler: (data: ConcatUsageProps) => Promise<void>;
// API dataset
var getProApiDatasetFileList: (data: GetProApiDatasetFileListParams) => Promise<APIFileItem[]>;
var getProApiDatasetFileContent: (
data: GetProApiDatasetFileContentParams
) => Promise<ApiFileReadContentResponse>;
var getProApiDatasetFilePreviewUrl: (
data: GetProApiDatasetFilePreviewUrlParams
) => Promise<string>;
}
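These globals are declared here and called elsewhere in the service package (for example postTextCensor and deepRagSearch further below). A sketch of how a pro/commercial build might register one of them; the handler body is purely illustrative:

// Illustrative registration only: a permissive censor handler that passes all text.
global.textCensorHandler = async ({ text }: { text: string }) => {
  // A real implementation would send `text` to a moderation service.
  return { code: 200 };
};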

View File

@@ -1,5 +1,4 @@
import { connectionMongo, getMongoModel } from '../../mongo';
const { Schema } = connectionMongo;
import { getMongoModel, Schema } from '../../mongo';
import { RawTextBufferSchemaType } from './type';
export const collectionName = 'buffer_rawtexts';

View File

@@ -1,5 +1,4 @@
import { connectionMongo, getMongoModel, type Model } from '../../../common/mongo';
const { Schema, model, models } = connectionMongo;
import { Schema, getMongoModel } from '../../../common/mongo';
import { TTSBufferSchemaType } from './type.d';
export const collectionName = 'buffer_tts';

View File

@@ -0,0 +1,79 @@
import { ConnectionOptions, Processor, Queue, QueueOptions, Worker, WorkerOptions } from 'bullmq';
import { addLog } from '../system/log';
import { newQueueRedisConnection, newWorkerRedisConnection } from '../redis';
const defaultWorkerOpts: Omit<ConnectionOptions, 'connection'> = {
removeOnComplete: {
count: 0 // Delete jobs immediately on completion
},
removeOnFail: {
count: 0 // Delete jobs immediately on failure
}
};
export enum QueueNames {
websiteSync = 'websiteSync'
}
export const queues = (() => {
if (!global.queues) {
global.queues = new Map<QueueNames, Queue>();
}
return global.queues;
})();
export const workers = (() => {
if (!global.workers) {
global.workers = new Map<QueueNames, Worker>();
}
return global.workers;
})();
export function getQueue<DataType, ReturnType = void>(
name: QueueNames,
opts?: Omit<QueueOptions, 'connection'>
): Queue<DataType, ReturnType> {
// check if global.queues has the queue
const queue = queues.get(name);
if (queue) {
return queue as Queue<DataType, ReturnType>;
}
const newQueue = new Queue<DataType, ReturnType>(name.toString(), {
connection: newQueueRedisConnection(),
...opts
});
// default error handler, to avoid unhandled exceptions
newQueue.on('error', (error) => {
addLog.error(`MQ Queue [${name}]: ${error.message}`, error);
});
queues.set(name, newQueue);
return newQueue;
}
export function getWorker<DataType, ReturnType = void>(
name: QueueNames,
processor: Processor<DataType, ReturnType>,
opts?: Omit<WorkerOptions, 'connection'>
): Worker<DataType, ReturnType> {
const worker = workers.get(name);
if (worker) {
return worker as Worker<DataType, ReturnType>;
}
const newWorker = new Worker<DataType, ReturnType>(name.toString(), processor, {
connection: newWorkerRedisConnection(),
...defaultWorkerOpts,
...opts
});
// default error handler, to avoid unhandled exceptions
newWorker.on('error', (error) => {
addLog.error(`MQ Worker [${name}]: ${error.message}`, error);
});
newWorker.on('failed', (jobId, error) => {
addLog.error(`MQ Worker [${name}]: ${error.message}`, error);
});
workers.set(name, newWorker);
return newWorker;
}
export * from 'bullmq';
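A sketch of how these helpers fit together. The import path is assumed, the queue name reuses QueueNames.websiteSync from this module, and the job payload and processing body are illustrative (the real website-sync usage appears in core/dataset/websiteSync further below):

import { getQueue, getWorker, QueueNames } from '@fastgpt/service/common/bullmq';

type DemoJobData = { datasetId: string };

const queue = getQueue<DemoJobData>(QueueNames.websiteSync);

getWorker<DemoJobData>(QueueNames.websiteSync, async (job) => {
  // Process one job; a thrown error marks the job as failed and triggers BullMQ retries.
  console.log('processing dataset', job.data.datasetId);
});

void queue.add('demo-job', { datasetId: 'demo-dataset-id' });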

View File

@@ -0,0 +1,7 @@
import { Queue, Worker } from 'bullmq';
import { QueueNames } from './index';
declare global {
var queues: Map<QueueNames, Queue> | undefined;
var workers: Map<QueueNames, Worker> | undefined;
}

View File

@@ -1,5 +1,4 @@
import { connectionMongo, getMongoModel, type Model } from '../../mongo';
const { Schema } = connectionMongo;
import { Schema, getMongoModel } from '../../mongo';
const DatasetFileSchema = new Schema({});
const ChatFileSchema = new Schema({});

View File

@@ -1,7 +1,6 @@
import { TeamCollectionName } from '@fastgpt/global/support/user/team/constant';
import { connectionMongo, getMongoModel } from '../../mongo';
import { Schema, getMongoModel } from '../../mongo';
import { MongoImageSchemaType } from '@fastgpt/global/common/file/image/type.d';
const { Schema } = connectionMongo;
const ImageSchema = new Schema({
teamId: {

View File

@@ -1,17 +1,26 @@
import { addLog } from '../../common/system/log';
import mongoose, { Model } from 'mongoose';
import mongoose, { Model, Mongoose } from 'mongoose';
export default mongoose;
export * from 'mongoose';
export const MONGO_URL = process.env.MONGODB_URI as string;
export const MONGO_LOG_URL = (process.env.MONGODB_LOG_URI ?? process.env.MONGODB_URI) as string;
export const connectionMongo = (() => {
if (!global.mongodb) {
global.mongodb = mongoose;
global.mongodb = new Mongoose();
}
return global.mongodb;
})();
export const connectionLogMongo = (() => {
if (!global.mongodbLog) {
global.mongodbLog = new Mongoose();
}
return global.mongodbLog;
})();
const addCommonMiddleware = (schema: mongoose.Schema) => {
const operations = [
/^find/,
@@ -71,6 +80,19 @@ export const getMongoModel = <T>(name: string, schema: mongoose.Schema) => {
return model;
};
export const getMongoLogModel = <T>(name: string, schema: mongoose.Schema) => {
if (connectionLogMongo.models[name]) return connectionLogMongo.models[name] as Model<T>;
console.log('Load model======', name);
addCommonMiddleware(schema);
const model = connectionLogMongo.model<T>(name, schema);
// Sync index
syncMongoIndex(model);
return model;
};
const syncMongoIndex = async (model: Model<any>) => {
if (process.env.SYNC_INDEX !== '0' && process.env.NODE_ENV !== 'test') {
try {

View File

@@ -1,6 +1,5 @@
import { delay } from '@fastgpt/global/common/system/utils';
import { addLog } from '../system/log';
import { connectionMongo } from './index';
import type { Mongoose } from 'mongoose';
const maxConnecting = Math.max(30, Number(process.env.DB_MAX_LINK || 20));
@@ -8,41 +7,41 @@ const maxConnecting = Math.max(30, Number(process.env.DB_MAX_LINK || 20));
/**
* connect MongoDB and init data
*/
export async function connectMongo(): Promise<Mongoose> {
export async function connectMongo(db: Mongoose, url: string): Promise<Mongoose> {
/* Connecting, connected will return */
if (connectionMongo.connection.readyState !== 0) {
return connectionMongo;
if (db.connection.readyState !== 0) {
return db;
}
console.log('mongo start connect');
console.log('MongoDB start connect');
try {
// Remove existing listeners to prevent duplicates
connectionMongo.connection.removeAllListeners('error');
connectionMongo.connection.removeAllListeners('disconnected');
connectionMongo.set('strictQuery', 'throw');
db.connection.removeAllListeners('error');
db.connection.removeAllListeners('disconnected');
db.set('strictQuery', 'throw');
connectionMongo.connection.on('error', async (error) => {
db.connection.on('error', async (error) => {
console.log('mongo error', error);
try {
if (connectionMongo.connection.readyState !== 0) {
await connectionMongo.disconnect();
if (db.connection.readyState !== 0) {
await db.disconnect();
await delay(1000);
await connectMongo();
await connectMongo(db, url);
}
} catch (error) {}
});
connectionMongo.connection.on('disconnected', async () => {
db.connection.on('disconnected', async () => {
console.log('mongo disconnected');
try {
if (connectionMongo.connection.readyState !== 0) {
await connectionMongo.disconnect();
if (db.connection.readyState !== 0) {
await db.disconnect();
await delay(1000);
await connectMongo();
await connectMongo(db, url);
}
} catch (error) {}
});
await connectionMongo.connect(process.env.MONGODB_URI as string, {
const options = {
bufferCommands: true,
maxConnecting: maxConnecting,
maxPoolSize: maxConnecting,
@@ -53,18 +52,18 @@ export async function connectMongo(): Promise<Mongoose> {
maxIdleTimeMS: 300000,
retryWrites: true,
retryReads: true
};
// readPreference: 'secondaryPreferred',
// readConcern: { level: 'local' },
// writeConcern: { w: 'majority', j: true }
});
await db.connect(url, options);
console.log('mongo connected');
return connectionMongo;
return db;
} catch (error) {
addLog.error('mongo connect error', error);
await connectionMongo.disconnect();
addLog.error('Mongo connect error', error);
await db.disconnect();
await delay(1000);
return connectMongo();
return connectMongo(db, url);
}
}
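With connectMongo now taking the target Mongoose instance and connection string explicitly, the main and log databases can be connected independently. A sketch of a call site (the file names in the import paths are assumptions):

import { connectionMongo, connectionLogMongo, MONGO_URL, MONGO_LOG_URL } from './index';
import { connectMongo } from './init';

export const initMongoConnections = async () => {
  await Promise.all([
    connectMongo(connectionMongo, MONGO_URL),
    connectMongo(connectionLogMongo, MONGO_LOG_URL)
  ]);
};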

View File

@@ -3,4 +3,5 @@ import type { Logger } from 'winston';
declare global {
var mongodb: Mongoose | undefined;
var mongodbLog: Mongoose | undefined;
}

View File

@@ -0,0 +1,38 @@
import { getGlobalRedisCacheConnection } from './index';
import { addLog } from '../system/log';
import { retryFn } from '@fastgpt/global/common/system/utils';
export enum CacheKeyEnum {
team_vector_count = 'team_vector_count'
}
export const setRedisCache = async (
key: string,
data: string | Buffer | number,
expireSeconds?: number
) => {
return await retryFn(async () => {
try {
const redis = getGlobalRedisCacheConnection();
if (expireSeconds) {
await redis.set(key, data, 'EX', expireSeconds);
} else {
await redis.set(key, data);
}
} catch (error) {
addLog.error('Set cache error:', error);
return Promise.reject(error);
}
});
};
export const getRedisCache = async (key: string) => {
const redis = getGlobalRedisCacheConnection();
return await retryFn(() => redis.get(key));
};
export const delRedisCache = async (key: string) => {
const redis = getGlobalRedisCacheConnection();
await retryFn(() => redis.del(key));
};
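A cache-aside sketch built on the helpers above; it mirrors the team vector count usage in the vector store controller further below. The loader callback and TTL are illustrative:

import { getRedisCache, setRedisCache, CacheKeyEnum } from './cache';

export const getTeamVectorCountCached = async (
  teamId: string,
  loadFromDb: () => Promise<number>
) => {
  const key = `${CacheKeyEnum.team_vector_count}:${teamId}`;

  const cached = await getRedisCache(key);
  if (cached) return Number(cached);

  const count = await loadFromDb();
  await setRedisCache(key, count, 30 * 60); // cache for 30 minutes
  return count;
};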

View File

@@ -0,0 +1,43 @@
import { addLog } from '../system/log';
import Redis from 'ioredis';
const REDIS_URL = process.env.REDIS_URL ?? 'redis://localhost:6379';
export const newQueueRedisConnection = () => {
const redis = new Redis(REDIS_URL);
redis.on('connect', () => {
console.log('Redis connected');
});
redis.on('error', (error) => {
console.error('Redis connection error', error);
});
return redis;
};
export const newWorkerRedisConnection = () => {
const redis = new Redis(REDIS_URL, {
maxRetriesPerRequest: null
});
redis.on('connect', () => {
console.log('Redis connected');
});
redis.on('error', (error) => {
console.error('Redis connection error', error);
});
return redis;
};
export const getGlobalRedisCacheConnection = () => {
if (global.redisCache) return global.redisCache;
global.redisCache = new Redis(REDIS_URL, { keyPrefix: 'fastgpt:cache:' });
global.redisCache.on('connect', () => {
addLog.info('Redis connected');
});
global.redisCache.on('error', (error) => {
addLog.error('Redis connection error', error);
});
return global.redisCache;
};

View File

@@ -0,0 +1,5 @@
import Redis from 'ioredis';
declare global {
var redisCache: Redis | null;
}

View File

@@ -1,4 +1,3 @@
export const FastGPTProUrl = process.env.PRO_URL ? `${process.env.PRO_URL}/api` : '';
export const isFastGPTMainService = !!process.env.PRO_URL;
// @ts-ignore
export const isFastGPTProService = () => !!global.systemConfig;

View File

@@ -1,4 +1,4 @@
import { getMongoModel, Schema } from '../../../common/mongo';
import { getMongoLogModel as getMongoModel, Schema } from '../../../common/mongo';
import { SystemLogType } from './type';
import { LogLevelEnum } from './constant';

View File

@@ -1,5 +1,5 @@
export enum TimerIdEnum {
checkInValidDatasetFiles = 'checkInValidDatasetFiles',
checkExpiredFiles = 'checkExpiredFiles',
checkInvalidDatasetData = 'checkInvalidDatasetData',
checkInvalidVector = 'checkInvalidVector',
clearExpiredSubPlan = 'clearExpiredSubPlan',

View File

@@ -2,10 +2,13 @@
import { PgVectorCtrl } from './pg/class';
import { ObVectorCtrl } from './oceanbase/class';
import { getVectorsByText } from '../../core/ai/embedding';
import { InsertVectorProps } from './controller.d';
import { DelDatasetVectorCtrlProps, InsertVectorProps } from './controller.d';
import { EmbeddingModelItemType } from '@fastgpt/global/core/ai/model.d';
import { MILVUS_ADDRESS, PG_ADDRESS, OCEANBASE_ADDRESS } from './constants';
import { MilvusCtrl } from './milvus/class';
import { setRedisCache, getRedisCache, delRedisCache, CacheKeyEnum } from '../redis/cache';
import { throttle } from 'lodash';
import { retryFn } from '@fastgpt/global/common/system/utils';
const getVectorObj = () => {
if (PG_ADDRESS) return new PgVectorCtrl();
@@ -15,13 +18,33 @@ const getVectorObj = () => {
return new PgVectorCtrl();
};
const getChcheKey = (teamId: string) => `${CacheKeyEnum.team_vector_count}:${teamId}`;
const onDelCache = throttle((teamId: string) => delRedisCache(getChcheKey(teamId)), 30000, {
leading: true,
trailing: true
});
const Vector = getVectorObj();
export const initVectorStore = Vector.init;
export const deleteDatasetDataVector = Vector.delete;
export const recallFromVectorStore = Vector.embRecall;
export const getVectorDataByTime = Vector.getVectorDataByTime;
export const getVectorCountByTeamId = Vector.getVectorCountByTeamId;
export const getVectorCountByTeamId = async (teamId: string) => {
const key = getChcheKey(teamId);
const countStr = await getRedisCache(key);
if (countStr) {
return Number(countStr);
}
const count = await Vector.getVectorCountByTeamId(teamId);
await setRedisCache(key, count, 30 * 60);
return count;
};
export const getVectorCountByDatasetId = Vector.getVectorCountByDatasetId;
export const getVectorCountByCollectionId = Vector.getVectorCountByCollectionId;
@@ -33,18 +56,28 @@ export const insertDatasetDataVector = async ({
query: string;
model: EmbeddingModelItemType;
}) => {
const { vectors, tokens } = await getVectorsByText({
model,
input: query,
type: 'db'
});
const { insertId } = await Vector.insert({
...props,
vector: vectors[0]
});
return retryFn(async () => {
const { vectors, tokens } = await getVectorsByText({
model,
input: query,
type: 'db'
});
const { insertId } = await Vector.insert({
...props,
vector: vectors[0]
});
return {
tokens,
insertId
};
onDelCache(props.teamId);
return {
tokens,
insertId
};
});
};
export const deleteDatasetDataVector = async (props: DelDatasetVectorCtrlProps) => {
const result = await Vector.delete(props);
onDelCache(props.teamId);
return result;
};

View File

@@ -2,6 +2,7 @@ import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import {
ChatCompletionCreateParamsNonStreaming,
ChatCompletionCreateParamsStreaming,
CompletionFinishReason,
StreamChatType
} from '@fastgpt/global/core/ai/type';
import { getLLMModel } from './model';
@@ -142,26 +143,40 @@ export const parseReasoningStreamContent = () => {
content?: string;
reasoning_content?: string;
};
finish_reason?: CompletionFinishReason;
}[];
},
parseThinkTag = false
): [string, string] => {
): {
reasoningContent: string;
content: string;
finishReason: CompletionFinishReason;
} => {
const content = part.choices?.[0]?.delta?.content || '';
const finishReason = part.choices?.[0]?.finish_reason || null;
// @ts-ignore
const reasoningContent = part.choices?.[0]?.delta?.reasoning_content || '';
if (reasoningContent || !parseThinkTag) {
isInThinkTag = false;
return [reasoningContent, content];
return { reasoningContent, content, finishReason };
}
if (!content) {
return ['', ''];
return {
reasoningContent: '',
content: '',
finishReason
};
}
// If we are not inside a think tag, or reasoningContent was already parsed by the API, return reasoningContent and content
if (isInThinkTag === false) {
return ['', content];
return {
reasoningContent: '',
content,
finishReason
};
}
// Detect whether the data starts with a think tag
@@ -170,17 +185,29 @@ export const parseReasoningStreamContent = () => {
startTagBuffer += content;
// Not enough content yet; skip parsing for now
if (startTagBuffer.length < startTag.length) {
return ['', ''];
return {
reasoningContent: '',
content: '',
finishReason
};
}
if (startTagBuffer.startsWith(startTag)) {
isInThinkTag = true;
return [startTagBuffer.slice(startTag.length), ''];
return {
reasoningContent: startTagBuffer.slice(startTag.length),
content: '',
finishReason
};
}
// No think start tag matched: assume we are not in a think tag and return the buffered text as content
isInThinkTag = false;
return ['', startTagBuffer];
return {
reasoningContent: '',
content: startTagBuffer,
finishReason
};
}
// Confirmed think-tag content: start returning think content while watching for </think>
@@ -201,19 +228,35 @@ export const parseReasoningStreamContent = () => {
if (endTagBuffer.includes(endTag)) {
isInThinkTag = false;
const answer = endTagBuffer.slice(endTag.length);
return ['', answer];
return {
reasoningContent: '',
content: answer,
finishReason
};
} else if (endTagBuffer.length >= endTag.length) {
// Buffered content exceeds the end-tag length without matching </think>: the guess failed, still in the think phase.
const tmp = endTagBuffer;
endTagBuffer = '';
return [tmp, ''];
return {
reasoningContent: tmp,
content: '',
finishReason
};
}
return ['', ''];
return {
reasoningContent: '',
content: '',
finishReason
};
} else if (content.includes(endTag)) {
// The content fully contains </think>; end the think phase immediately
isInThinkTag = false;
const [think, answer] = content.split(endTag);
return [think, answer];
return {
reasoningContent: think,
content: answer,
finishReason
};
} else {
// No buffer and no full </think> match; start partial </think> detection.
for (let i = 1; i < endTag.length; i++) {
@@ -222,13 +265,21 @@ export const parseReasoningStreamContent = () => {
if (content.endsWith(partialEndTag)) {
const think = content.slice(0, -partialEndTag.length);
endTagBuffer += partialEndTag;
return [think, ''];
return {
reasoningContent: think,
content: '',
finishReason
};
}
}
}
// No end tag matched at all; still in the think phase.
return [content, ''];
return {
reasoningContent: content,
content: '',
finishReason
};
};
const getStartTagBuffer = () => startTagBuffer;
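Since parsePart now returns an object (including finishReason) instead of a tuple, stream consumers accumulate the pieces as below. A condensed sketch of the new consumer pattern, assuming it lives in or imports from this utils module; the real call sites are in the dispatch modules later in this comparison:

import type {
  StreamChatType,
  CompletionFinishReason
} from '@fastgpt/global/core/ai/type';

const collectStream = async (stream: StreamChatType, parseThinkTag = true) => {
  const { parsePart } = parseReasoningStreamContent();

  let answer = '';
  let reasoning = '';
  let finishReason: CompletionFinishReason = null;

  for await (const part of stream) {
    const { reasoningContent, content, finishReason: partFinish } = parsePart(part, parseThinkTag);
    // Keep the first non-null finish reason reported by the stream.
    finishReason = finishReason || partFinish;
    reasoning += reasoningContent;
    answer += content;
  }

  return { answer, reasoning, finishReason };
};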

View File

@@ -1,7 +1,6 @@
import { POST } from './plusRequest';
export const postTextCensor = (data: { text: string }) =>
POST<{ code?: number; message: string }>('/common/censor/check', data)
global
.textCensorHandler(data)
.then((res) => {
if (res?.code === 5000) {
return Promise.reject(res);

View File

@@ -0,0 +1,25 @@
import { ParentIdType } from '@fastgpt/global/common/parentFolder/type';
import { FeishuServer, YuqueServer } from '@fastgpt/global/core/dataset/apiDataset';
export enum ProApiDatasetOperationTypeEnum {
LIST = 'list',
READ = 'read',
CONTENT = 'content'
}
export type ProApiDatasetCommonParams = {
feishuServer?: FeishuServer;
yuqueServer?: YuqueServer;
};
export type GetProApiDatasetFileListParams = ProApiDatasetCommonParams & {
parentId?: ParentIdType;
};
export type GetProApiDatasetFileContentParams = ProApiDatasetCommonParams & {
apiFileId: string;
};
export type GetProApiDatasetFilePreviewUrlParams = ProApiDatasetCommonParams & {
apiFileId: string;
};

View File

@@ -1,6 +1,7 @@
import {
DatasetCollectionTypeEnum,
DatasetCollectionDataProcessModeEnum
DatasetCollectionDataProcessModeEnum,
DatasetTypeEnum
} from '@fastgpt/global/core/dataset/constants';
import type { CreateDatasetCollectionParams } from '@fastgpt/global/core/dataset/api.d';
import { MongoDatasetCollection } from './schema';
@@ -104,7 +105,8 @@ export const createCollectionAndInsertData = async ({
hashRawText: hashStr(rawText),
rawTextLength: rawText.length,
nextSyncTime: (() => {
if (!dataset.autoSync) return undefined;
// ignore auto collections sync for website datasets
if (!dataset.autoSync && dataset.type === DatasetTypeEnum.websiteDataset) return undefined;
if (
[DatasetCollectionTypeEnum.link, DatasetCollectionTypeEnum.apiFile].includes(
createCollectionParams.type

View File

@@ -1,13 +1,8 @@
import { connectionMongo, getMongoModel } from '../../../common/mongo';
const { Schema, model, models } = connectionMongo;
const { Schema } = connectionMongo;
import { DatasetCollectionSchemaType } from '@fastgpt/global/core/dataset/type.d';
import {
DatasetCollectionTypeMap,
DatasetCollectionDataProcessModeEnum,
ChunkSettingModeEnum,
DataChunkSplitModeEnum
} from '@fastgpt/global/core/dataset/constants';
import { DatasetCollectionName } from '../schema';
import { DatasetCollectionTypeMap } from '@fastgpt/global/core/dataset/constants';
import { ChunkSettings, DatasetCollectionName } from '../schema';
import {
TeamCollectionName,
TeamMemberCollectionName
@@ -90,25 +85,7 @@ const DatasetCollectionSchema = new Schema({
customPdfParse: Boolean,
// Chunk settings
imageIndex: Boolean,
autoIndexes: Boolean,
trainingType: {
type: String,
enum: Object.values(DatasetCollectionDataProcessModeEnum)
},
chunkSettingMode: {
type: String,
enum: Object.values(ChunkSettingModeEnum)
},
chunkSplitMode: {
type: String,
enum: Object.values(DataChunkSplitModeEnum)
},
chunkSize: Number,
chunkSplitter: String,
indexSize: Number,
qaPrompt: String
...ChunkSettings
});
DatasetCollectionSchema.virtual('dataset', {

View File

@@ -9,6 +9,8 @@ import { deleteDatasetDataVector } from '../../common/vectorStore/controller';
import { MongoDatasetDataText } from './data/dataTextSchema';
import { DatasetErrEnum } from '@fastgpt/global/common/error/code/dataset';
import { retryFn } from '@fastgpt/global/common/system/utils';
import { removeWebsiteSyncJobScheduler } from './websiteSync';
import { DatasetTypeEnum } from '@fastgpt/global/core/dataset/constants';
/* ============= dataset ========== */
/* find all datasetId by top datasetId */

View File

@@ -9,7 +9,6 @@ import { readRawContentByFileBuffer } from '../../common/file/read/utils';
import { parseFileExtensionFromUrl } from '@fastgpt/global/common/string/tools';
import { APIFileServer, FeishuServer, YuqueServer } from '@fastgpt/global/core/dataset/apiDataset';
import { useApiDatasetRequest } from './apiDataset/api';
import { POST } from '../../common/api/plusRequest';
export const readFileRawTextByUrl = async ({
teamId,
@@ -168,11 +167,7 @@ export const readApiServerFileContent = async ({
}
if (feishuServer || yuqueServer) {
return POST<{
title?: string;
rawText: string;
}>(`/core/dataset/systemApiDataset`, {
type: 'content',
return global.getProApiDatasetFileContent({
feishuServer,
yuqueServer,
apiFileId

View File

@@ -1,7 +1,8 @@
import { getMongoModel, Schema } from '../../common/mongo';
import {
DatasetStatusEnum,
DatasetStatusMap,
ChunkSettingModeEnum,
DataChunkSplitModeEnum,
DatasetCollectionDataProcessModeEnum,
DatasetTypeEnum,
DatasetTypeMap
} from '@fastgpt/global/core/dataset/constants';
@@ -13,6 +14,28 @@ import type { DatasetSchemaType } from '@fastgpt/global/core/dataset/type.d';
export const DatasetCollectionName = 'datasets';
export const ChunkSettings = {
imageIndex: Boolean,
autoIndexes: Boolean,
trainingType: {
type: String,
enum: Object.values(DatasetCollectionDataProcessModeEnum)
},
chunkSettingMode: {
type: String,
enum: Object.values(ChunkSettingModeEnum)
},
chunkSplitMode: {
type: String,
enum: Object.values(DataChunkSplitModeEnum)
},
chunkSize: Number,
chunkSplitter: String,
indexSize: Number,
qaPrompt: String
};
const DatasetSchema = new Schema({
parentId: {
type: Schema.Types.ObjectId,
@@ -40,11 +63,6 @@ const DatasetSchema = new Schema({
required: true,
default: DatasetTypeEnum.dataset
},
status: {
type: String,
enum: Object.keys(DatasetStatusMap),
default: DatasetStatusEnum.active
},
avatar: {
type: String,
default: '/icon/logo.svg'
@@ -84,6 +102,9 @@ const DatasetSchema = new Schema({
}
}
},
chunkSettings: {
type: ChunkSettings
},
inheritPermission: {
type: Boolean,
default: true
@@ -98,9 +119,8 @@ const DatasetSchema = new Schema({
type: Object
},
autoSync: Boolean,
// abandoned
autoSync: Boolean,
externalReadUrl: {
type: String
},

View File

@@ -24,7 +24,6 @@ import { MongoDatasetCollectionTags } from '../tag/schema';
import { readFromSecondary } from '../../../common/mongo/utils';
import { MongoDatasetDataText } from '../data/dataTextSchema';
import { ChatItemType } from '@fastgpt/global/core/chat/type';
import { POST } from '../../../common/api/plusRequest';
import { NodeInputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { datasetSearchQueryExtension } from './utils';
import type { RerankModelItemType } from '@fastgpt/global/core/ai/model.d';
@@ -850,5 +849,4 @@ export type DeepRagSearchProps = SearchDatasetDataProps & {
[NodeInputKeyEnum.datasetDeepSearchMaxTimes]?: number;
[NodeInputKeyEnum.datasetDeepSearchBg]?: string;
};
export const deepRagSearch = (data: DeepRagSearchProps) =>
POST<SearchDatasetDataResponse>('/core/dataset/deepRag', data);
export const deepRagSearch = (data: DeepRagSearchProps) => global.deepRagHandler(data);

View File

@@ -98,7 +98,9 @@ const TrainingDataSchema = new Schema({
}
],
default: []
}
},
errorMsg: String
});
TrainingDataSchema.virtual('dataset', {

View File

@@ -0,0 +1,101 @@
import { Processor } from 'bullmq';
import { getQueue, getWorker, QueueNames } from '../../../common/bullmq';
import { DatasetStatusEnum } from '@fastgpt/global/core/dataset/constants';
export type WebsiteSyncJobData = {
datasetId: string;
};
export const websiteSyncQueue = getQueue<WebsiteSyncJobData>(QueueNames.websiteSync, {
defaultJobOptions: {
attempts: 3, // retry 3 times
backoff: {
type: 'exponential',
delay: 1000 // delay 1 second between retries
}
}
});
export const getWebsiteSyncWorker = (processor: Processor<WebsiteSyncJobData>) => {
return getWorker<WebsiteSyncJobData>(QueueNames.websiteSync, processor, {
removeOnFail: {
age: 15 * 24 * 60 * 60, // Keep up to 15 days
count: 1000 // Keep up to 1000 jobs
},
concurrency: 1 // Set worker to process only 1 job at a time
});
};
export const addWebsiteSyncJob = (data: WebsiteSyncJobData) => {
const datasetId = String(data.datasetId);
// deduplication: make sure only 1 job
return websiteSyncQueue.add(datasetId, data, { deduplication: { id: datasetId } });
};
export const getWebsiteSyncDatasetStatus = async (datasetId: string) => {
const jobId = await websiteSyncQueue.getDeduplicationJobId(datasetId);
if (!jobId) {
return {
status: DatasetStatusEnum.active,
errorMsg: undefined
};
}
const job = await websiteSyncQueue.getJob(jobId);
if (!job) {
return {
status: DatasetStatusEnum.active,
errorMsg: undefined
};
}
const jobState = await job.getState();
if (jobState === 'failed' || jobState === 'unknown') {
return {
status: DatasetStatusEnum.error,
errorMsg: job.failedReason
};
}
if (['waiting-children', 'waiting'].includes(jobState)) {
return {
status: DatasetStatusEnum.waiting,
errorMsg: undefined
};
}
if (jobState === 'active') {
return {
status: DatasetStatusEnum.syncing,
errorMsg: undefined
};
}
return {
status: DatasetStatusEnum.active,
errorMsg: undefined
};
};
// Scheduler setting
const repeatDuration = 24 * 60 * 60 * 1000; // every day
export const upsertWebsiteSyncJobScheduler = (data: WebsiteSyncJobData, startDate?: number) => {
const datasetId = String(data.datasetId);
return websiteSyncQueue.upsertJobScheduler(
datasetId,
{
every: repeatDuration,
startDate: startDate || new Date().getTime() + repeatDuration // First run tomorrow
},
{
name: datasetId,
data
}
);
};
export const getWebsiteSyncJobScheduler = (datasetId: string) => {
return websiteSyncQueue.getJobScheduler(String(datasetId));
};
export const removeWebsiteSyncJobScheduler = (datasetId: string) => {
return websiteSyncQueue.removeJobScheduler(String(datasetId));
};
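A hypothetical caller combining the exports above: enqueue an immediate sync (deduplicated per dataset id), make sure the daily repeat scheduler exists, then read back the derived status:

export const triggerWebsiteSync = async (datasetId: string) => {
  // Deduplicated: at most one pending job per dataset id.
  await addWebsiteSyncJob({ datasetId });

  // Ensure the daily scheduler exists (first run defaults to tomorrow).
  await upsertWebsiteSyncJobScheduler({ datasetId });

  const { status, errorMsg } = await getWebsiteSyncDatasetStatus(datasetId);
  return { status, errorMsg };
};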

View File

@@ -29,9 +29,9 @@ import { InteractiveNodeResponseType } from '@fastgpt/global/core/workflow/templ
import { getFileContentFromLinks, getHistoryFileLinks } from '../../tools/readFiles';
import { parseUrlToFileType } from '@fastgpt/global/common/file/tools';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import { postTextCensor } from '../../../../../common/api/requestPlusApi';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
import { getDocumentQuotePrompt } from '@fastgpt/global/core/ai/prompt/AIChat';
import { postTextCensor } from '../../../../chat/postTextCensor';
type Response = DispatchNodeResultType<{
[NodeOutputKeyEnum.answerText]: string;
@@ -176,7 +176,8 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
toolNodeOutputTokens,
completeMessages = [], // The actual message sent to AI(just save text)
assistantResponses = [], // FastGPT system store assistant.value response
runTimes
runTimes,
finish_reason
} = await (async () => {
const adaptMessages = chats2GPTMessages({
messages,
@@ -276,7 +277,8 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
useVision
),
toolDetail: childToolResponse,
mergeSignId: nodeId
mergeSignId: nodeId,
finishReason: finish_reason
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
// Credit cost of the tool call itself

View File

@@ -1,6 +1,10 @@
import { createChatCompletion } from '../../../../ai/config';
import { filterGPTMessageByMaxContext, loadRequestMessages } from '../../../../chat/utils';
import { StreamChatType, ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import {
StreamChatType,
ChatCompletionMessageParam,
CompletionFinishReason
} from '@fastgpt/global/core/ai/type';
import { NextApiResponse } from 'next';
import { responseWriteController } from '../../../../../common/response';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
@@ -252,9 +256,9 @@ export const runToolWithPromptCall = async (
}
});
const { answer, reasoning } = await (async () => {
const { answer, reasoning, finish_reason } = await (async () => {
if (res && isStreamResponse) {
const { answer, reasoning } = await streamResponse({
const { answer, reasoning, finish_reason } = await streamResponse({
res,
toolNodes,
stream: aiResponse,
@@ -262,8 +266,9 @@ export const runToolWithPromptCall = async (
aiChatReasoning
});
return { answer, reasoning };
return { answer, reasoning, finish_reason };
} else {
const finish_reason = aiResponse.choices?.[0]?.finish_reason as CompletionFinishReason;
const content = aiResponse.choices?.[0]?.message?.content || '';
const reasoningContent: string = aiResponse.choices?.[0]?.message?.reasoning_content || '';
@@ -271,14 +276,16 @@ export const runToolWithPromptCall = async (
if (reasoningContent || !aiChatReasoning) {
return {
answer: content,
reasoning: reasoningContent
reasoning: reasoningContent,
finish_reason
};
}
const [think, answer] = parseReasoningContent(content);
return {
answer,
reasoning: think
reasoning: think,
finish_reason
};
}
})();
@@ -525,7 +532,8 @@ ANSWER: `;
toolNodeInputTokens,
toolNodeOutputTokens,
assistantResponses: toolNodeAssistants,
runTimes
runTimes,
finish_reason
}
);
};
@@ -550,15 +558,18 @@ async function streamResponse({
let startResponseWrite = false;
let answer = '';
let reasoning = '';
let finish_reason: CompletionFinishReason = null;
const { parsePart, getStartTagBuffer } = parseReasoningStreamContent();
for await (const part of stream) {
if (res.closed) {
stream.controller?.abort();
finish_reason = 'close';
break;
}
const [reasoningContent, content] = parsePart(part, aiChatReasoning);
const { reasoningContent, content, finishReason } = parsePart(part, aiChatReasoning);
finish_reason = finish_reason || finishReason;
answer += content;
reasoning += reasoningContent;
@@ -618,7 +629,7 @@ async function streamResponse({
}
}
return { answer, reasoning };
return { answer, reasoning, finish_reason };
}
const parseAnswer = (

View File

@@ -7,7 +7,8 @@ import {
ChatCompletionToolMessageParam,
ChatCompletionMessageParam,
ChatCompletionTool,
ChatCompletionAssistantMessageParam
ChatCompletionAssistantMessageParam,
CompletionFinishReason
} from '@fastgpt/global/core/ai/type';
import { NextApiResponse } from 'next';
import { responseWriteController } from '../../../../../common/response';
@@ -300,7 +301,7 @@ export const runToolWithToolChoice = async (
}
});
const { answer, toolCalls } = await (async () => {
const { answer, toolCalls, finish_reason } = await (async () => {
if (res && isStreamResponse) {
return streamResponse({
res,
@@ -310,6 +311,7 @@ export const runToolWithToolChoice = async (
});
} else {
const result = aiResponse as ChatCompletion;
const finish_reason = result.choices?.[0]?.finish_reason as CompletionFinishReason;
const calls = result.choices?.[0]?.message?.tool_calls || [];
const answer = result.choices?.[0]?.message?.content || '';
@@ -350,7 +352,8 @@ export const runToolWithToolChoice = async (
return {
answer,
toolCalls: toolCalls
toolCalls: toolCalls,
finish_reason
};
}
})();
@@ -549,8 +552,9 @@ export const runToolWithToolChoice = async (
toolNodeOutputTokens,
completeMessages,
assistantResponses: toolNodeAssistants,
toolWorkflowInteractiveResponse,
runTimes,
toolWorkflowInteractiveResponse
finish_reason
};
}
@@ -565,7 +569,8 @@ export const runToolWithToolChoice = async (
toolNodeInputTokens,
toolNodeOutputTokens,
assistantResponses: toolNodeAssistants,
runTimes
runTimes,
finish_reason
}
);
} else {
@@ -588,7 +593,8 @@ export const runToolWithToolChoice = async (
completeMessages,
assistantResponses: [...assistantResponses, ...toolNodeAssistant.value],
runTimes: (response?.runTimes || 0) + 1
runTimes: (response?.runTimes || 0) + 1,
finish_reason
};
}
};
@@ -612,14 +618,18 @@ async function streamResponse({
let textAnswer = '';
let callingTool: { name: string; arguments: string } | null = null;
let toolCalls: ChatCompletionMessageToolCall[] = [];
let finishReason: CompletionFinishReason = null;
for await (const part of stream) {
if (res.closed) {
stream.controller?.abort();
finishReason = 'close';
break;
}
const responseChoice = part.choices?.[0]?.delta;
const finish_reason = part.choices?.[0]?.finish_reason as CompletionFinishReason;
finishReason = finishReason || finish_reason;
if (responseChoice?.content) {
const content = responseChoice.content || '';
@@ -705,5 +715,5 @@ async function streamResponse({
}
}
return { answer: textAnswer, toolCalls };
return { answer: textAnswer, toolCalls, finish_reason: finishReason };
}

View File

@@ -1,4 +1,4 @@
import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { ChatCompletionMessageParam, CompletionFinishReason } from '@fastgpt/global/core/ai/type';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import type {
ModuleDispatchProps,
@@ -43,6 +43,7 @@ export type RunToolResponse = {
assistantResponses?: AIChatItemValueItemType[];
toolWorkflowInteractiveResponse?: WorkflowInteractiveResponseType;
[DispatchNodeResponseKeyEnum.runTimes]: number;
finish_reason?: CompletionFinishReason;
};
export type ToolNodeItemType = RuntimeNodeItemType & {
toolParams: RuntimeNodeItemType['inputs'];

View File

@@ -6,10 +6,13 @@ import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/cons
import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
import { parseReasoningContent, parseReasoningStreamContent } from '../../../ai/utils';
import { createChatCompletion } from '../../../ai/config';
import type { ChatCompletionMessageParam, StreamChatType } from '@fastgpt/global/core/ai/type.d';
import type {
ChatCompletionMessageParam,
CompletionFinishReason,
StreamChatType
} from '@fastgpt/global/core/ai/type.d';
import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
import { postTextCensor } from '../../../../common/api/requestPlusApi';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import type {
ChatDispatchProps,
@@ -47,6 +50,7 @@ import { getFileContentFromLinks, getHistoryFileLinks } from '../tools/readFiles
import { parseUrlToFileType } from '@fastgpt/global/common/file/tools';
import { i18nT } from '../../../../../web/i18n/utils';
import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
import { postTextCensor } from '../../../chat/postTextCensor';
export type ChatProps = ModuleDispatchProps<
AIChatNodeProps & {
@@ -101,7 +105,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
const modelConstantsData = getLLMModel(model);
if (!modelConstantsData) {
return Promise.reject('The chat model is undefined, you need to select a chat model.');
return Promise.reject(`Model ${model} is undefined, you need to select a chat model.`);
}
aiChatVision = modelConstantsData.vision && aiChatVision;
@@ -195,16 +199,17 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
}
});
const { answerText, reasoningText } = await (async () => {
const { answerText, reasoningText, finish_reason } = await (async () => {
if (isStreamResponse) {
if (!res) {
return {
answerText: '',
reasoningText: ''
reasoningText: '',
finish_reason: 'close' as const
};
}
// sse response
const { answer, reasoning } = await streamResponse({
const { answer, reasoning, finish_reason } = await streamResponse({
res,
stream: response,
aiChatReasoning,
@@ -215,9 +220,12 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
return {
answerText: answer,
reasoningText: reasoning
reasoningText: reasoning,
finish_reason
};
} else {
const finish_reason = response.choices?.[0]?.finish_reason as CompletionFinishReason;
const { content, reasoningContent } = (() => {
const content = response.choices?.[0]?.message?.content || '';
// @ts-ignore
@@ -260,7 +268,8 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
return {
answerText: content,
reasoningText: reasoningContent
reasoningText: reasoningContent,
finish_reason
};
}
})();
@@ -303,7 +312,8 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
maxToken: max_tokens,
reasoningText,
historyPreview: getHistoryPreview(chatCompleteMessages, 10000, aiChatVision),
contextTotalLen: completeMessages.length
contextTotalLen: completeMessages.length,
finishReason: finish_reason
},
[DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
{
@@ -528,15 +538,18 @@ async function streamResponse({
});
let answer = '';
let reasoning = '';
let finish_reason: CompletionFinishReason = null;
const { parsePart, getStartTagBuffer } = parseReasoningStreamContent();
for await (const part of stream) {
if (res.closed) {
stream.controller?.abort();
finish_reason = 'close';
break;
}
const [reasoningContent, content] = parsePart(part, parseThinkTag);
const { reasoningContent, content, finishReason } = parsePart(part, parseThinkTag);
finish_reason = finish_reason || finishReason;
answer += content;
reasoning += reasoningContent;
@@ -575,5 +588,5 @@ async function streamResponse({
}
}
return { answer, reasoning };
return { answer, reasoning, finish_reason };
}

View File

@@ -4,9 +4,10 @@ import { DispatchNodeResultType } from '@fastgpt/global/core/workflow/runtime/ty
import axios from 'axios';
import { formatHttpError } from '../utils';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { SandboxCodeTypeEnum } from '@fastgpt/global/core/workflow/template/system/sandbox/constants';
type RunCodeType = ModuleDispatchProps<{
[NodeInputKeyEnum.codeType]: 'js';
[NodeInputKeyEnum.codeType]: string;
[NodeInputKeyEnum.code]: string;
[NodeInputKeyEnum.addInputParam]: Record<string, any>;
}>;
@@ -16,6 +17,14 @@ type RunCodeResponse = DispatchNodeResultType<{
[key: string]: any;
}>;
function getURL(codeType: string): string {
if (codeType == SandboxCodeTypeEnum.py) {
return `${process.env.SANDBOX_URL}/sandbox/python`;
} else {
return `${process.env.SANDBOX_URL}/sandbox/js`;
}
}
export const dispatchRunCode = async (props: RunCodeType): Promise<RunCodeResponse> => {
const {
params: { codeType, code, [NodeInputKeyEnum.addInputParam]: customVariables }
@@ -27,7 +36,7 @@ export const dispatchRunCode = async (props: RunCodeType): Promise<RunCodeRespon
};
}
const sandBoxRequestUrl = `${process.env.SANDBOX_URL}/sandbox/js`;
const sandBoxRequestUrl = getURL(codeType);
try {
const { data: runResult } = await axios.post<{
success: boolean;
@@ -40,6 +49,8 @@ export const dispatchRunCode = async (props: RunCodeType): Promise<RunCodeRespon
variables: customVariables
});
console.log(runResult);
if (runResult.success) {
return {
[NodeOutputKeyEnum.rawResponse]: runResult.data.codeReturn,
@@ -52,7 +63,7 @@ export const dispatchRunCode = async (props: RunCodeType): Promise<RunCodeRespon
...runResult.data.codeReturn
};
} else {
throw new Error('Run code failed');
return Promise.reject('Run code failed');
}
} catch (error) {
return {

View File

@@ -44,14 +44,14 @@ import {
textAdaptGptResponse,
replaceEditorVariable
} from '@fastgpt/global/core/workflow/runtime/utils';
import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import type { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
import { dispatchRunTools } from './agent/runTool/index';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
import { DispatchFlowResponse } from './type';
import type { DispatchFlowResponse } from './type';
import { dispatchStopToolCall } from './agent/runTool/stopTool';
import { dispatchLafRequest } from './tools/runLaf';
import { dispatchIfElse } from './tools/runIfElse';
import { RuntimeEdgeItemType } from '@fastgpt/global/core/workflow/type/edge';
import type { RuntimeEdgeItemType } from '@fastgpt/global/core/workflow/type/edge';
import { getReferenceVariableValue } from '@fastgpt/global/core/workflow/runtime/utils';
import { dispatchSystemConfig } from './init/systemConfig';
import { dispatchUpdateVariable } from './tools/runUpdateVar';
@@ -62,7 +62,7 @@ import { dispatchTextEditor } from './tools/textEditor';
import { dispatchCustomFeedback } from './tools/customFeedback';
import { dispatchReadFiles } from './tools/readFiles';
import { dispatchUserSelect } from './interactive/userSelect';
import {
import type {
WorkflowInteractiveResponseType,
InteractiveNodeResponseType
} from '@fastgpt/global/core/workflow/template/system/interactive/type';
@@ -130,6 +130,7 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
timezone,
externalProvider,
stream = false,
version = 'v1',
...props
} = data;
@@ -451,6 +452,11 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
const interactiveResponse = nodeRunResult.result?.[DispatchNodeResponseKeyEnum.interactive];
if (interactiveResponse) {
pushStore(nodeRunResult.node, nodeRunResult.result);
if (props.mode === 'debug') {
debugNextStepRunNodes = debugNextStepRunNodes.concat([nodeRunResult.node]);
}
nodeInteractiveResponse = {
entryNodeIds: [nodeRunResult.node.nodeId],
interactiveResponse
@@ -621,6 +627,21 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
};
})();
// Response node response
if (
version === 'v2' &&
!props.isToolCall &&
!props.runningAppInfo.isChildApp &&
formatResponseData
) {
props.workflowStreamResponse?.({
event: SseResponseEventEnum.flowNodeResponse,
data: {
...formatResponseData
}
});
}
// Add output default value
node.outputs.forEach((item) => {
if (!item.required) return;

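With version set to 'v2', top-level runs (not tool calls or child apps) now stream each node's formatted response as its own SSE event, gated behind detail mode together with the other detail-only events below. A minimal client-side sketch, assuming the wire event name matches SseResponseEventEnum.flowNodeResponse ('flowNodeResponse'), the payload is JSON, and a hypothetical endpoint:

```ts
// Sketch only: listening for the per-node response events on a v2 stream.
const source = new EventSource('/api/chat?stream=true'); // hypothetical endpoint

source.addEventListener('flowNodeResponse', (event) => {
  // formatResponseData for one finished node
  const nodeResponse = JSON.parse((event as MessageEvent).data);
  console.log('node response', nodeResponse);
});
```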
View File

@@ -1,11 +1,11 @@
import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import {
import type {
DispatchNodeResultType,
ModuleDispatchProps
} from '@fastgpt/global/core/workflow/runtime/type';
import {
import type {
UserInputFormItemType,
UserInputInteractive
} from '@fastgpt/global/core/workflow/template/system/interactive/type';
@@ -32,7 +32,6 @@ export const dispatchFormInput = async (props: Props): Promise<FormInputResponse
query
} = props;
const { isEntry } = node;
const interactive = getLastInteractiveValue(histories);
// Interactive node is not the entry node, return interactive result

View File

@@ -1,5 +1,5 @@
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import {
import type {
DispatchNodeResultType,
ModuleDispatchProps
} from '@fastgpt/global/core/workflow/runtime/type';
@@ -30,7 +30,6 @@ export const dispatchUserSelect = async (props: Props): Promise<UserSelectRespon
query
} = props;
const { nodeId, isEntry } = node;
const interactive = getLastInteractiveValue(histories);
// Interactive node is not the entry node, return interactive result

View File

@@ -33,7 +33,7 @@ export const dispatchLoop = async (props: Props): Promise<Response> => {
? Number(process.env.WORKFLOW_MAX_LOOP_TIMES)
: 50;
if (loopInputArray.length > maxLength) {
return Promise.reject('Input array length cannot be greater than 50');
return Promise.reject(`Input array length cannot be greater than ${maxLength}`);
}
const outputValueArr = [];

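A small sketch of how the configurable cap behaves, assuming WORKFLOW_MAX_LOOP_TIMES holds an integer string when set and that 50 stays the default otherwise:

```ts
// Sketch only: the loop node's cap is configurable and the error now reports it.
const maxLength = process.env.WORKFLOW_MAX_LOOP_TIMES
  ? Number(process.env.WORKFLOW_MAX_LOOP_TIMES)
  : 50;
// e.g. with WORKFLOW_MAX_LOOP_TIMES=200, a 300-item input now fails with
// "Input array length cannot be greater than 200" instead of a misleading "... 50".
```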
View File

@@ -90,7 +90,8 @@ export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPlugi
id: String(plugin.id),
// If the system plugin has its own teamId and tmbId, use those (the admin has designated the plugin as a system plugin)
teamId: plugin.teamId || runningAppInfo.teamId,
tmbId: plugin.tmbId || runningAppInfo.tmbId
tmbId: plugin.tmbId || runningAppInfo.tmbId,
isChildApp: true
},
variables: runtimeVariables,
query: getPluginRunUserQuery({

View File

@@ -112,7 +112,8 @@ export const dispatchRunAppNode = async (props: Props): Promise<Response> => {
runningAppInfo: {
id: String(appData._id),
teamId: String(appData.teamId),
tmbId: String(appData.tmbId)
tmbId: String(appData.tmbId),
isChildApp: true
},
runtimeNodes: storeNodes2RuntimeNodes(nodes, getWorkflowEntryNodeIds(nodes)),
runtimeEdges: initWorkflowEdgeStatus(edges),

View File

@@ -19,7 +19,14 @@ type Props = ModuleDispatchProps<{
type Response = DispatchNodeResultType<{}>;
export const dispatchUpdateVariable = async (props: Props): Promise<Response> => {
const { params, variables, runtimeNodes, workflowStreamResponse, externalProvider } = props;
const {
params,
variables,
runtimeNodes,
workflowStreamResponse,
externalProvider,
runningAppInfo
} = props;
const { updateList } = params;
const nodeIds = runtimeNodes.map((node) => node.nodeId);
@@ -78,10 +85,12 @@ export const dispatchUpdateVariable = async (props: Props): Promise<Response> =>
return value;
});
workflowStreamResponse?.({
event: SseResponseEventEnum.updateVariables,
data: removeSystemVariable(variables, externalProvider.externalWorkflowVariables)
});
if (!runningAppInfo.isChildApp) {
workflowStreamResponse?.({
event: SseResponseEventEnum.updateVariables,
data: removeSystemVariable(variables, externalProvider.externalWorkflowVariables)
});
}
return {
[DispatchNodeResponseKeyEnum.newVariables]: variables,

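Together with the runPlugin and runAppModule hunks above, nested executions are now marked with isChildApp and keep their variable updates server-side; a sketch of the intent with a hypothetical helper:

```ts
// Sketch only: child apps (plugins, nested apps) run with isChildApp: true, so only
// the top-level workflow streams updateVariables back to the client.
const maybeStreamVariables = (
  runningAppInfo: { isChildApp?: boolean },
  streamResponse: ((payload: { event: string; data: Record<string, any> }) => void) | undefined,
  variables: Record<string, any>
) => {
  if (runningAppInfo.isChildApp) return; // nested run: skip the client-facing event
  streamResponse?.({ event: 'updateVariables', data: variables });
};
```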
View File

@@ -53,7 +53,8 @@ export const getWorkflowResponseWrite = ({
[SseResponseEventEnum.toolCall]: 1,
[SseResponseEventEnum.toolParams]: 1,
[SseResponseEventEnum.toolResponse]: 1,
[SseResponseEventEnum.updateVariables]: 1
[SseResponseEventEnum.updateVariables]: 1,
[SseResponseEventEnum.flowNodeResponse]: 1
};
if (!detail && detailEvent[event]) return;
@@ -106,6 +107,7 @@ export const getHistories = (history?: ChatItemType[] | number, histories: ChatI
/* value type format */
export const valueTypeFormat = (value: any, type?: WorkflowIOValueTypeEnum) => {
if (value === undefined) return;
if (!type || type === WorkflowIOValueTypeEnum.any) return value;
if (type === 'string') {
if (typeof value !== 'object') return String(value);
@@ -117,7 +119,7 @@ export const valueTypeFormat = (value: any, type?: WorkflowIOValueTypeEnum) => {
return Boolean(value);
}
try {
if (WorkflowIOValueTypeEnum.arrayString && typeof value === 'string') {
if (type === WorkflowIOValueTypeEnum.arrayString && typeof value === 'string') {
return [value];
}
if (

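The corrected condition compares the declared type; previously the enum member itself was the test, which is always truthy, so any string value reaching this point was wrapped into an array regardless of the target type. A sketch of the expected behaviour after the fix, assuming arrayString and arrayNumber are the enum members used elsewhere in this file:

```ts
// Sketch only: valueTypeFormat after the fix.
valueTypeFormat('hello', WorkflowIOValueTypeEnum.arrayString); // -> ['hello'] (string wrapped)
valueTypeFormat('hello', WorkflowIOValueTypeEnum.arrayNumber); // falls through to the
// remaining checks instead of being wrapped blindly, as the old always-truthy test did.
```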
View File

@@ -7,6 +7,7 @@
"@xmldom/xmldom": "^0.8.10",
"@zilliz/milvus2-sdk-node": "2.4.2",
"axios": "^1.8.2",
"bullmq": "^5.44.0",
"chalk": "^5.3.0",
"cheerio": "1.0.0-rc.12",
"cookie": "^0.7.1",
@@ -18,6 +19,7 @@
"file-type": "^19.0.0",
"form-data": "^4.0.0",
"iconv-lite": "^0.6.3",
"ioredis": "^5.6.0",
"joplin-turndown-plugin-gfm": "^1.0.12",
"json5": "^2.2.3",
"jsonpath-plus": "^10.3.0",
@@ -27,12 +29,12 @@
"mongoose": "^8.10.1",
"multer": "1.4.5-lts.1",
"mysql2": "^3.11.3",
"next": "14.2.25",
"next": "14.2.26",
"nextjs-cors": "^2.2.0",
"node-cron": "^3.0.3",
"node-xlsx": "^0.24.0",
"papaparse": "5.4.1",
"pdfjs-dist": "4.4.168",
"pdfjs-dist": "4.10.38",
"pg": "^8.10.0",
"request-ip": "^3.3.0",
"tiktoken": "1.0.17",

Some files were not shown because too many files have changed in this diff.