Compare commits
16 Commits
v4.5.2
...
v4.6.1-alp
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c5664c7e90 | ||
|
|
70f3373246 | ||
|
|
af16817a4a | ||
|
|
4358b6de4d | ||
|
|
f6aea484ce | ||
|
|
fbe1d8cfed | ||
|
|
16103029f5 | ||
|
|
cd3acb44ab | ||
|
|
bfd8be5df0 | ||
|
|
592e1a93a2 | ||
|
|
2b8ff7d32c | ||
|
|
4593eef2ff | ||
|
|
d91551e6be | ||
|
|
0a0fe31d3c | ||
|
|
9f889d8806 | ||
|
|
8bb5588305 |
2
.github/ISSUE_TEMPLATE/bugs.md
vendored
@@ -11,7 +11,7 @@ assignees: ''
|
||||
[//]: # '方框内填 x 表示打钩'
|
||||
|
||||
- [ ] 我已确认目前没有类似 issue
|
||||
- [ ] 我已完整查看过项目 README,以及[项目文档](https://doc.fastgpt.run/docs/intro/)
|
||||
- [ ] 我已完整查看过项目 README,以及[项目文档](https://doc.fastgpt.in/docs/intro/)
|
||||
- [ ] 我使用了自己的 key,并确认我的 key 是可正常使用的
|
||||
- [ ] 我理解并愿意跟进此 issue,协助测试和提供反馈
|
||||
- [x] 我理解并认可上述内容,并理解项目维护者精力有限,**不遵循规则的 issue 可能会被无视或直接关闭**
|
||||
|
||||
2
.github/ISSUE_TEMPLATE/config.yml
vendored
@@ -1,5 +1,5 @@
|
||||
blank_issues_enabled: false
|
||||
contact_links:
|
||||
- name: 微信交流群
|
||||
url: https://doc.fastgpt.run/wechat-fastgpt.webp
|
||||
url: https://doc.fastgpt.in/wechat-fastgpt.webp
|
||||
about: FastGPT 全是问题群
|
||||
|
||||
52
.github/workflows/fastgpt-image-personal.yml
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
name: Build FastGPT images in Personal warehouse
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
paths:
|
||||
- 'projects/app/**'
|
||||
- 'packages/**'
|
||||
branches:
|
||||
- 'main'
|
||||
jobs:
|
||||
build-fastgpt-images:
|
||||
runs-on: ubuntu-20.04
|
||||
if: github.repository != 'labring/FastGPT'
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
with:
|
||||
driver-opts: network=host
|
||||
- name: Cache Docker layers
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: /tmp/.buildx-cache
|
||||
key: ${{ runner.os }}-buildx-${{ github.sha }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-buildx-
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
password: ${{ secrets.GH_PAT }}
|
||||
- name: Set DOCKER_REPO_TAGGED based on branch or tag
|
||||
run: |
|
||||
echo "DOCKER_REPO_TAGGED=ghcr.io/${{ github.repository_owner }}/fastgpt:latest" >> $GITHUB_ENV
|
||||
- name: Build and publish image for main branch or tag push event
|
||||
env:
|
||||
DOCKER_REPO_TAGGED: ${{ env.DOCKER_REPO_TAGGED }}
|
||||
run: |
|
||||
docker buildx build \
|
||||
--build-arg name=app \
|
||||
--label "org.opencontainers.image.source=https://github.com/${{ github.repository_owner }}/FastGPT" \
|
||||
--label "org.opencontainers.image.description=fastgpt image" \
|
||||
--push \
|
||||
--cache-from=type=local,src=/tmp/.buildx-cache \
|
||||
--cache-to=type=local,dest=/tmp/.buildx-cache \
|
||||
-t ${DOCKER_REPO_TAGGED} \
|
||||
-f Dockerfile \
|
||||
.
|
||||
5
.github/workflows/fastgpt-image.yml
vendored
@@ -5,8 +5,6 @@ on:
|
||||
paths:
|
||||
- 'projects/app/**'
|
||||
- 'packages/**'
|
||||
branches:
|
||||
- 'main'
|
||||
tags:
|
||||
- 'v*.*.*'
|
||||
jobs:
|
||||
@@ -53,9 +51,8 @@ jobs:
|
||||
docker buildx build \
|
||||
--build-arg name=app \
|
||||
--platform linux/amd64,linux/arm64 \
|
||||
--label "org.opencontainers.image.source= https://github.com/ ${{ github.repository_owner }}/FastGPT" \
|
||||
--label "org.opencontainers.image.source=https://github.com/${{ github.repository_owner }}/FastGPT" \
|
||||
--label "org.opencontainers.image.description=fastgpt image" \
|
||||
--label "org.opencontainers.image.licenses=Apache" \
|
||||
--push \
|
||||
--cache-from=type=local,src=/tmp/.buildx-cache \
|
||||
--cache-to=type=local,dest=/tmp/.buildx-cache \
|
||||
|
||||
3
.github/workflows/preview-image.yml
vendored
@@ -24,7 +24,7 @@ jobs:
|
||||
with:
|
||||
driver-opts: network=host
|
||||
- name: Cache Docker layers
|
||||
uses: actions/cache@v2
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: /tmp/.buildx-cache
|
||||
key: ${{ runner.os }}-buildx-${{ github.sha }}
|
||||
@@ -48,6 +48,7 @@ jobs:
|
||||
--label "org.opencontainers.image.source= https://github.com/ ${{ github.repository_owner }}/FastGPT" \
|
||||
--label "org.opencontainers.image.description=fastgpt-pr image" \
|
||||
--label "org.opencontainers.image.licenses=Apache" \
|
||||
--push \
|
||||
--cache-from=type=local,src=/tmp/.buildx-cache \
|
||||
--cache-to=type=local,dest=/tmp/.buildx-cache \
|
||||
-t ${DOCKER_REPO_TAGGED} \
|
||||
|
||||
40
README.md
@@ -17,10 +17,10 @@ FastGPT 是一个基于 LLM 大语言模型的知识库问答系统,提供开
|
||||
<a href="https://fastgpt.run/">
|
||||
<img height="21" src="https://img.shields.io/badge/在线使用-d4eaf7?style=flat-square&logo=spoj&logoColor=7d09f1" alt="cloud">
|
||||
</a>
|
||||
<a href="https://doc.fastgpt.run/docs/intro">
|
||||
<a href="https://doc.fastgpt.in/docs/intro">
|
||||
<img height="21" src="https://img.shields.io/badge/相关文档-7d09f1?style=flat-square" alt="document">
|
||||
</a>
|
||||
<a href="https://doc.fastgpt.run/docs/development">
|
||||
<a href="https://doc.fastgpt.in/docs/development">
|
||||
<img height="21" src="https://img.shields.io/badge/本地开发-%23d4eaf7?style=flat-square&logo=xcode&logoColor=7d09f1" alt="development">
|
||||
</a>
|
||||
<a href="/#-%E7%9B%B8%E5%85%B3%E9%A1%B9%E7%9B%AE">
|
||||
@@ -43,6 +43,10 @@ https://github.com/labring/FastGPT/assets/15308462/7d3a38df-eb0e-4388-9250-2409b
|
||||
|  |  |
|
||||
|  |  |
|
||||
|
||||
<a href="#readme">
|
||||
<img src="https://img.shields.io/badge/-返回顶部-7d09f1.svg" alt="#" align="right">
|
||||
</a>
|
||||
|
||||
## 💡 功能
|
||||
|
||||
1. 强大的可视化编排,轻松构建 AI 应用
|
||||
@@ -78,6 +82,10 @@ https://github.com/labring/FastGPT/assets/15308462/7d3a38df-eb0e-4388-9250-2409b
|
||||
- [x] Iframe 一键嵌入
|
||||
- [x] 统一查阅对话记录,并对数据进行标注
|
||||
|
||||
<a href="#readme">
|
||||
<img src="https://img.shields.io/badge/-返回顶部-7d09f1.svg" alt="#" align="right">
|
||||
</a>
|
||||
|
||||
## 👨💻 开发
|
||||
|
||||
项目技术栈:NextJs + TS + ChakraUI + Mongo + Postgres (Vector 插件)
|
||||
@@ -98,12 +106,20 @@ https://github.com/labring/FastGPT/assets/15308462/7d3a38df-eb0e-4388-9250-2409b
|
||||
* [OpenAPI API 文档](https://doc.fastgpt.in/docs/development/openapi/)
|
||||
* [知识库结构详解](https://doc.fastgpt.in/docs/use-cases/datasetengine/)
|
||||
|
||||
<a href="#readme">
|
||||
<img src="https://img.shields.io/badge/-返回顶部-7d09f1.svg" alt="#" align="right">
|
||||
</a>
|
||||
|
||||
## 🏘️ 社区交流群
|
||||
|
||||
添加 wx 小助手加入:
|
||||
|
||||

|
||||
|
||||
<a href="#readme">
|
||||
<img src="https://img.shields.io/badge/-返回顶部-7d09f1.svg" alt="#" align="right">
|
||||
</a>
|
||||
|
||||
## 💪 相关项目
|
||||
|
||||
- [Laf:3 分钟快速接入三方应用](https://github.com/labring/laf)
|
||||
@@ -111,20 +127,36 @@ https://github.com/labring/FastGPT/assets/15308462/7d3a38df-eb0e-4388-9250-2409b
|
||||
- [One API:多模型管理,支持 Azure、文心一言等](https://github.com/songquanpeng/one-api)
|
||||
- [TuShan:5 分钟搭建后台管理系统](https://github.com/msgbyte/tushan)
|
||||
|
||||
<a href="#readme">
|
||||
<img src="https://img.shields.io/badge/-返回顶部-7d09f1.svg" alt="#" align="right">
|
||||
</a>
|
||||
|
||||
## 👀 其他
|
||||
|
||||
- [保姆级 FastGPT 教程](https://www.bilibili.com/video/BV1n34y1A7Bo/?spm_id_from=333.999.0.0)
|
||||
- [接入飞书](https://www.bilibili.com/video/BV1Su4y1r7R3/?spm_id_from=333.999.0.0)
|
||||
- [接入企微](https://www.bilibili.com/video/BV1Tp4y1n72T/?spm_id_from=333.999.0.0)
|
||||
|
||||
<a href="#readme">
|
||||
<img src="https://img.shields.io/badge/-返回顶部-7d09f1.svg" alt="#" align="right">
|
||||
</a>
|
||||
|
||||
## 🤝 第三方生态
|
||||
|
||||
- [OnWeChat 个人微信/企微机器人](https://doc.fastgpt.run/docs/use-cases/onwechat/)
|
||||
- [OnWeChat 个人微信/企微机器人](https://doc.fastgpt.in/docs/use-cases/onwechat/)
|
||||
|
||||
<a href="#readme">
|
||||
<img src="https://img.shields.io/badge/-返回顶部-7d09f1.svg" alt="#" align="right">
|
||||
</a>
|
||||
|
||||
## 🌟 Star History
|
||||
|
||||
[](https://star-history.com/#labring/FastGPT&Date)
|
||||
|
||||
<a href="#readme">
|
||||
<img src="https://img.shields.io/badge/-返回顶部-7d09f1.svg" alt="#" align="right">
|
||||
</a>
|
||||
|
||||
## 使用协议
|
||||
|
||||
本仓库遵循 [FastGPT Open Source License](./LICENSE) 开源协议。
|
||||
@@ -132,4 +164,4 @@ https://github.com/labring/FastGPT/assets/15308462/7d3a38df-eb0e-4388-9250-2409b
|
||||
1. 允许作为后台服务直接商用,但不允许提供 SaaS 服务。
|
||||
2. 未经商业授权,任何形式的商用服务均需保留相关版权信息。
|
||||
3. 完整请查看 [FastGPT Open Source License](./LICENSE)
|
||||
4. 联系方式:yujinlong@sealos.io,[点击查看商业版定价策略](https://doc.fastgpt.run/docs/commercial)
|
||||
4. 联系方式:yujinlong@sealos.io,[点击查看商业版定价策略](https://doc.fastgpt.in/docs/commercial)
|
||||
|
||||
24
README_en.md
@@ -41,6 +41,10 @@ https://github.com/labring/FastGPT/assets/15308462/7d3a38df-eb0e-4388-9250-2409b
|
||||
|  |  |
|
||||
|  |  |
|
||||
|
||||
<a href="#readme">
|
||||
<img src="https://img.shields.io/badge/-Back_to_Top-7d09f1.svg" alt="#" align="right">
|
||||
</a>
|
||||
|
||||
## 💡 Features
|
||||
|
||||
1. Powerful visual workflows: Effortlessly craft AI applications
|
||||
@@ -86,6 +90,10 @@ https://github.com/labring/FastGPT/assets/15308462/7d3a38df-eb0e-4388-9250-2409b
|
||||
- [x] One-click embedding with Iframe
|
||||
- [ ] Unified access to dialogue records
|
||||
|
||||
<a href="#readme">
|
||||
<img src="https://img.shields.io/badge/-Back_to_Top-7d09f1.svg" alt="#" align="right">
|
||||
</a>
|
||||
|
||||
## 👨💻 Development
|
||||
|
||||
Project tech stack: NextJs + TS + ChakraUI + Mongo + Postgres (Vector plugin)
|
||||
@@ -111,6 +119,10 @@ Project tech stack: NextJs + TS + ChakraUI + Mongo + Postgres (Vector plugin)
|
||||
| ------------------------------------------------- | ---------------------------------------------- |
|
||||
|  |  | -->
|
||||
|
||||
<a href="#readme">
|
||||
<img src="https://img.shields.io/badge/-Back_to_Top-7d09f1.svg" alt="#" align="right">
|
||||
</a>
|
||||
|
||||
## 👀 Others
|
||||
|
||||
- [FastGPT FAQ](https://kjqvjse66l.feishu.cn/docx/HtrgdT0pkonP4kxGx8qcu6XDnGh)
|
||||
@@ -118,6 +130,10 @@ Project tech stack: NextJs + TS + ChakraUI + Mongo + Postgres (Vector plugin)
|
||||
- [Official Account Integration Video Tutorial](https://www.bilibili.com/video/BV1xh4y1t7fy/)
|
||||
- [FastGPT Knowledge Base Demo](https://www.bilibili.com/video/BV1Wo4y1p7i1/)
|
||||
|
||||
<a href="#readme">
|
||||
<img src="https://img.shields.io/badge/-Back_to_Top-7d09f1.svg" alt="#" align="right">
|
||||
</a>
|
||||
|
||||
## 💪 Related Projects
|
||||
|
||||
- [Laf: 3-minute quick access to third-party applications](https://github.com/labring/laf)
|
||||
@@ -125,10 +141,18 @@ Project tech stack: NextJs + TS + ChakraUI + Mongo + Postgres (Vector plugin)
|
||||
- [One API: Multi-model management, supports Azure, Wenxin Yiyuan, etc.](https://github.com/songquanpeng/one-api)
|
||||
- [TuShan: Build a backend management system in 5 minutes](https://github.com/msgbyte/tushan)
|
||||
|
||||
<a href="#readme">
|
||||
<img src="https://img.shields.io/badge/-Back_to_Top-7d09f1.svg" alt="#" align="right">
|
||||
</a>
|
||||
|
||||
## 🤝 Third-party Ecosystem
|
||||
|
||||
- [luolinAI: Enterprise WeChat bot, ready to use](https://github.com/luolin-ai/FastGPT-Enterprise-WeChatbot)
|
||||
|
||||
<a href="#readme">
|
||||
<img src="https://img.shields.io/badge/-Back_to_Top-7d09f1.svg" alt="#" align="right">
|
||||
</a>
|
||||
|
||||
## 🌟 Star History
|
||||
|
||||
[](https://star-history.com/#labring/FastGPT&Date)
|
||||
|
||||
BIN
docSite/assets/imgs/datasetSetting1.png
Normal file
|
After Width: | Height: | Size: 54 KiB |
BIN
docSite/assets/imgs/wechat1.png
Normal file
|
After Width: | Height: | Size: 210 KiB |
BIN
docSite/assets/imgs/wechat10.png
Normal file
|
After Width: | Height: | Size: 326 KiB |
BIN
docSite/assets/imgs/wechat2.png
Normal file
|
After Width: | Height: | Size: 344 KiB |
BIN
docSite/assets/imgs/wechat3.png
Normal file
|
After Width: | Height: | Size: 269 KiB |
BIN
docSite/assets/imgs/wechat4.png
Normal file
|
After Width: | Height: | Size: 209 KiB |
BIN
docSite/assets/imgs/wechat5.png
Normal file
|
After Width: | Height: | Size: 270 KiB |
BIN
docSite/assets/imgs/wechat6.png
Normal file
|
After Width: | Height: | Size: 261 KiB |
BIN
docSite/assets/imgs/wechat7.png
Normal file
|
After Width: | Height: | Size: 157 KiB |
BIN
docSite/assets/imgs/wechat8.png
Normal file
|
After Width: | Height: | Size: 227 KiB |
BIN
docSite/assets/imgs/wechat9.png
Normal file
|
After Width: | Height: | Size: 324 KiB |
@@ -21,62 +21,76 @@ weight: 520
|
||||
```json
|
||||
{
|
||||
"SystemParams": {
|
||||
"pluginBaseUrl": "", // 商业版接口地址
|
||||
"vectorMaxProcess": 15, // 向量生成最大进程,结合数据库性能和 key 来设置
|
||||
"qaMaxProcess": 15, // QA 生成最大进程,结合数据库性能和 key 来设置
|
||||
"pgHNSWEfSearch": 100 // pg vector 索引参数,越大精度高但速度慢
|
||||
},
|
||||
"ChatModels": [
|
||||
{
|
||||
"model": "gpt-3.5-turbo", // 实际调用的模型
|
||||
"name": "GPT35-4k", // 展示的名字
|
||||
"maxToken": 4000, // 最大token,均按 gpt35 计算
|
||||
"quoteMaxToken": 2000, // 引用内容最大 token
|
||||
"maxTemperature": 1.2, // 最大温度
|
||||
"price": 0,
|
||||
"model": "gpt-3.5-turbo-1106",
|
||||
"name": "GPT35-1106",
|
||||
"price": 0, // 除以 100000 后等于1个token的价格
|
||||
"maxContext": 16000, // 最大上下文长度
|
||||
"maxResponse": 4000, // 最大回复长度
|
||||
"quoteMaxToken": 2000, // 最大引用内容长度
|
||||
"maxTemperature": 1.2, // 最大温度值
|
||||
"censor": false, // 是否开启敏感词过滤(商业版)
|
||||
"vision": false, // 支持图片输入
|
||||
"defaultSystemChatPrompt": ""
|
||||
},
|
||||
{
|
||||
"model": "gpt-3.5-turbo-16k",
|
||||
"name": "GPT35-16k",
|
||||
"maxToken": 16000,
|
||||
"maxContext": 16000,
|
||||
"maxResponse": 16000,
|
||||
"price": 0,
|
||||
"quoteMaxToken": 8000,
|
||||
"maxTemperature": 1.2,
|
||||
"price": 0,
|
||||
"censor": false,
|
||||
"vision": false,
|
||||
"defaultSystemChatPrompt": ""
|
||||
},
|
||||
{
|
||||
"model": "gpt-4",
|
||||
"name": "GPT4-8k",
|
||||
"maxToken": 8000,
|
||||
"maxContext": 8000,
|
||||
"maxResponse": 8000,
|
||||
"price": 0,
|
||||
"quoteMaxToken": 4000,
|
||||
"maxTemperature": 1.2,
|
||||
"censor": false,
|
||||
"vision": false,
|
||||
"defaultSystemChatPrompt": ""
|
||||
},
|
||||
{
|
||||
"model": "gpt-4-vision-preview",
|
||||
"name": "GPT4-Vision",
|
||||
"maxContext": 128000,
|
||||
"maxResponse": 4000,
|
||||
"price": 0,
|
||||
"quoteMaxToken": 100000,
|
||||
"maxTemperature": 1.2,
|
||||
"censor": false,
|
||||
"vision": true,
|
||||
"defaultSystemChatPrompt": ""
|
||||
}
|
||||
],
|
||||
"QAModels": [ // QA 拆分模型
|
||||
{
|
||||
"QAModels": [
|
||||
{
|
||||
"model": "gpt-3.5-turbo-16k",
|
||||
"name": "GPT35-16k",
|
||||
"maxToken": 16000,
|
||||
"maxContext": 16000,
|
||||
"maxResponse": 16000,
|
||||
"price": 0
|
||||
}
|
||||
],
|
||||
"ExtractModels": [ // 内容提取模型
|
||||
{
|
||||
"model": "gpt-3.5-turbo-16k",
|
||||
"name": "GPT35-16k",
|
||||
"maxToken": 16000,
|
||||
"price": 0,
|
||||
"functionCall": true, // 是否支持 function call
|
||||
"functionPrompt": "" // 自定义非 function call 提示词
|
||||
}
|
||||
],
|
||||
"CQModels": [ // Classify Question: 问题分类模型
|
||||
"CQModels": [
|
||||
{
|
||||
"model": "gpt-3.5-turbo-16k",
|
||||
"name": "GPT35-16k",
|
||||
"maxToken": 16000,
|
||||
"model": "gpt-3.5-turbo-1106",
|
||||
"name": "GPT35-1106",
|
||||
"maxContext": 16000,
|
||||
"maxResponse": 4000,
|
||||
"price": 0,
|
||||
"functionCall": true,
|
||||
"functionPrompt": ""
|
||||
@@ -84,17 +98,30 @@ weight: 520
|
||||
{
|
||||
"model": "gpt-4",
|
||||
"name": "GPT4-8k",
|
||||
"maxToken": 8000,
|
||||
"maxContext": 8000,
|
||||
"maxResponse": 8000,
|
||||
"price": 0,
|
||||
"functionCall": true,
|
||||
"functionPrompt": ""
|
||||
}
|
||||
],
|
||||
"QGModels": [ // Question Generation: 生成下一步指引模型
|
||||
{
|
||||
"model": "gpt-3.5-turbo",
|
||||
"name": "GPT35-4k",
|
||||
"maxToken": 4000,
|
||||
"ExtractModels": [
|
||||
{
|
||||
"model": "gpt-3.5-turbo-1106",
|
||||
"name": "GPT35-1106",
|
||||
"maxContext": 16000,
|
||||
"maxResponse": 4000,
|
||||
"price": 0,
|
||||
"functionCall": true,
|
||||
"functionPrompt": ""
|
||||
}
|
||||
],
|
||||
"QGModels": [
|
||||
{
|
||||
"model": "gpt-3.5-turbo-1106",
|
||||
"name": "GPT35-1106",
|
||||
"maxContext": 1600,
|
||||
"maxResponse": 4000,
|
||||
"price": 0
|
||||
}
|
||||
],
|
||||
@@ -102,10 +129,32 @@ weight: 520
|
||||
{
|
||||
"model": "text-embedding-ada-002",
|
||||
"name": "Embedding-2",
|
||||
"price": 0,
|
||||
"defaultToken": 500,
|
||||
"price": 0.2,
|
||||
"defaultToken": 700,
|
||||
"maxToken": 3000
|
||||
}
|
||||
]
|
||||
],
|
||||
"AudioSpeechModels": [
|
||||
{
|
||||
"model": "tts-1",
|
||||
"name": "OpenAI TTS1",
|
||||
"price": 0,
|
||||
"baseUrl": "",
|
||||
"key": "",
|
||||
"voices": [
|
||||
{ "label": "Alloy", "value": "alloy", "bufferId": "openai-Alloy" },
|
||||
{ "label": "Echo", "value": "echo", "bufferId": "openai-Echo" },
|
||||
{ "label": "Fable", "value": "fable", "bufferId": "openai-Fable" },
|
||||
{ "label": "Onyx", "value": "onyx", "bufferId": "openai-Onyx" },
|
||||
{ "label": "Nova", "value": "nova", "bufferId": "openai-Nova" },
|
||||
{ "label": "Shimmer", "value": "shimmer", "bufferId": "openai-Shimmer" }
|
||||
]
|
||||
}
|
||||
],
|
||||
"WhisperModel": {
|
||||
"model": "whisper-1",
|
||||
"name": "Whisper1",
|
||||
"price": 0
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
@@ -393,7 +393,7 @@ curl --location --request POST 'https://fastgpt.run/api/core/dataset/searchTest'
|
||||
**请求示例**
|
||||
|
||||
```bash
|
||||
curl --location --request POST 'https://fastgpt.run/api/common/bill/createTrainingBill' \
|
||||
curl --location --request POST 'https://fastgpt.run/api/support/wallet/bill/createTrainingBill' \
|
||||
--header 'Authorization: Bearer {{apikey}}' \
|
||||
--header 'Content-Type: application/json' \
|
||||
--data-raw ''
|
||||
|
||||
@@ -99,9 +99,9 @@ CHAT_API_KEY=sk-xxxxxx
|
||||
{
|
||||
"model": "ERNIE-Bot", // 这里的模型需要对应 One API 的模型
|
||||
"name": "文心一言", // 对外展示的名称
|
||||
"maxToken": 4000, // 最大长下文 token,无论什么模型都按 GPT35 的计算。GPT 外的模型需要自行大致计算下这个值。可以调用官方接口去比对 Token 的倍率,然后在这里粗略计算。
|
||||
"maxContext": 8000, // 最大长下文 token,无论什么模型都按 GPT35 的计算。GPT 外的模型需要自行大致计算下这个值。可以调用官方接口去比对 Token 的倍率,然后在这里粗略计算。
|
||||
"maxResponse": 4000, // 最大回复 token
|
||||
// 例如:文心一言的中英文 token 基本是 1:1,而 GPT 的中文 Token 是 2:1,如果文心一言官方最大 Token 是 4000,那么这里就可以填 8000,保险点就填 7000.
|
||||
"price": 0, // 1个token 价格 => 1.5 / 100000 * 1000 = 0.015元/1k token
|
||||
"quoteMaxToken": 2000, // 引用知识库的最大 Token
|
||||
"maxTemperature": 1, // 最大温度
|
||||
"defaultSystemChatPrompt": "" // 默认的系统提示词
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: 'V4.4.7'
|
||||
title: 'V4.4.7(需执行升级脚本)'
|
||||
description: 'FastGPT V4.4.7 更新(需执行升级脚本)'
|
||||
icon: 'upgrade'
|
||||
draft: false
|
||||
|
||||
67
docSite/content/docs/installation/upgrading/46.md
Normal file
@@ -0,0 +1,67 @@
|
||||
---
|
||||
title: 'V4.6(需要初始化)'
|
||||
description: 'FastGPT V4.6 更新'
|
||||
icon: 'upgrade'
|
||||
draft: false
|
||||
toc: true
|
||||
weight: 836
|
||||
---
|
||||
|
||||
**V4.6 版本加入了简单的团队功能,可以邀请其他用户进来管理资源。该版本升级后无法执行旧的升级脚本,且无法回退。**
|
||||
|
||||
## 1. 更新镜像并变更配置文件
|
||||
|
||||
更新镜像至 latest 或者 v4.6 版本。商业版镜像更新至 V0.2.1
|
||||
|
||||
最新配置可参考: [V46版本最新 config.json](/docs/development/configuration),商业镜像配置文件也更新,参考最新的飞书文档。
|
||||
|
||||
|
||||
## 2. 执行初始化 API
|
||||
|
||||
发起 2 个 HTTP 请求({{rootkey}} 替换成环境变量里的`rootkey`,{{host}}替换成自己域名)
|
||||
|
||||
**该初始化接口可能速度很慢,返回超时不用管,注意看日志即可,需要注意的是,需确保 initv46 成功后,再执行 initv46-2**
|
||||
|
||||
1. https://xxxxx/api/admin/initv46
|
||||
|
||||
```bash
|
||||
curl --location --request POST 'https://{{host}}/api/admin/initv46' \
|
||||
--header 'rootkey: {{rootkey}}' \
|
||||
--header 'Content-Type: application/json'
|
||||
```
|
||||
|
||||
2. https://xxxxx/api/admin/initv46-2
|
||||
|
||||
```bash
|
||||
curl --location --request POST 'https://{{host}}/api/admin/initv46-2' \
|
||||
--header 'rootkey: {{rootkey}}' \
|
||||
--header 'Content-Type: application/json'
|
||||
```
|
||||
|
||||
初始化内容:
|
||||
1. 创建默认团队
|
||||
2. 初始化 Mongo 所有资源的团队字段
|
||||
3. 初始化 Pg 的字段
|
||||
4. 初始化 Mongo Data
|
||||
|
||||
|
||||
## V4.6功能介绍
|
||||
|
||||
1. 新增 - 团队空间
|
||||
2. 新增 - 多路向量(多个向量映射一组数据)
|
||||
3. 新增 - tts语音
|
||||
4. 新增 - 支持知识库配置文本预处理模型
|
||||
5. 线上环境新增 - ReRank向量召回,提高召回精度
|
||||
6. 优化 - 知识库导出,可直接触发流下载,无需等待转圈圈
|
||||
|
||||
## 4.6缺陷修复
|
||||
|
||||
旧的 4.6 版本由于缺少一个字段,导致文件导入时知识库数据无法显示,可执行下面的脚本:
|
||||
|
||||
https://xxxxx/api/admin/initv46-fix
|
||||
|
||||
```bash
|
||||
curl --location --request POST 'https://{{host}}/api/admin/initv46-fix' \
|
||||
--header 'rootkey: {{rootkey}}' \
|
||||
--header 'Content-Type: application/json'
|
||||
```
|
||||
16
docSite/content/docs/installation/upgrading/461.md
Normal file
@@ -0,0 +1,16 @@
|
||||
---
|
||||
title: 'V4.6.1'
|
||||
description: 'FastGPT V4.6.1 更新'
|
||||
icon: 'upgrade'
|
||||
draft: false
|
||||
toc: true
|
||||
weight: 835
|
||||
---
|
||||
|
||||
|
||||
## V4.6.1 功能介绍
|
||||
|
||||
1. 新增 - GPT4-v 模型支持
|
||||
2. 新增 - whisper 语音输入
|
||||
3. 优化 - TTS 流传输
|
||||
4. 优化 - TTS 缓存
|
||||
@@ -1,10 +1,10 @@
|
||||
---
|
||||
title: '定价'
|
||||
description: 'FastGPT 的定价'
|
||||
title: '线上版定价'
|
||||
description: 'FastGPT 线上版定价'
|
||||
icon: 'currency_yen'
|
||||
draft: false
|
||||
toc: true
|
||||
weight: 10
|
||||
weight: 11
|
||||
---
|
||||
|
||||
## Tokens 说明
|
||||
@@ -15,7 +15,7 @@ weight: 10
|
||||
|
||||
## FastGPT 线上计费
|
||||
|
||||
目前,FastGPT 线上计费也仅按 Tokens 使用数量为准。以下是详细的计费表(最新定价以线上表格为准,可在点击充值后实时获取):
|
||||
使用: [https://fastgpt.run](https://fastgpt.run) 或 [https://ai.fastgpt.in](https://ai.fastgpt.in) 仅按 Tokens 使用数量扣费即可。可在 账号-使用记录 中查看具体使用情况,以下是详细的计费表(最新定价以线上表格为准,可在点击充值后实时获取):
|
||||
|
||||
{{< table "table-hover table-striped-columns" >}}
|
||||
| 计费项 | 价格: 元/ 1K tokens(包含上下文) |
|
||||
|
||||
@@ -9,23 +9,23 @@ weight: 310
|
||||
|
||||
在 FastGPT 的 AI 对话模块中,有一个 AI 高级配置,里面包含了 AI 模型的参数配置,本文详细介绍这些配置的含义。
|
||||
|
||||
# 返回AI内容
|
||||
## 返回AI内容
|
||||
|
||||
这是一个开关,打开的时候,当 AI 对话模块运行时,会将其输出的内容返回到浏览器(API响应);如果关闭,AI 输出的内容不会返回到浏览器,但是生成的内容仍可以通过【AI回复】进行输出。你可以将【AI回复】连接到其他模块中。
|
||||
|
||||
# 温度
|
||||
## 温度
|
||||
|
||||
可选范围0-10,越大代表生成的内容越自由扩散,越小代表越严谨。调节能力有限,知识库问答场景通常设置为0。
|
||||
|
||||
# 回复上限
|
||||
## 回复上限
|
||||
|
||||
控制 AI 回复的最大 Tokens,较小的值可以一定程度上减少 AI 的废话,但也可能导致 AI 回复不完整。
|
||||
|
||||
# 引用模板 & 引用提示词
|
||||
## 引用模板 & 引用提示词
|
||||
|
||||
这两个参数与知识库问答场景相关,可以控制知识库相关的提示词。
|
||||
|
||||
## AI 对话消息组成
|
||||
### AI 对话消息组成
|
||||
|
||||
想使用明白这两个变量,首先要了解传递给 AI 模型的消息格式。它是一个数组,FastGPT 中这个数组的组成形式为:
|
||||
|
||||
@@ -42,7 +42,7 @@ weight: 310
|
||||
Tips: 可以通过点击上下文按键查看完整的上下文组成,便于调试。
|
||||
{{% /alert %}}
|
||||
|
||||
## 引用模板和提示词设计
|
||||
### 引用模板和提示词设计
|
||||
|
||||
引用模板和引用提示词通常是成对出现,引用提示词依赖引用模板。
|
||||
|
||||
@@ -50,7 +50,7 @@ FastGPT 知识库采用 QA 对(不一定都是问答格式,仅代表两个变
|
||||
|
||||
可以通过 [知识库结构讲解](/docs/use-cases/datasetEngine/) 了解详细的知识库的结构。
|
||||
|
||||
### 引用模板
|
||||
#### 引用模板
|
||||
|
||||
```
|
||||
{instruction:"{{q}}",output:"{{a}}",source:"{{source}}"}
|
||||
@@ -64,7 +64,7 @@ FastGPT 知识库采用 QA 对(不一定都是问答格式,仅代表两个变
|
||||
{instruction:"电影《铃芽之旅》的编剧是谁?22",output:"新海诚是本片的编剧。",source:"手动输入"}
|
||||
```
|
||||
|
||||
### 引用提示词
|
||||
#### 引用提示词
|
||||
|
||||
引用模板需要和引用提示词一起使用,提示词中可以写引用模板的格式说明以及对话的要求等。可以使用 {{quote}} 来使用 **引用模板**,使用 {{question}} 来引入问题。例如:
|
||||
|
||||
@@ -95,15 +95,15 @@ FastGPT 知识库采用 QA 对(不一定都是问答格式,仅代表两个变
|
||||
我的问题是:"{{question}}"
|
||||
```
|
||||
|
||||
### 总结
|
||||
#### 总结
|
||||
|
||||
引用模板规定了搜索出来的内容如何组成一句话,其由 q,a,index,source 多个变量组成。
|
||||
|
||||
引用提示词由`引用模板`和`提示词`组成,提示词通常是对引用模板的一个描述,加上对模型的要求。
|
||||
|
||||
## 引用模板和提示词设计 示例
|
||||
### 引用模板和提示词设计 示例
|
||||
|
||||
### 通用模板与问答模板对比
|
||||
#### 通用模板与问答模板对比
|
||||
|
||||
我们通过一组`你是谁`的手动数据,对通用模板与问答模板的效果进行对比。此处特意打了个搞笑的答案,通用模板下 GPT35 就变得不那么听话了,而问答模板下 GPT35 依然能够回答正确。这是由于结构化的提示词,在大语言模型中具有更强的引导作用。
|
||||
|
||||
@@ -117,7 +117,7 @@ Tips: 建议根据不同的场景,每种知识库仅选择1类数据类型,
|
||||
|  |  |
|
||||
|  |  |
|
||||
|
||||
### 严格模板
|
||||
#### 严格模板
|
||||
|
||||
使用非严格模板,我们随便询问一个不在知识库中的内容,模型通常会根据其自身知识进行回答。
|
||||
|
||||
@@ -125,7 +125,7 @@ Tips: 建议根据不同的场景,每种知识库仅选择1类数据类型,
|
||||
| --- | --- | --- |
|
||||
|  |  | |
|
||||
|
||||
### 提示词设计思路
|
||||
#### 提示词设计思路
|
||||
|
||||
1. 使用序号进行不同要求描述。
|
||||
2. 使用首先、然后、最后等词语进行描述。
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
---
|
||||
title: "知识库结构讲解"
|
||||
description: "本节会介绍 FastGPT 知识库结构设计,理解其 QA 的存储格式和检索格式,以便更好的构建知识库。这篇介绍主要以使用为主,详细原理不多介绍。"
|
||||
description: "本节会详细介绍 FastGPT 知识库结构设计,理解其 QA 的存储格式和多向量映射,以便更好的构建知识库。这篇介绍主要以使用为主,详细原理不多介绍。"
|
||||
icon: "dataset"
|
||||
draft: false
|
||||
toc: true
|
||||
weight: 311
|
||||
---
|
||||
|
||||
# 理解向量
|
||||
## 理解向量
|
||||
|
||||
FastGPT 采用了 RAG 中的 Embedding 方案构建知识库,要使用好 FastGPT 需要简单的理解`Embedding`向量是如何工作的及其特点。
|
||||
|
||||
@@ -21,21 +21,29 @@ FastGPT 采用了 RAG 中的 Embedding 方案构建知识库,要使用好 Fast
|
||||
|
||||
检索器的精度比较容易解决,向量模型的训练略复杂,因此数据和检索词质量优化成了一个重要的环节。
|
||||
|
||||
# FastGPT 中向量的结构设计
|
||||
## FastGPT 中向量的结构设计
|
||||
|
||||
FastGPT 采用了 `PostgresSQL` 的 `PG Vector` 插件作为向量检索器,索引为`HNSW`。且`PostgresSQL`仅用于向量检索,`MongoDB`用于其他数据的存取。
|
||||
|
||||
在`PostgresSQL`的表中,设置一个 `index` 字段用于存储向量、一个 `q` 字段用于存储向量对应的内容,以及一个 `a` 字段用于检索映射。之所以取字段为 `qa` 是由于一些历史缘故,无需完全解为 “问答对” 的格式。在实际使用过程中,可以利用`q`和`a`的组合,对检索后的内容做进一步的声明,提高大模型的理解力(注意,这里不直接提高搜索精度)。
|
||||
在`PostgresSQL`的表中,设置一个 `index` 字段用于存储向量,以及一个`data_id`用于在`MongoDB`中寻找对应的映射值。多个`index`可以对应一组`data_id`,也就是说,一组向量可以对应多组数据。在进行检索时,相同数据会进行合并。
|
||||
|
||||
目前,提高向量搜索的精度,主要可以通过几种途径:
|
||||

|
||||
|
||||
1. 精简`q`的内容,减少向量内容的长度:当`q`的内容更少,更准确时,检索精度自然会提高。但与此同时,会牺牲一定的检索范围,适合答案较为严格的场景。
|
||||
2. 更好分词分段:当一段话的结构和语义是完整的,并且是单一的,精度也会提高。因此,许多系统都会优化分词器,尽可能的保障每组数据的完整性。
|
||||
3. 多样性文本:为一段内容增加关键词、摘要、相似问题等描述性信息,可以使得该内容的向量具有更大的检索覆盖范围。
|
||||
### 多向量的目的和使用方式
|
||||
|
||||
在一组向量中,内容的长度和语义的丰富度通常是矛盾的,无法兼得。因此,FastGPT 采用了多向量映射的方式,将一组数据映射到多组向量中,从而保障数据的完整性和语义的丰富度。
|
||||
|
||||
你可以为一组较长的文本,添加多组向量,从而在检索时,只要其中一组向量被检索到,该数据也将被召回。
|
||||
|
||||
### 提高向量搜索精度的方法
|
||||
|
||||
1. 更好分词分段:当一段话的结构和语义是完整的,并且是单一的,精度也会提高。因此,许多系统都会优化分词器,尽可能的保障每组数据的完整性。
|
||||
2. 精简`index`的内容,减少向量内容的长度:当`index`的内容更少,更准确时,检索精度自然会提高。但与此同时,会牺牲一定的检索范围,适合答案较为严格的场景。
|
||||
3. 丰富`index`的数量,可以为同一个`chunk`内容增加多组`index`。
|
||||
4. 优化检索词:在实际使用过程中,用户的问题通常是模糊的或是缺失的,并不一定是完整清晰的问题。因此优化用户的问题(检索词)很大程度上也可以提高精度。
|
||||
5. 微调向量模型:由于市面上直接使用的向量模型都是通用型模型,在特定领域的检索精度并不高,因此微调向量模型可以很大程度上提高专业领域的检索效果。
|
||||
|
||||
# FastGPT 构建知识库方案
|
||||
## FastGPT 构建知识库方案
|
||||
|
||||
在 FastGPT 中,整个知识库由库、集合和数据 3 部分组成。集合可以简单理解为一个`文件`。一个`库`中可以包含多个`集合`,一个`集合`中可以包含多组`数据`。最小的搜索单位是`库`,也就是说,知识库搜索时,是对整个`库`进行搜索,而集合仅是为了对数据进行分类管理,与搜索效果无关。(起码目前还是)
|
||||
|
||||
@@ -43,7 +51,7 @@ FastGPT 采用了 `PostgresSQL` 的 `PG Vector` 插件作为向量检索器,
|
||||
| --- | --- | --- |
|
||||
|  |  |  |
|
||||
|
||||
## 导入数据方案1 - 直接分段导入
|
||||
### 导入数据方案1 - 直接分段导入
|
||||
|
||||
选择文件导入时,可以选择直接分段方案。直接分段会利用`句子分词器`对文本进行一定长度拆分,最终分割中多组的`q`。如果使用了直接分段方案,我们建议在`应用`设置`引用提示词`时,使用`通用模板`即可,无需选择`问答模板`。
|
||||
|
||||
@@ -52,7 +60,7 @@ FastGPT 采用了 `PostgresSQL` 的 `PG Vector` 插件作为向量检索器,
|
||||
|  |  |
|
||||
|
||||
|
||||
## 导入数据方案2 - QA导入
|
||||
### 导入数据方案2 - QA导入
|
||||
|
||||
选择文件导入时,可以选择QA拆分方案。仍然需要使用到`句子分词器`对文本进行拆分,但长度比直接分段大很多。在导入后,会先调用`大模型`对分段进行学习,并给出一些`问题`和`答案`,最终问题和答案会一起被存储到`q`中。注意,新版的 FastGPT 为了提高搜索的范围,不再将问题和答案分别存储到 qa 中。
|
||||
|
||||
@@ -60,7 +68,7 @@ FastGPT 采用了 `PostgresSQL` 的 `PG Vector` 插件作为向量检索器,
|
||||
| --- | --- |
|
||||
|  |  |
|
||||
|
||||
## 导入数据方案3 - 手动录入
|
||||
### 导入数据方案3 - 手动录入
|
||||
|
||||
在 FastGPT 中,你可以在任何一个`集合`中点击右上角的`插入`手动录入知识点,或者使用`标注`功能手动录入。被搜索的内容为`q`,补充内容(可选)为`a`。
|
||||
|
||||
@@ -68,16 +76,16 @@ FastGPT 采用了 `PostgresSQL` 的 `PG Vector` 插件作为向量检索器,
|
||||
| --- | --- | --- |
|
||||
|  |  |  |
|
||||
|
||||
## 导入数据方案4 - CSV录入
|
||||
### 导入数据方案4 - CSV录入
|
||||
|
||||
有些数据较为独特,可能需要单独的进行预处理分割后再导入 FastGPT,此时可以选择 csv 导入,可批量的将处理好的数据导入。
|
||||
|
||||

|
||||
|
||||
## 导入数据方案5 - API导入
|
||||
### 导入数据方案5 - API导入
|
||||
|
||||
参考[FastGPT OpenAPI使用](/docs/development/openapi/#知识库添加数据)。
|
||||
|
||||
# QA的组合与引用提示词构建
|
||||
## QA的组合与引用提示词构建
|
||||
|
||||
参考[引用模板与引用提示词示例](/docs/use-cases/ai_settings/#示例)
|
||||
|
||||
@@ -73,7 +73,7 @@ weight: 340
|
||||

|
||||
|
||||
导入结果如上图。可以看到,我们均采用的是问答对的格式,而不是粗略的直接导入。目的就是为了模拟用户问题,进一步的提高向量搜索的匹配效果。可以为同一个问题设置多种问法,效果更佳。
|
||||
FastGPT 还提供了 openapi 功能,你可以在本地对特殊格式的文件进行处理后,再上传到 FastGPT,具体可以参考:[FastGPT Api Docs](https://doc.fastgpt.run/docs/development/openapi)
|
||||
FastGPT 还提供了 openapi 功能,你可以在本地对特殊格式的文件进行处理后,再上传到 FastGPT,具体可以参考:[FastGPT Api Docs](https://doc.fastgpt.in/docs/development/openapi)
|
||||
|
||||
## 知识库微调和参数调整
|
||||
|
||||
|
||||
76
docSite/content/docs/use-cases/wechat.md
Normal file
@@ -0,0 +1,76 @@
|
||||
---
|
||||
title: " 接入微信和企业微信 "
|
||||
description: "FastGPT 接入微信和企业微信 "
|
||||
icon: "chat"
|
||||
draft: false
|
||||
toc: true
|
||||
weight: 322
|
||||
---
|
||||
|
||||
# FastGPT 三分钟接入微信/企业微信
|
||||
私人微信和企业微信接入的方式基本一样,不同的地方会刻意指出。
|
||||
[查看视频教程](https://www.bilibili.com/video/BV1cu411F7FN/?spm_id_from=333.1007.top_right_bar_window_history.content.click&vd_source=903c2b09b7412037c2eddc6a8fb9828b)
|
||||
## 创建APIKey
|
||||
首先找到我们需要接入的应用,然后点击「外部使用」->「API访问」创建一个APIKey并保存。
|
||||
|
||||

|
||||
|
||||
## 配置微秘书
|
||||
|
||||
打开[微秘书](https://wechat.aibotk.com?r=zWLnZK) 注册登陆后找到菜单栏「基础配置」->「智能配置」,按照下图配置。
|
||||
|
||||

|
||||
|
||||
继续往下看到 `apikey` 和`服务器根地址`,这里`apikey`填写我们在 FastGPT 应用外部访问中创建的 APIkey,服务器根地址填写官方地址或者私有化部署的地址,这里用官方地址示例,注意要添加`/v1`后缀,填写完毕后保存。
|
||||
|
||||

|
||||
|
||||
## sealos部署服务
|
||||
|
||||
[访问sealos](https://cloud.sealos.io/) 登陆进来之后打开「应用管理」-> 「新建应用」。
|
||||
- 应用名称:随便填写
|
||||
- 镜像名:私人微信填写 aibotk/wechat-assistant 企业微信填写 aibotk/worker-assistant
|
||||
- cpu和内存建议 1c1g
|
||||
|
||||

|
||||
|
||||
往下翻页找到「高级配置」-> 「编辑环境变量」
|
||||
|
||||

|
||||
|
||||
这里需要填写四个环境变量:
|
||||
AIBOTK_KEY="微秘书 APIKEY"
|
||||
AIBOTK_SECRET="微秘书 APISECRET"
|
||||
WORK_PRO_TOKEN="你申请的企微 token" (企业微信需要填写,私人微信不需要)
|
||||
WECHATY_PUPPET_SERVICE_AUTHORITY=token-service-discovery-test.juzibot.com(企业微信需要填写,私人微信不需要)
|
||||
|
||||
这里最后两个变量只有部署企业微信才需要,私人微信只需要填写前两个即可。
|
||||
|
||||

|
||||
|
||||
这里环境变量我们介绍下如何填写:
|
||||
|
||||
`AIBOTK_KEY` 和 `AIBOTK_SECRET` 我们需要回到[微秘书](https://wechat.aibotk.com?r=zWLnZK)找到「个人中心」,这里的 APIKEY 对应 AIBOTK_KEY ,APISECRET 对应 `AIBOTK_SECRET`。
|
||||
|
||||

|
||||
|
||||
`WORK_PRO_TOKEN` [点击这里](https://tss.juzibot.com?aff=aibotk)申请 token 然后填入即可。
|
||||
|
||||
`WECHATY_PUPPET_SERVICE_AUTHORITY`的值复制过去就可以。
|
||||
|
||||
填写完毕后点右上角「部署」,等待应用状态变为运行中。
|
||||
|
||||

|
||||
|
||||
返回[微秘书](https://wechat.aibotk.com?r=zWLnZK) 找到「首页」,扫码登陆需要接入的微信号。
|
||||
|
||||

|
||||
|
||||
## 测试
|
||||
只需要发送信息,或者拉入群聊@登陆的微信就会回复信息啦。
|
||||

|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
12
package.json
@@ -8,12 +8,14 @@
|
||||
"format-doc": "zhlint --dir ./docSite *.md --fix"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/multer": "^1.4.10",
|
||||
"husky": "^8.0.3",
|
||||
"lint-staged": "^13.2.1",
|
||||
"prettier": "^3.0.3",
|
||||
"i18next": "^22.5.1",
|
||||
"lint-staged": "^13.2.1",
|
||||
"next-i18next": "^13.3.0",
|
||||
"prettier": "^3.0.3",
|
||||
"react-i18next": "^12.3.1",
|
||||
"next-i18next": "^13.3.0"
|
||||
"zhlint": "^0.7.1"
|
||||
},
|
||||
"lint-staged": {
|
||||
"./**/**/*.{ts,tsx,scss}": "npm run format-code",
|
||||
@@ -21,5 +23,9 @@
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18.0.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"multer": "1.4.5-lts.1",
|
||||
"openai": "4.16.1"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
export const PRICE_SCALE = 100000;
|
||||
@@ -1,3 +0,0 @@
|
||||
export type CreateTrainingBillType = {
|
||||
name: string;
|
||||
};
|
||||
28
packages/global/common/error/code/app.ts
Normal file
@@ -0,0 +1,28 @@
|
||||
import { ErrType } from '../errorCode';
|
||||
|
||||
/* dataset: 502000 */
|
||||
export enum AppErrEnum {
|
||||
unExist = 'unExist',
|
||||
unAuthApp = 'unAuthApp'
|
||||
}
|
||||
const appErrList = [
|
||||
{
|
||||
statusText: AppErrEnum.unExist,
|
||||
message: '应用不存在'
|
||||
},
|
||||
{
|
||||
statusText: AppErrEnum.unAuthApp,
|
||||
message: '无权操作该应用'
|
||||
}
|
||||
];
|
||||
export default appErrList.reduce((acc, cur, index) => {
|
||||
return {
|
||||
...acc,
|
||||
[cur.statusText]: {
|
||||
code: 502000 + index,
|
||||
statusText: cur.statusText,
|
||||
message: cur.message,
|
||||
data: null
|
||||
}
|
||||
};
|
||||
}, {} as ErrType<`${AppErrEnum}`>);
|
||||
23
packages/global/common/error/code/chat.ts
Normal file
@@ -0,0 +1,23 @@
|
||||
import { ErrType } from '../errorCode';
|
||||
|
||||
/* dataset: 504000 */
|
||||
export enum ChatErrEnum {
|
||||
unAuthChat = 'unAuthChat'
|
||||
}
|
||||
const errList = [
|
||||
{
|
||||
statusText: ChatErrEnum.unAuthChat,
|
||||
message: '无权操作该对话记录'
|
||||
}
|
||||
];
|
||||
export default errList.reduce((acc, cur, index) => {
|
||||
return {
|
||||
...acc,
|
||||
[cur.statusText]: {
|
||||
code: 504000 + index,
|
||||
statusText: cur.statusText,
|
||||
message: cur.message,
|
||||
data: null
|
||||
}
|
||||
};
|
||||
}, {} as ErrType<`${ChatErrEnum}`>);
|
||||
43
packages/global/common/error/code/dataset.ts
Normal file
@@ -0,0 +1,43 @@
|
||||
import { ErrType } from '../errorCode';
|
||||
|
||||
/* dataset: 501000 */
|
||||
export enum DatasetErrEnum {
|
||||
unAuthDataset = 'unAuthDataset',
|
||||
unCreateCollection = 'unCreateCollection',
|
||||
unAuthDatasetCollection = 'unAuthDatasetCollection',
|
||||
unAuthDatasetData = 'unAuthDatasetData',
|
||||
unAuthDatasetFile = 'unAuthDatasetFile'
|
||||
}
|
||||
const datasetErr = [
|
||||
{
|
||||
statusText: DatasetErrEnum.unAuthDataset,
|
||||
message: '无权操作该知识库'
|
||||
},
|
||||
{
|
||||
statusText: DatasetErrEnum.unAuthDatasetCollection,
|
||||
message: '无权操作该数据集'
|
||||
},
|
||||
{
|
||||
statusText: DatasetErrEnum.unAuthDatasetData,
|
||||
message: '无权操作该数据'
|
||||
},
|
||||
{
|
||||
statusText: DatasetErrEnum.unAuthDatasetFile,
|
||||
message: '无权操作该文件'
|
||||
},
|
||||
{
|
||||
statusText: DatasetErrEnum.unCreateCollection,
|
||||
message: '无权创建数据集'
|
||||
}
|
||||
];
|
||||
export default datasetErr.reduce((acc, cur, index) => {
|
||||
return {
|
||||
...acc,
|
||||
[cur.statusText]: {
|
||||
code: 501000 + index,
|
||||
statusText: cur.statusText,
|
||||
message: cur.message,
|
||||
data: null
|
||||
}
|
||||
};
|
||||
}, {} as ErrType<`${DatasetErrEnum}`>);
|
||||
28
packages/global/common/error/code/openapi.ts
Normal file
@@ -0,0 +1,28 @@
|
||||
import { ErrType } from '../errorCode';
|
||||
|
||||
/* dataset: 506000 */
|
||||
export enum OpenApiErrEnum {
|
||||
unExist = 'unExist',
|
||||
unAuth = 'unAuth'
|
||||
}
|
||||
const errList = [
|
||||
{
|
||||
statusText: OpenApiErrEnum.unExist,
|
||||
message: 'Api Key 不存在'
|
||||
},
|
||||
{
|
||||
statusText: OpenApiErrEnum.unAuth,
|
||||
message: '无权操作该 Api Key'
|
||||
}
|
||||
];
|
||||
export default errList.reduce((acc, cur, index) => {
|
||||
return {
|
||||
...acc,
|
||||
[cur.statusText]: {
|
||||
code: 506000 + index,
|
||||
statusText: cur.statusText,
|
||||
message: cur.message,
|
||||
data: null
|
||||
}
|
||||
};
|
||||
}, {} as ErrType<`${OpenApiErrEnum}`>);
|
||||
34
packages/global/common/error/code/outLink.ts
Normal file
@@ -0,0 +1,34 @@
|
||||
import { ErrType } from '../errorCode';
|
||||
|
||||
/* dataset: 505000 */
|
||||
export enum OutLinkErrEnum {
|
||||
unExist = 'unExist',
|
||||
unAuthLink = 'unAuthLink',
|
||||
linkUnInvalid = 'linkUnInvalid'
|
||||
}
|
||||
const errList = [
|
||||
{
|
||||
statusText: OutLinkErrEnum.unExist,
|
||||
message: '分享链接不存在'
|
||||
},
|
||||
{
|
||||
statusText: OutLinkErrEnum.unAuthLink,
|
||||
message: '分享链接无效'
|
||||
},
|
||||
{
|
||||
code: 501,
|
||||
statusText: OutLinkErrEnum.linkUnInvalid,
|
||||
message: '分享链接无效'
|
||||
}
|
||||
];
|
||||
export default errList.reduce((acc, cur, index) => {
|
||||
return {
|
||||
...acc,
|
||||
[cur.statusText]: {
|
||||
code: cur?.code || 505000 + index,
|
||||
statusText: cur.statusText,
|
||||
message: cur.message,
|
||||
data: null
|
||||
}
|
||||
};
|
||||
}, {} as ErrType<`${OutLinkErrEnum}`>);
|
||||
28
packages/global/common/error/code/plugin.ts
Normal file
@@ -0,0 +1,28 @@
|
||||
import { ErrType } from '../errorCode';
|
||||
|
||||
/* dataset: 507000 */
|
||||
export enum PluginErrEnum {
|
||||
unExist = 'unExist',
|
||||
unAuth = 'unAuth'
|
||||
}
|
||||
const errList = [
|
||||
{
|
||||
statusText: PluginErrEnum.unExist,
|
||||
message: '插件不存在'
|
||||
},
|
||||
{
|
||||
statusText: PluginErrEnum.unAuth,
|
||||
message: '无权操作该插件'
|
||||
}
|
||||
];
|
||||
export default errList.reduce((acc, cur, index) => {
|
||||
return {
|
||||
...acc,
|
||||
[cur.statusText]: {
|
||||
code: 507000 + index,
|
||||
statusText: cur.statusText,
|
||||
message: cur.message,
|
||||
data: null
|
||||
}
|
||||
};
|
||||
}, {} as ErrType<`${PluginErrEnum}`>);
|
||||
22
packages/global/common/error/code/team.ts
Normal file
@@ -0,0 +1,22 @@
|
||||
import { ErrType } from '../errorCode';
|
||||
|
||||
/* team: 500000 */
|
||||
export enum TeamErrEnum {
|
||||
teamOverSize = 'teamOverSize',
|
||||
unAuthTeam = 'unAuthTeam'
|
||||
}
|
||||
const teamErr = [
|
||||
{ statusText: TeamErrEnum.teamOverSize, message: 'error.team.overSize' },
|
||||
{ statusText: TeamErrEnum.unAuthTeam, message: '无权操作该团队' }
|
||||
];
|
||||
export default teamErr.reduce((acc, cur, index) => {
|
||||
return {
|
||||
...acc,
|
||||
[cur.statusText]: {
|
||||
code: 500000 + index,
|
||||
statusText: cur.statusText,
|
||||
message: cur.message,
|
||||
data: null
|
||||
}
|
||||
};
|
||||
}, {} as ErrType<`${TeamErrEnum}`>);
|
||||
26
packages/global/common/error/code/user.ts
Normal file
@@ -0,0 +1,26 @@
|
||||
import { ErrType } from '../errorCode';
|
||||
|
||||
/* team: 503000 */
|
||||
export enum UserErrEnum {
|
||||
unAuthUser = 'unAuthUser',
|
||||
unAuthRole = 'unAuthRole',
|
||||
binVisitor = 'binVisitor',
|
||||
balanceNotEnough = 'balanceNotEnough'
|
||||
}
|
||||
const errList = [
|
||||
{ statusText: UserErrEnum.unAuthUser, message: '找不到该用户' },
|
||||
{ statusText: UserErrEnum.binVisitor, message: '您的身份校验未通过' },
|
||||
{ statusText: UserErrEnum.binVisitor, message: '您当前身份为游客,无权操作' },
|
||||
{ statusText: UserErrEnum.balanceNotEnough, message: '账号余额不足~' }
|
||||
];
|
||||
export default errList.reduce((acc, cur, index) => {
|
||||
return {
|
||||
...acc,
|
||||
[cur.statusText]: {
|
||||
code: 503000 + index,
|
||||
statusText: cur.statusText,
|
||||
message: cur.message,
|
||||
data: null
|
||||
}
|
||||
};
|
||||
}, {} as ErrType<`${UserErrEnum}`>);
|
||||
@@ -1,3 +1,12 @@
|
||||
import appErr from './code/app';
|
||||
import chatErr from './code/chat';
|
||||
import datasetErr from './code/dataset';
|
||||
import openapiErr from './code/openapi';
|
||||
import pluginErr from './code/plugin';
|
||||
import outLinkErr from './code/outLink';
|
||||
import teamErr from './code/team';
|
||||
import userErr from './code/user';
|
||||
|
||||
export const ERROR_CODE: { [key: number]: string } = {
|
||||
400: '请求失败',
|
||||
401: '无权访问',
|
||||
@@ -27,10 +36,19 @@ export enum ERROR_ENUM {
|
||||
insufficientQuota = 'insufficientQuota',
|
||||
unAuthModel = 'unAuthModel',
|
||||
unAuthApiKey = 'unAuthApiKey',
|
||||
unAuthDataset = 'unAuthDataset',
|
||||
unAuthDatasetCollection = 'unAuthDatasetCollection',
|
||||
unAuthFile = 'unAuthFile'
|
||||
}
|
||||
|
||||
export type ErrType<T> = Record<
|
||||
string,
|
||||
{
|
||||
code: number;
|
||||
statusText: T;
|
||||
message: string;
|
||||
data: null;
|
||||
}
|
||||
>;
|
||||
|
||||
export const ERROR_RESPONSE: Record<
|
||||
any,
|
||||
{
|
||||
@@ -55,15 +73,10 @@ export const ERROR_RESPONSE: Record<
|
||||
[ERROR_ENUM.unAuthModel]: {
|
||||
code: 511,
|
||||
statusText: ERROR_ENUM.unAuthModel,
|
||||
message: '无权使用该模型',
|
||||
data: null
|
||||
},
|
||||
[ERROR_ENUM.unAuthDataset]: {
|
||||
code: 512,
|
||||
statusText: ERROR_ENUM.unAuthDataset,
|
||||
message: '无权使用该知识库',
|
||||
message: '无权操作该模型',
|
||||
data: null
|
||||
},
|
||||
|
||||
[ERROR_ENUM.unAuthFile]: {
|
||||
code: 513,
|
||||
statusText: ERROR_ENUM.unAuthFile,
|
||||
@@ -76,10 +89,12 @@ export const ERROR_RESPONSE: Record<
|
||||
message: 'Api Key 不合法',
|
||||
data: null
|
||||
},
|
||||
[ERROR_ENUM.unAuthDatasetCollection]: {
|
||||
code: 515,
|
||||
statusText: ERROR_ENUM.unAuthDatasetCollection,
|
||||
message: '无权使用该知识库文件',
|
||||
data: null
|
||||
}
|
||||
...appErr,
|
||||
...chatErr,
|
||||
...datasetErr,
|
||||
...openapiErr,
|
||||
...outLinkErr,
|
||||
...teamErr,
|
||||
...userErr,
|
||||
...pluginErr
|
||||
};
|
||||
|
||||
5
packages/global/common/file/constants.ts
Normal file
@@ -0,0 +1,5 @@
|
||||
// Names of the file-storage buckets. Only dataset uploads are stored for now.
export enum BucketNameEnum {
  dataset = 'dataset'
}

// API route used as the prefix when building file read URLs.
export const FileBaseUrl = '/api/common/file/read';
|
||||
8
packages/global/common/file/type.d.ts
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
import { BucketNameEnum } from './constants';

// Payload encoded into a file-access token: identifies the requested file and
// the team/member the grant was issued for.
export type FileTokenQuery = {
  bucketName: `${BucketNameEnum}`; // storage bucket the file lives in
  teamId: string; // owning team
  tmbId: string; // team member id the token was issued for
  fileId: string; // target file id inside the bucket
};
|
||||
131
packages/global/common/string/textSplitter.ts
Normal file
@@ -0,0 +1,131 @@
|
||||
import { getErrText } from '../error/utils';
import { countPromptTokens } from './tiktoken';

/**
 * Split text into chunks of roughly `maxLen` characters, preferring to cut at
 * progressively weaker boundaries: markdown headings (# .. ####), blank lines,
 * newlines, sentence punctuation, then commas, and finally hard slicing.
 *
 * maxLen - target chunk length in characters (max: 3500). Must be > overlapLen.
 * overlapLen - size of the text carried over between adjacent chunks
 *              (defaults to 20% of maxLen). Overlap is skipped for markdown
 *              heading splits.
 *
 * Returns { chunks, tokens } where tokens is the summed token count of all chunks.
 */
export const splitText2Chunks = (props: { text: string; maxLen: number; overlapLen?: number }) => {
  const { text = '', maxLen, overlapLen = Math.floor(maxLen * 0.2) } = props;
  // Sentinel inserted next to each boundary match, then used to split.
  const tempMarker = 'SPLIT_HERE_SPLIT_HERE';

  // Boundary regex per recursion depth; steps 0-3 are markdown headings
  // (marker goes BEFORE the heading), steps 4+ are plain-text separators
  // (marker goes AFTER the separator).
  const stepReg: Record<number, RegExp> = {
    0: /^(#\s[^\n]+)\n/gm,
    1: /^(##\s[^\n]+)\n/gm,
    2: /^(###\s[^\n]+)\n/gm,
    3: /^(####\s[^\n]+)\n/gm,

    4: /(\n\n)/g,
    5: /([\n])/g,
    // NOTE(review): `(?!<[^a-zA-Z])` is a negative LOOKAHEAD for a literal '<',
    // which is almost always true — it looks like a mistyped negative
    // lookbehind `(?<![a-zA-Z])`. Confirm the intended "don't split inside
    // abbreviations" behavior before changing it.
    6: /[。]|(?!<[^a-zA-Z])\.\s/g,
    7: /([!?]|!\s|\?\s)/g,
    8: /([;]|;\s)/g,
    9: /([,]|,\s)/g
  };

  // Recursively split `text`, escalating to the next (weaker) boundary when a
  // piece is still too large. `lastChunk` accumulates the chunk being built;
  // `overlayChunk` holds the tail carried into the next chunk as overlap.
  const splitTextRecursively = ({
    text = '',
    step,
    lastChunk,
    overlayChunk
  }: {
    text: string;
    step: number;
    lastChunk: string;
    overlayChunk: string;
  }) => {
    // Small enough already — keep as a single chunk.
    if (text.length <= maxLen) {
      return [text];
    }
    const reg = stepReg[step];
    const isMarkdownSplit = step < 4;

    if (!reg) {
      // All boundary kinds exhausted: hard-slice with a sliding window so
      // consecutive chunks overlap by `overlapLen` characters.
      // use slice-maxLen to split text
      const chunks: string[] = [];
      let chunk = '';
      for (let i = 0; i < text.length; i += maxLen - overlapLen) {
        chunk = text.slice(i, i + maxLen);
        chunks.push(chunk);
      }
      return chunks;
    }

    // split text by special char: headings keep the marker before the match,
    // other separators keep it after, so the separator stays attached.
    const splitTexts = text
      .replace(reg, isMarkdownSplit ? `${tempMarker}$1` : `$1${tempMarker}`)
      .split(`${tempMarker}`)
      .filter((part) => part);

    let chunks: string[] = [];
    for (let i = 0; i < splitTexts.length; i++) {
      let text = splitTexts[i];
      // NOTE(review): despite the names, these "token" counters measure
      // CHARACTERS (string .length), not model tokens.
      let chunkToken = lastChunk.length;
      const textToken = text.length;

      // next chunk is too large / new chunk is too large(The current chunk must be smaller than maxLen)
      if (textToken >= maxLen || chunkToken + textToken > maxLen * 1.4) {
        // last chunk is too large, push it to chunks, not add to next chunk
        if (chunkToken > maxLen * 0.7) {
          chunks.push(lastChunk);
          lastChunk = '';
          overlayChunk = '';
        }
        // chunk is small, insert to next chunks: recurse with a weaker boundary
        const innerChunks = splitTextRecursively({
          text,
          step: step + 1,
          lastChunk,
          overlayChunk
        });
        if (innerChunks.length === 0) continue;
        chunks = chunks.concat(innerChunks);
        lastChunk = '';
        overlayChunk = '';
        continue;
      }

      // size less than maxLen, push text to last chunk
      lastChunk += text;
      chunkToken += textToken; // Definitely less than 1.4 * maxLen

      // size over lapLen, push it to next chunk: near the end of a chunk,
      // small pieces are also remembered as the overlap for the next one.
      if (
        overlapLen !== 0 &&
        !isMarkdownSplit &&
        chunkToken >= maxLen - overlapLen &&
        textToken < overlapLen
      ) {
        overlayChunk += text;
      }
      // Chunk reached target size: emit it and seed the next with the overlap.
      if (chunkToken >= maxLen) {
        chunks.push(lastChunk);
        lastChunk = overlayChunk;
        overlayChunk = '';
      }
    }

    /* If the last chunk is independent, it needs to be push chunks. */
    if (lastChunk && chunks[chunks.length - 1] && !chunks[chunks.length - 1].endsWith(lastChunk)) {
      chunks.push(lastChunk);
    }

    return chunks;
  };

  try {
    const chunks = splitTextRecursively({ text, step: 0, lastChunk: '', overlayChunk: '' });

    // Total model-token count across all emitted chunks.
    const tokens = chunks.reduce((sum, chunk) => sum + countPromptTokens(chunk, 'system'), 0);

    return {
      chunks,
      tokens
    };
  } catch (err) {
    throw new Error(getErrText(err));
  }
};
|
||||
@@ -1,8 +1,8 @@
|
||||
/* Only the token of gpt-3.5-turbo is used */
|
||||
import { ChatItemType } from '@/types/chat';
|
||||
import type { ChatItemType } from '../../../core/chat/type';
|
||||
import { Tiktoken } from 'js-tiktoken/lite';
|
||||
import { adaptChat2GptMessages } from '@/utils/common/adapt/message';
|
||||
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constant';
|
||||
import { adaptChat2GptMessages } from '../../../core/chat/adapt';
|
||||
import { ChatCompletionRequestMessageRoleEnum } from '../../../core/ai/constant';
|
||||
import encodingJson from './cl100k_base.json';
|
||||
|
||||
/* init tikToken obj */
|
||||
@@ -55,17 +55,6 @@ export function countMessagesTokens({ messages }: { messages: ChatItemType[] })
|
||||
return totalTokens;
|
||||
}
|
||||
|
||||
export function sliceTextByTokens({ text, length }: { text: string; length: number }) {
|
||||
const enc = getTikTokenEnc();
|
||||
|
||||
try {
|
||||
const encodeText = enc.encode(text);
|
||||
return enc.decode(encodeText.slice(0, length));
|
||||
} catch (error) {
|
||||
return text.slice(0, length);
|
||||
}
|
||||
}
|
||||
|
||||
/* slice messages from top to bottom by maxTokens */
|
||||
export function sliceMessagesTB({
|
||||
messages,
|
||||
5
packages/global/common/string/tiktoken/type.d.ts
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
import type { Tiktoken } from 'js-tiktoken';

declare global {
  // Shared tokenizer instance stored on globalThis — presumably cached so the
  // (expensive) encoder is only built once per process; confirm in the
  // tiktoken helpers that initialize it.
  var TikToken: Tiktoken;
}
|
||||
@@ -1,13 +1,15 @@
|
||||
import crypto from 'crypto';
|
||||
|
||||
/* check string is a web link */
|
||||
export function strIsLink(str?: string) {
|
||||
if (!str) return false;
|
||||
if (/^((http|https)?:\/\/|www\.|\/)[^\s/$.?#].[^\s]*$/i.test(str)) return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
export const hashStr = (psw: string) => {
|
||||
return crypto.createHash('sha256').update(psw).digest('hex');
|
||||
/* hash string */
|
||||
export const hashStr = (str: string) => {
|
||||
return crypto.createHash('sha256').update(str).digest('hex');
|
||||
};
|
||||
|
||||
/* simple text, remove chinese space and extra \n */
|
||||
@@ -20,3 +22,16 @@ export const simpleText = (text: string) => {
|
||||
|
||||
return text;
|
||||
};
|
||||
|
||||
/*
|
||||
replace {{variable}} to value
|
||||
*/
|
||||
export function replaceVariable(text: string, obj: Record<string, string | number>) {
|
||||
for (const key in obj) {
|
||||
const val = obj[key];
|
||||
if (!['string', 'number'].includes(typeof val)) continue;
|
||||
|
||||
text = text.replace(new RegExp(`{{(${key})}}`, 'g'), String(val));
|
||||
}
|
||||
return text || '';
|
||||
}
|
||||
|
||||
@@ -23,6 +23,7 @@ export type FeConfigsType = {
|
||||
exportLimitMinutes?: number;
|
||||
};
|
||||
scripts?: { [key: string]: string }[];
|
||||
favicon?: string;
|
||||
};
|
||||
|
||||
export type SystemEnvType = {
|
||||
|
||||
5
packages/global/core/ai/api.d.ts
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
// Request body for the rerank endpoint: a query plus candidate passages.
export type PostReRankProps = {
  query: string; // the search query to rank against
  inputs: { id: string; text: string }[]; // candidate passages with stable ids
};
// Rerank result: each input id paired with its relevance score.
export type PostReRankResponse = { id: string; score: number }[];
|
||||
@@ -2,5 +2,6 @@ export enum ChatCompletionRequestMessageRoleEnum {
|
||||
'System' = 'system',
|
||||
'User' = 'user',
|
||||
'Assistant' = 'assistant',
|
||||
'Function' = 'function'
|
||||
'Function' = 'function',
|
||||
'Tool' = 'tool'
|
||||
}
|
||||
|
||||
2
packages/global/core/ai/index.ts
Normal file
@@ -0,0 +1,2 @@
|
||||
import OpenAI from 'openai';
// Re-export the OpenAI SDK client so other packages depend on this single
// import point instead of importing 'openai' directly.
export default OpenAI;
|
||||
@@ -1,15 +1,15 @@
|
||||
import { LLMModelUsageEnum } from '@/constants/model';
|
||||
|
||||
export type LLMModelItemType = {
|
||||
model: string;
|
||||
name: string;
|
||||
maxToken: number;
|
||||
maxContext: number;
|
||||
maxResponse: number;
|
||||
price: number;
|
||||
};
|
||||
export type ChatModelItemType = LLMModelItemType & {
|
||||
quoteMaxToken: number;
|
||||
maxTemperature: number;
|
||||
censor?: boolean;
|
||||
vision?: boolean;
|
||||
defaultSystemChatPrompt?: string;
|
||||
};
|
||||
|
||||
@@ -25,3 +25,18 @@ export type VectorModelItemType = {
|
||||
price: number;
|
||||
maxToken: number;
|
||||
};
|
||||
|
||||
export type AudioSpeechModelType = {
|
||||
model: string;
|
||||
name: string;
|
||||
price: number;
|
||||
baseUrl?: string;
|
||||
key?: string;
|
||||
voices: { label: string; value: string; bufferId: string }[];
|
||||
};
|
||||
|
||||
export type WhisperModelType = {
|
||||
model: string;
|
||||
name: string;
|
||||
price: number;
|
||||
};
|
||||
140
packages/global/core/ai/model.ts
Normal file
@@ -0,0 +1,140 @@
|
||||
import type {
|
||||
LLMModelItemType,
|
||||
ChatModelItemType,
|
||||
FunctionModelItemType,
|
||||
VectorModelItemType,
|
||||
AudioSpeechModelType,
|
||||
WhisperModelType
|
||||
} from './model.d';
|
||||
|
||||
export const defaultChatModels: ChatModelItemType[] = [
|
||||
{
|
||||
model: 'gpt-3.5-turbo-1106',
|
||||
name: 'GPT35-1106',
|
||||
price: 0,
|
||||
maxContext: 16000,
|
||||
maxResponse: 4000,
|
||||
quoteMaxToken: 2000,
|
||||
maxTemperature: 1.2,
|
||||
censor: false,
|
||||
vision: false,
|
||||
defaultSystemChatPrompt: ''
|
||||
},
|
||||
{
|
||||
model: 'gpt-3.5-turbo-16k',
|
||||
name: 'GPT35-16k',
|
||||
maxContext: 16000,
|
||||
maxResponse: 16000,
|
||||
price: 0,
|
||||
quoteMaxToken: 8000,
|
||||
maxTemperature: 1.2,
|
||||
censor: false,
|
||||
vision: false,
|
||||
defaultSystemChatPrompt: ''
|
||||
},
|
||||
{
|
||||
model: 'gpt-4',
|
||||
name: 'GPT4-8k',
|
||||
maxContext: 8000,
|
||||
maxResponse: 8000,
|
||||
price: 0,
|
||||
quoteMaxToken: 4000,
|
||||
maxTemperature: 1.2,
|
||||
censor: false,
|
||||
vision: false,
|
||||
defaultSystemChatPrompt: ''
|
||||
},
|
||||
{
|
||||
model: 'gpt-4-vision-preview',
|
||||
name: 'GPT4-Vision',
|
||||
maxContext: 128000,
|
||||
maxResponse: 4000,
|
||||
price: 0,
|
||||
quoteMaxToken: 100000,
|
||||
maxTemperature: 1.2,
|
||||
censor: false,
|
||||
vision: true,
|
||||
defaultSystemChatPrompt: ''
|
||||
}
|
||||
];
|
||||
// Default QA-generation model list, used when the deployment config does not
// override it. price 0 means unmetered by default.
export const defaultQAModels: LLMModelItemType[] = [
  {
    model: 'gpt-3.5-turbo-16k',
    name: 'GPT35-16k',
    maxContext: 16000,
    // NOTE(review): maxResponse equals the full context window here, unlike
    // most other entries in this file — confirm this is intentional.
    maxResponse: 16000,
    price: 0
  }
];
|
||||
export const defaultCQModels: FunctionModelItemType[] = [
|
||||
{
|
||||
model: 'gpt-3.5-turbo-1106',
|
||||
name: 'GPT35-1106',
|
||||
maxContext: 16000,
|
||||
maxResponse: 4000,
|
||||
price: 0,
|
||||
functionCall: true,
|
||||
functionPrompt: ''
|
||||
},
|
||||
{
|
||||
model: 'gpt-4',
|
||||
name: 'GPT4-8k',
|
||||
maxContext: 8000,
|
||||
maxResponse: 8000,
|
||||
price: 0,
|
||||
functionCall: true,
|
||||
functionPrompt: ''
|
||||
}
|
||||
];
|
||||
export const defaultExtractModels: FunctionModelItemType[] = [
|
||||
{
|
||||
model: 'gpt-3.5-turbo-1106',
|
||||
name: 'GPT35-1106',
|
||||
maxContext: 16000,
|
||||
maxResponse: 4000,
|
||||
price: 0,
|
||||
functionCall: true,
|
||||
functionPrompt: ''
|
||||
}
|
||||
];
|
||||
// Default question-guide (QG) model list, used when the deployment config does
// not override it.
export const defaultQGModels: LLMModelItemType[] = [
  {
    model: 'gpt-3.5-turbo-1106',
    name: 'GPT35-1106',
    // NOTE(review): 1600 looks like a typo for 16000 — every other
    // gpt-3.5-turbo-1106 entry in this file uses maxContext: 16000. Confirm
    // before relying on this value.
    maxContext: 1600,
    maxResponse: 4000,
    price: 0
  }
];
|
||||
|
||||
// Default embedding (vector) model list, used when the deployment config does
// not override it.
export const defaultVectorModels: VectorModelItemType[] = [
  {
    model: 'text-embedding-ada-002',
    name: 'Embedding-2',
    price: 0,
    defaultToken: 500, // presumably the default chunk size (tokens) when splitting text for embedding — confirm at call sites
    maxToken: 3000
  }
];
|
||||
|
||||
export const defaultAudioSpeechModels: AudioSpeechModelType[] = [
|
||||
{
|
||||
model: 'tts-1',
|
||||
name: 'OpenAI TTS1',
|
||||
price: 0,
|
||||
voices: [
|
||||
{ label: 'Alloy', value: 'Alloy', bufferId: 'openai-Alloy' },
|
||||
{ label: 'Echo', value: 'Echo', bufferId: 'openai-Echo' },
|
||||
{ label: 'Fable', value: 'Fable', bufferId: 'openai-Fable' },
|
||||
{ label: 'Onyx', value: 'Onyx', bufferId: 'openai-Onyx' },
|
||||
{ label: 'Nova', value: 'Nova', bufferId: 'openai-Nova' },
|
||||
{ label: 'Shimmer', value: 'Shimmer', bufferId: 'openai-Shimmer' }
|
||||
]
|
||||
}
|
||||
];
|
||||
|
||||
// Default speech-to-text model, used when the deployment config does not
// override it.
export const defaultWhisperModel: WhisperModelType = {
  model: 'whisper-1',
  name: 'Whisper1',
  price: 0
};
|
||||
22
packages/global/core/ai/type.d.ts
vendored
@@ -1,9 +1,21 @@
|
||||
import OpenAI from 'openai';
|
||||
export type ChatCompletionRequestMessage = OpenAI.Chat.CreateChatCompletionRequestMessage;
|
||||
export type ChatCompletion = OpenAI.Chat.ChatCompletion;
|
||||
export type CreateChatCompletionRequest = OpenAI.Chat.ChatCompletionCreateParams;
|
||||
import type {
|
||||
ChatCompletion,
|
||||
ChatCompletionCreateParams,
|
||||
ChatCompletionChunk,
|
||||
ChatCompletionMessageParam,
|
||||
ChatCompletionContentPart
|
||||
} from 'openai/resources';
|
||||
|
||||
export type StreamChatType = Stream<OpenAI.Chat.ChatCompletionChunk>;
|
||||
export type ChatCompletionContentPart = ChatCompletionContentPart;
|
||||
export type ChatCompletionCreateParams = ChatCompletionCreateParams;
|
||||
export type ChatMessageItemType = Omit<ChatCompletionMessageParam, 'name'> & {
|
||||
name?: any;
|
||||
dataId?: string;
|
||||
content: any;
|
||||
} & any;
|
||||
|
||||
export type ChatCompletion = ChatCompletion;
|
||||
export type StreamChatType = Stream<ChatCompletionChunk>;
|
||||
|
||||
export type PromptTemplateItem = {
|
||||
title: string;
|
||||
|
||||
18
packages/global/core/app/api.d.ts
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
import { AppTypeEnum } from './constants';
import { AppSchema } from './type';

// Request body for creating an app; `modules` is the initial flow configuration.
export type CreateAppParams = {
  name?: string;
  avatar?: string;
  type?: `${AppTypeEnum}`;
  modules: AppSchema['modules'];
};

// Partial update body for an existing app; only the provided fields change.
export interface AppUpdateParams {
  name?: string;
  type?: `${AppTypeEnum}`;
  avatar?: string;
  intro?: string;
  modules?: AppSchema['modules'];
  permission?: AppSchema['permission'];
}
|
||||
4
packages/global/core/app/constants.ts
Normal file
@@ -0,0 +1,4 @@
|
||||
// How an app is built: 'basic' = simple form-configured app, 'advanced' =
// flow/module editor app. (Inferred from the names — confirm against usage.)
export enum AppTypeEnum {
  basic = 'basic',
  advanced = 'advanced'
}
|
||||
31
packages/global/core/app/type.d.ts
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
import { ModuleItemType } from '../module/type';
import { AppTypeEnum } from './constants';
import { PermissionTypeEnum } from '../../support/permission/constant';

// Full app document as stored in the database.
export interface AppSchema {
  _id: string;
  userId: string; // kept alongside the team fields below — presumably legacy ownership; confirm
  teamId: string; // owning team
  tmbId: string; // team member id of the creator/owner
  name: string;
  type: `${AppTypeEnum}`;
  avatar: string;
  intro: string;
  updateTime: number;
  modules: ModuleItemType[]; // the app's flow configuration
  permission: `${PermissionTypeEnum}`;
}

// Slim projection used by app list views.
export type AppListItemType = {
  _id: string;
  name: string;
  avatar: string;
  intro: string;
  isOwner: boolean;
  permission: `${PermissionTypeEnum}`;
};

// App document enriched with the viewer's access flags.
export type AppDetailType = AppSchema & {
  isOwner: boolean;
  canWrite: boolean;
};
|
||||
@@ -1,18 +1,21 @@
|
||||
import type { ChatItemType } from '@/types/chat';
|
||||
import { ChatRoleEnum } from '@/constants/chat';
|
||||
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constant';
|
||||
import type { MessageItemType } from '@/types/core/chat/type';
|
||||
import type { ChatItemType } from '../../core/chat/type.d';
|
||||
import { ChatRoleEnum } from '../../core/chat/constants';
|
||||
import { ChatCompletionRequestMessageRoleEnum } from '../../core/ai/constant';
|
||||
import type { ChatMessageItemType } from '../../core/ai/type.d';
|
||||
|
||||
const chat2Message = {
|
||||
[ChatRoleEnum.AI]: ChatCompletionRequestMessageRoleEnum.Assistant,
|
||||
[ChatRoleEnum.Human]: ChatCompletionRequestMessageRoleEnum.User,
|
||||
[ChatRoleEnum.System]: ChatCompletionRequestMessageRoleEnum.System
|
||||
[ChatRoleEnum.System]: ChatCompletionRequestMessageRoleEnum.System,
|
||||
[ChatRoleEnum.Function]: ChatCompletionRequestMessageRoleEnum.Function,
|
||||
[ChatRoleEnum.Tool]: ChatCompletionRequestMessageRoleEnum.Tool
|
||||
};
|
||||
const message2Chat = {
|
||||
[ChatCompletionRequestMessageRoleEnum.System]: ChatRoleEnum.System,
|
||||
[ChatCompletionRequestMessageRoleEnum.User]: ChatRoleEnum.Human,
|
||||
[ChatCompletionRequestMessageRoleEnum.Assistant]: ChatRoleEnum.AI,
|
||||
[ChatCompletionRequestMessageRoleEnum.Function]: 'function'
|
||||
[ChatCompletionRequestMessageRoleEnum.Function]: ChatRoleEnum.Function,
|
||||
[ChatCompletionRequestMessageRoleEnum.Tool]: ChatRoleEnum.Tool
|
||||
};
|
||||
|
||||
export function adaptRole_Chat2Message(role: `${ChatRoleEnum}`) {
|
||||
@@ -28,10 +31,10 @@ export const adaptChat2GptMessages = ({
|
||||
}: {
|
||||
messages: ChatItemType[];
|
||||
reserveId: boolean;
|
||||
}): MessageItemType[] => {
|
||||
}): ChatMessageItemType[] => {
|
||||
return messages.map((item) => ({
|
||||
...(reserveId && { dataId: item.dataId }),
|
||||
role: chat2Message[item.obj] || ChatCompletionRequestMessageRoleEnum.System,
|
||||
role: chat2Message[item.obj],
|
||||
content: item.value || ''
|
||||
}));
|
||||
};
|
||||
34
packages/global/core/chat/api.d.ts
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
import { ModuleItemType } from '../module/type';
|
||||
import { AdminFbkType, ChatItemType, moduleDispatchResType } from './type';
|
||||
|
||||
export type UpdateHistoryProps = {
|
||||
chatId: string;
|
||||
customTitle?: string;
|
||||
top?: boolean;
|
||||
};
|
||||
|
||||
export type AdminUpdateFeedbackParams = AdminFbkType & {
|
||||
chatItemId: string;
|
||||
};
|
||||
|
||||
export type InitChatResponse = {
|
||||
chatId: string;
|
||||
appId: string;
|
||||
app: {
|
||||
userGuideModule?: ModuleItemType;
|
||||
chatModels?: string[];
|
||||
name: string;
|
||||
avatar: string;
|
||||
intro: string;
|
||||
canUse?: boolean;
|
||||
};
|
||||
title: string;
|
||||
variables: Record<string, any>;
|
||||
history: ChatItemType[];
|
||||
};
|
||||
|
||||
export type ChatHistoryItemResType = moduleDispatchResType & {
|
||||
moduleType: `${FlowNodeTypeEnum}`;
|
||||
moduleName: string;
|
||||
moduleLogo?: string;
|
||||
};
|
||||
@@ -1,16 +1,9 @@
|
||||
import dayjs from 'dayjs';
|
||||
|
||||
export enum sseResponseEventEnum {
|
||||
error = 'error',
|
||||
answer = 'answer',
|
||||
moduleStatus = 'moduleStatus',
|
||||
appStreamResponse = 'appStreamResponse' // sse response request
|
||||
}
|
||||
|
||||
export enum ChatRoleEnum {
|
||||
System = 'System',
|
||||
Human = 'Human',
|
||||
AI = 'AI'
|
||||
AI = 'AI',
|
||||
Function = 'Function',
|
||||
Tool = 'Tool'
|
||||
}
|
||||
|
||||
export enum TaskResponseKeyEnum {
|
||||
@@ -28,6 +21,12 @@ export const ChatRoleMap = {
|
||||
},
|
||||
[ChatRoleEnum.AI]: {
|
||||
name: 'AI'
|
||||
},
|
||||
[ChatRoleEnum.Function]: {
|
||||
name: 'Function'
|
||||
},
|
||||
[ChatRoleEnum.Tool]: {
|
||||
name: 'Tool'
|
||||
}
|
||||
};
|
||||
|
||||
@@ -55,3 +54,6 @@ export const ChatSourceMap = {
|
||||
|
||||
export const HUMAN_ICON = `/icon/human.svg`;
|
||||
export const LOGO_ICON = `/icon/logo.svg`;
|
||||
|
||||
export const IMG_BLOCK_KEY = 'img-block';
|
||||
export const FILE_BLOCK_KEY = 'file-block';
|
||||
110
packages/global/core/chat/type.d.ts
vendored
Normal file
@@ -0,0 +1,110 @@
|
||||
import { ClassifyQuestionAgentItemType } from '../module/type';
|
||||
import { SearchDataResponseItemType } from '../dataset/type';
|
||||
import { ChatRoleEnum, ChatSourceEnum, TaskResponseKeyEnum } from './constants';
|
||||
import { FlowNodeTypeEnum } from '../module/node/constant';
|
||||
import { AppSchema } from 'core/app/type';
|
||||
|
||||
export type ChatSchema = {
|
||||
_id: string;
|
||||
chatId: string;
|
||||
userId: string;
|
||||
teamId: string;
|
||||
tmbId: string;
|
||||
appId: string;
|
||||
updateTime: Date;
|
||||
title: string;
|
||||
customTitle: string;
|
||||
top: boolean;
|
||||
variables: Record<string, any>;
|
||||
source: `${ChatSourceEnum}`;
|
||||
shareId?: string;
|
||||
isInit: boolean;
|
||||
content: ChatItemType[];
|
||||
};
|
||||
|
||||
export type ChatWithAppSchema = Omit<ChatSchema, 'appId'> & {
|
||||
appId: AppSchema;
|
||||
};
|
||||
|
||||
export type ChatItemSchema = {
|
||||
dataId: string;
|
||||
chatId: string;
|
||||
userId: string;
|
||||
teamId: string;
|
||||
tmbId: string;
|
||||
appId: string;
|
||||
time: Date;
|
||||
obj: `${ChatRoleEnum}`;
|
||||
value: string;
|
||||
userFeedback?: string;
|
||||
adminFeedback?: AdminFbkType;
|
||||
[TaskResponseKeyEnum.responseData]?: ChatHistoryItemResType[];
|
||||
};
|
||||
|
||||
export type AdminFbkType = {
|
||||
dataId: string;
|
||||
datasetId: string;
|
||||
collectionId: string;
|
||||
q: string;
|
||||
a?: string;
|
||||
};
|
||||
|
||||
export type ChatItemType = {
|
||||
dataId?: string;
|
||||
obj: ChatItemSchema['obj'];
|
||||
value: any;
|
||||
userFeedback?: string;
|
||||
adminFeedback?: ChatItemSchema['feedback'];
|
||||
[TaskResponseKeyEnum.responseData]?: ChatItemSchema[TaskResponseKeyEnum.responseData];
|
||||
};
|
||||
|
||||
export type ChatSiteItemType = {
|
||||
status: 'loading' | 'running' | 'finish';
|
||||
moduleName?: string;
|
||||
ttsBuffer?: Uint8Array;
|
||||
} & ChatItemType;
|
||||
|
||||
export type HistoryItemType = {
|
||||
chatId: string;
|
||||
updateTime: Date;
|
||||
customTitle?: string;
|
||||
title: string;
|
||||
};
|
||||
export type ChatHistoryItemType = HistoryItemType & {
|
||||
appId: string;
|
||||
top: boolean;
|
||||
};
|
||||
|
||||
// response data
|
||||
export type moduleDispatchResType = {
|
||||
price: number;
|
||||
runningTime?: number;
|
||||
tokens?: number;
|
||||
model?: string;
|
||||
|
||||
// chat
|
||||
question?: string;
|
||||
temperature?: number;
|
||||
maxToken?: number;
|
||||
quoteList?: SearchDataResponseItemType[];
|
||||
historyPreview?: ChatItemType[]; // completion context array. history will slice
|
||||
|
||||
// dataset search
|
||||
similarity?: number;
|
||||
limit?: number;
|
||||
|
||||
// cq
|
||||
cqList?: ClassifyQuestionAgentItemType[];
|
||||
cqResult?: string;
|
||||
|
||||
// content extract
|
||||
extractDescription?: string;
|
||||
extractResult?: Record<string, any>;
|
||||
|
||||
// http
|
||||
body?: Record<string, any>;
|
||||
httpResult?: Record<string, any>;
|
||||
|
||||
// plugin output
|
||||
pluginOutput?: Record<string, any>;
|
||||
};
|
||||
6
packages/global/core/chat/utils.ts
Normal file
@@ -0,0 +1,6 @@
|
||||
import { IMG_BLOCK_KEY, FILE_BLOCK_KEY } from './constants';

// Strip ```img-block ...``` fenced sections out of chat content and trim the
// result (used to get the displayable/measurable plain text).
// NOTE(review): FILE_BLOCK_KEY is imported but never used — file blocks are
// NOT stripped here; confirm whether that is intentional.
export function chatContentReplaceBlock(content: string = '') {
  const regex = new RegExp(`\`\`\`(${IMG_BLOCK_KEY})\\n([\\s\\S]*?)\`\`\``, 'g');
  return content.replace(regex, '').trim();
}
|
||||
20
packages/global/core/dataset/api.d.ts
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
import { DatasetDataIndexItemType } from './type';

/* ================= dataset ===================== */

/* ================= collection ===================== */

/* ================= data ===================== */
// Raw row shape returned by the pg vector search (snake_case DB columns).
export type PgSearchRawType = {
  id: string;
  team_id: string;
  tmb_id: string;
  collection_id: string;
  data_id: string;
  score: number; // similarity score from the vector search
};
// One chunk pushed into a dataset for training/indexing.
export type PushDatasetDataChunkProps = {
  q: string; // embedding content
  a?: string; // bonus content
  indexes?: Omit<DatasetDataIndexItemType, 'dataId'>[]; // custom indexes; dataId presumably assigned server-side — confirm
};
|
||||
@@ -1,3 +1,5 @@
|
||||
export const PgDatasetTableName = 'modeldata';
|
||||
|
||||
export enum DatasetTypeEnum {
|
||||
folder = 'folder',
|
||||
dataset = 'dataset'
|
||||
@@ -34,29 +36,54 @@ export const DatasetCollectionTypeMap = {
|
||||
}
|
||||
};
|
||||
|
||||
export enum TrainingModeEnum {
|
||||
'qa' = 'qa',
|
||||
'index' = 'index'
|
||||
export enum DatasetDataIndexTypeEnum {
|
||||
chunk = 'chunk',
|
||||
qa = 'qa',
|
||||
summary = 'summary',
|
||||
hypothetical = 'hypothetical',
|
||||
custom = 'custom'
|
||||
}
|
||||
export const TrainingTypeMap = {
|
||||
[TrainingModeEnum.qa]: 'qa',
|
||||
[TrainingModeEnum.index]: 'index'
|
||||
};
|
||||
|
||||
export enum DatasetSpecialIdEnum {
|
||||
manual = 'manual',
|
||||
mark = 'mark'
|
||||
}
|
||||
export const datasetSpecialIdMap = {
|
||||
[DatasetSpecialIdEnum.manual]: {
|
||||
name: 'kb.Manual Data',
|
||||
sourceName: 'kb.Manual Input'
|
||||
export const DatasetDataIndexTypeMap = {
|
||||
[DatasetDataIndexTypeEnum.chunk]: {
|
||||
name: 'dataset.data.indexes.chunk'
|
||||
},
|
||||
[DatasetSpecialIdEnum.mark]: {
|
||||
name: 'kb.Mark Data',
|
||||
sourceName: 'kb.Manual Mark'
|
||||
[DatasetDataIndexTypeEnum.summary]: {
|
||||
name: 'dataset.data.indexes.summary'
|
||||
},
|
||||
[DatasetDataIndexTypeEnum.hypothetical]: {
|
||||
name: 'dataset.data.indexes.hypothetical'
|
||||
},
|
||||
[DatasetDataIndexTypeEnum.qa]: {
|
||||
name: 'dataset.data.indexes.qa'
|
||||
},
|
||||
[DatasetDataIndexTypeEnum.custom]: {
|
||||
name: 'dataset.data.indexes.custom'
|
||||
}
|
||||
};
|
||||
export const datasetSpecialIds: string[] = [DatasetSpecialIdEnum.manual, DatasetSpecialIdEnum.mark];
|
||||
|
||||
export enum TrainingModeEnum {
|
||||
'chunk' = 'chunk',
|
||||
'qa' = 'qa'
|
||||
// 'hypothetical' = 'hypothetical',
|
||||
// 'summary' = 'summary',
|
||||
// 'multipleIndex' = 'multipleIndex'
|
||||
}
|
||||
export const TrainingTypeMap = {
|
||||
[TrainingModeEnum.chunk]: {
|
||||
name: 'chunk'
|
||||
},
|
||||
[TrainingModeEnum.qa]: {
|
||||
name: 'qa'
|
||||
}
|
||||
// [TrainingModeEnum.hypothetical]: {
|
||||
// name: 'hypothetical'
|
||||
// },
|
||||
// [TrainingModeEnum.summary]: {
|
||||
// name: 'summary'
|
||||
// },
|
||||
// [TrainingModeEnum.multipleIndex]: {
|
||||
// name: 'multipleIndex'
|
||||
// }
|
||||
};
|
||||
|
||||
export const FolderAvatarSrc = '/imgs/files/folder.svg';
|
||||
|
||||
27
packages/global/core/dataset/controller.d.ts
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
import type { DatasetDataIndexItemType, DatasetDataSchemaType } from './type';
|
||||
|
||||
export type CreateDatasetDataProps = {
|
||||
teamId: string;
|
||||
tmbId: string;
|
||||
datasetId: string;
|
||||
collectionId: string;
|
||||
q: string;
|
||||
a?: string;
|
||||
indexes?: Omit<DatasetDataIndexItemType, 'dataId'>[];
|
||||
};
|
||||
|
||||
export type UpdateDatasetDataProps = {
|
||||
dataId: string;
|
||||
q?: string;
|
||||
a?: string;
|
||||
indexes?: (Omit<DatasetDataIndexItemType, 'dataId'> & {
|
||||
dataId?: string; // pg data id
|
||||
})[];
|
||||
};
|
||||
|
||||
export type PatchIndexesProps = {
|
||||
type: 'create' | 'update' | 'delete';
|
||||
index: Omit<DatasetDataIndexItemType, 'dataId'> & {
|
||||
dataId?: string;
|
||||
};
|
||||
};
|
||||
96
packages/global/core/dataset/type.d.ts
vendored
@@ -1,20 +1,35 @@
|
||||
import { DatasetCollectionTypeEnum, DatasetTypeEnum, TrainingModeEnum } from './constant';
|
||||
import type { LLMModelItemType, VectorModelItemType } from '../../core/ai/model.d';
|
||||
import { PermissionTypeEnum } from '../../support/permission/constant';
|
||||
import { PushDatasetDataChunkProps } from './api';
|
||||
import {
|
||||
DatasetCollectionTypeEnum,
|
||||
DatasetDataIndexTypeEnum,
|
||||
DatasetTypeEnum,
|
||||
TrainingModeEnum
|
||||
} from './constant';
|
||||
|
||||
/* schema */
|
||||
export type DatasetSchemaType = {
|
||||
_id: string;
|
||||
userId: string;
|
||||
parentId: string;
|
||||
userId: string;
|
||||
teamId: string;
|
||||
tmbId: string;
|
||||
updateTime: Date;
|
||||
avatar: string;
|
||||
name: string;
|
||||
vectorModel: string;
|
||||
agentModel: string;
|
||||
tags: string[];
|
||||
type: `${DatasetTypeEnum}`;
|
||||
permission: `${PermissionTypeEnum}`;
|
||||
};
|
||||
|
||||
export type DatasetCollectionSchemaType = {
|
||||
_id: string;
|
||||
userId: string;
|
||||
teamId: string;
|
||||
tmbId: string;
|
||||
datasetId: string;
|
||||
parentId?: string;
|
||||
name: string;
|
||||
@@ -27,11 +42,33 @@ export type DatasetCollectionSchemaType = {
|
||||
};
|
||||
};
|
||||
|
||||
export type DatasetDataIndexItemType = {
|
||||
defaultIndex: boolean;
|
||||
dataId: string; // pg data id
|
||||
type: `${DatasetDataIndexTypeEnum}`;
|
||||
text: string;
|
||||
};
|
||||
export type DatasetDataSchemaType = {
|
||||
_id: string;
|
||||
userId: string;
|
||||
teamId: string;
|
||||
tmbId: string;
|
||||
datasetId: string;
|
||||
collectionId: string;
|
||||
datasetId: string;
|
||||
collectionId: string;
|
||||
q: string; // large chunks or question
|
||||
a: string; // answer or custom content
|
||||
indexes: DatasetDataIndexItemType[];
|
||||
};
|
||||
|
||||
export type DatasetTrainingSchemaType = {
|
||||
_id: string;
|
||||
userId: string;
|
||||
teamId: string;
|
||||
tmbId: string;
|
||||
datasetId: string;
|
||||
datasetCollectionId: string;
|
||||
collectionId: string;
|
||||
billId: string;
|
||||
expireAt: Date;
|
||||
lockTime: Date;
|
||||
@@ -40,36 +77,59 @@ export type DatasetTrainingSchemaType = {
|
||||
prompt: string;
|
||||
q: string;
|
||||
a: string;
|
||||
indexes: Omit<DatasetDataIndexItemType, 'dataId'>[];
|
||||
};
|
||||
|
||||
export type CollectionWithDatasetType = Omit<DatasetCollectionSchemaType, 'datasetId'> & {
|
||||
datasetId: DatasetSchemaType;
|
||||
};
|
||||
|
||||
/* ================= dataset ===================== */
|
||||
export type DatasetItemType = Omit<DatasetSchemaType, 'vectorModel' | 'agentModel'> & {
|
||||
vectorModel: VectorModelItemType;
|
||||
agentModel: LLMModelItemType;
|
||||
isOwner: boolean;
|
||||
canWrite: boolean;
|
||||
};
|
||||
|
||||
/* ================= collection ===================== */
|
||||
export type DatasetCollectionItemType = CollectionWithDatasetType & {
|
||||
canWrite: boolean;
|
||||
sourceName: string;
|
||||
sourceId?: string;
|
||||
};
|
||||
|
||||
/* ================= data ===================== */
|
||||
export type PgDataItemType = {
|
||||
id: string;
|
||||
q: string;
|
||||
a: string;
|
||||
dataset_id: string;
|
||||
collection_id: string;
|
||||
};
|
||||
export type DatasetChunkItemType = {
|
||||
q: string;
|
||||
a: string;
|
||||
};
|
||||
export type DatasetDataItemType = DatasetChunkItemType & {
|
||||
export type DatasetDataItemType = {
|
||||
id: string;
|
||||
datasetId: string;
|
||||
collectionId: string;
|
||||
sourceName: string;
|
||||
sourceId?: string;
|
||||
q: string;
|
||||
a: string;
|
||||
indexes: DatasetDataIndexItemType[];
|
||||
isOwner: boolean;
|
||||
canWrite: boolean;
|
||||
};
|
||||
|
||||
/* --------------- file ---------------------- */
|
||||
export type DatasetFileSchema = {
|
||||
_id: string;
|
||||
length: number;
|
||||
chunkSize: number;
|
||||
uploadDate: Date;
|
||||
filename: string;
|
||||
contentType: string;
|
||||
metadata: {
|
||||
contentType: string;
|
||||
datasetId: string;
|
||||
teamId: string;
|
||||
tmbId: string;
|
||||
};
|
||||
};
|
||||
|
||||
/* ============= search =============== */
|
||||
export type SearchDataResultItemType = PgDataItemType & {
|
||||
score: number;
|
||||
};
|
||||
export type SearchDataResponseItemType = DatasetDataItemType & {
|
||||
score: number;
|
||||
};
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { DatasetCollectionTypeEnum } from './constant';
|
||||
import { DatasetCollectionTypeEnum, DatasetDataIndexTypeEnum } from './constant';
|
||||
import { getFileIcon } from '../../common/file/icon';
|
||||
import { strIsLink } from '../../common/string/tools';
|
||||
|
||||
@@ -44,3 +44,14 @@ export function getSourceNameIcon({
|
||||
}
|
||||
return '/imgs/files/collection.svg';
|
||||
}
|
||||
|
||||
export function getDefaultIndex(props?: { q?: string; a?: string; dataId?: string }) {
|
||||
const { q = '', a, dataId } = props || {};
|
||||
const qaStr = `${q}\n${a}`.trim();
|
||||
return {
|
||||
defaultIndex: true,
|
||||
type: a ? DatasetDataIndexTypeEnum.qa : DatasetDataIndexTypeEnum.chunk,
|
||||
text: a ? qaStr : q,
|
||||
dataId
|
||||
};
|
||||
}
|
||||
|
||||
3
packages/global/core/module/api.d.ts
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
import { VectorModelItemType } from '../ai/model.d';
|
||||
|
||||
export type SelectedDatasetType = { datasetId: string; vectorModel: VectorModelItemType }[];
|
||||
1
packages/global/core/module/node/type.d.ts
vendored
@@ -32,6 +32,7 @@ export type FlowNodeInputItemType = {
|
||||
connected?: boolean;
|
||||
description?: string;
|
||||
placeholder?: string;
|
||||
plusField?: boolean;
|
||||
max?: number;
|
||||
min?: number;
|
||||
step?: number;
|
||||
|
||||
13
packages/global/core/module/type.d.ts
vendored
@@ -1,4 +1,4 @@
|
||||
import { FlowNodeTypeEnum } from './node/constant';
|
||||
import { FlowNodeTypeEnum, FlowNodeValTypeEnum } from './node/constant';
|
||||
import { FlowNodeInputItemType, FlowNodeOutputItemType } from './node/type';
|
||||
|
||||
export type FlowModuleTemplateType = {
|
||||
@@ -42,3 +42,14 @@ export type SelectAppItemType = {
|
||||
name: string;
|
||||
logo: string;
|
||||
};
|
||||
|
||||
/* agent */
|
||||
export type ClassifyQuestionAgentItemType = {
|
||||
value: string;
|
||||
key: string;
|
||||
};
|
||||
export type ContextExtractAgentItemType = {
|
||||
desc: string;
|
||||
key: string;
|
||||
required: boolean;
|
||||
};
|
||||
|
||||
2
packages/global/core/plugin/type.d.ts
vendored
@@ -3,6 +3,8 @@ import type { ModuleItemType } from '../module/type.d';
|
||||
export type PluginItemSchema = {
|
||||
_id: string;
|
||||
userId: string;
|
||||
teamId: string;
|
||||
tmbId: string;
|
||||
name: string;
|
||||
avatar: string;
|
||||
intro: string;
|
||||
|
||||
@@ -6,7 +6,8 @@
|
||||
"timezones-list": "^3.0.2",
|
||||
"dayjs": "^1.11.7",
|
||||
"encoding": "^0.1.13",
|
||||
"openai": "^4.12.1"
|
||||
"openai": "^4.16.1",
|
||||
"js-tiktoken": "^1.0.7"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^20.8.5"
|
||||
|
||||
2
packages/global/support/openapi/type.d.ts
vendored
@@ -1,6 +1,8 @@
|
||||
export type OpenApiSchema = {
|
||||
_id: string;
|
||||
userId: string;
|
||||
teamId: string;
|
||||
tmbId: string;
|
||||
createTime: Date;
|
||||
lastUsedTime?: Date;
|
||||
apiKey: string;
|
||||
|
||||
27
packages/global/support/outLink/api.d.ts
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
import type { HistoryItemType, ChatSiteItemType } from '../../core/chat/type.d';
|
||||
import type { InitChatResponse } from '../../core/chat/api.d';
|
||||
import { OutLinkSchema } from '@fastgpt/global/support/outLink/type';
|
||||
|
||||
export type InitShareChatResponse = {
|
||||
userAvatar: string;
|
||||
app: InitChatResponse['app'];
|
||||
};
|
||||
|
||||
/* one page type */
|
||||
export type ShareChatType = InitShareChatResponse & {
|
||||
history: ShareChatHistoryItemType;
|
||||
};
|
||||
|
||||
/* history list item type */
|
||||
export type ShareChatHistoryItemType = HistoryItemType & {
|
||||
shareId: string;
|
||||
variables?: Record<string, any>;
|
||||
chats: ChatSiteItemType[];
|
||||
};
|
||||
|
||||
export type AuthLinkChatProps = { ip?: string | null; authToken?: string; question: string };
|
||||
export type AuthLinkLimitProps = AuthLinkChatProps & { outLink: OutLinkSchema };
|
||||
export type AuthShareChatInitProps = {
|
||||
authToken?: string;
|
||||
tokenUrl?: string;
|
||||
};
|
||||
2
packages/global/support/outLink/type.d.ts
vendored
@@ -4,6 +4,8 @@ export type OutLinkSchema = {
|
||||
_id: string;
|
||||
shareId: string;
|
||||
userId: string;
|
||||
teamId: string;
|
||||
tmbId: string;
|
||||
appId: string;
|
||||
name: string;
|
||||
total: number;
|
||||
|
||||
21
packages/global/support/permission/constant.ts
Normal file
@@ -0,0 +1,21 @@
|
||||
export enum AuthUserTypeEnum {
|
||||
token = 'token',
|
||||
root = 'root',
|
||||
apikey = 'apikey',
|
||||
outLink = 'outLink'
|
||||
}
|
||||
|
||||
export enum PermissionTypeEnum {
|
||||
'private' = 'private',
|
||||
'public' = 'public'
|
||||
}
|
||||
export const PermissionTypeMap = {
|
||||
[PermissionTypeEnum.private]: {
|
||||
iconLight: 'support/permission/privateLight',
|
||||
label: 'permission.Private'
|
||||
},
|
||||
[PermissionTypeEnum.public]: {
|
||||
iconLight: 'support/permission/publicLight',
|
||||
label: 'permission.Public'
|
||||
}
|
||||
};
|
||||
12
packages/global/support/permission/type.d.ts
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
import { AuthUserTypeEnum } from './constant';
|
||||
|
||||
export type AuthResponseType = {
|
||||
userId: string;
|
||||
teamId: string;
|
||||
tmbId: string;
|
||||
isOwner: boolean;
|
||||
canWrite: boolean;
|
||||
authType?: `${AuthUserTypeEnum}`;
|
||||
appId?: string;
|
||||
apikey?: string;
|
||||
};
|
||||
27
packages/global/support/permission/utils.ts
Normal file
@@ -0,0 +1,27 @@
|
||||
import { TeamMemberRoleEnum } from '../user/team/constant';
|
||||
import { PermissionTypeEnum } from './constant';
|
||||
|
||||
/* team public source, or owner source in team */
|
||||
export function mongoRPermission({
|
||||
teamId,
|
||||
tmbId,
|
||||
role
|
||||
}: {
|
||||
teamId: string;
|
||||
tmbId: string;
|
||||
role: `${TeamMemberRoleEnum}`;
|
||||
}) {
|
||||
return {
|
||||
teamId,
|
||||
...(role === TeamMemberRoleEnum.visitor && { permission: PermissionTypeEnum.public }),
|
||||
...(role === TeamMemberRoleEnum.admin && {
|
||||
$or: [{ permission: PermissionTypeEnum.public }, { tmbId }]
|
||||
})
|
||||
};
|
||||
}
|
||||
export function mongoOwnerPermission({ teamId, tmbId }: { teamId: string; tmbId: string }) {
|
||||
return {
|
||||
teamId,
|
||||
tmbId
|
||||
};
|
||||
}
|
||||
15
packages/global/support/user/api.d.ts
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
import { OAuthEnum } from './constant';
|
||||
|
||||
export type PostLoginProps = {
|
||||
username: string;
|
||||
password: string;
|
||||
tmbId?: string;
|
||||
};
|
||||
|
||||
export type OauthLoginProps = {
|
||||
type: `${OAuthEnum}`;
|
||||
code: string;
|
||||
callbackUrl: string;
|
||||
inviterId?: string;
|
||||
tmbId?: string;
|
||||
};
|
||||
@@ -1,30 +1,4 @@
|
||||
export enum InformTypeEnum {
|
||||
system = 'system'
|
||||
export enum OAuthEnum {
|
||||
github = 'github',
|
||||
google = 'google'
|
||||
}
|
||||
|
||||
export const InformTypeMap = {
|
||||
[InformTypeEnum.system]: {
|
||||
label: '系统通知'
|
||||
}
|
||||
};
|
||||
|
||||
export enum TeamMemberRoleEnum {
|
||||
owner = 'owner',
|
||||
admin = 'admin',
|
||||
member = 'member',
|
||||
visitor = 'visitor'
|
||||
}
|
||||
export const TeamMemberRoleMap = {
|
||||
[TeamMemberRoleEnum.owner]: {
|
||||
label: 'user.team.role.owner'
|
||||
},
|
||||
[TeamMemberRoleEnum.admin]: {
|
||||
label: 'user.team.role.admin'
|
||||
},
|
||||
[TeamMemberRoleEnum.member]: {
|
||||
label: 'user.team.role.member'
|
||||
},
|
||||
[TeamMemberRoleEnum.visitor]: {
|
||||
label: 'user.team.role.visitor'
|
||||
}
|
||||
};
|
||||
|
||||
21
packages/global/support/user/controller.d.ts
vendored
@@ -1,21 +0,0 @@
|
||||
export type CreateTeamProps = {
|
||||
ownerId: string;
|
||||
name: string;
|
||||
avatar?: string;
|
||||
};
|
||||
export type UpdateTeamProps = {
|
||||
id: string;
|
||||
name?: string;
|
||||
avatar?: string;
|
||||
};
|
||||
export type updateTeamBalanceProps = {
|
||||
id: string;
|
||||
balance: number;
|
||||
};
|
||||
|
||||
export type CreateTeamMemberProps = {
|
||||
ownerId: string;
|
||||
teamId: string;
|
||||
userId: string;
|
||||
name?: string;
|
||||
};
|
||||
9
packages/global/support/user/inform/constants.ts
Normal file
@@ -0,0 +1,9 @@
|
||||
export enum InformTypeEnum {
|
||||
system = 'system'
|
||||
}
|
||||
|
||||
export const InformTypeMap = {
|
||||
[InformTypeEnum.system]: {
|
||||
label: '系统通知'
|
||||
}
|
||||
};
|
||||
18
packages/global/support/user/inform/type.d.ts
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
import { InformTypeEnum } from './constant';
|
||||
|
||||
export type SendInformProps = {
|
||||
tmbId?: string;
|
||||
type: `${InformTypeEnum}`;
|
||||
title: string;
|
||||
content: string;
|
||||
};
|
||||
|
||||
export type UserInformSchema = {
|
||||
_id: string;
|
||||
userId: string;
|
||||
time: Date;
|
||||
type: `${InformTypeEnum}`;
|
||||
title: string;
|
||||
content: string;
|
||||
read: boolean;
|
||||
};
|
||||
48
packages/global/support/user/team/constant.ts
Normal file
@@ -0,0 +1,48 @@
|
||||
export const TeamCollectionName = 'teams';
|
||||
export const TeamMemberCollectionName = 'team.members';
|
||||
|
||||
export enum TeamMemberRoleEnum {
|
||||
owner = 'owner',
|
||||
admin = 'admin',
|
||||
visitor = 'visitor'
|
||||
}
|
||||
export const TeamMemberRoleMap = {
|
||||
[TeamMemberRoleEnum.owner]: {
|
||||
value: TeamMemberRoleEnum.owner,
|
||||
label: 'user.team.role.Owner'
|
||||
},
|
||||
[TeamMemberRoleEnum.admin]: {
|
||||
value: TeamMemberRoleEnum.admin,
|
||||
label: 'user.team.role.Admin'
|
||||
},
|
||||
[TeamMemberRoleEnum.visitor]: {
|
||||
value: TeamMemberRoleEnum.visitor,
|
||||
label: 'user.team.role.Visitor'
|
||||
}
|
||||
};
|
||||
|
||||
export enum TeamMemberStatusEnum {
|
||||
waiting = 'waiting',
|
||||
active = 'active',
|
||||
reject = 'reject',
|
||||
leave = 'leave'
|
||||
}
|
||||
export const TeamMemberStatusMap = {
|
||||
[TeamMemberStatusEnum.waiting]: {
|
||||
label: 'user.team.member.waiting',
|
||||
color: 'orange.600'
|
||||
},
|
||||
[TeamMemberStatusEnum.active]: {
|
||||
label: 'user.team.member.active',
|
||||
color: 'green.600'
|
||||
},
|
||||
[TeamMemberStatusEnum.reject]: {
|
||||
label: 'user.team.member.reject',
|
||||
color: 'red.600'
|
||||
},
|
||||
[TeamMemberStatusEnum.leave]: {
|
||||
label: 'user.team.member.leave',
|
||||
color: 'red.600'
|
||||
}
|
||||
};
|
||||
export const leaveStatus = { $ne: TeamMemberStatusEnum.leave };
|
||||
43
packages/global/support/user/team/controller.d.ts
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
import { TeamMemberRoleEnum } from './constant';
|
||||
import { TeamMemberSchema } from './type';
|
||||
|
||||
export type AuthTeamRoleProps = {
|
||||
teamId: string;
|
||||
tmbId: string;
|
||||
role?: `${TeamMemberRoleEnum}`;
|
||||
};
|
||||
export type CreateTeamProps = {
|
||||
name: string;
|
||||
avatar?: string;
|
||||
defaultTeam?: boolean;
|
||||
};
|
||||
export type UpdateTeamProps = {
|
||||
teamId: string;
|
||||
name?: string;
|
||||
avatar?: string;
|
||||
};
|
||||
|
||||
/* ------------- member ----------- */
|
||||
export type DelMemberProps = {
|
||||
teamId: string;
|
||||
memberId: string;
|
||||
};
|
||||
export type UpdateTeamMemberProps = {
|
||||
teamId: string;
|
||||
memberId: string;
|
||||
role?: TeamMemberSchema['role'];
|
||||
status?: TeamMemberSchema['status'];
|
||||
};
|
||||
export type InviteMemberProps = {
|
||||
teamId: string;
|
||||
usernames: string[];
|
||||
role: `${TeamMemberRoleEnum}`;
|
||||
};
|
||||
export type UpdateInviteProps = {
|
||||
tmbId: string;
|
||||
status: TeamMemberSchema['status'];
|
||||
};
|
||||
export type InviteMemberResponse = Record<
|
||||
'invite' | 'inValid' | 'inTeam',
|
||||
{ username: string; userId: string }[]
|
||||
>;
|
||||
48
packages/global/support/user/team/type.d.ts
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
import { UserModelSchema } from '../type';
|
||||
import { TeamMemberRoleEnum, TeamMemberStatusEnum } from './constant';
|
||||
|
||||
export type TeamSchema = {
|
||||
_id: string;
|
||||
name: string;
|
||||
ownerId: string;
|
||||
avatar: string;
|
||||
createTime: Date;
|
||||
balance: number;
|
||||
maxSize: number;
|
||||
};
|
||||
|
||||
export type TeamMemberSchema = {
|
||||
_id: string;
|
||||
teamId: string;
|
||||
userId: string;
|
||||
createTime: Date;
|
||||
name: string;
|
||||
role: `${TeamMemberRoleEnum}`;
|
||||
status: `${TeamMemberStatusEnum}`;
|
||||
defaultTeam: boolean;
|
||||
};
|
||||
|
||||
export type TeamItemType = {
|
||||
userId: string;
|
||||
teamId: string;
|
||||
teamName: string;
|
||||
memberName: string;
|
||||
avatar: string;
|
||||
balance: number;
|
||||
tmbId: string;
|
||||
defaultTeam: boolean;
|
||||
role: `${TeamMemberRoleEnum}`;
|
||||
status: `${TeamMemberStatusEnum}`;
|
||||
canWrite: boolean;
|
||||
maxSize: number;
|
||||
};
|
||||
|
||||
export type TeamMemberItemType = {
|
||||
userId: string;
|
||||
tmbId: string;
|
||||
teamId: string;
|
||||
memberName: string;
|
||||
avatar: string;
|
||||
role: `${TeamMemberRoleEnum}`;
|
||||
status: `${TeamMemberStatusEnum}`;
|
||||
};
|
||||
32
packages/global/support/user/type.d.ts
vendored
@@ -1,4 +1,5 @@
|
||||
import { InformTypeEnum, TeamMemberRoleEnum } from './constant';
|
||||
import { InformTypeEnum } from './constant';
|
||||
import { TeamItemType } from './team/type';
|
||||
|
||||
export type UserModelSchema = {
|
||||
_id: string;
|
||||
@@ -21,28 +22,13 @@ export type UserModelSchema = {
|
||||
};
|
||||
};
|
||||
|
||||
export type UserInformSchema = {
|
||||
export type UserType = {
|
||||
_id: string;
|
||||
userId: string;
|
||||
time: Date;
|
||||
type: `${InformTypeEnum}`;
|
||||
title: string;
|
||||
content: string;
|
||||
read: boolean;
|
||||
};
|
||||
|
||||
export type TeamSchema = {
|
||||
_id: string;
|
||||
name: string;
|
||||
ownerId: string;
|
||||
username: string;
|
||||
avatar: string;
|
||||
createTime: Date;
|
||||
};
|
||||
|
||||
export type TeamMemberSchema = {
|
||||
_id: string;
|
||||
name: string;
|
||||
teamId: string;
|
||||
userId: string;
|
||||
role: `${TeamMemberRoleEnum}`;
|
||||
balance: number;
|
||||
timezone: string;
|
||||
promotionRate: UserModelSchema['promotionRate'];
|
||||
openaiAccount: UserModelSchema['openaiAccount'];
|
||||
team: TeamItemType;
|
||||
};
|
||||
|
||||
27
packages/global/support/wallet/bill/api.d.ts
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
import { BillSourceEnum } from './constants';
|
||||
import { BillListItemType } from './type';
|
||||
|
||||
export type CreateTrainingBillProps = {
|
||||
name: string;
|
||||
vectorModel?: string;
|
||||
agentModel?: string;
|
||||
};
|
||||
|
||||
export type ConcatBillProps = {
|
||||
teamId: string;
|
||||
tmbId: string;
|
||||
billId?: string;
|
||||
total: number;
|
||||
listIndex?: number;
|
||||
tokens?: number;
|
||||
};
|
||||
|
||||
export type CreateBillProps = {
|
||||
teamId: string;
|
||||
tmbId: string;
|
||||
appName: string;
|
||||
appId?: string;
|
||||
total: number;
|
||||
source: `${BillSourceEnum}`;
|
||||
list: BillListItemType[];
|
||||
};
|
||||
16
packages/global/support/wallet/bill/constants.ts
Normal file
@@ -0,0 +1,16 @@
|
||||
// ¥1 = 100000
|
||||
export const PRICE_SCALE = 100000;
|
||||
|
||||
export enum BillSourceEnum {
|
||||
fastgpt = 'fastgpt',
|
||||
api = 'api',
|
||||
shareLink = 'shareLink',
|
||||
training = 'training'
|
||||
}
|
||||
|
||||
export const BillSourceMap: Record<`${BillSourceEnum}`, string> = {
|
||||
[BillSourceEnum.fastgpt]: '在线使用',
|
||||
[BillSourceEnum.api]: 'Api',
|
||||
[BillSourceEnum.shareLink]: '免登录链接',
|
||||
[BillSourceEnum.training]: '数据训练'
|
||||
};
|
||||
@@ -1,5 +1,6 @@
|
||||
/* bill common */
|
||||
import { PRICE_SCALE } from './constants';
|
||||
import { BillItemType, BillSchema } from './type';
|
||||
|
||||
/**
|
||||
* dataset price / PRICE_SCALE = real price
|
||||
24
packages/global/support/wallet/bill/type.d.ts
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
import { CreateBillProps } from './api';
|
||||
import { BillSourceEnum } from './constants';
|
||||
|
||||
export type BillListItemType = {
|
||||
moduleName: string;
|
||||
amount: number;
|
||||
model?: string;
|
||||
tokenLen?: number;
|
||||
};
|
||||
|
||||
export type BillSchema = CreateBillProps & {
|
||||
_id: string;
|
||||
time: Date;
|
||||
};
|
||||
|
||||
export type BillItemType = {
|
||||
id: string;
|
||||
memberName: string;
|
||||
time: Date;
|
||||
appName: string;
|
||||
source: BillSchema['source'];
|
||||
total: number;
|
||||
list: BillSchema['list'];
|
||||
};
|
||||
@@ -1,6 +1,8 @@
|
||||
export type PaySchema = {
|
||||
_id: string;
|
||||
userId: string;
|
||||
teamId: string;
|
||||
tmbId: string;
|
||||
createTime: Date;
|
||||
price: number;
|
||||
orderId: string;
|
||||
@@ -15,10 +15,6 @@ interface ResponseDataType {
|
||||
* 请求开始
|
||||
*/
|
||||
function requestStart(config: InternalAxiosRequestConfig): InternalAxiosRequestConfig {
|
||||
if (config.headers) {
|
||||
config.headers.rootkey = process.env.ROOT_KEY;
|
||||
}
|
||||
|
||||
return config;
|
||||
}
|
||||
|
||||
@@ -62,7 +58,9 @@ function responseError(err: any) {
|
||||
const instance = axios.create({
|
||||
timeout: 60000, // 超时时间
|
||||
headers: {
|
||||
'content-type': 'application/json'
|
||||
'content-type': 'application/json',
|
||||
'Cache-Control': 'no-cache',
|
||||
rootkey: process.env.ROOT_KEY
|
||||
}
|
||||
});
|
||||
|
||||
@@ -73,7 +71,7 @@ instance.interceptors.response.use(responseSuccess, (err) => Promise.reject(err)
|
||||
|
||||
export function request(url: string, data: any, config: ConfigType, method: Method): any {
|
||||
if (!global.systemEnv?.pluginBaseUrl) {
|
||||
return Promise.reject('商业版插件加载中...');
|
||||
return Promise.reject('该功能为商业版特有...');
|
||||
}
|
||||
|
||||
/* 去空 */
|
||||
|
||||
36
packages/service/common/buffer/tts/schema.ts
Normal file
@@ -0,0 +1,36 @@
|
||||
import { connectionMongo, type Model } from '../../../common/mongo';
|
||||
const { Schema, model, models } = connectionMongo;
|
||||
import { TTSBufferSchemaType } from './type.d';
|
||||
|
||||
export const collectionName = 'ttsbuffers';
|
||||
|
||||
const TTSBufferSchema = new Schema({
|
||||
bufferId: {
|
||||
type: String,
|
||||
required: true
|
||||
},
|
||||
text: {
|
||||
type: String,
|
||||
required: true
|
||||
},
|
||||
buffer: {
|
||||
type: Buffer,
|
||||
required: true
|
||||
},
|
||||
createTime: {
|
||||
type: Date,
|
||||
default: () => new Date()
|
||||
}
|
||||
});
|
||||
|
||||
try {
|
||||
TTSBufferSchema.index({ bufferId: 1 });
|
||||
// 24 hour
|
||||
TTSBufferSchema.index({ createTime: 1 }, { expireAfterSeconds: 24 * 60 * 60 });
|
||||
} catch (error) {
|
||||
console.log(error);
|
||||
}
|
||||
|
||||
export const MongoTTSBuffer: Model<TTSBufferSchemaType> =
|
||||
models[collectionName] || model(collectionName, TTSBufferSchema);
|
||||
MongoTTSBuffer.syncIndexes();
|
||||
5
packages/service/common/buffer/tts/type.d.ts
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
export type TTSBufferSchemaType = {
|
||||
bufferId: string;
|
||||
text: string;
|
||||
buffer: Buffer;
|
||||
};
|
||||
111
packages/service/common/file/gridfs/controller.ts
Normal file
@@ -0,0 +1,111 @@
|
||||
import { Types, connectionMongo } from '../../mongo';
|
||||
import { BucketNameEnum } from '@fastgpt/global/common/file/constants';
|
||||
import fsp from 'fs/promises';
|
||||
import fs from 'fs';
|
||||
import { DatasetFileSchema } from '@fastgpt/global/core/dataset/type';
|
||||
|
||||
export function getGFSCollection(bucket: `${BucketNameEnum}`) {
|
||||
return connectionMongo.connection.db.collection(`${bucket}.files`);
|
||||
}
|
||||
export function getGridBucket(bucket: `${BucketNameEnum}`) {
|
||||
return new connectionMongo.mongo.GridFSBucket(connectionMongo.connection.db, {
|
||||
bucketName: bucket
|
||||
});
|
||||
}
|
||||
|
||||
/* crud file */
|
||||
export async function uploadFile({
|
||||
bucketName,
|
||||
teamId,
|
||||
tmbId,
|
||||
path,
|
||||
filename,
|
||||
metadata = {}
|
||||
}: {
|
||||
bucketName: `${BucketNameEnum}`;
|
||||
teamId: string;
|
||||
tmbId: string;
|
||||
path: string;
|
||||
filename: string;
|
||||
metadata?: Record<string, any>;
|
||||
}) {
|
||||
if (!path) return Promise.reject(`filePath is empty`);
|
||||
if (!filename) return Promise.reject(`filename is empty`);
|
||||
|
||||
const stats = await fsp.stat(path);
|
||||
if (!stats.isFile()) return Promise.reject(`${path} is not a file`);
|
||||
|
||||
metadata.teamId = teamId;
|
||||
metadata.tmbId = tmbId;
|
||||
|
||||
// create a gridfs bucket
|
||||
const bucket = getGridBucket(bucketName);
|
||||
|
||||
const stream = bucket.openUploadStream(filename, {
|
||||
metadata,
|
||||
contentType: metadata?.contentType
|
||||
});
|
||||
|
||||
// save to gridfs
|
||||
await new Promise((resolve, reject) => {
|
||||
fs.createReadStream(path)
|
||||
.pipe(stream as any)
|
||||
.on('finish', resolve)
|
||||
.on('error', reject);
|
||||
});
|
||||
|
||||
return String(stream.id);
|
||||
}
|
||||
|
||||
export async function getFileById({
|
||||
bucketName,
|
||||
fileId
|
||||
}: {
|
||||
bucketName: `${BucketNameEnum}`;
|
||||
fileId: string;
|
||||
}) {
|
||||
const db = getGFSCollection(bucketName);
|
||||
const file = await db.findOne<DatasetFileSchema>({
|
||||
_id: new Types.ObjectId(fileId)
|
||||
});
|
||||
|
||||
if (!file) {
|
||||
return Promise.reject('File not found');
|
||||
}
|
||||
|
||||
return file;
|
||||
}
|
||||
|
||||
export async function delFileById({
|
||||
bucketName,
|
||||
fileId
|
||||
}: {
|
||||
bucketName: `${BucketNameEnum}`;
|
||||
fileId: string;
|
||||
}) {
|
||||
const bucket = getGridBucket(bucketName);
|
||||
|
||||
await bucket.delete(new Types.ObjectId(fileId));
|
||||
return true;
|
||||
}
|
||||
|
||||
export async function getDownloadBuf({
|
||||
bucketName,
|
||||
fileId
|
||||
}: {
|
||||
bucketName: `${BucketNameEnum}`;
|
||||
fileId: string;
|
||||
}) {
|
||||
const bucket = getGridBucket(bucketName);
|
||||
|
||||
const stream = bucket.openDownloadStream(new Types.ObjectId(fileId));
|
||||
|
||||
const buf: Buffer = await new Promise((resolve, reject) => {
|
||||
const buffers: Buffer[] = [];
|
||||
stream.on('data', (data) => buffers.push(data));
|
||||
stream.on('error', reject);
|
||||
stream.on('end', () => resolve(Buffer.concat(buffers)));
|
||||
});
|
||||
|
||||
return buf;
|
||||
}
|
||||
@@ -5,12 +5,26 @@ export function getMongoImgUrl(id: string) {
|
||||
return `${imageBaseUrl}${id}`;
|
||||
}
|
||||
|
||||
export async function uploadMongoImg({ base64Img, userId }: { base64Img: string; userId: string }) {
|
||||
export const maxImgSize = 1024 * 1024 * 12;
|
||||
export async function uploadMongoImg({
|
||||
base64Img,
|
||||
teamId,
|
||||
expiredTime
|
||||
}: {
|
||||
base64Img: string;
|
||||
teamId: string;
|
||||
expiredTime?: Date;
|
||||
}) {
|
||||
if (base64Img.length > maxImgSize) {
|
||||
return Promise.reject('Image too large');
|
||||
}
|
||||
|
||||
const base64Data = base64Img.split(',')[1];
|
||||
|
||||
const { _id } = await MongoImage.create({
|
||||
userId,
|
||||
binary: Buffer.from(base64Data, 'base64')
|
||||
teamId,
|
||||
binary: Buffer.from(base64Data, 'base64'),
|
||||
expiredTime
|
||||
});
|
||||
|
||||
return getMongoImgUrl(String(_id));
|
||||
|
||||
@@ -1,16 +1,27 @@
|
||||
import { TeamCollectionName } from '@fastgpt/global/support/user/team/constant';
|
||||
import { connectionMongo, type Model } from '../../mongo';
|
||||
const { Schema, model, models } = connectionMongo;
|
||||
|
||||
const ImageSchema = new Schema({
|
||||
userId: {
|
||||
teamId: {
|
||||
type: Schema.Types.ObjectId,
|
||||
ref: 'user',
|
||||
required: true
|
||||
ref: TeamCollectionName
|
||||
},
|
||||
binary: {
|
||||
type: Buffer
|
||||
},
|
||||
expiredTime: {
|
||||
type: Date
|
||||
}
|
||||
});
|
||||
|
||||
export const MongoImage: Model<{ userId: string; binary: Buffer }> =
|
||||
try {
|
||||
ImageSchema.index({ expiredTime: 1 }, { expireAfterSeconds: 60 });
|
||||
} catch (error) {
|
||||
console.log(error);
|
||||
}
|
||||
|
||||
export const MongoImage: Model<{ teamId: string; binary: Buffer }> =
|
||||
models['image'] || model('image', ImageSchema);
|
||||
|
||||
MongoImage.syncIndexes();
|
||||
|
||||
71
packages/service/common/file/upload/multer.ts
Normal file
@@ -0,0 +1,71 @@
|
||||
import type { NextApiRequest, NextApiResponse } from 'next';
|
||||
import { customAlphabet } from 'nanoid';
|
||||
import multer from 'multer';
|
||||
import path from 'path';
|
||||
import { BucketNameEnum } from '@fastgpt/global/common/file/constants';
|
||||
|
||||
const nanoid = customAlphabet('1234567890abcdef', 12);
|
||||
|
||||
type FileType = {
|
||||
fieldname: string;
|
||||
originalname: string;
|
||||
encoding: string;
|
||||
mimetype: string;
|
||||
filename: string;
|
||||
path: string;
|
||||
size: number;
|
||||
};
|
||||
|
||||
export function getUploadModel({ maxSize = 500 }: { maxSize?: number }) {
|
||||
maxSize *= 1024 * 1024;
|
||||
class UploadModel {
|
||||
uploader = multer({
|
||||
limits: {
|
||||
fieldSize: maxSize
|
||||
},
|
||||
preservePath: true,
|
||||
storage: multer.diskStorage({
|
||||
filename: (_req, file, cb) => {
|
||||
const { ext } = path.parse(decodeURIComponent(file.originalname));
|
||||
cb(null, nanoid() + ext);
|
||||
}
|
||||
})
|
||||
}).any();
|
||||
|
||||
async doUpload<T = Record<string, any>>(req: NextApiRequest, res: NextApiResponse) {
|
||||
return new Promise<{
|
||||
files: FileType[];
|
||||
metadata: T;
|
||||
bucketName?: `${BucketNameEnum}`;
|
||||
}>((resolve, reject) => {
|
||||
// @ts-ignore
|
||||
this.uploader(req, res, (error) => {
|
||||
if (error) {
|
||||
return reject(error);
|
||||
}
|
||||
|
||||
resolve({
|
||||
...req.body,
|
||||
files:
|
||||
// @ts-ignore
|
||||
req.files?.map((file) => ({
|
||||
...file,
|
||||
originalname: decodeURIComponent(file.originalname)
|
||||
})) || [],
|
||||
metadata: (() => {
|
||||
if (!req.body?.metadata) return {};
|
||||
try {
|
||||
return JSON.parse(req.body.metadata);
|
||||
} catch (error) {
|
||||
console.log(error);
|
||||
return {};
|
||||
}
|
||||
})()
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return new UploadModel();
|
||||
}
|
||||