4.8 preview (#1288)

* Revert "lafAccount add pat & re request when token invalid (#76)" (#77)

This reverts commit 83d85dfe37adcaef4833385ea52ee79fd84720be.

* perf: workflow ux

* system config

* Newflow (#89)

* docs: Add doc for Xinference (#1266)

Signed-off-by: Carson Yang <yangchuansheng33@gmail.com>

* Revert "lafAccount add pat & re request when token invalid (#76)" (#77)

This reverts commit 83d85dfe37adcaef4833385ea52ee79fd84720be.

* perf: workflow ux

* system config

* Revert "lafAccount add pat & re request when token invalid (#76)" (#77)

This reverts commit 83d85dfe37adcaef4833385ea52ee79fd84720be.

* Revert "lafAccount add pat & re request when token invalid (#76)" (#77)

This reverts commit 83d85dfe37adcaef4833385ea52ee79fd84720be.

* Revert "lafAccount add pat & re request when token invalid (#76)" (#77)

This reverts commit 83d85dfe37adcaef4833385ea52ee79fd84720be.

* rename code

* move code

* update flow

* input type selector

* perf: workflow runtime

* feat: node adapt newflow

* feat: adapt plugin

* feat: 360 connection

* check workflow

* perf: flow performance

* change plugin input type (#81)

* change plugin input type

* plugin label mode

* perf: nodecard

* debug

* perf: debug ui

* connection ui

* change workflow ui (#82)

* feat: workflow debug

* adapt openAPI for new workflow (#83)

* adapt openAPI for new workflow

* i18n

* perf: plugin debug

* plugin input ui

* delete

* perf: global variable select

* fix rebase

* perf: workflow performance

* feat: input render type icon

* input icon

* adapt flow (#84)

* adapt newflow

* temp

* temp

* fix

* feat: app schedule trigger

* feat: app schedule trigger

* perf: schedule ui

* feat: isolated-vm run js code

* perf: workflow variable table ui

* feat: adapt simple mode

* feat: adapt input params

* output

* feat: adapt template

* fix: ts

* add if-else module (#86)

* perf: worker

* if else node

* perf: tiktoken worker

* fix: ts

* perf: tiktoken

* fix if-else node (#87)

* fix if-else node

* type

* fix

* perf: audio render

* perf: Parallel worker

* log

* perf: if else node

* adapt plugin

* prompt

* perf: reference ui

* reference ui

* handle ux

* template ui and plugin tool

* adapt v1 workflow

* adapt v1 workflow completions

* perf: time variables

* feat: workflow keyboard shortcuts

* adapt v1 workflow

* update workflow example doc (#88)

* fix: simple mode select tool

---------

Signed-off-by: Carson Yang <yangchuansheng33@gmail.com>
Co-authored-by: Carson Yang <yangchuansheng33@gmail.com>
Co-authored-by: heheer <71265218+newfish-cmyk@users.noreply.github.com>

* doc

* perf: extract node

* extra node field

* update plugin version

* doc

* variable

* change doc & fix prompt editor (#90)

* fold workflow code

* value type label

---------

Signed-off-by: Carson Yang <yangchuansheng33@gmail.com>
Co-authored-by: Carson Yang <yangchuansheng33@gmail.com>
Co-authored-by: heheer <71265218+newfish-cmyk@users.noreply.github.com>
This commit is contained in:
Archer
2024-04-25 17:51:20 +08:00
committed by GitHub
parent b08d81f887
commit 439c819ff1
505 changed files with 23570 additions and 18215 deletions

View File

@@ -1,9 +1,9 @@
import { SseResponseEventEnum } from '@fastgpt/global/core/module/runtime/constants';
import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import { getErrText } from '@fastgpt/global/common/error/utils';
import type { ChatHistoryItemResType } from '@fastgpt/global/core/chat/type.d';
import type { StartChatFnProps } from '@/components/ChatBox/type.d';
import { getToken } from '@/web/support/user/auth';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/workflow/runtime/constants';
import dayjs from 'dayjs';
import {
// refer to https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web
@@ -109,7 +109,7 @@ export const streamFetch = ({
try {
// auto complete variables
const variables = data?.variables || {};
variables.cTime = dayjs().format('YYYY-MM-DD HH:mm:ss');
variables.cTime = dayjs().format('YYYY-MM-DD HH:mm:ss dddd');
const requestData = {
method: 'POST',

View File

@@ -1,5 +1,6 @@
import { useTranslation } from 'next-i18next';
import { useToast } from '@fastgpt/web/hooks/useToast';
import { useCallback } from 'react';
/**
* copy text data
@@ -8,12 +9,8 @@ export const useCopyData = () => {
const { t } = useTranslation();
const { toast } = useToast();
return {
copyData: async (
data: string,
title: string | null = t('common.Copy Successful'),
duration = 1000
) => {
const copyData = useCallback(
async (data: string, title: string | null = t('common.Copy Successful'), duration = 1000) => {
try {
if (navigator.clipboard) {
await navigator.clipboard.writeText(data);
@@ -31,11 +28,18 @@ export const useCopyData = () => {
document.body?.removeChild(textarea);
}
toast({
title,
status: 'success',
duration
});
}
if (title) {
toast({
title,
status: 'success',
duration
});
}
},
[t, toast]
);
return {
copyData
};
};

View File

@@ -51,7 +51,7 @@ export const useSpeech = (props?: OutLinkChatAuthProps & { appId?: string }) =>
}, []);
const startSpeak = async (onFinish: (text: string) => void) => {
if (!navigator.mediaDevices.getUserMedia) {
if (!navigator?.mediaDevices?.getUserMedia) {
return toast({
status: 'warning',
title: t('common.speech.not support')

View File

@@ -2,23 +2,20 @@ export enum EventNameEnum {
sendQuestion = 'sendQuestion',
editQuestion = 'editQuestion',
// flow
requestFlowEvent = 'requestFlowEvent',
requestFlowStore = 'requestFlowStore',
receiveFlowStore = 'receiveFlowStore'
requestWorkflowStore = 'requestWorkflowStore',
receiveWorkflowStore = 'receiveWorkflowStore'
}
type EventNameType = `${EventNameEnum}`;
export const eventBus = {
list: new Map<EventNameType, Function>(),
on: function (name: EventNameType, fn: Function) {
list: new Map<EventNameEnum, Function>(),
on: function (name: EventNameEnum, fn: Function) {
this.list.set(name, fn);
},
emit: function (name: EventNameType, data: Record<string, any> = {}) {
emit: function (name: EventNameEnum, data: Record<string, any> = {}) {
const fn = this.list.get(name);
fn && fn(data);
},
off: function (name: EventNameType) {
off: function (name: EventNameEnum) {
this.list.delete(name);
}
};

View File

@@ -5,6 +5,7 @@ import type { AppTTSConfigType } from '@fastgpt/global/core/app/type.d';
import { TTSTypeEnum } from '@/constants/app';
import { useTranslation } from 'next-i18next';
import type { OutLinkChatAuthProps } from '@fastgpt/global/support/permission/chat.d';
import { getToken } from '@/web/support/user/auth';
const contentType = 'audio/mpeg';
const splitMarker = 'SPLIT_MARKER';
@@ -13,8 +14,7 @@ export const useAudioPlay = (props?: OutLinkChatAuthProps & { ttsConfig?: AppTTS
const { t } = useTranslation();
const { ttsConfig, shareId, outLinkUid, teamId, teamToken } = props || {};
const { toast } = useToast();
const audioRef = useRef<HTMLAudioElement>(new Audio());
const audio = audioRef.current;
const audioRef = useRef<HTMLAudioElement>();
const [audioLoading, setAudioLoading] = useState(false);
const [audioPlaying, setAudioPlaying] = useState(false);
const audioController = useRef(new AbortController());
@@ -40,7 +40,8 @@ export const useAudioPlay = (props?: OutLinkChatAuthProps & { ttsConfig?: AppTTS
const response = await fetch('/api/core/chat/item/getSpeech', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
'Content-Type': 'application/json',
token: getToken()
},
signal: audioController.current.signal,
body: JSON.stringify({
@@ -93,30 +94,39 @@ export const useAudioPlay = (props?: OutLinkChatAuthProps & { ttsConfig?: AppTTS
window.speechSynthesis?.cancel();
audioController.current.abort('');
} catch (error) {}
if (audio) {
audio.pause();
audio.src = '';
if (audioRef.current) {
audioRef.current.pause();
audioRef.current.src = '';
}
setAudioPlaying(false);
}, [audio]);
}, []);
/* Perform a voice playback */
const playAudioByText = useCallback(
async ({ text, buffer }: { text: string; buffer?: Uint8Array }) => {
const playAudioBuffer = (buffer: Uint8Array) => {
if (!audioRef.current) return;
const audioUrl = URL.createObjectURL(new Blob([buffer], { type: 'audio/mpeg' }));
audio.src = audioUrl;
audio.play();
audioRef.current.src = audioUrl;
audioRef.current.play();
};
const readAudioStream = (stream: ReadableStream<Uint8Array>) => {
if (!audio) return;
if (!audioRef.current) return;
if (!MediaSource) {
toast({
status: 'error',
title: t('core.chat.Audio Not Support')
});
return;
}
// Create media source and play audio
const ms = new MediaSource();
const url = URL.createObjectURL(ms);
audio.src = url;
audio.play();
audioRef.current.src = url;
audioRef.current.play();
let u8Arr: Uint8Array = new Uint8Array();
return new Promise<Uint8Array>(async (resolve, reject) => {
@@ -132,7 +142,7 @@ export const useAudioPlay = (props?: OutLinkChatAuthProps & { ttsConfig?: AppTTS
try {
while (true) {
const { done, value } = await reader.read();
if (done || audio.paused) {
if (done || audioRef.current?.paused) {
resolve(u8Arr);
if (sourceBuffer.updating) {
await new Promise((resolve) => (sourceBuffer.onupdateend = resolve));
@@ -161,7 +171,7 @@ export const useAudioPlay = (props?: OutLinkChatAuthProps & { ttsConfig?: AppTTS
cancelAudio();
// tts play
if (audio && ttsConfig?.type === TTSTypeEnum.model) {
if (audioRef.current && ttsConfig?.type === TTSTypeEnum.model) {
/* buffer tts */
if (buffer) {
playAudioBuffer(buffer);
@@ -188,7 +198,7 @@ export const useAudioPlay = (props?: OutLinkChatAuthProps & { ttsConfig?: AppTTS
}
});
},
[audio, cancelAudio, getAudioStream, playWebAudio, t, toast, ttsConfig?.type]
[cancelAudio, getAudioStream, playWebAudio, t, toast, ttsConfig?.type]
);
// segmented params
@@ -199,7 +209,13 @@ export const useAudioPlay = (props?: OutLinkChatAuthProps & { ttsConfig?: AppTTS
/* Segmented voice playback */
const startSegmentedAudio = useCallback(async () => {
if (!audio) return;
if (!audioRef.current) return;
if (!MediaSource) {
return toast({
status: 'error',
title: t('core.chat.Audio Not Support')
});
}
cancelAudio();
/* reset all source */
@@ -223,15 +239,15 @@ export const useAudioPlay = (props?: OutLinkChatAuthProps & { ttsConfig?: AppTTS
const ms = new MediaSource();
segmentedMediaSource.current = ms;
const url = URL.createObjectURL(ms);
audio.src = url;
audio.play();
audioRef.current.src = url;
audioRef.current.play();
await new Promise((resolve) => {
ms.onsourceopen = resolve;
});
const sourceBuffer = ms.addSourceBuffer(contentType);
segmentedSourceBuffer.current = sourceBuffer;
}, [audio, cancelAudio]);
}, [cancelAudio, t, toast]);
const finishSegmentedAudio = useCallback(() => {
appendAudioPromise.current = appendAudioPromise.current.finally(() => {
if (segmentedMediaSource.current?.readyState === 'open') {
@@ -256,7 +272,7 @@ export const useAudioPlay = (props?: OutLinkChatAuthProps & { ttsConfig?: AppTTS
while (true) {
const { done, value } = await reader.read();
if (done || !audio?.played) {
if (done || !audioRef.current?.played) {
buffer.updating && (await new Promise((resolve) => (buffer.onupdateend = resolve)));
return resolve(u8Arr);
}
@@ -273,7 +289,7 @@ export const useAudioPlay = (props?: OutLinkChatAuthProps & { ttsConfig?: AppTTS
}
});
},
[audio?.played, getAudioStream, segmentedSourceBuffer]
[getAudioStream, segmentedSourceBuffer]
);
/* split audio text and fetch tts */
const splitText2Audio = useCallback(
@@ -314,6 +330,9 @@ export const useAudioPlay = (props?: OutLinkChatAuthProps & { ttsConfig?: AppTTS
// listen audio status
useEffect(() => {
const audio = new Audio();
audioRef.current = audio;
audio.onplay = () => {
setAudioPlaying(true);
};
@@ -341,7 +360,7 @@ export const useAudioPlay = (props?: OutLinkChatAuthProps & { ttsConfig?: AppTTS
}, []);
return {
audio,
audio: audioRef.current,
audioLoading,
audioPlaying,
setAudioPlaying,