perf: model test; perf: sidebar trigger (#4127)

* fix: import dataset step error; perf: ai proxy avatar (#4074)

* perf: pg config params

* perf: ai proxy avatar

* fix: import dataset step error

* feat: data input ux

* perf: app dataset rewrite

* perf: model test

* perf: sidebar trigger

* lock

* update nanoid version

* fix: select component ux

* fix: ts

* fix: vitest

* remove test
This commit is contained in:
Archer
2025-03-12 21:11:43 +08:00
committed by archer
parent c131c2a7dc
commit f71ab0caeb
64 changed files with 438 additions and 1356 deletions

View File

@@ -9,7 +9,7 @@
"encoding": "^0.1.13",
"js-yaml": "^4.1.0",
"jschardet": "3.1.1",
"nanoid": "^4.0.1",
"nanoid": "^5.1.3",
"next": "14.2.21",
"openai": "4.61.0",
"openapi-types": "^12.1.3",

View File

@@ -300,6 +300,9 @@ export const readRawContentByFileBuffer = async ({
return systemParse();
};
const start = Date.now();
addLog.debug(`Start parse file`, { extension });
let { rawText, formatText, imageList } = await (async () => {
if (extension === 'pdf') {
return await pdfParseFn();
@@ -307,6 +310,8 @@ export const readRawContentByFileBuffer = async ({
return await systemParse();
})();
addLog.debug(`Parse file success, time: ${Date.now() - start}ms. Uploading file image.`);
// markdown data format
if (imageList) {
await batchRun(imageList, async (item) => {
@@ -341,5 +346,7 @@ export const readRawContentByFileBuffer = async ({
}
}
addLog.debug(`Upload file image success, time: ${Date.now() - start}ms`);
return { rawText, formatText, imageList };
};

View File

@@ -76,6 +76,10 @@ export const createChatCompletion = async ({
timeout: formatTimeout
});
addLog.debug(`Start create chat completion`, {
model: body.model
});
const response = await ai.chat.completions.create(body, {
...options,
...(modelConstantsData.requestUrl ? { path: modelConstantsData.requestUrl } : {}),

View File

@@ -36,14 +36,12 @@ export async function getVectorsByText({ model, input, type, headers }: GetVecto
model.requestUrl
? {
path: model.requestUrl,
headers: model.requestAuth
? {
Authorization: `Bearer ${model.requestAuth}`,
...headers
}
: headers
headers: {
...(model.requestAuth ? { Authorization: `Bearer ${model.requestAuth}` } : {}),
...headers
}
}
: {}
: { headers }
)
.then(async (res) => {
if (!res.data) {

View File

@@ -61,7 +61,6 @@ export async function rewriteAppWorkflowToDetail({
teamId: isRoot ? undefined : teamId,
datasetIdList: Array.from(datasetIdSet)
});
const datasetMap = new Map(datasetList.map((ds) => [String(ds.datasetId), ds]));
// Rewrite dataset ids, add dataset info to nodes

View File

@@ -9,7 +9,7 @@
"axios": "^1.8.2",
"chalk": "^5.3.0",
"cheerio": "1.0.0-rc.12",
"cookie": "^0.5.0",
"cookie": "^0.7.1",
"date-fns": "2.30.0",
"dayjs": "^1.11.7",
"decompress": "^4.2.1",

View File

@@ -9,6 +9,7 @@ type Props = FlexProps & {
onClick?: () => void;
hoverColor?: string;
tip?: string;
isLoading?: boolean;
};
const MyIconButton = ({
@@ -17,11 +18,13 @@ const MyIconButton = ({
hoverColor = 'primary.600',
size = '1rem',
tip,
isLoading = false,
...props
}: Props) => {
return (
<MyTooltip label={tip}>
<Flex
position={'relative'}
p={1}
color={'myGray.500'}
rounded={'sm'}
@@ -33,11 +36,14 @@ const MyIconButton = ({
bg: 'myGray.05',
color: hoverColor
}}
onClick={onClick}
onClick={() => {
if (isLoading) return;
onClick?.();
}}
sx={{ userSelect: 'none' }}
{...props}
>
<MyIcon name={icon as any} w={size} />
<MyIcon name={isLoading ? 'common/loading' : (icon as any)} w={size} />
</Flex>
</MyTooltip>
);

View File

@@ -24,6 +24,7 @@ import MyIcon from '../Icon';
import { useRequest2 } from '../../../hooks/useRequest';
import MyDivider from '../MyDivider';
import { useScrollPagination } from '../../../hooks/useScrollPagination';
import Avatar from '../Avatar';
/** 选择组件 Props 类型
* value: 选中的值
@@ -32,20 +33,21 @@ import { useScrollPagination } from '../../../hooks/useScrollPagination';
* isLoading: 是否加载中
* ScrollData: 分页滚动数据控制器 [useScrollPagination] 的返回值
* */
export type SelectProps<T = any> = ButtonProps & {
export type SelectProps<T = any> = Omit<ButtonProps, 'onChange'> & {
value?: T;
placeholder?: string;
isSearch?: boolean;
list: {
alias?: string;
icon?: string;
iconSize?: string;
label: string | React.ReactNode;
description?: string;
value: T;
showBorder?: boolean;
}[];
isLoading?: boolean;
onchange?: (val: T) => any | Promise<any>;
onChange?: (val: T) => any | Promise<any>;
ScrollData?: ReturnType<typeof useScrollPagination>['ScrollData'];
};
@@ -56,7 +58,7 @@ const MySelect = <T = any,>(
isSearch = false,
width = '100%',
list = [],
onchange,
onChange,
isLoading = false,
ScrollData,
...props
@@ -115,7 +117,7 @@ const MySelect = <T = any,>(
}
}, [isSearch, isOpen]);
const { runAsync: onChange, loading } = useRequest2((val: T) => onchange?.(val));
const { runAsync: onclickChange, loading } = useRequest2((val: T) => onChange?.(val));
const ListRender = useMemo(() => {
return (
@@ -135,16 +137,17 @@ const MySelect = <T = any,>(
color: 'myGray.900'
})}
onClick={() => {
if (onChange && value !== item.value) {
onChange(item.value);
if (value !== item.value) {
onclickChange(item.value);
}
}}
whiteSpace={'pre-wrap'}
fontSize={'sm'}
display={'block'}
mb={0.5}
>
<Flex alignItems={'center'}>
{item.icon && <MyIcon mr={2} name={item.icon as any} w={'1rem'} />}
{item.icon && <Avatar mr={2} src={item.icon as any} w={item.iconSize ?? '1rem'} />}
{item.label}
</Flex>
{item.description && (
@@ -224,7 +227,9 @@ const MySelect = <T = any,>(
/>
) : (
<>
{selectItem?.icon && <MyIcon mr={2} name={selectItem.icon as any} w={'1rem'} />}
{selectItem?.icon && (
<Avatar mr={2} src={selectItem.icon as any} w={selectItem.iconSize ?? '1rem'} />
)}
{selectItem?.alias || selectItem?.label || placeholder}
</>
)}

View File

@@ -200,7 +200,7 @@ export function usePagination<DataT, ResT = {}>(
// Watch scroll position
useThrottleEffect(
() => {
if (!ref?.current || type !== 'scroll' || noMore) return;
if (!ref?.current || type !== 'scroll' || noMore || isLoading) return;
const { scrollTop, scrollHeight, clientHeight } = ref.current;
if (
@@ -211,7 +211,7 @@ export function usePagination<DataT, ResT = {}>(
fetchData(pageNum + 1, ref);
}
},
[scroll],
[scroll, isLoading],
{ wait: 50 }
);

View File

@@ -21,7 +21,6 @@
"edit_channel": "Channel configuration",
"enable_channel": "Enable",
"forbid_channel": "Disabled",
"maxToken_tip": "The model max_tokens parameter, if left blank, means that the model does not support it.",
"key_type": "API key format:",
"log": "Call log",
"log_detail": "Log details",
@@ -29,6 +28,7 @@
"log_status": "Status",
"mapping": "Model Mapping",
"mapping_tip": "A valid Json is required. \nThe model can be mapped when sending a request to the actual address. \nFor example:\n{\n \n \"gpt-4o\": \"gpt-4o-test\"\n\n}\n\nWhen FastGPT requests the gpt-4o model, the gpt-4o-test model is sent to the actual address, instead of gpt-4o.",
"maxToken_tip": "The model max_tokens parameter, if left blank, means that the model does not support it.",
"max_temperature_tip": "If the model temperature parameter is not filled in, it means that the model does not support the temperature parameter.",
"model": "Model",
"model_name": "Model name",
@@ -43,7 +43,7 @@
"select_model_placeholder": "Select the model available under this channel",
"select_provider_placeholder": "Search for manufacturers",
"selected_model_empty": "Choose at least one model",
"start_test": "Start testing {{num}} models",
"start_test": "Batch test {{num}} models",
"test_failed": "There are {{num}} models that report errors",
"vlm_model": "Vlm",
"vlm_model_tip": "Used to generate additional indexing of images in a document in the knowledge base",

View File

@@ -21,7 +21,6 @@
"edit_channel": "渠道配置",
"enable_channel": "启用",
"forbid_channel": "禁用",
"maxToken_tip": "模型 max_tokens 参数,如果留空,则代表模型不支持该参数。",
"key_type": "API key 格式: ",
"log": "调用日志",
"log_detail": "日志详情",
@@ -29,6 +28,7 @@
"log_status": "状态",
"mapping": "模型映射",
"mapping_tip": "需填写一个有效 Json。可在向实际地址发送请求时对模型进行映射。例如\n{\n \"gpt-4o\": \"gpt-4o-test\"\n}\n当 FastGPT 请求 gpt-4o 模型时,会向实际地址发送 gpt-4o-test 的模型,而不是 gpt-4o。",
"maxToken_tip": "模型 max_tokens 参数,如果留空,则代表模型不支持该参数。",
"max_temperature_tip": "模型 temperature 参数,不填则代表模型不支持 temperature 参数。",
"model": "模型",
"model_name": "模型名",
@@ -43,7 +43,7 @@
"select_model_placeholder": "选择该渠道下可用的模型",
"select_provider_placeholder": "搜索厂商",
"selected_model_empty": "至少选择一个模型",
"start_test": "开始测试{{num}}个模型",
"start_test": "批量测试{{num}}个模型",
"test_failed": "有{{num}}个模型报错",
"vlm_model": "图片理解模型",
"vlm_model_tip": "用于知识库中对文档中的图片进行额外的索引生成",

View File

@@ -19,7 +19,6 @@
"edit_channel": "渠道配置",
"enable_channel": "啟用",
"forbid_channel": "禁用",
"maxToken_tip": "模型 max_tokens 參數,如果留空,則代表模型不支持該參數。",
"key_type": "API key 格式:",
"log": "調用日誌",
"log_detail": "日誌詳情",
@@ -27,6 +26,7 @@
"log_status": "狀態",
"mapping": "模型映射",
"mapping_tip": "需填寫一個有效 Json。\n可在向實際地址發送請求時對模型進行映射。\n例如\n{\n \n \"gpt-4o\": \"gpt-4o-test\"\n\n}\n\n當 FastGPT 請求 gpt-4o 模型時,會向實際地址發送 gpt-4o-test 的模型,而不是 gpt-4o。",
"maxToken_tip": "模型 max_tokens 參數,如果留空,則代表模型不支持該參數。",
"max_temperature_tip": "模型 temperature 參數,不填則代表模型不支持 temperature 參數。",
"model": "模型",
"model_name": "模型名",
@@ -41,7 +41,7 @@
"select_model_placeholder": "選擇該渠道下可用的模型",
"select_provider_placeholder": "搜索廠商",
"selected_model_empty": "至少選擇一個模型",
"start_test": "開始測試{{num}}個模型",
"start_test": "批量測試{{num}}個模型",
"test_failed": "有{{num}}個模型報錯",
"vlm_model": "圖片理解模型",
"vlm_model_tip": "用於知識庫中對文檔中的圖片進行額外的索引生成",