Optimize the search view component, add workspace statistics and RAG vector initialization features, update internationalization support, improve user interaction prompts, enhance log output, and ensure better user experience and code readability.

This commit is contained in:
duanfuxiang
2025-07-07 09:47:37 +08:00
parent 51f8620815
commit 3db334c6e8
10 changed files with 1532 additions and 39 deletions

View File

@@ -35,6 +35,7 @@ export interface EmbeddingModelInfo {
// https://docs.anthropic.com/en/docs/about-claude/models
export type AnthropicModelId = keyof typeof anthropicModels
export const anthropicDefaultModelId: AnthropicModelId = "claude-sonnet-4-20250514"
export const anthropicDefaultInsightModelId: AnthropicModelId = "claude-sonnet-4-20250514"
export const anthropicDefaultAutoCompleteModelId: AnthropicModelId = "claude-3-5-haiku-20241022"
export const anthropicDefaultEmbeddingModelId: AnthropicModelId = null // embedding models are not supported by this provider
export const anthropicModels = {
@@ -131,6 +132,7 @@ export const anthropicModels = {
// Infio
export const infioDefaultModelId = "gemini/gemini-2.5-pro-preview-06-05" // for chat
export const infioDefaultInsightModelId = "deepseek/deepseek-v3" // for insight
export const infioDefaultAutoCompleteModelId = "groq/llama-3.3-70b-versatile" // for auto complete
export const infioDefaultEmbeddingModelId = "openai/text-embedding-3-small" // for embedding
export const infioDefaultModelInfo: ModelInfo = {
@@ -214,6 +216,7 @@ export const infioEmbeddingModels = {
// OpenRouter
// https://openrouter.ai/models?order=newest&supported_parameters=tools
export const openRouterDefaultModelId = "google/gemini-2.5-pro-preview" // for chat
export const openRouterDefaultInsightModelId = "deepseek/deepseek-chat-v3-0324" // for insight
export const openRouterDefaultAutoCompleteModelId = "google/gemini-2.5-flash-preview-05-20" // for auto complete
export const openRouterDefaultEmbeddingModelId = null // embedding models are not supported by this provider
export const openRouterDefaultModelInfo: ModelInfo = {
@@ -268,6 +271,7 @@ async function fetchOpenRouterModels(): Promise<Record<string, ModelInfo>> {
// https://ai.google.dev/gemini-api/docs/models/gemini
export type GeminiModelId = keyof typeof geminiModels
export const geminiDefaultModelId: GeminiModelId = "gemini-2.5-pro-preview-05-06"
export const geminiDefaultInsightModelId: GeminiModelId = "gemini-2.5-flash-preview-05-20"
export const geminiDefaultAutoCompleteModelId: GeminiModelId = "gemini-2.5-flash-preview-05-20"
export const geminiDefaultEmbeddingModelId: keyof typeof geminiEmbeddingModels = "text-embedding-004"
@@ -497,6 +501,7 @@ export const geminiEmbeddingModels = {
// https://openai.com/api/pricing/
export type OpenAiNativeModelId = keyof typeof openAiNativeModels
export const openAiNativeDefaultModelId: OpenAiNativeModelId = "gpt-4o"
export const openAiNativeDefaultInsightModelId: OpenAiNativeModelId = "gpt-4o-mini"
export const openAiNativeDefaultAutoCompleteModelId: OpenAiNativeModelId = "gpt-4o-mini"
export const openAiNativeDefaultEmbeddingModelId: keyof typeof openAINativeEmbeddingModels = "text-embedding-3-small"
@@ -605,6 +610,7 @@ export const openAINativeEmbeddingModels = {
// https://api-docs.deepseek.com/quick_start/pricing
export type DeepSeekModelId = keyof typeof deepSeekModels
export const deepSeekDefaultModelId: DeepSeekModelId = "deepseek-chat"
export const deepSeekDefaultInsightModelId: DeepSeekModelId = "deepseek-chat"
export const deepSeekDefaultAutoCompleteModelId: DeepSeekModelId = "deepseek-chat"
export const deepSeekDefaultEmbeddingModelId = null // embedding models are not supported by this provider
@@ -635,6 +641,7 @@ export const deepSeekModels = {
// https://help.aliyun.com/zh/model-studio/getting-started/
export type QwenModelId = keyof typeof qwenModels
export const qwenDefaultModelId: QwenModelId = "qwen3-235b-a22b"
export const qwenDefaultInsightModelId: QwenModelId = "qwen3-32b"
export const qwenDefaultAutoCompleteModelId: QwenModelId = "qwen3-32b"
export const qwenDefaultEmbeddingModelId: keyof typeof qwenEmbeddingModels = "text-embedding-v3"
@@ -937,6 +944,7 @@ export const qwenEmbeddingModels = {
// https://docs.siliconflow.cn/
export type SiliconFlowModelId = keyof typeof siliconFlowModels
export const siliconFlowDefaultModelId: SiliconFlowModelId = "deepseek-ai/DeepSeek-V3"
export const siliconFlowDefaultInsightModelId: SiliconFlowModelId = "deepseek-ai/DeepSeek-V3"
export const siliconFlowDefaultAutoCompleteModelId: SiliconFlowModelId = "deepseek-ai/DeepSeek-V3"
export const siliconFlowDefaultEmbeddingModelId: keyof typeof siliconFlowEmbeddingModels = "BAAI/bge-m3"
@@ -1420,6 +1428,7 @@ export const siliconFlowEmbeddingModels = {
// https://console.groq.com/docs/overview
export type GroqModelId = keyof typeof groqModels
export const groqDefaultModelId: GroqModelId = "llama-3.3-70b-versatile"
export const groqDefaultInsightModelId: GroqModelId = "llama-3.3-70b-versatile"
export const groqDefaultAutoCompleteModelId: GroqModelId = "llama-3.3-70b-versatile"
export const groqDefaultEmbeddingModelId = null // embedding models are not supported by this provider
@@ -1581,6 +1590,7 @@ export const groqModels = {
// https://docs.x.ai/docs/models
export type GrokModelId = keyof typeof grokModels
export const grokDefaultModelId: GrokModelId = "grok-3"
export const grokDefaultInsightModelId: GrokModelId = "grok-3-mini"
export const grokDefaultAutoCompleteModelId: GrokModelId = "grok-3-mini-fast"
export const grokDefaultEmbeddingModelId = null // embedding models are not supported by this provider
@@ -1637,6 +1647,7 @@ export const grokModels = {
// LocalProvider (local embedding models)
export const localProviderDefaultModelId = null // this is not supported for chat/autocomplete
export const localProviderDefaultInsightModelId = null // this is not supported for insight
export const localProviderDefaultAutoCompleteModelId = null // this is not supported for chat/autocomplete
export const localProviderDefaultEmbeddingModelId: keyof typeof localProviderEmbeddingModels = "TaylorAI/bge-micro-v2"
@@ -1805,77 +1816,103 @@ export const GetEmbeddingModelInfo = (provider: ApiProvider, modelId: string): E
}
// Get default model id for a provider
export const GetDefaultModelId = (provider: ApiProvider): { chat: string, autoComplete: string, embedding: string } => {
export const GetDefaultModelId = (provider: ApiProvider): { chat: string, insight: string, autoComplete: string, embedding: string } => {
switch (provider) {
case ApiProvider.Infio:
return {
"chat": infioDefaultModelId,
"insight": infioDefaultInsightModelId,
"autoComplete": infioDefaultAutoCompleteModelId,
"embedding": infioDefaultEmbeddingModelId,
}
case ApiProvider.OpenRouter:
return {
"chat": openRouterDefaultModelId,
"insight": openRouterDefaultInsightModelId,
"autoComplete": openRouterDefaultAutoCompleteModelId,
"embedding": openRouterDefaultEmbeddingModelId,
}
case ApiProvider.Anthropic:
return {
"chat": anthropicDefaultModelId,
"insight": anthropicDefaultInsightModelId,
"autoComplete": anthropicDefaultAutoCompleteModelId,
"embedding": anthropicDefaultEmbeddingModelId,
}
case ApiProvider.OpenAI:
return {
"chat": openAiNativeDefaultModelId,
"insight": openAiNativeDefaultInsightModelId,
"autoComplete": openAiNativeDefaultAutoCompleteModelId,
"embedding": openAiNativeDefaultEmbeddingModelId,
}
case ApiProvider.Deepseek:
return {
"chat": deepSeekDefaultModelId,
"insight": deepSeekDefaultInsightModelId,
"autoComplete": deepSeekDefaultAutoCompleteModelId,
"embedding": deepSeekDefaultEmbeddingModelId,
}
case ApiProvider.Google:
return {
"chat": geminiDefaultModelId,
"insight": geminiDefaultInsightModelId,
"autoComplete": geminiDefaultAutoCompleteModelId,
"embedding": geminiDefaultEmbeddingModelId,
}
case ApiProvider.AlibabaQwen:
return {
"chat": qwenDefaultModelId,
"insight": qwenDefaultInsightModelId,
"autoComplete": qwenDefaultAutoCompleteModelId,
"embedding": qwenDefaultEmbeddingModelId,
}
case ApiProvider.SiliconFlow:
return {
"chat": siliconFlowDefaultModelId,
"insight": siliconFlowDefaultInsightModelId,
"autoComplete": siliconFlowDefaultAutoCompleteModelId,
"embedding": siliconFlowDefaultEmbeddingModelId,
}
case ApiProvider.Groq:
return {
"chat": groqDefaultModelId,
"insight": groqDefaultInsightModelId,
"autoComplete": groqDefaultAutoCompleteModelId,
"embedding": groqDefaultEmbeddingModelId,
}
case ApiProvider.Grok:
return {
"chat": grokDefaultModelId,
"insight": grokDefaultInsightModelId,
"autoComplete": grokDefaultAutoCompleteModelId,
"embedding": grokDefaultEmbeddingModelId,
}
case ApiProvider.Ollama:
return {
"chat": null, // user-configured
"insight": null, // user-configured
"autoComplete": null, // user-configured
"embedding": null, // not supported
}
case ApiProvider.OpenAICompatible:
return {
"chat": null, // user-configured
"insight": null, // user-configured
"autoComplete": null, // user-configured
"embedding": null, // user-configured
}
case ApiProvider.LocalProvider:
return {
"chat": localProviderDefaultModelId,
"insight": localProviderDefaultInsightModelId,
"autoComplete": localProviderDefaultAutoCompleteModelId,
"embedding": localProviderDefaultEmbeddingModelId,
}
default:
return {
"chat": null,
"insight": null,
"autoComplete": null,
"embedding": null,
}