init
This commit is contained in:
68
src/types/chat.ts
Normal file
68
src/types/chat.ts
Normal file
@@ -0,0 +1,68 @@
|
||||
import { SerializedEditorState } from 'lexical'
|
||||
|
||||
import { SelectVector } from '../database/schema'
|
||||
|
||||
import { CustomLLMModel } from './llm/model'
|
||||
import { ContentPart } from './llm/request'
|
||||
import { ResponseUsage } from './llm/response'
|
||||
import { Mentionable, SerializedMentionable } from './mentionable'
|
||||
|
||||
// A message authored by the user in a chat conversation.
export type ChatUserMessage = {
  role: 'user'
  // Rich-text editor state (Lexical); null when there is no editor content.
  content: SerializedEditorState | null
  // What is actually sent to the LLM: plain text, multi-part content,
  // or null when the prompt has not been built yet.
  promptContent: string | ContentPart[] | null
  id: string
  // Items the user @-mentioned in this message (files, folders, blocks, ...).
  mentionables: Mentionable[]
  // Optional RAG results attached to this message; the raw embedding vector
  // is omitted and a similarity score is added to each row.
  similaritySearchResults?: (Omit<SelectVector, 'embedding'> & {
    similarity: number
  })[]
}
|
||||
// A message produced by the assistant (LLM) in a chat conversation.
export type ChatAssistantMessage = {
  role: 'assistant'
  content: string
  id: string
  // Optional bookkeeping about how this message was generated.
  metadata?: {
    usage?: ResponseUsage
    model?: CustomLLMModel
  }
}
|
||||
// Any in-memory chat message, discriminated by `role`.
export type ChatMessage = ChatUserMessage | ChatAssistantMessage
|
||||
|
||||
// Persisted form of ChatUserMessage: mentionables are stored as
// SerializedMentionable (string paths instead of live Obsidian objects).
export type SerializedChatUserMessage = {
  role: 'user'
  content: SerializedEditorState | null
  promptContent: string | ContentPart[] | null
  id: string
  mentionables: SerializedMentionable[]
  // Same shape as on ChatUserMessage; contains no live objects, so it is
  // stored as-is.
  similaritySearchResults?: (Omit<SelectVector, 'embedding'> & {
    similarity: number
  })[]
}
|
||||
// Persisted form of ChatAssistantMessage.
// NOTE(review): this is structurally identical to ChatAssistantMessage
// (it holds no live objects); kept as a separate declaration for symmetry
// with the user-message pair.
export type SerializedChatAssistantMessage = {
  role: 'assistant'
  content: string
  id: string
  metadata?: {
    usage?: ResponseUsage
    model?: CustomLLMModel
  }
}
|
||||
// Persisted form of ChatMessage, discriminated by `role`.
export type SerializedChatMessage =
  | SerializedChatUserMessage
  | SerializedChatAssistantMessage
|
||||
|
||||
// A persisted chat conversation, including its full message history.
export type ChatConversation = {
  schemaVersion: number // storage-format version, for future migrations
  id: string
  title: string
  createdAt: number // epoch timestamp — presumably milliseconds; TODO confirm against writer
  updatedAt: number
  messages: SerializedChatMessage[]
}
|
||||
// Lightweight listing info for a conversation: the same fields as
// ChatConversation minus the message history (keep the two in sync).
export type ChatConversationMeta = {
  schemaVersion: number
  id: string
  title: string
  createdAt: number
  updatedAt: number
}
|
||||
21
src/types/embedding.ts
Normal file
21
src/types/embedding.ts
Normal file
@@ -0,0 +1,21 @@
|
||||
import { CustomLLMModel } from './llm/model'
|
||||
|
||||
// Identifiers of the supported embedding models.
export type EmbeddingModelId =
  | 'text-embedding-3-small'
  | 'text-embedding-004'
  | 'nomic-embed-text'
  | 'mxbai-embed-large'
  | 'bge-m3'
|
||||
|
||||
// A selectable embedding model entry (e.g. for a settings dropdown).
export type EmbeddingModelOption = {
  id: EmbeddingModelId
  name: string // human-readable label
  model: CustomLLMModel // provider/connection details for the model
  dimension: number // length of the embedding vectors it produces
}
|
||||
|
||||
// A ready-to-use embedding model instance.
export type EmbeddingModel = {
  id: EmbeddingModelId
  dimension: number
  // Embeds a single text; implementations are expected to be async
  // (remote API or local server call).
  getEmbedding: (text: string) => Promise<number[]>
}
|
||||
20
src/types/llm/model.ts
Normal file
20
src/types/llm/model.ts
Normal file
@@ -0,0 +1,20 @@
|
||||
// Model Providers
|
||||
export enum ModelProviders {
|
||||
OPENAI = "openai",
|
||||
ANTHROPIC = "anthropic",
|
||||
GOOGLE = "google",
|
||||
GROQ = "groq",
|
||||
deepseek = "deepseek",
|
||||
Ollama = "ollama",
|
||||
}
|
||||
|
||||
// A configured model entry (chat or embedding) as stored in settings.
export type CustomLLMModel = {
  name: string;
  // Provider id; see ModelProviders for the known values.
  provider: string;
  baseUrl?: string; // override endpoint, e.g. for self-hosted servers
  apiKey?: string; // per-model key overriding the provider-level one
  enabled: boolean;
  isEmbeddingModel: boolean;
  // True for models shipped with the plugin (as opposed to user-added).
  isBuiltIn: boolean;
  dimension?: number; // embedding dimension; only meaningful when isEmbeddingModel
}
|
||||
56
src/types/llm/request.ts
Normal file
56
src/types/llm/request.ts
Normal file
@@ -0,0 +1,56 @@
|
||||
// These types are based on the OpenRouter API specification
|
||||
// https://openrouter.ai/docs/requests
|
||||
|
||||
import { ChatCompletionCreateParams } from 'openai/resources'
|
||||
|
||||
// Base fields shared by all chat-completion requests (modeled on the
// OpenRouter request format).
export type LLMRequestBase = {
  messages: RequestMessage[]
  model: string

  // LLM Parameters (https://openrouter.ai/docs/parameters)
  max_tokens?: number // Range: [1, context_length)
  temperature?: number // Range: [0, 2]
  top_p?: number // Range: (0, 1]
  frequency_penalty?: number // Range: [-2, 2]
  presence_penalty?: number // Range: [-2, 2]

  // Additional optional parameters
  logit_bias?: Record<number, number> // token id -> bias

  // Only available for OpenAI
  prediction?: ChatCompletionCreateParams['prediction']
}
|
||||
|
||||
// Non-streaming request: `stream` omitted, false, or null.
export type LLMRequestNonStreaming = LLMRequestBase & {
  stream?: false | null
}

// Streaming request: `stream` must be exactly true.
export type LLMRequestStreaming = LLMRequestBase & {
  stream: true
}

// Any chat-completion request, discriminated by `stream`.
export type LLMRequest = LLMRequestNonStreaming | LLMRequestStreaming
|
||||
|
||||
// Plain-text part of a multi-part message body.
type TextContent = {
  type: 'text'
  text: string
}

// Image part of a multi-part message body.
type ImageContentPart = {
  type: 'image_url'
  image_url: {
    url: string // URL or base64 encoded image data
  }
}

// One element of a multi-part message body, discriminated by `type`.
export type ContentPart = TextContent | ImageContentPart
|
||||
|
||||
// A single chat message sent to the model.
export type RequestMessage = {
  role: 'user' | 'assistant' | 'system'
  // ContentParts are only for the 'user' role:
  content: string | ContentPart[]
}
|
||||
|
||||
// Per-call options that are not part of the request payload itself.
export type LLMOptions = {
  signal?: AbortSignal // lets callers cancel an in-flight request
}
|
||||
51
src/types/llm/response.ts
Normal file
51
src/types/llm/response.ts
Normal file
@@ -0,0 +1,51 @@
|
||||
// These types are based on the OpenRouter API specification
|
||||
// https://openrouter.ai/docs/responses
|
||||
|
||||
// Base fields shared by all chat-completion responses (modeled on the
// OpenRouter response format).
export type LLMResponseBase = {
  id: string
  created?: number // creation time reported by the provider
  model: string
  system_fingerprint?: string
  usage?: ResponseUsage // token accounting, when the provider reports it
}
|
||||
|
||||
// Complete (non-streaming) response.
export type LLMResponseNonStreaming = LLMResponseBase & {
  choices: NonStreamingChoice[]
  object: 'chat.completion'
}

// One chunk of a streaming response.
export type LLMResponseStreaming = LLMResponseBase & {
  choices: StreamingChoice[]
  object: 'chat.completion.chunk'
}

// Any chat-completion response, discriminated by `object`.
export type LLMResponse = LLMResponseNonStreaming | LLMResponseStreaming
|
||||
|
||||
// Token accounting reported by the provider.
export type ResponseUsage = {
  prompt_tokens: number
  completion_tokens: number
  total_tokens: number
}
|
||||
|
||||
type NonStreamingChoice = {
|
||||
finish_reason: string | null // Depends on the model. Ex: 'stop' | 'length' | 'content_filter' | 'tool_calls' | 'function_call'
|
||||
message: {
|
||||
content: string | null
|
||||
role: string
|
||||
}
|
||||
error?: Error
|
||||
}
|
||||
|
||||
type StreamingChoice = {
|
||||
finish_reason: string | null
|
||||
delta: {
|
||||
content: string | null
|
||||
role?: string
|
||||
}
|
||||
error?: Error
|
||||
}
|
||||
|
||||
type Error = {
|
||||
code: number // See "Error Handling" section
|
||||
message: string
|
||||
}
|
||||
74
src/types/mentionable.ts
Normal file
74
src/types/mentionable.ts
Normal file
@@ -0,0 +1,74 @@
|
||||
import { TFile, TFolder } from 'obsidian'
|
||||
|
||||
// A vault file mentioned in a chat message.
export type MentionableFile = {
  type: 'file'
  file: TFile
}
// A vault folder mention.
export type MentionableFolder = {
  type: 'folder'
  folder: TFolder
}
// Mention of the entire vault.
export type MentionableVault = {
  type: 'vault'
}
// Mention of the currently-open file; null when no file is active.
export type MentionableCurrentFile = {
  type: 'current-file'
  file: TFile | null
}
// A selected span of lines within a file.
export type MentionableBlockData = {
  content: string
  file: TFile
  startLine: number
  endLine: number
}
export type MentionableBlock = MentionableBlockData & {
  type: 'block'
}
|
||||
// An external URL mention.
export type MentionableUrl = {
  type: 'url'
  url: string
}
// An attached image.
export type MentionableImage = {
  type: 'image'
  name: string
  mimeType: string
  data: string // base64
}
// Anything that can be @-mentioned in a chat message, discriminated by `type`.
export type Mentionable =
  | MentionableFile
  | MentionableFolder
  | MentionableVault
  | MentionableCurrentFile
  | MentionableBlock
  | MentionableUrl
  | MentionableImage
|
||||
// Persisted forms of the mentionables: live Obsidian TFile/TFolder
// references are replaced by their vault paths (strings).
export type SerializedMentionableFile = {
  type: 'file'
  file: string // vault path of the file
}
export type SerializedMentionableFolder = {
  type: 'folder'
  folder: string // vault path of the folder
}
// Holds no live objects, so it serializes unchanged.
export type SerializedMentionableVault = MentionableVault
export type SerializedMentionableCurrentFile = {
  type: 'current-file'
  file: string | null // vault path, or null when no file was active
}
export type SerializedMentionableBlock = {
  type: 'block'
  content: string
  file: string // vault path of the file containing the block
  startLine: number
  endLine: number
}
// URL and image mentions hold no live objects, so they serialize unchanged.
export type SerializedMentionableUrl = MentionableUrl
export type SerializedMentionableImage = MentionableImage
export type SerializedMentionable =
  | SerializedMentionableFile
  | SerializedMentionableFolder
  | SerializedMentionableVault
  | SerializedMentionableCurrentFile
  | SerializedMentionableBlock
  | SerializedMentionableUrl
  | SerializedMentionableImage
|
||||
122
src/types/settings.test.ts
Normal file
122
src/types/settings.test.ts
Normal file
@@ -0,0 +1,122 @@
|
||||
import { SETTINGS_SCHEMA_VERSION, parseInfioSettings } from './settings'
|
||||
|
||||
describe('parseSmartCopilotSettings', () => {
|
||||
it('should return default values for empty input', () => {
|
||||
const result = parseInfioSettings({})
|
||||
expect(result).toEqual({
|
||||
version: SETTINGS_SCHEMA_VERSION,
|
||||
|
||||
openAIApiKey: '',
|
||||
anthropicApiKey: '',
|
||||
geminiApiKey: '',
|
||||
groqApiKey: '',
|
||||
|
||||
chatModelId: 'anthropic/claude-3.5-sonnet-latest',
|
||||
ollamaChatModel: {
|
||||
baseUrl: '',
|
||||
model: '',
|
||||
},
|
||||
openAICompatibleChatModel: {
|
||||
baseUrl: '',
|
||||
apiKey: '',
|
||||
model: '',
|
||||
},
|
||||
|
||||
applyModelId: 'openai/gpt-4o-mini',
|
||||
ollamaApplyModel: {
|
||||
baseUrl: '',
|
||||
model: '',
|
||||
},
|
||||
openAICompatibleApplyModel: {
|
||||
baseUrl: '',
|
||||
apiKey: '',
|
||||
model: '',
|
||||
},
|
||||
|
||||
embeddingModelId: 'openai/text-embedding-3-small',
|
||||
ollamaEmbeddingModel: {
|
||||
baseUrl: '',
|
||||
model: '',
|
||||
},
|
||||
|
||||
systemPrompt: '',
|
||||
ragOptions: {
|
||||
chunkSize: 1000,
|
||||
thresholdTokens: 8192,
|
||||
minSimilarity: 0.0,
|
||||
limit: 10,
|
||||
excludePatterns: [],
|
||||
includePatterns: [],
|
||||
},
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// Exercises the v0 -> v1 migration path through parseInfioSettings.
describe('settings migration', () => {
  it('should migrate from v0 to v1', () => {
    // v0-shaped settings: a single shared `ollamaBaseUrl` and no version.
    const oldSettings = {
      openAIApiKey: 'openai-api-key',
      groqApiKey: 'groq-api-key',
      anthropicApiKey: 'anthropic-api-key',
      ollamaBaseUrl: 'http://localhost:11434',
      chatModel: 'claude-3.5-sonnet-latest',
      applyModel: 'gpt-4o-mini',
      embeddingModel: 'text-embedding-3-small',
      systemPrompt: 'system prompt',
      ragOptions: {
        chunkSize: 1000,
        thresholdTokens: 8192,
        minSimilarity: 0.0,
        limit: 10,
      },
    }

    const result = parseInfioSettings(oldSettings)
    // The shared ollamaBaseUrl must be copied into each per-purpose
    // Ollama model config, and the version bumped to 1.
    expect(result).toEqual({
      version: 1,

      openAIApiKey: 'openai-api-key',
      anthropicApiKey: 'anthropic-api-key',
      geminiApiKey: '',
      groqApiKey: 'groq-api-key',

      chatModelId: 'anthropic/claude-3.5-sonnet-latest',
      ollamaChatModel: {
        baseUrl: 'http://localhost:11434',
        model: '',
      },
      openAICompatibleChatModel: {
        baseUrl: '',
        apiKey: '',
        model: '',
      },

      applyModelId: 'openai/gpt-4o-mini',
      ollamaApplyModel: {
        baseUrl: 'http://localhost:11434',
        model: '',
      },
      openAICompatibleApplyModel: {
        baseUrl: '',
        apiKey: '',
        model: '',
      },

      embeddingModelId: 'openai/text-embedding-3-small',
      ollamaEmbeddingModel: {
        baseUrl: 'http://localhost:11434',
        model: '',
      },

      systemPrompt: 'system prompt',
      ragOptions: {
        chunkSize: 1000,
        thresholdTokens: 8192,
        minSimilarity: 0.0,
        limit: 10,
        excludePatterns: [],
        includePatterns: [],
      },
    })
  })
})
|
||||
239
src/types/settings.ts
Normal file
239
src/types/settings.ts
Normal file
@@ -0,0 +1,239 @@
|
||||
import { z } from 'zod';
|
||||
|
||||
|
||||
import { DEFAULT_MODELS } from '../constants';
|
||||
import {
|
||||
fewShotExampleSchema,
|
||||
MAX_DELAY,
|
||||
MAX_MAX_CHAR_LIMIT,
|
||||
MIN_DELAY,
|
||||
MIN_MAX_CHAR_LIMIT,
|
||||
modelOptionsSchema
|
||||
} from '../settings/versions/shared';
|
||||
import { DEFAULT_AUTOCOMPLETE_SETTINGS } from "../settings/versions/v1/v1";
|
||||
import { isRegexValid, isValidIgnorePattern } from '../utils/auto-complete';
|
||||
|
||||
// Current settings schema version. Must be an integer >= the highest
// Migration.toVersion: migrateSettings() only applies migrations with
// `toVersion <= SETTINGS_SCHEMA_VERSION`, so the previous value (0.1)
// made the 0 -> 1 migration unreachable (and the migration test expects
// version 1).
export const SETTINGS_SCHEMA_VERSION = 1
|
||||
|
||||
// Connection settings for an Ollama-served model; `.catch('')` makes each
// field degrade to '' instead of failing validation.
const ollamaModelSchema = z.object({
  baseUrl: z.string().catch(''),
  model: z.string().catch(''),
})

// Connection settings for any OpenAI-compatible endpoint.
const openAICompatibleModelSchema = z.object({
  baseUrl: z.string().catch(''),
  apiKey: z.string().catch(''),
  model: z.string().catch(''),
})

// Retrieval (RAG) options; every field falls back to a safe default.
const ragOptionsSchema = z.object({
  chunkSize: z.number().catch(1000),
  thresholdTokens: z.number().catch(8192),
  minSimilarity: z.number().catch(0.0),
  limit: z.number().catch(10),
  excludePatterns: z.array(z.string()).catch([]),
  includePatterns: z.array(z.string()).catch([]),
})
|
||||
|
||||
// An autocomplete trigger: either a literal string or a regex pattern.
// Only regex-type triggers get the extra validation below.
export const triggerSchema = z.object({
  type: z.enum(['string', 'regex']),
  value: z.string().min(1, { message: "Trigger value must be at least 1 character long" })
}).strict().superRefine((trigger, ctx) => {
  if (trigger.type === "regex") {
    // Regex triggers must be end-anchored.
    if (!trigger.value.endsWith("$")) {
      ctx.addIssue({
        code: z.ZodIssueCode.custom,
        message: "Regex triggers must end with a $.",
        path: ["value"],
      });
    }
    // And must compile as a valid regular expression.
    if (!isRegexValid(trigger.value)) {
      ctx.addIssue({
        code: z.ZodIssueCode.custom,
        message: `Invalid regex: "${trigger.value}"`,
        path: ["value"],
      });
    }
  }
});
|
||||
|
||||
const InfioSettingsSchema = z.object({
|
||||
// Version
|
||||
version: z.literal(SETTINGS_SCHEMA_VERSION).catch(SETTINGS_SCHEMA_VERSION),
|
||||
|
||||
// activeModels
|
||||
activeModels: z.array(
|
||||
z.object({
|
||||
name: z.string(),
|
||||
provider: z.string(),
|
||||
enabled: z.boolean(),
|
||||
isEmbeddingModel: z.boolean(),
|
||||
isBuiltIn: z.boolean(),
|
||||
apiKey: z.string().optional(),
|
||||
baseUrl: z.string().optional(),
|
||||
dimension: z.number().optional(),
|
||||
})
|
||||
).catch(DEFAULT_MODELS),
|
||||
|
||||
// API Keys
|
||||
infioApiKey: z.string().catch(''),
|
||||
openAIApiKey: z.string().catch(''),
|
||||
anthropicApiKey: z.string().catch(''),
|
||||
geminiApiKey: z.string().catch(''),
|
||||
groqApiKey: z.string().catch(''),
|
||||
deepseekApiKey: z.string().catch(''),
|
||||
|
||||
// DEFAULT Chat Model
|
||||
chatModelId: z.string().catch('deepseek-chat'),
|
||||
ollamaChatModel: ollamaModelSchema.catch({
|
||||
baseUrl: '',
|
||||
model: '',
|
||||
}),
|
||||
openAICompatibleChatModel: openAICompatibleModelSchema.catch({
|
||||
baseUrl: '',
|
||||
apiKey: '',
|
||||
model: '',
|
||||
}),
|
||||
|
||||
// DEFAULT Apply Model
|
||||
applyModelId: z.string().catch('deepseek-chat'),
|
||||
ollamaApplyModel: ollamaModelSchema.catch({
|
||||
baseUrl: '',
|
||||
model: '',
|
||||
}),
|
||||
openAICompatibleApplyModel: openAICompatibleModelSchema.catch({
|
||||
baseUrl: '',
|
||||
apiKey: '',
|
||||
model: '',
|
||||
}),
|
||||
|
||||
// DEFAULT Embedding Model
|
||||
embeddingModelId: z.string().catch(
|
||||
'text-embedding-004',
|
||||
),
|
||||
ollamaEmbeddingModel: ollamaModelSchema.catch({
|
||||
baseUrl: '',
|
||||
model: '',
|
||||
}),
|
||||
|
||||
// System Prompt
|
||||
systemPrompt: z.string().catch(''),
|
||||
|
||||
// RAG Options
|
||||
ragOptions: ragOptionsSchema.catch({
|
||||
chunkSize: 1000,
|
||||
thresholdTokens: 8192,
|
||||
minSimilarity: 0.0,
|
||||
limit: 10,
|
||||
excludePatterns: [],
|
||||
includePatterns: [],
|
||||
}),
|
||||
|
||||
// autocomplete options
|
||||
autocompleteEnabled: z.boolean(),
|
||||
advancedMode: z.boolean(),
|
||||
apiProvider: z.enum(['azure', 'openai', "ollama"]),
|
||||
azureOAIApiSettings: z.string().catch(''),
|
||||
openAIApiSettings: z.string().catch(''),
|
||||
ollamaApiSettings: z.string().catch(''),
|
||||
triggers: z.array(triggerSchema),
|
||||
delay: z.number().int().min(MIN_DELAY, { message: "Delay must be between 0ms and 2000ms" }).max(MAX_DELAY, { message: "Delay must be between 0ms and 2000ms" }),
|
||||
modelOptions: modelOptionsSchema,
|
||||
systemMessage: z.string().min(3, { message: "System message must be at least 3 characters long" }),
|
||||
fewShotExamples: z.array(fewShotExampleSchema),
|
||||
userMessageTemplate: z.string().min(3, { message: "User message template must be at least 3 characters long" }),
|
||||
chainOfThoughRemovalRegex: z.string().refine((regex) => isRegexValid(regex), { message: "Invalid regex" }),
|
||||
dontIncludeDataviews: z.boolean(),
|
||||
maxPrefixCharLimit: z.number().int().min(MIN_MAX_CHAR_LIMIT, { message: `Max prefix char limit must be at least ${MIN_MAX_CHAR_LIMIT}` }).max(MAX_MAX_CHAR_LIMIT, { message: `Max prefix char limit must be at most ${MAX_MAX_CHAR_LIMIT}` }),
|
||||
maxSuffixCharLimit: z.number().int().min(MIN_MAX_CHAR_LIMIT, { message: `Max prefix char limit must be at least ${MIN_MAX_CHAR_LIMIT}` }).max(MAX_MAX_CHAR_LIMIT, { message: `Max prefix char limit must be at most ${MAX_MAX_CHAR_LIMIT}` }),
|
||||
removeDuplicateMathBlockIndicator: z.boolean(),
|
||||
removeDuplicateCodeBlockIndicator: z.boolean(),
|
||||
ignoredFilePatterns: z.string().refine((value) => value
|
||||
.split("\n")
|
||||
.filter(s => s.trim().length > 0)
|
||||
.filter(s => !isValidIgnorePattern(s)).length === 0,
|
||||
{ message: "Invalid ignore pattern" }
|
||||
),
|
||||
ignoredTags: z.string().refine((value) => value
|
||||
.split("\n")
|
||||
.filter(s => s.includes(" ")).length === 0, { message: "Tags cannot contain spaces" }
|
||||
).refine((value) => value
|
||||
.split("\n")
|
||||
.filter(s => s.includes("#")).length === 0, { message: "Enter tags without the # symbol" }
|
||||
).refine((value) => value
|
||||
.split("\n")
|
||||
.filter(s => s.includes(",")).length === 0, { message: "Enter each tag on a new line without commas" }
|
||||
),
|
||||
cacheSuggestions: z.boolean(),
|
||||
debugMode: z.boolean(),
|
||||
})
|
||||
|
||||
export type InfioSettings = z.infer<typeof InfioSettingsSchema>
|
||||
|
||||
// A single settings migration step from one schema version to the next.
type Migration = {
  fromVersion: number
  toVersion: number
  // Pure transform over the raw (pre-validation) settings object.
  migrate: (data: Record<string, unknown>) => Record<string, unknown>
}
|
||||
|
||||
// Settings migrations, applied in array order by migrateSettings().
const MIGRATIONS: Migration[] = [
  {
    fromVersion: 0,
    toVersion: 1,
    migrate: (data) => {
      const newData = { ...data }
      // v0 kept a single shared `ollamaBaseUrl`; v1 splits it into
      // per-purpose (chat / apply / embedding) Ollama model configs.
      if (
        'ollamaBaseUrl' in newData &&
        typeof newData.ollamaBaseUrl === 'string'
      ) {
        newData.ollamaChatModel = {
          baseUrl: newData.ollamaBaseUrl,
          model: '',
        }
        newData.ollamaApplyModel = {
          baseUrl: newData.ollamaBaseUrl,
          model: '',
        }
        newData.ollamaEmbeddingModel = {
          baseUrl: newData.ollamaBaseUrl,
          model: '',
        }
        delete newData.ollamaBaseUrl
      }

      return newData
    },
  },
]
|
||||
|
||||
function migrateSettings(
|
||||
data: Record<string, unknown>,
|
||||
): Record<string, unknown> {
|
||||
let currentData = { ...data }
|
||||
const currentVersion = (currentData.version as number) ?? 0
|
||||
|
||||
for (const migration of MIGRATIONS) {
|
||||
if (
|
||||
currentVersion >= migration.fromVersion &&
|
||||
currentVersion < migration.toVersion &&
|
||||
migration.toVersion <= SETTINGS_SCHEMA_VERSION
|
||||
) {
|
||||
console.log(
|
||||
`Migrating settings from ${migration.fromVersion} to ${migration.toVersion}`,
|
||||
)
|
||||
currentData = migration.migrate(currentData)
|
||||
}
|
||||
}
|
||||
|
||||
return currentData
|
||||
}
|
||||
|
||||
// Parses possibly-stale or invalid persisted data into InfioSettings:
// runs version migrations first, then validates with the zod schema.
export function parseInfioSettings(data: unknown): InfioSettings {
  try {
    const migratedData = migrateSettings(data as Record<string, unknown>)
    return InfioSettingsSchema.parse(migratedData)
  } catch (error) {
    // Schema fields without `.catch()` (the autocomplete options) can make
    // parse throw; fall back to the bundled defaults in that case.
    console.warn('Invalid settings provided, using defaults:', error)
    return InfioSettingsSchema.parse({ ...DEFAULT_AUTOCOMPLETE_SETTINGS })
  }
}
|
||||
Reference in New Issue
Block a user