fix ts check

This commit is contained in:
duanfuxiang
2025-01-05 21:14:35 +08:00
parent 0c7ee142cb
commit 5465d5fca3
46 changed files with 11974 additions and 91 deletions

View File

@@ -108,9 +108,9 @@ class AutoComplete implements AutocompleteService {
groq: settings.groqApiKey,
infio: settings.infioApiKey,
})
const model: CustomLLMModel = settings.activeModels.find(
const model = settings.activeModels.find(
(option) => option.name === settings.chatModelId,
)
) as CustomLLMModel;
const llm = new LLMClient(llm_manager, model);
return new AutoComplete(

View File

@@ -1,8 +1,8 @@
import { Settings } from "../../../settings/versions";
import { extractNextWordAndRemaining } from "../utils";
import EventListener from "../../../event-listener";
import { DocumentChanges } from "../../../render-plugin/document-changes-listener";
import { InfioSettings } from "../../../types/settings";
import { extractNextWordAndRemaining } from "../utils";
import State from "./state";
@@ -166,7 +166,7 @@ class SuggestingState extends State {
return `Suggesting for ${this.context.context}`;
}
handleSettingChanged(settings: Settings): void {
handleSettingChanged(settings: InfioSettings ): void {
if (!settings.cacheSuggestions) {
this.clearPrediction();
}

View File

@@ -295,7 +295,7 @@ export class AnthropicProvider implements BaseLLMProvider {
`Anthropic only supports string content for system messages`,
)
}
return systemMessage
return systemMessage as string
}
private static isMessageEmpty(message: RequestMessage) {

View File

@@ -4,6 +4,7 @@ import {
GenerateContentResult,
GenerateContentStreamResult,
GoogleGenerativeAI,
Part,
} from '@google/generative-ai'
import { CustomLLMModel } from '../../types/llm/model'
@@ -207,7 +208,7 @@ export class GeminiProvider implements BaseLLMProvider {
}
}
}
}),
}) as Part[],
}
}

View File

@@ -1,3 +1,4 @@
// @ts-nocheck
/**
* This provider is nearly identical to OpenAICompatibleProvider, but uses a custom OpenAI client
* (NoStainlessOpenAI) to work around CORS issues specific to Ollama.