simple model config
@@ -6,7 +6,7 @@ import {
   TextBlockParam,
 } from '@anthropic-ai/sdk/resources/messages'

-import { CustomLLMModel } from '../../types/llm/model'
+import { LLMModel } from '../../types/llm/model'
 import {
   LLMOptions,
   LLMRequestNonStreaming,
@@ -36,21 +36,14 @@ export class AnthropicProvider implements BaseLLMProvider {
   }

   async generateResponse(
-    model: CustomLLMModel,
+    model: LLMModel,
     request: LLMRequestNonStreaming,
     options?: LLMOptions,
   ): Promise<LLMResponseNonStreaming> {
     if (!this.client.apiKey) {
-      if (!model.apiKey) {
-        throw new LLMAPIKeyNotSetException(
-          'Anthropic API key is missing. Please set it in settings menu.',
-        )
-      }
-      this.client = new Anthropic({
-        baseURL: model.baseUrl,
-        apiKey: model.apiKey,
-        dangerouslyAllowBrowser: true
-      })
+      throw new LLMAPIKeyNotSetException(
+        'Anthropic API key is missing. Please set it in settings menu.',
+      )
     }

     const systemMessage = AnthropicProvider.validateSystemMessages(
@@ -89,21 +82,14 @@ export class AnthropicProvider implements BaseLLMProvider {
   }

   async streamResponse(
-    model: CustomLLMModel,
+    model: LLMModel,
     request: LLMRequestStreaming,
     options?: LLMOptions,
   ): Promise<AsyncIterable<LLMResponseStreaming>> {
     if (!this.client.apiKey) {
-      if (!model.apiKey) {
-        throw new LLMAPIKeyNotSetException(
-          'Anthropic API key is missing. Please set it in settings menu.',
-        )
-      }
-      this.client = new Anthropic({
-        baseURL: model.baseUrl,
-        apiKey: model.apiKey,
-        dangerouslyAllowBrowser: true
-      })
+      throw new LLMAPIKeyNotSetException(
+        'Anthropic API key is missing. Please set it in settings menu.',
+      )
     }

     const systemMessage = AnthropicProvider.validateSystemMessages(
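Note on the pattern: the deleted branch lazily rebuilt the Anthropic client from per-model apiKey/baseUrl fields; after this commit the client injected at construction is the single source of credentials, and the guard simply throws. A hypothetical call site under the new signature (any LLMModel field other than provider is an assumption, not shown in this diff):

const provider = new AnthropicProvider(settings.anthropicProvider.apiKey)
const response = await provider.generateResponse(
  { provider: ApiProvider.Anthropic, modelId: 'claude-3-5-sonnet' }, // modelId is hypothetical
  request,
)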
@@ -1,4 +1,4 @@
-import { CustomLLMModel } from '../../types/llm/model'
+import { LLMModel } from '../../types/llm/model'
 import {
   LLMOptions,
   LLMRequestNonStreaming,
@@ -11,12 +11,12 @@ import {

 export type BaseLLMProvider = {
   generateResponse(
-    model: CustomLLMModel,
+    model: LLMModel,
     request: LLMRequestNonStreaming,
     options?: LLMOptions,
   ): Promise<LLMResponseNonStreaming>
   streamResponse(
-    model: CustomLLMModel,
+    model: LLMModel,
     request: LLMRequestStreaming,
     options?: LLMOptions,
   ): Promise<AsyncIterable<LLMResponseStreaming>>
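Assembled from the + lines above, the shared provider contract now reads:

export type BaseLLMProvider = {
  generateResponse(
    model: LLMModel,
    request: LLMRequestNonStreaming,
    options?: LLMOptions,
  ): Promise<LLMResponseNonStreaming>
  streamResponse(
    model: LLMModel,
    request: LLMRequestStreaming,
    options?: LLMOptions,
  ): Promise<AsyncIterable<LLMResponseStreaming>>
}

Every provider below (Gemini, Groq, Infio, Ollama, OpenAI-compatible, OpenAI) is updated against this same signature; the Infio provider drops the unused options parameter, which TypeScript's structural typing still accepts against this contract.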
@@ -7,7 +7,7 @@ import {
   Part,
 } from '@google/generative-ai'

-import { CustomLLMModel } from '../../types/llm/model'
+import { LLMModel } from '../../types/llm/model'
 import {
   LLMOptions,
   LLMRequestNonStreaming,
@@ -43,18 +43,14 @@ export class GeminiProvider implements BaseLLMProvider {
   }

   async generateResponse(
-    model: CustomLLMModel,
+    model: LLMModel,
     request: LLMRequestNonStreaming,
     options?: LLMOptions,
   ): Promise<LLMResponseNonStreaming> {
     if (!this.apiKey) {
-      if (!model.apiKey) {
-        throw new LLMAPIKeyNotSetException(
-          `Gemini API key is missing. Please set it in settings menu.`,
-        )
-      }
-      this.apiKey = model.apiKey
-      this.client = new GoogleGenerativeAI(model.apiKey)
+      throw new LLMAPIKeyNotSetException(
+        `Gemini API key is missing. Please set it in settings menu.`,
+      )
     }

     const systemMessages = request.messages.filter((m) => m.role === 'system')
@@ -110,18 +106,14 @@ export class GeminiProvider implements BaseLLMProvider {
   }

   async streamResponse(
-    model: CustomLLMModel,
+    model: LLMModel,
     request: LLMRequestStreaming,
     options?: LLMOptions,
   ): Promise<AsyncIterable<LLMResponseStreaming>> {
     if (!this.apiKey) {
-      if (!model.apiKey) {
-        throw new LLMAPIKeyNotSetException(
-          `Gemini API key is missing. Please set it in settings menu.`,
-        )
-      }
-      this.apiKey = model.apiKey
-      this.client = new GoogleGenerativeAI(model.apiKey)
+      throw new LLMAPIKeyNotSetException(
+        `Gemini API key is missing. Please set it in settings menu.`,
+      )
     }

     const systemMessages = request.messages.filter((m) => m.role === 'system')
@@ -6,7 +6,7 @@ import {
   ChatCompletionMessageParam,
 } from 'groq-sdk/resources/chat/completions'

-import { CustomLLMModel } from '../../types/llm/model'
+import { LLMModel } from '../../types/llm/model'
 import {
   LLMOptions,
   LLMRequestNonStreaming,
@@ -35,20 +35,14 @@ export class GroqProvider implements BaseLLMProvider {
   }

   async generateResponse(
-    model: CustomLLMModel,
+    model: LLMModel,
     request: LLMRequestNonStreaming,
     options?: LLMOptions,
   ): Promise<LLMResponseNonStreaming> {
     if (!this.client.apiKey) {
-      if (!model.apiKey) {
-        throw new LLMAPIKeyNotSetException(
-          'Groq API key is missing. Please set it in settings menu.',
-        )
-      }
-      this.client = new Groq({
-        apiKey: model.apiKey,
-        dangerouslyAllowBrowser: true,
-      })
+      throw new LLMAPIKeyNotSetException(
+        'Groq API key is missing. Please set it in settings menu.',
+      )
     }

     try {
@@ -78,20 +72,14 @@ export class GroqProvider implements BaseLLMProvider {
   }

   async streamResponse(
-    model: CustomLLMModel,
+    model: LLMModel,
     request: LLMRequestStreaming,
     options?: LLMOptions,
   ): Promise<AsyncIterable<LLMResponseStreaming>> {
     if (!this.client.apiKey) {
-      if (!model.apiKey) {
-        throw new LLMAPIKeyNotSetException(
-          'Groq API key is missing. Please set it in settings menu.',
-        )
-      }
-      this.client = new Groq({
-        apiKey: model.apiKey,
-        dangerouslyAllowBrowser: true,
-      })
+      throw new LLMAPIKeyNotSetException(
+        'Groq API key is missing. Please set it in settings menu.',
+      )
     }

     try {
@@ -4,12 +4,12 @@ import {
   ChatCompletionChunk,
 } from 'openai/resources/chat/completions'

-import { CustomLLMModel } from '../../types/llm/model'
+import { INFIO_BASE_URL } from '../../constants'
+import { LLMModel } from '../../types/llm/model'
 import {
   LLMOptions,
   LLMRequestNonStreaming,
   LLMRequestStreaming,
-  RequestMessage,
+  RequestMessage
 } from '../../types/llm/request'
 import {
   LLMResponseNonStreaming,
@@ -85,13 +85,13 @@ export class InfioProvider implements BaseLLMProvider {
     // this.client = new OpenAI({ apiKey, dangerouslyAllowBrowser: true })
     // this.adapter = new OpenAIMessageAdapter()
     this.apiKey = apiKey
-    this.baseUrl = 'https://api.infio.com/api/raw_message'
+    this.baseUrl = INFIO_BASE_URL
   }

   async generateResponse(
-    model: CustomLLMModel,
+    model: LLMModel,
     request: LLMRequestNonStreaming,
-    options?: LLMOptions,
+    // options?: LLMOptions,
   ): Promise<LLMResponseNonStreaming> {
     if (!this.apiKey) {
       throw new LLMAPIKeyNotSetException(
@@ -107,7 +107,7 @@ export class InfioProvider implements BaseLLMProvider {
       presence_penalty: request.presence_penalty,
       max_tokens: request.max_tokens,
     }
-    const options = {
+    const req_options = {
       method: 'POST',
       headers: {
         Authorization: this.apiKey,
@@ -117,7 +117,7 @@ export class InfioProvider implements BaseLLMProvider {
       body: JSON.stringify(req)
     };

-    const response = await fetch(this.baseUrl, options);
+    const response = await fetch(this.baseUrl, req_options);
     if (!response.ok) {
       throw new Error(`HTTP error! status: ${response.status}`);
     }
@@ -134,9 +134,8 @@ export class InfioProvider implements BaseLLMProvider {
   }

   async streamResponse(
-    model: CustomLLMModel,
+    model: LLMModel,
     request: LLMRequestStreaming,
-    options?: LLMOptions,
   ): Promise<AsyncIterable<LLMResponseStreaming>> {
     if (!this.apiKey) {
       throw new LLMAPIKeyNotSetException(
@@ -154,7 +153,7 @@ export class InfioProvider implements BaseLLMProvider {
       presence_penalty: request.presence_penalty,
       max_tokens: request.max_tokens,
     }
-    const options = {
+    const req_options = {
       method: 'POST',
       headers: {
         Authorization: this.apiKey,
@@ -164,7 +163,7 @@ export class InfioProvider implements BaseLLMProvider {
       body: JSON.stringify(req)
     };

-    const response = await fetch(this.baseUrl, options);
+    const response = await fetch(this.baseUrl, req_options);
     if (!response.ok) {
       throw new Error(`HTTP error! status: ${response.status}`);
     }
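The const options to req_options rename in the hunks above removes the collision with the options?: LLMOptions parameter (now commented out or dropped); the resulting request path, reassembled from the + lines with elided headers left as-is:

const req_options = {
  method: 'POST',
  headers: {
    Authorization: this.apiKey,
    // ... remaining headers not shown in this capture
  },
  body: JSON.stringify(req)
};
const response = await fetch(this.baseUrl, req_options);
if (!response.ok) {
  throw new Error(`HTTP error! status: ${response.status}`);
}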
@@ -1,14 +1,15 @@
-import { DEEPSEEK_BASE_URL } from '../../constants'
-import { CustomLLMModel } from '../../types/llm/model'
+import { ALIBABA_QWEN_BASE_URL, DEEPSEEK_BASE_URL, OPENROUTER_BASE_URL, SILICONFLOW_BASE_URL } from '../../constants'
+import { ApiProvider, LLMModel } from '../../types/llm/model'
 import {
   LLMOptions,
   LLMRequestNonStreaming,
   LLMRequestStreaming,
 } from '../../types/llm/request'
 import {
   LLMResponseNonStreaming,
   LLMResponseStreaming,
 } from '../../types/llm/response'
+import { InfioSettings } from '../../types/settings'

 import { AnthropicProvider } from './anthropic'
 import { GeminiProvider } from './gemini'
@@ -20,123 +21,147 @@ import { OpenAICompatibleProvider } from './openai-compatible-provider'

 export type LLMManagerInterface = {
   generateResponse(
-    model: CustomLLMModel,
+    model: LLMModel,
     request: LLMRequestNonStreaming,
     options?: LLMOptions,
   ): Promise<LLMResponseNonStreaming>
   streamResponse(
-    model: CustomLLMModel,
+    model: LLMModel,
     request: LLMRequestStreaming,
     options?: LLMOptions,
   ): Promise<AsyncIterable<LLMResponseStreaming>>
 }

 class LLMManager implements LLMManagerInterface {
   private openaiProvider: OpenAIAuthenticatedProvider
   private deepseekProvider: OpenAICompatibleProvider
   private anthropicProvider: AnthropicProvider
   private googleProvider: GeminiProvider
   private groqProvider: GroqProvider
   private infioProvider: InfioProvider
+  private openrouterProvider: OpenAICompatibleProvider
+  private siliconflowProvider: OpenAICompatibleProvider
+  private alibabaQwenProvider: OpenAICompatibleProvider
   private ollamaProvider: OllamaProvider
   private isInfioEnabled: boolean

-  constructor(apiKeys: {
-    deepseek?: string
-    openai?: string
-    anthropic?: string
-    gemini?: string
-    groq?: string
-    infio?: string
-  }) {
-    this.deepseekProvider = new OpenAICompatibleProvider(apiKeys.deepseek ?? '', DEEPSEEK_BASE_URL)
-    this.openaiProvider = new OpenAIAuthenticatedProvider(apiKeys.openai ?? '')
-    this.anthropicProvider = new AnthropicProvider(apiKeys.anthropic ?? '')
-    this.googleProvider = new GeminiProvider(apiKeys.gemini ?? '')
-    this.groqProvider = new GroqProvider(apiKeys.groq ?? '')
-    this.infioProvider = new InfioProvider(apiKeys.infio ?? '')
-    this.ollamaProvider = new OllamaProvider()
-    this.isInfioEnabled = !!apiKeys.infio
-  }
+  constructor(settings: InfioSettings) {
+    this.infioProvider = new InfioProvider(settings.infioProvider.apiKey)
+    this.openrouterProvider = new OpenAICompatibleProvider(settings.openrouterProvider.apiKey, OPENROUTER_BASE_URL)
+    this.siliconflowProvider = new OpenAICompatibleProvider(settings.siliconflowProvider.apiKey, SILICONFLOW_BASE_URL)
+    this.alibabaQwenProvider = new OpenAICompatibleProvider(settings.alibabaQwenProvider.apiKey, ALIBABA_QWEN_BASE_URL)
+    this.deepseekProvider = new OpenAICompatibleProvider(settings.deepseekProvider.apiKey, DEEPSEEK_BASE_URL)
+    this.openaiProvider = new OpenAIAuthenticatedProvider(settings.openaiProvider.apiKey)
+    this.anthropicProvider = new AnthropicProvider(settings.anthropicProvider.apiKey)
+    this.googleProvider = new GeminiProvider(settings.googleProvider.apiKey)
+    this.groqProvider = new GroqProvider(settings.groqProvider.apiKey)
+    this.ollamaProvider = new OllamaProvider(settings.groqProvider.baseUrl)
+    this.isInfioEnabled = !!settings.infioProvider.apiKey
+  }

-  async generateResponse(
-    model: CustomLLMModel,
-    request: LLMRequestNonStreaming,
-    options?: LLMOptions,
-  ): Promise<LLMResponseNonStreaming> {
-    if (this.isInfioEnabled) {
-      return await this.infioProvider.generateResponse(
-        model,
-        request,
-        options,
-      )
-    }
-    // use custom provider
-    switch (model.provider) {
-      case 'deepseek':
-        return await this.deepseekProvider.generateResponse(
-          model,
-          request,
-          options,
-        )
-      case 'openai':
-        return await this.openaiProvider.generateResponse(
-          model,
-          request,
-          options,
-        )
-      case 'anthropic':
-        return await this.anthropicProvider.generateResponse(
-          model,
-          request,
-          options,
-        )
-      case 'google':
-        return await this.googleProvider.generateResponse(
-          model,
-          request,
-          options,
-        )
-      case 'groq':
-        return await this.groqProvider.generateResponse(model, request, options)
-      case 'ollama':
-        return await this.ollamaProvider.generateResponse(
-          model,
-          request,
-          options,
-        )
-    }
-  }
+  async generateResponse(
+    model: LLMModel,
+    request: LLMRequestNonStreaming,
+    options?: LLMOptions,
+  ): Promise<LLMResponseNonStreaming> {
+    if (this.isInfioEnabled) {
+      return await this.infioProvider.generateResponse(
+        model,
+        request,
+      )
+    }
+    // use custom provider
+    switch (model.provider) {
+      case ApiProvider.OpenRouter:
+        return await this.openrouterProvider.generateResponse(
+          model,
+          request,
+          options,
+        )
+      case ApiProvider.SiliconFlow:
+        return await this.siliconflowProvider.generateResponse(
+          model,
+          request,
+          options,
+        )
+      case ApiProvider.AlibabaQwen:
+        return await this.alibabaQwenProvider.generateResponse(
+          model,
+          request,
+          options,
+        )
+      case ApiProvider.Deepseek:
+        return await this.deepseekProvider.generateResponse(
+          model,
+          request,
+          options,
+        )
+      case ApiProvider.OpenAI:
+        return await this.openaiProvider.generateResponse(
+          model,
+          request,
+          options,
+        )
+      case ApiProvider.Anthropic:
+        return await this.anthropicProvider.generateResponse(
+          model,
+          request,
+          options,
+        )
+      case ApiProvider.Google:
+        return await this.googleProvider.generateResponse(
+          model,
+          request,
+          options,
+        )
+      case ApiProvider.Groq:
+        return await this.groqProvider.generateResponse(model, request, options)
+      case ApiProvider.Ollama:
+        return await this.ollamaProvider.generateResponse(
+          model,
+          request,
+          options,
+        )
+      default:
+        throw new Error(`Unsupported model provider: ${model.provider}`)
+    }
+  }

-  async streamResponse(
-    model: CustomLLMModel,
-    request: LLMRequestStreaming,
-    options?: LLMOptions,
-  ): Promise<AsyncIterable<LLMResponseStreaming>> {
-    if (this.isInfioEnabled) {
-      return await this.infioProvider.streamResponse(model, request, options)
-    }
-    // use custom provider
-    switch (model.provider) {
-      case 'deepseek':
-        return await this.deepseekProvider.streamResponse(model, request, options)
-      case 'openai':
-        return await this.openaiProvider.streamResponse(model, request, options)
-      case 'anthropic':
-        return await this.anthropicProvider.streamResponse(
-          model,
-          request,
-          options,
-        )
-      case 'google':
-        return await this.googleProvider.streamResponse(model, request, options)
-      case 'groq':
-        return await this.groqProvider.streamResponse(model, request, options)
-      case 'ollama':
-        return await this.ollamaProvider.streamResponse(model, request, options)
-    }
-  }
+  async streamResponse(
+    model: LLMModel,
+    request: LLMRequestStreaming,
+    options?: LLMOptions,
+  ): Promise<AsyncIterable<LLMResponseStreaming>> {
+    if (this.isInfioEnabled) {
+      return await this.infioProvider.streamResponse(model, request)
+    }
+    // use custom provider
+    switch (model.provider) {
+      case ApiProvider.OpenRouter:
+        return await this.openrouterProvider.streamResponse(model, request, options)
+      case ApiProvider.SiliconFlow:
+        return await this.siliconflowProvider.streamResponse(model, request, options)
+      case ApiProvider.AlibabaQwen:
+        return await this.alibabaQwenProvider.streamResponse(model, request, options)
+      case ApiProvider.Deepseek:
+        return await this.deepseekProvider.streamResponse(model, request, options)
+      case ApiProvider.OpenAI:
+        return await this.openaiProvider.streamResponse(model, request, options)
+      case ApiProvider.Anthropic:
+        return await this.anthropicProvider.streamResponse(
+          model,
+          request,
+          options,
+        )
+      case ApiProvider.Google:
+        return await this.googleProvider.streamResponse(model, request, options)
+      case ApiProvider.Groq:
+        return await this.groqProvider.streamResponse(model, request, options)
+      case ApiProvider.Ollama:
+        return await this.ollamaProvider.streamResponse(model, request, options)
+    }
+  }
 }

 export default LLMManager
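With the constructor keyed off InfioSettings, wiring up the manager is a single call; a sketch of a call site (assuming an InfioSettings value carrying the per-provider apiKey/baseUrl fields referenced above — its full shape is not part of this diff):

const manager: LLMManagerInterface = new LLMManager(settings)
const stream = await manager.streamResponse(model, request) // model.provider selects the branch above
for await (const chunk of stream) {
  // handle each LLMResponseStreaming chunk
}

One detail worth double-checking: new OllamaProvider(settings.groqProvider.baseUrl) reads the Ollama base URL from the Groq settings block.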
@@ -7,7 +7,7 @@
 import OpenAI from 'openai'
 import { FinalRequestOptions } from 'openai/core'

-import { CustomLLMModel } from '../../types/llm/model'
+import { LLMModel } from '../../types/llm/model'
 import {
   LLMOptions,
   LLMRequestNonStreaming,
@@ -19,7 +19,7 @@ import {
 } from '../../types/llm/response'

 import { BaseLLMProvider } from './base'
-import { LLMBaseUrlNotSetException, LLMModelNotSetException } from './exception'
+import { LLMBaseUrlNotSetException } from './exception'
 import { OpenAIMessageAdapter } from './openai-message-adapter'

 export class NoStainlessOpenAI extends OpenAI {
@@ -35,7 +35,7 @@ export class NoStainlessOpenAI extends OpenAI {
     { retryCount = 0 }: { retryCount?: number } = {},
   ): { req: RequestInit; url: string; timeout: number } {
     const req = super.buildRequest(options, { retryCount })
-    const headers = req.req.headers as Record<string, string>
+    const headers: Record<string, string> = req.req.headers
     Object.keys(headers).forEach((k) => {
       if (k.startsWith('x-stainless')) {
         // eslint-disable-next-line @typescript-eslint/no-dynamic-delete
@@ -48,30 +48,26 @@ export class NoStainlessOpenAI extends OpenAI {

 export class OllamaProvider implements BaseLLMProvider {
   private adapter: OpenAIMessageAdapter
+  private baseUrl: string

-  constructor() {
+  constructor(baseUrl: string) {
     this.adapter = new OpenAIMessageAdapter()
+    this.baseUrl = baseUrl
   }

   async generateResponse(
-    model: CustomLLMModel,
+    model: LLMModel,
     request: LLMRequestNonStreaming,
     options?: LLMOptions,
   ): Promise<LLMResponseNonStreaming> {
-    if (!model.baseUrl) {
+    if (!this.baseUrl) {
       throw new LLMBaseUrlNotSetException(
         'Ollama base URL is missing. Please set it in settings menu.',
       )
     }

-    if (!model.name) {
-      throw new LLMModelNotSetException(
-        'Ollama model is missing. Please set it in settings menu.',
-      )
-    }
-
     const client = new NoStainlessOpenAI({
-      baseURL: `${model.baseUrl}/v1`,
+      baseURL: `${this.baseUrl}/v1`,
       apiKey: '',
       dangerouslyAllowBrowser: true,
     })
@@ -79,24 +75,18 @@ export class OllamaProvider implements BaseLLMProvider {
   }

   async streamResponse(
-    model: CustomLLMModel,
+    model: LLMModel,
     request: LLMRequestStreaming,
     options?: LLMOptions,
   ): Promise<AsyncIterable<LLMResponseStreaming>> {
-    if (!model.baseUrl) {
+    if (!this.baseUrl) {
       throw new LLMBaseUrlNotSetException(
         'Ollama base URL is missing. Please set it in settings menu.',
       )
     }

-    if (!model.name) {
-      throw new LLMModelNotSetException(
-        'Ollama model is missing. Please set it in settings menu.',
-      )
-    }
-
     const client = new NoStainlessOpenAI({
-      baseURL: `${model.baseUrl}/v1`,
+      baseURL: `${this.baseUrl}/v1`,
       apiKey: '',
       dangerouslyAllowBrowser: true,
     })
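NoStainlessOpenAI exists because the openai SDK attaches x-stainless-* telemetry headers that local OpenAI-compatible servers such as Ollama commonly reject during browser CORS preflight; the buildRequest override deletes them from every outgoing request. The provider instantiates it per call exactly as in the + lines above:

const client = new NoStainlessOpenAI({
  baseURL: `${this.baseUrl}/v1`, // e.g. http://localhost:11434/v1 for a default Ollama install
  apiKey: '',
  dangerouslyAllowBrowser: true,
})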
@@ -1,6 +1,6 @@
 import OpenAI from 'openai'

-import { CustomLLMModel } from '../../types/llm/model'
+import { LLMModel } from '../../types/llm/model'
 import {
   LLMOptions,
   LLMRequestNonStreaming,
@@ -33,7 +33,7 @@ export class OpenAICompatibleProvider implements BaseLLMProvider {
   }

   async generateResponse(
-    model: CustomLLMModel,
+    model: LLMModel,
     request: LLMRequestNonStreaming,
     options?: LLMOptions,
   ): Promise<LLMResponseNonStreaming> {
@@ -47,7 +47,7 @@ export class OpenAICompatibleProvider implements BaseLLMProvider {
   }

   async streamResponse(
-    model: CustomLLMModel,
+    model: LLMModel,
     request: LLMRequestStreaming,
     options?: LLMOptions,
   ): Promise<AsyncIterable<LLMResponseStreaming>> {
@@ -1,91 +1,77 @@
 import OpenAI from 'openai'

-import { CustomLLMModel } from '../../types/llm/model'
+import { LLMModel } from '../../types/llm/model'
 import {
   LLMOptions,
   LLMRequestNonStreaming,
   LLMRequestStreaming,
 } from '../../types/llm/request'
 import {
   LLMResponseNonStreaming,
   LLMResponseStreaming,
 } from '../../types/llm/response'

 import { BaseLLMProvider } from './base'
 import {
   LLMAPIKeyInvalidException,
   LLMAPIKeyNotSetException,
 } from './exception'
 import { OpenAIMessageAdapter } from './openai-message-adapter'

 export class OpenAIAuthenticatedProvider implements BaseLLMProvider {
   private adapter: OpenAIMessageAdapter
   private client: OpenAI

   constructor(apiKey: string) {
     this.client = new OpenAI({
       apiKey,
       dangerouslyAllowBrowser: true,
     })
     this.adapter = new OpenAIMessageAdapter()
   }

   async generateResponse(
-    model: CustomLLMModel,
+    model: LLMModel,
     request: LLMRequestNonStreaming,
     options?: LLMOptions,
   ): Promise<LLMResponseNonStreaming> {
     if (!this.client.apiKey) {
-      if (!model.baseUrl) {
-        throw new LLMAPIKeyNotSetException(
-          'OpenAI API key is missing. Please set it in settings menu.',
-        )
-      }
-      this.client = new OpenAI({
-        apiKey: model.apiKey,
-        baseURL: model.baseUrl,
-        dangerouslyAllowBrowser: true,
-      })
+      throw new LLMAPIKeyNotSetException(
+        'OpenAI API key is missing. Please set it in settings menu.',
+      )
     }
     try {
       return this.adapter.generateResponse(this.client, request, options)
     } catch (error) {
       if (error instanceof OpenAI.AuthenticationError) {
         throw new LLMAPIKeyInvalidException(
           'OpenAI API key is invalid. Please update it in settings menu.',
         )
       }
       throw error
     }
   }

   async streamResponse(
-    model: CustomLLMModel,
+    model: LLMModel,
     request: LLMRequestStreaming,
     options?: LLMOptions,
   ): Promise<AsyncIterable<LLMResponseStreaming>> {
     if (!this.client.apiKey) {
-      if (!model.baseUrl) {
-        throw new LLMAPIKeyNotSetException(
-          'OpenAI API key is missing. Please set it in settings menu.',
-        )
-      }
-      this.client = new OpenAI({
-        apiKey: model.apiKey,
-        baseURL: model.baseUrl,
-        dangerouslyAllowBrowser: true,
-      })
+      throw new LLMAPIKeyNotSetException(
+        'OpenAI API key is missing. Please set it in settings menu.',
+      )
     }

     try {
       return this.adapter.streamResponse(this.client, request, options)
     } catch (error) {
       if (error instanceof OpenAI.AuthenticationError) {
         throw new LLMAPIKeyInvalidException(
           'OpenAI API key is invalid. Please update it in settings menu.',
         )
       }
       throw error
     }
   }
 }