simple model config

Author: duanfuxiang
Date: 2025-02-17 13:06:22 +08:00
Parent: bf29a42baa
Commit: 025dc85c59
34 changed files with 12098 additions and 708 deletions

src/utils/api.ts (Normal file, 1202 changed lines) — diff suppressed because it is too large
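The new src/utils/api.ts is not shown here, so the shape it exports can only be inferred from the hunk below, which calls GetProviderModels(model.provider) and reads inputPrice/outputPrice per model. A minimal sketch of that assumed shape (type names, model names, and prices are placeholders, not the commit's actual data) could look like this:

// Hypothetical sketch only — the real src/utils/api.ts is suppressed above.
// Assumes GetProviderModels returns per-model pricing keyed by modelId,
// with inputPrice/outputPrice expressed in dollars per token (the new
// calculateLLMCost below no longer divides by 1_000_000).
export type ProviderModelInfo = {
  inputPrice: number // assumed: dollars per prompt token
  outputPrice: number // assumed: dollars per completion token
}

export type ProviderModels = Record<string, ProviderModelInfo>

// Placeholder pricing table; entries are illustrative only.
const PROVIDER_MODELS: Record<string, ProviderModels> = {
  openai: {
    'gpt-4o-mini': { inputPrice: 0.15 / 1_000_000, outputPrice: 0.6 / 1_000_000 },
  },
}

export function GetProviderModels(provider: string): ProviderModels | undefined {
  return PROVIDER_MODELS[provider]
}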

@@ -1,58 +1,24 @@
-import {
-  ANTHROPIC_PRICES,
-  GEMINI_PRICES,
-  GROQ_PRICES,
-  OPENAI_PRICES,
-} from '../constants'
-import { CustomLLMModel } from '../types/llm/model'
+import { LLMModel } from '../types/llm/model'
 import { ResponseUsage } from '../types/llm/response'
+import { GetProviderModels } from './api'
 
 // Returns the cost in dollars. Returns null if the model is not supported.
 export const calculateLLMCost = ({
   model,
   usage,
 }: {
-  model: CustomLLMModel
+  model: LLMModel
   usage: ResponseUsage
 }): number | null => {
-  switch (model.provider) {
-    case 'openai': {
-      const modelPricing = OPENAI_PRICES[model.name]
-      if (!modelPricing) return null
-      return (
-        (usage.prompt_tokens * modelPricing.input +
-          usage.completion_tokens * modelPricing.output) /
-        1_000_000
-      )
-    }
-    case 'anthropic': {
-      const modelPricing = ANTHROPIC_PRICES[model.name]
-      if (!modelPricing) return null
-      return (
-        (usage.prompt_tokens * modelPricing.input +
-          usage.completion_tokens * modelPricing.output) /
-        1_000_000
-      )
-    }
-    case 'gemini': {
-      const modelPricing = GEMINI_PRICES[model.name]
-      if (!modelPricing) return null
-      return (
-        (usage.prompt_tokens * modelPricing.input +
-          usage.completion_tokens * modelPricing.output) /
-        1_000_000
-      )
-    }
-    case 'groq': {
-      const modelPricing = GROQ_PRICES[model.name]
-      if (!modelPricing) return null
-      return (
-        (usage.prompt_tokens * modelPricing.input +
-          usage.completion_tokens * modelPricing.output) /
-        1_000_000
-      )
-    }
-    default:
-      return null
-  }
+  const providerModels = GetProviderModels(model.provider)
+  if (!providerModels) {
+    return null
+  }
+  const modelInfo = providerModels[model.modelId]
+  if (!modelInfo) {
+    return null
+  }
+  const cost = modelInfo.inputPrice * usage.prompt_tokens + modelInfo.outputPrice * usage.completion_tokens
+  return cost
 }
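For reference, a hedged usage sketch of the refactored calculateLLMCost (the import path is a placeholder because this hunk's file name is not shown, and the LLMModel/ResponseUsage literals below assume only the fields the hunk actually reads):

// Hypothetical usage sketch; paths and object shapes are assumptions.
import { LLMModel } from '../types/llm/model'
import { ResponseUsage } from '../types/llm/response'
import { calculateLLMCost } from './llm-cost' // placeholder path

const model = { provider: 'openai', modelId: 'gpt-4o-mini' } as LLMModel
const usage = { prompt_tokens: 1_200, completion_tokens: 300 } as ResponseUsage

// Returns a dollar amount when pricing for model.modelId exists, otherwise null.
const cost = calculateLLMCost({ model, usage })
console.log(cost)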