feat: initial release v0.3.0

saturn
2026-03-08 03:15:27 +08:00
commit 881ed44996
1311 changed files with 225407 additions and 0 deletions


@@ -0,0 +1,22 @@
import { describe, expect, it } from 'vitest'
import { buildArkThinkingParam } from '@/lib/ark-llm'

describe('ark thinking param builder', () => {
  it('builds enabled thinking param without reasoning_effort', () => {
    const params = buildArkThinkingParam('doubao-seed-2-0-lite-260215', true)
    expect(params).toEqual({
      thinking: {
        type: 'enabled',
      },
    })
  })

  it('builds disabled thinking param without reasoning_effort', () => {
    const params = buildArkThinkingParam('doubao-seed-2-0-lite-260215', false)
    expect(params).toEqual({
      thinking: {
        type: 'disabled',
      },
    })
  })
})
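
The builder under test is not shown in this diff. A minimal sketch consistent with the two expectations above, assuming the param type name and the modelId handling; the real '@/lib/ark-llm' implementation may also attach reasoning_effort for other model families:

// Hypothetical sketch inferred from the tests above, not the shipped code.
type ArkThinkingParam = {
  thinking: {
    type: 'enabled' | 'disabled'
  }
}

export function buildArkThinkingParam(modelId: string, enabled: boolean): ArkThinkingParam {
  // modelId is accepted so callers can branch per model family; for the
  // doubao-seed models above it only toggles the thinking type, and no
  // reasoning_effort field is emitted.
  void modelId
  return {
    thinking: {
      type: enabled ? 'enabled' : 'disabled',
    },
  }
}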


@@ -0,0 +1,124 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'

const resolveLlmRuntimeModelMock = vi.hoisted(() =>
  vi.fn(async () => ({
    provider: 'bailian',
    modelId: 'qwen3.5-flash',
    modelKey: 'bailian::qwen3.5-flash',
  })),
)

const completeBailianLlmMock = vi.hoisted(() =>
  vi.fn(async () => ({
    id: 'chatcmpl_mock',
    object: 'chat.completion',
    created: 1,
    model: 'qwen3.5-flash',
    choices: [
      {
        index: 0,
        message: { role: 'assistant', content: 'ok' },
        finish_reason: 'stop',
      },
    ],
    usage: {
      prompt_tokens: 1,
      completion_tokens: 1,
      total_tokens: 2,
    },
  })),
)

const completeSiliconFlowLlmMock = vi.hoisted(() =>
  vi.fn(async () => {
    throw new Error('siliconflow should not be called')
  }),
)

const runOpenAICompatChatCompletionMock = vi.hoisted(() =>
  vi.fn(async () => {
    throw new Error('openai-compat should not be called')
  }),
)

const getProviderConfigMock = vi.hoisted(() =>
  vi.fn(async () => ({
    id: 'bailian',
    name: 'Alibaba Bailian',
    apiKey: 'bl-key',
    baseUrl: undefined,
    gatewayRoute: 'official' as const,
  })),
)

const llmLoggerInfoMock = vi.hoisted(() => vi.fn())
const llmLoggerWarnMock = vi.hoisted(() => vi.fn())
const logLlmRawInputMock = vi.hoisted(() => vi.fn())
const logLlmRawOutputMock = vi.hoisted(() => vi.fn())
const recordCompletionUsageMock = vi.hoisted(() => vi.fn())

vi.mock('@/lib/llm-observe/internal-stream-context', () => ({
  getInternalLLMStreamCallbacks: vi.fn(() => null),
}))

vi.mock('@/lib/model-gateway', () => ({
  resolveModelGatewayRoute: vi.fn(() => 'official'),
  runOpenAICompatChatCompletion: runOpenAICompatChatCompletionMock,
}))

vi.mock('@/lib/api-config', () => ({
  getProviderConfig: getProviderConfigMock,
  getProviderKey: vi.fn((providerId: string) => providerId),
}))

vi.mock('@/lib/providers/bailian', () => ({
  completeBailianLlm: completeBailianLlmMock,
}))

vi.mock('@/lib/providers/siliconflow', () => ({
  completeSiliconFlowLlm: completeSiliconFlowLlmMock,
}))

vi.mock('@/lib/llm/runtime-shared', () => ({
  _ulogError: vi.fn(),
  _ulogWarn: vi.fn(),
  completionUsageSummary: vi.fn(() => ({ promptTokens: 1, completionTokens: 1 })),
  isRetryableError: vi.fn(() => false),
  llmLogger: {
    info: llmLoggerInfoMock,
    warn: llmLoggerWarnMock,
  },
  logLlmRawInput: logLlmRawInputMock,
  logLlmRawOutput: logLlmRawOutputMock,
  recordCompletionUsage: recordCompletionUsageMock,
  resolveLlmRuntimeModel: resolveLlmRuntimeModelMock,
}))

import { chatCompletion } from '@/lib/llm/chat-completion'

describe('llm chatCompletion official provider branch', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })

  it('returns completion from bailian official provider without falling through to baseUrl checks', async () => {
    const result = await chatCompletion(
      'user-1',
      'bailian::qwen3.5-flash',
      [{ role: 'user', content: 'hello' }],
      { temperature: 0.1 },
    )

    expect(completeBailianLlmMock).toHaveBeenCalledWith({
      modelId: 'qwen3.5-flash',
      messages: [{ role: 'user', content: 'hello' }],
      apiKey: 'bl-key',
      baseUrl: undefined,
      temperature: 0.1,
    })
    expect(runOpenAICompatChatCompletionMock).not.toHaveBeenCalled()
    expect(completeSiliconFlowLlmMock).not.toHaveBeenCalled()
    expect(result.choices[0]?.message?.content).toBe('ok')
    expect(recordCompletionUsageMock).toHaveBeenCalledTimes(1)
  })
})
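
These assertions pin down the official-provider fast path: when the gateway route resolves to 'official' and the provider key is bailian, chatCompletion hands the request straight to completeBailianLlm and never reaches the baseUrl-driven openai-compat executors. A simplified sketch of that branch; function and type names beyond the asserted call shape are assumptions:

import { completeBailianLlm } from '@/lib/providers/bailian'

// Sketch only; the real chatCompletion also resolves the runtime model,
// logs raw I/O, records usage, and retries, per the mocks above.
type OfficialLlmRequest = {
  modelId: string
  messages: { role: string; content: string }[]
  apiKey: string
  baseUrl?: string
  temperature?: number
}

async function completeOfficial(providerKey: string, request: OfficialLlmRequest) {
  if (providerKey === 'bailian') {
    // Return directly: no fall-through to the openai-compat path,
    // which is exactly what the test asserts.
    return completeBailianLlm(request)
  }
  // Other official providers (e.g. siliconflow) are elided in this sketch.
  throw new Error(`unhandled official provider: ${providerKey}`)
}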


@@ -0,0 +1,158 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'

type MockRuntimeModel = {
  provider: string
  modelId: string
  modelKey: string
  llmProtocol: 'responses' | 'chat-completions' | undefined
}

const resolveLlmRuntimeModelMock = vi.hoisted(() =>
  vi.fn<(...args: unknown[]) => Promise<MockRuntimeModel>>(async () => ({
    provider: 'openai-compatible:node-1',
    modelId: 'gpt-4.1-mini',
    modelKey: 'openai-compatible:node-1::gpt-4.1-mini',
    llmProtocol: 'responses',
  })),
)

const runOpenAICompatResponsesCompletionMock = vi.hoisted(() =>
  vi.fn(async () => ({
    id: 'chatcmpl_responses_1',
    object: 'chat.completion',
    created: 1,
    model: 'gpt-4.1-mini',
    choices: [{ index: 0, message: { role: 'assistant', content: 'responses-ok' }, finish_reason: 'stop' }],
    usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 },
  })),
)

const runOpenAICompatChatCompletionMock = vi.hoisted(() =>
  vi.fn(async () => ({
    id: 'chatcmpl_chat_1',
    object: 'chat.completion',
    created: 1,
    model: 'gpt-4.1-mini',
    choices: [{ index: 0, message: { role: 'assistant', content: 'chat-ok' }, finish_reason: 'stop' }],
    usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 },
  })),
)

const getProviderConfigMock = vi.hoisted(() =>
  vi.fn(async () => ({
    id: 'openai-compatible:node-1',
    name: 'OpenAI Compatible',
    apiKey: 'sk-test',
    baseUrl: 'https://compat.example.com/v1',
    gatewayRoute: 'openai-compat' as const,
    apiMode: 'openai-official' as const,
  })),
)

const logLlmRawInputMock = vi.hoisted(() => vi.fn())
const logLlmRawOutputMock = vi.hoisted(() => vi.fn())
const recordCompletionUsageMock = vi.hoisted(() => vi.fn())

vi.mock('@/lib/llm-observe/internal-stream-context', () => ({
  getInternalLLMStreamCallbacks: vi.fn(() => null),
}))

vi.mock('@/lib/model-gateway', () => ({
  resolveModelGatewayRoute: vi.fn(() => 'openai-compat'),
  runOpenAICompatChatCompletion: runOpenAICompatChatCompletionMock,
  runOpenAICompatResponsesCompletion: runOpenAICompatResponsesCompletionMock,
}))

vi.mock('@/lib/api-config', () => ({
  getProviderConfig: getProviderConfigMock,
  getProviderKey: vi.fn((providerId: string) => providerId.split(':')[0] || providerId),
}))

vi.mock('@/lib/providers/bailian', () => ({
  completeBailianLlm: vi.fn(async () => {
    throw new Error('bailian should not be called')
  }),
}))

vi.mock('@/lib/providers/siliconflow', () => ({
  completeSiliconFlowLlm: vi.fn(async () => {
    throw new Error('siliconflow should not be called')
  }),
}))

vi.mock('@/lib/llm/runtime-shared', () => ({
  _ulogError: vi.fn(),
  _ulogWarn: vi.fn(),
  completionUsageSummary: vi.fn(() => ({ promptTokens: 1, completionTokens: 1 })),
  isRetryableError: vi.fn(() => false),
  llmLogger: {
    info: vi.fn(),
    warn: vi.fn(),
  },
  logLlmRawInput: logLlmRawInputMock,
  logLlmRawOutput: logLlmRawOutputMock,
  recordCompletionUsage: recordCompletionUsageMock,
  resolveLlmRuntimeModel: resolveLlmRuntimeModelMock,
}))

import { chatCompletion } from '@/lib/llm/chat-completion'

describe('llm chatCompletion openai-compatible protocol routing', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })

  it('uses responses executor when llmProtocol=responses', async () => {
    const completion = await chatCompletion(
      'user-1',
      'openai-compatible:node-1::gpt-4.1-mini',
      [{ role: 'user', content: 'hello' }],
      { temperature: 0.2 },
    )

    expect(runOpenAICompatResponsesCompletionMock).toHaveBeenCalledTimes(1)
    expect(runOpenAICompatChatCompletionMock).not.toHaveBeenCalled()
    expect(completion.choices[0]?.message?.content).toBe('responses-ok')
  })

  it('uses chat-completions executor when llmProtocol=chat-completions', async () => {
    resolveLlmRuntimeModelMock.mockResolvedValueOnce({
      provider: 'openai-compatible:node-1',
      modelId: 'gpt-4.1-mini',
      modelKey: 'openai-compatible:node-1::gpt-4.1-mini',
      llmProtocol: 'chat-completions',
    })

    const completion = await chatCompletion(
      'user-1',
      'openai-compatible:node-1::gpt-4.1-mini',
      [{ role: 'user', content: 'hello' }],
      { temperature: 0.2 },
    )

    expect(runOpenAICompatChatCompletionMock).toHaveBeenCalledTimes(1)
    expect(runOpenAICompatResponsesCompletionMock).not.toHaveBeenCalled()
    expect(completion.choices[0]?.message?.content).toBe('chat-ok')
  })

  it('fails fast when llmProtocol is missing for openai-compatible model', async () => {
    resolveLlmRuntimeModelMock.mockResolvedValueOnce({
      provider: 'openai-compatible:node-1',
      modelId: 'gpt-4.1-mini',
      modelKey: 'openai-compatible:node-1::gpt-4.1-mini',
      llmProtocol: undefined,
    })

    await expect(
      chatCompletion(
        'user-1',
        'openai-compatible:node-1::gpt-4.1-mini',
        [{ role: 'user', content: 'hello' }],
        { temperature: 0.2, maxRetries: 0 },
      ),
    ).rejects.toThrow('MODEL_LLM_PROTOCOL_REQUIRED')

    expect(runOpenAICompatChatCompletionMock).not.toHaveBeenCalled()
    expect(runOpenAICompatResponsesCompletionMock).not.toHaveBeenCalled()
  })
})
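
Together the three cases describe a strict protocol dispatch for openai-compatible nodes: 'responses' and 'chat-completions' each map to their own gateway executor, and a missing llmProtocol throws instead of guessing a default. A sketch of that dispatch; only the executor names and the MODEL_LLM_PROTOCOL_REQUIRED error text come from this diff, the surrounding shape is assumed:

import {
  runOpenAICompatChatCompletion,
  runOpenAICompatResponsesCompletion,
} from '@/lib/model-gateway'

type LlmProtocol = 'responses' | 'chat-completions' | undefined

// Picks the gateway executor for an openai-compatible model, failing fast
// when the protocol was never configured on the model node.
function pickOpenAICompatExecutor(llmProtocol: LlmProtocol) {
  switch (llmProtocol) {
    case 'responses':
      return runOpenAICompatResponsesCompletion
    case 'chat-completions':
      return runOpenAICompatChatCompletion
    default:
      throw new Error('MODEL_LLM_PROTOCOL_REQUIRED')
  }
}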


@@ -0,0 +1,129 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'

const resolveLlmRuntimeModelMock = vi.hoisted(() =>
  vi.fn(async () => ({
    provider: 'bailian',
    modelId: 'qwen3.5-plus',
    modelKey: 'bailian::qwen3.5-plus',
  })),
)

const completeBailianLlmMock = vi.hoisted(() =>
  vi.fn(async () => ({
    id: 'chatcmpl_stream_mock',
    object: 'chat.completion',
    created: 1,
    model: 'qwen3.5-plus',
    choices: [
      {
        index: 0,
        message: { role: 'assistant', content: 'stream-ok' },
        finish_reason: 'stop',
      },
    ],
    usage: {
      prompt_tokens: 2,
      completion_tokens: 2,
      total_tokens: 4,
    },
  })),
)

const completeSiliconFlowLlmMock = vi.hoisted(() =>
  vi.fn(async () => {
    throw new Error('siliconflow should not be called')
  }),
)

const runOpenAICompatChatCompletionMock = vi.hoisted(() =>
  vi.fn(async () => {
    throw new Error('openai-compat should not be called')
  }),
)

const getProviderConfigMock = vi.hoisted(() =>
  vi.fn(async () => ({
    id: 'bailian',
    name: 'Alibaba Bailian',
    apiKey: 'bl-key',
    baseUrl: undefined,
    gatewayRoute: 'official' as const,
  })),
)

const logLlmRawInputMock = vi.hoisted(() => vi.fn())
const logLlmRawOutputMock = vi.hoisted(() => vi.fn())
const recordCompletionUsageMock = vi.hoisted(() => vi.fn())

vi.mock('@/lib/model-gateway', () => ({
  resolveModelGatewayRoute: vi.fn(() => 'official'),
  runOpenAICompatChatCompletion: runOpenAICompatChatCompletionMock,
}))

vi.mock('@/lib/api-config', () => ({
  getProviderConfig: getProviderConfigMock,
  getProviderKey: vi.fn((providerId: string) => providerId),
}))

vi.mock('@/lib/providers/bailian', () => ({
  completeBailianLlm: completeBailianLlmMock,
}))

vi.mock('@/lib/providers/siliconflow', () => ({
  completeSiliconFlowLlm: completeSiliconFlowLlmMock,
}))

vi.mock('@/lib/llm/runtime-shared', () => ({
  completionUsageSummary: vi.fn(() => ({ promptTokens: 2, completionTokens: 2 })),
  llmLogger: {
    info: vi.fn(),
    warn: vi.fn(),
  },
  logLlmRawInput: logLlmRawInputMock,
  logLlmRawOutput: logLlmRawOutputMock,
  recordCompletionUsage: recordCompletionUsageMock,
  resolveLlmRuntimeModel: resolveLlmRuntimeModelMock,
}))

import { chatCompletionStream } from '@/lib/llm/chat-stream'

describe('llm chatCompletionStream official provider branch', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })

  it('streams from bailian completion result and exits early', async () => {
    const onChunk = vi.fn()
    const onComplete = vi.fn()

    const completion = await chatCompletionStream(
      'user-1',
      'bailian::qwen3.5-plus',
      [{ role: 'user', content: 'hello' }],
      {},
      {
        onChunk,
        onComplete,
      },
    )

    expect(completeBailianLlmMock).toHaveBeenCalledWith({
      modelId: 'qwen3.5-plus',
      messages: [{ role: 'user', content: 'hello' }],
      apiKey: 'bl-key',
      baseUrl: undefined,
      temperature: 0.7,
    })
    expect(runOpenAICompatChatCompletionMock).not.toHaveBeenCalled()
    expect(completeSiliconFlowLlmMock).not.toHaveBeenCalled()
    expect(onComplete).toHaveBeenCalledWith('stream-ok', undefined)
    expect(onChunk).toHaveBeenCalledWith(
      expect.objectContaining({
        kind: 'text',
        delta: 'stream-ok',
      }),
    )
    expect(completion.choices[0]?.message?.content).toBe('stream-ok')
    expect(recordCompletionUsageMock).toHaveBeenCalledTimes(1)
  })
})
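
The callback assertions imply that the official-provider branch of chatCompletionStream does not stream at all: it awaits the full completion, replays the content as a single text chunk, then signals completion. A self-contained sketch of that replay step; the chunk and callback shapes are inferred from the assertions above, not taken from chat-stream's actual types:

type StreamChunk = { kind: 'text'; delta: string }

type StreamCallbacks = {
  onChunk?: (chunk: StreamChunk) => void
  onComplete?: (text: string, reasoning?: string) => void
}

// Replays a non-streaming completion through the streaming callbacks:
// one text chunk carrying the whole content, then onComplete with the
// final text (reasoning stays undefined here, matching the assertion).
function replayCompletionAsStream(content: string, callbacks?: StreamCallbacks, reasoning?: string) {
  callbacks?.onChunk?.({ kind: 'text', delta: content })
  callbacks?.onComplete?.(content, reasoning)
}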


@@ -0,0 +1,156 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'

type MockRuntimeModel = {
  provider: string
  modelId: string
  modelKey: string
  llmProtocol: 'responses' | 'chat-completions' | undefined
}

const resolveLlmRuntimeModelMock = vi.hoisted(() =>
  vi.fn<(...args: unknown[]) => Promise<MockRuntimeModel>>(async () => ({
    provider: 'openai-compatible:node-1',
    modelId: 'gpt-4.1-mini',
    modelKey: 'openai-compatible:node-1::gpt-4.1-mini',
    llmProtocol: 'responses',
  })),
)

const runOpenAICompatResponsesCompletionMock = vi.hoisted(() =>
  vi.fn(async () => ({
    id: 'chatcmpl_responses_1',
    object: 'chat.completion',
    created: 1,
    model: 'gpt-4.1-mini',
    choices: [{ index: 0, message: { role: 'assistant', content: 'responses-stream' }, finish_reason: 'stop' }],
    usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 },
  })),
)

const runOpenAICompatChatCompletionMock = vi.hoisted(() =>
  vi.fn(async () => ({
    id: 'chatcmpl_chat_1',
    object: 'chat.completion',
    created: 1,
    model: 'gpt-4.1-mini',
    choices: [{ index: 0, message: { role: 'assistant', content: 'chat-stream' }, finish_reason: 'stop' }],
    usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 },
  })),
)

const getProviderConfigMock = vi.hoisted(() =>
  vi.fn(async () => ({
    id: 'openai-compatible:node-1',
    name: 'OpenAI Compatible',
    apiKey: 'sk-test',
    baseUrl: 'https://compat.example.com/v1',
    gatewayRoute: 'openai-compat' as const,
    apiMode: 'openai-official' as const,
  })),
)

const logLlmRawInputMock = vi.hoisted(() => vi.fn())
const logLlmRawOutputMock = vi.hoisted(() => vi.fn())
const recordCompletionUsageMock = vi.hoisted(() => vi.fn())

vi.mock('@/lib/model-gateway', () => ({
  resolveModelGatewayRoute: vi.fn(() => 'openai-compat'),
  runOpenAICompatChatCompletion: runOpenAICompatChatCompletionMock,
  runOpenAICompatResponsesCompletion: runOpenAICompatResponsesCompletionMock,
}))

vi.mock('@/lib/api-config', () => ({
  getProviderConfig: getProviderConfigMock,
  getProviderKey: vi.fn((providerId: string) => providerId.split(':')[0] || providerId),
}))

vi.mock('@/lib/providers/bailian', () => ({
  completeBailianLlm: vi.fn(async () => {
    throw new Error('bailian should not be called')
  }),
}))

vi.mock('@/lib/providers/siliconflow', () => ({
  completeSiliconFlowLlm: vi.fn(async () => {
    throw new Error('siliconflow should not be called')
  }),
}))

vi.mock('@/lib/llm/runtime-shared', () => ({
  completionUsageSummary: vi.fn(() => ({ promptTokens: 1, completionTokens: 1 })),
  llmLogger: {
    info: vi.fn(),
    warn: vi.fn(),
  },
  logLlmRawInput: logLlmRawInputMock,
  logLlmRawOutput: logLlmRawOutputMock,
  recordCompletionUsage: recordCompletionUsageMock,
  resolveLlmRuntimeModel: resolveLlmRuntimeModelMock,
}))

import { chatCompletionStream } from '@/lib/llm/chat-stream'

describe('llm chatCompletionStream openai-compatible protocol routing', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })

  it('uses responses executor when llmProtocol=responses', async () => {
    const onChunk = vi.fn()

    const completion = await chatCompletionStream(
      'user-1',
      'openai-compatible:node-1::gpt-4.1-mini',
      [{ role: 'user', content: 'hello' }],
      { temperature: 0.2 },
      { onChunk },
    )

    expect(runOpenAICompatResponsesCompletionMock).toHaveBeenCalledTimes(1)
    expect(runOpenAICompatChatCompletionMock).not.toHaveBeenCalled()
    expect(completion.choices[0]?.message?.content).toBe('responses-stream')
    expect(onChunk).toHaveBeenCalled()
  })

  it('uses chat-completions executor when llmProtocol=chat-completions', async () => {
    resolveLlmRuntimeModelMock.mockResolvedValueOnce({
      provider: 'openai-compatible:node-1',
      modelId: 'gpt-4.1-mini',
      modelKey: 'openai-compatible:node-1::gpt-4.1-mini',
      llmProtocol: 'chat-completions',
    })

    const completion = await chatCompletionStream(
      'user-1',
      'openai-compatible:node-1::gpt-4.1-mini',
      [{ role: 'user', content: 'hello' }],
      { temperature: 0.2 },
      undefined,
    )

    expect(runOpenAICompatChatCompletionMock).toHaveBeenCalledTimes(1)
    expect(runOpenAICompatResponsesCompletionMock).not.toHaveBeenCalled()
    expect(completion.choices[0]?.message?.content).toBe('chat-stream')
  })

  it('fails fast when llmProtocol is missing for openai-compatible model', async () => {
    resolveLlmRuntimeModelMock.mockResolvedValueOnce({
      provider: 'openai-compatible:node-1',
      modelId: 'gpt-4.1-mini',
      modelKey: 'openai-compatible:node-1::gpt-4.1-mini',
      llmProtocol: undefined,
    })

    await expect(
      chatCompletionStream(
        'user-1',
        'openai-compatible:node-1::gpt-4.1-mini',
        [{ role: 'user', content: 'hello' }],
        { temperature: 0.2 },
        undefined,
      ),
    ).rejects.toThrow('MODEL_LLM_PROTOCOL_REQUIRED')

    expect(runOpenAICompatChatCompletionMock).not.toHaveBeenCalled()
    expect(runOpenAICompatResponsesCompletionMock).not.toHaveBeenCalled()
  })
})


@@ -0,0 +1,50 @@
import type OpenAI from 'openai'
import { describe, expect, it } from 'vitest'
import { getCompletionParts } from '@/lib/llm/completion-parts'

function buildCompletion(content: string): OpenAI.Chat.Completions.ChatCompletion {
  return {
    id: 'chatcmpl_test',
    object: 'chat.completion',
    created: 1,
    model: 'minimax-m2.5',
    choices: [
      {
        index: 0,
        message: {
          role: 'assistant',
          content,
        },
        finish_reason: 'stop',
      },
    ],
  } as OpenAI.Chat.Completions.ChatCompletion
}

describe('llm completion parts think-tag parsing', () => {
  it('splits think tag content into reasoning and clean text', () => {
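    // Fixture: raw model output whose <think> block carries Chinese
    // reasoning text ("Let me analyze this passage and pick out the scenes
    // that need visuals.") ahead of the JSON payload.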
    const completion = buildCompletion(`<think>
让我分析这段文本,筛选出需要制作画面的场景。
</think>
{
"locations": []
}`)

    const parts = getCompletionParts(completion)

    expect(parts.reasoning).toContain('让我分析这段文本')
    expect(parts.text).toBe(`{
"locations": []
}`)
  })

  it('keeps plain content untouched when no think tag exists', () => {
    const completion = buildCompletion('{ "locations": [] }')
    const parts = getCompletionParts(completion)

    expect(parts.reasoning).toBe('')
    expect(parts.text).toBe('{ "locations": [] }')
  })
})
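
Both cases are satisfied by a leading-think-tag split: capture everything inside a single <think>...</think> prefix as reasoning, and keep the remainder (with the whitespace after the closing tag consumed) as text. A minimal sketch of that parsing; the real getCompletionParts presumably handles more than this, which is an assumption:

// Sketch of the think-tag split the two tests above describe.
function splitThinkTag(content: string): { reasoning: string; text: string } {
  const match = content.match(/^<think>([\s\S]*?)<\/think>\s*/)
  if (!match) {
    // No leading think tag: plain content passes through untouched.
    return { reasoning: '', text: content }
  }
  return {
    reasoning: match[1].trim(),
    text: content.slice(match[0].length),
  }
}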


@@ -0,0 +1,41 @@
import { describe, expect, it } from 'vitest'
import {
  isLikelyOpenAIReasoningModel,
  shouldUseOpenAIReasoningProviderOptions,
} from '@/lib/llm/reasoning-capability'

describe('llm/reasoning-capability', () => {
  it('identifies likely OpenAI reasoning model ids', () => {
    expect(isLikelyOpenAIReasoningModel('o3-mini')).toBe(true)
    expect(isLikelyOpenAIReasoningModel('gpt-5.2')).toBe(true)
    expect(isLikelyOpenAIReasoningModel('claude-sonnet-4-6')).toBe(false)
  })

  it('enables reasoning provider options for native openai provider', () => {
    expect(shouldUseOpenAIReasoningProviderOptions({
      providerKey: 'openai',
      modelId: 'gpt-5.2',
    })).toBe(true)
  })

  it('enables reasoning provider options for openai-compatible only when apiMode is openai-official', () => {
    expect(shouldUseOpenAIReasoningProviderOptions({
      providerKey: 'openai-compatible',
      providerApiMode: 'openai-official',
      modelId: 'gpt-5.2',
    })).toBe(true)

    expect(shouldUseOpenAIReasoningProviderOptions({
      providerKey: 'openai-compatible',
      modelId: 'gpt-5.2',
    })).toBe(false)
  })

  it('disables reasoning provider options for non-openai models even on openai-compatible gateways', () => {
    expect(shouldUseOpenAIReasoningProviderOptions({
      providerKey: 'openai-compatible',
      providerApiMode: 'openai-official',
      modelId: 'claude-sonnet-4-6',
    })).toBe(false)
  })
})
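
The four cases compose into two small rules: the model id must look like an OpenAI reasoning model, and the provider must be either native openai or an openai-compatible node explicitly pinned to openai-official apiMode. A sketch consistent with those cases; the id heuristic here is generalized from the tested o3-mini / gpt-5.2 / claude-sonnet-4-6 examples and is an assumption about the real matcher:

// Assumed heuristic: o-series and gpt-5 family ids count as reasoning models.
function isLikelyOpenAIReasoningModelSketch(modelId: string): boolean {
  return /^o\d/.test(modelId) || /^gpt-5/.test(modelId)
}

function shouldUseOpenAIReasoningProviderOptionsSketch(input: {
  providerKey: string
  providerApiMode?: string
  modelId: string
}): boolean {
  // Non-reasoning model ids never get reasoning provider options,
  // regardless of which gateway serves them.
  if (!isLikelyOpenAIReasoningModelSketch(input.modelId)) return false
  if (input.providerKey === 'openai') return true
  // openai-compatible gateways qualify only when explicitly running
  // against the official OpenAI API surface.
  return input.providerKey === 'openai-compatible' && input.providerApiMode === 'openai-official'
}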