feat: initial release v0.3.0
tests/unit/api-config/assistant-chat-modal-content.test.ts (new file, 71 lines)
@@ -0,0 +1,71 @@
import type { UIMessage } from 'ai'
import { describe, expect, it } from 'vitest'
import { extractMessageContent } from '@/components/assistant/AssistantChatModal'

function createAssistantMessage(parts: Array<Record<string, unknown>>): UIMessage {
  return {
    id: 'assistant-message',
    role: 'assistant',
    parts,
  } as unknown as UIMessage
}

describe('assistant chat modal message content parser', () => {
  it('keeps reasoning parts out of normal visible lines', () => {
    const message = createAssistantMessage([
      { type: 'reasoning', text: '先分析接口字段映射' },
      { type: 'text', text: '我需要你的 status 返回样例。' },
    ])

    const content = extractMessageContent(message)

    expect(content.lines).toEqual(['我需要你的 status 返回样例。'])
    expect(content.reasoningLines).toEqual(['先分析接口字段映射'])
  })

  it('extracts think tags from text into reasoning section', () => {
    const message = createAssistantMessage([
      {
        type: 'text',
        text: '<think>先确认 create/status/content 三个端点</think>请补充 status 返回 JSON',
      },
    ])

    const content = extractMessageContent(message)

    expect(content.lines).toEqual(['请补充 status 返回 JSON'])
    expect(content.reasoningLines).toEqual(['先确认 create/status/content 三个端点'])
  })

  it('extracts reasoning from unclosed think tag during streaming', () => {
    const message = createAssistantMessage([
      {
        type: 'text',
        text: '<think>先确认任务状态枚举和输出路径',
      },
    ])

    const content = extractMessageContent(message)

    expect(content.lines).toEqual([])
    expect(content.reasoningLines).toEqual(['先确认任务状态枚举和输出路径'])
  })

  it('preserves tool output and issues as visible lines', () => {
    const message = createAssistantMessage([
      {
        type: 'tool-saveModelTemplate',
        state: 'output-available',
        output: {
          message: '模型已保存',
          issues: [{ field: 'response.statusPath', message: 'missing' }],
        },
      },
    ])

    const content = extractMessageContent(message)

    expect(content.lines).toEqual(['模型已保存', 'response.statusPath: missing'])
    expect(content.reasoningLines).toEqual([])
  })
})
tests/unit/api-config/minimax-preset.test.ts (new file, 22 lines)
@@ -0,0 +1,22 @@
import { describe, expect, it } from 'vitest'
import { PRESET_MODELS, PRESET_PROVIDERS } from '@/app/[locale]/profile/components/api-config/types'

describe('api-config minimax preset', () => {
  it('uses official minimax baseUrl in preset provider', () => {
    const minimaxProvider = PRESET_PROVIDERS.find((provider) => provider.id === 'minimax')
    expect(minimaxProvider).toBeDefined()
    expect(minimaxProvider?.baseUrl).toBe('https://api.minimaxi.com/v1')
  })

  it('includes all required minimax official llm preset models', () => {
    const minimaxLlmModelIds = PRESET_MODELS
      .filter((model) => model.provider === 'minimax' && model.type === 'llm')
      .map((model) => model.modelId)

    expect(minimaxLlmModelIds).toContain('MiniMax-M2.5')
    expect(minimaxLlmModelIds).toContain('MiniMax-M2.5-highspeed')
    expect(minimaxLlmModelIds).toContain('MiniMax-M2.1')
    expect(minimaxLlmModelIds).toContain('MiniMax-M2.1-highspeed')
    expect(minimaxLlmModelIds).toContain('MiniMax-M2')
  })
})
tests/unit/api-config/preset-coming-soon.test.ts (new file, 52 lines)
@@ -0,0 +1,52 @@
import { describe, expect, it } from 'vitest'
import {
  PRESET_MODELS,
  encodeModelKey,
  isPresetComingSoonModel,
  isPresetComingSoonModelKey,
} from '@/app/[locale]/profile/components/api-config/types'

describe('api-config preset coming soon', () => {
  it('registers Nano Banana 2 under Google AI Studio presets', () => {
    const model = PRESET_MODELS.find(
      (entry) => entry.provider === 'google' && entry.modelId === 'gemini-3.1-flash-image-preview',
    )
    expect(model).toBeDefined()
    expect(model?.name).toBe('Nano Banana 2')
  })

  it('registers Seedance 2.0 as a coming-soon preset model', () => {
    const model = PRESET_MODELS.find(
      (entry) => entry.provider === 'ark' && entry.modelId === 'doubao-seedance-2-0-260128',
    )
    expect(model).toBeDefined()
    expect(model?.name).toContain('待上线')
  })

  it('recognizes coming-soon model by provider/modelId and modelKey', () => {
    const modelKey = encodeModelKey('ark', 'doubao-seedance-2-0-260128')
    expect(isPresetComingSoonModel('ark', 'doubao-seedance-2-0-260128')).toBe(true)
    expect(isPresetComingSoonModelKey(modelKey)).toBe(true)
  })

  it('does not mark normal preset models as coming soon', () => {
    const modelKey = encodeModelKey('ark', 'doubao-seedance-1-5-pro-251215')
    expect(isPresetComingSoonModel('ark', 'doubao-seedance-1-5-pro-251215')).toBe(false)
    expect(isPresetComingSoonModelKey(modelKey)).toBe(false)
  })

  it('registers Bailian Wan i2v preset models', () => {
    const modelIds = PRESET_MODELS
      .filter((entry) => entry.provider === 'bailian' && entry.type === 'video')
      .map((entry) => entry.modelId)

    expect(modelIds).toEqual(expect.arrayContaining([
      'wan2.6-i2v-flash',
      'wan2.6-i2v',
      'wan2.5-i2v-preview',
      'wan2.2-i2v-plus',
      'wan2.2-kf2v-flash',
      'wanx2.1-kf2v-plus',
    ]))
  })
})
@@ -0,0 +1,49 @@
import { describe, expect, it } from 'vitest'
import { getAssistantSavedModelLabel } from '@/app/[locale]/profile/components/api-config/provider-card/hooks/useProviderCardState'

describe('provider card assistant saved label', () => {
  it('prefers draft model name when available', () => {
    const label = getAssistantSavedModelLabel({
      savedModelKey: 'openai-compatible:oa-1::veo_3_1-fast-4K',
      draftModel: {
        modelId: 'veo_3_1-fast-4K',
        name: 'Veo 3.1 Fast 4K',
        type: 'video',
        provider: 'openai-compatible:oa-1',
        compatMediaTemplate: {
          version: 1,
          mediaType: 'video',
          mode: 'async',
          create: {
            method: 'POST',
            path: '/v1/video/create',
          },
          status: {
            method: 'GET',
            path: '/v1/video/query?id={{task_id}}',
          },
          response: {
            taskIdPath: '$.id',
            statusPath: '$.status',
          },
          polling: {
            intervalMs: 5000,
            timeoutMs: 600000,
            doneStates: ['completed'],
            failStates: ['failed'],
          },
        },
      },
    })

    expect(label).toBe('Veo 3.1 Fast 4K')
  })

  it('falls back to model id parsed from savedModelKey', () => {
    const label = getAssistantSavedModelLabel({
      savedModelKey: 'openai-compatible:oa-1::veo_3_1-fast-4K',
    })

    expect(label).toBe('veo_3_1-fast-4K')
  })
})
tests/unit/api-config/provider-card-pricing-form.test.ts (new file, 173 lines)
@@ -0,0 +1,173 @@
import { describe, expect, it } from 'vitest'
import {
  getAddableModelTypesForProvider,
  getVisibleModelTypesForProvider,
  shouldShowOpenAICompatVideoHint,
} from '@/app/[locale]/profile/components/api-config/provider-card/ProviderAdvancedFields'
import {
  buildCustomPricingFromModelForm,
  buildProviderConnectionPayload,
} from '@/app/[locale]/profile/components/api-config/provider-card/hooks/useProviderCardState'

describe('provider card pricing form behavior', () => {
  it('allows openai-compatible provider to add llm/image/video', () => {
    expect(getAddableModelTypesForProvider('openai-compatible:oa-1')).toEqual(['llm', 'image', 'video'])
  })

  it('shows llm/image/video tabs by default for openai-compatible even with only image models', () => {
    const visible = getVisibleModelTypesForProvider(
      'openai-compatible:oa-1',
      {
        image: [
          {
            modelId: 'gpt-image-1',
            modelKey: 'openai-compatible:oa-1::gpt-image-1',
            name: 'Image',
            type: 'image',
            provider: 'openai-compatible:oa-1',
            price: 0,
            enabled: true,
          },
        ],
      },
    )

    expect(visible).toEqual(['llm', 'image', 'video'])
  })

  it('shows the openai-compatible video hint only for openai-compatible video add forms', () => {
    expect(shouldShowOpenAICompatVideoHint('openai-compatible:oa-1', 'video')).toBe(true)
    expect(shouldShowOpenAICompatVideoHint('openai-compatible:oa-1', 'image')).toBe(false)
    expect(shouldShowOpenAICompatVideoHint('gemini-compatible:gm-1', 'video')).toBe(false)
    expect(shouldShowOpenAICompatVideoHint('ark', 'video')).toBe(false)
  })

  it('keeps payload without customPricing when pricing toggle is off', () => {
    const result = buildCustomPricingFromModelForm(
      'image',
      {
        name: 'Image',
        modelId: 'gpt-image-1',
        enableCustomPricing: false,
        basePrice: '0.8',
      },
      { needsCustomPricing: true },
    )

    expect(result).toEqual({ ok: true })
  })

  it('builds llm customPricing payload when pricing toggle is on', () => {
    const result = buildCustomPricingFromModelForm(
      'llm',
      {
        name: 'GPT',
        modelId: 'gpt-4.1',
        enableCustomPricing: true,
        priceInput: '2.5',
        priceOutput: '8',
      },
      { needsCustomPricing: true },
    )

    expect(result).toEqual({
      ok: true,
      customPricing: {
        llm: {
          inputPerMillion: 2.5,
          outputPerMillion: 8,
        },
      },
    })
  })

  it('builds media customPricing payload with option prices when enabled', () => {
    const result = buildCustomPricingFromModelForm(
      'video',
      {
        name: 'Sora',
        modelId: 'sora-2',
        enableCustomPricing: true,
        basePrice: '0.9',
        optionPricesJson: '{"resolution":{"720x1280":0.1},"duration":{"8":0.4}}',
      },
      { needsCustomPricing: true },
    )

    expect(result).toEqual({
      ok: true,
      customPricing: {
        video: {
          basePrice: 0.9,
          optionPrices: {
            resolution: {
              '720x1280': 0.1,
            },
            duration: {
              '8': 0.4,
            },
          },
        },
      },
    })
  })

  it('rejects invalid media optionPrices JSON when enabled', () => {
    const result = buildCustomPricingFromModelForm(
      'image',
      {
        name: 'Image',
        modelId: 'gpt-image-1',
        enableCustomPricing: true,
        basePrice: '0.3',
        optionPricesJson: '{"resolution":{"1024x1024":"free"}}',
      },
      { needsCustomPricing: true },
    )

    expect(result).toEqual({ ok: false, reason: 'invalid' })
  })

  it('bugfix: includes baseUrl for openai-compatible provider connection test payload', () => {
    const payload = buildProviderConnectionPayload({
      providerKey: 'openai-compatible',
      apiKey: ' sk-test ',
      baseUrl: ' https://api.openai-proxy.example/v1 ',
    })

    expect(payload).toEqual({
      apiType: 'openai-compatible',
      apiKey: 'sk-test',
      baseUrl: 'https://api.openai-proxy.example/v1',
    })
  })

  it('omits baseUrl for non-compatible provider connection test payload', () => {
    const payload = buildProviderConnectionPayload({
      providerKey: 'ark',
      apiKey: ' ark-key ',
      baseUrl: ' https://ignored.example/v1 ',
    })

    expect(payload).toEqual({
      apiType: 'ark',
      apiKey: 'ark-key',
    })
  })

  it('includes llmModel in provider connection test payload when configured', () => {
    const payload = buildProviderConnectionPayload({
      providerKey: 'openai-compatible',
      apiKey: ' sk-test ',
      baseUrl: ' https://compat.example.com/v1 ',
      llmModel: ' gpt-4.1-mini ',
    })

    expect(payload).toEqual({
      apiType: 'openai-compatible',
      apiKey: 'sk-test',
      baseUrl: 'https://compat.example.com/v1',
      llmModel: 'gpt-4.1-mini',
    })
  })
})
tests/unit/api-config/provider-card-protocol-probe.test.ts (new file, 83 lines)
@@ -0,0 +1,83 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
import type { CustomModel } from '@/app/[locale]/profile/components/api-config/types'
import {
  probeModelLlmProtocolViaApi,
  shouldProbeModelLlmProtocol,
  shouldReprobeModelLlmProtocol,
} from '@/app/[locale]/profile/components/api-config/provider-card/hooks/useProviderCardState'

describe('api-config provider-card protocol probe helpers', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })

  it('only probes openai-compatible llm models', () => {
    expect(shouldProbeModelLlmProtocol({ providerId: 'openai-compatible:oa-1', modelType: 'llm' })).toBe(true)
    expect(shouldProbeModelLlmProtocol({ providerId: 'openai-compatible:oa-1', modelType: 'image' })).toBe(false)
    expect(shouldProbeModelLlmProtocol({ providerId: 'gemini-compatible:gm-1', modelType: 'llm' })).toBe(false)
  })

  it('re-probes only when modelId/provider changed on openai-compatible llm', () => {
    const originalModel: CustomModel = {
      modelId: 'gpt-4.1-mini',
      modelKey: 'openai-compatible:oa-1::gpt-4.1-mini',
      name: 'GPT 4.1 Mini',
      type: 'llm',
      provider: 'openai-compatible:oa-1',
      llmProtocol: 'chat-completions',
      llmProtocolCheckedAt: '2026-01-01T00:00:00.000Z',
      price: 0,
      enabled: true,
    }

    expect(shouldReprobeModelLlmProtocol({
      providerId: 'openai-compatible:oa-1',
      originalModel,
      nextModelId: 'gpt-4.1-mini',
    })).toBe(false)

    expect(shouldReprobeModelLlmProtocol({
      providerId: 'openai-compatible:oa-1',
      originalModel,
      nextModelId: 'gpt-4.1',
    })).toBe(true)

    expect(shouldReprobeModelLlmProtocol({
      providerId: 'gemini-compatible:gm-1',
      originalModel,
      nextModelId: 'gpt-4.1',
    })).toBe(false)
  })

  it('parses successful probe response payload', async () => {
    const fetchMock = vi.fn(async () => new Response(JSON.stringify({
      success: true,
      protocol: 'responses',
      checkedAt: '2026-03-05T10:00:00.000Z',
    }), { status: 200 }))
    vi.stubGlobal('fetch', fetchMock)

    const result = await probeModelLlmProtocolViaApi({
      providerId: 'openai-compatible:oa-1',
      modelId: 'gpt-4.1-mini',
    })

    expect(result).toEqual({
      llmProtocol: 'responses',
      llmProtocolCheckedAt: '2026-03-05T10:00:00.000Z',
    })
  })

  it('throws probe failure code on unsuccessful probe response', async () => {
    const fetchMock = vi.fn(async () => new Response(JSON.stringify({
      success: false,
      code: 'PROBE_INCONCLUSIVE',
    }), { status: 200 }))
    vi.stubGlobal('fetch', fetchMock)

    await expect(probeModelLlmProtocolViaApi({
      providerId: 'openai-compatible:oa-1',
      modelId: 'gpt-4.1-mini',
    })).rejects.toThrow('PROBE_INCONCLUSIVE')
  })
})
tests/unit/api-config/provider-card-shell.test.ts (new file, 25 lines)
@@ -0,0 +1,25 @@
import { describe, expect, it } from 'vitest'
import { getCompatibilityLayerBadgeLabel } from '@/app/[locale]/profile/components/api-config/provider-card/ProviderCardShell'

describe('provider card shell compatibility layer badge', () => {
  const t = (key: string): string => {
    if (key === 'compatibilityLayerOpenAI') return 'OpenAI 兼容层'
    if (key === 'compatibilityLayerGemini') return 'Gemini 兼容层'
    return key
  }

  it('shows OpenAI compatible layer label for openai-compatible providers', () => {
    expect(getCompatibilityLayerBadgeLabel('openai-compatible:oa-1', t)).toBe('OpenAI 兼容层')
  })

  it('shows Gemini compatible layer label for gemini-compatible providers', () => {
    expect(getCompatibilityLayerBadgeLabel('gemini-compatible:gm-1', t)).toBe('Gemini 兼容层')
  })

  it('does not show compatibility label for preset providers', () => {
    expect(getCompatibilityLayerBadgeLabel('google', t)).toBeNull()
    expect(getCompatibilityLayerBadgeLabel('ark', t)).toBeNull()
    expect(getCompatibilityLayerBadgeLabel('bailian', t)).toBeNull()
    expect(getCompatibilityLayerBadgeLabel('siliconflow', t)).toBeNull()
  })
})
tests/unit/api-config/use-api-config-filters.test.ts (new file, 119 lines)
@@ -0,0 +1,119 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'

vi.mock('react', async () => {
  const actual = await vi.importActual<typeof import('react')>('react')
  return {
    ...actual,
    useMemo: <T,>(factory: () => T) => factory(),
  }
})

import { useApiConfigFilters } from '@/app/[locale]/profile/components/api-config-tab/hooks/useApiConfigFilters'
import type { CustomModel, Provider } from '@/app/[locale]/profile/components/api-config/types'

describe('api config filters', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })

  it('merges audio providers into modelProviders and removes audioProviders output', () => {
    const providers: Provider[] = [
      { id: 'fal', name: 'FAL', hasApiKey: true, apiKey: 'k-fal' },
      { id: 'bailian', name: 'Alibaba Bailian', hasApiKey: true, apiKey: 'k-bl' },
    ]
    const models: CustomModel[] = [
      {
        modelId: 'fal-ai/index-tts-2/text-to-speech',
        modelKey: 'fal::fal-ai/index-tts-2/text-to-speech',
        name: 'IndexTTS 2',
        type: 'audio',
        provider: 'fal',
        price: 0,
        enabled: true,
      },
      {
        modelId: 'qwen3-tts-vd-2026-01-26',
        modelKey: 'bailian::qwen3-tts-vd-2026-01-26',
        name: 'Qwen3 TTS',
        type: 'audio',
        provider: 'bailian',
        price: 0,
        enabled: true,
      },
      {
        modelId: 'qwen-voice-design',
        modelKey: 'bailian::qwen-voice-design',
        name: 'Qwen Voice Design',
        type: 'audio',
        provider: 'bailian',
        price: 0,
        enabled: true,
      },
      {
        modelId: 'qwen3.5-flash',
        modelKey: 'bailian::qwen3.5-flash',
        name: 'Qwen 3.5 Flash',
        type: 'llm',
        provider: 'bailian',
        price: 0,
        enabled: true,
      },
    ]

    const result = useApiConfigFilters({ providers, models })
    const providerIds = result.modelProviders.map((provider) => provider.id)
    const audioDefaultIds = result.getEnabledModelsByType('audio').map((model) => model.modelId)

    expect(providerIds).toEqual(['fal', 'bailian'])
    expect(audioDefaultIds).toEqual(expect.arrayContaining([
      'fal-ai/index-tts-2/text-to-speech',
      'qwen3-tts-vd-2026-01-26',
    ]))
    expect(audioDefaultIds).not.toContain('qwen-voice-design')
    expect(Object.prototype.hasOwnProperty.call(result, 'audioProviders')).toBe(false)
  })

  it('keeps modelProviders order aligned with providers input order', () => {
    const providers: Provider[] = [
      { id: 'google', name: 'Google AI Studio', hasApiKey: true, apiKey: 'k-google' },
      { id: 'openai-compatible:oa-2', name: 'OpenAI B', hasApiKey: true, apiKey: 'k-oa2' },
      { id: 'ark', name: 'Volcengine Ark', hasApiKey: true, apiKey: 'k-ark' },
    ]
    const models: CustomModel[] = [
      {
        modelId: 'gemini-3.1-pro-preview',
        modelKey: 'google::gemini-3.1-pro-preview',
        name: 'Gemini 3.1 Pro',
        type: 'llm',
        provider: 'google',
        price: 0,
        enabled: true,
      },
      {
        modelId: 'gpt-4.1',
        modelKey: 'openai-compatible:oa-2::gpt-4.1',
        name: 'GPT 4.1',
        type: 'llm',
        provider: 'openai-compatible:oa-2',
        price: 0,
        enabled: true,
      },
      {
        modelId: 'doubao-seed-2-0-pro-260215',
        modelKey: 'ark::doubao-seed-2-0-pro-260215',
        name: 'Doubao Seed 2.0 Pro',
        type: 'llm',
        provider: 'ark',
        price: 0,
        enabled: true,
      },
    ]

    const result = useApiConfigFilters({ providers, models })
    expect(result.modelProviders.map((provider) => provider.id)).toEqual([
      'google',
      'openai-compatible:oa-2',
      'ark',
    ])
  })
})
tests/unit/api-config/use-assistant-chat-saved-events.test.ts (new file, 100 lines)
@@ -0,0 +1,100 @@
import type { UIMessage } from 'ai'
import { describe, expect, it } from 'vitest'
import { collectSavedEvents } from '@/components/assistant/useAssistantChat'

describe('assistant chat saved events parser', () => {
  it('parses single save tool output event', () => {
    const messages = [{
      id: 'm1',
      role: 'assistant',
      parts: [{
        type: 'tool-saveModelTemplate',
        state: 'output-available',
        output: {
          status: 'saved',
          savedModelKey: 'openai-compatible:oa-1::veo3-fast',
          draftModel: {
            modelId: 'veo3-fast',
            name: 'Veo 3 Fast',
            type: 'video',
            provider: 'openai-compatible:oa-1',
            compatMediaTemplate: {
              version: 1,
              mediaType: 'video',
              mode: 'async',
              create: { method: 'POST', path: '/video/create' },
              status: { method: 'GET', path: '/video/query?id={{task_id}}' },
              response: { taskIdPath: '$.id', statusPath: '$.status' },
              polling: { intervalMs: 5000, timeoutMs: 600000, doneStates: ['completed'], failStates: ['failed'] },
            },
          },
        },
      }],
    }] as unknown as UIMessage[]

    const events = collectSavedEvents(messages)

    expect(events).toHaveLength(1)
    expect(events[0]?.savedModelKey).toBe('openai-compatible:oa-1::veo3-fast')
    expect(events[0]?.draftModel?.modelId).toBe('veo3-fast')
  })

  it('parses batch save tool output events', () => {
    const messages = [{
      id: 'm2',
      role: 'assistant',
      parts: [{
        type: 'tool-saveModelTemplates',
        state: 'output-available',
        output: {
          status: 'saved',
          savedModelKeys: [
            'openai-compatible:oa-1::veo3-fast',
            'openai-compatible:oa-1::veo3.1-fast',
          ],
          draftModels: [
            {
              modelId: 'veo3-fast',
              name: 'Veo 3 Fast',
              type: 'video',
              provider: 'openai-compatible:oa-1',
              compatMediaTemplate: {
                version: 1,
                mediaType: 'video',
                mode: 'async',
                create: { method: 'POST', path: '/video/create' },
                status: { method: 'GET', path: '/video/query?id={{task_id}}' },
                response: { taskIdPath: '$.id', statusPath: '$.status' },
                polling: { intervalMs: 5000, timeoutMs: 600000, doneStates: ['completed'], failStates: ['failed'] },
              },
            },
            {
              modelId: 'veo3.1-fast',
              name: 'Veo 3.1 Fast',
              type: 'video',
              provider: 'openai-compatible:oa-1',
              compatMediaTemplate: {
                version: 1,
                mediaType: 'video',
                mode: 'async',
                create: { method: 'POST', path: '/video/create' },
                status: { method: 'GET', path: '/video/query?id={{task_id}}' },
                response: { taskIdPath: '$.id', statusPath: '$.status' },
                polling: { intervalMs: 5000, timeoutMs: 600000, doneStates: ['completed'], failStates: ['failed'] },
              },
            },
          ],
        },
      }],
    }] as unknown as UIMessage[]

    const events = collectSavedEvents(messages)

    expect(events).toHaveLength(2)
    expect(events.map((item) => item.savedModelKey)).toEqual([
      'openai-compatible:oa-1::veo3-fast',
      'openai-compatible:oa-1::veo3.1-fast',
    ])
    expect(events[1]?.draftModel?.name).toBe('Veo 3.1 Fast')
  })
})
tests/unit/api-config/use-providers-order.test.ts (new file, 65 lines)
@@ -0,0 +1,65 @@
import { describe, expect, it } from 'vitest'
import { mergeProvidersForDisplay } from '@/app/[locale]/profile/components/api-config/hooks'
import type { Provider } from '@/app/[locale]/profile/components/api-config/types'

describe('useProviders provider order merge', () => {
  it('preserves saved providers order and appends missing presets at the end', () => {
    const presetProviders: Provider[] = [
      { id: 'ark', name: '火山引擎 Ark' },
      { id: 'google', name: 'Google AI Studio' },
      { id: 'bailian', name: '阿里云百炼' },
    ]
    const savedProviders: Provider[] = [
      { id: 'google', name: 'Google Legacy Name', apiKey: 'google-key', hidden: true },
      { id: 'openai-compatible:oa-2', name: 'OpenAI B', baseUrl: 'https://oa-b.test', apiKey: 'oa-key' },
      { id: 'ark', name: 'Ark Legacy Name', apiKey: 'ark-key' },
    ]

    const merged = mergeProvidersForDisplay(savedProviders, presetProviders)
    expect(merged.map((provider) => provider.id)).toEqual([
      'google',
      'openai-compatible:oa-2',
      'ark',
      'bailian',
    ])
    expect(merged[0]?.hidden).toBe(true)
  })

  it('uses preset localized names for preset providers while keeping apiKey/baseUrl from saved data', () => {
    const presetProviders: Provider[] = [
      { id: 'google', name: 'Google AI Studio', baseUrl: 'https://google.default' },
    ]
    const savedProviders: Provider[] = [
      { id: 'google', name: 'Google Old Name', baseUrl: 'https://google.custom', apiKey: 'google-key' },
    ]

    const merged = mergeProvidersForDisplay(savedProviders, presetProviders)
    expect(merged).toHaveLength(1)
    expect(merged[0]).toMatchObject({
      id: 'google',
      name: 'Google AI Studio',
      baseUrl: 'https://google.custom',
      apiKey: 'google-key',
      hasApiKey: true,
    })
  })

  it('uses preset official baseUrl for minimax even when saved payload contains a custom baseUrl', () => {
    const presetProviders: Provider[] = [
      { id: 'minimax', name: 'MiniMax Hailuo', baseUrl: 'https://api.minimaxi.com/v1' },
    ]
    const savedProviders: Provider[] = [
      { id: 'minimax', name: 'MiniMax Legacy', baseUrl: 'https://custom.minimax.proxy/v1', apiKey: 'mm-key' },
    ]

    const merged = mergeProvidersForDisplay(savedProviders, presetProviders)
    expect(merged).toHaveLength(1)
    expect(merged[0]).toMatchObject({
      id: 'minimax',
      name: 'MiniMax Hailuo',
      baseUrl: 'https://api.minimaxi.com/v1',
      apiKey: 'mm-key',
      hasApiKey: true,
    })
  })
})