feat: initial release v0.3.0

This commit is contained in:
saturn
2026-03-08 03:15:27 +08:00
commit 881ed44996
1311 changed files with 225407 additions and 0 deletions

View File

@@ -0,0 +1,58 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// vi.hoisted guarantees these mock fns are created before the vi.mock factory
// below runs (vi.mock calls are hoisted to the top of the module by vitest).
const chatCompletionMock = vi.hoisted(() => vi.fn(async () => ({ id: 'text-completion' })))
const chatCompletionWithVisionMock = vi.hoisted(() => vi.fn(async () => ({ id: 'vision-completion' })))
// Replace the real llm-client so the gateway wrappers are exercised in isolation:
// each export resolves to a sentinel object the tests can assert on.
vi.mock('@/lib/llm-client', () => ({
  chatCompletion: chatCompletionMock,
  chatCompletionWithVision: chatCompletionWithVisionMock,
}))
import {
runModelGatewayTextCompletion,
runModelGatewayVisionCompletion,
} from '@/lib/model-gateway/llm'
describe('model-gateway llm wrappers', () => {
  // Reset call counts between cases so the per-test call-count assertions stay isolated.
  beforeEach(() => {
    vi.clearAllMocks()
  })

  it('delegates text completion to llm-client chatCompletion', async () => {
    const messages = [{ role: 'user', content: 'hello' }]
    const options = { temperature: 0.2 }

    const response = await runModelGatewayTextCompletion({
      userId: 'user-1',
      model: 'openai-compatible::gpt-image-1',
      messages,
      options,
    })

    // The wrapper forwards its fields positionally to chatCompletion, untouched.
    expect(chatCompletionMock).toHaveBeenCalledTimes(1)
    expect(chatCompletionMock).toHaveBeenCalledWith(
      'user-1',
      'openai-compatible::gpt-image-1',
      messages,
      options,
    )
    expect(response).toEqual({ id: 'text-completion' })
  })

  it('delegates vision completion to llm-client chatCompletionWithVision', async () => {
    const imageUrls = ['https://example.com/a.png']
    const options = { temperature: 0.4 }

    const response = await runModelGatewayVisionCompletion({
      userId: 'user-1',
      model: 'google::gemini-3-pro',
      prompt: 'analyze image',
      imageUrls,
      options,
    })

    // Same positional pass-through contract for the vision variant.
    expect(chatCompletionWithVisionMock).toHaveBeenCalledTimes(1)
    expect(chatCompletionWithVisionMock).toHaveBeenCalledWith(
      'user-1',
      'google::gemini-3-pro',
      'analyze image',
      imageUrls,
      options,
    )
    expect(response).toEqual({ id: 'vision-completion' })
  })
})

View File

@@ -0,0 +1,67 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// vi.hoisted so the mock exists before vitest hoists the vi.mock factory below.
// Returns a fixed client config; the executor under test should derive its
// endpoint URL from this baseUrl.
const resolveOpenAICompatClientConfigMock = vi.hoisted(() =>
  vi.fn(async () => ({
    providerId: 'openai-compatible:node-1',
    baseUrl: 'https://compat.example.com/v1',
    apiKey: 'sk-test',
  })),
)
// Stub config resolution so no real provider lookup (db/env) is needed.
vi.mock('@/lib/model-gateway/openai-compat/common', () => ({
  resolveOpenAICompatClientConfig: resolveOpenAICompatClientConfigMock,
}))
import { runOpenAICompatResponsesCompletion } from '@/lib/model-gateway/openai-compat/responses'
describe('model-gateway openai-compat responses executor', () => {
  beforeEach(() => {
    // vi.clearAllMocks() does NOT undo vi.stubGlobal, so the fetch stub from a
    // previous test would otherwise leak into later tests. Unstub explicitly.
    vi.unstubAllGlobals()
    vi.clearAllMocks()
  })

  it('converts responses payload to normalized chat completion', async () => {
    // Upstream /responses payload: one reasoning item + one text item + usage.
    const fetchMock = vi.fn(async () => new Response(JSON.stringify({
      output: [
        { type: 'reasoning', text: 'think-' },
        { type: 'output_text', text: 'hello' },
      ],
      usage: {
        input_tokens: 12,
        output_tokens: 7,
      },
    }), { status: 200 }))
    vi.stubGlobal('fetch', fetchMock)

    const completion = await runOpenAICompatResponsesCompletion({
      userId: 'user-1',
      providerId: 'openai-compatible:node-1',
      modelId: 'gpt-4.1-mini',
      messages: [{ role: 'user', content: 'hello' }],
      temperature: 0.2,
    })

    // Output items are normalized into chat-completion content parts
    // ('output_text' -> 'text'), and usage maps to prompt/completion tokens.
    expect(completion.choices[0]?.message?.content).toEqual([
      { type: 'reasoning', text: 'think-' },
      { type: 'text', text: 'hello' },
    ])
    expect(completion.usage?.prompt_tokens).toBe(12)
    expect(completion.usage?.completion_tokens).toBe(7)
    // The executor must call the provider's /responses endpoint derived from the
    // mocked baseUrl.
    const firstCall = fetchMock.mock.calls[0] as unknown[] | undefined
    expect(String(firstCall?.[0])).toBe('https://compat.example.com/v1/responses')
  })

  it('throws status-bearing error when responses endpoint fails', async () => {
    const fetchMock = vi.fn(async () => new Response('not supported', { status: 404 }))
    vi.stubGlobal('fetch', fetchMock)

    // Error message must carry the upstream HTTP status for caller diagnostics.
    await expect(
      runOpenAICompatResponsesCompletion({
        userId: 'user-1',
        providerId: 'openai-compatible:node-1',
        modelId: 'gpt-4.1-mini',
        messages: [{ role: 'user', content: 'hello' }],
        temperature: 0.2,
      }),
    ).rejects.toThrow('OPENAI_COMPAT_RESPONSES_FAILED: 404')
  })
})

View File

@@ -0,0 +1,189 @@
import { describe, expect, it } from 'vitest'
import {
buildRenderedTemplateRequest,
buildTemplateVariables,
extractTemplateError,
readJsonPath,
renderTemplateString,
renderTemplateValue,
resolveTemplateEndpointUrl,
} from '@/lib/openai-compat-template-runtime'
describe('model-gateway openai-compat template renderer', () => {
  // {{model}}/{{prompt}}/{{task_id}} placeholders substitute into plain strings
  // and nested body values alike. Note '{{images}}' renders to an array value —
  // [] here even though a single `image` was supplied; presumably the plural
  // variable aggregates a separate image list — TODO confirm against runtime impl.
  it('renders placeholders in strings and nested body values', () => {
    const variables = buildTemplateVariables({
      model: 'veo3.1',
      prompt: 'a cat running',
      image: 'https://a.test/cat.png',
      taskId: 'task_1',
    })
    expect(renderTemplateString('/videos/{{task_id}}', variables)).toBe('/videos/task_1')
    expect(renderTemplateValue({
      model: '{{model}}',
      prompt: '{{prompt}}',
      images: '{{images}}',
      nested: [{ value: '{{task_id}}' }],
    }, variables)).toEqual({
      model: 'veo3.1',
      prompt: 'a cat running',
      images: [],
      nested: [{ value: 'task_1' }],
    })
  })

  // A leading-slash template path is appended to the base URL (not resolved as a
  // host-root path), and the default auth header + JSON content type are injected.
  it('resolves relative path against base url and injects auth header', async () => {
    const request = await buildRenderedTemplateRequest({
      baseUrl: 'https://compat.example.com/v1/',
      endpoint: {
        method: 'POST',
        path: '/v2/videos/generations',
        contentType: 'application/json',
        bodyTemplate: {
          model: '{{model}}',
          prompt: '{{prompt}}',
        },
      },
      variables: buildTemplateVariables({
        model: 'veo3.1',
        prompt: 'hello',
      }),
      defaultAuthHeader: 'Bearer sk-test',
    })
    expect(resolveTemplateEndpointUrl('https://compat.example.com/v1/', '/v2/videos/generations'))
      .toBe('https://compat.example.com/v1/v2/videos/generations')
    expect(request.endpointUrl).toBe('https://compat.example.com/v1/v2/videos/generations')
    expect(request.headers.Authorization).toBe('Bearer sk-test')
    expect(request.headers['Content-Type']).toBe('application/json')
    // JSON bodies are serialized strings, rendered from the template.
    expect(request.body).toBe(JSON.stringify({
      model: 'veo3.1',
      prompt: 'hello',
    }))
  })

  // When the base URL already ends in /v1 and the template path also starts with
  // /v1, the prefix is collapsed so the URL does not become .../v1/v1/...
  it('deduplicates /v1 prefix when base url already ends with /v1', async () => {
    const request = await buildRenderedTemplateRequest({
      baseUrl: 'https://yunwu.ai/v1',
      endpoint: {
        method: 'GET',
        path: '/v1/video/query?id={{task_id}}',
      },
      variables: buildTemplateVariables({
        model: 'veo_3_1-fast-4K',
        prompt: '',
        taskId: 'task_abc',
      }),
      defaultAuthHeader: 'Bearer sk-test',
    })
    expect(resolveTemplateEndpointUrl('https://yunwu.ai/v1', '/v1/video/create'))
      .toBe('https://yunwu.ai/v1/video/create')
    // Query-string placeholders are rendered too.
    expect(request.endpointUrl).toBe('https://yunwu.ai/v1/video/query?id=task_abc')
    expect(request.headers.Authorization).toBe('Bearer sk-test')
  })

  // multipart/form-data: fields listed in multipartFileFields become File entries
  // (here decoded from a data: URL), and no explicit Content-Type header is set so
  // the fetch layer can add the multipart boundary itself.
  it('builds multipart form data and omits explicit content-type header', async () => {
    // 1x1 transparent PNG as a data URL.
    const dataUrl = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mP8/x8AAwMCAO0p6s8AAAAASUVORK5CYII='
    const request = await buildRenderedTemplateRequest({
      baseUrl: 'https://compat.example.com/v1',
      endpoint: {
        method: 'POST',
        path: '/videos',
        contentType: 'multipart/form-data',
        multipartFileFields: ['input_reference'],
        bodyTemplate: {
          model: '{{model}}',
          prompt: '{{prompt}}',
          input_reference: '{{image}}',
        },
      },
      variables: buildTemplateVariables({
        model: 'veo3.1',
        prompt: 'hello',
        image: dataUrl,
      }),
      defaultAuthHeader: 'Bearer sk-test',
    })
    expect(request.endpointUrl).toBe('https://compat.example.com/v1/videos')
    expect(request.headers.Authorization).toBe('Bearer sk-test')
    expect(request.headers['Content-Type']).toBeUndefined()
    expect(request.body).toBeInstanceOf(FormData)
    const formData = request.body as FormData
    expect(formData.get('model')).toBe('veo3.1')
    expect(formData.get('prompt')).toBe('hello')
    const fileValue = formData.get('input_reference')
    expect(fileValue).toBeInstanceOf(File)
    // Generated filename follows a reference-<index>.<ext> pattern.
    expect((fileValue as File).name).toBe('reference-0.png')
  })

  // x-www-form-urlencoded: body is a URLSearchParams and the header is kept.
  it('builds application/x-www-form-urlencoded bodies', async () => {
    const request = await buildRenderedTemplateRequest({
      baseUrl: 'https://compat.example.com/v1',
      endpoint: {
        method: 'POST',
        path: '/videos/query',
        contentType: 'application/x-www-form-urlencoded',
        bodyTemplate: {
          model: '{{model}}',
          task_id: '{{task_id}}',
        },
      },
      variables: buildTemplateVariables({
        model: 'veo3.1',
        prompt: 'hello',
        taskId: 'task_1',
      }),
    })
    expect(request.headers['Content-Type']).toBe('application/x-www-form-urlencoded')
    expect(request.body).toBeInstanceOf(URLSearchParams)
    expect((request.body as URLSearchParams).toString()).toBe('model=veo3.1&task_id=task_1')
  })

  // readJsonPath supports $.field, array indexing, and nested objects.
  it('reads json path for array/object outputs', () => {
    const payload = {
      data: [{ url: 'https://cdn.test/1.png' }],
      task: {
        status: 'succeeded',
      },
    }
    expect(readJsonPath(payload, '$.data[0].url')).toBe('https://cdn.test/1.png')
    expect(readJsonPath(payload, '$.task.status')).toBe('succeeded')
  })

  // Given a template (first arg), an upstream error payload (second) and the HTTP
  // status (third), the extracted message includes both the status and the
  // upstream-provided localized message.
  it('extracts upstream error message from common payload shape', () => {
    const message = extractTemplateError({
      version: 1,
      mediaType: 'video',
      mode: 'async',
      create: {
        method: 'POST',
        path: '/video/create',
      },
      status: {
        method: 'GET',
        path: '/video/query?id={{task_id}}',
      },
      response: {
        taskIdPath: '$.id',
        statusPath: '$.status',
      },
      polling: {
        intervalMs: 5000,
        timeoutMs: 600000,
        doneStates: ['completed'],
        failStates: ['failed'],
      },
    }, {
      error: {
        message_zh: '当前分组上游负载已饱和,请稍后再试',
      },
    }, 500)
    expect(message).toContain('status 500')
    expect(message).toContain('当前分组上游负载已饱和,请稍后再试')
  })
})

View File

@@ -0,0 +1,70 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// vi.hoisted so the mock exists before vitest hoists the vi.mock factory below.
// The provider uuid here is echoed in the compact externalId the test asserts on.
const resolveConfigMock = vi.hoisted(() => vi.fn(async () => ({
  providerId: 'openai-compatible:33331fb0-2806-4da6-85ff-cd2433b587d0',
  baseUrl: 'https://compat.example.com/v1',
  apiKey: 'sk-test',
})))
// Stub provider-config resolution so no real lookup is performed.
vi.mock('@/lib/model-gateway/openai-compat/common', () => ({
  resolveOpenAICompatClientConfig: resolveConfigMock,
}))
import { generateVideoViaOpenAICompatTemplate } from '@/lib/model-gateway/openai-compat/template-video'
describe('openai-compat template video externalId', () => {
  beforeEach(() => {
    // Restore any stubbed globals (fetch) before each test; vi.clearAllMocks()
    // alone does not remove global stubs and the previous code leaked a
    // reassigned globalThis.fetch into every later test.
    vi.unstubAllGlobals()
    vi.clearAllMocks()
  })

  it('encodes compact modelId token for OCOMPAT externalId', async () => {
    // vi.stubGlobal replaces the old `globalThis.fetch = ... as unknown as
    // typeof fetch` direct assignment: no double type assertion, and the stub
    // is restorable via unstubAllGlobals above.
    vi.stubGlobal('fetch', vi.fn(async () => new Response(JSON.stringify({
      id: 'veo3.1-fast:1772734762-6TuDIS8Vvr',
      status: 'pending',
    }), { status: 200 })))

    const result = await generateVideoViaOpenAICompatTemplate({
      userId: 'user-1',
      providerId: 'openai-compatible:33331fb0-2806-4da6-85ff-cd2433b587d0',
      modelId: 'veo3.1-fast',
      modelKey: 'openai-compatible:33331fb0-2806-4da6-85ff-cd2433b587d0::veo3.1-fast',
      imageUrl: 'https://example.com/seed.png',
      prompt: 'animate this image',
      profile: 'openai-compatible',
      template: {
        version: 1,
        mediaType: 'video',
        mode: 'async',
        create: {
          method: 'POST',
          path: '/video/create',
          bodyTemplate: {
            model: '{{model}}',
            prompt: '{{prompt}}',
          },
        },
        status: {
          method: 'GET',
          path: '/video/query?id={{task_id}}',
        },
        response: {
          taskIdPath: '$.id',
          statusPath: '$.status',
        },
        polling: {
          intervalMs: 5000,
          timeoutMs: 600000,
          doneStates: ['completed'],
          failStates: ['failed'],
        },
      },
    })

    expect(result.success).toBe(true)
    expect(result.async).toBe(true)
    // Guard before the non-null assertion below so a missing externalId fails
    // with a clear message rather than a TypeError on .length.
    expect(result.externalId).toBeDefined()
    // externalId embeds the provider uuid token...
    expect(result.externalId).toContain(':u_33331fb0-2806-4da6-85ff-cd2433b587d0:')
    // ...and a base64url of the short modelId, NOT of the full modelKey
    // (the full key would blow the 128-char budget asserted below).
    expect(result.externalId).toContain(`:${Buffer.from('veo3.1-fast', 'utf8').toString('base64url')}:`)
    expect(result.externalId).not.toContain(Buffer.from('openai-compatible:33331fb0-2806-4da6-85ff-cd2433b587d0::veo3.1-fast', 'utf8').toString('base64url'))
    expect(result.externalId!.length).toBeLessThanOrEqual(128)
  })
})

View File

@@ -0,0 +1,27 @@
import { describe, expect, it } from 'vitest'
import { isCompatibleProvider, resolveModelGatewayRoute } from '@/lib/model-gateway'
describe('model-gateway router', () => {
it('routes openai-compatible providers to openai-compat', () => {
expect(isCompatibleProvider('openai-compatible')).toBe(true)
expect(isCompatibleProvider('openai-compatible:oa-1')).toBe(true)
expect(resolveModelGatewayRoute('openai-compatible:oa-1')).toBe('openai-compat')
})
it('keeps gemini-compatible providers on official route', () => {
expect(isCompatibleProvider('gemini-compatible')).toBe(false)
expect(isCompatibleProvider('gemini-compatible:gm-1')).toBe(false)
expect(resolveModelGatewayRoute('gemini-compatible:gm-1')).toBe('official')
})
it('keeps official providers on official route', () => {
expect(isCompatibleProvider('google')).toBe(false)
expect(isCompatibleProvider('ark')).toBe(false)
expect(isCompatibleProvider('bailian')).toBe(false)
expect(isCompatibleProvider('siliconflow')).toBe(false)
expect(resolveModelGatewayRoute('google')).toBe('official')
expect(resolveModelGatewayRoute('ark')).toBe('official')
expect(resolveModelGatewayRoute('bailian')).toBe('official')
expect(resolveModelGatewayRoute('siliconflow')).toBe('official')
})
})