feat: Strengthen the testing framework
This commit is contained in:
154
tests/integration/provider/fal-provider.contract.test.ts
Normal file
154
tests/integration/provider/fal-provider.contract.test.ts
Normal file
@@ -0,0 +1,154 @@
|
||||
import { afterEach, beforeEach, describe, expect, it } from 'vitest'
|
||||
import { queryFalStatus, submitFalTask } from '@/lib/async-submit'
|
||||
import { startScenarioServer } from '../../helpers/fakes/scenario-server'
|
||||
|
||||
describe('provider contract - fal queue', () => {
|
||||
let server: Awaited<ReturnType<typeof startScenarioServer>> | null = null
|
||||
|
||||
beforeEach(async () => {
|
||||
server = await startScenarioServer()
|
||||
process.env.FAL_QUEUE_BASE_URL = `${server.baseUrl}/fal`
|
||||
})
|
||||
|
||||
afterEach(async () => {
|
||||
delete process.env.FAL_QUEUE_BASE_URL
|
||||
await server?.close()
|
||||
server = null
|
||||
})
|
||||
|
||||
it('submits the expected auth header and json payload', async () => {
|
||||
server!.defineScenario({
|
||||
method: 'POST',
|
||||
path: '/fal/fal-ai/nano-banana-pro',
|
||||
mode: 'success',
|
||||
submitResponse: {
|
||||
status: 200,
|
||||
body: { request_id: 'req_image_1' },
|
||||
},
|
||||
})
|
||||
|
||||
const requestId = await submitFalTask(
|
||||
'fal-ai/nano-banana-pro',
|
||||
{
|
||||
prompt: 'generate image',
|
||||
image_urls: ['data:image/png;base64,AAAA'],
|
||||
},
|
||||
'fal-key-1',
|
||||
)
|
||||
|
||||
expect(requestId).toBe('req_image_1')
|
||||
const requests = server!.getRequests('POST', '/fal/fal-ai/nano-banana-pro')
|
||||
expect(requests).toHaveLength(1)
|
||||
expect(requests[0]?.headers.authorization).toBe('Key fal-key-1')
|
||||
expect(JSON.parse(requests[0]?.bodyText || '{}')).toEqual({
|
||||
prompt: 'generate image',
|
||||
image_urls: ['data:image/png;base64,AAAA'],
|
||||
})
|
||||
})
|
||||
|
||||
it('treats transient status failure as pending and completes after retry', async () => {
|
||||
server!.defineScenario({
|
||||
method: 'GET',
|
||||
path: '/fal/fal-ai/veo3.1/requests/req_video_1/status',
|
||||
mode: 'retryable_error_then_success',
|
||||
pollSequence: [
|
||||
{ status: 503, body: { error: 'upstream unavailable' } },
|
||||
{ status: 200, body: { status: 'COMPLETED' } },
|
||||
],
|
||||
})
|
||||
server!.defineScenario({
|
||||
method: 'GET',
|
||||
path: '/fal/fal-ai/veo3.1/fast/image-to-video/requests/req_video_1',
|
||||
mode: 'success',
|
||||
submitResponse: {
|
||||
status: 200,
|
||||
body: {
|
||||
video: { url: 'https://cdn.local/video.mp4' },
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
const first = await queryFalStatus('fal-ai/veo3.1/fast/image-to-video', 'req_video_1', 'fal-key-2')
|
||||
const second = await queryFalStatus('fal-ai/veo3.1/fast/image-to-video', 'req_video_1', 'fal-key-2')
|
||||
|
||||
expect(first).toEqual({
|
||||
status: 'IN_PROGRESS',
|
||||
completed: false,
|
||||
failed: false,
|
||||
})
|
||||
expect(second).toEqual({
|
||||
status: 'COMPLETED',
|
||||
completed: true,
|
||||
failed: false,
|
||||
resultUrl: 'https://cdn.local/video.mp4',
|
||||
})
|
||||
})
|
||||
|
||||
it('marks a failed status response as failed with explicit provider error', async () => {
|
||||
server!.defineScenario({
|
||||
method: 'GET',
|
||||
path: '/fal/fal-ai/veo3.1/requests/req_failed/status',
|
||||
mode: 'fatal_error',
|
||||
submitResponse: {
|
||||
status: 200,
|
||||
body: {
|
||||
status: 'FAILED',
|
||||
error: 'content moderation failed',
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
const result = await queryFalStatus('fal-ai/veo3.1/fast/image-to-video', 'req_failed', 'fal-key-3')
|
||||
expect(result).toEqual({
|
||||
status: 'FAILED',
|
||||
completed: false,
|
||||
failed: true,
|
||||
error: 'content moderation failed',
|
||||
})
|
||||
})
|
||||
|
||||
it('fails explicitly when submit response is malformed', async () => {
|
||||
server!.defineScenario({
|
||||
method: 'POST',
|
||||
path: '/fal/fal-ai/nano-banana-pro',
|
||||
mode: 'malformed_response',
|
||||
submitResponse: {
|
||||
status: 200,
|
||||
body: { ok: true },
|
||||
},
|
||||
})
|
||||
|
||||
await expect(
|
||||
submitFalTask('fal-ai/nano-banana-pro', { prompt: 'bad response' }, 'fal-key-4'),
|
||||
).rejects.toThrow('FAL未返回request_id')
|
||||
})
|
||||
|
||||
it('treats completed result without media url as failed', async () => {
|
||||
server!.defineScenario({
|
||||
method: 'GET',
|
||||
path: '/fal/fal-ai/nano-banana-pro/requests/req_no_media/status',
|
||||
mode: 'queued_then_success',
|
||||
submitResponse: {
|
||||
status: 200,
|
||||
body: { status: 'COMPLETED' },
|
||||
},
|
||||
})
|
||||
server!.defineScenario({
|
||||
method: 'GET',
|
||||
path: '/fal/fal-ai/nano-banana-pro/requests/req_no_media',
|
||||
mode: 'malformed_response',
|
||||
submitResponse: {
|
||||
status: 200,
|
||||
body: { images: [] },
|
||||
},
|
||||
})
|
||||
|
||||
const result = await queryFalStatus('fal-ai/nano-banana-pro', 'req_no_media', 'fal-key-5')
|
||||
expect(result).toEqual({
|
||||
status: 'COMPLETED',
|
||||
completed: true,
|
||||
failed: false,
|
||||
resultUrl: undefined,
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -0,0 +1,207 @@
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
import { generateVideoViaOpenAICompatTemplate } from '@/lib/model-gateway/openai-compat/template-video'
|
||||
import { pollAsyncTask } from '@/lib/async-poll'
|
||||
import { startScenarioServer } from '../../helpers/fakes/scenario-server'
|
||||
|
||||
const getProviderConfigMock = vi.hoisted(() => vi.fn())
|
||||
const getUserModelsMock = vi.hoisted(() => vi.fn())
|
||||
|
||||
vi.mock('@/lib/api-config', () => ({
|
||||
getProviderConfig: getProviderConfigMock,
|
||||
getUserModels: getUserModelsMock,
|
||||
}))
|
||||
|
||||
function encode(value: string): string {
|
||||
return Buffer.from(value, 'utf8').toString('base64url')
|
||||
}
|
||||
|
||||
describe('provider contract - openai compatible media template', () => {
|
||||
let server: Awaited<ReturnType<typeof startScenarioServer>> | null = null
|
||||
|
||||
beforeEach(async () => {
|
||||
server = await startScenarioServer()
|
||||
vi.clearAllMocks()
|
||||
getProviderConfigMock.mockResolvedValue({
|
||||
id: 'openai-compatible:provider-local',
|
||||
apiKey: 'sk-local',
|
||||
baseUrl: `${server.baseUrl}/compat/v1`,
|
||||
})
|
||||
})
|
||||
|
||||
afterEach(async () => {
|
||||
await server?.close()
|
||||
server = null
|
||||
})
|
||||
|
||||
it('renders create request against provider baseUrl and returns OCOMPAT externalId', async () => {
|
||||
server!.defineScenario({
|
||||
method: 'POST',
|
||||
path: '/compat/v1/video/create',
|
||||
mode: 'success',
|
||||
submitResponse: {
|
||||
status: 200,
|
||||
body: { status: 'queued', task_id: 'task_local_1' },
|
||||
},
|
||||
})
|
||||
|
||||
const result = await generateVideoViaOpenAICompatTemplate({
|
||||
userId: 'user-local',
|
||||
providerId: 'openai-compatible:provider-local',
|
||||
modelId: 'veo-local',
|
||||
modelKey: 'openai-compatible:provider-local::veo-local',
|
||||
imageUrl: 'data:image/png;base64,AAAA',
|
||||
prompt: 'animate this frame',
|
||||
options: {
|
||||
duration: 5,
|
||||
aspectRatio: '9:16',
|
||||
},
|
||||
profile: 'openai-compatible',
|
||||
template: {
|
||||
version: 1,
|
||||
mediaType: 'video',
|
||||
mode: 'async',
|
||||
create: {
|
||||
method: 'POST',
|
||||
path: '/video/create',
|
||||
bodyTemplate: {
|
||||
model: '{{model}}',
|
||||
prompt: '{{prompt}}',
|
||||
image: '{{image}}',
|
||||
duration: '{{duration}}',
|
||||
},
|
||||
},
|
||||
status: { method: 'GET', path: '/video/status/{{task_id}}' },
|
||||
response: {
|
||||
taskIdPath: '$.task_id',
|
||||
statusPath: '$.status',
|
||||
},
|
||||
polling: {
|
||||
intervalMs: 1000,
|
||||
timeoutMs: 30_000,
|
||||
doneStates: ['done'],
|
||||
failStates: ['failed'],
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
expect(result).toMatchObject({
|
||||
success: true,
|
||||
async: true,
|
||||
requestId: 'task_local_1',
|
||||
externalId: `OCOMPAT:VIDEO:b64_${encode('openai-compatible:provider-local')}:${encode('veo-local')}:task_local_1`,
|
||||
})
|
||||
|
||||
const requests = server!.getRequests('POST', '/compat/v1/video/create')
|
||||
expect(requests).toHaveLength(1)
|
||||
expect(requests[0]?.headers.authorization).toBe('Bearer sk-local')
|
||||
expect(JSON.parse(requests[0]?.bodyText || '{}')).toEqual({
|
||||
model: 'veo-local',
|
||||
prompt: 'animate this frame',
|
||||
image: 'data:image/png;base64,AAAA',
|
||||
duration: 5,
|
||||
})
|
||||
})
|
||||
|
||||
it('polls localhost provider status and falls back to content endpoint when output url is missing', async () => {
|
||||
getUserModelsMock.mockResolvedValue([
|
||||
{
|
||||
modelKey: 'openai-compatible:provider-local::veo-local',
|
||||
modelId: 'veo-local',
|
||||
name: 'Local Veo',
|
||||
type: 'video',
|
||||
provider: 'openai-compatible:provider-local',
|
||||
price: 0,
|
||||
compatMediaTemplate: {
|
||||
version: 1,
|
||||
mediaType: 'video',
|
||||
mode: 'async',
|
||||
create: { method: 'POST', path: '/video/create' },
|
||||
status: { method: 'GET', path: '/video/status/{{task_id}}' },
|
||||
content: { method: 'GET', path: '/video/content/{{task_id}}' },
|
||||
response: {
|
||||
statusPath: '$.status',
|
||||
},
|
||||
polling: {
|
||||
intervalMs: 1000,
|
||||
timeoutMs: 30_000,
|
||||
doneStates: ['done'],
|
||||
failStates: ['failed'],
|
||||
},
|
||||
},
|
||||
},
|
||||
])
|
||||
server!.defineScenario({
|
||||
method: 'GET',
|
||||
path: '/compat/v1/video/status/task_local_2',
|
||||
mode: 'queued_then_success',
|
||||
pollSequence: [
|
||||
{ status: 200, body: { status: 'running' } },
|
||||
{ status: 200, body: { status: 'done' } },
|
||||
],
|
||||
})
|
||||
|
||||
const first = await pollAsyncTask(
|
||||
`OCOMPAT:VIDEO:${encode('openai-compatible:provider-local')}:${encode('openai-compatible:provider-local::veo-local')}:task_local_2`,
|
||||
'user-local',
|
||||
)
|
||||
const second = await pollAsyncTask(
|
||||
`OCOMPAT:VIDEO:${encode('openai-compatible:provider-local')}:${encode('openai-compatible:provider-local::veo-local')}:task_local_2`,
|
||||
'user-local',
|
||||
)
|
||||
|
||||
expect(first).toEqual({ status: 'pending' })
|
||||
expect(second).toEqual({
|
||||
status: 'completed',
|
||||
resultUrl: `${server!.baseUrl}/compat/v1/video/content/task_local_2`,
|
||||
videoUrl: `${server!.baseUrl}/compat/v1/video/content/task_local_2`,
|
||||
downloadHeaders: {
|
||||
Authorization: 'Bearer sk-local',
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
it('fails explicitly when async create response omits task id', async () => {
|
||||
server!.defineScenario({
|
||||
method: 'POST',
|
||||
path: '/compat/v1/video/create',
|
||||
mode: 'malformed_response',
|
||||
submitResponse: {
|
||||
status: 200,
|
||||
body: { status: 'queued' },
|
||||
},
|
||||
})
|
||||
|
||||
await expect(
|
||||
generateVideoViaOpenAICompatTemplate({
|
||||
userId: 'user-local',
|
||||
providerId: 'openai-compatible:provider-local',
|
||||
modelId: 'veo-local',
|
||||
modelKey: 'openai-compatible:provider-local::veo-local',
|
||||
imageUrl: 'data:image/png;base64,AAAA',
|
||||
prompt: 'bad create payload',
|
||||
profile: 'openai-compatible',
|
||||
template: {
|
||||
version: 1,
|
||||
mediaType: 'video',
|
||||
mode: 'async',
|
||||
create: {
|
||||
method: 'POST',
|
||||
path: '/video/create',
|
||||
bodyTemplate: { prompt: '{{prompt}}' },
|
||||
},
|
||||
status: { method: 'GET', path: '/video/status/{{task_id}}' },
|
||||
response: {
|
||||
taskIdPath: '$.task_id',
|
||||
statusPath: '$.status',
|
||||
},
|
||||
polling: {
|
||||
intervalMs: 1000,
|
||||
timeoutMs: 30_000,
|
||||
doneStates: ['done'],
|
||||
failStates: ['failed'],
|
||||
},
|
||||
},
|
||||
}),
|
||||
).rejects.toThrow('OPENAI_COMPAT_VIDEO_TEMPLATE_TASK_ID_NOT_FOUND')
|
||||
})
|
||||
})
|
||||
Reference in New Issue
Block a user