feat: initial release v0.3.0

This commit is contained in: saturn
Committed: 2026-03-08 03:15:27 +08:00
Commit: 881ed44996
1311 changed files with 225407 additions and 0 deletions


@@ -0,0 +1,97 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
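// vi.hoisted lifts this state above the module mocks, so the hoisted
// vi.mock('openai') factory below can close over the same spies the tests assert on.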
const openAIState = vi.hoisted(() => ({
modelList: vi.fn(async () => ({ data: [] })),
create: vi.fn(async () => ({
model: 'gpt-4.1-mini',
choices: [{ message: { content: '2' } }],
})),
}))
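// Stubbed global fetch covering the zero-inference probe endpoints: model
// listing for Bailian/SiliconFlow and the SiliconFlow balance lookup.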
const fetchMock = vi.hoisted(() =>
vi.fn(async (input: unknown) => {
const url = String(input)
if (url.includes('/compatible-mode/v1/models')) {
return new Response(JSON.stringify({ data: [{ id: 'qwen-plus' }] }), { status: 200 })
}
if (url.endsWith('/v1/models')) {
return new Response(JSON.stringify({ data: [{ id: 'Qwen/Qwen3-32B' }] }), { status: 200 })
}
if (url.endsWith('/v1/user/info')) {
return new Response(JSON.stringify({ data: { balance: '9.8000' } }), { status: 200 })
}
return new Response('not-found', { status: 404 })
}),
)
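// Minimal stand-in for the OpenAI SDK: only the surfaces the connection test
// touches (models.list and chat.completions.create) are implemented.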
vi.mock('openai', () => ({
default: class OpenAI {
models = {
list: openAIState.modelList,
}
chat = {
completions: {
create: openAIState.create,
},
}
},
}))
import { testLlmConnection } from '@/lib/user-api/llm-test-connection'
describe('llm test connection', () => {
beforeEach(() => {
vi.clearAllMocks()
vi.stubGlobal('fetch', fetchMock)
})
it('tests openai-compatible provider via openai-style endpoint', async () => {
const result = await testLlmConnection({
provider: 'openai-compatible',
apiKey: 'oa-key',
baseUrl: 'https://compat.example.com/v1',
model: 'gpt-4.1-mini',
})
expect(result.provider).toBe('openai-compatible')
expect(result.message).toBe('openai-compatible 连接成功') // '连接成功' = 'connection succeeded'
expect(result.model).toBe('gpt-4.1-mini')
expect(result.answer).toBe('2')
expect(openAIState.create).toHaveBeenCalledWith({
model: 'gpt-4.1-mini',
messages: [{ role: 'user', content: '1+1等于几只回答数字' }], // 'what is 1+1? reply with just the digit'
max_tokens: 10,
temperature: 0,
})
})
it('requires baseUrl for gemini-compatible provider', async () => {
await expect(testLlmConnection({
provider: 'gemini-compatible',
apiKey: 'gm-key',
})).rejects.toThrow('自定义渠道需要提供 baseUrl') // 'custom channels must provide a baseUrl'
})
it('tests bailian provider via zero-inference probe', async () => {
const result = await testLlmConnection({
provider: 'bailian',
apiKey: 'bl-key',
})
expect(result.provider).toBe('bailian')
expect(result.message).toBe('bailian 连接成功')
expect(result.model).toBe('qwen-plus')
})
it('tests siliconflow provider via zero-inference probes', async () => {
const result = await testLlmConnection({
provider: 'siliconflow',
apiKey: 'sf-key',
})
expect(result.provider).toBe('siliconflow')
expect(result.message).toBe('siliconflow 连接成功')
expect(result.model).toBe('Qwen/Qwen3-32B')
expect(result.answer).toBe('balance=9.8000')
})
})


@@ -0,0 +1,198 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
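// Hoisted so the vi.mock factory below can reference it; points the probe at a
// fixed fake base URL and key instead of real provider config.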
const resolveOpenAICompatClientConfigMock = vi.hoisted(() =>
vi.fn(async () => ({
providerId: 'openai-compatible:node-1',
baseUrl: 'https://compat.example.com/v1',
apiKey: 'sk-test',
})),
)
vi.mock('@/lib/model-gateway/openai-compat/common', () => ({
resolveOpenAICompatClientConfig: resolveOpenAICompatClientConfigMock,
}))
import { probeModelLlmProtocol } from '@/lib/user-api/model-llm-protocol-probe'
describe('user-api model llm protocol probe', () => {
beforeEach(() => {
vi.clearAllMocks()
})
it('returns responses protocol when responses endpoint succeeds', async () => {
const fetchMock = vi.fn(async () => new Response(JSON.stringify({ id: 'resp_1' }), { status: 200 }))
vi.stubGlobal('fetch', fetchMock)
const result = await probeModelLlmProtocol({
userId: 'user-1',
providerId: 'openai-compatible:node-1',
modelId: 'gpt-4.1-mini',
})
expect(result.success).toBe(true)
if (!result.success) return
expect(result.protocol).toBe('responses')
expect(fetchMock).toHaveBeenCalledTimes(1)
const firstCall = fetchMock.mock.calls[0] as unknown[] | undefined
expect(String(firstCall?.[0])).toBe('https://compat.example.com/v1/responses')
})
it('returns chat-completions when responses is unsupported and chat succeeds', async () => {
const fetchMock = vi.fn(async (input: unknown) => {
const url = String(input)
if (url.endsWith('/responses')) return new Response('not found', { status: 404 })
if (url.endsWith('/chat/completions')) return new Response(JSON.stringify({ id: 'chatcmpl_1' }), { status: 200 })
return new Response('unexpected', { status: 500 })
})
vi.stubGlobal('fetch', fetchMock)
const result = await probeModelLlmProtocol({
userId: 'user-1',
providerId: 'openai-compatible:node-1',
modelId: 'gpt-4.1-mini',
})
expect(result.success).toBe(true)
if (!result.success) return
expect(result.protocol).toBe('chat-completions')
expect(result.traces.map((trace) => trace.endpoint)).toEqual(['responses', 'chat-completions'])
})
it('returns chat-completions when responses is rate limited but chat succeeds', async () => {
const fetchMock = vi.fn(async (input: unknown) => {
const url = String(input)
if (url.endsWith('/responses')) return new Response('rate limit', { status: 429 })
if (url.endsWith('/chat/completions')) return new Response(JSON.stringify({ id: 'chatcmpl_1' }), { status: 200 })
return new Response('unexpected', { status: 500 })
})
vi.stubGlobal('fetch', fetchMock)
const result = await probeModelLlmProtocol({
userId: 'user-1',
providerId: 'openai-compatible:node-1',
modelId: 'gpt-4.1-mini',
})
expect(result.success).toBe(true)
if (!result.success) return
expect(result.protocol).toBe('chat-completions')
expect(result.traces[0]?.status).toBe(429)
expect(result.traces[1]?.status).toBe(200)
})
it('treats responses 5xx with not-implemented style message as unsupported', async () => {
const fetchMock = vi.fn(async (input: unknown) => {
const url = String(input)
if (url.endsWith('/responses')) {
return new Response(JSON.stringify({
error: {
message: 'not implemented (request id: x)',
code: 'local:convert_request_failed',
},
}), { status: 500 })
}
if (url.endsWith('/chat/completions')) {
return new Response(JSON.stringify({ id: 'chatcmpl_1' }), { status: 200 })
}
return new Response('unexpected', { status: 500 })
})
vi.stubGlobal('fetch', fetchMock)
const result = await probeModelLlmProtocol({
userId: 'user-1',
providerId: 'openai-compatible:node-1',
modelId: 'gpt-4.1-mini',
})
expect(result.success).toBe(true)
if (!result.success) return
expect(result.protocol).toBe('chat-completions')
})
it('treats responses 400 with unsupported keywords as unsupported', async () => {
const fetchMock = vi.fn(async (input: unknown) => {
const url = String(input)
if (url.endsWith('/responses')) {
return new Response(JSON.stringify({ error: { message: 'unknown endpoint /responses' } }), { status: 400 })
}
if (url.endsWith('/chat/completions')) {
return new Response(JSON.stringify({ id: 'chatcmpl_1' }), { status: 200 })
}
return new Response('unexpected', { status: 500 })
})
vi.stubGlobal('fetch', fetchMock)
const result = await probeModelLlmProtocol({
userId: 'user-1',
providerId: 'openai-compatible:node-1',
modelId: 'gpt-4.1-mini',
})
expect(result.success).toBe(true)
if (!result.success) return
expect(result.protocol).toBe('chat-completions')
})
it('returns chat-completions when responses 422 has no unsupported keywords but chat succeeds', async () => {
const fetchMock = vi.fn(async (input: unknown) => {
const url = String(input)
if (url.endsWith('/responses')) {
return new Response(JSON.stringify({ error: { message: 'invalid payload' } }), { status: 422 })
}
if (url.endsWith('/chat/completions')) {
return new Response(JSON.stringify({ id: 'chatcmpl_1' }), { status: 200 })
}
return new Response('unexpected', { status: 500 })
})
vi.stubGlobal('fetch', fetchMock)
const result = await probeModelLlmProtocol({
userId: 'user-1',
providerId: 'openai-compatible:node-1',
modelId: 'gpt-4.1-mini',
})
expect(result.success).toBe(true)
if (!result.success) return
expect(result.protocol).toBe('chat-completions')
expect(result.traces[0]?.status).toBe(422)
expect(result.traces[1]?.status).toBe(200)
})
it('returns auth failure when responses and chat both return 401', async () => {
const fetchMock = vi.fn(async () => new Response('unauthorized', { status: 401 }))
vi.stubGlobal('fetch', fetchMock)
const result = await probeModelLlmProtocol({
userId: 'user-1',
providerId: 'openai-compatible:node-1',
modelId: 'gpt-4.1-mini',
})
expect(result.success).toBe(false)
if (result.success) return
expect(result.code).toBe('PROBE_AUTH_FAILED')
expect(fetchMock).toHaveBeenCalledTimes(2)
})
it('returns chat-completions when responses auth fails but chat succeeds', async () => {
const fetchMock = vi.fn(async (input: unknown) => {
const url = String(input)
if (url.endsWith('/responses')) return new Response('unauthorized', { status: 401 })
if (url.endsWith('/chat/completions')) return new Response(JSON.stringify({ id: 'chatcmpl_1' }), { status: 200 })
return new Response('unexpected', { status: 500 })
})
vi.stubGlobal('fetch', fetchMock)
const result = await probeModelLlmProtocol({
userId: 'user-1',
providerId: 'openai-compatible:node-1',
modelId: 'gpt-4.1-mini',
})
expect(result.success).toBe(true)
if (!result.success) return
expect(result.protocol).toBe('chat-completions')
expect(fetchMock).toHaveBeenCalledTimes(2)
})
})
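Taken together, these cases pin down the probe's fallback contract: hit /responses first, fall back to /chat/completions on any failure, and surface an auth error only when both legs fail with 401. A minimal sketch of that contract, distilled from the assertions above rather than taken from the implementation:

type ProbeProtocol = 'responses' | 'chat-completions'
type ProbeSummary =
  | { success: true; protocol: ProbeProtocol }
  | { success: false; code: 'PROBE_AUTH_FAILED' }

// Hypothetical condensation of the behavior asserted above; branch order mirrors
// the probe order. Double failures other than 401/401 are not pinned down by
// these tests, so the sketch returns undefined for them.
function summarizeProbe(responsesStatus: number, chatStatus: number): ProbeSummary | undefined {
  const ok = (status: number) => status >= 200 && status < 300
  if (ok(responsesStatus)) return { success: true, protocol: 'responses' }
  if (ok(chatStatus)) return { success: true, protocol: 'chat-completions' }
  if (responsesStatus === 401 && chatStatus === 401) {
    return { success: false, code: 'PROBE_AUTH_FAILED' }
  }
  return undefined
}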


@@ -0,0 +1,88 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
const prismaMock = vi.hoisted(() => ({
userPreference: {
findUnique: vi.fn<(...args: unknown[]) => Promise<{ customProviders: string; customModels: string } | null>>(async () => null),
upsert: vi.fn<(...args: unknown[]) => Promise<unknown>>(async () => ({})),
},
}))
vi.mock('@/lib/prisma', () => ({
prisma: prismaMock,
}))
import { saveModelTemplateConfiguration } from '@/lib/user-api/model-template/save'
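// Extracts the customModels JSON that saveModelTemplateConfiguration handed to
// prisma.userPreference.upsert, so assertions can inspect the persisted shape.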
function readSavedModelsFromUpsert(): Array<Record<string, unknown>> {
const firstCall = prismaMock.userPreference.upsert.mock.calls[0]
if (!firstCall) throw new Error('expected upsert to be called')
const payload = (firstCall as [{ update?: { customModels?: unknown } }])[0]
const raw = payload.update?.customModels
if (typeof raw !== 'string') throw new Error('expected customModels string')
const parsed = JSON.parse(raw) as unknown
if (!Array.isArray(parsed)) throw new Error('expected customModels array')
return parsed as Array<Record<string, unknown>>
}
describe('user-api model template save', () => {
beforeEach(() => {
vi.clearAllMocks()
})
it('preserves existing model fields while updating target model template', async () => {
prismaMock.userPreference.findUnique.mockResolvedValueOnce({
customProviders: JSON.stringify([
{ id: 'openai-compatible:oa-1', name: 'OpenAI Compat' },
]),
customModels: JSON.stringify([
{
modelId: 'veo3.1',
modelKey: 'openai-compatible:oa-1::veo3.1',
name: 'Veo 3.1',
type: 'video',
provider: 'openai-compatible:oa-1',
customPricing: { video: { basePrice: 1.2 } },
capabilities: { video: { durationOptions: [5, 8] } },
},
]),
})
await saveModelTemplateConfiguration({
userId: 'user-1',
providerId: 'openai-compatible:oa-1',
modelId: 'veo3.1',
name: 'Veo 3.1',
type: 'video',
template: {
version: 1,
mediaType: 'video',
mode: 'async',
create: { method: 'POST', path: '/v2/videos/generations' },
status: { method: 'GET', path: '/v2/videos/generations/{{task_id}}' },
response: {
taskIdPath: '$.task_id',
statusPath: '$.status',
},
polling: {
intervalMs: 3000,
timeoutMs: 180000,
doneStates: ['done'],
failStates: ['failed'],
},
},
source: 'ai',
})
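// The merge must keep the pre-existing pricing and capability fields while
// attaching the compat template metadata alongside them.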
const savedModels = readSavedModelsFromUpsert()
const target = savedModels.find((item) => item.modelKey === 'openai-compatible:oa-1::veo3.1')
expect(target).toBeTruthy()
expect(target?.customPricing).toEqual({ video: { basePrice: 1.2 } })
expect(target?.capabilities).toEqual({ video: { durationOptions: [5, 8] } })
expect(target?.compatMediaTemplate).toMatchObject({
mediaType: 'video',
mode: 'async',
})
expect(target?.compatMediaTemplateSource).toBe('ai')
expect(typeof target?.compatMediaTemplateCheckedAt).toBe('string')
})
})


@@ -0,0 +1,231 @@
import { describe, expect, it } from 'vitest'
import { validateOpenAICompatMediaTemplate } from '@/lib/user-api/model-template'
describe('user-api model template schema', () => {
it('accepts valid async video template', () => {
const result = validateOpenAICompatMediaTemplate({
version: 1,
mediaType: 'video',
mode: 'async',
create: {
method: 'POST',
path: '/v2/videos/generations',
contentType: 'application/json',
bodyTemplate: {
model: '{{model}}',
prompt: '{{prompt}}',
},
},
status: {
method: 'GET',
path: '/v2/videos/generations/{{task_id}}',
},
response: {
taskIdPath: '$.task_id',
statusPath: '$.status',
outputUrlPath: '$.video_url',
errorPath: '$.error.message',
},
polling: {
intervalMs: 3000,
timeoutMs: 300000,
doneStates: ['succeeded'],
failStates: ['failed'],
},
})
expect(result.ok).toBe(true)
expect(result.template?.mode).toBe('async')
})
it('rejects unsupported placeholders', () => {
const result = validateOpenAICompatMediaTemplate({
version: 1,
mediaType: 'image',
mode: 'sync',
create: {
method: 'POST',
path: '/images/generations',
contentType: 'application/json',
bodyTemplate: {
model: '{{model}}',
prompt: '{{prompt_text}}',
},
},
response: {
outputUrlPath: '$.data[0].url',
},
})
expect(result.ok).toBe(false)
expect(result.issues.some((issue) => issue.field.includes('bodyTemplate.prompt'))).toBe(true)
})
it('rejects async template missing polling/status fields', () => {
const result = validateOpenAICompatMediaTemplate({
version: 1,
mediaType: 'video',
mode: 'async',
create: {
method: 'POST',
path: '/videos',
contentType: 'application/json',
bodyTemplate: {
model: '{{model}}',
prompt: '{{prompt}}',
},
},
response: {
taskIdPath: '$.id',
},
})
expect(result.ok).toBe(false)
expect(result.issues.map((issue) => issue.field)).toEqual(expect.arrayContaining(['status']))
})
it('rejects async create endpoint without bodyTemplate for POST', () => {
const result = validateOpenAICompatMediaTemplate({
version: 1,
mediaType: 'video',
mode: 'async',
create: {
method: 'POST',
path: '/v1/video/create',
},
status: {
method: 'GET',
path: '/v1/video/query?id={{task_id}}',
},
})
expect(result.ok).toBe(false)
expect(result.issues.map((issue) => issue.field)).toEqual(expect.arrayContaining(['create.bodyTemplate']))
})
it('rejects async status path without task_id placeholder', () => {
const result = validateOpenAICompatMediaTemplate({
version: 1,
mediaType: 'video',
mode: 'async',
create: {
method: 'POST',
path: '/v1/video/create',
bodyTemplate: {
model: '{{model}}',
prompt: '{{prompt}}',
},
},
status: {
method: 'GET',
path: '/v1/video/query',
},
})
expect(result.ok).toBe(false)
expect(result.issues.map((issue) => issue.field)).toEqual(expect.arrayContaining(['status.path']))
})
it('rejects async template when response paths or polling are omitted', () => {
const result = validateOpenAICompatMediaTemplate({
version: 1,
mediaType: 'video',
mode: 'async',
create: {
method: 'POST',
path: '/v1/video/create',
contentType: 'application/json',
bodyTemplate: {
model: '{{model}}',
prompt: '{{prompt}}',
},
},
status: {
method: 'GET',
path: '/v1/video/query?id={{task_id}}',
},
})
expect(result.ok).toBe(false)
expect(result.issues.map((issue) => issue.field)).toEqual(expect.arrayContaining([
'response.taskIdPath',
'response.statusPath',
'polling',
]))
})
it('accepts multipart file field declarations for media templates', () => {
const result = validateOpenAICompatMediaTemplate({
version: 1,
mediaType: 'video',
mode: 'async',
create: {
method: 'POST',
path: '/videos',
contentType: 'multipart/form-data',
multipartFileFields: ['input_reference'],
bodyTemplate: {
model: '{{model}}',
prompt: '{{prompt}}',
input_reference: '{{image}}',
},
},
status: {
method: 'GET',
path: '/videos/{{task_id}}',
},
content: {
method: 'GET',
path: '/videos/{{task_id}}/content',
},
response: {
taskIdPath: '$.id',
statusPath: '$.status',
},
polling: {
intervalMs: 5000,
timeoutMs: 600000,
doneStates: ['completed'],
failStates: ['failed'],
},
})
expect(result.ok).toBe(true)
expect(result.template?.create.multipartFileFields).toEqual(['input_reference'])
})
it('rejects multipart file fields that are not present in bodyTemplate', () => {
const result = validateOpenAICompatMediaTemplate({
version: 1,
mediaType: 'video',
mode: 'async',
create: {
method: 'POST',
path: '/videos',
contentType: 'multipart/form-data',
multipartFileFields: ['input_reference'],
bodyTemplate: {
model: '{{model}}',
prompt: '{{prompt}}',
},
},
status: {
method: 'GET',
path: '/videos/{{task_id}}',
},
response: {
taskIdPath: '$.id',
statusPath: '$.status',
},
polling: {
intervalMs: 5000,
timeoutMs: 600000,
doneStates: ['completed'],
failStates: ['failed'],
},
})
expect(result.ok).toBe(false)
expect(result.issues.some((issue) => issue.field === 'create.multipartFileFields')).toBe(true)
})
})
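Read as a group, these cases imply a template shape roughly like the following. The type below is a hypothetical reconstruction from the test fixtures (field names come from the tests; the real exported type may differ), annotated with the validation rules the suite enforces:

type OpenAICompatMediaTemplateSketch = {
  version: 1
  mediaType: 'image' | 'video'
  mode: 'sync' | 'async'
  create: {
    method: 'GET' | 'POST'
    path: string
    contentType?: string
    // Every multipart file field must also appear as a bodyTemplate key.
    multipartFileFields?: string[]
    // Required for POST creates; placeholder values are limited to a known set
    // ({{model}}, {{prompt}}, {{image}}, {{task_id}} appear in these tests).
    bodyTemplate?: Record<string, unknown>
  }
  // Required in async mode; path must contain the {{task_id}} placeholder.
  status?: { method: 'GET' | 'POST'; path: string }
  content?: { method: 'GET' | 'POST'; path: string }
  response?: {
    taskIdPath?: string // required in async mode
    statusPath?: string // required in async mode
    outputUrlPath?: string
    errorPath?: string
  }
  // Required in async mode.
  polling?: {
    intervalMs: number
    timeoutMs: number
    doneStates: string[]
    failStates: string[]
  }
}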


@@ -0,0 +1,100 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
const openAIState = vi.hoisted(() => ({
create: vi.fn(async () => ({
choices: [{ message: { content: 'pong' } }],
})),
}))
const fetchMock = vi.hoisted(() =>
vi.fn<typeof fetch>(async () => new Response('not-found', { status: 404 })),
)
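// By default no fetch route succeeds (every probe gets a 404), so these tests
// exercise the 'no free probe endpoint' paths; individual tests override fetch,
// and only the OpenAI SDK chat call is mocked for the configured-LLM fallback.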
vi.mock('openai', () => ({
default: class OpenAI {
chat = {
completions: {
create: openAIState.create,
},
}
},
}))
import { testProviderConnection } from '@/lib/user-api/provider-test'
describe('provider test connection compatible probes', () => {
beforeEach(() => {
vi.clearAllMocks()
vi.stubGlobal('fetch', fetchMock)
})
it('asks user to configure llm when free probes are unsupported', async () => {
const result = await testProviderConnection({
apiType: 'openai-compatible',
baseUrl: 'https://compat.example.com/v1',
apiKey: 'compat-key',
})
expect(result.success).toBe(false)
expect(result.steps[0]?.name).toBe('models')
expect(result.steps[0]?.status).toBe('skip')
expect(result.steps[1]?.name).toBe('credits')
expect(result.steps[1]?.status).toBe('skip')
expect(result.steps[2]).toEqual({
name: 'textGen',
status: 'fail',
message: 'No free probe endpoint detected. Please configure an LLM model first, then retry / 未发现可用的免费探测接口,请先配置 LLM 模型后再测试',
})
})
it('falls back to configured llm test when free probes are unsupported', async () => {
const result = await testProviderConnection({
apiType: 'openai-compatible',
baseUrl: 'https://compat.example.com/v1',
apiKey: 'compat-key',
llmModel: 'gpt-4.1-mini',
})
expect(result.success).toBe(true)
expect(result.steps[0]?.status).toBe('skip')
expect(result.steps[1]?.status).toBe('skip')
expect(result.steps[2]).toEqual({
name: 'textGen',
status: 'pass',
model: 'gpt-4.1-mini',
message: 'Response: pong',
})
expect(openAIState.create).toHaveBeenCalledWith({
model: 'gpt-4.1-mini',
messages: [{ role: 'user', content: 'hi' }],
max_tokens: 20,
temperature: 0,
})
})
it('marks success when any free probe endpoint passes', async () => {
fetchMock.mockImplementation(async (input: RequestInfo | URL) => {
const url = String(input)
if (url.endsWith('/v1/models')) {
return new Response(JSON.stringify({ data: [{ id: 'm1' }, { id: 'm2' }] }), { status: 200 })
}
return new Response('not-found', { status: 404 })
})
const result = await testProviderConnection({
apiType: 'gemini-compatible',
baseUrl: 'https://compat.example.com',
apiKey: 'compat-key',
})
expect(result.success).toBe(true)
expect(result.steps[0]).toMatchObject({
name: 'models',
status: 'pass',
message: 'Found 2 models',
})
expect(result.steps[1]?.name).toBe('credits')
expect(result.steps[1]?.status).toBe('skip')
expect(result.steps.length).toBe(2)
})
})


@@ -0,0 +1,135 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { testProviderConnection } from '@/lib/user-api/provider-test'
const fetchMock = vi.hoisted(() =>
vi.fn(async (input: unknown) => {
const url = String(input)
if (url.includes('dashscope.aliyuncs.com/compatible-mode/v1/models')) {
return new Response(JSON.stringify({ data: [{ id: 'qwen-plus' }] }), { status: 200 })
}
if (url.includes('api.siliconflow.cn/v1/models')) {
return new Response(JSON.stringify({ data: [{ id: 'Qwen/Qwen3-32B' }] }), { status: 200 })
}
if (url.includes('api.siliconflow.cn/v1/user/info')) {
return new Response(JSON.stringify({ data: { balance: '12.3000' } }), { status: 200 })
}
return new Response('not-found', { status: 404 })
}),
)
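// Happy-path routing for the Bailian and SiliconFlow probe endpoints; failure
// cases override this with mockImplementationOnce.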
describe('provider test connection', () => {
beforeEach(() => {
vi.clearAllMocks()
vi.stubGlobal('fetch', fetchMock)
})
it('passes bailian probe with models step and credits skip', async () => {
const result = await testProviderConnection({
apiType: 'bailian',
apiKey: 'bl-key',
})
expect(result.success).toBe(true)
expect(result.steps).toEqual([
{
name: 'models',
status: 'pass',
message: 'Found 1 models',
},
{
name: 'credits',
status: 'skip',
message: 'Not supported by Bailian probe API',
},
])
})
it('passes siliconflow probe with models and credits steps', async () => {
const result = await testProviderConnection({
apiType: 'siliconflow',
apiKey: 'sf-key',
})
expect(result.success).toBe(true)
expect(result.steps[0]).toEqual({
name: 'models',
status: 'pass',
message: 'Found 1 models',
})
expect(result.steps[1]).toEqual({
name: 'credits',
status: 'pass',
message: 'Balance: 12.3000',
})
})
it('classifies auth failures for bailian models probe', async () => {
fetchMock.mockImplementationOnce(async () => new Response('unauthorized', { status: 401 }))
const result = await testProviderConnection({
apiType: 'bailian',
apiKey: 'bad-key',
})
expect(result.success).toBe(false)
expect(result.steps[0]).toEqual({
name: 'models',
status: 'fail',
message: 'Authentication failed (401)',
detail: 'unauthorized',
})
expect(result.steps[1]).toEqual({
name: 'credits',
status: 'skip',
message: 'Not supported by Bailian probe API',
})
})
it('classifies rate limit failures for siliconflow models probe', async () => {
fetchMock.mockImplementationOnce(async () => new Response('rate limit', { status: 429 }))
const result = await testProviderConnection({
apiType: 'siliconflow',
apiKey: 'sf-key',
})
expect(result.success).toBe(false)
expect(result.steps[0]).toEqual({
name: 'models',
status: 'fail',
message: 'Rate limited (429)',
detail: 'rate limit',
})
expect(result.steps[1]).toEqual({
name: 'credits',
status: 'skip',
message: 'Skipped because model probe failed',
})
})
it('classifies network failures for siliconflow user info probe', async () => {
fetchMock.mockImplementationOnce(async () =>
new Response(JSON.stringify({ data: [{ id: 'Qwen/Qwen3-32B' }] }), { status: 200 }),
)
fetchMock.mockImplementationOnce(async () => {
throw new Error('socket hang up')
})
const result = await testProviderConnection({
apiType: 'siliconflow',
apiKey: 'sf-key',
})
expect(result.success).toBe(false)
expect(result.steps[0]).toEqual({
name: 'models',
status: 'pass',
message: 'Found 1 models',
})
expect(result.steps[1]).toEqual({
name: 'credits',
status: 'fail',
message: 'Network error: socket hang up',
})
})
})


@@ -0,0 +1,11 @@
import { describe, it, expect } from 'vitest'
import { USER_FEEDBACK_FORM_URL } from '@/lib/feedback'
describe('USER_FEEDBACK_FORM_URL', () => {
it('should point to the Feishu feedback form', () => {
expect(USER_FEEDBACK_FORM_URL).toBe(
'https://ox2p5ferjnr.feishu.cn/share/base/form/shrcno200ar2SsTgGiSDYHLmNuc',
)
})
})