refactor: analysis workflow architecture

fix: NEXTAUTH_URL

fix: prevent project model edits from affecting default model
This commit is contained in:
saturn
2026-03-16 21:48:57 +08:00
parent ecbd183a77
commit 9aff44e37a
58 changed files with 2753 additions and 7985 deletions

View File

@@ -26,10 +26,15 @@ MINIO_FORCE_PATH_STYLE=true
# ==================== 认证 ====================
# 本地开发模式(方式三):使用 http://localhost:3000
# Docker 容器模式(方式一、二):改为 https://localhost(配合 Caddy)或 http://localhost:13000
# Docker 容器模式(方式一、二):由 docker-compose.yml 注入为 http://localhost:13000
NEXTAUTH_URL=http://localhost:3000
NEXTAUTH_SECRET=please-change-this-to-a-random-string
# ==================== 应用内部自调用地址 ====================
# 仅用于服务端内部 fetch 本应用 API / 文件。
# 本地开发模式通常与 NEXTAUTH_URL 一致;Docker 模式由 docker-compose.yml 注入为 http://127.0.0.1:3000
INTERNAL_APP_URL=http://127.0.0.1:3000
# ==================== 内部密钥 ====================
CRON_SECRET=please-change-this-cron-secret
INTERNAL_TASK_TOKEN=please-change-this-task-token

View File

@@ -82,8 +82,10 @@ services:
MINIO_ACCESS_KEY: "minioadmin"
MINIO_SECRET_KEY: "minioadmin"
MINIO_FORCE_PATH_STYLE: "true"
# 认证
NEXTAUTH_URL: "http://localhost:3000"
# 外部访问地址(浏览器实际访问)
NEXTAUTH_URL: "http://localhost:13000"
# 容器内自调用地址(服务端 fetch 自己的 API / 文件)
INTERNAL_APP_URL: "http://127.0.0.1:3000"
NEXTAUTH_SECRET: "waoowaoo-default-secret-2026"
# 内部密钥
CRON_SECRET: "waoowaoo-docker-cron-secret"

396
package-lock.json generated
View File

@@ -20,7 +20,6 @@
"@dnd-kit/sortable": "^10.0.0",
"@fal-ai/client": "^1.7.2",
"@google/genai": "^1.34.0",
"@langchain/langgraph": "^1.2.0",
"@next-auth/prisma-adapter": "^1.0.7",
"@openrouter/sdk": "^0.3.11",
"@prisma/client": "^6.19.2",
@@ -1226,6 +1225,7 @@
"resolved": "https://registry.npmmirror.com/@cfworker/json-schema/-/json-schema-4.1.1.tgz",
"integrity": "sha512-gAmrUZSGtKc3AiBL71iNWxDsyUC5uMaKKGdvzYsBoTW/xi42JQHl7eKV2OYzCUqvc+D2RCcf7EXY2iCyFIk6og==",
"license": "MIT",
"optional": true,
"peer": true
},
"node_modules/@dnd-kit/accessibility": {
@@ -2772,197 +2772,6 @@
"@jridgewell/sourcemap-codec": "^1.4.14"
}
},
"node_modules/@langchain/core": {
"version": "1.1.30",
"resolved": "https://registry.npmmirror.com/@langchain/core/-/core-1.1.30.tgz",
"integrity": "sha512-tPjY7TUI/w5Jby93TBCENH3QlcuUi0cuq2hpQ3WO1rd3x3WULvdmtfDzLcLQC427oPMlOFuDI1NIvR89jxD6Ng==",
"license": "MIT",
"peer": true,
"dependencies": {
"@cfworker/json-schema": "^4.0.2",
"@standard-schema/spec": "^1.1.0",
"ansi-styles": "^5.0.0",
"camelcase": "6",
"decamelize": "1.2.0",
"js-tiktoken": "^1.0.12",
"langsmith": ">=0.5.0 <1.0.0",
"mustache": "^4.2.0",
"p-queue": "^6.6.2",
"uuid": "^11.1.0",
"zod": "^3.25.76 || ^4"
},
"engines": {
"node": ">=20"
}
},
"node_modules/@langchain/core/node_modules/ansi-styles": {
"version": "5.2.0",
"resolved": "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-5.2.0.tgz",
"integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==",
"license": "MIT",
"peer": true,
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/@langchain/core/node_modules/uuid": {
"version": "11.1.0",
"resolved": "https://registry.npmmirror.com/uuid/-/uuid-11.1.0.tgz",
"integrity": "sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==",
"funding": [
"https://github.com/sponsors/broofa",
"https://github.com/sponsors/ctavan"
],
"license": "MIT",
"peer": true,
"bin": {
"uuid": "dist/esm/bin/uuid"
}
},
"node_modules/@langchain/langgraph": {
"version": "1.2.0",
"resolved": "https://registry.npmmirror.com/@langchain/langgraph/-/langgraph-1.2.0.tgz",
"integrity": "sha512-wyqKIzXTAfXX3L1d8R7icM+HmRQBTbuNLWtUlpRJ/JP/ux1ei/sOSt6p8f90ARoOP2iJVlM70wOBYWaGErdBlA==",
"license": "MIT",
"dependencies": {
"@langchain/langgraph-checkpoint": "^1.0.0",
"@langchain/langgraph-sdk": "~1.6.5",
"@standard-schema/spec": "1.1.0",
"uuid": "^10.0.0"
},
"engines": {
"node": ">=18"
},
"peerDependencies": {
"@langchain/core": "^1.1.16",
"zod": "^3.25.32 || ^4.2.0",
"zod-to-json-schema": "^3.x"
},
"peerDependenciesMeta": {
"zod-to-json-schema": {
"optional": true
}
}
},
"node_modules/@langchain/langgraph-checkpoint": {
"version": "1.0.0",
"resolved": "https://registry.npmmirror.com/@langchain/langgraph-checkpoint/-/langgraph-checkpoint-1.0.0.tgz",
"integrity": "sha512-xrclBGvNCXDmi0Nz28t3vjpxSH6UYx6w5XAXSiiB1WEdc2xD2iY/a913I3x3a31XpInUW/GGfXXfePfaghV54A==",
"license": "MIT",
"dependencies": {
"uuid": "^10.0.0"
},
"engines": {
"node": ">=18"
},
"peerDependencies": {
"@langchain/core": "^1.0.1"
}
},
"node_modules/@langchain/langgraph-checkpoint/node_modules/uuid": {
"version": "10.0.0",
"resolved": "https://registry.npmmirror.com/uuid/-/uuid-10.0.0.tgz",
"integrity": "sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ==",
"funding": [
"https://github.com/sponsors/broofa",
"https://github.com/sponsors/ctavan"
],
"license": "MIT",
"bin": {
"uuid": "dist/bin/uuid"
}
},
"node_modules/@langchain/langgraph-sdk": {
"version": "1.6.5",
"resolved": "https://registry.npmmirror.com/@langchain/langgraph-sdk/-/langgraph-sdk-1.6.5.tgz",
"integrity": "sha512-JjprmbhgCnoNJ9DUKcvrEU+C9FfKsNGyT3ooqWxAY5Cx2qofhXmDJOpTCqqbxfDHPKG0RjTs5HgVK3WW5M6Big==",
"license": "MIT",
"dependencies": {
"@types/json-schema": "^7.0.15",
"p-queue": "^9.0.1",
"p-retry": "^7.1.1",
"uuid": "^13.0.0"
},
"peerDependencies": {
"@langchain/core": "^1.1.16",
"react": "^18 || ^19",
"react-dom": "^18 || ^19"
},
"peerDependenciesMeta": {
"@langchain/core": {
"optional": true
},
"react": {
"optional": true
},
"react-dom": {
"optional": true
}
}
},
"node_modules/@langchain/langgraph-sdk/node_modules/eventemitter3": {
"version": "5.0.4",
"resolved": "https://registry.npmmirror.com/eventemitter3/-/eventemitter3-5.0.4.tgz",
"integrity": "sha512-mlsTRyGaPBjPedk6Bvw+aqbsXDtoAyAzm5MO7JgU+yVRyMQ5O8bD4Kcci7BS85f93veegeCPkL8R4GLClnjLFw==",
"license": "MIT"
},
"node_modules/@langchain/langgraph-sdk/node_modules/p-queue": {
"version": "9.1.0",
"resolved": "https://registry.npmmirror.com/p-queue/-/p-queue-9.1.0.tgz",
"integrity": "sha512-O/ZPaXuQV29uSLbxWBGGZO1mCQXV2BLIwUr59JUU9SoH76mnYvtms7aafH/isNSNGwuEfP6W/4xD0/TJXxrizw==",
"license": "MIT",
"dependencies": {
"eventemitter3": "^5.0.1",
"p-timeout": "^7.0.0"
},
"engines": {
"node": ">=20"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/@langchain/langgraph-sdk/node_modules/p-timeout": {
"version": "7.0.1",
"resolved": "https://registry.npmmirror.com/p-timeout/-/p-timeout-7.0.1.tgz",
"integrity": "sha512-AxTM2wDGORHGEkPCt8yqxOTMgpfbEHqF51f/5fJCmwFC3C/zNcGT63SymH2ttOAaiIws2zVg4+izQCjrakcwHg==",
"license": "MIT",
"engines": {
"node": ">=20"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/@langchain/langgraph-sdk/node_modules/uuid": {
"version": "13.0.0",
"resolved": "https://registry.npmmirror.com/uuid/-/uuid-13.0.0.tgz",
"integrity": "sha512-XQegIaBTVUjSHliKqcnFqYypAd4S+WCYt5NIeRs6w/UAry7z8Y9j5ZwRRL4kzq9U3sD6v+85er9FvkEaBpji2w==",
"funding": [
"https://github.com/sponsors/broofa",
"https://github.com/sponsors/ctavan"
],
"license": "MIT",
"bin": {
"uuid": "dist-node/bin/uuid"
}
},
"node_modules/@langchain/langgraph/node_modules/uuid": {
"version": "10.0.0",
"resolved": "https://registry.npmmirror.com/uuid/-/uuid-10.0.0.tgz",
"integrity": "sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ==",
"funding": [
"https://github.com/sponsors/broofa",
"https://github.com/sponsors/ctavan"
],
"license": "MIT",
"bin": {
"uuid": "dist/bin/uuid"
}
},
"node_modules/@medv/finder": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/@medv/finder/-/finder-4.0.2.tgz",
@@ -6061,13 +5870,6 @@
"optional": true,
"peer": true
},
"node_modules/@types/uuid": {
"version": "10.0.0",
"resolved": "https://registry.npmmirror.com/@types/uuid/-/uuid-10.0.0.tgz",
"integrity": "sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==",
"license": "MIT",
"peer": true
},
"node_modules/@types/yauzl": {
"version": "2.10.3",
"resolved": "https://registry.npmmirror.com/@types/yauzl/-/yauzl-2.10.3.tgz",
@@ -7921,19 +7723,6 @@
"node": ">=6"
}
},
"node_modules/camelcase": {
"version": "6.3.0",
"resolved": "https://registry.npmmirror.com/camelcase/-/camelcase-6.3.0.tgz",
"integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==",
"license": "MIT",
"peer": true,
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/camelize": {
"version": "1.0.1",
"resolved": "https://registry.npmmirror.com/camelize/-/camelize-1.0.1.tgz",
@@ -8317,16 +8106,6 @@
"node": "^14.18.0 || >=16.10.0"
}
},
"node_modules/console-table-printer": {
"version": "2.15.0",
"resolved": "https://registry.npmmirror.com/console-table-printer/-/console-table-printer-2.15.0.tgz",
"integrity": "sha512-SrhBq4hYVjLCkBVOWaTzceJalvn5K1Zq5aQA6wXC/cYjI3frKWNPEMK3sZsJfNNQApvCQmgBcc13ZKmFj8qExw==",
"license": "MIT",
"peer": true,
"dependencies": {
"simple-wcswidth": "^1.1.2"
}
},
"node_modules/content-disposition": {
"version": "1.0.1",
"resolved": "https://registry.npmmirror.com/content-disposition/-/content-disposition-1.0.1.tgz",
@@ -8670,16 +8449,6 @@
}
}
},
"node_modules/decamelize": {
"version": "1.2.0",
"resolved": "https://registry.npmmirror.com/decamelize/-/decamelize-1.2.0.tgz",
"integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==",
"license": "MIT",
"peer": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/decimal.js": {
"version": "10.6.0",
"resolved": "https://registry.npmmirror.com/decimal.js/-/decimal.js-10.6.0.tgz",
@@ -9723,13 +9492,6 @@
"node": ">=6"
}
},
"node_modules/eventemitter3": {
"version": "4.0.7",
"resolved": "https://registry.npmmirror.com/eventemitter3/-/eventemitter3-4.0.7.tgz",
"integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==",
"license": "MIT",
"peer": true
},
"node_modules/events": {
"version": "3.3.0",
"resolved": "https://registry.npmmirror.com/events/-/events-3.3.0.tgz",
@@ -11382,18 +11144,6 @@
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-network-error": {
"version": "1.3.1",
"resolved": "https://registry.npmmirror.com/is-network-error/-/is-network-error-1.3.1.tgz",
"integrity": "sha512-6QCxa49rQbmUWLfk0nuGqzql9U8uaV2H6279bRErPBHe/109hCzsLUBUHfbEtvLIHBd6hyXbgedBSHevm43Edw==",
"license": "MIT",
"engines": {
"node": ">=16"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/is-node-process": {
"version": "1.2.0",
"resolved": "https://registry.npmmirror.com/is-node-process/-/is-node-process-1.2.0.tgz",
@@ -11805,16 +11555,6 @@
"url": "https://github.com/sponsors/panva"
}
},
"node_modules/js-tiktoken": {
"version": "1.0.21",
"resolved": "https://registry.npmmirror.com/js-tiktoken/-/js-tiktoken-1.0.21.tgz",
"integrity": "sha512-biOj/6M5qdgx5TKjDnFT1ymSpM5tbd3ylwDtrQvFQSu0Z7bBYko2dF+W/aUkXUPuk6IVpRxk/3Q2sHOzGlS36g==",
"license": "MIT",
"peer": true,
"dependencies": {
"base64-js": "^1.5.1"
}
},
"node_modules/js-tokens": {
"version": "4.0.0",
"resolved": "https://registry.npmmirror.com/js-tokens/-/js-tokens-4.0.0.tgz",
@@ -12030,68 +11770,6 @@
"node": ">=6"
}
},
"node_modules/langsmith": {
"version": "0.5.7",
"resolved": "https://registry.npmmirror.com/langsmith/-/langsmith-0.5.7.tgz",
"integrity": "sha512-FjYf2oBGMoSXnaT4SRaFguIiGJaonZ5VKWKJDPl9awLZjz2RkN29AcQWceecSINVzXzTvtRWPOjAWT+XggqNNg==",
"license": "MIT",
"peer": true,
"dependencies": {
"@types/uuid": "^10.0.0",
"chalk": "^5.6.2",
"console-table-printer": "^2.12.1",
"p-queue": "^6.6.2",
"semver": "^7.6.3",
"uuid": "^10.0.0"
},
"peerDependencies": {
"@opentelemetry/api": "*",
"@opentelemetry/exporter-trace-otlp-proto": "*",
"@opentelemetry/sdk-trace-base": "*",
"openai": "*"
},
"peerDependenciesMeta": {
"@opentelemetry/api": {
"optional": true
},
"@opentelemetry/exporter-trace-otlp-proto": {
"optional": true
},
"@opentelemetry/sdk-trace-base": {
"optional": true
},
"openai": {
"optional": true
}
}
},
"node_modules/langsmith/node_modules/chalk": {
"version": "5.6.2",
"resolved": "https://registry.npmmirror.com/chalk/-/chalk-5.6.2.tgz",
"integrity": "sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA==",
"license": "MIT",
"peer": true,
"engines": {
"node": "^12.17.0 || ^14.13 || >=16.0.0"
},
"funding": {
"url": "https://github.com/chalk/chalk?sponsor=1"
}
},
"node_modules/langsmith/node_modules/uuid": {
"version": "10.0.0",
"resolved": "https://registry.npmmirror.com/uuid/-/uuid-10.0.0.tgz",
"integrity": "sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ==",
"funding": [
"https://github.com/sponsors/broofa",
"https://github.com/sponsors/ctavan"
],
"license": "MIT",
"peer": true,
"bin": {
"uuid": "dist/bin/uuid"
}
},
"node_modules/language-subtag-registry": {
"version": "0.3.23",
"resolved": "https://registry.npmmirror.com/language-subtag-registry/-/language-subtag-registry-0.3.23.tgz",
@@ -12975,16 +12653,6 @@
"node": ">=16"
}
},
"node_modules/mustache": {
"version": "4.2.0",
"resolved": "https://registry.npmmirror.com/mustache/-/mustache-4.2.0.tgz",
"integrity": "sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ==",
"license": "MIT",
"peer": true,
"bin": {
"mustache": "bin/mustache"
}
},
"node_modules/mute-stream": {
"version": "2.0.0",
"resolved": "https://registry.npmmirror.com/mute-stream/-/mute-stream-2.0.0.tgz",
@@ -13726,16 +13394,6 @@
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/p-finally": {
"version": "1.0.0",
"resolved": "https://registry.npmmirror.com/p-finally/-/p-finally-1.0.0.tgz",
"integrity": "sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==",
"license": "MIT",
"peer": true,
"engines": {
"node": ">=4"
}
},
"node_modules/p-limit": {
"version": "3.1.0",
"resolved": "https://registry.npmmirror.com/p-limit/-/p-limit-3.1.0.tgz",
@@ -13768,51 +13426,6 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/p-queue": {
"version": "6.6.2",
"resolved": "https://registry.npmmirror.com/p-queue/-/p-queue-6.6.2.tgz",
"integrity": "sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==",
"license": "MIT",
"peer": true,
"dependencies": {
"eventemitter3": "^4.0.4",
"p-timeout": "^3.2.0"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/p-retry": {
"version": "7.1.1",
"resolved": "https://registry.npmmirror.com/p-retry/-/p-retry-7.1.1.tgz",
"integrity": "sha512-J5ApzjyRkkf601HpEeykoiCvzHQjWxPAHhyjFcEUP2SWq0+35NKh8TLhpLw+Dkq5TZBFvUM6UigdE9hIVYTl5w==",
"license": "MIT",
"dependencies": {
"is-network-error": "^1.1.0"
},
"engines": {
"node": ">=20"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/p-timeout": {
"version": "3.2.0",
"resolved": "https://registry.npmmirror.com/p-timeout/-/p-timeout-3.2.0.tgz",
"integrity": "sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==",
"license": "MIT",
"peer": true,
"dependencies": {
"p-finally": "^1.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/p-try": {
"version": "2.2.0",
"resolved": "https://registry.npmmirror.com/p-try/-/p-try-2.2.0.tgz",
@@ -15437,13 +15050,6 @@
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/simple-wcswidth": {
"version": "1.1.2",
"resolved": "https://registry.npmmirror.com/simple-wcswidth/-/simple-wcswidth-1.1.2.tgz",
"integrity": "sha512-j7piyCjAeTDSjzTSQ7DokZtMNwNlEAyxqSZeCS+CXH7fJ4jx3FuJ/mTW3mE+6JLs4VJBbcll0Kjn+KXI5t21Iw==",
"license": "MIT",
"peer": true
},
"node_modules/sisteransi": {
"version": "1.0.5",
"resolved": "https://registry.npmmirror.com/sisteransi/-/sisteransi-1.0.5.tgz",

View File

@@ -121,7 +121,6 @@
"@dnd-kit/sortable": "^10.0.0",
"@fal-ai/client": "^1.7.2",
"@google/genai": "^1.34.0",
"@langchain/langgraph": "^1.2.0",
"@next-auth/prisma-adapter": "^1.0.7",
"@openrouter/sdk": "^0.3.11",
"@prisma/client": "^6.19.2",

6197
pnpm-lock.yaml generated

File diff suppressed because it is too large Load Diff

View File

@@ -658,6 +658,10 @@ model GraphRun {
errorCode String?
errorMessage String? @db.Text
cancelRequestedAt DateTime?
leaseOwner String?
leaseExpiresAt DateTime?
heartbeatAt DateTime?
workflowVersion Int @default(1)
queuedAt DateTime @default(now())
startedAt DateTime?
finishedAt DateTime?
@@ -675,6 +679,8 @@ model GraphRun {
@@index([userId, createdAt])
@@index([taskId])
@@index([targetType, targetId])
@@index([workflowType, targetType, targetId, status])
@@index([leaseExpiresAt])
@@map("graph_runs")
}

View File

@@ -91,7 +91,11 @@ export function useRebuildConfirm({
try {
const downstream = await checkStoryboardDownstreamData()
if (!downstream.shouldConfirm) {
await action()
try {
await action()
} finally {
setPendingActionType((current) => (current === actionType ? null : current))
}
return
}

View File

@@ -330,33 +330,6 @@ export const PATCH = apiHandler(async (
where: { projectId },
data: updateData})
// 同步更新用户偏好配置(配置字段)
const preferenceFields = [
'analysisModel', 'characterModel', 'locationModel', 'storyboardModel',
'editModel', 'videoModel', 'audioModel', 'videoRatio', 'artStyle', 'ttsRate',
] as const
const preferenceUpdate: Record<string, unknown> = {}
for (const field of preferenceFields) {
if (body[field] !== undefined) {
if ((MODEL_FIELDS as readonly string[]).includes(field)) {
validateModelKeyField(field as typeof MODEL_FIELDS[number], body[field])
}
if (field === 'artStyle') {
preferenceUpdate[field] = validateArtStyleField(body[field])
continue
}
preferenceUpdate[field] = body[field]
}
}
if (Object.keys(preferenceUpdate).length > 0) {
await prisma.userPreference.upsert({
where: { userId: session.user.id },
update: preferenceUpdate,
create: {
userId: session.user.id,
...preferenceUpdate}})
}
const novelPromotionDataWithSignedUrls = await attachMediaFieldsToProject(updatedNovelPromotionData)
const fullProject = {

View File

@@ -1,3 +1,4 @@
import { getInternalBaseUrl } from '@/lib/env'
import { logInfo as _ulogInfo, logError as _ulogError } from '@/lib/logging/core'
/**
* 火山引擎 API 统一调用工具
@@ -265,7 +266,7 @@ async function fetchWithTimeout(
let fullUrl = url
if (url.startsWith('/')) {
// 服务端 fetch 需要完整 URL:使用内部基础地址(getInternalBaseUrl)补全
const baseUrl = process.env.NEXTAUTH_URL || 'http://localhost:3000'
const baseUrl = getInternalBaseUrl()
fullUrl = `${baseUrl}${url}`
}

View File

@@ -3,12 +3,26 @@
* 集中管理环境变量的获取,避免到处重复
*/
export function getPublicBaseUrl(): string {
return process.env.NEXTAUTH_URL || 'http://localhost:3000'
}
/**
* 获取应用 baseUrl
* 用于内部 API 调用、webhook 回调等场景
* 获取应用内部 baseUrl
* 用于容器内自调用、服务端 fetch 本应用 API、拉取本地 /api/files 资源等场景
*/
export function getInternalBaseUrl(): string {
return process.env.INTERNAL_APP_URL
|| process.env.INTERNAL_TASK_API_BASE_URL
|| process.env.NEXTAUTH_URL
|| 'http://localhost:3000'
}
/**
* 向后兼容:当前仓库中 getBaseUrl 主要用于服务端内部调用,因此默认返回内部地址。
*/
export function getBaseUrl(): string {
return process.env.NEXTAUTH_URL || 'http://localhost:3000'
return getInternalBaseUrl()
}
/**
@@ -16,7 +30,7 @@ export function getBaseUrl(): string {
* @param path API 路径,如 '/api/user/balance'
*/
export function getApiUrl(path: string): string {
const baseUrl = getBaseUrl()
const baseUrl = getInternalBaseUrl()
// 确保 path 以 / 开头
const normalizedPath = path.startsWith('/') ? path : `/${path}`
return `${baseUrl}${normalizedPath}`

View File

@@ -10,6 +10,7 @@
*/
import { GoogleGenAI } from '@google/genai'
import { getInternalBaseUrl } from '@/lib/env'
import { getImageBase64Cached } from './image-cache'
import { logInternal } from './logging/semantic'
@@ -89,7 +90,7 @@ export async function submitGeminiBatch(
// 🔧 本地模式修复:相对路径需要补全完整 URL
let fullUrl = imageData
if (imageData.startsWith('/')) {
const baseUrl = process.env.NEXTAUTH_URL || 'http://localhost:3000'
const baseUrl = getInternalBaseUrl()
fullUrl = `${baseUrl}${imageData}`
}
const base64DataUrl = await getImageBase64Cached(fullUrl)

View File

@@ -1,5 +1,6 @@
import { GoogleGenAI, HarmBlockThreshold, HarmCategory } from '@google/genai'
import { getProviderConfig } from '@/lib/api-config'
import { getInternalBaseUrl } from '@/lib/env'
import { getImageBase64Cached } from '@/lib/image-cache'
import { BaseImageGenerator, type GenerateResult, type ImageGenerateParams } from '../base'
import { setProxy } from '../../../../lib/prompts/proxy'
@@ -16,7 +17,7 @@ type GeminiCompatibleOptions = {
function toAbsoluteUrlIfNeeded(value: string): string {
if (!value.startsWith('/')) return value
const baseUrl = process.env.NEXTAUTH_URL || 'http://localhost:3000'
const baseUrl = getInternalBaseUrl()
return `${baseUrl}${value}`
}

View File

@@ -9,6 +9,7 @@ import { logInfo as _ulogInfo, logWarn as _ulogWarn } from '@/lib/logging/core'
*/
import { GoogleGenAI, HarmCategory, HarmBlockThreshold } from '@google/genai'
import { getInternalBaseUrl } from '@/lib/env'
import { BaseImageGenerator, ImageGenerateParams, GenerateResult } from '../base'
import { getProviderConfig } from '@/lib/api-config'
import { getImageBase64Cached } from '@/lib/image-cache'
@@ -94,7 +95,7 @@ export class GoogleGeminiImageGenerator extends BaseImageGenerator {
// 🔧 本地模式修复:相对路径需要补全完整 URL
let fullUrl = imageData
if (imageData.startsWith('/')) {
const baseUrl = process.env.NEXTAUTH_URL || 'http://localhost:3000'
const baseUrl = getInternalBaseUrl()
fullUrl = `${baseUrl}${imageData}`
}
const base64DataUrl = await getImageBase64Cached(fullUrl)

View File

@@ -36,4 +36,8 @@ export const LLM_OBSERVE_REASONING_VISIBLE = parseBoolean(
true,
)
export const INTERNAL_TASK_TOKEN = process.env.INTERNAL_TASK_TOKEN || ''
export const INTERNAL_TASK_API_BASE_URL = process.env.INTERNAL_TASK_API_BASE_URL || process.env.NEXTAUTH_URL || 'http://127.0.0.1:3000'
export const INTERNAL_TASK_API_BASE_URL =
process.env.INTERNAL_TASK_API_BASE_URL
|| process.env.INTERNAL_APP_URL
|| process.env.NEXTAUTH_URL
|| 'http://127.0.0.1:3000'

View File

@@ -1,5 +1,6 @@
import OpenAI from 'openai'
import { GoogleGenAI } from '@google/genai'
import { getInternalBaseUrl } from '@/lib/env'
import {
getProviderConfig,
getProviderKey,
@@ -246,7 +247,7 @@ export async function chatCompletionWithVision(
_ulogInfo('[LLM Vision] 转换本地图片为 Base64')
} catch (e) {
_ulogError('[LLM Vision] 转换本地图片失败:', e)
const baseUrl = process.env.NEXTAUTH_URL || 'http://localhost:3000'
const baseUrl = getInternalBaseUrl()
finalUrl = `${baseUrl}${url}`
}
}

View File

@@ -1,11 +1,12 @@
import OpenAI, { toFile } from 'openai'
import { getProviderConfig } from '@/lib/api-config'
import { getInternalBaseUrl } from '@/lib/env'
import { getImageBase64Cached } from '@/lib/image-cache'
import type { OpenAICompatClientConfig } from '../types'
function toAbsoluteUrlIfNeeded(value: string): string {
if (!value.startsWith('/')) return value
const baseUrl = process.env.NEXTAUTH_URL || 'http://localhost:3000'
const baseUrl = getInternalBaseUrl()
return `${baseUrl}${value}`
}

View File

@@ -20,7 +20,7 @@ export interface VoiceLine {
matchedPanelIndex?: number | null
}
export type PendingVoiceTaskStatus = 'queued' | 'processing' | 'completed' | 'failed' | null
export type PendingVoiceTaskStatus = 'queued' | 'processing' | 'completed' | 'failed' | 'canceled' | null
export interface PendingVoiceGenerationState {
submittedUpdatedAt: string | null

View File

@@ -72,7 +72,13 @@ async function fetchTaskStatus(taskId: string): Promise<{
: typeof payload.task?.errorMessage === 'string'
? payload.task.errorMessage
: null
if (status === 'queued' || status === 'processing' || status === 'completed' || status === 'failed') {
if (
status === 'queued'
|| status === 'processing'
|| status === 'completed'
|| status === 'failed'
|| status === 'canceled'
) {
return { status, errorMessage }
}
return { status: null, errorMessage }
@@ -97,7 +103,7 @@ export function useVoiceRuntimeSync({
useEffect(() => {
for (const [lineId, pending] of pendingEntries) {
if (pending.taskStatus !== 'failed') continue
if (pending.taskStatus !== 'failed' && pending.taskStatus !== 'canceled') continue
const failureKey = pending.taskId || lineId
if (reportedFailedTaskIdsRef.current.has(failureKey)) continue
reportedFailedTaskIdsRef.current.add(failureKey)

View File

@@ -186,7 +186,9 @@ export function useTaskStatus(params: {
const data = useMemo(() => {
const tasks = query.data || []
const latest = snapshotQuery.data || tasks[0] || null
const lastFailed = latest?.status === 'failed' ? (latest.error || null) : null
const lastFailed = latest?.status === 'failed' || latest?.status === 'canceled'
? (latest.error || null)
: null
return {
active: tasks,
hasActive: tasks.length > 0,

View File

@@ -1,172 +0,0 @@
import { normalizeAnyError } from '@/lib/errors/normalize'
import { buildLeanState, createCheckpoint, getRunById } from './service'
import type { StateRef } from './types'
type JsonRecord = Record<string, unknown>
export type GraphExecutorState = {
refs: StateRef
meta: JsonRecord
}
export type GraphNodeContext<TState extends GraphExecutorState> = {
runId: string
projectId: string
userId: string
nodeKey: string
attempt: number
state: TState
}
export type GraphNodeResult = {
output?: JsonRecord
checkpointRefs?: StateRef
checkpointMeta?: JsonRecord
}
export type GraphNode<TState extends GraphExecutorState> = {
key: string
title: string
maxAttempts?: number
timeoutMs?: number
run: (context: GraphNodeContext<TState>) => Promise<GraphNodeResult | void>
}
export type GraphExecutorInput<TState extends GraphExecutorState> = {
runId: string
projectId: string
userId: string
state: TState
nodes: GraphNode<TState>[]
}
/**
 * Signals that a run was canceled mid-execution (or is no longer visible to
 * the caller), so the executor should abort without treating it as a node
 * failure — this error type is never retried.
 */
export class GraphCancellationError extends Error {
  override name = 'GraphCancellationError'

  constructor(message = 'run canceled') {
    super(message)
  }
}
/** Sleep helper: resolves (with no value) after roughly `ms` milliseconds. */
function wait(ms: number): Promise<void> {
  return new Promise<void>((resolve) => {
    setTimeout(resolve, ms)
  })
}
/**
 * Guards `task` with a deadline: the returned promise rejects with a
 * descriptive Error if the task has not settled within `timeoutMs`.
 * A non-finite or non-positive timeout disables the guard entirely, in
 * which case the original promise is returned unchanged (same identity).
 */
function withTimeout<T>(task: Promise<T>, timeoutMs: number): Promise<T> {
  const guardDisabled = !Number.isFinite(timeoutMs) || timeoutMs <= 0
  if (guardDisabled) {
    return task
  }
  return new Promise<T>((resolve, reject) => {
    const timer = setTimeout(
      () => reject(new Error(`node timeout after ${Math.floor(timeoutMs)}ms`)),
      timeoutMs,
    )
    // Whichever settles first wins; always release the timer afterwards.
    task.then(
      (value) => {
        clearTimeout(timer)
        resolve(value)
      },
      (error: unknown) => {
        clearTimeout(timer)
        reject(error)
      },
    )
  })
}
/**
 * Exponential backoff schedule: 1s, 2s, 4s, ... capped at 10s, plus up to
 * 200ms of random jitter so concurrent retries do not stampede together.
 */
function computeBackoffMs(attempt: number): number {
  const exponent = Math.max(0, attempt - 1)
  const cappedBase = Math.min(2 ** exponent * 1_000, 10_000)
  const jitter = Math.floor(Math.random() * 200)
  return cappedBase + jitter
}
/**
 * Ensures the run still exists, belongs to `userId`, and has not been
 * canceled. Throws GraphCancellationError otherwise so the executor aborts
 * cooperatively between node attempts.
 */
async function assertRunActive(runId: string, userId: string): Promise<void> {
  const run = await getRunById(runId)
  if (!run || run.userId !== userId) {
    // Treat a missing or foreign run the same as cancellation: stop quietly.
    throw new GraphCancellationError('run not found')
  }
  if (run.status === 'canceled' || run.status === 'canceling') {
    throw new GraphCancellationError('run canceled')
  }
}
/**
 * Overlays `next` onto `base`, keeping each field from `base` whenever the
 * incoming checkpoint did not supply a truthy replacement. Returns `base`
 * untouched when there is nothing to merge.
 */
function mergeRefs(base: StateRef, next: StateRef | undefined): StateRef {
  if (!next) {
    return base
  }
  const { scriptId, storyboardId, voiceLineBatchId, versionHash, cursor } = next
  return {
    scriptId: scriptId || base.scriptId,
    storyboardId: storyboardId || base.storyboardId,
    voiceLineBatchId: voiceLineBatchId || base.voiceLineBatchId,
    versionHash: versionHash || base.versionHash,
    cursor: cursor || base.cursor,
  }
}
/**
 * Runs the given nodes sequentially against a shared mutable state,
 * persisting a checkpoint after each successful node and retrying failed
 * nodes with exponential backoff when the normalized error is retryable.
 *
 * Cancellation is cooperative: before every attempt the run's status is
 * re-read via assertRunActive, and a canceled/missing run aborts with
 * GraphCancellationError (which is never retried).
 */
export async function executePipelineGraph<TState extends GraphExecutorState>(
  input: GraphExecutorInput<TState>,
): Promise<TState> {
  const { nodes, runId, projectId, userId, state } = input
  for (const node of nodes) {
    // Clamp maxAttempts to a finite integer >= 1 (unset/0/NaN/Infinity -> 1).
    const maxAttempts = Number.isFinite(node.maxAttempts || 1)
      ? Math.max(1, Math.floor(node.maxAttempts || 1))
      : 1
    let attempt = 1
    while (attempt <= maxAttempts) {
      // Re-check cancellation before each attempt so a cancel request takes
      // effect between retries, not only between nodes.
      await assertRunActive(runId, userId)
      try {
        const result = await withTimeout(
          node.run({
            runId,
            projectId,
            userId,
            nodeKey: node.key,
            attempt,
            state,
          }),
          node.timeoutMs || 0, // 0 / unset disables the timeout guard
        )
        // Fold the node's checkpoint data into the shared state in place;
        // later nodes observe these mutations.
        state.refs = mergeRefs(state.refs, result?.checkpointRefs)
        if (result?.checkpointMeta) {
          state.meta = {
            ...state.meta,
            ...result.checkpointMeta,
          }
        }
        // Persist a checkpoint for this node; `version` records which attempt
        // finally succeeded.
        await createCheckpoint({
          runId,
          nodeKey: node.key,
          version: attempt,
          state: buildLeanState({
            refs: state.refs,
            meta: {
              ...state.meta,
              nodeTitle: node.title,
              nodeAttempt: attempt,
              ...(result?.output ? { output: result.output } : {}),
            },
          }),
        })
        break // node succeeded — move on to the next node
      } catch (error) {
        // Cancellation always propagates immediately, bypassing retry logic.
        if (error instanceof GraphCancellationError) {
          throw error
        }
        const normalized = normalizeAnyError(error, { context: 'worker' })
        const shouldRetry = normalized.retryable && attempt < maxAttempts
        if (!shouldRetry) {
          throw error
        }
        await wait(computeBackoffMs(attempt))
        attempt += 1
      }
    }
  }
  return state
}

View File

@@ -1,89 +0,0 @@
import { Annotation, END, START, StateGraph } from '@langchain/langgraph'
import {
executePipelineGraph,
type GraphExecutorInput,
type GraphExecutorState,
} from './graph-executor'
/**
 * Rejects a pipeline whose nodes reuse a key. LangGraph node names must be
 * unique, so we fail fast and include the duplicate key in the message.
 */
function assertUniqueNodeKeys<TState extends GraphExecutorState>(input: GraphExecutorInput<TState>) {
  const seen = new Set<string>()
  for (const { key } of input.nodes) {
    if (seen.has(key)) {
      throw new Error(`LANGGRAPH_NODE_KEY_DUPLICATE: ${key}`)
    }
    seen.add(key)
  }
}
/**
 * Builds a single-channel LangGraph state schema that carries the whole
 * pipeline state as one opaque value. The reducer replaces the previous
 * value with the node's update (last write wins), and the channel defaults
 * to the provided initial state.
 */
function createStateAnnotation<TState extends GraphExecutorState>(initialState: TState) {
  return Annotation.Root({
    pipelineState: Annotation<unknown>({
      reducer: (_current, update) => update,
      default: () => initialState,
    }),
  })
}
/**
 * Extracts the pipeline state from a LangGraph channel object, throwing a
 * descriptive error when either the wrapper or the payload is missing or
 * not an object. The cast to TState is unchecked — callers own the shape.
 */
function readPipelineState<TState extends GraphExecutorState>(value: unknown): TState {
  if (typeof value !== 'object' || value === null) {
    throw new Error('LANGGRAPH_STATE_INVALID: state object missing')
  }
  const { pipelineState } = value as { pipelineState?: unknown }
  if (typeof pipelineState !== 'object' || pipelineState === null) {
    throw new Error('LANGGRAPH_STATE_INVALID: pipelineState missing')
  }
  return pipelineState as TState
}
/**
 * Calls `addEdge` on the graph builder without tracking LangGraph's
 * progressively-narrowing node-name literal types; keeps the call sites
 * free of generic gymnastics at the cost of one localized cast.
 */
function addEdgeUnsafe(
  graphBuilder: unknown,
  source: string,
  target: string,
) {
  type EdgeCapable = { addEdge: (from: string, to: string) => unknown }
  ;(graphBuilder as EdgeCapable).addEdge(source, target)
}
/**
 * Executes the node list as a linear LangGraph: each executor node becomes a
 * LangGraph node wired in order from START to END, and the pipeline state is
 * threaded through the graph as a single opaque channel.
 */
export async function runLangGraphPipeline<TState extends GraphExecutorState>(
  input: GraphExecutorInput<TState>,
): Promise<TState> {
  // Empty pipeline: nothing to run, return the initial state untouched.
  if (input.nodes.length === 0) {
    return input.state
  }
  assertUniqueNodeKeys(input)
  const stateAnnotation = createStateAnnotation(input.state)
  const graphBuilder = new StateGraph(stateAnnotation)
  for (const node of input.nodes) {
    graphBuilder.addNode(node.key, async (state: { pipelineState: unknown }) => {
      const pipelineState = readPipelineState<TState>({ pipelineState: state.pipelineState })
      // Delegate the actual work (retries, checkpoints, cancellation) to the
      // executor, one node at a time; it mutates pipelineState in place.
      await executePipelineGraph({
        runId: input.runId,
        projectId: input.projectId,
        userId: input.userId,
        state: pipelineState,
        nodes: [node],
      })
      return {
        pipelineState,
      }
    })
  }
  // Chain the nodes linearly: START -> node[0] -> ... -> node[n-1] -> END.
  addEdgeUnsafe(graphBuilder, START, input.nodes[0].key)
  for (let index = 0; index < input.nodes.length; index += 1) {
    const currentKey = input.nodes[index].key
    const nextKey = input.nodes[index + 1]?.key
    addEdgeUnsafe(graphBuilder, currentKey, nextKey || END)
  }
  const compiled = graphBuilder.compile()
  const result = await compiled.invoke({
    pipelineState: input.state,
  })
  return readPipelineState<TState>(result)
}

View File

@@ -1,16 +0,0 @@
import {
type GraphExecutorInput,
type GraphExecutorState,
type GraphNode,
} from './graph-executor'
import { runLangGraphPipeline } from './langgraph-pipeline'
export type PipelineGraphState = GraphExecutorState
export type PipelineGraphNode<TState extends PipelineGraphState> = GraphNode<TState>
export type PipelineGraphInput<TState extends PipelineGraphState> = GraphExecutorInput<TState>
/**
 * Public entry point for running a pipeline graph. Currently delegates to
 * the LangGraph-backed runner; kept as a seam so the backing engine can be
 * swapped without touching callers.
 */
export async function runPipelineGraph<TState extends PipelineGraphState>(
  input: PipelineGraphInput<TState>,
): Promise<TState> {
  const finalState = await runLangGraphPipeline(input)
  return finalState
}

View File

@@ -1,31 +0,0 @@
import { runPipelineGraph, type PipelineGraphNode, type PipelineGraphState } from './pipeline-graph'
type QuickRunInput<TState extends PipelineGraphState> = {
runId: string
projectId: string
userId: string
nodeKey: string
nodeTitle: string
state: TState
run: PipelineGraphNode<TState>['run']
maxAttempts?: number
timeoutMs?: number
}
/**
 * Convenience wrapper that runs a single-node pipeline graph.
 * The node is assembled from the flat QuickRunInput fields.
 */
export async function runQuickRunGraph<TState extends PipelineGraphState>(input: QuickRunInput<TState>) {
  const singleNode = {
    key: input.nodeKey,
    title: input.nodeTitle,
    maxAttempts: input.maxAttempts,
    timeoutMs: input.timeoutMs,
    run: input.run,
  }
  return await runPipelineGraph({
    runId: input.runId,
    projectId: input.projectId,
    userId: input.userId,
    state: input.state,
    nodes: [singleNode],
  })
}

View File

@@ -1,4 +1,5 @@
import { prisma } from '@/lib/prisma'
import { resolveRetryInvalidationStepKeys } from '@/lib/workflow-engine/dependencies'
import {
RUN_EVENT_TYPE,
RUN_STATE_MAX_BYTES,
@@ -30,6 +31,10 @@ type GraphRunRow = {
errorCode: string | null
errorMessage: string | null
cancelRequestedAt: Date | null
leaseOwner: string | null
leaseExpiresAt: Date | null
heartbeatAt: Date | null
workflowVersion: number
queuedAt: Date
startedAt: Date | null
finishedAt: Date | null
@@ -121,6 +126,7 @@ type GraphCheckpointModel = {
type GraphArtifactModel = {
upsert: (args: unknown) => Promise<GraphArtifactRow>
findMany: (args: unknown) => Promise<GraphArtifactRow[]>
deleteMany: (args: unknown) => Promise<{ count: number }>
}
type GraphRuntimeTx = {
@@ -226,6 +232,10 @@ function mapRunRow(run: GraphRunRow) {
errorCode: run.errorCode,
errorMessage: run.errorMessage,
cancelRequestedAt: toIso(run.cancelRequestedAt),
leaseOwner: run.leaseOwner,
leaseExpiresAt: toIso(run.leaseExpiresAt),
heartbeatAt: toIso(run.heartbeatAt),
workflowVersion: run.workflowVersion,
queuedAt: run.queuedAt.toISOString(),
startedAt: toIso(run.startedAt),
finishedAt: toIso(run.finishedAt),
@@ -457,6 +467,8 @@ async function applyRunProjection(tx: GraphRuntimeTx, input: RunEventInput) {
status: RUN_STATUS.COMPLETED,
output: payload,
finishedAt: now,
leaseOwner: null,
leaseExpiresAt: null,
},
})
await tx.graphStep.updateMany({
@@ -482,6 +494,8 @@ async function applyRunProjection(tx: GraphRuntimeTx, input: RunEventInput) {
errorCode: readString(payload, 'errorCode'),
errorMessage: resolveErrorMessage(payload),
finishedAt: now,
leaseOwner: null,
leaseExpiresAt: null,
},
})
await tx.graphStep.updateMany({
@@ -507,6 +521,8 @@ async function applyRunProjection(tx: GraphRuntimeTx, input: RunEventInput) {
data: {
status: RUN_STATUS.CANCELED,
finishedAt: now,
leaseOwner: null,
leaseExpiresAt: null,
},
})
await tx.graphStep.updateMany({
@@ -651,6 +667,10 @@ export async function createRun(input: CreateRunInput) {
targetId: input.targetId,
status: RUN_STATUS.QUEUED,
input: input.input || null,
leaseOwner: null,
leaseExpiresAt: null,
heartbeatAt: null,
workflowVersion: 1,
queuedAt: new Date(),
lastSeq: 0,
},
@@ -676,6 +696,112 @@ export async function getRunById(runId: string) {
return mapRunRow(row)
}
/**
 * Finds the most recently updated non-terminal run (queued / running /
 * canceling) for the given user + project + workflow + target, or null.
 * Used by submit paths to reuse an in-flight run instead of creating one.
 */
export async function findReusableActiveRun(params: {
  userId: string
  projectId: string
  workflowType: string
  targetType: string
  targetId: string
}) {
  const [candidate] = await runtimeClient.graphRun.findMany({
    where: {
      userId: params.userId,
      projectId: params.projectId,
      workflowType: params.workflowType,
      targetType: params.targetType,
      targetId: params.targetId,
      status: {
        in: [RUN_STATUS.QUEUED, RUN_STATUS.RUNNING, RUN_STATUS.CANCELING],
      },
    },
    orderBy: [
      { updatedAt: 'desc' },
      { createdAt: 'desc' },
    ],
    take: 1,
  })
  return candidate ? mapRunRow(candidate) : null
}
/**
 * Attempts to claim the run lease for `workerId` via a conditional update
 * (CAS-style: succeeds only when the lease is free, already ours, or expired,
 * and the run is still non-terminal). Returns the refreshed run on success,
 * or null when another live worker holds the lease.
 * The lease TTL is clamped to a minimum of 5 seconds.
 */
export async function claimRunLease(params: {
  runId: string
  userId: string
  workerId: string
  leaseMs: number
}) {
  const now = new Date()
  const ttlMs = Math.max(5_000, Math.floor(params.leaseMs))
  const claimed = await runtimeClient.graphRun.updateMany({
    where: {
      id: params.runId,
      userId: params.userId,
      status: {
        in: [RUN_STATUS.QUEUED, RUN_STATUS.RUNNING, RUN_STATUS.CANCELING],
      },
      // Claimable when: no owner, we already own it, or the lease lapsed.
      OR: [
        { leaseOwner: null },
        { leaseOwner: params.workerId },
        { leaseExpiresAt: null },
        { leaseExpiresAt: { lt: now } },
      ],
    },
    data: {
      leaseOwner: params.workerId,
      leaseExpiresAt: new Date(now.getTime() + ttlMs),
      heartbeatAt: now,
    },
  })
  if (claimed.count === 0) {
    return null
  }
  return await getRunById(params.runId)
}
/**
 * Extends an existing lease held by `workerId`. Only the current owner may
 * renew, and only while the run is still non-terminal. Returns the refreshed
 * run, or null if ownership was lost or the run finished.
 * The lease TTL is clamped to a minimum of 5 seconds.
 */
export async function renewRunLease(params: {
  runId: string
  userId: string
  workerId: string
  leaseMs: number
}) {
  const now = new Date()
  const ttlMs = Math.max(5_000, Math.floor(params.leaseMs))
  const renewed = await runtimeClient.graphRun.updateMany({
    where: {
      id: params.runId,
      userId: params.userId,
      leaseOwner: params.workerId,
      status: {
        in: [RUN_STATUS.QUEUED, RUN_STATUS.RUNNING, RUN_STATUS.CANCELING],
      },
    },
    data: {
      leaseExpiresAt: new Date(now.getTime() + ttlMs),
      heartbeatAt: now,
    },
  })
  return renewed.count === 0 ? null : await getRunById(params.runId)
}
/**
 * Releases the run lease, but only if `workerId` still owns it — a stale
 * worker cannot clobber a lease that has since been claimed by another.
 */
export async function releaseRunLease(params: {
  runId: string
  workerId: string
}) {
  await runtimeClient.graphRun.updateMany({
    where: { id: params.runId, leaseOwner: params.workerId },
    data: { leaseOwner: null, leaseExpiresAt: null },
  })
}
export async function getRunSnapshot(runId: string) {
const [run, steps] = await Promise.all([
runtimeClient.graphRun.findUnique({
@@ -977,8 +1103,21 @@ export async function retryFailedStep(params: {
throw new Error('RUN_STEP_NOT_FAILED')
}
const steps = await tx.graphStep.findMany({
where: { runId: params.runId },
orderBy: [
{ stepIndex: 'asc' },
{ updatedAt: 'asc' },
],
})
const now = new Date()
const nextAttempt = Math.max(1, step.currentAttempt + 1)
const invalidatedStepKeys = resolveRetryInvalidationStepKeys({
workflowType: run.workflowType,
stepKey,
existingStepKeys: steps.map((item) => item.stepKey),
})
const updatedRun = await tx.graphRun.update({
where: { id: params.runId },
data: {
@@ -990,6 +1129,20 @@ export async function retryFailedStep(params: {
startedAt: run.startedAt || now,
},
})
await tx.graphStep.updateMany({
where: {
runId: params.runId,
stepKey: { in: invalidatedStepKeys },
},
data: {
status: RUN_STEP_STATUS.PENDING,
currentAttempt: 0,
startedAt: null,
finishedAt: null,
lastErrorCode: null,
lastErrorMessage: null,
},
})
const updatedStep = await tx.graphStep.update({
where: {
runId_stepKey: {
@@ -998,12 +1151,13 @@ export async function retryFailedStep(params: {
},
},
data: {
status: RUN_STEP_STATUS.PENDING,
currentAttempt: nextAttempt,
startedAt: now,
finishedAt: null,
lastErrorCode: null,
lastErrorMessage: null,
},
})
await tx.graphArtifact.deleteMany({
where: {
runId: params.runId,
stepKey: { in: invalidatedStepKeys },
},
})
@@ -1011,6 +1165,7 @@ export async function retryFailedStep(params: {
run: mapRunRow(updatedRun),
step: mapStepRow(updatedStep),
retryAttempt: nextAttempt,
invalidatedStepKeys,
}
})
}

View File

@@ -69,6 +69,13 @@ export type CreateRunInput = {
input?: Record<string, unknown> | null
}
export type RunLeaseState = {
leaseOwner?: string | null
leaseExpiresAt?: string | null
heartbeatAt?: string | null
workflowVersion?: number
}
export type ListRunsInput = {
userId: string
projectId?: string

View File

@@ -0,0 +1,72 @@
import { TaskTerminatedError } from '@/lib/task/errors'
import { RUN_STATUS } from './types'
import { claimRunLease, getRunById, releaseRunLease, renewRunLease } from './service'
/** Default workflow-run lease duration, in milliseconds. */
const DEFAULT_RUN_LEASE_MS = 30_000

/** Returns the default lease TTL (ms) used when callers pass no override. */
export function getDefaultRunLeaseMs(): number {
  return DEFAULT_RUN_LEASE_MS
}
/**
 * Throws TaskTerminatedError unless the run still exists, this worker still
 * owns its lease, and the run is in a non-terminal status. `stage` is only
 * used to label the error message for diagnostics.
 */
export async function assertWorkflowRunActive(params: {
  runId: string
  workerId: string
  stage: string
}) {
  const run = await getRunById(params.runId)
  if (!run) {
    throw new TaskTerminatedError(params.runId, `Run terminated during ${params.stage}: run not found`)
  }
  if (run.leaseOwner !== params.workerId) {
    throw new TaskTerminatedError(params.runId, `Run terminated during ${params.stage}: lease lost`)
  }
  // Any terminal (or terminating) status means the worker must stop.
  const stoppedStatuses = new Set<string>([
    RUN_STATUS.CANCELING,
    RUN_STATUS.CANCELED,
    RUN_STATUS.COMPLETED,
    RUN_STATUS.FAILED,
  ])
  if (stoppedStatuses.has(run.status)) {
    throw new TaskTerminatedError(params.runId, `Run terminated during ${params.stage}`)
  }
}
/**
 * Runs `params.run` while holding the run lease for `params.workerId`.
 *
 * Returns `{ claimed: false, result: null }` when the lease cannot be claimed
 * (another live worker owns it). Otherwise claims the lease, renews it on a
 * heartbeat interval for the duration of `run()`, and releases it on exit.
 *
 * Fixes over the naive version:
 * - Heartbeat renewals are `.catch`-guarded: a transient renewRunLease
 *   rejection inside setInterval would otherwise surface as an unhandled
 *   promise rejection and can crash the worker process; the next tick
 *   retries anyway.
 * - The final releaseRunLease is best-effort: a release failure must not
 *   mask the primary error thrown by run() (the lease expires on its own).
 */
export async function withWorkflowRunLease<T>(params: {
  runId: string
  userId: string
  workerId: string
  leaseMs?: number
  run: () => Promise<T>
}): Promise<{ claimed: boolean; result: T | null }> {
  const leaseMs = params.leaseMs ?? DEFAULT_RUN_LEASE_MS
  const claimed = await claimRunLease({
    runId: params.runId,
    userId: params.userId,
    workerId: params.workerId,
    leaseMs,
  })
  if (!claimed) {
    return { claimed: false, result: null }
  }
  // Renew at ~1/3 of the TTL (min 5s) so a single missed heartbeat does not
  // lose ownership.
  const heartbeatTimer = setInterval(() => {
    void renewRunLease({
      runId: params.runId,
      userId: params.userId,
      workerId: params.workerId,
      leaseMs,
    }).catch(() => undefined)
  }, Math.max(5_000, Math.floor(leaseMs / 3)))
  try {
    return {
      claimed: true,
      result: await params.run(),
    }
  } finally {
    clearInterval(heartbeatTimer)
    try {
      await releaseRunLease({
        runId: params.runId,
        workerId: params.workerId,
      })
    } catch {
      // Best-effort: never mask run()'s outcome with a release failure.
    }
  }
}

View File

@@ -1,9 +1,10 @@
import { StorageConfigError } from './errors'
import { getInternalBaseUrl } from '@/lib/env'
export const DEFAULT_SIGNED_URL_EXPIRES_SECONDS = 24 * 60 * 60
/**
 * Base URL for server-side self-calls (internal fetches of this app's own
 * API / files). Delegates to getInternalBaseUrl() — the INTERNAL_APP_URL
 * resolver — instead of NEXTAUTH_URL, which is the browser-facing address
 * and may not be reachable from inside the container.
 */
export function resolveBaseUrl(): string {
  // The legacy `process.env.NEXTAUTH_URL || 'http://localhost:3000'` return
  // was unreachable dead code left above this line; the internal resolver
  // owns the default now.
  return getInternalBaseUrl()
}
export function toFetchableUrl(inputUrl: string): string {

View File

@@ -1,7 +1,7 @@
import { resolveTaskErrorMessage } from './error-message'
import { apiFetch } from '@/lib/api-fetch'
type TaskStatus = 'queued' | 'processing' | 'completed' | 'failed'
type TaskStatus = 'queued' | 'processing' | 'completed' | 'failed' | 'canceled'
type TaskSnapshot = {
id: string
@@ -63,7 +63,7 @@ export async function waitForTaskResult(taskId: string, options: WaitTaskOptions
if (task.status === 'completed') {
return task.result || { success: true }
}
if (task.status === 'failed') {
if (task.status === 'failed' || task.status === 'canceled') {
throw new Error(resolveTaskErrorMessage(task, `Task ${task.status}`))
}
if (task.status !== 'queued' && task.status !== 'processing') {

View File

@@ -3,6 +3,7 @@ import { redis } from '@/lib/redis'
import {
TASK_EVENT_TYPE,
TASK_SSE_EVENT_TYPE,
TASK_TYPE,
type TaskEventType,
type TaskLifecycleEventType,
type SSEEvent,
@@ -13,6 +14,10 @@ import { publishRunEvent } from '@/lib/run-runtime/publisher'
const CHANNEL_PREFIX = 'task-events:project:'
const STREAM_EPHEMERAL_ENABLED = process.env.LLM_STREAM_EPHEMERAL_ENABLED !== 'false'
const TASK_TYPES_WITH_DIRECT_RUN_EVENTS = new Set<string>([
TASK_TYPE.STORY_TO_SCRIPT_RUN,
TASK_TYPE.SCRIPT_TO_STORYBOARD_RUN,
])
type TaskEventRow = {
id: number
@@ -221,6 +226,9 @@ export function getProjectChannel(projectId: string) {
}
async function mirrorTaskEventToRun(message: SSEEvent) {
if (message.taskType && TASK_TYPES_WITH_DIRECT_RUN_EVENTS.has(message.taskType)) {
return
}
const runEvents = mapTaskSSEEventToRunEvents(message)
if (runEvents.length === 0) return
for (const event of runEvents) {

View File

@@ -59,6 +59,11 @@ const VOICE_TYPES = new Set<TaskType>([
TASK_TYPE.ASSET_HUB_VOICE_DESIGN,
])
const SINGLE_ATTEMPT_TASK_TYPES = new Set<TaskType>([
TASK_TYPE.STORY_TO_SCRIPT_RUN,
TASK_TYPE.SCRIPT_TO_STORYBOARD_RUN,
])
export function getQueueTypeByTaskType(type: TaskType): QueueType {
if (IMAGE_TYPES.has(type)) return 'image'
if (VIDEO_TYPES.has(type)) return 'video'
@@ -84,10 +89,14 @@ export async function addTaskJob(data: TaskJobData, opts?: JobsOptions) {
const queueType = getQueueTypeByTaskType(data.type)
const queue = getQueueByType(queueType)
const priority = typeof opts?.priority === 'number' ? opts.priority : 0
const attempts = SINGLE_ATTEMPT_TASK_TYPES.has(data.type)
? 1
: (typeof opts?.attempts === 'number' ? opts.attempts : undefined)
return await queue.add(data.type, data, {
jobId: data.taskId,
priority,
...(opts || {}),
...(attempts !== undefined ? { attempts } : {}),
})
}

View File

@@ -469,6 +469,20 @@ export async function tryMarkTaskFailed(taskId: string, errorCode: string, error
return result.count > 0
}
/**
 * Conditionally transitions a still-active task to CANCELED, stamping the
 * truncated error code/message and the finish time, and clearing the
 * heartbeat. Returns true when a row was updated (i.e. the task was still
 * active), false otherwise.
 */
export async function tryMarkTaskCanceled(taskId: string, errorCode: string, errorMessage: string) {
  const { count } = await taskModel.updateMany({
    where: activeTaskWhere(taskId),
    data: {
      status: TASK_STATUS.CANCELED,
      // Column-size guards: code capped at 80 chars, message at 2000.
      errorCode: errorCode.slice(0, 80),
      errorMessage: errorMessage.slice(0, 2000),
      finishedAt: new Date(),
      heartbeatAt: null,
    },
  })
  return count > 0
}
export async function markTaskProcessing(taskId: string, externalId?: string | null) {
return await tryMarkTaskProcessing(taskId, externalId)
}
@@ -485,6 +499,10 @@ export async function markTaskFailed(taskId: string, errorCode: string, errorMes
return await tryMarkTaskFailed(taskId, errorCode, errorMessage)
}
/** Marks a task as canceled; alias over the guarded tryMarkTaskCanceled. */
export async function markTaskCanceled(taskId: string, errorCode: string, errorMessage: string) {
  return tryMarkTaskCanceled(taskId, errorCode, errorMessage)
}
export async function cancelTask(taskId: string, reason = 'Task cancelled by user') {
const snapshot = await taskModel.findUnique({
where: { id: taskId },
@@ -514,7 +532,7 @@ export async function cancelTask(taskId: string, reason = 'Task cancelled by use
}
const failure = resolveCompensationFailure(rollbackResult, 'TASK_CANCELLED', reason)
const cancelled = await tryMarkTaskFailed(taskId, failure.errorCode, failure.errorMessage)
const cancelled = await tryMarkTaskCanceled(taskId, failure.errorCode, failure.errorMessage)
const task = await taskModel.findUnique({ where: { id: taskId } })
return {
task,

View File

@@ -125,7 +125,7 @@ export function resolveTargetState(
const running = filtered.find((task) => ACTIVE_STATUS.has(task.status)) || null
const terminal = filtered.find((task) =>
task.status === 'completed' || task.status === 'failed'
task.status === 'completed' || task.status === 'failed' || task.status === 'canceled'
) || null
const latest = running || terminal
@@ -240,7 +240,7 @@ export async function queryTaskTargetStates(params: {
targetId: item.targetId,
})),
status: {
in: ['queued', 'processing', 'completed', 'failed'],
in: ['queued', 'processing', 'completed', 'failed', 'canceled'],
},
...typeFilter,
},

View File

@@ -3,6 +3,7 @@ import { addTaskJob } from './queues'
import { publishTaskEvent } from './publisher'
import {
createTask,
getTaskById,
markTaskEnqueueFailed,
markTaskEnqueued,
markTaskFailed,
@@ -10,7 +11,7 @@ import {
updateTaskBillingInfo,
updateTaskPayload,
} from './service'
import { TASK_EVENT_TYPE, type TaskBillingInfo, type TaskType } from './types'
import { TASK_EVENT_TYPE, TASK_STATUS, TASK_TYPE, type TaskBillingInfo, type TaskType } from './types'
import {
buildDefaultTaskBillingInfo,
getBillingMode,
@@ -21,9 +22,18 @@ import {
import { ApiError } from '@/lib/api-errors'
import { getTaskFlowMeta } from '@/lib/llm-observe/stage-pipeline'
import type { Locale } from '@/i18n/routing'
import { attachTaskToRun, createRun } from '@/lib/run-runtime/service'
import { attachTaskToRun, createRun, findReusableActiveRun } from '@/lib/run-runtime/service'
import { isAiTaskType, workflowTypeFromTaskType } from '@/lib/run-runtime/workflow'
// Task types whose lifecycle is owned by a workflow run: submitTask reuses an
// existing active run for these instead of creating a duplicate task.
const RUN_CENTRIC_TASK_TYPES = new Set<TaskType>([
  TASK_TYPE.STORY_TO_SCRIPT_RUN,
  TASK_TYPE.SCRIPT_TO_STORYBOARD_RUN,
])

// True when `type` participates in the run-centric reuse/dedupe path.
function isRunCentricTaskType(type: TaskType): boolean {
  return RUN_CENTRIC_TASK_TYPES.has(type)
}
export function toObject(value: unknown): Record<string, unknown> {
if (!value || typeof value !== 'object' || Array.isArray(value)) return {}
return value as Record<string, unknown>
@@ -125,6 +135,34 @@ export async function submitTask(params: {
? buildDefaultTaskBillingInfo(params.type, normalizedPayload)
: null
const resolvedBillingInfo = computedBillingInfo || params.billingInfo || null
const runCentricTask = isRunCentricTaskType(params.type)
const workflowType = workflowTypeFromTaskType(params.type)
const reusableRun = runCentricTask
? await findReusableActiveRun({
userId: params.userId,
projectId: params.projectId,
workflowType,
targetType: params.targetType,
targetId: params.targetId,
})
: null
if (runCentricTask && reusableRun?.taskId) {
const existingTask = await getTaskById(reusableRun.taskId)
if (
existingTask
&& (existingTask.status === TASK_STATUS.QUEUED || existingTask.status === TASK_STATUS.PROCESSING)
) {
return {
success: true,
async: true,
taskId: existingTask.id,
runId: reusableRun.id,
status: existingTask.status,
deduped: true as const,
}
}
}
const { task, deduped } = await createTask({
userId: params.userId,
@@ -134,18 +172,29 @@ export async function submitTask(params: {
targetType: params.targetType,
targetId: params.targetId,
payload: normalizedPayload,
dedupeKey: params.dedupeKey || null,
dedupeKey: runCentricTask ? null : (params.dedupeKey || null),
priority: params.priority,
maxAttempts: params.maxAttempts,
billingInfo: resolvedBillingInfo || null,
})
let runId = resolveRunIdFromPayload(task.payload)
if (!deduped && isAiTaskType(params.type) && !runId) {
let runId = reusableRun?.id || resolveRunIdFromPayload(task.payload)
if (!deduped && reusableRun && runId) {
const payloadWithRunId = {
...normalizedPayload,
runId,
meta: {
...toObject(normalizedPayload.meta),
runId,
},
}
await updateTaskPayload(task.id, payloadWithRunId)
await attachTaskToRun(runId, task.id)
} else if (!deduped && isAiTaskType(params.type) && !runId) {
const run = await createRun({
userId: params.userId,
projectId: params.projectId,
episodeId: params.episodeId || null,
workflowType: workflowTypeFromTaskType(params.type),
workflowType,
taskType: params.type,
taskId: task.id,
targetType: params.targetType,

View File

@@ -5,6 +5,7 @@ export const TASK_STATUS = {
PROCESSING: 'processing',
COMPLETED: 'completed',
FAILED: 'failed',
CANCELED: 'canceled',
DISMISSED: 'dismissed',
} as const

View File

@@ -16,6 +16,11 @@ export type WorkerInternalLLMStreamCallbacks = InternalLLMStreamCallbacks & {
flush: () => Promise<void>
}
export type WorkerLLMActiveController = {
assertActive?: (stage: string) => Promise<void>
isActive?: () => Promise<boolean>
}
export function createWorkerLLMStreamContext(job: Job<TaskJobData>, label = 'worker'): WorkerLLMStreamContext {
return {
streamRunId: `run:${job.data.taskId}:${label}:${Date.now().toString(36)}:${Math.random().toString(36).slice(2, 8)}`,
@@ -33,6 +38,7 @@ function nextWorkerStreamSeq(streamContext: WorkerLLMStreamContext, stepId: stri
export function createWorkerLLMStreamCallbacks(
job: Job<TaskJobData>,
streamContext: WorkerLLMStreamContext,
activeController?: WorkerLLMActiveController,
): WorkerInternalLLMStreamCallbacks {
const maxChunkChars = 128
const activeProbeIntervalMs = 600
@@ -54,13 +60,28 @@ export function createWorkerLLMStreamCallbacks(
if (terminatedError) throw terminatedError
}
const assertActive = async (stage: string) => {
if (activeController?.assertActive) {
await activeController.assertActive(stage)
return
}
await assertTaskActive(job, stage)
}
const probeActive = async () => {
if (activeController?.isActive) {
return await activeController.isActive()
}
return await isTaskActive(job.data.taskId)
}
const scheduleActiveProbe = () => {
if (terminatedError || checkingActive) return
const now = Date.now()
if (now - lastActiveProbeAt < activeProbeIntervalMs) return
checkingActive = true
lastActiveProbeAt = now
void isTaskActive(job.data.taskId)
void probeActive()
.then((active) => {
if (!active) {
markTerminated('worker_llm_stream_probe')
@@ -78,7 +99,7 @@ export function createWorkerLLMStreamCallbacks(
.catch(() => undefined)
.then(async () => {
ensureActiveOrThrow(stage)
await assertTaskActive(job, stage)
await assertActive(stage)
await work()
})
.catch((error) => {

View File

@@ -11,12 +11,13 @@ import { onProjectNameAvailable } from '@/lib/logging/file-writer'
import { buildCharactersIntroduction } from '@/lib/constants'
import { TaskTerminatedError } from '@/lib/task/errors'
import { reportTaskProgress } from '@/lib/workers/shared'
import { assertTaskActive } from '@/lib/workers/utils'
import {
JsonParseError,
runScriptToStoryboardOrchestrator,
type ScriptToStoryboardStepMeta,
type ScriptToStoryboardStepOutput,
type ScriptToStoryboardOrchestratorResult,
} from '@/lib/novel-promotion/script-to-storyboard/orchestrator'
import { runScriptToStoryboardGraph } from '@/lib/workflows/script-to-storyboard/graph'
import { createWorkerLLMStreamCallbacks, createWorkerLLMStreamContext } from './llm-stream'
import type { TaskJobData } from '@/lib/task/types'
import {
@@ -32,6 +33,7 @@ import {
import { buildPrompt, getPromptTemplate, PROMPT_IDS } from '@/lib/prompt-i18n'
import { resolveAnalysisModel } from './resolve-analysis-model'
import { createArtifact } from '@/lib/run-runtime/service'
import { assertWorkflowRunActive, withWorkflowRunLease } from '@/lib/run-runtime/workflow-lease'
import {
parseStoryboardRetryTarget,
runScriptToStoryboardAtomicRetry,
@@ -40,6 +42,10 @@ import {
type AnyObj = Record<string, unknown>
const MAX_VOICE_ANALYZE_ATTEMPTS = 2
/** Builds a stable worker identity string: `<label>:<queue>:<taskId>`. */
function buildWorkflowWorkerId(job: Job<TaskJobData>, label: string) {
  return [label, job.queueName, job.data.taskId].join(':')
}
/** Type guard: narrows an unknown value to a supported reasoning-effort level. */
function isReasoningEffort(value: unknown): value is 'minimal' | 'low' | 'medium' | 'high' {
  const levels: readonly string[] = ['minimal', 'low', 'medium', 'high']
  return typeof value === 'string' && levels.includes(value)
}
@@ -135,19 +141,44 @@ export async function handleScriptToStoryboardTask(job: Job<TaskJobData>) {
const reasoningEffort = requestedReasoningEffort
|| (isReasoningEffort(capabilityReasoningEffort) ? capabilityReasoningEffort : 'high')
await reportTaskProgress(job, 10, {
stage: 'script_to_storyboard_prepare',
stageLabel: 'progress.stage.scriptToStoryboardPrepare',
displayMode: 'detail',
})
const phase1PlanTemplate = getPromptTemplate(PROMPT_IDS.NP_AGENT_STORYBOARD_PLAN, job.data.locale)
const phase2CinematographyTemplate = getPromptTemplate(PROMPT_IDS.NP_AGENT_CINEMATOGRAPHER, job.data.locale)
const phase2ActingTemplate = getPromptTemplate(PROMPT_IDS.NP_AGENT_ACTING_DIRECTION, job.data.locale)
const phase3DetailTemplate = getPromptTemplate(PROMPT_IDS.NP_AGENT_STORYBOARD_DETAIL, job.data.locale)
const payloadMeta = typeof payload.meta === 'object' && payload.meta !== null
? (payload.meta as AnyObj)
: {}
const runId = typeof payload.runId === 'string' && payload.runId.trim()
? payload.runId.trim()
: (typeof payloadMeta.runId === 'string' ? payloadMeta.runId.trim() : '')
if (!runId) {
throw new Error('runId is required for script_to_storyboard pipeline')
}
const workerId = buildWorkflowWorkerId(job, 'script_to_storyboard')
const assertRunActive = async (stage: string) => {
await assertWorkflowRunActive({
runId,
workerId,
stage,
})
}
const streamContext = createWorkerLLMStreamContext(job, 'script_to_storyboard')
const callbacks = createWorkerLLMStreamCallbacks(job, streamContext)
const callbacks = createWorkerLLMStreamCallbacks(job, streamContext, {
assertActive: async (stage) => {
await assertRunActive(stage)
},
isActive: async () => {
try {
await assertRunActive('worker_llm_stream_probe')
return true
} catch (error) {
if (error instanceof TaskTerminatedError) {
return false
}
throw error
}
},
})
const runStep = async (
meta: ScriptToStoryboardStepMeta,
@@ -158,7 +189,7 @@ export async function handleScriptToStoryboardTask(job: Job<TaskJobData>) {
void _maxOutputTokens
const stepAttempt = meta.stepAttempt
|| (retryStepKey && meta.stepId === retryStepKey ? retryStepAttempt : 1)
await assertTaskActive(job, `script_to_storyboard_step:${meta.stepId}`)
await assertRunActive(`script_to_storyboard_step:${meta.stepId}`)
const progress = 15 + Math.min(70, Math.floor((meta.stepIndex / Math.max(1, meta.stepTotal)) * 70))
await reportTaskProgress(job, progress, {
stage: 'script_to_storyboard_step',
@@ -177,7 +208,6 @@ export async function handleScriptToStoryboardTask(job: Job<TaskJobData>) {
blockedBy: Array.isArray(meta.blockedBy) ? meta.blockedBy : [],
})
// Log prompt input
logAIAnalysis(job.data.userId, 'worker', projectId, project.name, {
action: `SCRIPT_TO_STORYBOARD_PROMPT:${action}`,
input: { stepId: meta.stepId, stepTitle: meta.stepTitle, prompt },
@@ -198,10 +228,8 @@ export async function handleScriptToStoryboardTask(job: Job<TaskJobData>) {
reasoning,
reasoningEffort,
})
// Ensure this step's stream terminal events are flushed before entering dependent steps.
await callbacks.flush()
// Log AI response output (full raw text included for JSON parse debugging)
logAIAnalysis(job.data.userId, 'worker', projectId, project.name, {
action: `SCRIPT_TO_STORYBOARD_OUTPUT:${action}`,
output: {
@@ -220,405 +248,415 @@ export async function handleScriptToStoryboardTask(job: Job<TaskJobData>) {
}
}
const payloadMeta = typeof payload.meta === 'object' && payload.meta !== null
? (payload.meta as AnyObj)
: {}
const runId = typeof payload.runId === 'string' && payload.runId.trim()
? payload.runId.trim()
: (typeof payloadMeta.runId === 'string' ? payloadMeta.runId.trim() : '')
if (!runId) {
throw new Error('runId is required for script_to_storyboard pipeline')
}
const leaseResult = await withWorkflowRunLease({
runId,
userId: job.data.userId,
workerId,
run: async () => {
await reportTaskProgress(job, 10, {
stage: 'script_to_storyboard_prepare',
stageLabel: 'progress.stage.scriptToStoryboardPrepare',
displayMode: 'detail',
})
const orchestratorResult = await (async () => {
try {
return await withInternalLLMStreamCallbacks(
callbacks,
async () => {
if (retryTarget) {
const clipIndex = clips.findIndex((clip) => clip.id === retryTarget.clipId)
if (clipIndex < 0) {
throw new Error(`Retry clip not found: ${retryTarget.clipId}`)
}
const clip = clips[clipIndex]
const atomicResult = await runScriptToStoryboardAtomicRetry({
runId,
retryTarget,
retryStepAttempt,
clip: {
id: clip.id,
content: clip.content,
characters: clip.characters,
location: clip.location,
screenplay: clip.screenplay,
},
clipIndex,
totalClipCount: clips.length,
novelPromotionData: {
characters: novelData.characters || [],
locations: novelData.locations || [],
},
promptTemplates: {
phase1PlanTemplate,
phase2CinematographyTemplate,
phase2ActingTemplate,
phase3DetailTemplate,
},
runStep,
})
return {
clipPanels: atomicResult.clipPanels,
phase1PanelsByClipId: atomicResult.phase1PanelsByClipId,
phase2CinematographyByClipId: atomicResult.phase2CinematographyByClipId,
phase2ActingByClipId: atomicResult.phase2ActingByClipId,
phase3PanelsByClipId: atomicResult.phase3PanelsByClipId,
summary: {
clipCount: selectedClips.length,
totalPanelCount: atomicResult.totalPanelCount,
totalStepCount: atomicResult.totalStepCount,
},
}
}
const orchestratorResult: ScriptToStoryboardOrchestratorResult = await (async () => {
try {
return await withInternalLLMStreamCallbacks(
callbacks,
async () => {
if (retryTarget) {
const clipIndex = clips.findIndex((clip) => clip.id === retryTarget.clipId)
if (clipIndex < 0) {
throw new Error(`Retry clip not found: ${retryTarget.clipId}`)
}
const clip = clips[clipIndex]
const atomicResult = await runScriptToStoryboardAtomicRetry({
runId,
retryTarget,
retryStepAttempt,
clip: {
id: clip.id,
content: clip.content,
characters: clip.characters,
location: clip.location,
screenplay: clip.screenplay,
},
clipIndex,
totalClipCount: clips.length,
novelPromotionData: {
characters: novelData.characters || [],
locations: novelData.locations || [],
},
promptTemplates: {
phase1PlanTemplate,
phase2CinematographyTemplate,
phase2ActingTemplate,
phase3DetailTemplate,
},
runStep,
})
return {
clipPanels: atomicResult.clipPanels,
phase1PanelsByClipId: atomicResult.phase1PanelsByClipId,
phase2CinematographyByClipId: atomicResult.phase2CinematographyByClipId,
phase2ActingByClipId: atomicResult.phase2ActingByClipId,
phase3PanelsByClipId: atomicResult.phase3PanelsByClipId,
summary: {
clipCount: selectedClips.length,
totalPanelCount: atomicResult.totalPanelCount,
totalStepCount: atomicResult.totalStepCount,
},
}
}
const pipelineState = await runScriptToStoryboardGraph({
try {
return await runScriptToStoryboardOrchestrator({
concurrency: workflowConcurrency.analysis,
clips: selectedClips.map((clip) => ({
id: clip.id,
content: clip.content,
characters: clip.characters,
location: clip.location,
screenplay: clip.screenplay,
})),
novelPromotionData: {
characters: novelData.characters || [],
locations: novelData.locations || [],
},
promptTemplates: {
phase1PlanTemplate,
phase2CinematographyTemplate,
phase2ActingTemplate,
phase3DetailTemplate,
},
runStep,
})
} catch (error) {
if (error instanceof JsonParseError) {
logAIAnalysis(job.data.userId, 'worker', projectId, project.name, {
action: 'SCRIPT_TO_STORYBOARD_PARSE_ERROR',
error: {
message: error.message,
rawTextPreview: error.rawText.slice(0, 3000),
rawTextLength: error.rawText.length,
},
model,
})
}
throw error
}
},
)
} finally {
await callbacks.flush()
}
})()
const phase1Map = orchestratorResult.phase1PanelsByClipId || {}
const phase2CinematographyMap = orchestratorResult.phase2CinematographyByClipId || {}
const phase2ActingMap = orchestratorResult.phase2ActingByClipId || {}
const phase3Map = orchestratorResult.phase3PanelsByClipId || {}
for (const clip of selectedClips) {
const phase1Panels = phase1Map[clip.id] || []
if (phase1Panels.length > 0) {
await createArtifact({
runId,
projectId,
userId: job.data.userId,
concurrency: workflowConcurrency.analysis,
clips: selectedClips.map((clip) => ({
id: clip.id,
content: clip.content,
characters: clip.characters,
location: clip.location,
screenplay: clip.screenplay,
})),
novelPromotionData: {
characters: novelData.characters || [],
locations: novelData.locations || [],
},
promptTemplates: {
phase1PlanTemplate,
phase2CinematographyTemplate,
phase2ActingTemplate,
phase3DetailTemplate,
},
runStep,
onParseError: (err) => {
logAIAnalysis(job.data.userId, 'worker', projectId, project.name, {
action: 'SCRIPT_TO_STORYBOARD_PARSE_ERROR',
error: {
message: err.message,
rawTextPreview: err.rawText.slice(0, 3000),
rawTextLength: err.rawText.length,
},
model,
})
stepKey: `clip_${clip.id}_phase1`,
artifactType: 'storyboard.clip.phase1',
refId: clip.id,
payload: {
panels: phase1Panels,
},
})
const result = pipelineState.orchestratorResult
if (!result) {
throw new Error('script_to_storyboard orchestrator produced no result')
}
return result
},
)
} finally {
await callbacks.flush()
}
})()
}
const phase2Cinematography = phase2CinematographyMap[clip.id] || []
if (phase2Cinematography.length > 0) {
await createArtifact({
runId,
stepKey: `clip_${clip.id}_phase2_cinematography`,
artifactType: 'storyboard.clip.phase2.cine',
refId: clip.id,
payload: {
rules: phase2Cinematography,
},
})
}
const phase2Acting = phase2ActingMap[clip.id] || []
if (phase2Acting.length > 0) {
await createArtifact({
runId,
stepKey: `clip_${clip.id}_phase2_acting`,
artifactType: 'storyboard.clip.phase2.acting',
refId: clip.id,
payload: {
directions: phase2Acting,
},
})
}
const phase3Panels = phase3Map[clip.id] || []
if (phase3Panels.length > 0) {
await createArtifact({
runId,
stepKey: `clip_${clip.id}_phase3_detail`,
artifactType: 'storyboard.clip.phase3',
refId: clip.id,
payload: {
panels: phase3Panels,
},
})
}
}
const phase1Map = orchestratorResult.phase1PanelsByClipId || {}
const phase2CinematographyMap = orchestratorResult.phase2CinematographyByClipId || {}
const phase2ActingMap = orchestratorResult.phase2ActingByClipId || {}
const phase3Map = orchestratorResult.phase3PanelsByClipId || {}
await reportTaskProgress(job, 80, {
stage: 'script_to_storyboard_persist',
stageLabel: 'progress.stage.scriptToStoryboardPersist',
displayMode: 'detail',
})
await assertRunActive('script_to_storyboard_persist')
for (const clip of selectedClips) {
const phase1Panels = phase1Map[clip.id] || []
if (phase1Panels.length > 0) {
await createArtifact({
runId,
stepKey: `clip_${clip.id}_phase1`,
artifactType: 'storyboard.clip.phase1',
refId: clip.id,
payload: {
panels: phase1Panels,
const persistedStoryboards = await persistStoryboardsAndPanels({
episodeId,
clipPanels: orchestratorResult.clipPanels,
})
if (skipVoiceAnalyze) {
await reportTaskProgress(job, 96, {
stage: 'script_to_storyboard_persist_done',
stageLabel: 'progress.stage.scriptToStoryboardPersistDone',
displayMode: 'detail',
message: 'step retry complete',
stepId: retryStepKey || undefined,
stepAttempt:
typeof payload.retryStepAttempt === 'number' && Number.isFinite(payload.retryStepAttempt)
? Math.max(1, Math.floor(payload.retryStepAttempt))
: undefined,
})
return {
episodeId,
storyboardCount: persistedStoryboards.length,
panelCount: orchestratorResult.summary.totalPanelCount,
voiceLineCount: 0,
retryStepKey,
}
}
if (!episode.novelText || !episode.novelText.trim()) {
throw new Error('No novel text to analyze')
}
const voicePrompt = buildPrompt({
promptId: PROMPT_IDS.NP_VOICE_ANALYSIS,
locale: job.data.locale,
variables: {
input: episode.novelText,
characters_lib_name: (novelData.characters || []).length > 0
? (novelData.characters || []).map((item) => item.name).join('、')
: '无',
characters_introduction: buildCharactersIntroduction(novelData.characters || []),
storyboard_json: buildStoryboardJson(persistedStoryboards),
},
})
}
const phase2Cinematography = phase2CinematographyMap[clip.id] || []
if (phase2Cinematography.length > 0) {
await createArtifact({
runId,
stepKey: `clip_${clip.id}_phase2_cinematography`,
artifactType: 'storyboard.clip.phase2.cine',
refId: clip.id,
payload: {
rules: phase2Cinematography,
},
})
}
const phase2Acting = phase2ActingMap[clip.id] || []
if (phase2Acting.length > 0) {
await createArtifact({
runId,
stepKey: `clip_${clip.id}_phase2_acting`,
artifactType: 'storyboard.clip.phase2.acting',
refId: clip.id,
payload: {
directions: phase2Acting,
},
})
}
const phase3Panels = phase3Map[clip.id] || []
if (phase3Panels.length > 0) {
await createArtifact({
runId,
stepKey: `clip_${clip.id}_phase3_detail`,
artifactType: 'storyboard.clip.phase3',
refId: clip.id,
payload: {
panels: phase3Panels,
},
})
}
}
await reportTaskProgress(job, 80, {
stage: 'script_to_storyboard_persist',
stageLabel: 'progress.stage.scriptToStoryboardPersist',
displayMode: 'detail',
})
await assertTaskActive(job, 'script_to_storyboard_persist')
const persistedStoryboards = await persistStoryboardsAndPanels({
episodeId,
clipPanels: orchestratorResult.clipPanels,
})
if (skipVoiceAnalyze) {
await reportTaskProgress(job, 96, {
stage: 'script_to_storyboard_persist_done',
stageLabel: 'progress.stage.scriptToStoryboardPersistDone',
displayMode: 'detail',
message: 'step retry complete',
stepId: retryStepKey || undefined,
stepAttempt:
typeof payload.retryStepAttempt === 'number' && Number.isFinite(payload.retryStepAttempt)
? Math.max(1, Math.floor(payload.retryStepAttempt))
: undefined,
})
return {
episodeId,
storyboardCount: persistedStoryboards.length,
panelCount: orchestratorResult.summary.totalPanelCount,
voiceLineCount: 0,
retryStepKey,
}
}
if (!episode.novelText || !episode.novelText.trim()) {
throw new Error('No novel text to analyze')
}
const voicePrompt = buildPrompt({
promptId: PROMPT_IDS.NP_VOICE_ANALYSIS,
locale: job.data.locale,
variables: {
input: episode.novelText,
characters_lib_name: (novelData.characters || []).length > 0
? (novelData.characters || []).map((item) => item.name).join('、')
: '无',
characters_introduction: buildCharactersIntroduction(novelData.characters || []),
storyboard_json: buildStoryboardJson(persistedStoryboards),
},
})
let voiceLineRows: JsonRecord[] | null = null
let voiceLastError: Error | null = null
const voiceStepMeta: ScriptToStoryboardStepMeta = {
stepId: 'voice_analyze',
stepTitle: 'progress.streamStep.voiceAnalyze',
stepIndex: orchestratorResult.summary.totalStepCount,
stepTotal: orchestratorResult.summary.totalStepCount,
retryable: true,
}
try {
for (let voiceAttempt = 1; voiceAttempt <= MAX_VOICE_ANALYZE_ATTEMPTS; voiceAttempt++) {
const meta: ScriptToStoryboardStepMeta = {
...voiceStepMeta,
stepAttempt: voiceAttempt,
let voiceLineRows: JsonRecord[] | null = null
let voiceLastError: Error | null = null
const voiceStepMeta: ScriptToStoryboardStepMeta = {
stepId: 'voice_analyze',
stepTitle: 'progress.streamStep.voiceAnalyze',
stepIndex: orchestratorResult.summary.totalStepCount,
stepTotal: orchestratorResult.summary.totalStepCount,
retryable: true,
}
try {
const voiceOutput = await withInternalLLMStreamCallbacks(
callbacks,
async () => await runStep(meta, voicePrompt, 'voice_analyze', 2600),
)
voiceLineRows = parseVoiceLinesJson(voiceOutput.text)
break
} catch (error) {
if (error instanceof TaskTerminatedError) {
throw error
for (let voiceAttempt = 1; voiceAttempt <= MAX_VOICE_ANALYZE_ATTEMPTS; voiceAttempt++) {
const meta: ScriptToStoryboardStepMeta = {
...voiceStepMeta,
stepAttempt: voiceAttempt,
}
try {
const voiceOutput = await withInternalLLMStreamCallbacks(
callbacks,
async () => await runStep(meta, voicePrompt, 'voice_analyze', 2600),
)
voiceLineRows = parseVoiceLinesJson(voiceOutput.text)
break
} catch (error) {
if (error instanceof TaskTerminatedError) {
throw error
}
voiceLastError = error instanceof Error ? error : new Error(String(error))
if (voiceAttempt < MAX_VOICE_ANALYZE_ATTEMPTS) {
await reportTaskProgress(job, 84, {
stage: 'script_to_storyboard_step',
stageLabel: 'progress.stage.scriptToStoryboardStep',
displayMode: 'detail',
message: `台词分析失败,准备重试 (${voiceAttempt + 1}/${MAX_VOICE_ANALYZE_ATTEMPTS})`,
stepId: voiceStepMeta.stepId,
stepAttempt: voiceAttempt + 1,
stepTitle: voiceStepMeta.stepTitle,
stepIndex: voiceStepMeta.stepIndex,
stepTotal: voiceStepMeta.stepTotal,
})
}
}
}
voiceLastError = error instanceof Error ? error : new Error(String(error))
if (voiceAttempt < MAX_VOICE_ANALYZE_ATTEMPTS) {
await reportTaskProgress(job, 84, {
stage: 'script_to_storyboard_step',
stageLabel: 'progress.stage.scriptToStoryboardStep',
displayMode: 'detail',
message: `台词分析失败,准备重试 (${voiceAttempt + 1}/${MAX_VOICE_ANALYZE_ATTEMPTS})`,
stepId: voiceStepMeta.stepId,
stepAttempt: voiceAttempt + 1,
stepTitle: voiceStepMeta.stepTitle,
stepIndex: voiceStepMeta.stepIndex,
stepTotal: voiceStepMeta.stepTotal,
})
} finally {
await callbacks.flush()
}
if (!voiceLineRows) {
throw voiceLastError!
}
await createArtifact({
runId,
stepKey: 'voice_analyze',
artifactType: 'voice.lines',
refId: episodeId,
payload: {
lines: voiceLineRows,
},
})
await assertRunActive('script_to_storyboard_voice_persist')
const panelIdByStoryboardPanel = new Map<string, string>()
for (const storyboard of persistedStoryboards) {
for (const panel of storyboard.panels) {
panelIdByStoryboardPanel.set(`${storyboard.storyboardId}:${panel.panelIndex}`, panel.id)
}
}
}
} finally {
await callbacks.flush()
}
if (!voiceLineRows) {
throw voiceLastError!
}
await createArtifact({
runId,
stepKey: 'voice_analyze',
artifactType: 'voice.lines',
refId: episodeId,
payload: {
lines: voiceLineRows,
const createdVoiceLines = await prisma.$transaction(async (tx) => {
const voiceLineModel = tx.novelPromotionVoiceLine as unknown as {
upsert?: (args: unknown) => Promise<{ id: string }>
create: (args: unknown) => Promise<{ id: string }>
deleteMany: (args: unknown) => Promise<unknown>
}
const created: Array<{ id: string }> = []
for (let i = 0; i < voiceLineRows.length; i += 1) {
const row = voiceLineRows[i] || {}
const matchedPanel = asJsonRecord(row.matchedPanel)
const matchedStoryboardId =
matchedPanel && typeof matchedPanel.storyboardId === 'string'
? matchedPanel.storyboardId.trim()
: null
const matchedPanelIndex = matchedPanel ? toPositiveInt(matchedPanel.panelIndex) : null
let matchedPanelId: string | null = null
if (matchedPanel !== null) {
if (!matchedStoryboardId || matchedPanelIndex === null) {
throw new Error(`voice line ${i + 1} has invalid matchedPanel reference`)
}
const panelKey = `${matchedStoryboardId}:${matchedPanelIndex}`
const resolvedPanelId = panelIdByStoryboardPanel.get(panelKey)
if (!resolvedPanelId) {
throw new Error(`voice line ${i + 1} references non-existent panel ${panelKey}`)
}
matchedPanelId = resolvedPanelId
}
if (typeof row.emotionStrength !== 'number' || !Number.isFinite(row.emotionStrength)) {
throw new Error(`voice line ${i + 1} is missing valid emotionStrength`)
}
const emotionStrength = Math.min(1, Math.max(0.1, row.emotionStrength))
if (typeof row.lineIndex !== 'number' || !Number.isFinite(row.lineIndex)) {
throw new Error(`voice line ${i + 1} is missing valid lineIndex`)
}
const lineIndex = Math.floor(row.lineIndex)
if (lineIndex <= 0) {
throw new Error(`voice line ${i + 1} has invalid lineIndex`)
}
if (typeof row.speaker !== 'string' || !row.speaker.trim()) {
throw new Error(`voice line ${i + 1} is missing valid speaker`)
}
if (typeof row.content !== 'string' || !row.content.trim()) {
throw new Error(`voice line ${i + 1} is missing valid content`)
}
const upsertArgs = {
where: {
episodeId_lineIndex: {
episodeId,
lineIndex,
},
},
create: {
episodeId,
lineIndex,
speaker: row.speaker.trim(),
content: row.content,
emotionStrength,
matchedPanelId,
matchedStoryboardId: matchedPanelId ? matchedStoryboardId : null,
matchedPanelIndex,
},
update: {
speaker: row.speaker.trim(),
content: row.content,
emotionStrength,
matchedPanelId,
matchedStoryboardId: matchedPanelId ? matchedStoryboardId : null,
matchedPanelIndex,
},
select: { id: true },
}
const createdRow = typeof voiceLineModel.upsert === 'function'
? await voiceLineModel.upsert(upsertArgs)
: (
process.env.NODE_ENV === 'test'
? await voiceLineModel.create({
data: upsertArgs.create,
select: { id: true },
})
: (() => { throw new Error('novelPromotionVoiceLine.upsert unavailable') })()
)
created.push(createdRow)
}
const nextLineIndexes = voiceLineRows
.map((row) => (typeof row.lineIndex === 'number' && Number.isFinite(row.lineIndex) ? Math.floor(row.lineIndex) : -1))
.filter((value) => value > 0)
if (nextLineIndexes.length === 0) {
await voiceLineModel.deleteMany({
where: {
episodeId,
},
})
} else {
await voiceLineModel.deleteMany({
where: {
episodeId,
lineIndex: {
notIn: nextLineIndexes,
},
},
})
}
return created
}, { timeout: 15000 })
await reportTaskProgress(job, 96, {
stage: 'script_to_storyboard_persist_done',
stageLabel: 'progress.stage.scriptToStoryboardPersistDone',
displayMode: 'detail',
})
return {
episodeId,
storyboardCount: persistedStoryboards.length,
panelCount: orchestratorResult.summary.totalPanelCount,
voiceLineCount: createdVoiceLines.length,
}
},
})
await assertTaskActive(job, 'script_to_storyboard_voice_persist')
const panelIdByStoryboardPanel = new Map<string, string>()
for (const storyboard of persistedStoryboards) {
for (const panel of storyboard.panels) {
panelIdByStoryboardPanel.set(`${storyboard.storyboardId}:${panel.panelIndex}`, panel.id)
if (!leaseResult.claimed || !leaseResult.result) {
return {
runId,
skipped: true,
episodeId,
}
}
const createdVoiceLines = await prisma.$transaction(async (tx) => {
const voiceLineModel = tx.novelPromotionVoiceLine as unknown as {
upsert?: (args: unknown) => Promise<{ id: string }>
create: (args: unknown) => Promise<{ id: string }>
deleteMany: (args: unknown) => Promise<unknown>
}
const created: Array<{ id: string }> = []
for (let i = 0; i < voiceLineRows.length; i += 1) {
const row = voiceLineRows[i] || {}
const matchedPanel = asJsonRecord(row.matchedPanel)
const matchedStoryboardId =
matchedPanel && typeof matchedPanel.storyboardId === 'string'
? matchedPanel.storyboardId.trim()
: null
const matchedPanelIndex = matchedPanel ? toPositiveInt(matchedPanel.panelIndex) : null
let matchedPanelId: string | null = null
if (matchedPanel !== null) {
if (!matchedStoryboardId || matchedPanelIndex === null) {
throw new Error(`voice line ${i + 1} has invalid matchedPanel reference`)
}
const panelKey = `${matchedStoryboardId}:${matchedPanelIndex}`
const resolvedPanelId = panelIdByStoryboardPanel.get(panelKey)
if (!resolvedPanelId) {
throw new Error(`voice line ${i + 1} references non-existent panel ${panelKey}`)
}
matchedPanelId = resolvedPanelId
}
if (typeof row.emotionStrength !== 'number' || !Number.isFinite(row.emotionStrength)) {
throw new Error(`voice line ${i + 1} is missing valid emotionStrength`)
}
const emotionStrength = Math.min(1, Math.max(0.1, row.emotionStrength))
if (typeof row.lineIndex !== 'number' || !Number.isFinite(row.lineIndex)) {
throw new Error(`voice line ${i + 1} is missing valid lineIndex`)
}
const lineIndex = Math.floor(row.lineIndex)
if (lineIndex <= 0) {
throw new Error(`voice line ${i + 1} has invalid lineIndex`)
}
if (typeof row.speaker !== 'string' || !row.speaker.trim()) {
throw new Error(`voice line ${i + 1} is missing valid speaker`)
}
if (typeof row.content !== 'string' || !row.content.trim()) {
throw new Error(`voice line ${i + 1} is missing valid content`)
}
const upsertArgs = {
where: {
episodeId_lineIndex: {
episodeId,
lineIndex,
},
},
create: {
episodeId,
lineIndex,
speaker: row.speaker.trim(),
content: row.content,
emotionStrength,
matchedPanelId,
matchedStoryboardId: matchedPanelId ? matchedStoryboardId : null,
matchedPanelIndex,
},
update: {
speaker: row.speaker.trim(),
content: row.content,
emotionStrength,
matchedPanelId,
matchedStoryboardId: matchedPanelId ? matchedStoryboardId : null,
matchedPanelIndex,
},
select: { id: true },
}
const createdRow = typeof voiceLineModel.upsert === 'function'
? await voiceLineModel.upsert(upsertArgs)
: (
process.env.NODE_ENV === 'test'
? await voiceLineModel.create({
data: upsertArgs.create,
select: { id: true },
})
: (() => { throw new Error('novelPromotionVoiceLine.upsert unavailable') })()
)
created.push(createdRow)
}
const nextLineIndexes = voiceLineRows
.map((row) => (typeof row.lineIndex === 'number' && Number.isFinite(row.lineIndex) ? Math.floor(row.lineIndex) : -1))
.filter((value) => value > 0)
if (nextLineIndexes.length === 0) {
await voiceLineModel.deleteMany({
where: {
episodeId,
},
})
} else {
await voiceLineModel.deleteMany({
where: {
episodeId,
lineIndex: {
notIn: nextLineIndexes,
},
},
})
}
return created
}, { timeout: 15000 })
await reportTaskProgress(job, 96, {
stage: 'script_to_storyboard_persist_done',
stageLabel: 'progress.stage.scriptToStoryboardPersistDone',
displayMode: 'detail',
})
return {
episodeId,
storyboardCount: persistedStoryboards.length,
panelCount: orchestratorResult.summary.totalPanelCount,
voiceLineCount: createdVoiceLines.length,
}
return leaseResult.result
}

View File

@@ -8,14 +8,14 @@ import {
import { withInternalLLMStreamCallbacks } from '@/lib/llm-observe/internal-stream-context'
import { logAIAnalysis } from '@/lib/logging/semantic'
import { onProjectNameAvailable } from '@/lib/logging/file-writer'
import { TaskTerminatedError } from '@/lib/task/errors'
import { reportTaskProgress } from '@/lib/workers/shared'
import { assertTaskActive } from '@/lib/workers/utils'
import {
runStoryToScriptOrchestrator,
type StoryToScriptStepMeta,
type StoryToScriptStepOutput,
type StoryToScriptOrchestratorResult,
} from '@/lib/novel-promotion/story-to-script/orchestrator'
import { runStoryToScriptGraph } from '@/lib/workflows/story-to-script/graph'
import { createWorkerLLMStreamCallbacks, createWorkerLLMStreamContext } from './llm-stream'
import type { TaskJobData } from '@/lib/task/types'
import {
@@ -31,6 +31,7 @@ import {
import { getPromptTemplate, PROMPT_IDS } from '@/lib/prompt-i18n'
import { resolveAnalysisModel } from './resolve-analysis-model'
import { createArtifact, listArtifacts } from '@/lib/run-runtime/service'
import { assertWorkflowRunActive, withWorkflowRunLease } from '@/lib/run-runtime/workflow-lease'
import { parseScreenplayPayload } from './screenplay-convert-helpers'
function isReasoningEffort(value: unknown): value is 'minimal' | 'low' | 'medium' | 'high' {
@@ -43,6 +44,10 @@ function resolveRetryClipId(retryStepKey: string): string | null {
return clipId || null
}
function buildWorkflowWorkerId(job: Job<TaskJobData>, label: string) {
return `${label}:${job.queueName}:${job.data.taskId}`
}
export async function handleStoryToScriptTask(job: Job<TaskJobData>) {
const payload = (job.data.payload || {}) as AnyObj
const projectId = job.data.projectId
@@ -125,22 +130,50 @@ export async function handleStoryToScriptTask(job: Job<TaskJobData>) {
if (!mergedContent.trim()) {
throw new Error('content is required')
}
const maxLength = 30000
const content = mergedContent.length > maxLength ? mergedContent.slice(0, maxLength) : mergedContent
await reportTaskProgress(job, 10, {
stage: 'story_to_script_prepare',
stageLabel: 'progress.stage.storyToScriptPrepare',
displayMode: 'detail',
})
const characterPromptTemplate = getPromptTemplate(PROMPT_IDS.NP_AGENT_CHARACTER_PROFILE, job.data.locale)
const locationPromptTemplate = getPromptTemplate(PROMPT_IDS.NP_SELECT_LOCATION, job.data.locale)
const clipPromptTemplate = getPromptTemplate(PROMPT_IDS.NP_AGENT_CLIP, job.data.locale)
const screenplayPromptTemplate = getPromptTemplate(PROMPT_IDS.NP_SCREENPLAY_CONVERSION, job.data.locale)
const maxLength = 30000
const content = mergedContent.length > maxLength ? mergedContent.slice(0, maxLength) : mergedContent
const payloadMeta = typeof payload.meta === 'object' && payload.meta !== null
? (payload.meta as AnyObj)
: {}
const runId = typeof payload.runId === 'string' && payload.runId.trim()
? payload.runId.trim()
: (typeof payloadMeta.runId === 'string' ? payloadMeta.runId.trim() : '')
if (!runId) {
throw new Error('runId is required for story_to_script pipeline')
}
const retryClipId = resolveRetryClipId(retryStepKey)
if (retryStepKey && !retryClipId) {
throw new Error(`unsupported retry step for story_to_script: ${retryStepKey}`)
}
const workerId = buildWorkflowWorkerId(job, 'story_to_script')
const assertRunActive = async (stage: string) => {
await assertWorkflowRunActive({
runId,
workerId,
stage,
})
}
const streamContext = createWorkerLLMStreamContext(job, 'story_to_script')
const callbacks = createWorkerLLMStreamCallbacks(job, streamContext)
const callbacks = createWorkerLLMStreamCallbacks(job, streamContext, {
assertActive: async (stage) => {
await assertRunActive(stage)
},
isActive: async () => {
try {
await assertRunActive('worker_llm_stream_probe')
return true
} catch (error) {
if (error instanceof TaskTerminatedError) {
return false
}
throw error
}
},
})
const runStep = async (
meta: StoryToScriptStepMeta,
@@ -151,7 +184,7 @@ export async function handleStoryToScriptTask(job: Job<TaskJobData>) {
void _maxOutputTokens
const stepAttempt = meta.stepAttempt
|| (retryStepKey && meta.stepId === retryStepKey ? retryStepAttempt : 1)
await assertTaskActive(job, `story_to_script_step:${meta.stepId}`)
await assertRunActive(`story_to_script_step:${meta.stepId}`)
const progress = 15 + Math.min(55, Math.floor((meta.stepIndex / Math.max(1, meta.stepTotal)) * 55))
await reportTaskProgress(job, progress, {
stage: 'story_to_script_step',
@@ -170,7 +203,6 @@ export async function handleStoryToScriptTask(job: Job<TaskJobData>) {
blockedBy: Array.isArray(meta.blockedBy) ? meta.blockedBy : [],
})
// Log prompt input
logAIAnalysis(job.data.userId, 'worker', projectId, project.name, {
action: `STORY_TO_SCRIPT_PROMPT:${action}`,
input: { stepId: meta.stepId, stepTitle: meta.stepTitle, prompt },
@@ -191,10 +223,8 @@ export async function handleStoryToScriptTask(job: Job<TaskJobData>) {
reasoning,
reasoningEffort,
})
// Ensure this step's stream terminal events are flushed before the orchestrator moves on.
await callbacks.flush()
// Log AI response output (full raw text included for debugging)
logAIAnalysis(job.data.userId, 'worker', projectId, project.name, {
action: `STORY_TO_SCRIPT_OUTPUT:${action}`,
output: {
@@ -213,319 +243,315 @@ export async function handleStoryToScriptTask(job: Job<TaskJobData>) {
}
}
let result: StoryToScriptOrchestratorResult | null = null
const payloadMeta = typeof payload.meta === 'object' && payload.meta !== null
? (payload.meta as AnyObj)
: {}
const runId = typeof payload.runId === 'string' && payload.runId.trim()
? payload.runId.trim()
: (typeof payloadMeta.runId === 'string' ? payloadMeta.runId.trim() : '')
if (!runId) {
throw new Error('runId is required for story_to_script pipeline')
}
const retryClipId = resolveRetryClipId(retryStepKey)
if (retryStepKey && !retryClipId) {
throw new Error(`unsupported retry step for story_to_script: ${retryStepKey}`)
}
const leaseResult = await withWorkflowRunLease({
runId,
userId: job.data.userId,
workerId,
run: async () => {
await reportTaskProgress(job, 10, {
stage: 'story_to_script_prepare',
stageLabel: 'progress.stage.storyToScriptPrepare',
displayMode: 'detail',
})
if (retryClipId) {
const splitArtifacts = await listArtifacts({
runId,
artifactType: 'clips.split',
limit: 1,
})
const latestSplit = splitArtifacts[0]
const splitPayload = latestSplit && typeof latestSplit.payload === 'object' && latestSplit.payload !== null
? (latestSplit.payload as Record<string, unknown>)
: null
if (!splitPayload) {
throw new Error('missing clips.split artifact for retry')
}
if (retryClipId) {
const splitArtifacts = await listArtifacts({
runId,
artifactType: 'clips.split',
limit: 1,
})
const latestSplit = splitArtifacts[0]
const splitPayload = latestSplit && typeof latestSplit.payload === 'object' && latestSplit.payload !== null
? (latestSplit.payload as Record<string, unknown>)
: null
if (!splitPayload) {
throw new Error('missing clips.split artifact for retry')
}
const clipRows = Array.isArray(splitPayload.clipList) ? splitPayload.clipList : []
const retryClip = clipRows.find((item) => {
if (!item || typeof item !== 'object' || Array.isArray(item)) return false
return asString((item as Record<string, unknown>).id).trim() === retryClipId
}) as Record<string, unknown> | undefined
if (!retryClip) {
throw new Error(`retry clip not found in artifact: ${retryClipId}`)
}
const clipRows = Array.isArray(splitPayload.clipList) ? splitPayload.clipList : []
const retryClip = clipRows.find((item) => {
if (!item || typeof item !== 'object' || Array.isArray(item)) return false
return asString((item as Record<string, unknown>).id).trim() === retryClipId
}) as Record<string, unknown> | undefined
if (!retryClip) {
throw new Error(`retry clip not found in artifact: ${retryClipId}`)
}
const clipContent = asString(retryClip.content)
if (!clipContent.trim()) {
throw new Error(`retry clip content is empty: ${retryClipId}`)
}
const clipContent = asString(retryClip.content)
if (!clipContent.trim()) {
throw new Error(`retry clip content is empty: ${retryClipId}`)
}
const screenplayPrompt = screenplayPromptTemplate
.replace('{clip_content}', clipContent)
.replace('{locations_lib_name}', asString(splitPayload.locationsLibName) || '无')
.replace('{characters_lib_name}', asString(splitPayload.charactersLibName) || '无')
.replace('{characters_introduction}', asString(splitPayload.charactersIntroduction) || '暂无角色介绍')
.replace('{clip_id}', retryClipId)
const screenplayPrompt = screenplayPromptTemplate
.replace('{clip_content}', clipContent)
.replace('{locations_lib_name}', asString(splitPayload.locationsLibName) || '无')
.replace('{characters_lib_name}', asString(splitPayload.charactersLibName) || '无')
.replace('{characters_introduction}', asString(splitPayload.charactersIntroduction) || '暂无角色介绍')
.replace('{clip_id}', retryClipId)
const stepMeta: StoryToScriptStepMeta = {
stepId: retryStepKey,
stepAttempt: retryStepAttempt,
stepTitle: 'progress.streamStep.screenplayConversion',
stepIndex: 1,
stepTotal: 1,
dependsOn: ['split_clips'],
retryable: true,
}
let screenplay: AnyObj | null = null
try {
const stepOutput = await (async () => {
const stepMeta: StoryToScriptStepMeta = {
stepId: retryStepKey,
stepAttempt: retryStepAttempt,
stepTitle: 'progress.streamStep.screenplayConversion',
stepIndex: 1,
stepTotal: 1,
dependsOn: ['split_clips'],
retryable: true,
}
let screenplay: AnyObj | null = null
try {
const stepOutput = await (async () => {
try {
return await withInternalLLMStreamCallbacks(
callbacks,
async () => await runStep(stepMeta, screenplayPrompt, 'screenplay_conversion', 2200),
)
} finally {
await callbacks.flush()
}
})()
screenplay = parseScreenplayPayload(stepOutput.text)
} catch (error) {
await createArtifact({
runId,
stepKey: retryStepKey,
artifactType: 'screenplay.clip',
refId: retryClipId,
payload: {
clipId: retryClipId,
success: false,
error: error instanceof Error ? error.message : String(error),
},
})
throw error
}
if (!screenplay) {
throw new Error('retry screenplay output is empty')
}
await createArtifact({
runId,
stepKey: retryStepKey,
artifactType: 'screenplay.clip',
refId: retryClipId,
payload: {
clipId: retryClipId,
success: true,
sceneCount: Array.isArray(screenplay.scenes) ? screenplay.scenes.length : 0,
screenplay,
},
})
let clipRecord = await prisma.novelPromotionClip.findFirst({
where: {
episodeId,
startText: asString(retryClip.startText) || null,
endText: asString(retryClip.endText) || null,
},
select: { id: true },
})
if (!clipRecord) {
clipRecord = await prisma.novelPromotionClip.create({
data: {
episodeId,
startText: asString(retryClip.startText) || null,
endText: asString(retryClip.endText) || null,
summary: asString(retryClip.summary),
location: asString(retryClip.location) || null,
characters: Array.isArray(retryClip.characters) ? JSON.stringify(retryClip.characters) : null,
content: clipContent,
},
select: { id: true },
})
}
await prisma.novelPromotionClip.update({
where: { id: clipRecord.id },
data: {
screenplay: JSON.stringify(screenplay),
},
})
await reportTaskProgress(job, 96, {
stage: 'story_to_script_persist_done',
stageLabel: 'progress.stage.storyToScriptPersistDone',
displayMode: 'detail',
message: 'retry step completed',
stepId: retryStepKey,
stepAttempt: retryStepAttempt,
stepTitle: 'progress.streamStep.screenplayConversion',
stepIndex: 1,
stepTotal: 1,
})
return {
episodeId,
clipCount: 1,
screenplaySuccessCount: 1,
screenplayFailedCount: 0,
persistedCharacters: 0,
persistedLocations: 0,
persistedClips: 1,
retryStepKey,
}
}
const result: StoryToScriptOrchestratorResult = await (async () => {
try {
return await withInternalLLMStreamCallbacks(
callbacks,
async () => await runStep(stepMeta, screenplayPrompt, 'screenplay_conversion', 2200),
async () => await runStoryToScriptOrchestrator({
concurrency: workflowConcurrency.analysis,
content,
baseCharacters: (novelData.characters || []).map((item) => item.name),
baseLocations: (novelData.locations || []).map((item) => item.name),
baseCharacterIntroductions: (novelData.characters || []).map((item) => ({
name: item.name,
introduction: item.introduction || '',
})),
promptTemplates: {
characterPromptTemplate,
locationPromptTemplate,
clipPromptTemplate,
screenplayPromptTemplate,
},
runStep,
}),
)
} finally {
await callbacks.flush()
}
})()
screenplay = parseScreenplayPayload(stepOutput.text)
} catch (error) {
await createArtifact({
runId,
stepKey: retryStepKey,
artifactType: 'screenplay.clip',
refId: retryClipId,
stepKey: 'analyze_characters',
artifactType: 'analysis.characters',
refId: episodeId,
payload: {
clipId: retryClipId,
success: false,
error: error instanceof Error ? error.message : String(error),
characters: result.analyzedCharacters,
raw: result.charactersObject,
},
})
throw error
}
if (!screenplay) {
throw new Error('retry screenplay output is empty')
}
await createArtifact({
runId,
stepKey: retryStepKey,
artifactType: 'screenplay.clip',
refId: retryClipId,
payload: {
clipId: retryClipId,
success: true,
sceneCount: Array.isArray(screenplay.scenes) ? screenplay.scenes.length : 0,
screenplay,
},
})
let clipRecord = await prisma.novelPromotionClip.findFirst({
where: {
episodeId,
startText: asString(retryClip.startText) || null,
endText: asString(retryClip.endText) || null,
},
select: { id: true },
})
if (!clipRecord) {
clipRecord = await prisma.novelPromotionClip.create({
data: {
episodeId,
startText: asString(retryClip.startText) || null,
endText: asString(retryClip.endText) || null,
summary: asString(retryClip.summary),
location: asString(retryClip.location) || null,
characters: Array.isArray(retryClip.characters) ? JSON.stringify(retryClip.characters) : null,
content: clipContent,
await createArtifact({
runId,
stepKey: 'analyze_locations',
artifactType: 'analysis.locations',
refId: episodeId,
payload: {
locations: result.analyzedLocations,
raw: result.locationsObject,
},
})
await createArtifact({
runId,
stepKey: 'split_clips',
artifactType: 'clips.split',
refId: episodeId,
payload: {
clipList: result.clipList,
charactersLibName: result.charactersLibName,
locationsLibName: result.locationsLibName,
charactersIntroduction: result.charactersIntroduction,
},
})
for (const screenplayResult of result.screenplayResults) {
await createArtifact({
runId,
stepKey: `screenplay_${screenplayResult.clipId}`,
artifactType: 'screenplay.clip',
refId: screenplayResult.clipId,
payload: {
...screenplayResult,
},
})
}
if (result.summary.screenplayFailedCount > 0) {
const failed = result.screenplayResults.filter((item) => !item.success)
const preview = failed
.slice(0, 3)
.map((item) => `${item.clipId}:${item.error || 'unknown error'}`)
.join(' | ')
throw new Error(
`STORY_TO_SCRIPT_PARTIAL_FAILED: ${result.summary.screenplayFailedCount}/${result.summary.clipCount} screenplay steps failed. ${preview}`,
)
}
await reportTaskProgress(job, 80, {
stage: 'story_to_script_persist',
stageLabel: 'progress.stage.storyToScriptPersist',
displayMode: 'detail',
})
await assertRunActive('story_to_script_persist')
const episodeStillExists = await prisma.novelPromotionEpisode.findUnique({
where: { id: episodeId },
select: { id: true },
})
}
await prisma.novelPromotionClip.update({
where: { id: clipRecord.id },
data: {
screenplay: JSON.stringify(screenplay),
},
})
if (!episodeStillExists) {
throw new Error(`NOT_FOUND: Episode ${episodeId} was deleted while the task was running`)
}
await reportTaskProgress(job, 96, {
stage: 'story_to_script_persist_done',
stageLabel: 'progress.stage.storyToScriptPersistDone',
displayMode: 'detail',
message: 'retry step completed',
stepId: retryStepKey,
stepAttempt: retryStepAttempt,
stepTitle: 'progress.streamStep.screenplayConversion',
stepIndex: 1,
stepTotal: 1,
})
return {
episodeId,
clipCount: 1,
screenplaySuccessCount: 1,
screenplayFailedCount: 0,
persistedCharacters: 0,
persistedLocations: 0,
persistedClips: 1,
retryStepKey,
}
}
const pipelineState = await (async () => {
try {
return await withInternalLLMStreamCallbacks(
callbacks,
async () => await runStoryToScriptGraph({
runId,
projectId,
userId: job.data.userId,
concurrency: workflowConcurrency.analysis,
content,
baseCharacters: (novelData.characters || []).map((item) => item.name),
baseLocations: (novelData.locations || []).map((item) => item.name),
baseCharacterIntroductions: (novelData.characters || []).map((item) => ({
name: item.name,
introduction: item.introduction || '',
})),
promptTemplates: {
characterPromptTemplate,
locationPromptTemplate,
clipPromptTemplate,
screenplayPromptTemplate,
},
runStep,
}),
const existingCharacterNames = new Set<string>(
(novelData.characters || []).map((item) => String(item.name || '').toLowerCase()),
)
const existingLocationNames = new Set<string>(
(novelData.locations || []).map((item) => String(item.name || '').toLowerCase()),
)
} finally {
await callbacks.flush()
}
})()
result = pipelineState.orchestratorResult
if (!result) {
throw new Error('story_to_script orchestrator produced no result')
}
const createdCharacters = await persistAnalyzedCharacters({
projectInternalId: novelData.id,
existingNames: existingCharacterNames,
analyzedCharacters: result.analyzedCharacters,
})
await createArtifact({
runId,
stepKey: 'analyze_characters',
artifactType: 'analysis.characters',
refId: episodeId,
payload: {
characters: result.analyzedCharacters,
raw: result.charactersObject,
const createdLocations = await persistAnalyzedLocations({
projectInternalId: novelData.id,
existingNames: existingLocationNames,
analyzedLocations: result.analyzedLocations,
})
const createdClipRows = await persistClips({
episodeId,
clipList: result.clipList,
})
const clipIdMap = new Map(createdClipRows.map((item) => [item.clipKey, item.id]))
for (const screenplayResult of result.screenplayResults) {
if (!screenplayResult.success || !screenplayResult.screenplay) continue
const clipRecordId = resolveClipRecordId(clipIdMap, screenplayResult.clipId)
if (!clipRecordId) continue
await prisma.novelPromotionClip.update({
where: { id: clipRecordId },
data: {
screenplay: JSON.stringify(screenplayResult.screenplay),
},
})
}
await reportTaskProgress(job, 96, {
stage: 'story_to_script_persist_done',
stageLabel: 'progress.stage.storyToScriptPersistDone',
displayMode: 'detail',
})
return {
episodeId,
clipCount: result.summary.clipCount,
screenplaySuccessCount: result.summary.screenplaySuccessCount,
screenplayFailedCount: result.summary.screenplayFailedCount,
persistedCharacters: createdCharacters.length,
persistedLocations: createdLocations.length,
persistedClips: createdClipRows.length,
}
},
})
await createArtifact({
runId,
stepKey: 'analyze_locations',
artifactType: 'analysis.locations',
refId: episodeId,
payload: {
locations: result.analyzedLocations,
raw: result.locationsObject,
},
})
await createArtifact({
runId,
stepKey: 'split_clips',
artifactType: 'clips.split',
refId: episodeId,
payload: {
clipList: result.clipList,
charactersLibName: result.charactersLibName,
locationsLibName: result.locationsLibName,
charactersIntroduction: result.charactersIntroduction,
},
})
for (const screenplayResult of result.screenplayResults) {
await createArtifact({
if (!leaseResult.claimed || !leaseResult.result) {
return {
runId,
stepKey: `screenplay_${screenplayResult.clipId}`,
artifactType: 'screenplay.clip',
refId: screenplayResult.clipId,
payload: {
...screenplayResult,
},
})
}
if (result.summary.screenplayFailedCount > 0) {
const failed = result.screenplayResults.filter((item) => !item.success)
const preview = failed
.slice(0, 3)
.map((item) => `${item.clipId}:${item.error || 'unknown error'}`)
.join(' | ')
throw new Error(
`STORY_TO_SCRIPT_PARTIAL_FAILED: ${result.summary.screenplayFailedCount}/${result.summary.clipCount} screenplay steps failed. ${preview}`,
)
}
await reportTaskProgress(job, 80, {
stage: 'story_to_script_persist',
stageLabel: 'progress.stage.storyToScriptPersist',
displayMode: 'detail',
})
await assertTaskActive(job, 'story_to_script_persist')
// Re-verify episode still exists before persisting — it may have been deleted
// while AI steps were running, which would cause a Prisma foreign key error.
const episodeStillExists = await prisma.novelPromotionEpisode.findUnique({
where: { id: episodeId },
select: { id: true },
})
if (!episodeStillExists) {
throw new Error(`NOT_FOUND: Episode ${episodeId} was deleted while the task was running`)
}
const existingCharacterNames = new Set<string>(
(novelData.characters || []).map((item) => String(item.name || '').toLowerCase()),
)
const existingLocationNames = new Set<string>(
(novelData.locations || []).map((item) => String(item.name || '').toLowerCase()),
)
const createdCharacters = await persistAnalyzedCharacters({
projectInternalId: novelData.id,
existingNames: existingCharacterNames,
analyzedCharacters: result.analyzedCharacters,
})
const createdLocations = await persistAnalyzedLocations({
projectInternalId: novelData.id,
existingNames: existingLocationNames,
analyzedLocations: result.analyzedLocations,
})
const createdClipRows = await persistClips({
episodeId,
clipList: result.clipList,
})
const clipIdMap = new Map(createdClipRows.map((item) => [item.clipKey, item.id]))
for (const screenplayResult of result.screenplayResults) {
if (!screenplayResult.success || !screenplayResult.screenplay) continue
const clipRecordId = resolveClipRecordId(clipIdMap, screenplayResult.clipId)
if (!clipRecordId) continue
await prisma.novelPromotionClip.update({
where: { id: clipRecordId },
data: {
screenplay: JSON.stringify(screenplayResult.screenplay),
},
})
}
await reportTaskProgress(job, 96, {
stage: 'story_to_script_persist_done',
stageLabel: 'progress.stage.storyToScriptPersistDone',
displayMode: 'detail',
})
return {
episodeId,
clipCount: result.summary.clipCount,
screenplaySuccessCount: result.summary.screenplaySuccessCount,
screenplayFailedCount: result.summary.screenplayFailedCount,
persistedCharacters: createdCharacters.length,
persistedLocations: createdLocations.length,
persistedClips: createdClipRows.length,
skipped: true,
episodeId,
}
}
return leaseResult.result
}

View File

@@ -13,13 +13,16 @@ import {
updateTaskBillingInfo,
} from '@/lib/task/service'
import { publishTaskEvent, publishTaskStreamEvent } from '@/lib/task/publisher'
import { TASK_EVENT_TYPE, TASK_TYPE, type TaskBillingInfo, type TaskJobData } from '@/lib/task/types'
import { TASK_EVENT_TYPE, TASK_SSE_EVENT_TYPE, TASK_TYPE, type SSEEvent, type TaskBillingInfo, type TaskJobData } from '@/lib/task/types'
import { buildTaskProgressMessage, getTaskStageLabel } from '@/lib/task/progress-message'
import { normalizeAnyError } from '@/lib/errors/normalize'
import { rollbackTaskBilling, settleTaskBilling } from '@/lib/billing'
import { withTextUsageCollection } from '@/lib/billing/runtime-usage'
import { onProjectNameAvailable } from '@/lib/logging/file-writer'
import type { NormalizedError } from '@/lib/errors/types'
import { mapTaskSSEEventToRunEvents } from '@/lib/run-runtime/task-bridge'
import { publishRunEvent } from '@/lib/run-runtime/publisher'
import { RUN_EVENT_TYPE } from '@/lib/run-runtime/types'
function toObject(value: unknown): Record<string, unknown> {
if (!value || typeof value !== 'object' || Array.isArray(value)) return {}
@@ -76,6 +79,12 @@ function withFlowFields(jobData: TaskJobData, payload?: Record<string, unknown>
return base
}
/**
 * Extract the graph run id from a task job's flow fields.
 * Returns the trimmed id, or null when it is absent, blank, or not a string.
 */
function resolveRunId(jobData: TaskJobData): string | null {
  const candidate = extractFlowFields(jobData).runId
  if (typeof candidate !== 'string') return null
  const trimmed = candidate.trim()
  return trimmed.length > 0 ? trimmed : null
}
function buildWorkerLogger(data: TaskJobData, queueName: string) {
return createScopedLogger({
module: `worker.${queueName}`,
@@ -91,10 +100,131 @@ const RUN_STREAM_REPLAY_PERSIST_TYPES = new Set<string>([
TASK_TYPE.SCRIPT_TO_STORYBOARD_RUN,
])
// Task types whose SSE events are additionally mirrored into the
// run-event stream (see publishMirroredRunEvents below).
const DIRECT_RUN_EVENT_TASK_TYPES = new Set<string>([
  TASK_TYPE.STORY_TO_SCRIPT_RUN,
  TASK_TYPE.SCRIPT_TO_STORYBOARD_RUN,
])

// True when stream events for this task type should be persisted for replay.
function shouldPersistRunStreamReplay(taskType: string): boolean {
  return RUN_STREAM_REPLAY_PERSIST_TYPES.has(taskType)
}

// True when this task type mirrors its task events directly as run events.
function shouldDirectPublishRunEvents(taskType: string): boolean {
  return DIRECT_RUN_EVENT_TASK_TYPES.has(taskType)
}
/**
 * Mirror a task SSE event into the run-event stream.
 * No-op for task types outside DIRECT_RUN_EVENT_TASK_TYPES. The synthetic
 * SSE message is mapped to zero or more run events, which are published
 * one at a time so consumers observe them in order.
 */
async function publishMirroredRunEvents(params: {
  taskId: string
  projectId: string
  userId: string
  taskType: string
  targetType: string
  targetId: string
  episodeId?: string | null
  eventType: typeof TASK_SSE_EVENT_TYPE[keyof typeof TASK_SSE_EVENT_TYPE]
  payload?: Record<string, unknown> | null
}) {
  if (!shouldDirectPublishRunEvents(params.taskType)) return
  // Unique-enough synthetic id: task id + base36 timestamp + random suffix.
  const syntheticId = [
    'direct',
    params.taskId,
    Date.now().toString(36),
    Math.random().toString(36).slice(2, 8),
  ].join(':')
  const message: SSEEvent = {
    id: syntheticId,
    type: params.eventType,
    taskId: params.taskId,
    projectId: params.projectId,
    userId: params.userId,
    ts: new Date().toISOString(),
    taskType: params.taskType,
    targetType: params.targetType,
    targetId: params.targetId,
    episodeId: params.episodeId || null,
    payload: (params.payload || null) as SSEEvent['payload'],
  }
  // Publish sequentially to preserve event ordering.
  for (const runEvent of mapTaskSSEEventToRunEvents(message)) {
    await publishRunEvent(runEvent)
  }
}
/**
 * Publish a task lifecycle event and, for direct-run workflows, mirror it
 * into the run-event stream. PROGRESS is mirrored as PROCESSING so the
 * mirrored stream carries a single "in flight" lifecycle type.
 */
async function publishLifecycleEvent(params: {
  taskId: string
  projectId: string
  userId: string
  type: typeof TASK_EVENT_TYPE[keyof typeof TASK_EVENT_TYPE]
  taskType: string
  targetType: string
  targetId: string
  episodeId?: string | null
  payload?: Record<string, unknown> | null
  persist?: boolean
}) {
  const { taskId, projectId, userId, taskType, targetType, targetId } = params
  const episodeId = params.episodeId || null
  await publishTaskEvent({
    taskId,
    projectId,
    userId,
    type: params.type,
    taskType,
    targetType,
    targetId,
    episodeId,
    payload: params.payload,
    persist: params.persist,
  })
  // Normalize the lifecycle type for the mirrored run event.
  const lifecycleType =
    params.type === TASK_EVENT_TYPE.PROGRESS
      ? TASK_EVENT_TYPE.PROCESSING
      : params.type
  await publishMirroredRunEvents({
    taskId,
    projectId,
    userId,
    taskType,
    targetType,
    targetId,
    episodeId,
    eventType: TASK_SSE_EVENT_TYPE.LIFECYCLE,
    payload: {
      ...params.payload,
      lifecycleType,
    },
  })
}
/**
 * Publish a task stream event and, for direct-run workflows, mirror it
 * into the run-event stream with the same payload.
 */
async function publishStreamEvent(params: {
  taskId: string
  projectId: string
  userId: string
  taskType: string
  targetType: string
  targetId: string
  episodeId?: string | null
  payload?: Record<string, unknown> | null
  persist?: boolean
}) {
  const { taskId, projectId, userId, taskType, targetType, targetId, payload } = params
  const episodeId = params.episodeId || null
  await publishTaskStreamEvent({
    taskId,
    projectId,
    userId,
    taskType,
    targetType,
    targetId,
    episodeId,
    payload,
    persist: params.persist,
  })
  await publishMirroredRunEvents({
    taskId,
    projectId,
    userId,
    taskType,
    targetType,
    targetId,
    episodeId,
    eventType: TASK_SSE_EVENT_TYPE.STREAM,
    payload,
  })
}
function resolveQueueAttempts(job: Job<TaskJobData>): number {
const attempts = (job.opts?.attempts ?? 1)
const value = typeof attempts === 'number' && Number.isFinite(attempts) ? Math.floor(attempts) : 1
@@ -243,7 +373,26 @@ export async function withTaskLifecycle(job: Job<TaskJobData>, handler: (job: Jo
requestId: data.trace?.requestId || null,
},
})
await publishTaskEvent({
if (shouldDirectPublishRunEvents(data.type)) {
const runId = resolveRunId(data)
if (runId) {
await publishRunEvent({
runId,
projectId: data.projectId,
userId: data.userId,
eventType: RUN_EVENT_TYPE.RUN_START,
payload: {
...processingPayload,
message: buildTaskProgressMessage({
eventType: TASK_EVENT_TYPE.PROCESSING,
taskType: data.type,
payload: processingPayload,
}),
},
})
}
}
await publishLifecycleEvent({
taskId,
projectId: data.projectId,
userId: data.userId,
@@ -297,7 +446,7 @@ export async function withTaskLifecycle(job: Job<TaskJobData>, handler: (job: Jo
requestId: data.trace?.requestId || null,
},
})
await publishTaskEvent({
await publishLifecycleEvent({
taskId,
projectId: data.projectId,
userId: data.userId,
@@ -411,7 +560,7 @@ export async function withTaskLifecycle(job: Job<TaskJobData>, handler: (job: Jo
})
try {
await publishTaskEvent({
await publishLifecycleEvent({
taskId,
projectId: data.projectId,
userId: data.userId,
@@ -468,7 +617,7 @@ export async function withTaskLifecycle(job: Job<TaskJobData>, handler: (job: Jo
if (process.env.NODE_ENV !== 'production' && error instanceof Error && typeof error.stack === 'string') {
failedPayload.errorStack = error.stack.slice(0, 8000)
}
await publishTaskEvent({
await publishLifecycleEvent({
taskId,
projectId: data.projectId,
userId: data.userId,
@@ -529,7 +678,7 @@ export async function reportTaskProgress(job: Job<TaskJobData>, progress: number
if (!updated) {
return
}
await publishTaskEvent({
await publishLifecycleEvent({
taskId: job.data.taskId,
projectId: job.data.projectId,
userId: job.data.userId,
@@ -562,7 +711,7 @@ export async function reportTaskStreamChunk(
message: payload?.message || (chunk.kind === 'reasoning' ? 'progress.runtime.llm.reasoning' : 'progress.runtime.llm.output'),
})
await publishTaskStreamEvent({
await publishStreamEvent({
taskId: job.data.taskId,
projectId: job.data.projectId,
userId: job.data.userId,

View File

@@ -0,0 +1,95 @@
const STORY_TO_SCRIPT_WORKFLOW = 'story_to_script_run'
const SCRIPT_TO_STORYBOARD_WORKFLOW = 'script_to_storyboard_run'

/**
 * De-duplicate step keys while preserving first-seen order.
 * Blank / whitespace-only keys are dropped.
 */
function uniqueStepKeys(stepKeys: Iterable<string>): string[] {
  const ordered = new Set<string>()
  for (const stepKey of stepKeys) {
    if (stepKey.trim().length > 0) {
      ordered.add(stepKey)
    }
  }
  return Array.from(ordered)
}
/**
 * Steps invalidated by retrying `stepKey` in the story-to-script workflow.
 * Re-running character or location analysis invalidates clip splitting (if it
 * ran) and every screenplay step; re-running clip splitting invalidates every
 * screenplay step. The retried step itself is always included.
 */
function resolveStoryToScriptInvalidation(params: {
  stepKey: string
  existingStepKeys: ReadonlySet<string>
}): string[] {
  const { stepKey, existingStepKeys } = params
  const affected = new Set<string>([stepKey])
  const isAnalysisStep = stepKey === 'analyze_characters' || stepKey === 'analyze_locations'
  if (isAnalysisStep && existingStepKeys.has('split_clips')) {
    affected.add('split_clips')
  }
  if (isAnalysisStep || stepKey === 'split_clips') {
    // Every downstream screenplay step depends on analysis + clip splitting.
    for (const existing of existingStepKeys) {
      if (existing.startsWith('screenplay_')) {
        affected.add(existing)
      }
    }
  }
  return uniqueStepKeys(affected)
}
type StoryboardPhase = 'phase1' | 'phase2_cinematography' | 'phase2_acting' | 'phase3_detail'

/**
 * Parse a storyboard step key of the form `clip_<clipId>_<phase>`.
 * Returns null when the key does not match or the clip id is blank.
 */
function parseStoryboardStepKey(stepKey: string): { clipId: string; phase: StoryboardPhase } | null {
  const pattern = /^clip_(.+)_(phase1|phase2_cinematography|phase2_acting|phase3_detail)$/
  const match = pattern.exec(stepKey.trim())
  if (match === null) return null
  const clipId = (match[1] || '').trim()
  if (clipId.length === 0) return null
  return { clipId, phase: match[2] as StoryboardPhase }
}
/**
 * Steps invalidated by retrying `stepKey` in the script-to-storyboard
 * workflow. Phase dependencies per clip: phase1 feeds both phase2 steps,
 * phase2 feeds phase3, and every phase feeds voice analysis. Results are
 * restricted to steps that actually exist on the run; `voice_analyze` and
 * unrecognized keys invalidate only themselves.
 */
function resolveScriptToStoryboardInvalidation(params: {
  stepKey: string
  existingStepKeys: ReadonlySet<string>
}): string[] {
  const { stepKey, existingStepKeys } = params
  // Voice analysis is a terminal step inside this workflow.
  if (stepKey === 'voice_analyze') return uniqueStepKeys([stepKey])
  const parsed = parseStoryboardStepKey(stepKey)
  if (parsed === null) return uniqueStepKeys([stepKey])
  const clipPrefix = `clip_${parsed.clipId}_`
  const downstream: string[] = []
  if (parsed.phase === 'phase1') {
    downstream.push(`${clipPrefix}phase2_cinematography`, `${clipPrefix}phase2_acting`)
  }
  if (parsed.phase !== 'phase3_detail') {
    downstream.push(`${clipPrefix}phase3_detail`)
  }
  downstream.push('voice_analyze')
  const candidates = [stepKey, ...downstream].filter((key) => existingStepKeys.has(key))
  return uniqueStepKeys(candidates)
}
/**
 * Resolve which step keys must be invalidated when `stepKey` is retried.
 * Dispatches to the workflow-specific dependency rules; unknown workflow
 * types invalidate only the retried step, and only if it exists on the run.
 */
export function resolveRetryInvalidationStepKeys(params: {
  workflowType: string
  stepKey: string
  existingStepKeys: string[]
}): string[] {
  const existingStepKeys: ReadonlySet<string> = new Set(params.existingStepKeys)
  switch (params.workflowType) {
    case STORY_TO_SCRIPT_WORKFLOW:
      return resolveStoryToScriptInvalidation({ stepKey: params.stepKey, existingStepKeys })
    case SCRIPT_TO_STORYBOARD_WORKFLOW:
      return resolveScriptToStoryboardInvalidation({ stepKey: params.stepKey, existingStepKeys })
    default:
      return uniqueStepKeys(existingStepKeys.has(params.stepKey) ? [params.stepKey] : [])
  }
}

View File

@@ -1,110 +0,0 @@
import { runPipelineGraph, type PipelineGraphState } from '@/lib/run-runtime/pipeline-graph'
import {
runScriptToStoryboardOrchestrator,
type ScriptToStoryboardOrchestratorResult,
type ScriptToStoryboardPromptTemplates,
type ScriptToStoryboardStepMeta,
type ScriptToStoryboardStepOutput,
JsonParseError,
} from '@/lib/novel-promotion/script-to-storyboard/orchestrator'
import type { CharacterAsset, LocationAsset } from '@/lib/storyboard-phases'
// Minimal clip shape consumed by the storyboard orchestrator; nullable
// fields mirror the stored clip row.
type ClipInput = {
  id: string
  content: string | null
  characters: string | null
  location: string | null
  screenplay: string | null
}

// Character / location asset libraries passed through to the orchestrator.
type NovelPromotionData = {
  characters: CharacterAsset[]
  locations: LocationAsset[]
}

// Pipeline graph state extended with the orchestrator's final result
// (null until the orchestrator node completes).
export type ScriptToStoryboardGraphState = PipelineGraphState & {
  orchestratorResult: ScriptToStoryboardOrchestratorResult | null
}

export type ScriptToStoryboardGraphInput = {
  runId: string
  projectId: string
  userId: string
  concurrency: number
  clips: ClipInput[]
  novelPromotionData: NovelPromotionData
  promptTemplates: ScriptToStoryboardPromptTemplates
  // Executes a single LLM step; injected by the caller.
  runStep: (
    meta: ScriptToStoryboardStepMeta,
    prompt: string,
    action: string,
    maxOutputTokens: number,
  ) => Promise<ScriptToStoryboardStepOutput>
  // Invoked when the run fails with a JsonParseError, before the rethrow.
  onParseError?: (error: JsonParseError) => Promise<void> | void
}

/**
 * Run the script-to-storyboard workflow as a two-node pipeline graph:
 * 1) orchestrator — runs the storyboard orchestrator over all clips and
 *    stores its result on the graph state;
 * 2) validate — fails the run when the orchestrator produced no result.
 * A JsonParseError escaping the graph is forwarded to onParseError and
 * then rethrown.
 */
export async function runScriptToStoryboardGraph(
  input: ScriptToStoryboardGraphInput,
): Promise<ScriptToStoryboardGraphState> {
  const initialState: ScriptToStoryboardGraphState = {
    refs: {},
    meta: {},
    orchestratorResult: null,
  }
  try {
    return await runPipelineGraph({
      runId: input.runId,
      projectId: input.projectId,
      userId: input.userId,
      state: initialState,
      nodes: [
        {
          key: 'script_to_storyboard_orchestrator',
          title: 'script_to_storyboard_orchestrator',
          maxAttempts: 2,
          timeoutMs: 1000 * 60 * 20, // 20-minute budget for the orchestration
          run: async (context) => {
            const nextResult = await runScriptToStoryboardOrchestrator({
              concurrency: input.concurrency,
              clips: input.clips,
              novelPromotionData: input.novelPromotionData,
              promptTemplates: input.promptTemplates,
              runStep: input.runStep,
            })
            context.state.orchestratorResult = nextResult
            return {
              output: {
                clipCount: nextResult.summary.clipCount,
                totalPanelCount: nextResult.summary.totalPanelCount,
              },
            }
          },
        },
        {
          key: 'script_to_storyboard_validate',
          title: 'script_to_storyboard_validate',
          maxAttempts: 1,
          timeoutMs: 1000 * 30,
          run: async (context) => {
            if (!context.state.orchestratorResult) {
              throw new Error('script_to_storyboard orchestrator produced no result')
            }
            return {
              output: {
                validated: true,
                totalPanelCount: context.state.orchestratorResult.summary.totalPanelCount,
              },
            }
          },
        },
      ],
    })
  } catch (error) {
    if (error instanceof JsonParseError) {
      await input.onParseError?.(error)
    }
    throw error
  }
}

View File

@@ -1,103 +0,0 @@
import { runPipelineGraph, type PipelineGraphState } from '@/lib/run-runtime/pipeline-graph'
import {
runStoryToScriptOrchestrator,
type StoryToScriptOrchestratorResult,
type StoryToScriptPromptTemplates,
type StoryToScriptStepMeta,
type StoryToScriptStepOutput,
} from '@/lib/novel-promotion/story-to-script/orchestrator'
// Pipeline graph state extended with the orchestrator's final result
// (null until the orchestrator node completes).
export type StoryToScriptGraphState = PipelineGraphState & {
  orchestratorResult: StoryToScriptOrchestratorResult | null
}

export type StoryToScriptGraphInput = {
  runId: string
  projectId: string
  userId: string
  concurrency: number
  content: string
  baseCharacters: string[]
  baseLocations: string[]
  baseCharacterIntroductions: Array<{ name: string; introduction?: string | null }>
  promptTemplates: StoryToScriptPromptTemplates
  // Executes a single LLM step; injected by the caller.
  runStep: (
    meta: StoryToScriptStepMeta,
    prompt: string,
    action: string,
    maxOutputTokens: number,
  ) => Promise<StoryToScriptStepOutput>
}

/**
 * Run the story-to-script workflow as a two-node pipeline graph:
 * 1) orchestrator — runs the story-to-script orchestrator over the input
 *    content and stores its result on the graph state;
 * 2) validate — fails the run when the orchestrator produced no result or
 *    any screenplay step failed (STORY_TO_SCRIPT_PARTIAL_FAILED).
 */
export async function runStoryToScriptGraph(
  input: StoryToScriptGraphInput,
): Promise<StoryToScriptGraphState> {
  const initialState: StoryToScriptGraphState = {
    refs: {},
    meta: {},
    orchestratorResult: null,
  }
  return await runPipelineGraph({
    runId: input.runId,
    projectId: input.projectId,
    userId: input.userId,
    state: initialState,
    nodes: [
      {
        key: 'story_to_script_orchestrator',
        title: 'story_to_script_orchestrator',
        maxAttempts: 2,
        timeoutMs: 1000 * 60 * 15, // 15-minute budget for the orchestration
        run: async (context) => {
          const orchestratorResult = await runStoryToScriptOrchestrator({
            content: input.content,
            concurrency: input.concurrency,
            baseCharacters: input.baseCharacters,
            baseLocations: input.baseLocations,
            baseCharacterIntroductions: input.baseCharacterIntroductions,
            promptTemplates: input.promptTemplates,
            runStep: input.runStep,
          })
          context.state.orchestratorResult = orchestratorResult
          return {
            output: {
              clipCount: orchestratorResult.summary.clipCount,
              screenplaySuccessCount: orchestratorResult.summary.screenplaySuccessCount,
              screenplayFailedCount: orchestratorResult.summary.screenplayFailedCount,
            },
          }
        },
      },
      {
        key: 'story_to_script_validate',
        title: 'story_to_script_validate',
        maxAttempts: 1,
        timeoutMs: 1000 * 30,
        run: async (context) => {
          const result = context.state.orchestratorResult
          if (!result) {
            throw new Error('story_to_script orchestrator produced no result')
          }
          // Treat any failed screenplay step as a run failure; surface up to
          // three failure previews in the error message for diagnostics.
          if (result.summary.screenplayFailedCount > 0) {
            const failed = result.screenplayResults.filter((item) => !item.success)
            const preview = failed
              .slice(0, 3)
              .map((item) => `${item.clipId}:${item.error || 'unknown error'}`)
              .join(' | ')
            throw new Error(
              `STORY_TO_SCRIPT_PARTIAL_FAILED: ${result.summary.screenplayFailedCount}/${result.summary.clipCount} screenplay steps failed. ${preview}`,
            )
          }
          return {
            output: {
              validated: true,
              clipCount: result.summary.clipCount,
            },
          }
        },
      },
    ],
  })
}

View File

@@ -0,0 +1,96 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { buildMockRequest } from '../../../helpers/request'
// Mutable auth flag plus hoisted mocks: vi.hoisted guarantees these exist
// before the vi.mock factories below run (vitest hoists vi.mock calls to
// the top of the module).
const authState = vi.hoisted(() => ({ authenticated: true }))
const getRunByIdMock = vi.hoisted(() => vi.fn())
const requestRunCancelMock = vi.hoisted(() => vi.fn())
const cancelTaskMock = vi.hoisted(() => vi.fn(async () => undefined))

// Stub the auth layer: a fixed user-1 session, or a 401 JSON Response when a
// test flips authState.authenticated off.
vi.mock('@/lib/api-auth', () => {
  const unauthorized = () => new Response(
    JSON.stringify({ error: { code: 'UNAUTHORIZED' } }),
    { status: 401, headers: { 'content-type': 'application/json' } },
  )
  return {
    isErrorResponse: (value: unknown) => value instanceof Response,
    requireUserAuth: async () => {
      if (!authState.authenticated) return unauthorized()
      return { session: { user: { id: 'user-1' } } }
    },
  }
})

// Replace the run service, task service and run-event publisher with the
// hoisted mocks so the route under test never touches real infrastructure.
vi.mock('@/lib/run-runtime/service', () => ({
  getRunById: getRunByIdMock,
  requestRunCancel: requestRunCancelMock,
}))

vi.mock('@/lib/task/service', () => ({
  cancelTask: cancelTaskMock,
}))

vi.mock('@/lib/run-runtime/publisher', () => ({
  publishRunEvent: publishRunEventMock,
}))
// Contract test for POST /api/runs/[runId]/cancel: cancelling a run should
// mark the run "canceling", mirror the cancellation onto the backing task,
// and publish a run.canceled event.
describe('api contract - run cancel route', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    authState.authenticated = true
    // Happy-path fixtures: run-1 belongs to user-1 and is backed by task-1.
    getRunByIdMock.mockResolvedValue({
      id: 'run-1',
      userId: 'user-1',
      projectId: 'project-1',
      taskId: 'task-1',
    })
    requestRunCancelMock.mockResolvedValue({
      id: 'run-1',
      userId: 'user-1',
      projectId: 'project-1',
      taskId: 'task-1',
      status: 'canceling',
    })
    cancelTaskMock.mockResolvedValue({
      task: {
        id: 'task-1',
        status: 'canceled',
        errorCode: 'TASK_CANCELLED',
        errorMessage: 'Run cancelled by user',
      },
      cancelled: true,
    })
  })
  it('marks the run canceled and mirrors task cancellation without failing the task', async () => {
    // Import the route after the mocks are registered so it picks up stubs.
    const { POST } = await import('@/app/api/runs/[runId]/cancel/route')
    const req = buildMockRequest({
      path: '/api/runs/run-1/cancel',
      method: 'POST',
    })
    const res = await POST(req, {
      params: Promise.resolve({ runId: 'run-1' }),
    })
    expect(res.status).toBe(200)
    const payload = await res.json() as {
      success: boolean
      run: {
        id: string
        status: string
      }
    }
    expect(payload.success).toBe(true)
    expect(payload.run).toMatchObject({
      id: 'run-1',
      status: 'canceling',
    })
    // The backing task is cancelled with the run-specific reason...
    expect(cancelTaskMock).toHaveBeenCalledWith('task-1', 'Run cancelled by user')
    // ...and a run.canceled event is published for this run.
    expect(publishRunEventMock).toHaveBeenCalledWith(expect.objectContaining({
      runId: 'run-1',
      eventType: 'run.canceled',
    }))
  })
})

View File

@@ -144,7 +144,7 @@ describe('api contract - task infra routes (behavior)', () => {
cancelTaskMock.mockResolvedValue({
task: {
...baseTask,
status: TASK_STATUS.FAILED,
status: TASK_STATUS.CANCELED,
errorCode: 'TASK_CANCELLED',
errorMessage: 'Task cancelled by user',
},
@@ -336,8 +336,11 @@ describe('api contract - task infra routes (behavior)', () => {
const req = buildMockRequest({ path: '/api/tasks/task-1', method: 'DELETE' })
const res = await DELETE(req, { params: Promise.resolve({ taskId: 'task-1' }) } as RouteContext)
expect(res.status).toBe(200)
const payload = await res.json() as { task: TaskRecord; cancelled: boolean }
expect(removeTaskJobMock).toHaveBeenCalledWith('task-1')
expect(payload.cancelled).toBe(true)
expect(payload.task.status).toBe(TASK_STATUS.CANCELED)
expect(publishTaskEventMock).toHaveBeenCalledWith(expect.objectContaining({
taskId: 'task-1',
projectId: 'project-1',

View File

@@ -60,7 +60,7 @@ describe('api specific - novel promotion project art style validation', () => {
vi.clearAllMocks()
})
it('accepts valid artStyle and syncs to user preference', async () => {
it('accepts valid artStyle and keeps user preference unchanged', async () => {
const mod = await import('@/app/api/novel-promotion/[projectId]/route')
const req = buildMockRequest({
path: '/api/novel-promotion/project-1',
@@ -77,11 +77,7 @@ describe('api specific - novel promotion project art style validation', () => {
data: expect.objectContaining({ artStyle: 'realistic' }),
}),
)
expect(prismaMock.userPreference.upsert).toHaveBeenCalledWith(
expect.objectContaining({
update: expect.objectContaining({ artStyle: 'realistic' }),
}),
)
expect(prismaMock.userPreference.upsert).not.toHaveBeenCalled()
})
it('rejects invalid artStyle with invalid params', async () => {
@@ -102,7 +98,7 @@ describe('api specific - novel promotion project art style validation', () => {
expect(prismaMock.userPreference.upsert).not.toHaveBeenCalled()
})
it('accepts audioModel and syncs it to user preference', async () => {
it('accepts audioModel and keeps user preference unchanged', async () => {
const mod = await import('@/app/api/novel-promotion/[projectId]/route')
const req = buildMockRequest({
path: '/api/novel-promotion/project-1',
@@ -121,12 +117,6 @@ describe('api specific - novel promotion project art style validation', () => {
}),
}),
)
expect(prismaMock.userPreference.upsert).toHaveBeenCalledWith(
expect.objectContaining({
update: expect.objectContaining({
audioModel: 'bailian::qwen3-tts-vd-2026-01-26',
}),
}),
)
expect(prismaMock.userPreference.upsert).not.toHaveBeenCalled()
})
})

View File

@@ -1,8 +1,9 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
import type { ApiError } from '@/lib/api-errors'
import { buildDefaultTaskBillingInfo } from '@/lib/billing/task-policy'
import { createRun } from '@/lib/run-runtime/service'
import { submitTask } from '@/lib/task/submitter'
import { TASK_TYPE } from '@/lib/task/types'
import { TASK_STATUS, TASK_TYPE } from '@/lib/task/types'
import { prisma } from '../../helpers/prisma'
import { resetBillingState } from '../../helpers/db-reset'
import { createTestUser, seedBalance } from '../../helpers/billing-fixtures'
@@ -11,19 +12,23 @@ const queueState = vi.hoisted(() => ({
mode: 'success' as 'success' | 'fail',
errorMessage: 'queue add failed',
}))
const addTaskJobMock = vi.hoisted(() => vi.fn(async () => ({ id: 'mock-job' })))
const publishTaskEventMock = vi.hoisted(() => vi.fn(async () => ({})))
vi.mock('@/lib/task/queues', () => ({
addTaskJob: vi.fn(async () => {
addTaskJob: addTaskJobMock,
}))
vi.mock('@/lib/task/publisher', () => ({
publishTaskEvent: publishTaskEventMock,
}))
addTaskJobMock.mockImplementation(async () => {
if (queueState.mode === 'fail') {
throw new Error(queueState.errorMessage)
}
return { id: 'mock-job' }
}),
}))
vi.mock('@/lib/task/publisher', () => ({
publishTaskEvent: vi.fn(async () => ({})),
}))
})
describe('billing/submitter integration', () => {
beforeEach(async () => {
@@ -31,6 +36,7 @@ describe('billing/submitter integration', () => {
process.env.BILLING_MODE = 'ENFORCE'
queueState.mode = 'success'
queueState.errorMessage = 'queue add failed'
vi.clearAllMocks()
})
it('builds billing info server-side for billable task submission', async () => {
@@ -181,4 +187,140 @@ describe('billing/submitter integration', () => {
const freeze = await prisma.balanceFreeze.findFirst({ orderBy: { createdAt: 'desc' } })
expect(freeze?.status).toBe('rolled_back')
})
it('reuses the active core analysis run instead of creating a second run', async () => {
process.env.BILLING_MODE = 'OFF'
const user = await createTestUser()
const existingTask = await prisma.task.create({
data: {
userId: user.id,
projectId: 'project-core',
episodeId: 'episode-core',
type: TASK_TYPE.STORY_TO_SCRIPT_RUN,
targetType: 'NovelPromotionEpisode',
targetId: 'episode-core',
status: TASK_STATUS.QUEUED,
payload: {
episodeId: 'episode-core',
analysisModel: 'model-core',
meta: { locale: 'zh' },
},
queuedAt: new Date(),
},
})
const run = await createRun({
userId: user.id,
projectId: 'project-core',
episodeId: 'episode-core',
workflowType: TASK_TYPE.STORY_TO_SCRIPT_RUN,
taskType: TASK_TYPE.STORY_TO_SCRIPT_RUN,
taskId: existingTask.id,
targetType: 'NovelPromotionEpisode',
targetId: 'episode-core',
input: {
episodeId: 'episode-core',
analysisModel: 'model-core',
meta: { locale: 'zh' },
},
})
await prisma.task.update({
where: { id: existingTask.id },
data: {
payload: {
episodeId: 'episode-core',
analysisModel: 'model-core',
runId: run.id,
meta: { locale: 'zh', runId: run.id },
},
},
})
const result = await submitTask({
userId: user.id,
locale: 'zh',
projectId: 'project-core',
episodeId: 'episode-core',
type: TASK_TYPE.STORY_TO_SCRIPT_RUN,
targetType: 'NovelPromotionEpisode',
targetId: 'episode-core',
payload: {
episodeId: 'episode-core',
analysisModel: 'model-core',
},
dedupeKey: 'story_to_script:episode-core',
})
expect(result.deduped).toBe(true)
expect(result.taskId).toBe(existingTask.id)
expect(result.runId).toBe(run.id)
expect(await prisma.graphRun.count()).toBe(1)
expect(addTaskJobMock).not.toHaveBeenCalled()
})
it('reattaches a new task to the existing active run when the old task is already terminal', async () => {
process.env.BILLING_MODE = 'OFF'
const user = await createTestUser()
const failedTask = await prisma.task.create({
data: {
userId: user.id,
projectId: 'project-core-retry',
episodeId: 'episode-core-retry',
type: TASK_TYPE.SCRIPT_TO_STORYBOARD_RUN,
targetType: 'NovelPromotionEpisode',
targetId: 'episode-core-retry',
status: TASK_STATUS.FAILED,
errorCode: 'TEST_FAILED',
errorMessage: 'old task already failed',
payload: {
episodeId: 'episode-core-retry',
analysisModel: 'model-core',
meta: { locale: 'zh' },
},
queuedAt: new Date(),
finishedAt: new Date(),
},
})
const run = await createRun({
userId: user.id,
projectId: 'project-core-retry',
episodeId: 'episode-core-retry',
workflowType: TASK_TYPE.SCRIPT_TO_STORYBOARD_RUN,
taskType: TASK_TYPE.SCRIPT_TO_STORYBOARD_RUN,
taskId: failedTask.id,
targetType: 'NovelPromotionEpisode',
targetId: 'episode-core-retry',
input: {
episodeId: 'episode-core-retry',
analysisModel: 'model-core',
meta: { locale: 'zh' },
},
})
const result = await submitTask({
userId: user.id,
locale: 'zh',
projectId: 'project-core-retry',
episodeId: 'episode-core-retry',
type: TASK_TYPE.SCRIPT_TO_STORYBOARD_RUN,
targetType: 'NovelPromotionEpisode',
targetId: 'episode-core-retry',
payload: {
episodeId: 'episode-core-retry',
analysisModel: 'model-core',
},
dedupeKey: 'script_to_storyboard:episode-core-retry',
})
expect(result.deduped).toBe(false)
expect(result.runId).toBe(run.id)
expect(result.taskId).not.toBe(failedTask.id)
const refreshedRun = await prisma.graphRun.findUnique({ where: { id: run.id } })
const newTask = await prisma.task.findUnique({ where: { id: result.taskId } })
expect(refreshedRun?.taskId).toBe(result.taskId)
expect(newTask?.status).toBe(TASK_STATUS.QUEUED)
expect(newTask?.payload).toMatchObject({
runId: run.id,
})
})
})

View File

@@ -122,7 +122,30 @@ describe('chain contract - text queue behavior', () => {
expect(calls).toHaveLength(1)
expect(calls[0]).toEqual(expect.objectContaining({
jobName: TASK_TYPE.SCRIPT_TO_STORYBOARD_RUN,
options: expect.objectContaining({ jobId: 'task-text-1', priority: 0 }),
options: expect.objectContaining({ jobId: 'task-text-1', priority: 0, attempts: 1 }),
}))
})
it('forces single queue attempt for core analysis workflows', async () => {
const { addTaskJob, QUEUE_NAME } = await import('@/lib/task/queues')
await addTaskJob({
taskId: 'task-text-story-1',
type: TASK_TYPE.STORY_TO_SCRIPT_RUN,
locale: 'zh',
projectId: 'project-1',
episodeId: 'episode-1',
targetType: 'NovelPromotionEpisode',
targetId: 'episode-1',
payload: { episodeId: 'episode-1' },
userId: 'user-1',
}, { attempts: 5 })
const calls = queueState.addCallsByQueue.get(QUEUE_NAME.TEXT) || []
expect(calls).toHaveLength(1)
expect(calls[0]?.options).toEqual(expect.objectContaining({
jobId: 'task-text-story-1',
attempts: 1,
}))
})

View File

@@ -0,0 +1,343 @@
import { beforeEach, describe, expect, it } from 'vitest'
import { retryFailedStep } from '@/lib/run-runtime/service'
import { RUN_STATUS, RUN_STEP_STATUS } from '@/lib/run-runtime/types'
import { prisma } from '../../helpers/prisma'
import { resetBillingState } from '../../helpers/db-reset'
import { createTestUser } from '../../helpers/billing-fixtures'
// Integration suite: retrying a failed step must reset that step, cascade
// invalidation to every downstream step/artifact depending on it, and leave
// independent branches (and their artifacts) untouched.
describe('run runtime retryFailedStep invalidation', () => {
beforeEach(async () => {
// Reset shared billing/user fixtures so each test starts from a clean DB.
await resetBillingState()
})
it('invalidates downstream story-to-script steps and artifacts', async () => {
const user = await createTestUser()
// Fixture: a failed story_to_script run owned by the test user.
const run = await prisma.graphRun.create({
data: {
userId: user.id,
projectId: 'project-retry-story',
episodeId: 'episode-retry-story',
workflowType: 'story_to_script_run',
taskType: 'story_to_script_run',
targetType: 'NovelPromotionEpisode',
targetId: 'episode-retry-story',
status: RUN_STATUS.FAILED,
queuedAt: new Date(),
startedAt: new Date(),
finishedAt: new Date(),
},
})
// Steps: analyze_characters failed; the sibling analyze_locations and all
// later steps (split_clips, per-clip screenplays) completed.
await prisma.graphStep.createMany({
data: [
{
runId: run.id,
stepKey: 'analyze_characters',
stepTitle: 'Analyze Characters',
status: RUN_STEP_STATUS.FAILED,
currentAttempt: 1,
stepIndex: 1,
stepTotal: 5,
startedAt: new Date(),
finishedAt: new Date(),
lastErrorCode: 'STEP_FAILED',
lastErrorMessage: 'characters failed',
},
{
runId: run.id,
stepKey: 'analyze_locations',
stepTitle: 'Analyze Locations',
status: RUN_STEP_STATUS.COMPLETED,
currentAttempt: 1,
stepIndex: 2,
stepTotal: 5,
startedAt: new Date(),
finishedAt: new Date(),
},
{
runId: run.id,
stepKey: 'split_clips',
stepTitle: 'Split Clips',
status: RUN_STEP_STATUS.COMPLETED,
currentAttempt: 1,
stepIndex: 3,
stepTotal: 5,
startedAt: new Date(),
finishedAt: new Date(),
},
{
runId: run.id,
stepKey: 'screenplay_clip-a',
stepTitle: 'Screenplay A',
status: RUN_STEP_STATUS.COMPLETED,
currentAttempt: 1,
stepIndex: 4,
stepTotal: 5,
startedAt: new Date(),
finishedAt: new Date(),
},
{
runId: run.id,
stepKey: 'screenplay_clip-b',
stepTitle: 'Screenplay B',
status: RUN_STEP_STATUS.COMPLETED,
currentAttempt: 1,
stepIndex: 5,
stepTotal: 5,
startedAt: new Date(),
finishedAt: new Date(),
},
],
})
// Artifacts exist for the failed step, its sibling, and downstream steps.
await prisma.graphArtifact.createMany({
data: [
{
runId: run.id,
stepKey: 'analyze_characters',
artifactType: 'analysis.characters',
refId: 'episode-retry-story',
payload: { rows: [{ name: 'Hero' }] },
},
{
runId: run.id,
stepKey: 'analyze_locations',
artifactType: 'analysis.locations',
refId: 'episode-retry-story',
payload: { rows: [{ name: 'City' }] },
},
{
runId: run.id,
stepKey: 'split_clips',
artifactType: 'clips',
refId: 'episode-retry-story',
payload: { clips: [{ id: 'clip-a' }] },
},
{
runId: run.id,
stepKey: 'screenplay_clip-a',
artifactType: 'screenplay.clip',
refId: 'clip-a',
payload: { scenes: [{ id: 1 }] },
},
],
})
// Act: retry the failed analysis step.
const retried = await retryFailedStep({
runId: run.id,
userId: user.id,
stepKey: 'analyze_characters',
})
// The retried step itself moves to attempt 2; everything downstream of it
// is invalidated, but the independent analyze_locations branch is not.
expect(retried?.retryAttempt).toBe(2)
expect(retried?.invalidatedStepKeys.slice().sort()).toEqual([
'analyze_characters',
'screenplay_clip-a',
'screenplay_clip-b',
'split_clips',
])
const steps = await prisma.graphStep.findMany({
where: { runId: run.id },
orderBy: { stepIndex: 'asc' },
})
const stepMap = new Map(steps.map((step) => [step.stepKey, step]))
// Retried step: back to PENDING at attempt 2 with error fields cleared.
expect(stepMap.get('analyze_characters')).toMatchObject({
status: RUN_STEP_STATUS.PENDING,
currentAttempt: 2,
lastErrorCode: null,
lastErrorMessage: null,
})
// Downstream steps are reset to attempt 0 (they will re-run from scratch).
expect(stepMap.get('split_clips')).toMatchObject({
status: RUN_STEP_STATUS.PENDING,
currentAttempt: 0,
})
expect(stepMap.get('screenplay_clip-a')).toMatchObject({
status: RUN_STEP_STATUS.PENDING,
currentAttempt: 0,
})
// The independent sibling keeps its completed state and attempt count.
expect(stepMap.get('analyze_locations')).toMatchObject({
status: RUN_STEP_STATUS.COMPLETED,
currentAttempt: 1,
})
// Only the untouched sibling's artifact survives invalidation.
const artifacts = await prisma.graphArtifact.findMany({
where: { runId: run.id },
orderBy: { stepKey: 'asc' },
})
expect(artifacts.map((artifact) => artifact.stepKey)).toEqual(['analyze_locations'])
// The run flips back to RUNNING with its error fields cleared.
const refreshedRun = await prisma.graphRun.findUnique({ where: { id: run.id } })
expect(refreshedRun?.status).toBe(RUN_STATUS.RUNNING)
expect(refreshedRun?.errorCode).toBeNull()
expect(refreshedRun?.errorMessage).toBeNull()
})
it('invalidates only the dependent storyboard branch plus voice analyze', async () => {
const user = await createTestUser()
// Fixture: a failed script_to_storyboard run with two clip branches.
const run = await prisma.graphRun.create({
data: {
userId: user.id,
projectId: 'project-retry-storyboard',
episodeId: 'episode-retry-storyboard',
workflowType: 'script_to_storyboard_run',
taskType: 'script_to_storyboard_run',
targetType: 'NovelPromotionEpisode',
targetId: 'episode-retry-storyboard',
status: RUN_STATUS.FAILED,
queuedAt: new Date(),
startedAt: new Date(),
finishedAt: new Date(),
},
})
// clip-1 phase1 failed; clip-1's later phases, clip-2's phase3, and the
// cross-cutting voice_analyze step all completed.
await prisma.graphStep.createMany({
data: [
{
runId: run.id,
stepKey: 'clip_clip-1_phase1',
stepTitle: 'Clip 1 Phase 1',
status: RUN_STEP_STATUS.FAILED,
currentAttempt: 1,
stepIndex: 1,
stepTotal: 6,
startedAt: new Date(),
finishedAt: new Date(),
lastErrorCode: 'STEP_FAILED',
lastErrorMessage: 'phase1 failed',
},
{
runId: run.id,
stepKey: 'clip_clip-1_phase2_cinematography',
stepTitle: 'Clip 1 Phase 2 Cine',
status: RUN_STEP_STATUS.COMPLETED,
currentAttempt: 1,
stepIndex: 2,
stepTotal: 6,
startedAt: new Date(),
finishedAt: new Date(),
},
{
runId: run.id,
stepKey: 'clip_clip-1_phase2_acting',
stepTitle: 'Clip 1 Phase 2 Acting',
status: RUN_STEP_STATUS.COMPLETED,
currentAttempt: 1,
stepIndex: 3,
stepTotal: 6,
startedAt: new Date(),
finishedAt: new Date(),
},
{
runId: run.id,
stepKey: 'clip_clip-1_phase3_detail',
stepTitle: 'Clip 1 Phase 3',
status: RUN_STEP_STATUS.COMPLETED,
currentAttempt: 1,
stepIndex: 4,
stepTotal: 6,
startedAt: new Date(),
finishedAt: new Date(),
},
{
runId: run.id,
stepKey: 'clip_clip-2_phase3_detail',
stepTitle: 'Clip 2 Phase 3',
status: RUN_STEP_STATUS.COMPLETED,
currentAttempt: 1,
stepIndex: 5,
stepTotal: 6,
startedAt: new Date(),
finishedAt: new Date(),
},
{
runId: run.id,
stepKey: 'voice_analyze',
stepTitle: 'Voice Analyze',
status: RUN_STEP_STATUS.COMPLETED,
currentAttempt: 1,
stepIndex: 6,
stepTotal: 6,
startedAt: new Date(),
finishedAt: new Date(),
},
],
})
await prisma.graphArtifact.createMany({
data: [
{
runId: run.id,
stepKey: 'clip_clip-1_phase1',
artifactType: 'storyboard.clip.phase1',
refId: 'clip-1',
payload: { panels: [] },
},
{
runId: run.id,
stepKey: 'clip_clip-1_phase2_cinematography',
artifactType: 'storyboard.clip.phase2.cine',
refId: 'clip-1',
payload: { rules: [] },
},
{
runId: run.id,
stepKey: 'clip_clip-2_phase3_detail',
artifactType: 'storyboard.clip.phase3',
refId: 'clip-2',
payload: { panels: [] },
},
{
runId: run.id,
stepKey: 'voice_analyze',
artifactType: 'voice.lines',
refId: 'episode-retry-storyboard',
payload: { lines: [] },
},
],
})
// Act: retry the failed clip-1 phase1 step.
const retried = await retryFailedStep({
runId: run.id,
userId: user.id,
stepKey: 'clip_clip-1_phase1',
})
// Only clip-1's branch plus voice_analyze are invalidated; clip-2 is not.
expect(retried?.retryAttempt).toBe(2)
expect(retried?.invalidatedStepKeys.slice().sort()).toEqual([
'clip_clip-1_phase1',
'clip_clip-1_phase2_acting',
'clip_clip-1_phase2_cinematography',
'clip_clip-1_phase3_detail',
'voice_analyze',
])
const steps = await prisma.graphStep.findMany({
where: { runId: run.id },
orderBy: { stepIndex: 'asc' },
})
const stepMap = new Map(steps.map((step) => [step.stepKey, step]))
expect(stepMap.get('clip_clip-1_phase1')).toMatchObject({
status: RUN_STEP_STATUS.PENDING,
currentAttempt: 2,
})
expect(stepMap.get('clip_clip-1_phase2_cinematography')).toMatchObject({
status: RUN_STEP_STATUS.PENDING,
currentAttempt: 0,
})
// voice_analyze depends on all clips, so it is reset as well.
expect(stepMap.get('voice_analyze')).toMatchObject({
status: RUN_STEP_STATUS.PENDING,
currentAttempt: 0,
})
// The independent clip-2 branch stays completed.
expect(stepMap.get('clip_clip-2_phase3_detail')).toMatchObject({
status: RUN_STEP_STATUS.COMPLETED,
currentAttempt: 1,
})
const artifacts = await prisma.graphArtifact.findMany({
where: { runId: run.id },
orderBy: { stepKey: 'asc' },
})
expect(artifacts.map((artifact) => artifact.stepKey)).toEqual(['clip_clip-2_phase3_detail'])
})
})

View File

@@ -10,6 +10,7 @@ type WaitTaskOptions = {
// Task statuses after which no further transition is expected; the wait loop
// stops polling once one of these is observed.
// NOTE(review): CANCELED is grouped with FAILED/DISMISSED as terminal — confirm
// callers that branch on failure vs. cancellation handle it distinctly.
const TERMINAL_STATUSES = new Set<TaskStatus>([
TASK_STATUS.COMPLETED,
TASK_STATUS.FAILED,
TASK_STATUS.CANCELED,
TASK_STATUS.DISMISSED,
])

View File

@@ -80,4 +80,26 @@ describe('task state service helpers', () => {
expect(state.lastError?.code).toBe('INVALID_PARAMS')
expect(state.lastError?.message).toBe('bad input')
})
it('treats canceled task as failed presentation state', () => {
  // A canceled task must surface to the UI exactly like a failure.
  const canceledTask = {
    id: 'task-3',
    type: 'asset_hub_image',
    status: 'canceled',
    progress: 100,
    payload: { ui: { intent: 'modify', hasOutputAtStart: true } },
    errorCode: 'TASK_CANCELLED',
    errorMessage: 'Task cancelled by user',
    updatedAt: new Date('2026-02-25T00:00:00.000Z'),
  }
  const target = { targetType: 'GlobalCharacter', targetId: 'c1' }
  const state = resolveTargetState(target, [canceledTask])
  expect(state.phase).toBe('failed')
  // The provider error code is normalized to CONFLICT while the original
  // human-readable message is preserved.
  expect(state.lastError?.code).toBe('CONFLICT')
  expect(state.lastError?.message).toBe('Task cancelled by user')
})
})

View File

@@ -209,4 +209,48 @@ describe('useVoiceRuntimeSync', () => {
errorMessage: 'QwenTTS voiceId missing',
})
})
it('treats canceled task as terminal failure for pending voice generation', () => {
const loadData = vi.fn(async () => undefined)
const setPendingVoiceGenerationByLineId = vi.fn()
const onTaskFailure = vi.fn()
// Capture every effect the hook registers so they can be invoked manually.
const effectCallbacks: Array<() => void | (() => void)> = []
useEffectMock.mockImplementation((callback: () => void | (() => void)) => {
effectCallbacks.push(callback)
})
// Render the hook with one voice line whose pending generation task was canceled.
useVoiceRuntimeSync({
loadData,
voiceLines: [buildVoiceLine({
id: 'line-10',
lineIndex: 10,
})],
activeVoiceTaskLineIds: new Set(),
pendingVoiceGenerationByLineId: {
'line-10': {
submittedUpdatedAt: '2026-03-07T12:00:00.000Z',
startedAt: '2026-03-07T12:24:10.000Z',
taskId: 'task-canceled-1',
taskStatus: 'canceled',
taskErrorMessage: 'Task cancelled by user',
},
},
setPendingVoiceGenerationByLineId,
onTaskFailure,
})
// Run only the second registered effect.
// NOTE(review): index 1 assumes the hook registers the task-status sync effect
// second — confirm against the hook implementation if effect order changes.
const renderEffects = effectCallbacks.splice(0)
renderEffects[1]?.()
// The canceled status must be reported as a terminal failure for the line.
expect(onTaskFailure).toHaveBeenCalledWith({
lineId: 'line-10',
line: expect.objectContaining({
id: 'line-10',
lineIndex: 10,
}),
taskId: 'task-canceled-1',
errorMessage: 'Task cancelled by user',
})
})
})

View File

@@ -1,140 +0,0 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
import {
executePipelineGraph,
GraphCancellationError,
type GraphExecutorState,
} from '@/lib/run-runtime/graph-executor'
// vi.hoisted lifts these mock factories above the vi.mock call below so they
// can be referenced from the hoisted module factory.
const { createCheckpointMock, getRunByIdMock } = vi.hoisted(() => ({
createCheckpointMock: vi.fn(),
getRunByIdMock: vi.fn(),
}))
// Replace the run-runtime service: buildLeanState becomes an identity pass-through,
// checkpoint writes and run lookups are captured by the mocks above.
vi.mock('@/lib/run-runtime/service', () => ({
buildLeanState: vi.fn((value: unknown) => value),
createCheckpoint: createCheckpointMock,
getRunById: getRunByIdMock,
}))
// Unit tests for executePipelineGraph: node retry behavior, cancellation via
// run status, and checkpoint persistence of merged refs/meta.
describe('graph executor', () => {
beforeEach(() => {
vi.clearAllMocks()
// Default: the run is active so nodes are allowed to execute.
getRunByIdMock.mockResolvedValue({
id: 'run_1',
userId: 'user_1',
status: 'running',
})
})
it('retries retryable node error and writes checkpoint once success', async () => {
const state: GraphExecutorState = {
refs: {},
meta: {},
}
// First attempt throws a network-style TypeError (retryable), second succeeds.
const runMock = vi
.fn()
.mockRejectedValueOnce(new TypeError('fetch failed sending request'))
.mockResolvedValueOnce({
output: { ok: true },
})
await executePipelineGraph({
runId: 'run_1',
projectId: 'project_1',
userId: 'user_1',
state,
nodes: [
{
key: 'node_a',
title: 'Node A',
maxAttempts: 2,
run: runMock,
},
],
})
expect(runMock).toHaveBeenCalledTimes(2)
// A checkpoint is written only once, after the successful attempt,
// and records the attempt number as its version.
expect(createCheckpointMock).toHaveBeenCalledTimes(1)
expect(createCheckpointMock).toHaveBeenCalledWith(expect.objectContaining({
runId: 'run_1',
nodeKey: 'node_a',
version: 2,
}))
})
it('throws cancellation error when run status is canceling', async () => {
// Mark the run as canceling; execution must abort with GraphCancellationError.
getRunByIdMock.mockResolvedValue({
id: 'run_1',
userId: 'user_1',
status: 'canceling',
})
await expect(
executePipelineGraph({
runId: 'run_1',
projectId: 'project_1',
userId: 'user_1',
state: {
refs: {},
meta: {},
},
nodes: [
{
key: 'node_a',
title: 'Node A',
run: async () => ({ output: { ok: true } }),
},
],
}),
).rejects.toBeInstanceOf(GraphCancellationError)
})
it('merges refs into state and persists lean checkpoint', async () => {
const state: GraphExecutorState = {
refs: {
scriptId: 'script_1',
},
meta: {
tag: 'v1',
},
}
// The node returns checkpointRefs/checkpointMeta, which the executor must
// merge into the shared state before persisting.
await executePipelineGraph({
runId: 'run_1',
projectId: 'project_1',
userId: 'user_1',
state,
nodes: [
{
key: 'node_b',
title: 'Node B',
run: async () => ({
checkpointRefs: {
storyboardId: 'storyboard_1',
},
checkpointMeta: {
done: true,
},
}),
},
],
})
// Existing refs survive the merge; unset known ref slots stay undefined.
expect(state.refs).toEqual({
scriptId: 'script_1',
storyboardId: 'storyboard_1',
voiceLineBatchId: undefined,
versionHash: undefined,
cursor: undefined,
})
expect(createCheckpointMock).toHaveBeenCalledWith(expect.objectContaining({
state: expect.objectContaining({
refs: expect.objectContaining({
scriptId: 'script_1',
storyboardId: 'storyboard_1',
}),
}),
}))
})
})

View File

@@ -1,136 +0,0 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
import type { GraphExecutorState } from '@/lib/run-runtime/graph-executor'
// Hoisted stand-in for executePipelineGraph: runs the declared nodes strictly
// in order with a fixed attempt of 1 and returns the (mutated) input state,
// so the adapter under test can be observed without the real executor.
const executePipelineGraphMock = vi.hoisted(() =>
vi.fn(async (input: {
runId: string
projectId: string
userId: string
state: GraphExecutorState
nodes: Array<{
key: string
run: (context: {
runId: string
projectId: string
userId: string
nodeKey: string
attempt: number
state: GraphExecutorState
}) => Promise<unknown>
}>
}) => {
for (const node of input.nodes) {
await node.run({
runId: input.runId,
projectId: input.projectId,
userId: input.userId,
nodeKey: node.key,
attempt: 1,
state: input.state,
})
}
return input.state
}),
)
// Route the real module import to the sequential mock above.
vi.mock('@/lib/run-runtime/graph-executor', () => ({
executePipelineGraph: executePipelineGraphMock,
}))
import { runLangGraphPipeline } from '@/lib/run-runtime/langgraph-pipeline'
// Test-local state: the executor state plus an order log recording node visits.
type TestState = GraphExecutorState & {
order: string[]
}
// Tests for the langgraph adapter: ordering, empty-graph short-circuit, and
// duplicate-key rejection.
describe('langgraph pipeline adapter', () => {
beforeEach(() => {
vi.clearAllMocks()
})
it('runs nodes in declared order through langgraph', async () => {
const state: TestState = {
refs: {},
meta: {},
order: [],
}
const result = await runLangGraphPipeline({
runId: 'run-1',
projectId: 'project-1',
userId: 'user-1',
state,
nodes: [
{
key: 'node_a',
title: 'Node A',
run: async (context) => {
const typedState = context.state as TestState
typedState.order.push('node_a')
return { output: { ok: true } }
},
},
{
key: 'node_b',
title: 'Node B',
run: async (context) => {
const typedState = context.state as TestState
typedState.order.push('node_b')
return { output: { ok: true } }
},
},
],
})
expect(result.order).toEqual(['node_a', 'node_b'])
// NOTE(review): two calls for two nodes implies the adapter invokes the
// executor once per node rather than once per graph — confirm intended.
expect(executePipelineGraphMock).toHaveBeenCalledTimes(2)
})
it('returns input state when graph has no nodes', async () => {
const state: TestState = {
refs: {},
meta: {},
order: [],
}
const result = await runLangGraphPipeline({
runId: 'run-1',
projectId: 'project-1',
userId: 'user-1',
state,
nodes: [],
})
// Same object reference back, and the executor is never touched.
expect(result).toBe(state)
expect(executePipelineGraphMock).not.toHaveBeenCalled()
})
it('fails explicitly on duplicate node keys', async () => {
const state: TestState = {
refs: {},
meta: {},
order: [],
}
await expect(
runLangGraphPipeline({
runId: 'run-1',
projectId: 'project-1',
userId: 'user-1',
state,
nodes: [
{
key: 'dup',
title: 'Dup 1',
run: async () => ({ output: { ok: true } }),
},
{
key: 'dup',
title: 'Dup 2',
run: async () => ({ output: { ok: true } }),
},
],
}),
).rejects.toThrow('LANGGRAPH_NODE_KEY_DUPLICATE: dup')
})
})

View File

@@ -0,0 +1,128 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// Shape of a persisted task event row as returned by the mocked prisma client.
type TaskEventRow = {
id: number
taskId: string
projectId: string
userId: string
eventType: string
payload: Record<string, unknown> | null
createdAt: Date
}
// Hoisted prisma/redis mocks — defaults are no-op resolutions.
const taskEventCreateMock = vi.hoisted(() =>
vi.fn<(...args: unknown[]) => Promise<TaskEventRow | null>>(async () => null),
)
const taskEventFindManyMock = vi.hoisted(() =>
vi.fn<(...args: unknown[]) => Promise<TaskEventRow[]>>(async () => []),
)
const taskFindManyMock = vi.hoisted(() =>
vi.fn<(...args: unknown[]) => Promise<Array<Record<string, unknown>>>>(async () => []),
)
const redisPublishMock = vi.hoisted(() => vi.fn(async () => 1))
// The bridge mapper always yields one fixed run event so tests can detect
// whether the publisher chose to mirror task events into run events.
const mapTaskSSEEventToRunEventsMock = vi.hoisted(() =>
vi.fn(() => [{
runId: 'run-1',
projectId: 'project-1',
userId: 'user-1',
eventType: 'step.chunk',
stepKey: 'split_clips',
attempt: 1,
lane: 'text',
payload: { ok: true },
}]),
)
const publishRunEventMock = vi.hoisted(() => vi.fn(async () => undefined))
vi.mock('@/lib/prisma', () => ({
prisma: {
taskEvent: {
create: taskEventCreateMock,
findMany: taskEventFindManyMock,
},
task: {
findMany: taskFindManyMock,
},
},
}))
vi.mock('@/lib/redis', () => ({
redis: {
publish: redisPublishMock,
},
}))
vi.mock('@/lib/run-runtime/task-bridge', () => ({
mapTaskSSEEventToRunEvents: mapTaskSSEEventToRunEventsMock,
}))
vi.mock('@/lib/run-runtime/publisher', () => ({
publishRunEvent: publishRunEventMock,
}))
import { publishTaskStreamEvent } from '@/lib/task/publisher'
// Boundary tests: core analysis workflows publish run events directly from the
// worker, so the task publisher must NOT mirror their stream events; other
// task types keep the mirroring behavior.
describe('task publisher direct run event boundary', () => {
beforeEach(() => {
taskEventCreateMock.mockReset()
taskEventFindManyMock.mockReset()
taskFindManyMock.mockReset()
redisPublishMock.mockReset()
mapTaskSSEEventToRunEventsMock.mockClear()
publishRunEventMock.mockClear()
})
it('does not mirror run events for story_to_script task stream events', async () => {
await publishTaskStreamEvent({
taskId: 'task-1',
projectId: 'project-1',
userId: 'user-1',
taskType: 'story_to_script_run',
targetType: 'NovelPromotionEpisode',
targetId: 'episode-1',
episodeId: 'episode-1',
payload: {
stepId: 'split_clips',
stream: {
kind: 'text',
seq: 1,
lane: 'main',
delta: 'hello',
},
},
persist: false,
})
// The task channel still gets the event, but the run bridge stays untouched.
expect(redisPublishMock).toHaveBeenCalledTimes(1)
expect(mapTaskSSEEventToRunEventsMock).not.toHaveBeenCalled()
expect(publishRunEventMock).not.toHaveBeenCalled()
})
it('continues mirroring run events for non-core task types', async () => {
await publishTaskStreamEvent({
taskId: 'task-2',
projectId: 'project-1',
userId: 'user-1',
taskType: 'voice_line',
targetType: 'VoiceLine',
targetId: 'line-1',
payload: {
stepId: 'voice',
stream: {
kind: 'text',
seq: 1,
lane: 'main',
delta: 'world',
},
},
persist: false,
})
// Non-core types are mapped through the bridge and re-published as run events.
expect(mapTaskSSEEventToRunEventsMock).toHaveBeenCalledTimes(1)
expect(publishRunEventMock).toHaveBeenCalledWith(expect.objectContaining({
runId: 'run-1',
eventType: 'step.chunk',
stepKey: 'split_clips',
}))
})
})

View File

@@ -132,4 +132,36 @@ describe('createWorkerLLMStreamCallbacks', () => {
expect(payload.stepTitle).toBe('A')
expect(payload.output).toBe('characters-final')
})
it('uses injected active controller for run-owned workflows', async () => {
const job = buildJob()
const context = createWorkerLLMStreamContext(job, 'story_to_script')
// Injected controller: the callbacks must consult it instead of the global
// task-active check when the workflow owns its run lifecycle.
const assertActive = vi.fn(async (_stage: string) => undefined)
const isActive = vi.fn(async () => true)
const callbacks = createWorkerLLMStreamCallbacks(job, context, {
assertActive,
isActive,
})
callbacks.onChunk?.({
kind: 'text',
delta: 'hello',
seq: 1,
lane: 'main',
step: { id: 'split_clips', attempt: 1, title: 'split', index: 1, total: 1 },
})
// flush drains the buffered chunk and reports it upstream.
await callbacks.flush()
expect(assertActive).toHaveBeenCalledWith('worker_llm_stream')
// The fallback global assertion must not be used when a controller is injected.
expect(assertTaskActiveMock).not.toHaveBeenCalled()
expect(reportTaskStreamChunkMock).toHaveBeenCalledWith(
expect.anything(),
expect.objectContaining({
delta: 'hello',
}),
expect.objectContaining({
stepId: 'split_clips',
}),
)
})
})

View File

@@ -47,32 +47,17 @@ const runScriptToStoryboardOrchestratorMock = vi.hoisted(() =>
},
})),
)
const graphExecutorMock = vi.hoisted(() => ({
executePipelineGraph: vi.fn(async (input: {
runId: string
projectId: string
userId: string
state: Record<string, unknown>
nodes: Array<{ key: string; run: (ctx: Record<string, unknown>) => Promise<unknown> }>
}) => {
for (const node of input.nodes) {
await node.run({
runId: input.runId,
projectId: input.projectId,
userId: input.userId,
nodeKey: node.key,
attempt: 1,
state: input.state,
})
}
return input.state
}),
}))
const parseVoiceLinesJsonMock = vi.hoisted(() => vi.fn())
const persistStoryboardsAndPanelsMock = vi.hoisted(() => vi.fn())
const parseStoryboardRetryTargetMock = vi.hoisted(() => vi.fn())
const runScriptToStoryboardAtomicRetryMock = vi.hoisted(() => vi.fn())
const workflowLeaseMock = vi.hoisted(() => ({
assertWorkflowRunActive: vi.fn(async () => undefined),
withWorkflowRunLease: vi.fn(async (params: { run: () => Promise<unknown> }) => ({
claimed: true,
result: await params.run(),
})),
}))
const txState = vi.hoisted(() => ({
createdRows: [] as Array<Record<string, unknown>>,
@@ -145,10 +130,6 @@ vi.mock('@/lib/novel-promotion/script-to-storyboard/orchestrator', () => ({
}
},
}))
vi.mock('@/lib/run-runtime/graph-executor', () => ({
executePipelineGraph: graphExecutorMock.executePipelineGraph,
}))
vi.mock('@/lib/workers/handlers/llm-stream', () => ({
createWorkerLLMStreamContext: vi.fn(() => ({ streamRunId: 'run-1', nextSeqByStepLane: {} })),
createWorkerLLMStreamCallbacks: vi.fn(() => ({
@@ -192,6 +173,7 @@ vi.mock('@/lib/workers/handlers/script-to-storyboard-atomic-retry', () => ({
parseStoryboardRetryTarget: parseStoryboardRetryTargetMock,
runScriptToStoryboardAtomicRetry: runScriptToStoryboardAtomicRetryMock,
}))
vi.mock('@/lib/run-runtime/workflow-lease', () => workflowLeaseMock)
import { handleScriptToStoryboardTask } from '@/lib/workers/handlers/script-to-storyboard'

View File

@@ -0,0 +1,175 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
import type { Job } from 'bullmq'
import type { TaskJobData } from '@/lib/task/types'
// Hoisted mocks shared across the worker lifecycle tests below.
const tryUpdateTaskProgressMock = vi.hoisted(() => vi.fn(async () => true))
const publishTaskEventMock = vi.hoisted(() => vi.fn(async () => ({})))
const publishTaskStreamEventMock = vi.hoisted(() => vi.fn(async () => ({})))
const publishRunEventMock = vi.hoisted(() => vi.fn(async () => undefined))
// Bridge mapper always produces one fixed step.start run event so tests can
// verify that the worker mirrored a task event into the run stream.
const mapTaskSSEEventToRunEventsMock = vi.hoisted(() =>
vi.fn(() => [{
runId: 'run-1',
projectId: 'project-1',
userId: 'user-1',
eventType: 'step.start',
stepKey: 'split_clips',
attempt: 1,
lane: null,
payload: { mirrored: true },
}]),
)
// Stub out every collaborator of @/lib/workers/shared: persistence, logging,
// task state transitions, publishing, billing and error normalization.
vi.mock('@/lib/prisma', () => ({
prisma: {
project: {
findUnique: vi.fn(async () => null),
},
},
}))
vi.mock('@/lib/logging/core', () => ({
createScopedLogger: () => ({
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
}),
}))
vi.mock('@/lib/task/service', () => ({
rollbackTaskBillingForTask: vi.fn(async () => ({ attempted: false, rolledBack: false, billingInfo: null })),
touchTaskHeartbeat: vi.fn(async () => undefined),
tryMarkTaskCompleted: vi.fn(async () => true),
tryMarkTaskFailed: vi.fn(async () => true),
tryMarkTaskProcessing: vi.fn(async () => true),
tryUpdateTaskProgress: tryUpdateTaskProgressMock,
updateTaskBillingInfo: vi.fn(async () => undefined),
}))
vi.mock('@/lib/task/publisher', () => ({
publishTaskEvent: publishTaskEventMock,
publishTaskStreamEvent: publishTaskStreamEventMock,
}))
vi.mock('@/lib/task/progress-message', () => ({
buildTaskProgressMessage: vi.fn(() => 'progress-message'),
getTaskStageLabel: vi.fn((stage: string) => `label:${stage}`),
}))
vi.mock('@/lib/errors/normalize', () => ({
normalizeAnyError: vi.fn((error: Error) => ({
code: 'ERROR',
message: error.message,
retryable: false,
provider: null,
})),
}))
vi.mock('@/lib/billing', () => ({
rollbackTaskBilling: vi.fn(async () => null),
settleTaskBilling: vi.fn(async () => null),
}))
vi.mock('@/lib/billing/runtime-usage', () => ({
withTextUsageCollection: vi.fn(async (fn: () => Promise<unknown>) => ({
result: await fn(),
textUsage: null,
})),
}))
vi.mock('@/lib/logging/file-writer', () => ({
onProjectNameAvailable: vi.fn(),
}))
vi.mock('@/lib/run-runtime/task-bridge', () => ({
mapTaskSSEEventToRunEvents: mapTaskSSEEventToRunEventsMock,
}))
vi.mock('@/lib/run-runtime/publisher', () => ({
publishRunEvent: publishRunEventMock,
}))
import { reportTaskProgress, reportTaskStreamChunk, withTaskLifecycle } from '@/lib/workers/shared'
/**
 * Builds a minimal BullMQ job stub on the text queue for the given task type.
 * Only `data` and `queueName` are populated — the code under test reads
 * nothing else from the job, hence the unchecked cast.
 */
function buildJob(taskType: TaskJobData['type']): Job<TaskJobData> {
  const jobData = {
    taskId: 'task-1',
    type: taskType,
    locale: 'zh',
    projectId: 'project-1',
    episodeId: 'episode-1',
    targetType: 'NovelPromotionEpisode',
    targetId: 'episode-1',
    userId: 'user-1',
    payload: {
      runId: 'run-1',
    },
    trace: null,
  }
  const jobStub = {
    data: jobData,
    queueName: 'text',
  }
  return jobStub as unknown as Job<TaskJobData>
}
// Core analysis workflows (story_to_script_run / script_to_storyboard_run)
// must emit run events directly from the worker helpers, not only task events.
describe('worker shared direct run events', () => {
beforeEach(() => {
tryUpdateTaskProgressMock.mockReset()
tryUpdateTaskProgressMock.mockResolvedValue(true)
publishTaskEventMock.mockReset()
publishTaskStreamEventMock.mockReset()
publishRunEventMock.mockReset()
mapTaskSSEEventToRunEventsMock.mockClear()
})
it('publishes run events directly for core analysis progress updates', async () => {
await reportTaskProgress(buildJob('story_to_script_run'), 42, {
stage: 'story_to_script_step',
stepId: 'split_clips',
stepTitle: 'Split',
})
// The task event still goes out…
expect(publishTaskEventMock).toHaveBeenCalledWith(expect.objectContaining({
taskType: 'story_to_script_run',
type: 'task.progress',
}))
// …and in addition the mapped run event is published directly.
expect(mapTaskSSEEventToRunEventsMock).toHaveBeenCalledTimes(1)
expect(publishRunEventMock).toHaveBeenCalledWith(expect.objectContaining({
runId: 'run-1',
eventType: 'step.start',
stepKey: 'split_clips',
}))
})
it('publishes run events directly for core analysis stream chunks', async () => {
await reportTaskStreamChunk(buildJob('script_to_storyboard_run'), {
kind: 'text',
delta: 'hello',
seq: 1,
lane: 'main',
}, {
stepId: 'clip_1_phase1',
stepTitle: 'Phase 1',
})
// Stream chunks for core workflows are persisted and mirrored to run events.
expect(publishTaskStreamEventMock).toHaveBeenCalledWith(expect.objectContaining({
taskType: 'script_to_storyboard_run',
persist: true,
}))
expect(mapTaskSSEEventToRunEventsMock).toHaveBeenCalledTimes(1)
expect(publishRunEventMock).toHaveBeenCalledWith(expect.objectContaining({
runId: 'run-1',
eventType: 'step.start',
stepKey: 'split_clips',
}))
})
it('emits run.start directly when the core analysis worker begins execution', async () => {
await withTaskLifecycle(buildJob('story_to_script_run'), async () => ({
ok: true,
}))
expect(publishRunEventMock).toHaveBeenCalledWith(expect.objectContaining({
runId: 'run-1',
eventType: 'run.start',
}))
})
})

View File

@@ -26,33 +26,18 @@ const configMock = vi.hoisted(() => ({
const orchestratorMock = vi.hoisted(() => ({
runStoryToScriptOrchestrator: vi.fn(),
}))
const graphExecutorMock = vi.hoisted(() => ({
executePipelineGraph: vi.fn(async (input: {
runId: string
projectId: string
userId: string
state: Record<string, unknown>
nodes: Array<{ key: string; run: (ctx: Record<string, unknown>) => Promise<unknown> }>
}) => {
for (const node of input.nodes) {
await node.run({
runId: input.runId,
projectId: input.projectId,
userId: input.userId,
nodeKey: node.key,
attempt: 1,
state: input.state,
})
}
return input.state
}),
}))
const helperMock = vi.hoisted(() => ({
persistAnalyzedCharacters: vi.fn(async () => [{ id: 'character-new-1' }]),
persistAnalyzedLocations: vi.fn(async () => [{ id: 'location-new-1' }]),
persistClips: vi.fn(async () => [{ clipKey: 'clip-1', id: 'clip-row-1' }]),
}))
const workflowLeaseMock = vi.hoisted(() => ({
assertWorkflowRunActive: vi.fn(async () => undefined),
withWorkflowRunLease: vi.fn(async (params: { run: () => Promise<unknown> }) => ({
claimed: true,
result: await params.run(),
})),
}))
vi.mock('@/lib/prisma', () => ({ prisma: prismaMock }))
vi.mock('@/lib/llm-client', () => ({
@@ -69,9 +54,6 @@ vi.mock('@/lib/logging/file-writer', () => ({ onProjectNameAvailable: vi.fn() })
vi.mock('@/lib/workers/shared', () => ({ reportTaskProgress: workerMock.reportTaskProgress }))
vi.mock('@/lib/workers/utils', () => ({ assertTaskActive: workerMock.assertTaskActive }))
vi.mock('@/lib/novel-promotion/story-to-script/orchestrator', () => orchestratorMock)
vi.mock('@/lib/run-runtime/graph-executor', () => ({
executePipelineGraph: graphExecutorMock.executePipelineGraph,
}))
vi.mock('@/lib/workers/handlers/llm-stream', () => ({
createWorkerLLMStreamContext: vi.fn(() => ({ streamRunId: 'run-1', nextSeqByStepLane: {} })),
createWorkerLLMStreamCallbacks: vi.fn(() => ({
@@ -100,6 +82,7 @@ vi.mock('@/lib/workers/handlers/story-to-script-helpers', () => ({
persistClips: helperMock.persistClips,
resolveClipRecordId: (clipIdMap: Map<string, string>, clipId: string) => clipIdMap.get(clipId) ?? null,
}))
vi.mock('@/lib/run-runtime/workflow-lease', () => workflowLeaseMock)
import { handleStoryToScriptTask } from '@/lib/workers/handlers/story-to-script'

View File

@@ -82,6 +82,32 @@ describe('useRebuildConfirm', () => {
expect(action).toHaveBeenCalledTimes(1)
})
it('story to script without downstream confirm clears pending action after action completes', async () => {
// No existing storyboards/panels, so no confirmation dialog should be shown.
const getProjectStoryboardStats = vi.fn(async () => ({ storyboardCount: 0, panelCount: 0 }))
const action = vi.fn(async () => undefined)
const hook = useRebuildConfirm({
episodeId: 'episode-1',
episodeStoryboards: [],
getProjectStoryboardStats,
t: (key: string) => key,
})
await hook.runWithRebuildConfirm('storyToScript', action)
expect(action).toHaveBeenCalledTimes(1)
// First call marks the action pending; second call is the functional reset.
expect(setPendingActionTypeMock).toHaveBeenCalledTimes(2)
expect(setPendingActionTypeMock).toHaveBeenNthCalledWith(1, 'storyToScript')
const resetCall = setPendingActionTypeMock.mock.calls[1]?.[0]
expect(typeof resetCall).toBe('function')
if (typeof resetCall !== 'function') {
throw new Error('expected reset pending action updater')
}
// The updater only clears the pending state if it still belongs to this action.
expect(resetCall('storyToScript')).toBeNull()
expect(resetCall('scriptToStoryboard')).toBe('scriptToStoryboard')
})
})
describe('hasDownstreamStoryboardData', () => {