feat: add configurable node runtime snapshots

This commit is contained in:
eust-w 2026-03-27 03:08:21 +08:00
parent 45b4a99af1
commit f6ca6246f9
24 changed files with 836 additions and 16 deletions

View File

@ -66,7 +66,9 @@ The local validation path currently used for embodied data testing is:
You can register that directory from the Assets page or via `POST /api/assets/register`.
The workflow editor currently requires selecting at least one registered asset before a run can be created.
The editor now also persists per-node runtime config in workflow versions, including executor overrides, optional artifact title overrides, and Python code-hook source for inspect- and transform-style nodes.
The Runs workspace now shows project-scoped run history, run-level aggregated summaries, cancel/retry controls, and run detail views with persisted task summaries, stdout/stderr sections, result previews, and artifact links into Explore.
Selected run tasks now expose the frozen node definition id, executor config snapshot, and code-hook metadata that were captured when the run was created.
## Repository Structure

View File

@ -5,7 +5,12 @@ export const runTaskSchemaDefinition = {
workflowVersionId: { type: "string", required: true },
nodeId: { type: "string", required: true },
nodeType: { type: "string", required: true },
nodeDefinitionId: { type: "string", required: false, default: null },
executorType: { type: "string", required: true },
executorConfig: { type: "object", required: false, default: null },
codeHookSpec: { type: "object", required: false, default: null },
artifactType: { type: "string", required: false, default: null },
artifactTitle: { type: "string", required: false, default: null },
status: { type: "string", required: true },
attempt: { type: "number", required: true, default: 1 },
assetIds: { type: "array", required: true, default: [] },

View File

@ -6,6 +6,7 @@ export const workflowRunSchemaDefinition = {
status: { type: "string", required: true },
triggeredBy: { type: "string", required: true },
assetIds: { type: "array", required: true, default: [] },
runtimeSnapshot: { type: "object", required: false, default: null },
summary: { type: "object", required: false, default: null },
startedAt: { type: "date", required: false, default: null },
finishedAt: { type: "date", required: false, default: null },

View File

@ -6,8 +6,11 @@ import type { AssetType } from "../../../../packages/contracts/src/domain.ts";
import { DELIVERY_NODE_DEFINITIONS } from "../modules/plugins/builtin/delivery-nodes.ts";
import { probeLocalSourcePath } from "./local-source-probe.ts";
import type {
CodeHookSpec,
NodeRuntimeConfig,
ExecutorType,
RunExecutionSummary,
RunRuntimeSnapshot,
TaskExecutionSummary,
TaskStatusCounts,
} from "../../../worker/src/contracts/execution-context.ts";
@ -102,6 +105,7 @@ type WorkflowRunDocument = Timestamped & {
status: "queued" | "running" | "success" | "failed" | "cancelled";
triggeredBy: string;
assetIds: string[];
runtimeSnapshot?: RunRuntimeSnapshot;
startedAt?: string;
finishedAt?: string;
durationMs?: number;
@ -114,7 +118,12 @@ type RunTaskDocument = Timestamped & {
workflowVersionId: string;
nodeId: string;
nodeType: string;
nodeDefinitionId?: string;
executorType: ExecutorType;
executorConfig?: Record<string, unknown>;
codeHookSpec?: CodeHookSpec;
artifactType?: "json" | "directory" | "video";
artifactTitle?: string;
status: "queued" | "pending" | "running" | "success" | "failed" | "cancelled";
attempt: number;
assetIds: string[];
@ -140,6 +149,12 @@ type ArtifactDocument = Timestamped & {
payload: Record<string, unknown>;
};
type WorkflowRuntimeGraph = Record<string, unknown> & {
selectedPreset?: string;
nodeBindings?: Record<string, string>;
nodeConfigs?: Record<string, NodeRuntimeConfig>;
};
/** Current wall-clock time as an ISO-8601 UTC string (shared timestamp helper). */
function nowIso(): string {
  const timestamp = new Date();
  return timestamp.toISOString();
}
@ -186,6 +201,108 @@ function buildRunExecutionSummary(tasks: RunTaskDocument[]): RunExecutionSummary
};
}
/**
 * Derives a node definition id from a graph node id by stripping one trailing
 * numeric suffix, e.g. "validate-structure-2" -> "validate-structure".
 * Ids without such a suffix are returned unchanged.
 */
function inferDefinitionId(nodeId: string) {
  const numericSuffix = /-\d+$/;
  return nodeId.replace(numericSuffix, "");
}
/** Narrowing guard: true only for non-null, non-array object values. */
function isRecord(value: unknown): value is Record<string, unknown> {
  if (value === null || value === undefined) {
    return false;
  }
  return typeof value === "object" && !Array.isArray(value);
}
/**
 * Validates an untrusted code-hook payload from a stored runtime graph.
 * Only python hooks with a non-blank `source` string are accepted; any other
 * shape yields undefined. `entrypoint` is preserved only when it is a
 * non-blank string, otherwise it is left undefined (executor default).
 */
function sanitizeCodeHookSpec(value: unknown): CodeHookSpec | undefined {
  if (!value || typeof value !== "object" || Array.isArray(value)) {
    return undefined;
  }
  const candidate = value as Record<string, unknown>;
  const source = candidate.source;
  if (candidate.language !== "python" || typeof source !== "string" || source.trim().length === 0) {
    return undefined;
  }
  const rawEntrypoint = candidate.entrypoint;
  const entrypoint =
    typeof rawEntrypoint === "string" && rawEntrypoint.trim().length > 0 ? rawEntrypoint : undefined;
  return { language: "python", entrypoint, source };
}
/** Allow-lists the artifact type; any unrecognized value becomes undefined. */
function sanitizeArtifactType(value: unknown): "json" | "directory" | "video" | undefined {
  switch (value) {
    case "json":
    case "directory":
    case "video":
      return value;
    default:
      return undefined;
  }
}
function sanitizeNodeRuntimeConfig(value: unknown, fallbackDefinitionId: string): NodeRuntimeConfig | undefined {
if (!isRecord(value)) {
return undefined;
}
const executorType = value.executorType === "python" || value.executorType === "docker" || value.executorType === "http"
? value.executorType
: undefined;
const definitionId = typeof value.definitionId === "string" && value.definitionId.trim().length > 0
? value.definitionId
: fallbackDefinitionId;
const executorConfig = isRecord(value.executorConfig) ? { ...value.executorConfig } : undefined;
const codeHookSpec = sanitizeCodeHookSpec(value.codeHookSpec);
const artifactType = sanitizeArtifactType(value.artifactType);
const artifactTitle = typeof value.artifactTitle === "string" && value.artifactTitle.trim().length > 0
? value.artifactTitle
: undefined;
if (!executorType && !executorConfig && !codeHookSpec && !artifactType && !artifactTitle) {
return definitionId !== fallbackDefinitionId ? { definitionId } : undefined;
}
const config: NodeRuntimeConfig = {};
if (definitionId !== fallbackDefinitionId) {
config.definitionId = definitionId;
}
if (executorType) {
config.executorType = executorType;
}
if (executorConfig) {
config.executorConfig = executorConfig;
}
if (codeHookSpec) {
config.codeHookSpec = codeHookSpec;
}
if (artifactType) {
config.artifactType = artifactType;
}
if (artifactTitle) {
config.artifactTitle = artifactTitle;
}
return config;
}
function buildRuntimeSnapshot(
runtimeGraph: Record<string, unknown>,
logicGraph: WorkflowDefinitionVersionDocument["logicGraph"],
pluginRefs: string[],
): RunRuntimeSnapshot {
const graph = runtimeGraph as WorkflowRuntimeGraph;
const nodeBindings: Record<string, string> = {};
const nodeConfigs: Record<string, NodeRuntimeConfig> = {};
for (const node of logicGraph.nodes) {
const definitionId = graph.nodeBindings?.[node.id] ?? inferDefinitionId(node.id);
nodeBindings[node.id] = definitionId;
const config = sanitizeNodeRuntimeConfig(graph.nodeConfigs?.[node.id], definitionId);
if (config) {
nodeConfigs[node.id] = config;
}
}
return {
selectedPreset: typeof graph.selectedPreset === "string" ? graph.selectedPreset : undefined,
nodeBindings,
nodeConfigs,
pluginRefs: [...pluginRefs],
};
}
function collectRetryNodeIds(tasks: RunTaskDocument[], rootNodeId: string) {
const pending = [rootNodeId];
const collected = new Set<string>([rootNodeId]);
@ -527,6 +644,12 @@ export class MongoAppStore {
throw new Error("bound assets must belong to the workflow project");
}
const runtimeSnapshot = buildRuntimeSnapshot(
version.runtimeGraph,
version.logicGraph,
version.pluginRefs,
);
const run: WorkflowRunDocument = {
_id: `run-${randomUUID()}`,
workflowDefinitionId: input.workflowDefinitionId,
@ -536,18 +659,26 @@ export class MongoAppStore {
status: "queued",
triggeredBy: input.triggeredBy,
assetIds,
runtimeSnapshot,
createdAt: nowIso(),
updatedAt: nowIso(),
};
const targetNodes = new Set(version.logicGraph.edges.map((edge) => edge.to));
const tasks = version.logicGraph.nodes.map<RunTaskDocument>((node) => ({
const tasks = version.logicGraph.nodes.map<RunTaskDocument>((node) => {
const config = runtimeSnapshot.nodeConfigs?.[node.id];
return {
_id: `task-${randomUUID()}`,
workflowRunId: run._id,
workflowVersionId: version._id,
nodeId: node.id,
nodeType: node.type,
executorType: "python",
nodeDefinitionId: runtimeSnapshot.nodeBindings?.[node.id] ?? inferDefinitionId(node.id),
executorType: config?.executorType ?? "python",
executorConfig: config?.executorConfig,
codeHookSpec: config?.codeHookSpec,
artifactType: config?.artifactType,
artifactTitle: config?.artifactTitle,
status: targetNodes.has(node.id) ? "pending" : "queued",
attempt: 1,
assetIds,
@ -558,7 +689,8 @@ export class MongoAppStore {
logLines: [],
createdAt: nowIso(),
updatedAt: nowIso(),
}));
};
});
run.summary = buildRunExecutionSummary(tasks);
await this.db.collection("workflow_runs").insertOne(run);

View File

@ -234,6 +234,180 @@ test("mongo-backed runtime persists probed assets and workflow runs through the
assert.equal(tasks[1]?.status, "pending");
});
// End-to-end check that per-node executor config saved in a workflow version
// is frozen into the run's runtimeSnapshot and copied onto each created task.
test("mongo-backed runtime snapshots per-node executor config into runs and tasks", async (t) => {
// Minimal on-disk asset layout so the probe endpoint accepts the directory.
const sourceDir = await mkdtemp(path.join(os.tmpdir(), "emboflow-runtime-snapshot-"));
await mkdir(path.join(sourceDir, "DJI_001"));
await writeFile(path.join(sourceDir, "meta.json"), "{}");
await writeFile(path.join(sourceDir, "intrinsics.json"), "{}");
await writeFile(path.join(sourceDir, "video_meta.json"), "{}");
await writeFile(path.join(sourceDir, "DJI_001", "DJI_001.mp4"), "");
const mongod = await MongoMemoryServer.create({
instance: {
ip: "127.0.0.1",
port: 27124,
},
});
t.after(async () => {
await mongod.stop();
});
// Boot the runtime server against the in-memory Mongo instance.
const server = await startRuntimeServer({
host: "127.0.0.1",
port: 0,
mongoUri: mongod.getUri(),
database: "emboflow-runtime-snapshots",
corsOrigin: "http://127.0.0.1:3000",
});
t.after(async () => {
await server.close();
});
// Bootstrap a workspace/project, then register and probe the test asset.
const bootstrap = await readJson<{
workspace: { _id: string };
project: { _id: string };
}>(
await fetch(`${server.baseUrl}/api/dev/bootstrap`, {
method: "POST",
headers: { "content-type": "application/json" },
body: JSON.stringify({ userId: "snapshot-user", projectName: "Snapshot Project" }),
}),
);
const asset = await readJson<{ _id: string }>(
await fetch(`${server.baseUrl}/api/assets/register`, {
method: "POST",
headers: { "content-type": "application/json" },
body: JSON.stringify({
workspaceId: bootstrap.workspace._id,
projectId: bootstrap.project._id,
sourcePath: sourceDir,
}),
}),
);
await readJson(await fetch(`${server.baseUrl}/api/assets/${asset._id}/probe`, { method: "POST" }));
const workflow = await readJson<{ _id: string }>(
await fetch(`${server.baseUrl}/api/workflows`, {
method: "POST",
headers: { "content-type": "application/json" },
body: JSON.stringify({
workspaceId: bootstrap.workspace._id,
projectId: bootstrap.project._id,
name: "Snapshot Flow",
}),
}),
);
// Save a version whose runtimeGraph carries one override per executor kind:
// docker for the source node, a python code hook, and an http target.
const version = await readJson<{ _id: string }>(
await fetch(`${server.baseUrl}/api/workflows/${workflow._id}/versions`, {
method: "POST",
headers: { "content-type": "application/json" },
body: JSON.stringify({
visualGraph: { viewport: { x: 0, y: 0, zoom: 1 } },
logicGraph: {
nodes: [
{ id: "source-asset", type: "source" },
{ id: "validate-structure", type: "inspect" },
{ id: "export-delivery-package", type: "export" },
],
edges: [
{ from: "source-asset", to: "validate-structure" },
{ from: "validate-structure", to: "export-delivery-package" },
],
},
runtimeGraph: {
selectedPreset: "delivery-normalization",
nodeConfigs: {
"source-asset": {
executorType: "docker",
executorConfig: {
image: "python:3.11",
command: ["python", "-V"],
},
},
"validate-structure": {
executorType: "python",
codeHookSpec: {
language: "python",
entrypoint: "process",
source: [
"def process(task, context):",
" return {'nodeId': task['nodeId'], 'hooked': True}",
].join("\n"),
},
},
"export-delivery-package": {
executorType: "http",
executorConfig: {
url: "http://127.0.0.1:3010/mock-executor",
method: "POST",
},
},
},
},
pluginRefs: ["builtin:delivery-nodes"],
}),
}),
);
const createdRun = await readJson<{ _id: string }>(
await fetch(`${server.baseUrl}/api/runs`, {
method: "POST",
headers: { "content-type": "application/json" },
body: JSON.stringify({
workflowDefinitionId: workflow._id,
workflowVersionId: version._id,
assetIds: [asset._id],
}),
}),
);
// Read back the run (with its snapshot) and the tasks created for it.
const run = await readJson<{
_id: string;
runtimeSnapshot?: {
selectedPreset?: string;
nodeConfigs?: Record<
string,
{
executorType?: string;
executorConfig?: { image?: string; url?: string };
codeHookSpec?: { source?: string };
}
>;
};
}>(await fetch(`${server.baseUrl}/api/runs/${createdRun._id}`));
const tasks = await readJson<
Array<{
nodeId: string;
executorType: string;
executorConfig?: { image?: string; url?: string };
codeHookSpec?: { source?: string };
}>
>(await fetch(`${server.baseUrl}/api/runs/${createdRun._id}/tasks`));
const sourceTask = tasks.find((task) => task.nodeId === "source-asset");
const validateTask = tasks.find((task) => task.nodeId === "validate-structure");
const exportTask = tasks.find((task) => task.nodeId === "export-delivery-package");
// The run-level snapshot must echo the saved preset and node configs...
assert.equal(run.runtimeSnapshot?.selectedPreset, "delivery-normalization");
assert.equal(run.runtimeSnapshot?.nodeConfigs?.["source-asset"]?.executorType, "docker");
assert.equal(
run.runtimeSnapshot?.nodeConfigs?.["source-asset"]?.executorConfig?.image,
"python:3.11",
);
assert.match(
run.runtimeSnapshot?.nodeConfigs?.["validate-structure"]?.codeHookSpec?.source ?? "",
/def process/,
);
// ...and each task must carry the override captured for its node.
assert.equal(sourceTask?.executorType, "docker");
assert.equal(sourceTask?.executorConfig?.image, "python:3.11");
assert.equal(validateTask?.executorType, "python");
assert.match(validateTask?.codeHookSpec?.source ?? "", /hooked/);
assert.equal(exportTask?.executorType, "http");
assert.equal(exportTask?.executorConfig?.url, "http://127.0.0.1:3010/mock-executor");
});
test("mongo-backed runtime rejects workflow runs without bound assets", async (t) => {
const mongod = await MongoMemoryServer.create({
instance: {

View File

@ -2,6 +2,7 @@ export type RunTaskView = {
id: string;
nodeId: string;
nodeName: string;
nodeDefinitionId?: string;
status: string;
assetIds?: string[];
artifactIds?: string[];
@ -10,6 +11,8 @@ export type RunTaskView = {
errorMessage?: string;
stdoutLines?: string[];
stderrLines?: string[];
codeHookLabel?: string;
executorConfigLabel?: string;
canRetry?: boolean;
logLines: string[];
};

View File

@ -14,10 +14,13 @@ export function renderTaskLogPanel(
<aside data-view="task-log-panel">
<h2>${task.nodeName}</h2>
<p>Status: ${task.status}</p>
${task.nodeDefinitionId ? `<p>Definition: ${task.nodeDefinitionId}</p>` : ""}
<p>Input assets: ${(task.assetIds ?? []).join(", ") || "none"}</p>
<p>Duration: ${typeof task.durationMs === "number" ? `${task.durationMs} ms` : "n/a"}</p>
${task.summaryLabel ? `<p>Summary: ${task.summaryLabel}</p>` : ""}
${task.errorMessage ? `<p>Error: ${task.errorMessage}</p>` : ""}
${task.codeHookLabel ? `<p>Code Hook: ${task.codeHookLabel}</p>` : ""}
${task.executorConfigLabel ? `<p>Executor Config: ${task.executorConfigLabel}</p>` : ""}
${task.canRetry ? `<button type="button">Retry Task</button>` : ""}
<p>Artifacts: ${(task.artifactIds ?? []).length}</p>
${

View File

@ -26,6 +26,8 @@ export function renderNodeConfigPanel(selectedNodeId?: string): string {
<h3>${node.name}</h3>
<p>${node.description}</p>
<p>Executor: ${node.executorType}</p>
<p>Runtime Target: configurable per workflow node</p>
<p>Artifact Title: optional override per node run</p>
<p>Input Schema: ${node.inputSchemaSummary}</p>
<p>Output Schema: ${node.outputSchemaSummary}</p>
<p>Code Hook: ${node.supportsCodeHook ? "enabled" : "not available"}</p>

View File

@ -26,6 +26,9 @@ test("node config panel opens when a node is selected", () => {
assert.match(html, /Node Configuration/);
assert.match(html, /Rename Delivery Folder/);
assert.match(html, /Executor/);
assert.match(html, /Runtime Target/);
assert.match(html, /Artifact Title/);
assert.match(html, /Code Hook/);
});
test("run detail view shows node status badges from run data", () => {
@ -69,6 +72,9 @@ test("run detail view shows node status badges from run data", () => {
stderrLines: ["Minor warning"],
logLines: ["Checking metadata"],
canRetry: true,
nodeDefinitionId: "validate-structure",
codeHookLabel: "python:process",
executorConfigLabel: "{\"url\":\"http://127.0.0.1:3010/mock\"}",
},
],
selectedTaskId: "task-2",
@ -88,6 +94,9 @@ test("run detail view shows node status badges from run data", () => {
assert.match(html, /Stdout/);
assert.match(html, /Minor warning/);
assert.match(html, /Retry Task/);
assert.match(html, /Definition: validate-structure/);
assert.match(html, /Code Hook: python:process/);
assert.match(html, /Executor Config/);
assert.match(html, /\/explore\/artifact-2/);
});

View File

@ -4,11 +4,14 @@ import { ApiClient } from "./api-client.ts";
import {
addNodeToDraft,
createDefaultWorkflowDraft,
getNodeRuntimeConfig,
removeNodeFromDraft,
resolveDefinitionIdForNode,
serializeWorkflowDraft,
setNodeRuntimeConfig,
workflowDraftFromVersion,
type WorkflowDraft,
type WorkflowNodeRuntimeConfig,
} from "./workflow-editor-state.ts";
type NavItem = "Assets" | "Workflows" | "Runs" | "Explore" | "Labels" | "Admin";
@ -45,6 +48,13 @@ function formatRunSummary(run: any) {
return `${successCount} success, ${failedCount} failed, ${runningCount} running, ${cancelledCount} cancelled, ${stdoutLineCount} stdout lines, ${stderrLineCount} stderr lines, ${totalTaskCount} total tasks`;
}
/** Renders an executor config as a compact JSON label; "none" when missing or empty. */
function formatExecutorConfigLabel(config?: Record<string, unknown>) {
  const hasEntries = config !== undefined && Object.keys(config).length > 0;
  return hasEntries ? JSON.stringify(config) : "none";
}
function usePathname() {
const [pathname, setPathname] = useState(
typeof window === "undefined" ? "/assets" : window.location.pathname || "/assets",
@ -406,6 +416,25 @@ function WorkflowEditorPage(props: {
nodes.find((node) => node.id === resolveDefinitionIdForNode(draft, selectedNodeId)) ?? null,
[draft, nodes, selectedNodeId],
);
const selectedNodeRuntimeConfig = useMemo(
() => getNodeRuntimeConfig(draft, selectedNodeId),
[draft, selectedNodeId],
);
function updateSelectedNodeRuntimeConfig(
nextConfig: WorkflowNodeRuntimeConfig | ((current: WorkflowNodeRuntimeConfig) => WorkflowNodeRuntimeConfig),
) {
if (!selectedNodeId) {
return;
}
const currentConfig = {
definitionId: resolveDefinitionIdForNode(draft, selectedNodeId),
...(getNodeRuntimeConfig(draft, selectedNodeId) ?? {}),
};
const resolved = typeof nextConfig === "function" ? nextConfig(currentConfig) : nextConfig;
setDraft(setNodeRuntimeConfig(draft, selectedNodeId, resolved));
setDirty(true);
}
async function saveCurrentDraft() {
const version = await props.api.saveWorkflowVersion(
@ -545,6 +574,104 @@ function WorkflowEditorPage(props: {
<p><strong>{selectedNode.name}</strong></p>
<p>{selectedNode.description}</p>
<p>Category: {selectedNode.category}</p>
<p>Definition: {resolveDefinitionIdForNode(draft, selectedNodeId)}</p>
<div className="field-grid">
<label>
Executor Type
<select
value={selectedNodeRuntimeConfig?.executorType ?? "python"}
onChange={(event) =>
updateSelectedNodeRuntimeConfig((current) => ({
...current,
executorType: event.target.value as "python" | "docker" | "http",
}))
}
>
<option value="python">python</option>
<option value="docker">docker</option>
<option value="http">http</option>
</select>
</label>
<label>
Runtime Target
<input
value={
selectedNodeRuntimeConfig?.executorType === "http"
? String(selectedNodeRuntimeConfig?.executorConfig?.url ?? "")
: selectedNodeRuntimeConfig?.executorType === "docker"
? String(selectedNodeRuntimeConfig?.executorConfig?.image ?? "")
: ""
}
placeholder={
selectedNodeRuntimeConfig?.executorType === "http"
? "http://127.0.0.1:3010/mock-executor"
: selectedNodeRuntimeConfig?.executorType === "docker"
? "python:3.11"
: "python executor uses inline hook or default"
}
onChange={(event) =>
updateSelectedNodeRuntimeConfig((current) => ({
...current,
executorConfig:
current.executorType === "http"
? {
...(current.executorConfig ?? {}),
url: event.target.value,
method: "POST",
}
: current.executorType === "docker"
? {
...(current.executorConfig ?? {}),
image: event.target.value,
}
: current.executorConfig,
}))
}
/>
</label>
<label>
Artifact Title
<input
value={selectedNodeRuntimeConfig?.artifactTitle ?? ""}
placeholder="Task Result: validate-structure"
onChange={(event) =>
updateSelectedNodeRuntimeConfig((current) => ({
...current,
artifactTitle: event.target.value,
}))
}
/>
</label>
</div>
{selectedNode.category === "Transform" ||
selectedNode.category === "Inspect" ||
selectedNode.category === "Utility" ? (
<label style={{ display: "grid", gap: 8 }}>
<span>Python Code Hook</span>
<textarea
rows={8}
value={selectedNodeRuntimeConfig?.codeHookSpec?.source ?? ""}
placeholder={[
"def process(task, context):",
" return {'nodeId': task['nodeId'], 'ok': True}",
].join("\n")}
onChange={(event) =>
updateSelectedNodeRuntimeConfig((current) => ({
...current,
codeHookSpec: event.target.value.trim().length > 0
? {
language: "python",
entrypoint: "process",
source: event.target.value,
}
: undefined,
}))
}
/>
</label>
) : (
<p className="empty-state">This node does not expose a code hook in V1.</p>
)}
</>
) : (
<p className="empty-state">Select a node.</p>
@ -791,7 +918,15 @@ function RunDetailPage(props: {
<>
<p>Node: {selectedTask.nodeId}</p>
<p>Status: {selectedTask.status}</p>
<p>Definition: {selectedTask.nodeDefinitionId ?? "n/a"}</p>
<p>Executor: {selectedTask.executorType}</p>
<p>Executor config: {formatExecutorConfigLabel(selectedTask.executorConfig)}</p>
<p>
Code hook:{" "}
{selectedTask.codeHookSpec
? `${selectedTask.codeHookSpec.language}:${selectedTask.codeHookSpec.entrypoint ?? "process"}`
: "none"}
</p>
<p>Input assets: {(selectedTask.assetIds ?? []).join(", ") || "none"}</p>
<p>Started at: {selectedTask.startedAt ?? "n/a"}</p>
<p>Finished at: {selectedTask.finishedAt ?? "n/a"}</p>

View File

@ -4,8 +4,10 @@ import assert from "node:assert/strict";
import {
addNodeToDraft,
createDefaultWorkflowDraft,
getNodeRuntimeConfig,
removeNodeFromDraft,
serializeWorkflowDraft,
setNodeRuntimeConfig,
workflowDraftFromVersion,
} from "./workflow-editor-state.ts";
@ -70,3 +72,33 @@ test("remove node prunes attached edges and serialize emits workflow version pay
assert.equal(payload.runtimeGraph.selectedPreset, "delivery-normalization");
assert.equal(payload.logicGraph.nodes[1]?.id, "validate-structure");
});
// Verifies that setNodeRuntimeConfig is immutable (the original draft is
// untouched) and that the stored config survives serializeWorkflowDraft.
test("set per-node runtime config and keep it in the serialized workflow payload", () => {
const draft = createDefaultWorkflowDraft();
const next = setNodeRuntimeConfig(draft, "validate-structure", {
executorType: "python",
codeHookSpec: {
language: "python",
entrypoint: "process",
source: "def process(task, context):\n return {'hooked': True}",
},
});
const payload = serializeWorkflowDraft(next);
// Original draft must not gain the config (copy-on-write semantics).
assert.equal(getNodeRuntimeConfig(draft, "validate-structure"), undefined);
assert.equal(getNodeRuntimeConfig(next, "validate-structure")?.executorType, "python");
assert.match(
getNodeRuntimeConfig(next, "validate-structure")?.codeHookSpec?.source ?? "",
/hooked/,
);
// The serialized payload carries the config under runtimeGraph.nodeConfigs.
assert.equal(
(
payload.runtimeGraph.nodeConfigs as Record<
string,
{ executorType?: string }
>
)["validate-structure"]?.executorType,
"python",
);
});

View File

@ -14,6 +14,21 @@ export type WorkflowNodeDefinitionSummary = {
category?: string;
};
export type WorkflowCodeHookSpec = {
language: "python";
entrypoint?: string;
source: string;
};
export type WorkflowNodeRuntimeConfig = {
definitionId?: string;
executorType?: "python" | "docker" | "http";
executorConfig?: Record<string, unknown>;
codeHookSpec?: WorkflowCodeHookSpec;
artifactType?: "json" | "directory" | "video";
artifactTitle?: string;
};
export type WorkflowDraft = {
visualGraph: Record<string, unknown>;
logicGraph: {
@ -23,6 +38,7 @@ export type WorkflowDraft = {
runtimeGraph: Record<string, unknown> & {
selectedPreset?: string;
nodeBindings?: Record<string, string>;
nodeConfigs?: Record<string, WorkflowNodeRuntimeConfig>;
};
pluginRefs: string[];
};
@ -39,6 +55,16 @@ function cloneDraft(draft: WorkflowDraft): WorkflowDraft {
runtimeGraph: {
...draft.runtimeGraph,
nodeBindings: { ...(draft.runtimeGraph.nodeBindings ?? {}) },
nodeConfigs: Object.fromEntries(
Object.entries(draft.runtimeGraph.nodeConfigs ?? {}).map(([nodeId, config]) => [
nodeId,
{
...config,
executorConfig: config.executorConfig ? { ...config.executorConfig } : undefined,
codeHookSpec: config.codeHookSpec ? { ...config.codeHookSpec } : undefined,
},
]),
),
},
pluginRefs: [...draft.pluginRefs],
};
@ -96,6 +122,7 @@ export function createDefaultWorkflowDraft(): WorkflowDraft {
"rename-folder": "rename-folder",
"validate-structure": "validate-structure",
},
nodeConfigs: {},
},
pluginRefs: ["builtin:delivery-nodes"],
};
@ -109,6 +136,20 @@ export function workflowDraftFromVersion(version?: WorkflowVersionLike | null):
const nodeBindings = {
...(version.runtimeGraph?.nodeBindings ?? {}),
} as Record<string, string>;
const nodeConfigs = Object.fromEntries(
Object.entries(version.runtimeGraph?.nodeConfigs ?? {}).map(([nodeId, config]) => [
nodeId,
{
...(config as WorkflowNodeRuntimeConfig),
executorConfig: (config as WorkflowNodeRuntimeConfig)?.executorConfig
? { ...(config as WorkflowNodeRuntimeConfig).executorConfig }
: undefined,
codeHookSpec: (config as WorkflowNodeRuntimeConfig)?.codeHookSpec
? { ...(config as WorkflowNodeRuntimeConfig).codeHookSpec }
: undefined,
},
]),
) as Record<string, WorkflowNodeRuntimeConfig>;
for (const node of version.logicGraph.nodes) {
nodeBindings[node.id] ??= inferDefinitionId(node.id);
@ -123,6 +164,7 @@ export function workflowDraftFromVersion(version?: WorkflowVersionLike | null):
runtimeGraph: {
...(version.runtimeGraph ?? {}),
nodeBindings,
nodeConfigs,
},
pluginRefs: [...(version.pluginRefs ?? [])],
};
@ -152,6 +194,7 @@ export function addNodeToDraft(
}
next.runtimeGraph.nodeBindings ??= {};
next.runtimeGraph.nodeBindings[nodeId] = definition.id;
next.runtimeGraph.nodeConfigs ??= {};
return { draft: next, nodeId };
}
@ -165,6 +208,9 @@ export function removeNodeFromDraft(draft: WorkflowDraft, nodeId: string): Workf
if (next.runtimeGraph.nodeBindings) {
delete next.runtimeGraph.nodeBindings[nodeId];
}
if (next.runtimeGraph.nodeConfigs) {
delete next.runtimeGraph.nodeConfigs[nodeId];
}
return next;
}
@ -172,6 +218,25 @@ export function resolveDefinitionIdForNode(draft: WorkflowDraft, nodeId: string)
return draft.runtimeGraph.nodeBindings?.[nodeId] ?? inferDefinitionId(nodeId);
}
/** Looks up the runtime config stored for a node in the draft, if any. */
export function getNodeRuntimeConfig(draft: WorkflowDraft, nodeId: string) {
  const configs = draft.runtimeGraph.nodeConfigs;
  return configs ? configs[nodeId] : undefined;
}
/**
 * Returns a new draft with `config` stored for `nodeId`. The input draft is
 * left untouched (copy-on-write via cloneDraft), and the nested
 * executorConfig/codeHookSpec objects are shallow-copied so the stored config
 * does not alias the caller's objects.
 */
export function setNodeRuntimeConfig(
  draft: WorkflowDraft,
  nodeId: string,
  config: WorkflowNodeRuntimeConfig,
): WorkflowDraft {
  const { executorConfig, codeHookSpec } = config;
  const storedConfig: WorkflowNodeRuntimeConfig = {
    ...config,
    executorConfig: executorConfig ? { ...executorConfig } : undefined,
    codeHookSpec: codeHookSpec ? { ...codeHookSpec } : undefined,
  };
  const next = cloneDraft(draft);
  const configs = next.runtimeGraph.nodeConfigs ?? {};
  configs[nodeId] = storedConfig;
  next.runtimeGraph.nodeConfigs = configs;
  return next;
}
/**
 * Produces the workflow-version payload for persistence. Deep-copies the
 * draft via cloneDraft so the editor's live state cannot alias the payload.
 */
export function serializeWorkflowDraft(draft: WorkflowDraft): WorkflowDraft {
  const payload = cloneDraft(draft);
  return payload;
}

View File

@ -1,4 +1,5 @@
// Executor backends a run task can be dispatched to.
export type ExecutorType = "python" | "docker" | "http";
// Kinds of artifact a node run may emit.
export type ArtifactType = "json" | "directory" | "video";
// Lifecycle states for a single run task.
export type TaskStatus = "pending" | "queued" | "running" | "success" | "failed" | "cancelled";
export type TaskStatusCounts = {
pending: number;
@ -35,13 +36,40 @@ export type ExecutorExecutionResult = {
stderrLines?: string[];
};
// User-supplied hook executed by the python executor for a task.
export type CodeHookSpec = {
language: "python";
// Function name invoked in the hook source; executor default applies when absent.
entrypoint?: string;
// Raw python source containing the entrypoint.
source: string;
};
// Per-node runtime overrides persisted on workflow versions and run snapshots.
export type NodeRuntimeConfig = {
// Overrides the definition binding inferred from the node id.
definitionId?: string;
executorType?: ExecutorType;
// Opaque executor-specific settings (e.g. docker image, http url).
executorConfig?: Record<string, unknown>;
codeHookSpec?: CodeHookSpec;
artifactType?: ArtifactType;
// Optional display title for the artifact produced by this node.
artifactTitle?: string;
};
// Runtime configuration frozen onto a run when it is created.
export type RunRuntimeSnapshot = {
selectedPreset?: string;
// node id -> definition id captured at run creation.
nodeBindings?: Record<string, string>;
// node id -> sanitized runtime config captured at run creation.
nodeConfigs?: Record<string, NodeRuntimeConfig>;
pluginRefs?: string[];
};
export type TaskRecord = {
id: string;
workflowRunId?: string;
workflowVersionId?: string;
nodeId: string;
nodeType?: string;
nodeDefinitionId?: string;
executorType: ExecutorType;
executorConfig?: Record<string, unknown>;
codeHookSpec?: CodeHookSpec;
artifactType?: ArtifactType;
artifactTitle?: string;
status: TaskStatus;
attempt?: number;
assetIds?: string[];
@ -64,4 +92,5 @@ export type ExecutionContext = {
workflowVersionId?: string;
nodeId: string;
assetIds?: string[];
nodeDefinitionId?: string;
};

View File

@ -9,9 +9,18 @@ export class DockerExecutor {
/**
 * Simulated docker execution: echoes the configured image and command back in
 * the result instead of launching a real container.
 *
 * Fix: removed stale pre-change lines (duplicate `result:`/`stdoutLines:`
 * keys left behind by an unmerged diff) so the returned object literal has a
 * single, well-formed shape.
 *
 * Falls back to a sentinel image when the task carries no docker config and
 * keeps only string entries from the configured command array.
 */
async execute(task: TaskRecord, _context: ExecutionContext): Promise<ExecutorExecutionResult> {
  this.executionCount += 1;
  const image = typeof task.executorConfig?.image === "string" ? task.executorConfig.image : "docker://local-simulated";
  const command = Array.isArray(task.executorConfig?.command)
    ? task.executorConfig.command.filter((item): item is string => typeof item === "string")
    : [];
  return {
    result: {
      taskId: task.id,
      executor: "docker" as const,
      image,
      command,
    },
    stdoutLines: [`docker executor processed ${task.nodeId} with ${image}`],
    stderrLines: [],
  };
}

View File

@ -7,8 +7,38 @@ import type {
export class HttpExecutor {
executionCount = 0;
async execute(task: TaskRecord, _context: ExecutionContext): Promise<ExecutorExecutionResult> {
async execute(task: TaskRecord, context: ExecutionContext): Promise<ExecutorExecutionResult> {
this.executionCount += 1;
const url = typeof task.executorConfig?.url === "string" ? task.executorConfig.url : undefined;
if (url) {
const method = typeof task.executorConfig?.method === "string" ? task.executorConfig.method : "POST";
const response = await fetch(url, {
method,
headers: {
"content-type": "application/json",
},
body: JSON.stringify({
task,
context,
}),
});
const responseText = await response.text();
if (!response.ok) {
throw Object.assign(new Error(`http executor request failed: ${response.status}`), {
stdoutLines: [],
stderrLines: responseText ? [responseText] : [],
});
}
const payload = responseText ? JSON.parse(responseText) as ExecutorExecutionResult : { result: null };
return {
result: payload.result,
stdoutLines: payload.stdoutLines ?? [`http executor called ${url}`],
stderrLines: payload.stderrLines ?? [],
};
}
return {
result: { taskId: task.id, executor: "http" as const },
stdoutLines: [`http executor processed ${task.nodeId}`],

View File

@ -1,18 +1,112 @@
import { spawn } from "node:child_process";
import { mkdtemp, readFile, rm, writeFile } from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import type {
ExecutionContext,
ExecutorExecutionResult,
TaskRecord,
} from "../contracts/execution-context.ts";
function splitOutputLines(output: string) {
return output
.split(/\r?\n/u)
.map((line) => line.trimEnd())
.filter((line) => line.length > 0);
}
function createDefaultResult(task: TaskRecord): ExecutorExecutionResult {
return {
result: { taskId: task.id, executor: "python" as const },
stdoutLines: [`python executor processed ${task.nodeId}`],
stderrLines: [],
};
}
function createPythonHarness() {
return [
"import contextlib",
"import io",
"import json",
"import pathlib",
"import sys",
"",
"payload = json.loads(pathlib.Path(sys.argv[1]).read_text())",
"namespace = {}",
"stdout_buffer = io.StringIO()",
"with contextlib.redirect_stdout(stdout_buffer):",
" exec(payload['source'], namespace)",
" entrypoint = namespace.get(payload['entrypoint'])",
" if not callable(entrypoint):",
" raise RuntimeError(f\"Python hook entrypoint not found: {payload['entrypoint']}\")",
" result = entrypoint(payload['task'], payload['context'])",
"pathlib.Path(payload['resultPath']).write_text(json.dumps({'result': result}))",
"sys.stdout.write(stdout_buffer.getvalue())",
].join("\n");
}
export class PythonExecutor {
executionCount = 0;
async execute(task: TaskRecord, _context: ExecutionContext): Promise<ExecutorExecutionResult> {
async execute(task: TaskRecord, context: ExecutionContext): Promise<ExecutorExecutionResult> {
this.executionCount += 1;
return {
result: { taskId: task.id, executor: "python" as const },
stdoutLines: [`python executor processed ${task.nodeId}`],
stderrLines: [],
};
if (!task.codeHookSpec?.source) {
return createDefaultResult(task);
}
const tempDir = await mkdtemp(path.join(os.tmpdir(), "emboflow-python-executor-"));
const inputPath = path.join(tempDir, "input.json");
const resultPath = path.join(tempDir, "result.json");
const runnerPath = path.join(tempDir, "runner.py");
await writeFile(
inputPath,
JSON.stringify({
task,
context,
entrypoint: task.codeHookSpec.entrypoint ?? "process",
source: task.codeHookSpec.source,
resultPath,
}),
);
await writeFile(runnerPath, createPythonHarness());
const child = spawn("python3", [runnerPath, inputPath], {
stdio: ["ignore", "pipe", "pipe"],
});
let stdout = "";
let stderr = "";
child.stdout.on("data", (chunk) => {
stdout += String(chunk);
});
child.stderr.on("data", (chunk) => {
stderr += String(chunk);
});
const exitCode = await new Promise<number>((resolve, reject) => {
child.on("error", reject);
child.on("close", (code) => resolve(code ?? 1));
});
try {
if (exitCode !== 0) {
throw Object.assign(new Error(`python executor failed with exit code ${exitCode}`), {
stdoutLines: splitOutputLines(stdout),
stderrLines: splitOutputLines(stderr),
});
}
const resultPayload = JSON.parse(await readFile(resultPath, "utf8")) as { result?: unknown };
return {
result: resultPayload.result,
stdoutLines: splitOutputLines(stdout),
stderrLines: splitOutputLines(stderr),
};
} finally {
await rm(tempDir, { recursive: true, force: true });
}
}
}

View File

@ -33,6 +33,7 @@ export class TaskRunner {
const context: ExecutionContext = {
taskId: task.id,
nodeId: task.nodeId,
nodeDefinitionId: task.nodeDefinitionId,
};
await this.executors[task.executorType as ExecutorType].execute(task, context);
this.scheduler.transition(task.id, "success");

View File

@ -3,7 +3,9 @@ import { randomUUID } from "node:crypto";
import type { Db } from "mongodb";
import type {
CodeHookSpec,
ExecutorType,
NodeRuntimeConfig,
RunExecutionSummary,
TaskExecutionSummary,
TaskRecord,
@ -18,6 +20,12 @@ type WorkflowRunDocument = {
status: "queued" | "running" | "success" | "failed" | "cancelled";
triggeredBy: string;
assetIds: string[];
runtimeSnapshot?: {
selectedPreset?: string;
nodeBindings?: Record<string, string>;
nodeConfigs?: Record<string, NodeRuntimeConfig>;
pluginRefs?: string[];
};
startedAt?: string;
finishedAt?: string;
durationMs?: number;
@ -42,7 +50,12 @@ type RunTaskDocument = {
workflowVersionId: string;
nodeId: string;
nodeType: string;
nodeDefinitionId?: string;
executorType: ExecutorType;
executorConfig?: Record<string, unknown>;
codeHookSpec?: CodeHookSpec;
artifactType?: "json" | "directory" | "video";
artifactTitle?: string;
status: TaskStatus;
attempt: number;
assetIds: string[];
@ -72,7 +85,12 @@ function toTaskRecord(task: RunTaskDocument): TaskRecord {
workflowVersionId: task.workflowVersionId,
nodeId: task.nodeId,
nodeType: task.nodeType,
nodeDefinitionId: task.nodeDefinitionId,
executorType: task.executorType,
executorConfig: task.executorConfig,
codeHookSpec: task.codeHookSpec,
artifactType: task.artifactType,
artifactTitle: task.artifactTitle,
status: task.status,
attempt: task.attempt,
assetIds: task.assetIds,
@ -111,7 +129,7 @@ function buildRunExecutionSummary(tasks: TaskRecord[]): RunExecutionSummary {
const taskCounts = buildTaskStatusCounts(tasks);
return {
totalTaskCount: tasks.length,
completedTaskCount: taskCounts.success + taskCounts.failed,
completedTaskCount: taskCounts.success + taskCounts.failed + taskCounts.cancelled,
artifactCount: tasks.reduce((total, task) => total + (task.outputArtifactIds?.length ?? 0), 0),
stdoutLineCount: tasks.reduce((total, task) => total + (task.stdoutLines?.length ?? 0), 0),
stderrLineCount: tasks.reduce((total, task) => total + (task.stderrLines?.length ?? 0), 0),
@ -241,8 +259,8 @@ export class MongoWorkerStore {
async createTaskArtifact(task: TaskRecord, payload: Record<string, unknown>) {
const artifact = {
_id: `artifact-${randomUUID()}`,
type: "json",
title: `Task Result: ${task.nodeId}`,
type: task.artifactType ?? "json",
title: task.artifactTitle ?? `Task Result: ${task.nodeId}`,
producerType: "run_task",
producerId: task.id,
payload,

View File

@ -44,6 +44,7 @@ export class WorkerRuntime {
workflowRunId: task.workflowRunId,
workflowVersionId: task.workflowVersionId,
nodeId: task.nodeId,
nodeDefinitionId: task.nodeDefinitionId,
assetIds: task.assetIds,
};
@ -54,6 +55,7 @@ export class WorkerRuntime {
const artifact = await this.store.createTaskArtifact(task, {
nodeId: task.nodeId,
nodeType: task.nodeType,
nodeDefinitionId: task.nodeDefinitionId,
executorType: task.executorType,
assetIds: task.assetIds,
result: execution.result,

View File

@ -427,3 +427,61 @@ test("worker skips queued tasks that belong to a cancelled run", async (t) => {
assert.match(cancelledTask?.logLines?.at(-1) ?? "", /cancelled/i);
assert.equal(activeTask?.status, "success");
});
// Integration test: a queued run_task carrying a frozen Python codeHookSpec
// must be executed by the worker through the local python3 harness, with hook
// stdout persisted on the task and the hook's return value stored as the
// task artifact payload.
// NOTE(review): requires python3 on PATH plus the mongodb-memory-server
// fixture; the embedded Python source's indentation looks collapsed by
// rendering — verify against the original test file.
test("worker executes a python code hook snapshot from the queued task", async (t) => {
const fixture = await createRuntimeFixture("emboflow-worker-python-hook");
t.after(async () => {
await fixture.close();
});
// Seed a queued run so the worker's dequeue loop will pick up its task.
await fixture.db.collection("workflow_runs").insertOne({
_id: "run-python-hook",
workflowDefinitionId: "workflow-python-hook",
workflowVersionId: "workflow-python-hook-v1",
status: "queued",
triggeredBy: "local-user",
assetIds: ["asset-hook"],
createdAt: new Date().toISOString(),
updatedAt: new Date().toISOString(),
});
// Seed the task with a frozen codeHookSpec snapshot, mimicking what the API
// copies onto run_tasks at run-creation time.
await fixture.db.collection("run_tasks").insertOne({
_id: "task-python-hook",
workflowRunId: "run-python-hook",
workflowVersionId: "workflow-python-hook-v1",
nodeId: "validate-structure",
nodeType: "inspect",
executorType: "python",
status: "queued",
attempt: 1,
assetIds: ["asset-hook"],
upstreamNodeIds: [],
outputArtifactIds: [],
codeHookSpec: {
language: "python",
entrypoint: "process",
source: [
"def process(task, context):",
" print(f\"hook running for {task['nodeId']}\")",
" return {'nodeId': task['nodeId'], 'assetIds': context['assetIds'], 'hooked': True}",
].join("\n"),
},
createdAt: new Date().toISOString(),
updatedAt: new Date().toISOString(),
});
await fixture.runtime.runNextTask();
const task = await fixture.store.getRunTask("task-python-hook");
const artifact = await fixture.db.collection("artifacts").findOne({ producerId: "task-python-hook" });
// The hook's print() must land in stdoutLines and its return value must
// become the artifact payload's `result`.
assert.equal(task?.status, "success");
assert.deepEqual(task?.stdoutLines, ["hook running for validate-structure"]);
assert.deepEqual(task?.stderrLines, []);
assert.equal(task?.summary?.executorType, "python");
assert.deepEqual((artifact?.payload as { result?: { hooked?: boolean } } | undefined)?.result, {
nodeId: "validate-structure",
assetIds: ["asset-hook"],
hooked: true,
});
});

View File

@ -61,6 +61,8 @@ Visual changes must not change workflow semantics. Runtime changes must produce
The current V1 editor implementation keeps a mutable local draft that is initialized from the latest saved workflow version. Saving the draft creates a new immutable workflow version. Triggering a run from a dirty draft first saves a fresh workflow version, then creates the run from that saved snapshot. The V1 editor also requires binding at least one project asset before run creation, and the selected asset ids are persisted with the run snapshot.
The current local runtime also persists per-node runtime config under `runtimeGraph.nodeConfigs`. That config includes executor overrides, executor-specific config payloads, optional artifact metadata, and Python code-hook source for supported node categories. When a run is created, the API freezes those node configs into `workflow_runs.runtimeSnapshot` and copies the effective executor choice plus code-hook snapshot onto each `run_task`.
## Node Categories
V1 node categories:
@ -124,6 +126,8 @@ def process(input_data, context):
This keeps serialization, logging, and runtime control predictable.
The current V1 worker executes trusted-local Python hooks when a `run_task` carries a `codeHookSpec`. The hook is executed through a constrained Python harness with the task snapshot and execution context passed in as JSON. Hook stdout is captured into `stdoutLines`, hook failures populate `stderrLines`, and the returned object becomes the task artifact payload.
## Data Flow Contract
Tasks should exchange managed references, not loose file paths.
@ -281,6 +285,8 @@ The persisted local runtime now covers:
The React workflow editor now loads the latest persisted version from the Mongo-backed API instead of rendering only a fixed starter graph. Draft edits are local editor state until the user saves, at which point the draft is serialized into a new workflow version document. Before a run is created, the editor loads project assets, requires one to be selected, and passes that binding to the API.
The editor right panel now exposes the first writable runtime controls instead of read-only node metadata. V1 users can override the executor type per node, configure a simple executor target such as an HTTP URL or a Docker image, override the produced artifact title, and author Python code-hook source for supported node categories.
The runtime Runs workspace now loads recent runs for the active project. Run detail views poll active runs until they settle and let the operator inspect task-level artifacts directly through Explore links.
The worker-backed runtime now persists task execution summaries directly on `run_tasks` instead of treating artifacts as the only observable output. Each completed or failed task records:
@ -319,6 +325,8 @@ The current V1 runtime also implements the first run-control loop:
V1 cancellation is scheduler-level only. It does not attempt to hard-stop an executor that is already running inside the local worker loop.
The selected-task panel in the current Runs workspace also shows the frozen node definition id, executor config snapshot, and code-hook metadata, so an operator can inspect exactly which runtime settings were used without reopening the workflow editor.
The API and worker runtimes now both have direct integration coverage against a real Mongo runtime through `mongodb-memory-server`, in addition to the older in-memory contract tests.
The first web authoring surface already follows the three-pane layout contract with:

View File

@ -304,7 +304,7 @@ The current local runtime now exposes these surfaces as a real React application
The current implementation uses direct API-driven page loads, lightweight route handling, and incremental polling for active run detail views instead of a deeper client-side state framework.
The workflow editor surface now reflects persisted workflow versions instead of a hardcoded sample graph. It exposes draft status, node add and remove actions, reload-latest behavior, asset selection for run binding, and version-save / run-trigger controls against the live API. The Runs workspace now exposes project-scoped run history and selected-task artifact links into Explore.
The workflow editor surface now reflects persisted workflow versions instead of a hardcoded sample graph. It exposes draft status, node add and remove actions, reload-latest behavior, asset selection for run binding, and version-save / run-trigger controls against the live API. The right panel also now includes the first writable runtime controls for executor override, runtime target, artifact title, and Python code-hook source. The Runs workspace now exposes project-scoped run history, selected-task artifact links into Explore, and the frozen executor/code-hook snapshot for the selected task.
Do not rename the same concept differently across pages.

View File

@ -316,7 +316,12 @@ Core fields:
- `workflowVersionId`
- `nodeId`
- `nodeType`
- `nodeDefinitionId`
- `executorType`
- `executorConfig`
- `codeHookSpec`
- `artifactType`
- `artifactTitle`
- `status`
- `attempt`
- `assetIds`
@ -341,6 +346,7 @@ This collection should remain separate from `workflow_runs` because task volume
The current executable worker path expects `run_tasks` to be self-sufficient enough for dequeue and dependency promotion. That means V1 runtime tasks already persist:
- executor choice
- node definition id and frozen per-node runtime config
- bound asset ids
- upstream node dependencies
- produced artifact ids
@ -350,6 +356,7 @@ The current executable worker path expects `run_tasks` to be self-sufficient eno
The current runtime also aggregates task execution back onto `workflow_runs`, so run documents now carry:
- a frozen `runtimeSnapshot` copied from the workflow version runtime layer at run creation time
- task counts by status
- completed task count
- artifact count

View File

@ -23,6 +23,7 @@
- `2026-03-27`: The current observability pass persists task execution summaries, timestamps, log lines, and result previews on Mongo-backed `run_tasks`, and surfaces those fields in the React run detail view.
- `2026-03-27`: The current follow-up observability pass adds persisted stdout/stderr fields on `run_tasks` plus aggregated run summaries, durations, and task counts on `workflow_runs`.
- `2026-03-27`: The current run-control pass adds run cancellation, run retry from immutable snapshots, and task retry for failed/cancelled nodes with downstream reset semantics.
- `2026-03-27`: The current runtime-config pass freezes per-node executor config into `workflow_runs` and `run_tasks`, exposes runtime editing controls in the React workflow editor, and executes trusted-local Python code hooks from the task snapshot.
---