feat(ui): add operator UI server, stores, and insights
This commit is contained in:
tests/env-store.test.ts — new file, 35 lines (@@ -0,0 +1,35 @@)
|
||||
import test from "node:test";
|
||||
import assert from "node:assert/strict";
|
||||
import { mkdtemp, readFile, writeFile } from "node:fs/promises";
|
||||
import { tmpdir } from "node:os";
|
||||
import { resolve } from "node:path";
|
||||
import { parseEnvFile, writeEnvFileUpdates } from "../src/ui/env-store.js";
|
||||
|
||||
test("parseEnvFile handles missing files", async () => {
|
||||
const root = await mkdtemp(resolve(tmpdir(), "ai-ops-env-store-"));
|
||||
const envPath = resolve(root, ".env");
|
||||
|
||||
const parsed = await parseEnvFile(envPath);
|
||||
assert.deepEqual(parsed.values, {});
|
||||
assert.deepEqual(parsed.lines, []);
|
||||
});
|
||||
|
||||
test("writeEnvFileUpdates merges and appends keys", async () => {
|
||||
const root = await mkdtemp(resolve(tmpdir(), "ai-ops-env-store-"));
|
||||
const envPath = resolve(root, ".env");
|
||||
|
||||
await writeFile(envPath, "FOO=bar\nAGENT_MAX_CONCURRENT=4\n", "utf8");
|
||||
|
||||
const updated = await writeEnvFileUpdates(envPath, {
|
||||
AGENT_MAX_CONCURRENT: "9",
|
||||
AGENT_RUNTIME_DISCORD_MIN_SEVERITY: "warning",
|
||||
});
|
||||
|
||||
assert.equal(updated.values.FOO, "bar");
|
||||
assert.equal(updated.values.AGENT_MAX_CONCURRENT, "9");
|
||||
assert.equal(updated.values.AGENT_RUNTIME_DISCORD_MIN_SEVERITY, "warning");
|
||||
|
||||
const rendered = await readFile(envPath, "utf8");
|
||||
assert.match(rendered, /AGENT_MAX_CONCURRENT=9/);
|
||||
assert.match(rendered, /AGENT_RUNTIME_DISCORD_MIN_SEVERITY=warning/);
|
||||
});
|
||||
tests/provider-executor.test.ts — new file, 66 lines (@@ -0,0 +1,66 @@)
|
||||
import test from "node:test";
|
||||
import assert from "node:assert/strict";
|
||||
import { parseActorExecutionResultFromModelOutput } from "../src/ui/provider-executor.js";
|
||||
|
||||
test("parseActorExecutionResultFromModelOutput parses strict JSON payload", () => {
|
||||
const parsed = parseActorExecutionResultFromModelOutput({
|
||||
rawText: JSON.stringify({
|
||||
status: "validation_fail",
|
||||
payload: {
|
||||
summary: "missing test",
|
||||
},
|
||||
stateFlags: {
|
||||
needs_fix: true,
|
||||
},
|
||||
stateMetadata: {
|
||||
stage: "qa",
|
||||
},
|
||||
events: [
|
||||
{
|
||||
type: "validation_failed",
|
||||
payload: {
|
||||
summary: "failed",
|
||||
},
|
||||
},
|
||||
],
|
||||
failureKind: "soft",
|
||||
failureCode: "missing_test",
|
||||
}),
|
||||
});
|
||||
|
||||
assert.equal(parsed.status, "validation_fail");
|
||||
assert.equal(parsed.payload?.summary, "missing test");
|
||||
assert.equal(parsed.stateFlags?.needs_fix, true);
|
||||
assert.equal(parsed.stateMetadata?.stage, "qa");
|
||||
assert.equal(parsed.events?.[0]?.type, "validation_failed");
|
||||
assert.equal(parsed.failureKind, "soft");
|
||||
assert.equal(parsed.failureCode, "missing_test");
|
||||
});
|
||||
|
||||
test("parseActorExecutionResultFromModelOutput parses fenced JSON", () => {
|
||||
const parsed = parseActorExecutionResultFromModelOutput({
|
||||
rawText: [
|
||||
"Here is the result",
|
||||
"```json",
|
||||
JSON.stringify({
|
||||
status: "success",
|
||||
payload: {
|
||||
code: "done",
|
||||
},
|
||||
}),
|
||||
"```",
|
||||
].join("\n"),
|
||||
});
|
||||
|
||||
assert.equal(parsed.status, "success");
|
||||
assert.equal(parsed.payload?.code, "done");
|
||||
});
|
||||
|
||||
test("parseActorExecutionResultFromModelOutput falls back when response is not JSON", () => {
|
||||
const parsed = parseActorExecutionResultFromModelOutput({
|
||||
rawText: "Implemented update successfully.",
|
||||
});
|
||||
|
||||
assert.equal(parsed.status, "success");
|
||||
assert.equal(parsed.payload?.assistantResponse, "Implemented update successfully.");
|
||||
});
|
||||
tests/session-insights.test.ts — new file, 207 lines (@@ -0,0 +1,207 @@)
|
||||
import test from "node:test";
|
||||
import assert from "node:assert/strict";
|
||||
import { mkdir, writeFile } from "node:fs/promises";
|
||||
import { tmpdir } from "node:os";
|
||||
import { resolve } from "node:path";
|
||||
import { mkdtemp } from "node:fs/promises";
|
||||
import { buildSessionGraphInsight, buildSessionSummaries } from "../src/ui/session-insights.js";
|
||||
import { parseAgentManifest } from "../src/agents/manifest.js";
|
||||
|
||||
function createManifest() {
|
||||
return parseAgentManifest({
|
||||
schemaVersion: "1",
|
||||
topologies: ["sequential", "retry-unrolled"],
|
||||
personas: [
|
||||
{
|
||||
id: "planner",
|
||||
displayName: "Planner",
|
||||
systemPromptTemplate: "Plan",
|
||||
toolClearance: {
|
||||
allowlist: ["read_file"],
|
||||
banlist: [],
|
||||
},
|
||||
},
|
||||
],
|
||||
relationships: [],
|
||||
topologyConstraints: {
|
||||
maxDepth: 3,
|
||||
maxRetries: 2,
|
||||
},
|
||||
pipeline: {
|
||||
entryNodeId: "n1",
|
||||
nodes: [
|
||||
{
|
||||
id: "n1",
|
||||
actorId: "a1",
|
||||
personaId: "planner",
|
||||
topology: { kind: "sequential" },
|
||||
},
|
||||
{
|
||||
id: "n2",
|
||||
actorId: "a2",
|
||||
personaId: "planner",
|
||||
topology: { kind: "retry-unrolled" },
|
||||
},
|
||||
],
|
||||
edges: [
|
||||
{
|
||||
from: "n1",
|
||||
to: "n2",
|
||||
event: "validation_failed",
|
||||
},
|
||||
],
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
test("buildSessionGraphInsight maps attempts, edge visits, and sandbox payload", async () => {
|
||||
const root = await mkdtemp(resolve(tmpdir(), "ai-ops-session-insights-"));
|
||||
const stateRoot = resolve(root, "state");
|
||||
const sessionId = "session-1";
|
||||
const handoffDir = resolve(stateRoot, sessionId, "handoffs");
|
||||
const runtimeLogPath = resolve(root, "runtime-events.ndjson");
|
||||
|
||||
await mkdir(handoffDir, { recursive: true });
|
||||
await writeFile(
|
||||
resolve(handoffDir, "n2.json"),
|
||||
`${JSON.stringify({
|
||||
nodeId: "n2",
|
||||
fromNodeId: "n1",
|
||||
payload: {},
|
||||
createdAt: new Date().toISOString(),
|
||||
})}\n`,
|
||||
"utf8",
|
||||
);
|
||||
|
||||
const lines = [
|
||||
{
|
||||
id: "1",
|
||||
timestamp: "2026-01-01T00:00:00.000Z",
|
||||
type: "session.started",
|
||||
severity: "info",
|
||||
message: "started",
|
||||
sessionId,
|
||||
},
|
||||
{
|
||||
id: "2",
|
||||
timestamp: "2026-01-01T00:00:01.000Z",
|
||||
type: "node.attempt.completed",
|
||||
severity: "info",
|
||||
message: "n1 success",
|
||||
sessionId,
|
||||
nodeId: "n1",
|
||||
attempt: 1,
|
||||
usage: { durationMs: 100, costUsd: 0.001 },
|
||||
metadata: {
|
||||
status: "success",
|
||||
executionContext: { phase: "n1", allowedTools: ["read_file"] },
|
||||
},
|
||||
},
|
||||
{
|
||||
id: "3",
|
||||
timestamp: "2026-01-01T00:00:02.000Z",
|
||||
type: "node.attempt.completed",
|
||||
severity: "warning",
|
||||
message: "n2 validation",
|
||||
sessionId,
|
||||
nodeId: "n2",
|
||||
attempt: 1,
|
||||
usage: { durationMs: 140, costUsd: 0.002 },
|
||||
metadata: {
|
||||
status: "validation_fail",
|
||||
retrySpawned: true,
|
||||
subtasks: ["fix tests"],
|
||||
executionContext: { phase: "n2", allowedTools: ["read_file"] },
|
||||
},
|
||||
},
|
||||
{
|
||||
id: "4",
|
||||
timestamp: "2026-01-01T00:00:03.000Z",
|
||||
type: "node.attempt.completed",
|
||||
severity: "info",
|
||||
message: "n2 success",
|
||||
sessionId,
|
||||
nodeId: "n2",
|
||||
attempt: 2,
|
||||
usage: { durationMs: 120, costUsd: 0.0025 },
|
||||
metadata: {
|
||||
status: "success",
|
||||
executionContext: { phase: "n2", allowedTools: ["read_file"] },
|
||||
},
|
||||
},
|
||||
{
|
||||
id: "5",
|
||||
timestamp: "2026-01-01T00:00:04.000Z",
|
||||
type: "session.completed",
|
||||
severity: "info",
|
||||
message: "completed",
|
||||
sessionId,
|
||||
metadata: {
|
||||
status: "success",
|
||||
},
|
||||
},
|
||||
];
|
||||
|
||||
await writeFile(runtimeLogPath, `${lines.map((line) => JSON.stringify(line)).join("\n")}\n`, "utf8");
|
||||
|
||||
const manifest = createManifest();
|
||||
const graph = await buildSessionGraphInsight({
|
||||
stateRoot,
|
||||
runtimeEventLogPath: runtimeLogPath,
|
||||
sessionId,
|
||||
manifest,
|
||||
});
|
||||
|
||||
assert.equal(graph.status, "success");
|
||||
assert.equal(graph.nodes.length, 2);
|
||||
|
||||
const node2 = graph.nodes.find((node) => node.nodeId === "n2");
|
||||
assert.ok(node2);
|
||||
assert.equal(node2.attemptCount, 2);
|
||||
assert.equal(node2.subtaskCount, 1);
|
||||
assert.equal(node2.sandboxPayload?.phase, "n2");
|
||||
|
||||
const edge = graph.edges.find((entry) => entry.from === "n1" && entry.to === "n2");
|
||||
assert.ok(edge);
|
||||
assert.equal(edge.visited, true);
|
||||
assert.equal(edge.trigger, "event:validation_failed");
|
||||
});
|
||||
|
||||
test("buildSessionSummaries reflects aborted failed session", async () => {
|
||||
const root = await mkdtemp(resolve(tmpdir(), "ai-ops-session-insights-"));
|
||||
const stateRoot = resolve(root, "state");
|
||||
const sessionId = "session-abort";
|
||||
const runtimeLogPath = resolve(root, "runtime-events.ndjson");
|
||||
|
||||
await mkdir(resolve(stateRoot, sessionId), { recursive: true });
|
||||
|
||||
const lines = [
|
||||
{
|
||||
id: "1",
|
||||
timestamp: "2026-01-01T00:00:00.000Z",
|
||||
type: "session.started",
|
||||
severity: "info",
|
||||
message: "started",
|
||||
sessionId,
|
||||
},
|
||||
{
|
||||
id: "2",
|
||||
timestamp: "2026-01-01T00:00:01.000Z",
|
||||
type: "session.failed",
|
||||
severity: "critical",
|
||||
message: "Pipeline aborted after hard failures.",
|
||||
sessionId,
|
||||
},
|
||||
];
|
||||
|
||||
await writeFile(runtimeLogPath, `${lines.map((line) => JSON.stringify(line)).join("\n")}\n`, "utf8");
|
||||
|
||||
const sessions = await buildSessionSummaries({
|
||||
stateRoot,
|
||||
runtimeEventLogPath: runtimeLogPath,
|
||||
});
|
||||
|
||||
assert.equal(sessions.length, 1);
|
||||
assert.equal(sessions[0]?.status, "failure");
|
||||
assert.equal(sessions[0]?.aborted, true);
|
||||
});
|
||||
Reference in New Issue
Block a user