first commit

This commit is contained in:
2026-02-25 23:49:54 -05:00
commit 4d097161cb
1775 changed files with 452827 additions and 0 deletions

View File

@@ -0,0 +1,173 @@
import logging
from pathlib import Path
from typing import List
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from app.api.routes.orders_routes.dependencies import (
get_address_label_service,
get_fulfillment_service,
get_label_printer,
get_pull_sheet_service,
get_tracking_store,
order_service,
)
from app.db.database import get_db
from app.schemas.order import TCGPlayerOrderSchema
from app.services.address_label_service import AddressLabelService
from app.services.fulfillment import FulfillmentService, LabelAlreadyExistsError
from app.services.label_printer import LabelPrinterService
from app.services.order_fulfillment import merge_tracking_info
from app.services.order_processor import OrderProcessingError, build_shipment_request
from app.services.pull_sheet_service import PullSheetService
from app.services.tracking_store import TrackingStore
logger = logging.getLogger(__name__)
router = APIRouter()
@router.get("", response_model=List[TCGPlayerOrderSchema])
async def list_orders(
    skip: int = 0,
    limit: int = 100,
    db: Session = Depends(get_db),
):
    """Return a page of locally mirrored TCGPlayer orders.

    Args:
        skip: Pagination offset (number of records to skip).
        limit: Maximum number of records to return.
        db: Database session (injected).
    """
    page = order_service.get_orders(db, skip=skip, limit=limit)
    return page
@router.get("/{order_number}", response_model=TCGPlayerOrderSchema)
async def get_order(
    order_number: str,
    db: Session = Depends(get_db),
):
    """Fetch one mirrored TCGPlayer order by order number.

    Raises:
        HTTPException: 404 when no matching order exists locally.
    """
    found = order_service.get_order(db, order_number)
    if not found:
        raise HTTPException(status_code=404, detail="Order not found")
    return found
@router.post("/sync")
async def sync_orders(db: Session = Depends(get_db)):
    """Sync local TCGPlayer order mirror from TCGPlayer order-management API."""
    sync_result = await order_service.sync_orders(db)
    return sync_result
@router.post("/tcgplayer/{order_number}/purchase-postage")
@router.post("/{order_number}/purchase_postage", include_in_schema=False)
async def purchase_tcgplayer_postage(
    order_number: str,
    db: Session = Depends(get_db),
    fulfillment: FulfillmentService = Depends(get_fulfillment_service),
):
    """Purchase postage for a local TCGPlayer order and cache label PDF on disk.

    On success the order's tracking info is merged, the label path is stored
    alongside it, and the order is marked "Shipped".

    Raises:
        HTTPException: 404 unknown order, 422 invalid order data,
            409 label already purchased, 500 on any other failure.
    """
    order = order_service.get_order(db, order_number)
    if not order:
        raise HTTPException(status_code=404, detail="Order not found")
    try:
        request = build_shipment_request(order)
        result = await fulfillment.purchase_postage(request)
        # Older rows may hold non-dict values in tracking_numbers; treat
        # anything that is not a dict as "no existing tracking".
        existing_tracking = order.tracking_numbers if isinstance(order.tracking_numbers, dict) else None
        tracking_info = merge_tracking_info(
            existing_tracking,
            tracking_company="USPS",
            tracking_number=result.tracking_number,
            tracking_url=result.tracking_url,
        ) or {}
        tracking_info["label_url"] = str(result.label_path)
        order.tracking_numbers = tracking_info
        order.status = "Shipped"
        db.commit()
        return {
            "status": "purchased",
            "integration": "tcgplayer",
            "order_id": result.order_id,
            "tracking_number": result.tracking_number,
            "tracking_url": result.tracking_url,
            "label_path": str(result.label_path),
        }
    except OrderProcessingError as exc:
        # Chain the cause so logs/tracebacks keep the original error.
        raise HTTPException(status_code=422, detail=str(exc)) from exc
    except LabelAlreadyExistsError as exc:
        raise HTTPException(status_code=409, detail=str(exc)) from exc
    except Exception:
        # Discard any partially mutated ORM state so the session stays usable.
        db.rollback()
        logger.exception("Purchase postage failed for order_number=%s", order_number)
        raise HTTPException(status_code=500, detail="Purchase postage failed")
@router.post("/tcgplayer/{order_number}/print-label")
@router.post("/{order_number}/print_label", include_in_schema=False)
async def print_tcgplayer_label(
    order_number: str,
    tracking_store: TrackingStore = Depends(get_tracking_store),
    printer: LabelPrinterService = Depends(get_label_printer),
):
    """Send a previously purchased shipping label to the label printer.

    Raises:
        HTTPException: 404 when no postage record exists, 500 when the
            cached label file is missing or the print job fails.
    """
    record = tracking_store.get(order_number)
    if not record:
        raise HTTPException(status_code=404, detail="No postage purchased for this order")
    label_file = Path(record.label_path)
    if not label_file.exists():
        raise HTTPException(status_code=500, detail="Label file missing")
    printed = await printer.print_label(label_file)
    if not printed:
        raise HTTPException(status_code=500, detail="Print failed")
    return {"status": "printed", "integration": "tcgplayer", "order_number": order_number}
@router.post("/generate-pull-sheets")
async def generate_pull_sheets(
    order_ids: List[str],
    db: Session = Depends(get_db),
    service: PullSheetService = Depends(get_pull_sheet_service),
):
    """Render (or reuse a cached) pull sheet for the given orders."""
    try:
        rendered = await service.get_or_create_rendered_pull_sheet(db, order_ids)
        return {"filename": rendered.name, "path": rendered.path, "url": f"/files/{rendered.id}"}
    except Exception:
        logger.exception("Failed to generate pull sheets")
        raise HTTPException(status_code=500, detail="Failed to generate pull sheets")
@router.post("/generate-packing-slips")
async def generate_packing_slips(
    order_ids: List[str],
    db: Session = Depends(get_db),
):
    """Render (or reuse a cached) packing slip for the given orders."""
    try:
        slip = await order_service.generate_packing_slip(db, order_ids)
        return {"filename": slip.name, "path": slip.path, "url": f"/files/{slip.id}"}
    except Exception:
        logger.exception("Failed to generate packing slips")
        raise HTTPException(status_code=500, detail="Failed to generate packing slips")
@router.post("/generate-address-labels")
async def generate_address_labels(
    order_ids: List[str],
    label_type: str = "dk1201",
    db: Session = Depends(get_db),
    service: AddressLabelService = Depends(get_address_label_service),
):
    """Render address labels for the given orders.

    Raises:
        HTTPException: 400 for an unsupported label type, 500 on render failure.
    """
    if label_type not in ("dk1201", "dk1241"):
        raise HTTPException(status_code=400, detail="Invalid label type")
    try:
        label_files = await service.get_or_create_address_labels(db, order_ids, label_type)  # type: ignore
        return [
            {"filename": item.name, "path": item.path, "url": f"/files/{item.id}"}
            for item in label_files
        ]
    except Exception:
        logger.exception("Failed to generate address labels")
        raise HTTPException(status_code=500, detail="Failed to generate address labels")

View File

@@ -0,0 +1,589 @@
import fs from 'node:fs';
import path from 'node:path';
import { DatabaseSync } from 'node:sqlite';
import {
createPostgresPool,
initializePostgresSchema,
runPostgresHealthcheck,
} from './dbPostgres.js';
import { openPostgresSyncBridge } from './dbPostgresSyncBridge.js';
import { parseBooleanFlag } from './utils.js';
const defaultDbPath = path.join(process.cwd(), 'data', 'gigagimbank.sqlite');
const DB_DRIVER_SQLITE = 'sqlite';
const DB_DRIVER_POSTGRES = 'postgres';
// Coerce an arbitrary env value to a known driver name; anything that is
// not explicitly "postgres" falls back to sqlite.
function normalizeDbDriver(value) {
  const cleaned = String(value ?? '').trim().toLowerCase();
  return cleaned === DB_DRIVER_POSTGRES ? DB_DRIVER_POSTGRES : DB_DRIVER_SQLITE;
}
/**
 * Decide which database driver actually runs, given the requested driver
 * and the DEV_DB_SQLITE_FALLBACK developer escape hatch.
 */
export function resolveDatabaseRuntimeConfig(env = process.env) {
  const requestedDriver = normalizeDbDriver(env.DB_DRIVER);
  const sqliteDevFallback = parseBooleanFlag(env.DEV_DB_SQLITE_FALLBACK);
  const wantsPostgres = requestedDriver === DB_DRIVER_POSTGRES;

  // Postgres requested but the dev fallback is on: keep the requested
  // driver for reporting while actually running on sqlite.
  if (wantsPostgres && sqliteDevFallback) {
    return {
      requested_driver: requestedDriver,
      resolved_driver: DB_DRIVER_SQLITE,
      mode: 'sqlite_dev_fallback',
      sqlite_dev_fallback: true,
      postgres_adapter_available: true,
      postgres_store_compatibility: 'sqlite_fallback',
    };
  }

  return {
    requested_driver: requestedDriver,
    resolved_driver: requestedDriver,
    mode: wantsPostgres ? 'postgres_primary' : 'sqlite_primary',
    sqlite_dev_fallback: sqliteDevFallback,
    postgres_adapter_available: true,
    postgres_store_compatibility: wantsPostgres ? 'sync_bridge' : 'sqlite_primary',
  };
}
/**
 * Open the application database: the postgres sync bridge when the
 * resolved driver is postgres, otherwise a local sqlite file (created,
 * along with its parent directory, if missing).
 */
export function openDatabase(env = process.env) {
  const runtime = resolveDatabaseRuntimeConfig(env);
  if (runtime.resolved_driver === DB_DRIVER_POSTGRES) {
    return openPostgresSyncBridge(env);
  }
  const sqlitePath = env.DB_PATH || defaultDbPath;
  fs.mkdirSync(path.dirname(sqlitePath), { recursive: true });
  const sqliteDb = new DatabaseSync(sqlitePath);
  sqliteDb.exec('PRAGMA journal_mode = WAL;');
  sqliteDb.exec('PRAGMA foreign_keys = ON;');
  return sqliteDb;
}
/**
 * Open a postgres pool for migration tooling. Refuses to run unless the
 * environment explicitly requests the postgres driver.
 */
export async function openPostgresMigrationAdapter(env = process.env) {
  const runtime = resolveDatabaseRuntimeConfig(env);
  if (runtime.requested_driver !== DB_DRIVER_POSTGRES) {
    throw new Error('openPostgresMigrationAdapter requires DB_DRIVER=postgres.');
  }
  const { pool, runtime: postgresRuntime } = createPostgresPool(env);
  const mergedRuntime = { ...runtime, postgres: postgresRuntime };
  return { pool, runtime: mergedRuntime };
}
/** Apply the postgres schema (tables/indexes) through the migration pool. */
export async function initializePostgresMigrationSchema(pool) {
  await initializePostgresSchema(pool);
}
/** Run a connectivity/health probe against the migration pool and return its result. */
export async function checkPostgresMigrationHealth(pool) {
  return runPostgresHealthcheck(pool);
}
/**
 * Create the full schema (all statements are IF NOT EXISTS, so this is safe
 * to run on every boot) and then apply in-place migrations for databases
 * created by older builds: add late columns, backfill derived data, and
 * rebuild indexes whose definitions changed.
 *
 * NOTE(review): statement order below is load-bearing — backfills assume the
 * columns added by addColumnIfMissing already exist.
 *
 * @param {object} db handle exposing exec()/prepare() (sqlite DatabaseSync
 *   or a bridge with a compatible surface — presumably; confirm for postgres).
 */
export function initializeSchema(db) {
  // --- Base schema: idempotent CREATE TABLE / CREATE INDEX ---
  db.exec(`
  CREATE TABLE IF NOT EXISTS users (
    id TEXT PRIMARY KEY,
    discord_id TEXT,
    discord_avatar_url TEXT,
    runelite_account_hash TEXT UNIQUE NOT NULL,
    runelite_linked INTEGER NOT NULL DEFAULT 1,
    default_display_name TEXT NOT NULL,
    opt_out_hiscores INTEGER NOT NULL DEFAULT 0,
    opt_out_activity_feed INTEGER NOT NULL DEFAULT 0,
    last_seen_at TEXT,
    created_at TEXT NOT NULL
  );
  CREATE TABLE IF NOT EXISTS user_runelite_accounts (
    account_hash TEXT PRIMARY KEY,
    user_id TEXT NOT NULL,
    linked_at TEXT NOT NULL,
    is_active INTEGER NOT NULL DEFAULT 1,
    FOREIGN KEY (user_id) REFERENCES users (id)
  );
  CREATE INDEX IF NOT EXISTS idx_user_runelite_accounts_user_id
    ON user_runelite_accounts(user_id);
  CREATE TABLE IF NOT EXISTS oauth_sessions (
    id TEXT PRIMARY KEY,
    session_token TEXT UNIQUE NOT NULL,
    account_hash TEXT NOT NULL,
    expires_at TEXT NOT NULL,
    consumed_at TEXT
  );
  CREATE TABLE IF NOT EXISTS web_oauth_sessions (
    id TEXT PRIMARY KEY,
    session_token TEXT UNIQUE NOT NULL,
    expires_at TEXT NOT NULL,
    consumed_at TEXT
  );
  CREATE TABLE IF NOT EXISTS groups_table (
    id TEXT PRIMARY KEY,
    group_name TEXT NOT NULL,
    leader_user_id TEXT NOT NULL,
    join_code TEXT UNIQUE NOT NULL,
    join_code_expires_at TEXT NOT NULL,
    allow_open_invite_join INTEGER NOT NULL DEFAULT 0,
    open_invite_expires_at TEXT,
    opt_out_hiscores INTEGER NOT NULL DEFAULT 0,
    opt_out_activity_feed INTEGER NOT NULL DEFAULT 0,
    webhook_config_json TEXT NOT NULL DEFAULT '{}',
    created_at TEXT NOT NULL,
    FOREIGN KEY (leader_user_id) REFERENCES users (id)
  );
  CREATE TABLE IF NOT EXISTS group_members (
    id TEXT PRIMARY KEY,
    group_id TEXT NOT NULL,
    user_id TEXT,
    account_hash TEXT,
    expected_runescape_name TEXT NOT NULL,
    role TEXT NOT NULL,
    webhook_config_perms INTEGER NOT NULL DEFAULT 0,
    loadout_admin_perms INTEGER NOT NULL DEFAULT 0,
    joined_at TEXT NOT NULL,
    FOREIGN KEY (group_id) REFERENCES groups_table (id),
    FOREIGN KEY (user_id) REFERENCES users (id),
    FOREIGN KEY (account_hash) REFERENCES user_runelite_accounts (account_hash)
  );
  CREATE UNIQUE INDEX IF NOT EXISTS idx_group_members_claimed_group_user
    ON group_members(group_id, user_id)
    WHERE user_id IS NOT NULL;
  CREATE UNIQUE INDEX IF NOT EXISTS idx_group_members_account_hash
    ON group_members(account_hash)
    WHERE account_hash IS NOT NULL;
  CREATE TABLE IF NOT EXISTS group_join_requests (
    id TEXT PRIMARY KEY,
    group_id TEXT NOT NULL,
    requester_user_id TEXT NOT NULL,
    requester_account_hash TEXT,
    status TEXT NOT NULL,
    requested_at TEXT NOT NULL,
    resolved_at TEXT,
    resolved_by_user_id TEXT,
    FOREIGN KEY (group_id) REFERENCES groups_table (id),
    FOREIGN KEY (requester_user_id) REFERENCES users (id),
    FOREIGN KEY (requester_account_hash) REFERENCES user_runelite_accounts (account_hash),
    FOREIGN KEY (resolved_by_user_id) REFERENCES users (id)
  );
  CREATE INDEX IF NOT EXISTS idx_join_requests_group_status
    ON group_join_requests(group_id, status, requested_at DESC);
  CREATE INDEX IF NOT EXISTS idx_join_requests_requester
    ON group_join_requests(requester_user_id, requested_at DESC);
  CREATE TABLE IF NOT EXISTS storage_snapshots (
    id TEXT PRIMARY KEY,
    group_id TEXT NOT NULL,
    group_member_id TEXT,
    location_type TEXT NOT NULL,
    items_json TEXT NOT NULL,
    state_hash TEXT NOT NULL,
    updated_at TEXT NOT NULL,
    FOREIGN KEY (group_id) REFERENCES groups_table (id),
    FOREIGN KEY (group_member_id) REFERENCES group_members (id)
  );
  CREATE UNIQUE INDEX IF NOT EXISTS idx_storage_group_slot
    ON storage_snapshots(group_id, location_type)
    WHERE group_member_id IS NULL;
  CREATE UNIQUE INDEX IF NOT EXISTS idx_storage_member_slot
    ON storage_snapshots(group_id, group_member_id, location_type)
    WHERE group_member_id IS NOT NULL;
  CREATE TABLE IF NOT EXISTS audit_logs (
    id TEXT PRIMARY KEY,
    group_id TEXT NOT NULL,
    group_member_id TEXT,
    location_type TEXT NOT NULL,
    item_id INTEGER NOT NULL,
    quantity_delta INTEGER NOT NULL,
    created_at TEXT NOT NULL,
    FOREIGN KEY (group_id) REFERENCES groups_table (id),
    FOREIGN KEY (group_member_id) REFERENCES group_members (id)
  );
  CREATE INDEX IF NOT EXISTS idx_audit_group_created_at
    ON audit_logs(group_id, created_at DESC);
  CREATE TABLE IF NOT EXISTS activity_logs (
    id TEXT PRIMARY KEY,
    group_id TEXT NOT NULL,
    group_member_id TEXT,
    event_type TEXT NOT NULL,
    event_data_json TEXT NOT NULL,
    created_at TEXT NOT NULL,
    FOREIGN KEY (group_id) REFERENCES groups_table (id),
    FOREIGN KEY (group_member_id) REFERENCES group_members (id)
  );
  CREATE INDEX IF NOT EXISTS idx_activity_group_created_at
    ON activity_logs(group_id, created_at DESC);
  CREATE TABLE IF NOT EXISTS webhook_delivery_logs (
    id TEXT PRIMARY KEY,
    group_id TEXT NOT NULL,
    group_member_id TEXT,
    activity_id TEXT,
    event_type TEXT NOT NULL,
    delivery_status TEXT NOT NULL,
    attempt_count INTEGER NOT NULL DEFAULT 0,
    http_status INTEGER,
    error_message TEXT,
    webhook_host TEXT,
    payload_json TEXT NOT NULL,
    created_at TEXT NOT NULL,
    FOREIGN KEY (group_id) REFERENCES groups_table (id),
    FOREIGN KEY (group_member_id) REFERENCES group_members (id)
  );
  CREATE INDEX IF NOT EXISTS idx_webhook_delivery_group_created_at
    ON webhook_delivery_logs(group_id, created_at DESC);
  CREATE INDEX IF NOT EXISTS idx_webhook_delivery_status_created_at
    ON webhook_delivery_logs(delivery_status, created_at DESC);
  CREATE TABLE IF NOT EXISTS billing_customers (
    user_id TEXT PRIMARY KEY,
    stripe_customer_id TEXT UNIQUE NOT NULL,
    created_at TEXT NOT NULL,
    updated_at TEXT NOT NULL,
    FOREIGN KEY (user_id) REFERENCES users (id)
  );
  CREATE INDEX IF NOT EXISTS idx_billing_customers_customer_id
    ON billing_customers(stripe_customer_id);
  CREATE TABLE IF NOT EXISTS billing_subscriptions (
    stripe_subscription_id TEXT PRIMARY KEY,
    stripe_customer_id TEXT NOT NULL,
    status TEXT NOT NULL,
    price_id TEXT,
    current_period_end TEXT,
    cancel_at_period_end INTEGER NOT NULL DEFAULT 0,
    raw_json TEXT NOT NULL DEFAULT '{}',
    created_at TEXT NOT NULL,
    updated_at TEXT NOT NULL
  );
  CREATE INDEX IF NOT EXISTS idx_billing_subscriptions_customer_updated
    ON billing_subscriptions(stripe_customer_id, updated_at DESC);
  CREATE TABLE IF NOT EXISTS group_billing_subscriptions (
    stripe_subscription_id TEXT PRIMARY KEY,
    group_id TEXT NOT NULL,
    status TEXT NOT NULL,
    price_id TEXT,
    current_period_end TEXT,
    cancel_at_period_end INTEGER NOT NULL DEFAULT 0,
    raw_json TEXT NOT NULL DEFAULT '{}',
    created_at TEXT NOT NULL,
    updated_at TEXT NOT NULL,
    FOREIGN KEY (group_id) REFERENCES groups_table (id)
  );
  CREATE INDEX IF NOT EXISTS idx_group_billing_subscriptions_group_updated
    ON group_billing_subscriptions(group_id, updated_at DESC);
  CREATE TABLE IF NOT EXISTS user_boost_credits (
    user_id TEXT PRIMARY KEY,
    available_boosts INTEGER NOT NULL DEFAULT 0,
    updated_at TEXT NOT NULL,
    FOREIGN KEY (user_id) REFERENCES users (id)
  );
  CREATE TABLE IF NOT EXISTS group_manual_boost_allocations (
    user_id TEXT NOT NULL,
    group_id TEXT NOT NULL,
    boosts_assigned INTEGER NOT NULL,
    created_at TEXT NOT NULL,
    updated_at TEXT NOT NULL,
    PRIMARY KEY (user_id, group_id),
    FOREIGN KEY (user_id) REFERENCES users (id),
    FOREIGN KEY (group_id) REFERENCES groups_table (id)
  );
  CREATE INDEX IF NOT EXISTS idx_group_manual_boost_allocations_group
    ON group_manual_boost_allocations(group_id, updated_at DESC);
  CREATE TABLE IF NOT EXISTS user_subscription_boost_overrides (
    user_id TEXT NOT NULL,
    allocation_month TEXT NOT NULL,
    allocations_json TEXT NOT NULL DEFAULT '{}',
    configured_at TEXT NOT NULL,
    PRIMARY KEY (user_id, allocation_month),
    FOREIGN KEY (user_id) REFERENCES users (id)
  );
  CREATE TABLE IF NOT EXISTS item_catalog (
    item_id INTEGER PRIMARY KEY,
    name TEXT NOT NULL,
    ge_value INTEGER,
    is_tradeable INTEGER NOT NULL DEFAULT 1,
    icon_url TEXT,
    catalog_source TEXT,
    catalog_version TEXT,
    updated_at TEXT NOT NULL
  );
  CREATE INDEX IF NOT EXISTS idx_item_catalog_name
    ON item_catalog(name);
  CREATE TABLE IF NOT EXISTS item_catalog_snapshots (
    id TEXT PRIMARY KEY,
    source_name TEXT NOT NULL,
    source_version TEXT,
    checksum_sha256 TEXT NOT NULL,
    item_count INTEGER NOT NULL,
    created_at TEXT NOT NULL,
    notes TEXT
  );
  CREATE INDEX IF NOT EXISTS idx_item_catalog_snapshots_created_at
    ON item_catalog_snapshots(created_at DESC);
  CREATE TABLE IF NOT EXISTS item_catalog_snapshot_items (
    snapshot_id TEXT NOT NULL,
    item_id INTEGER NOT NULL,
    name TEXT NOT NULL,
    ge_value INTEGER,
    is_tradeable INTEGER NOT NULL DEFAULT 1,
    icon_url TEXT,
    PRIMARY KEY (snapshot_id, item_id),
    FOREIGN KEY (snapshot_id) REFERENCES item_catalog_snapshots (id) ON DELETE CASCADE
  );
  CREATE TABLE IF NOT EXISTS hiscores_fallback_state (
    group_member_id TEXT PRIMARY KEY,
    runescape_name TEXT NOT NULL,
    snapshot_json TEXT NOT NULL,
    updated_at TEXT NOT NULL,
    FOREIGN KEY (group_member_id) REFERENCES group_members (id)
  );
  CREATE TABLE IF NOT EXISTS group_wealth_snapshots (
    id TEXT PRIMARY KEY,
    group_id TEXT NOT NULL,
    total_value_gp INTEGER NOT NULL DEFAULT 0,
    captured_at TEXT NOT NULL,
    FOREIGN KEY (group_id) REFERENCES groups_table (id)
  );
  CREATE INDEX IF NOT EXISTS idx_group_wealth_snapshots_group_time
    ON group_wealth_snapshots(group_id, captured_at DESC);
  CREATE TABLE IF NOT EXISTS group_goals (
    id TEXT PRIMARY KEY,
    group_id TEXT NOT NULL,
    created_by_user_id TEXT NOT NULL,
    title TEXT NOT NULL,
    description TEXT,
    target_value_gp INTEGER NOT NULL,
    current_value_gp INTEGER NOT NULL DEFAULT 0,
    status TEXT NOT NULL DEFAULT 'ACTIVE',
    due_at TEXT,
    created_at TEXT NOT NULL,
    updated_at TEXT NOT NULL,
    FOREIGN KEY (group_id) REFERENCES groups_table (id),
    FOREIGN KEY (created_by_user_id) REFERENCES users (id)
  );
  CREATE INDEX IF NOT EXISTS idx_group_goals_group_status
    ON group_goals(group_id, status, updated_at DESC);
  CREATE TABLE IF NOT EXISTS group_loadouts (
    id TEXT PRIMARY KEY,
    group_id TEXT NOT NULL,
    owner_user_id TEXT NOT NULL,
    name TEXT NOT NULL,
    description TEXT,
    scope TEXT NOT NULL,
    created_at TEXT NOT NULL,
    updated_at TEXT NOT NULL,
    FOREIGN KEY (group_id) REFERENCES groups_table (id),
    FOREIGN KEY (owner_user_id) REFERENCES users (id)
  );
  CREATE INDEX IF NOT EXISTS idx_group_loadouts_group_scope_updated
    ON group_loadouts(group_id, scope, updated_at DESC);
  CREATE TABLE IF NOT EXISTS group_loadout_items (
    id TEXT PRIMARY KEY,
    loadout_id TEXT NOT NULL,
    item_id INTEGER NOT NULL,
    required_qty INTEGER NOT NULL,
    created_at TEXT NOT NULL,
    FOREIGN KEY (loadout_id) REFERENCES group_loadouts (id)
  );
  CREATE UNIQUE INDEX IF NOT EXISTS idx_group_loadout_items_unique
    ON group_loadout_items(loadout_id, item_id);
  CREATE TABLE IF NOT EXISTS feature_usage_events (
    id TEXT PRIMARY KEY,
    group_id TEXT NOT NULL,
    user_id TEXT,
    feature_key TEXT NOT NULL,
    action_key TEXT NOT NULL,
    created_at TEXT NOT NULL,
    FOREIGN KEY (group_id) REFERENCES groups_table (id),
    FOREIGN KEY (user_id) REFERENCES users (id)
  );
  CREATE INDEX IF NOT EXISTS idx_feature_usage_events_feature_time
    ON feature_usage_events(feature_key, action_key, created_at DESC);
  CREATE INDEX IF NOT EXISTS idx_feature_usage_events_group_time
    ON feature_usage_events(group_id, created_at DESC);
  `);
  // --- Column migrations: add columns introduced after the tables shipped ---
  addColumnIfMissing(db, 'groups_table', 'opt_out_hiscores INTEGER NOT NULL DEFAULT 0');
  addColumnIfMissing(db, 'groups_table', 'opt_out_activity_feed INTEGER NOT NULL DEFAULT 0');
  addColumnIfMissing(db, 'groups_table', 'allow_open_invite_join INTEGER NOT NULL DEFAULT 0');
  addColumnIfMissing(db, 'groups_table', "join_code_expires_at TEXT NOT NULL DEFAULT ''");
  addColumnIfMissing(db, 'groups_table', 'open_invite_expires_at TEXT');
  addColumnIfMissing(db, 'groups_table', "webhook_config_json TEXT NOT NULL DEFAULT '{}'");
  addColumnIfMissing(db, 'users', 'runelite_linked INTEGER NOT NULL DEFAULT 1');
  addColumnIfMissing(db, 'users', 'discord_avatar_url TEXT');
  addColumnIfMissing(db, 'users', 'last_seen_at TEXT');
  addColumnIfMissing(db, 'item_catalog', 'is_tradeable INTEGER NOT NULL DEFAULT 1');
  addColumnIfMissing(db, 'item_catalog', 'catalog_source TEXT');
  addColumnIfMissing(db, 'item_catalog', 'catalog_version TEXT');
  addColumnIfMissing(db, 'group_members', 'webhook_config_perms INTEGER NOT NULL DEFAULT 0');
  addColumnIfMissing(db, 'group_members', 'loadout_admin_perms INTEGER NOT NULL DEFAULT 0');
  addColumnIfMissing(db, 'group_members', 'account_hash TEXT');
  addColumnIfMissing(db, 'group_join_requests', 'requester_account_hash TEXT');
  // --- Data backfills for rows created before the columns existed ---
  // Give legacy groups a join-code expiry 7 days out.
  const defaultInviteExpiry = new Date(Date.now() + 7 * 24 * 60 * 60 * 1000).toISOString();
  db.prepare(
    `UPDATE groups_table
     SET join_code_expires_at = ?
     WHERE join_code_expires_at IS NULL
       OR join_code_expires_at = ''`
  ).run(defaultInviteExpiry);
  // Normalize empty webhook configs to an empty JSON object.
  db.prepare(
    `UPDATE groups_table
     SET webhook_config_json = '{}'
     WHERE webhook_config_json IS NULL
       OR webhook_config_json = ''`
  ).run();
  // Seed user_runelite_accounts from the legacy single-hash column on users.
  db.prepare(
    `INSERT OR IGNORE INTO user_runelite_accounts (
       account_hash,
       user_id,
       linked_at,
       is_active
     )
     SELECT
       runelite_account_hash,
       id,
       created_at,
       CASE WHEN runelite_linked = 1 THEN 1 ELSE 0 END
     FROM users
     WHERE runelite_account_hash IS NOT NULL
       AND runelite_account_hash <> ''`
  ).run();
  // Recompute users.runelite_linked from the presence of an active account row.
  db.prepare(
    `UPDATE users
     SET runelite_linked = CASE
       WHEN EXISTS (
         SELECT 1
         FROM user_runelite_accounts ura
         WHERE ura.user_id = users.id
           AND ura.is_active = 1
       ) THEN 1
       ELSE 0
     END`
  ).run();
  // Point the legacy column at the most recently linked active account.
  db.prepare(
    `UPDATE users
     SET runelite_account_hash = COALESCE(
       (
         SELECT ura.account_hash
         FROM user_runelite_accounts ura
         WHERE ura.user_id = users.id
           AND ura.is_active = 1
         ORDER BY ura.linked_at DESC
         LIMIT 1
       ),
       runelite_account_hash
     )`
  ).run();
  // Backfill group_members.account_hash for claimed members missing one.
  db.prepare(
    `UPDATE group_members
     SET account_hash = (
       SELECT ura.account_hash
       FROM user_runelite_accounts ura
       WHERE ura.user_id = group_members.user_id
         AND ura.is_active = 1
       ORDER BY ura.linked_at DESC
       LIMIT 1
     )
     WHERE group_members.user_id IS NOT NULL
       AND (group_members.account_hash IS NULL OR group_members.account_hash = '')`
  ).run();
  // Backfill requester_account_hash on historical join requests.
  db.prepare(
    `UPDATE group_join_requests
     SET requester_account_hash = (
       SELECT ura.account_hash
       FROM user_runelite_accounts ura
       WHERE ura.user_id = group_join_requests.requester_user_id
         AND ura.is_active = 1
       ORDER BY ura.linked_at DESC
       LIMIT 1
     )
     WHERE requester_account_hash IS NULL
       OR requester_account_hash = ''`
  ).run();
  // --- Index migrations: drop the superseded index, recreate current ones ---
  db.exec(`DROP INDEX IF EXISTS idx_group_members_claimed_user;`);
  db.exec(
    `CREATE UNIQUE INDEX IF NOT EXISTS idx_group_members_claimed_group_user
     ON group_members(group_id, user_id)
     WHERE user_id IS NOT NULL;`
  );
  db.exec(
    `CREATE UNIQUE INDEX IF NOT EXISTS idx_group_members_account_hash
     ON group_members(account_hash)
     WHERE account_hash IS NOT NULL;`
  );
}
/** Current UTC timestamp as an ISO-8601 string. */
export function nowIso() {
  const now = new Date();
  return now.toISOString();
}
// SQLite has no "ADD COLUMN IF NOT EXISTS", so attempt the ALTER and
// swallow only the duplicate-column failure; any other error propagates.
function addColumnIfMissing(db, tableName, columnDefinition) {
  try {
    db.exec(`ALTER TABLE ${tableName} ADD COLUMN ${columnDefinition};`);
  } catch (error) {
    const text = String(error?.message ?? '').toLowerCase();
    const isDuplicate =
      text.includes('duplicate column name') || text.includes('already exists');
    if (!isDuplicate) {
      throw error;
    }
  }
}

View File

@@ -0,0 +1,79 @@
# Full-stack deployment: web app + webhook worker + postgres + redis.
services:
  app:
    build: .
    ports:
      - "3000:3000"
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    environment:
      PORT: 3000
      DB_DRIVER: postgres
      POSTGRES_URL: postgres://gigagimbank:gigagimbank@postgres:5432/gigagimbank
      # NOTE: override in production — this default is a placeholder secret.
      WEB_SESSION_SECRET: ${WEB_SESSION_SECRET:-change_me_in_production}
      DISCORD_CLIENT_ID: ${DISCORD_CLIENT_ID:-}
      DISCORD_CLIENT_SECRET: ${DISCORD_CLIENT_SECRET:-}
      DISCORD_REDIRECT_URI: ${DISCORD_REDIRECT_URI:-http://localhost:3000/api/v1/auth/discord/callback}
      POST_AUTH_REDIRECT_URL: ${POST_AUTH_REDIRECT_URL:-http://localhost:3000/}
      WEB_POST_AUTH_REDIRECT_URL: ${WEB_POST_AUTH_REDIRECT_URL:-http://localhost:3000/}
      ENFORCE_GROUP_AUTH: ${ENFORCE_GROUP_AUTH:-1}
      INTERNAL_API_TOKEN: ${INTERNAL_API_TOKEN:-}
      # App enqueues webhook jobs; the worker service consumes them.
      ACTIVITY_WEBHOOK_QUEUE_DRIVER: bullmq
      ACTIVITY_WEBHOOK_QUEUE_ROLE: producer
      ACTIVITY_WEBHOOK_QUEUE_REDIS_URL: redis://redis:6379
      FEATURE_FLAGS: ${FEATURE_FLAGS:-}
      STRIPE_SECRET_KEY: ${STRIPE_SECRET_KEY:-}
      STRIPE_WEBHOOK_SECRET: ${STRIPE_WEBHOOK_SECRET:-}
      DEV_MOCK_DISCORD_OAUTH: ${DEV_MOCK_DISCORD_OAUTH:-0}
    restart: unless-stopped
  worker:
    build: .
    command: ["node", "src/activityWebhookWorker.js"]
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    environment:
      DB_DRIVER: postgres
      POSTGRES_URL: postgres://gigagimbank:gigagimbank@postgres:5432/gigagimbank
      ACTIVITY_WEBHOOK_QUEUE_DRIVER: bullmq
      ACTIVITY_WEBHOOK_QUEUE_REDIS_URL: redis://redis:6379
    restart: unless-stopped
  postgres:
    image: postgres:17-alpine
    volumes:
      - pgdata:/var/lib/postgresql/data
    environment:
      POSTGRES_USER: gigagimbank
      POSTGRES_PASSWORD: gigagimbank
      POSTGRES_DB: gigagimbank
    ports:
      - "5432:5432"
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U gigagimbank"]
      interval: 5s
      timeout: 3s
      retries: 5
    restart: unless-stopped
  redis:
    image: redis:7-alpine
    volumes:
      - redisdata:/data
    ports:
      - "6379:6379"
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 3s
      retries: 5
    restart: unless-stopped
volumes:
  pgdata:
  redisdata:

View File

@@ -0,0 +1,64 @@
# Dev variant: services read their config from .env.docker; only the
# container-network hosts for postgres/redis are overridden inline.
services:
  app:
    build: .
    ports:
      - "3000:3000"
    env_file: .env.docker
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    environment:
      # Override DB/Redis hosts to use docker network names
      POSTGRES_URL: postgres://gigagimbank:gigagimbank@postgres:5432/gigagimbank
      ACTIVITY_WEBHOOK_QUEUE_REDIS_URL: redis://redis:6379
    restart: unless-stopped
  worker:
    build: .
    command: ["node", "src/activityWebhookWorker.js"]
    env_file: .env.docker
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    environment:
      POSTGRES_URL: postgres://gigagimbank:gigagimbank@postgres:5432/gigagimbank
      ACTIVITY_WEBHOOK_QUEUE_REDIS_URL: redis://redis:6379
    restart: unless-stopped
  postgres:
    image: postgres:17-alpine
    volumes:
      - pgdata:/var/lib/postgresql/data
    environment:
      POSTGRES_USER: gigagimbank
      POSTGRES_PASSWORD: gigagimbank
      POSTGRES_DB: gigagimbank
    ports:
      - "5432:5432"
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U gigagimbank"]
      interval: 5s
      timeout: 3s
      retries: 5
    restart: unless-stopped
  redis:
    image: redis:7-alpine
    volumes:
      - redisdata:/data
    ports:
      - "6379:6379"
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 3s
      retries: 5
    restart: unless-stopped
volumes:
  pgdata:
  redisdata:

View File

@@ -0,0 +1,45 @@
# === Docker Compose dev environment (postgres + redis) ===
# Discord OAuth — mock enabled, no real app needed
DISCORD_CLIENT_ID=
DISCORD_CLIENT_SECRET=
DISCORD_REDIRECT_URI=http://localhost:3000/api/v1/auth/discord/callback
POST_AUTH_REDIRECT_URL=http://localhost:3000/
WEB_POST_AUTH_REDIRECT_URL=http://localhost:3000/
DEV_MOCK_DISCORD_OAUTH=1
DEV_MOCK_DISCORD_ACCOUNTS=[{"id":"dev_qm","discord_id":"dev_discord_qm","display_name":"Dev Quartermaster"}]
# Server
PORT=3000
# Database — postgres via docker-compose
# (host URLs here are for running the app outside docker; compose overrides
# them with container-network hosts)
DB_DRIVER=postgres
POSTGRES_URL=postgres://gigagimbank:gigagimbank@localhost:5432/gigagimbank
# Auth / sessions — dev-only secrets; never reuse in production
ENFORCE_GROUP_AUTH=0
WEB_SESSION_SECRET=docker_dev_session_secret
INTERNAL_API_TOKEN=docker_dev_internal_token
WEB_SESSION_INACTIVITY_DAYS=30
GROUP_INVITE_TTL_HOURS=168
OPEN_INVITE_TTL_HOURS=24
# Activity webhook queue — Redis via docker-compose
ACTIVITY_WEBHOOK_QUEUE_DRIVER=bullmq
ACTIVITY_WEBHOOK_QUEUE_ROLE=producer
ACTIVITY_WEBHOOK_QUEUE_REDIS_URL=redis://localhost:6379
ACTIVITY_WEBHOOK_QUEUE_NAME=ggb-activity-webhook
# Dev toggles
ALLOW_FALLBACK_ACCOUNT_HASH=0
SHOW_UNTRADEABLE_ITEMS=0
SHOW_UNKNOWN_ITEMS=0
DEV_WIPE_NON_TEST_DATA_ON_BOOT=1
DISABLE_BOOT_SYNC_JOBS=0
# Sync intervals (milliseconds)
ITEM_CATALOG_SYNC_MS=86400000
HISCORES_SYNC_MS=900000
# Feature flags
FEATURE_FLAGS=

View File

@@ -0,0 +1,35 @@
# Stage 1: production server deps + backend sources.
FROM node:22-alpine AS base
WORKDIR /app
# Install dependencies (production only — dev deps are not needed at runtime)
COPY package.json package-lock.json* ./
RUN npm ci --omit=dev
# Copy backend source. web/ is intentionally NOT copied here: the frontend
# is built in the dedicated web-build stage and only its dist/ is shipped,
# so copying web/ into this stage only bloated the layer cache.
COPY src/ src/
COPY config/ config/
COPY public/ public/

# Stage 2: build the React frontend (needs dev deps, hence full npm ci).
FROM node:22-alpine AS web-build
WORKDIR /app
COPY package.json package-lock.json* ./
RUN npm ci
COPY web/ web/
RUN npx vite build --config web/vite.config.js

# Stage 3: minimal runtime image assembled from the two build stages.
FROM node:22-alpine
WORKDIR /app
COPY --from=base /app/node_modules node_modules/
COPY --from=base /app/package.json .
COPY --from=base /app/src/ src/
COPY --from=base /app/config/ config/
COPY --from=base /app/public/ public/
COPY --from=web-build /app/web/dist/ web/dist/
EXPOSE 3000
CMD ["node", "src/server.js"]

View File

@@ -0,0 +1,11 @@
# Keep the docker build context lean: deps, runtime data, secrets, tests,
# and build outputs are either rebuilt in-image or never needed there.
node_modules
data
.git
.env
*.sqlite
runelite-plugin
e2e
test
.gitea
.claude
web/dist

View File

@@ -0,0 +1,46 @@
{
"models": {
"main": {
"provider": "anthropic",
"modelId": "claude-3-7-sonnet-20250219",
"maxTokens": 120000,
"temperature": 0.2
},
"research": {
"provider": "perplexity",
"modelId": "sonar-pro",
"maxTokens": 8700,
"temperature": 0.1
},
"fallback": {
"provider": "anthropic",
"modelId": "claude-3-7-sonnet-20250219",
"maxTokens": 120000,
"temperature": 0.2
}
},
"global": {
"logLevel": "info",
"debug": false,
"defaultNumTasks": 10,
"defaultSubtasks": 5,
"defaultPriority": "medium",
"projectName": "Taskmaster",
"ollamaBaseURL": "http://localhost:11434/api",
"bedrockBaseURL": "https://bedrock.us-east-1.amazonaws.com",
"responseLanguage": "English",
"enableCodebaseAnalysis": true,
"enableProxy": false,
"anonymousTelemetry": true,
"defaultTag": "master",
"azureOpenaiBaseURL": "https://your-endpoint.openai.azure.com/",
"userId": "1234567890"
},
"claudeCode": {},
"codexCli": {},
"grokCli": {
"timeout": 120000,
"workingDirectory": null,
"defaultModel": "grok-4-latest"
}
}

View File

@@ -0,0 +1,71 @@
"""
Progress Tracking Utilities
===========================
Functions for tracking and displaying progress using task-master's task list.
"""
import json
from pathlib import Path
TASKS_FILE = ".taskmaster/tasks/tasks.json"
def _load_tasks(project_dir: Path) -> list[dict]:
    """Load the task list from task-master's tasks.json.

    Returns an empty list when the file is missing, unreadable, or not
    valid JSON, so callers never need to special-case an uninitialized
    project. The missing-file case is handled via the exception (EAFP)
    rather than a racy exists() pre-check.
    """
    tasks_file = project_dir / TASKS_FILE
    try:
        with open(tasks_file) as f:
            data = json.load(f)
    except (json.JSONDecodeError, OSError):
        # OSError covers FileNotFoundError and the old IOError alias.
        return []
    return data.get("tasks", [])
def get_available_tasks(project_dir: Path) -> list[dict]:
    """Return tasks that are pending with all dependencies done."""
    tasks = _load_tasks(project_dir)
    completed_ids = {task["id"] for task in tasks if task.get("status") == "done"}
    ready = []
    for task in tasks:
        if task.get("status") != "pending":
            continue
        if all(dep in completed_ids for dep in task.get("dependencies", [])):
            ready.append(task)
    return ready
def all_tasks_done(project_dir: Path) -> bool:
    """Return True if every task is done (False for an empty/missing list)."""
    tasks = _load_tasks(project_dir)
    if not tasks:
        return False
    return all(task.get("status") == "done" for task in tasks)
def count_task_progress(project_dir: Path) -> tuple[int, int]:
    """Return (done_count, total_count)."""
    tasks = _load_tasks(project_dir)
    completed = [task for task in tasks if task.get("status") == "done"]
    return len(completed), len(tasks)
def is_initialized(project_dir: Path) -> bool:
    """Return True if task-master has been initialized with tasks."""
    tasks_file = project_dir / TASKS_FILE
    return tasks_file.exists()
def print_session_header(label: str) -> None:
    """Print a formatted session header: a rule, the upper-cased label, a rule."""
    bar = "=" * 70
    print(f"\n{bar}")
    print(f" {label.upper()}")
    print(bar)
    print()
def print_progress_summary(project_dir: Path) -> None:
    """Print a one-line summary of current task progress."""
    done, total = count_task_progress(project_dir)
    if not total:
        print("\nProgress: no tasks found (initializer not yet run?)")
        return
    pct = done / total * 100
    print(f"\nProgress: {done}/{total} tasks done ({pct:.1f}%)")

View File

@@ -0,0 +1,103 @@
"""
Progress Tracking Utilities
===========================
Functions for tracking and displaying progress using task-master's task list.
"""
import json
from pathlib import Path
TASKS_FILE = ".taskmaster/tasks/tasks.json"
def _load_tasks(project_dir: Path) -> list[dict]:
    """Load the task list from task-master's tasks.json.

    Returns an empty list when the file is missing, unreadable, or not
    valid JSON, so callers never need to special-case an uninitialized
    project. The missing-file case is handled via the exception (EAFP)
    rather than a racy exists() pre-check.
    """
    tasks_file = project_dir / TASKS_FILE
    try:
        with open(tasks_file) as f:
            data = json.load(f)
    except (json.JSONDecodeError, OSError):
        # OSError covers FileNotFoundError and the old IOError alias.
        return []
    return data.get("tasks", [])
def _save_tasks(project_dir: Path, tasks: list[dict]) -> None:
    """Write the tasks list back to tasks.json.

    Read-modify-write keeps any sibling keys in the JSON envelope intact.
    Fix: if tasks.json has vanished since it was loaded (cleanup or crash
    between load and save), start from an empty envelope instead of dying
    with FileNotFoundError.
    """
    tasks_file = project_dir / TASKS_FILE
    try:
        with open(tasks_file) as f:
            data = json.load(f)
    except FileNotFoundError:
        data = {}
    data["tasks"] = tasks
    with open(tasks_file, "w") as f:
        json.dump(data, f, indent=2)
def get_available_tasks(project_dir: Path) -> list[dict]:
    """Return every pending task whose dependencies are all completed.

    A task is available when its status is 'pending' and each id listed in
    its 'dependencies' refers to a task whose status is 'done'.
    """
    all_tasks = _load_tasks(project_dir)
    completed_ids = set()
    for task in all_tasks:
        if task.get("status") == "done":
            completed_ids.add(task["id"])
    available = []
    for task in all_tasks:
        if task.get("status") != "pending":
            continue
        if all(dep in completed_ids for dep in task.get("dependencies", [])):
            available.append(task)
    return available
def all_tasks_done(project_dir: Path) -> bool:
    """Return True when at least one task exists and none is unfinished."""
    task_list = _load_tasks(project_dir)
    if not task_list:
        return False
    for task in task_list:
        if task.get("status") != "done":
            return False
    return True
def count_task_progress(project_dir: Path) -> tuple[int, int]:
    """Return a (done_count, total_count) pair for the project's tasks."""
    task_list = _load_tasks(project_dir)
    finished = len([t for t in task_list if t.get("status") == "done"])
    return finished, len(task_list)
def reset_stale_tasks(project_dir: Path) -> int:
    """Flip every 'in-progress' task back to 'pending'; return how many.

    After an orchestrator restart no agent is actually running, so a task
    left 'in-progress' by an interrupted run must be made claimable again.
    """
    tasks = _load_tasks(project_dir)
    stale = [t for t in tasks if t.get("status") == "in-progress"]
    for task in stale:
        task["status"] = "pending"
    if stale:
        _save_tasks(project_dir, tasks)
        print(f"Reset {len(stale)} stale in-progress task(s) back to pending")
    return len(stale)
def is_initialized(project_dir: Path) -> bool:
    """Return True once a tasks.json exists, i.e. task-master was initialized."""
    tasks_path = project_dir / TASKS_FILE
    return tasks_path.exists()
def print_session_header(label: str) -> None:
    """Print *label* upper-cased as a banner between two 70-char rules."""
    rule = "=" * 70
    print("\n" + rule)
    print(f" {label.upper()}")
    print(rule)
    print()
def print_progress_summary(project_dir: Path) -> None:
    """Print a one-line completion summary, or a hint when no tasks exist."""
    done, total = count_task_progress(project_dir)
    if not total:
        print("\nProgress: no tasks found (initializer not yet run?)")
        return
    pct = (done / total) * 100
    print(f"\nProgress: {done}/{total} tasks done ({pct:.1f}%)")

View File

@@ -0,0 +1,119 @@
"""
Progress Tracking Utilities
===========================
Functions for tracking and displaying progress using task-master's task list.
"""
import json
from pathlib import Path
TASKS_FILE = ".taskmaster/tasks/tasks.json"
def _load_tasks(project_dir: Path) -> list[dict]:
    """Read tasks.json and return its task list; [] when absent or unreadable."""
    tasks_path = project_dir / TASKS_FILE
    if not tasks_path.exists():
        return []
    try:
        payload = json.loads(tasks_path.read_text())
        return payload.get("tasks", [])
    except (json.JSONDecodeError, IOError):
        return []
def _save_tasks(project_dir: Path, tasks: list[dict]) -> None:
    """Write the tasks list back to tasks.json.

    Read-modify-write keeps any sibling keys in the JSON envelope intact.
    Fix: if tasks.json has vanished since it was loaded (cleanup or crash
    between load and save), start from an empty envelope instead of dying
    with FileNotFoundError.
    """
    tasks_file = project_dir / TASKS_FILE
    try:
        with open(tasks_file) as f:
            data = json.load(f)
    except FileNotFoundError:
        data = {}
    data["tasks"] = tasks
    with open(tasks_file, "w") as f:
        json.dump(data, f, indent=2)
def get_available_tasks(project_dir: Path) -> list[dict]:
    """Return every pending task whose dependencies are all completed.

    A task is available when its status is 'pending' and each id listed in
    its 'dependencies' refers to a task whose status is 'done'.
    """
    all_tasks = _load_tasks(project_dir)
    completed_ids = set()
    for task in all_tasks:
        if task.get("status") == "done":
            completed_ids.add(task["id"])
    available = []
    for task in all_tasks:
        if task.get("status") != "pending":
            continue
        if all(dep in completed_ids for dep in task.get("dependencies", [])):
            available.append(task)
    return available
def all_tasks_done(project_dir: Path) -> bool:
    """Return True when at least one task exists and none is unfinished."""
    task_list = _load_tasks(project_dir)
    if not task_list:
        return False
    for task in task_list:
        if task.get("status") != "done":
            return False
    return True
def count_task_progress(project_dir: Path) -> tuple[int, int]:
    """Return a (done_count, total_count) pair for the project's tasks."""
    task_list = _load_tasks(project_dir)
    finished = len([t for t in task_list if t.get("status") == "done"])
    return finished, len(task_list)
def set_task_status(project_dir: Path, task_id: int, status: str) -> None:
    """
    Directly update a task's status in tasks.json.

    Used by the orchestrator as a belt-and-suspenders guarantee — even if
    the agent's MCP call to set_task_status fails or never fires, the
    orchestrator ensures the canonical tasks.json reflects reality.

    Fix: the file is rewritten only when *task_id* was actually found.
    Previously an unknown id (or a missing tasks.json, which loads as [])
    still triggered a write, which was pointless and could raise
    FileNotFoundError from the save path.
    """
    tasks = _load_tasks(project_dir)
    for task in tasks:
        if task.get("id") == task_id:
            task["status"] = status
            _save_tasks(project_dir, tasks)
            return
def reset_stale_tasks(project_dir: Path) -> int:
    """Flip every 'in-progress' task back to 'pending'; return how many.

    After an orchestrator restart no agent is actually running, so a task
    left 'in-progress' by an interrupted run must be made claimable again.
    """
    tasks = _load_tasks(project_dir)
    stale = [t for t in tasks if t.get("status") == "in-progress"]
    for task in stale:
        task["status"] = "pending"
    if stale:
        _save_tasks(project_dir, tasks)
        print(f"Reset {len(stale)} stale in-progress task(s) back to pending")
    return len(stale)
def is_initialized(project_dir: Path) -> bool:
    """Return True once a tasks.json exists, i.e. task-master was initialized."""
    tasks_path = project_dir / TASKS_FILE
    return tasks_path.exists()
def print_session_header(label: str) -> None:
    """Print *label* upper-cased as a banner between two 70-char rules."""
    rule = "=" * 70
    print("\n" + rule)
    print(f" {label.upper()}")
    print(rule)
    print()
def print_progress_summary(project_dir: Path) -> None:
    """Print a one-line completion summary, or a hint when no tasks exist."""
    done, total = count_task_progress(project_dir)
    if not total:
        print("\nProgress: no tasks found (initializer not yet run?)")
        return
    pct = (done / total) * 100
    print(f"\nProgress: {done}/{total} tasks done ({pct:.1f}%)")

View File

@@ -0,0 +1,129 @@
"""
Progress Tracking Utilities
===========================
Functions for tracking and displaying progress using task-master's task list.
"""
import json
from pathlib import Path
TASKS_FILE = ".taskmaster/tasks/tasks.json"
def _load_tasks(project_dir: Path) -> list[dict]:
    """Return the task list from tasks.json, or [] when missing/unreadable.

    task-master writes {"master": {"tasks": [...]}} but some versions use a
    flat {"tasks": [...]} structure; both envelopes are handled.
    Fix: a file whose top level is not a JSON object (e.g. a bare array)
    is now treated as unreadable instead of crashing the key lookups.
    """
    tasks_file = project_dir / TASKS_FILE
    if not tasks_file.exists():
        return []
    try:
        with open(tasks_file) as f:
            data = json.load(f)
    except (json.JSONDecodeError, IOError):
        return []
    if not isinstance(data, dict):
        return []
    if "tasks" in data:
        return data["tasks"]
    master = data.get("master")
    if isinstance(master, dict) and "tasks" in master:
        return master["tasks"]
    return []
def _save_tasks(project_dir: Path, tasks: list[dict]) -> None:
    """Write the tasks list back to tasks.json, preserving the envelope.

    Read-modify-write keeps sibling keys intact, and the list is written
    into whichever envelope ({"master": {"tasks": ...}} or flat
    {"tasks": ...}) the file already uses.
    Fix: if tasks.json has vanished since it was loaded, fall back to a
    fresh flat envelope instead of dying with FileNotFoundError.
    """
    tasks_file = project_dir / TASKS_FILE
    try:
        with open(tasks_file) as f:
            data = json.load(f)
    except FileNotFoundError:
        data = {}
    # Write back into whichever envelope format was used
    if "master" in data and "tasks" in data["master"]:
        data["master"]["tasks"] = tasks
    else:
        data["tasks"] = tasks
    with open(tasks_file, "w") as f:
        json.dump(data, f, indent=2)
def get_available_tasks(project_dir: Path) -> list[dict]:
    """Return every pending task whose dependencies are all completed.

    A task is available when its status is 'pending' and each id listed in
    its 'dependencies' refers to a task whose status is 'done'.
    """
    all_tasks = _load_tasks(project_dir)
    completed_ids = set()
    for task in all_tasks:
        if task.get("status") == "done":
            completed_ids.add(task["id"])
    available = []
    for task in all_tasks:
        if task.get("status") != "pending":
            continue
        if all(dep in completed_ids for dep in task.get("dependencies", [])):
            available.append(task)
    return available
def all_tasks_done(project_dir: Path) -> bool:
    """Return True when at least one task exists and none is unfinished."""
    task_list = _load_tasks(project_dir)
    if not task_list:
        return False
    for task in task_list:
        if task.get("status") != "done":
            return False
    return True
def count_task_progress(project_dir: Path) -> tuple[int, int]:
    """Return a (done_count, total_count) pair for the project's tasks."""
    task_list = _load_tasks(project_dir)
    finished = len([t for t in task_list if t.get("status") == "done"])
    return finished, len(task_list)
def set_task_status(project_dir: Path, task_id: int, status: str) -> None:
    """
    Directly update a task's status in tasks.json.

    Used by the orchestrator as a belt-and-suspenders guarantee — even if
    the agent's MCP call to set_task_status fails or never fires, the
    orchestrator ensures the canonical tasks.json reflects reality.

    Fix: the file is rewritten only when *task_id* was actually found.
    Previously an unknown id (or a missing tasks.json, which loads as [])
    still triggered a write, which was pointless and could raise
    FileNotFoundError from the save path.
    """
    tasks = _load_tasks(project_dir)
    for task in tasks:
        if task.get("id") == task_id:
            task["status"] = status
            _save_tasks(project_dir, tasks)
            return
def reset_stale_tasks(project_dir: Path) -> int:
    """Flip every 'in-progress' task back to 'pending'; return how many.

    After an orchestrator restart no agent is actually running, so a task
    left 'in-progress' by an interrupted run must be made claimable again.
    """
    tasks = _load_tasks(project_dir)
    stale = [t for t in tasks if t.get("status") == "in-progress"]
    for task in stale:
        task["status"] = "pending"
    if stale:
        _save_tasks(project_dir, tasks)
        print(f"Reset {len(stale)} stale in-progress task(s) back to pending")
    return len(stale)
def is_initialized(project_dir: Path) -> bool:
    """Return True once a tasks.json exists, i.e. task-master was initialized."""
    tasks_path = project_dir / TASKS_FILE
    return tasks_path.exists()
def print_session_header(label: str) -> None:
    """Print *label* upper-cased as a banner between two 70-char rules."""
    rule = "=" * 70
    print("\n" + rule)
    print(f" {label.upper()}")
    print(rule)
    print()
def print_progress_summary(project_dir: Path) -> None:
    """Print a one-line completion summary, or a hint when no tasks exist."""
    done, total = count_task_progress(project_dir)
    if not total:
        print("\nProgress: no tasks found (initializer not yet run?)")
        return
    pct = (done / total) * 100
    print(f"\nProgress: {done}/{total} tasks done ({pct:.1f}%)")

View File

@@ -0,0 +1,130 @@
"""
Progress Tracking Utilities
===========================
Functions for tracking and displaying progress using task-master's task list.
"""
import json
from pathlib import Path
TASKS_FILE = ".taskmaster/tasks/tasks.json"
def _load_tasks(project_dir: Path) -> list[dict]:
    """Return the task list from tasks.json, or [] when missing/unreadable.

    task-master writes {"master": {"tasks": [...]}} but some versions use a
    flat {"tasks": [...]} structure; both envelopes are handled.
    Fix: a file whose top level is not a JSON object (e.g. a bare array)
    is now treated as unreadable instead of crashing the key lookups.
    """
    tasks_file = project_dir / TASKS_FILE
    if not tasks_file.exists():
        return []
    try:
        with open(tasks_file) as f:
            data = json.load(f)
    except (json.JSONDecodeError, IOError):
        return []
    if not isinstance(data, dict):
        return []
    if "tasks" in data:
        return data["tasks"]
    master = data.get("master")
    if isinstance(master, dict) and "tasks" in master:
        return master["tasks"]
    return []
def _save_tasks(project_dir: Path, tasks: list[dict]) -> None:
    """Write the tasks list back to tasks.json, preserving the envelope.

    Read-modify-write keeps sibling keys intact, and the list is written
    into whichever envelope ({"master": {"tasks": ...}} or flat
    {"tasks": ...}) the file already uses.
    Fix: if tasks.json has vanished since it was loaded, fall back to a
    fresh flat envelope instead of dying with FileNotFoundError.
    """
    tasks_file = project_dir / TASKS_FILE
    try:
        with open(tasks_file) as f:
            data = json.load(f)
    except FileNotFoundError:
        data = {}
    # Write back into whichever envelope format was used
    if "master" in data and "tasks" in data["master"]:
        data["master"]["tasks"] = tasks
    else:
        data["tasks"] = tasks
    with open(tasks_file, "w") as f:
        json.dump(data, f, indent=2)
def get_available_tasks(project_dir: Path) -> list[dict]:
    """Return every pending task whose dependencies are all completed.

    A task is available when its status is 'pending' and each id listed in
    its 'dependencies' refers to a task whose status is 'done'.
    """
    all_tasks = _load_tasks(project_dir)
    completed_ids = set()
    for task in all_tasks:
        if task.get("status") == "done":
            completed_ids.add(task["id"])
    available = []
    for task in all_tasks:
        if task.get("status") != "pending":
            continue
        if all(dep in completed_ids for dep in task.get("dependencies", [])):
            available.append(task)
    return available
def all_tasks_done(project_dir: Path) -> bool:
    """Return True when at least one task exists and none is unfinished."""
    task_list = _load_tasks(project_dir)
    if not task_list:
        return False
    for task in task_list:
        if task.get("status") != "done":
            return False
    return True
def count_task_progress(project_dir: Path) -> tuple[int, int]:
    """Return a (done_count, total_count) pair for the project's tasks."""
    task_list = _load_tasks(project_dir)
    finished = len([t for t in task_list if t.get("status") == "done"])
    return finished, len(task_list)
def set_task_status(project_dir: Path, task_id: int, status: str) -> None:
    """
    Directly update a task's status in tasks.json.

    Used by the orchestrator as a belt-and-suspenders guarantee — even if
    the agent's MCP call to set_task_status fails or never fires, the
    orchestrator ensures the canonical tasks.json reflects reality.

    Fix: the file is rewritten only when *task_id* was actually found.
    Previously an unknown id (or a missing tasks.json, which loads as [])
    still triggered a write, which was pointless and could raise
    FileNotFoundError from the save path.
    """
    tasks = _load_tasks(project_dir)
    for task in tasks:
        if task.get("id") == task_id:
            task["status"] = status
            _save_tasks(project_dir, tasks)
            return
def reset_stale_tasks(project_dir: Path) -> int:
    """Flip every 'in-progress' task back to 'pending'; return how many.

    After an orchestrator restart no agent is actually running, so a task
    left 'in-progress' by an interrupted run must be made claimable again.
    """
    tasks = _load_tasks(project_dir)
    stale = [t for t in tasks if t.get("status") == "in-progress"]
    for task in stale:
        task["status"] = "pending"
    if stale:
        _save_tasks(project_dir, tasks)
        # Local import keeps the colors module an optional, late dependency.
        from colors import style, WARN
        print(style(f"Reset {len(stale)} stale in-progress task(s) back to pending", WARN))
    return len(stale)
def is_initialized(project_dir: Path) -> bool:
    """Return True once a tasks.json exists, i.e. task-master was initialized."""
    tasks_path = project_dir / TASKS_FILE
    return tasks_path.exists()
def print_session_header(label: str) -> None:
    """Print *label* upper-cased as a banner between two 70-char rules."""
    rule = "=" * 70
    print("\n" + rule)
    print(f" {label.upper()}")
    print(rule)
    print()
def print_progress_summary(project_dir: Path) -> None:
    """Print a one-line completion summary, or a hint when no tasks exist."""
    done, total = count_task_progress(project_dir)
    if not total:
        print("\nProgress: no tasks found (initializer not yet run?)")
        return
    pct = (done / total) * 100
    print(f"\nProgress: {done}/{total} tasks done ({pct:.1f}%)")

View File

@@ -0,0 +1,163 @@
"""
Progress Tracking Utilities
===========================
Functions for tracking and displaying progress using task-master's task list.
"""
import json
from pathlib import Path
TASKS_FILE = ".taskmaster/tasks/tasks.json"
def _load_tasks(project_dir: Path) -> list[dict]:
    """Return the task list from tasks.json, or [] when missing/unreadable.

    task-master writes {"master": {"tasks": [...]}} but some versions use a
    flat {"tasks": [...]} structure; both envelopes are handled.
    Fix: a file whose top level is not a JSON object (e.g. a bare array)
    is now treated as unreadable instead of crashing the key lookups.
    """
    tasks_file = project_dir / TASKS_FILE
    if not tasks_file.exists():
        return []
    try:
        with open(tasks_file) as f:
            data = json.load(f)
    except (json.JSONDecodeError, IOError):
        return []
    if not isinstance(data, dict):
        return []
    if "tasks" in data:
        return data["tasks"]
    master = data.get("master")
    if isinstance(master, dict) and "tasks" in master:
        return master["tasks"]
    return []
def _save_tasks(project_dir: Path, tasks: list[dict]) -> None:
    """Write the tasks list back to tasks.json, preserving the envelope.

    Read-modify-write keeps sibling keys intact, and the list is written
    into whichever envelope ({"master": {"tasks": ...}} or flat
    {"tasks": ...}) the file already uses.
    Fix: if tasks.json has vanished since it was loaded, fall back to a
    fresh flat envelope instead of dying with FileNotFoundError.
    """
    tasks_file = project_dir / TASKS_FILE
    try:
        with open(tasks_file) as f:
            data = json.load(f)
    except FileNotFoundError:
        data = {}
    # Write back into whichever envelope format was used
    if "master" in data and "tasks" in data["master"]:
        data["master"]["tasks"] = tasks
    else:
        data["tasks"] = tasks
    with open(tasks_file, "w") as f:
        json.dump(data, f, indent=2)
def get_available_tasks(project_dir: Path) -> list[dict]:
    """Return every pending task whose dependencies are all completed.

    A task is available when its status is 'pending' and each id listed in
    its 'dependencies' refers to a task whose status is 'done'.
    """
    all_tasks = _load_tasks(project_dir)
    completed_ids = set()
    for task in all_tasks:
        if task.get("status") == "done":
            completed_ids.add(task["id"])
    available = []
    for task in all_tasks:
        if task.get("status") != "pending":
            continue
        if all(dep in completed_ids for dep in task.get("dependencies", [])):
            available.append(task)
    return available
def all_tasks_done(project_dir: Path) -> bool:
    """Return True when at least one task exists and none is unfinished."""
    task_list = _load_tasks(project_dir)
    if not task_list:
        return False
    for task in task_list:
        if task.get("status") != "done":
            return False
    return True
def count_task_progress(project_dir: Path) -> tuple[int, int]:
    """Return a (done_count, total_count) pair for the project's tasks."""
    task_list = _load_tasks(project_dir)
    finished = len([t for t in task_list if t.get("status") == "done"])
    return finished, len(task_list)
def set_task_status(project_dir: Path, task_id: int, status: str) -> None:
    """
    Directly update a single task's status in tasks.json.

    Only used by the orchestrator for crash recovery (reset to pending).
    Agents handle their own status + subtask updates via MCP.

    Fix: the file is rewritten only when *task_id* was actually found.
    Previously an unknown id (or a missing tasks.json, which loads as [])
    still triggered a write, which was pointless and could raise
    FileNotFoundError from the save path.
    """
    tasks = _load_tasks(project_dir)
    for task in tasks:
        if task.get("id") == task_id:
            task["status"] = status
            _save_tasks(project_dir, tasks)
            return
def sync_task_from_worktree(main_dir: Path, worktree_dir: Path, task_id: int) -> None:
    """
    Copy a task's full entry (status + subtasks) from the worktree back
    to the main project's tasks.json.

    The agent updates task status and subtask statuses via MCP, which
    writes to the worktree's local copy of tasks.json. This function
    propagates those changes to the canonical copy so the orchestrator
    sees them.

    Fix: if the task id is missing from the main copy (e.g. the entry was
    created inside the worktree), it is now appended instead of being
    silently dropped.
    """
    wt_tasks = _load_tasks(worktree_dir)
    main_tasks = _load_tasks(main_dir)
    # Find the task in the worktree copy; nothing to sync if it isn't there.
    wt_task = next((t for t in wt_tasks if t.get("id") == task_id), None)
    if wt_task is None:
        return
    for i, t in enumerate(main_tasks):
        if t.get("id") == task_id:
            main_tasks[i] = wt_task
            break
    else:
        # Not present in the canonical copy — append rather than lose it.
        main_tasks.append(wt_task)
    _save_tasks(main_dir, main_tasks)
    # Local import keeps the colors module an optional, late dependency.
    from colors import style, SYSTEM
    status = wt_task.get("status", "?")
    subs = wt_task.get("subtasks", [])
    if subs:
        sub_done = sum(1 for s in subs if s.get("status") == "done")
        print(style(f"[sync] Task {task_id}: {status} ({sub_done}/{len(subs)} subtasks done)", SYSTEM))
    else:
        print(style(f"[sync] Task {task_id}: {status}", SYSTEM))
def reset_stale_tasks(project_dir: Path) -> int:
    """Flip every 'in-progress' task back to 'pending'; return how many.

    After an orchestrator restart no agent is actually running, so a task
    left 'in-progress' by an interrupted run must be made claimable again.
    """
    tasks = _load_tasks(project_dir)
    stale = [t for t in tasks if t.get("status") == "in-progress"]
    for task in stale:
        task["status"] = "pending"
    if stale:
        _save_tasks(project_dir, tasks)
        # Local import keeps the colors module an optional, late dependency.
        from colors import style, WARN
        print(style(f"Reset {len(stale)} stale in-progress task(s) back to pending", WARN))
    return len(stale)
def is_initialized(project_dir: Path) -> bool:
    """Return True once a tasks.json exists, i.e. task-master was initialized."""
    tasks_path = project_dir / TASKS_FILE
    return tasks_path.exists()
def print_session_header(label: str) -> None:
    """Print *label* upper-cased as a banner between two 70-char rules."""
    rule = "=" * 70
    print("\n" + rule)
    print(f" {label.upper()}")
    print(rule)
    print()
def print_progress_summary(project_dir: Path) -> None:
    """Print a one-line completion summary, or a hint when no tasks exist."""
    done, total = count_task_progress(project_dir)
    if not total:
        print("\nProgress: no tasks found (initializer not yet run?)")
        return
    pct = (done / total) * 100
    print(f"\nProgress: {done}/{total} tasks done ({pct:.1f}%)")

View File

@@ -0,0 +1,191 @@
"""
Claude SDK Client Configuration
===============================
Functions for creating and configuring the Claude Agent SDK client.
"""
import json
import os
from pathlib import Path
from claude_code_sdk import ClaudeCodeOptions, ClaudeSDKClient
from claude_code_sdk.types import HookMatcher
from security import bash_security_hook
# Puppeteer MCP tools for browser automation
PUPPETEER_TOOLS = [
"mcp__puppeteer__puppeteer_navigate",
"mcp__puppeteer__puppeteer_screenshot",
"mcp__puppeteer__puppeteer_click",
"mcp__puppeteer__puppeteer_fill",
"mcp__puppeteer__puppeteer_select",
"mcp__puppeteer__puppeteer_hover",
"mcp__puppeteer__puppeteer_evaluate",
]
# Task-master-ai MCP tools (core set)
TASKMASTER_TOOLS = [
"mcp__task-master-ai__parse_prd",
"mcp__task-master-ai__get_tasks",
"mcp__task-master-ai__get_task",
"mcp__task-master-ai__next_task",
"mcp__task-master-ai__set_task_status",
"mcp__task-master-ai__add_task",
"mcp__task-master-ai__update_task",
"mcp__task-master-ai__expand_task",
"mcp__task-master-ai__expand_all",
"mcp__task-master-ai__add_subtask",
"mcp__task-master-ai__update_subtask",
"mcp__task-master-ai__add_dependency",
"mcp__task-master-ai__validate_dependencies",
"mcp__task-master-ai__complexity_report",
]
# Built-in Claude Code tools
BUILTIN_TOOLS = [
"Read",
"Write",
"Edit",
"Glob",
"Grep",
"Bash",
]
def _make_mcp_servers() -> dict:
"""Build MCP server config, forwarding ANTHROPIC_API_KEY to task-master if set."""
taskmaster_env: dict = {"TASK_MASTER_TOOLS": "core"}
api_key = os.environ.get("ANTHROPIC_API_KEY")
if api_key:
taskmaster_env["ANTHROPIC_API_KEY"] = api_key
return {
"puppeteer": {"command": "npx", "args": ["puppeteer-mcp-server"]},
"task-master-ai": {
"command": "npx",
"args": ["-y", "task-master-ai"],
"env": taskmaster_env,
},
}
# task-master config using claude-code provider for all roles (works with OAuth, no API key needed)
TASKMASTER_CONFIG = {
"models": {
"main": {
"provider": "claude-code",
"modelId": "opus",
"maxTokens": 32000,
"temperature": 0.2,
},
"research": {
"provider": "claude-code",
"modelId": "sonnet",
"maxTokens": 64000,
"temperature": 0.1,
},
"fallback": {
"provider": "claude-code",
"modelId": "sonnet",
"maxTokens": 64000,
"temperature": 0.2,
},
},
"global": {
"logLevel": "info",
"debug": False,
"defaultSubtasks": 5,
"defaultPriority": "medium",
"projectName": "TaskMaster",
},
}
def setup_taskmaster(project_dir: Path) -> None:
    """
    Write .taskmaster/config.json before the agent runs.

    This prevents task-master from using its default Perplexity research
    provider, which would fail without a PERPLEXITY_API_KEY.
    """
    tm_dir = project_dir / ".taskmaster"
    tm_dir.mkdir(parents=True, exist_ok=True)
    for sub in ("docs", "tasks"):
        (tm_dir / sub).mkdir(exist_ok=True)
    config_path = tm_dir / "config.json"
    with open(config_path, "w") as f:
        json.dump(TASKMASTER_CONFIG, f, indent=2)
    print("Wrote .taskmaster/config.json (Anthropic-only, no Perplexity)")
def _make_settings(project_dir: Path) -> Path:
    """Write .claude_settings.json for *project_dir* and return its path.

    Also ensures the project directory exists and the task-master config
    is in place (via setup_taskmaster).
    """
    allow_list = [
        "Read(./**)",
        "Write(./**)",
        "Edit(./**)",
        "Glob(./**)",
        "Grep(./**)",
        "Bash(*)",
        *PUPPETEER_TOOLS,
        *TASKMASTER_TOOLS,
    ]
    settings = {
        "sandbox": {"enabled": True, "autoAllowBashIfSandboxed": True},
        "permissions": {"defaultMode": "acceptEdits", "allow": allow_list},
    }
    project_dir.mkdir(parents=True, exist_ok=True)
    setup_taskmaster(project_dir)
    settings_path = project_dir / ".claude_settings.json"
    with open(settings_path, "w") as f:
        json.dump(settings, f, indent=2)
    return settings_path
def create_client(
    project_dir: Path,
    model: str,
    resume: str | None = None,
    system_prompt: str = "You are an expert full-stack developer building a production-quality web application.",
) -> ClaudeSDKClient:
    """
    Create a Claude Agent SDK client.

    Pass resume=session_id to continue a previous session (e.g. sending
    test failure feedback back to the dev agent that built the feature).

    Args:
        project_dir: Working directory the agent is confined to (cwd).
        model: Claude model identifier to run.
        resume: Optional session id; when set, the setup banner is skipped
            and the SDK resumes that session instead of starting fresh.
        system_prompt: System prompt handed to the SDK session.

    Returns:
        A configured ClaudeSDKClient (not yet connected).
    """
    # Side effect: writes .claude_settings.json and .taskmaster/config.json.
    settings_file = _make_settings(project_dir)
    if not resume:
        # Only announce the security setup on a fresh session.
        print(f"Created security settings at {settings_file}")
        print(" - Sandbox enabled (OS-level bash isolation)")
        print(f" - Filesystem restricted to: {project_dir.resolve()}")
        print(" - Bash commands restricted to allowlist (see security.py)")
        print(" - MCP servers: puppeteer, task-master-ai")
        print()
    return ClaudeSDKClient(
        options=ClaudeCodeOptions(
            model=model,
            system_prompt=system_prompt,
            # Builtins plus the two MCP tool families declared above.
            allowed_tools=[
                *BUILTIN_TOOLS,
                *PUPPETEER_TOOLS,
                *TASKMASTER_TOOLS,
            ],
            mcp_servers=_make_mcp_servers(),
            hooks={
                "PreToolUse": [
                    # Every Bash invocation is vetted by the security hook first.
                    HookMatcher(matcher="Bash", hooks=[bash_security_hook]),
                ],
            },
            max_turns=1000,
            cwd=str(project_dir.resolve()),
            settings=str(settings_file.resolve()),
            resume=resume,
        )
    )

View File

@@ -0,0 +1,192 @@
"""
Claude SDK Client Configuration
===============================
Functions for creating and configuring the Claude Agent SDK client.
"""
import json
import os
from pathlib import Path
from claude_code_sdk import ClaudeCodeOptions, ClaudeSDKClient
from claude_code_sdk.types import HookMatcher
from security import bash_security_hook
# Puppeteer MCP tools for browser automation
PUPPETEER_TOOLS = [
"mcp__puppeteer__puppeteer_navigate",
"mcp__puppeteer__puppeteer_screenshot",
"mcp__puppeteer__puppeteer_click",
"mcp__puppeteer__puppeteer_fill",
"mcp__puppeteer__puppeteer_select",
"mcp__puppeteer__puppeteer_hover",
"mcp__puppeteer__puppeteer_evaluate",
]
# Task-master-ai MCP tools (core set)
TASKMASTER_TOOLS = [
"mcp__task-master-ai__parse_prd",
"mcp__task-master-ai__get_tasks",
"mcp__task-master-ai__get_task",
"mcp__task-master-ai__next_task",
"mcp__task-master-ai__set_task_status",
"mcp__task-master-ai__add_task",
"mcp__task-master-ai__update_task",
"mcp__task-master-ai__expand_task",
"mcp__task-master-ai__expand_all",
"mcp__task-master-ai__add_subtask",
"mcp__task-master-ai__update_subtask",
"mcp__task-master-ai__add_dependency",
"mcp__task-master-ai__validate_dependencies",
"mcp__task-master-ai__complexity_report",
]
# Built-in Claude Code tools
BUILTIN_TOOLS = [
"Read",
"Write",
"Edit",
"Glob",
"Grep",
"Bash",
]
def _make_mcp_servers() -> dict:
"""Build MCP server config, forwarding ANTHROPIC_API_KEY to task-master if set."""
taskmaster_env: dict = {"TASK_MASTER_TOOLS": "core"}
api_key = os.environ.get("ANTHROPIC_API_KEY")
if api_key:
taskmaster_env["ANTHROPIC_API_KEY"] = api_key
return {
"puppeteer": {"command": "npx", "args": ["puppeteer-mcp-server"]},
"task-master-ai": {
"command": "npx",
"args": ["-y", "task-master-ai"],
"env": taskmaster_env,
},
}
# task-master config using claude-code provider for all roles (works with OAuth, no API key needed)
TASKMASTER_CONFIG = {
"models": {
"main": {
"provider": "claude-code",
"modelId": "opus",
"maxTokens": 32000,
"temperature": 0.2,
},
"research": {
"provider": "claude-code",
"modelId": "sonnet",
"maxTokens": 64000,
"temperature": 0.1,
},
"fallback": {
"provider": "claude-code",
"modelId": "sonnet",
"maxTokens": 64000,
"temperature": 0.2,
},
},
"global": {
"logLevel": "info",
"debug": False,
"defaultSubtasks": 5,
"defaultPriority": "medium",
"projectName": "TaskMaster",
},
}
def setup_taskmaster(project_dir: Path) -> None:
    """
    Write .taskmaster/config.json before the agent runs.

    This prevents task-master from using its default Perplexity research
    provider, which would fail without a PERPLEXITY_API_KEY.
    """
    tm_dir = project_dir / ".taskmaster"
    tm_dir.mkdir(parents=True, exist_ok=True)
    for sub in ("docs", "tasks"):
        (tm_dir / sub).mkdir(exist_ok=True)
    config_path = tm_dir / "config.json"
    with open(config_path, "w") as f:
        json.dump(TASKMASTER_CONFIG, f, indent=2)
    print("Wrote .taskmaster/config.json (Anthropic-only, no Perplexity)")
def _make_settings(project_dir: Path) -> Path:
    """Write .claude_settings.json for *project_dir* and return its path.

    Also ensures the project directory exists and the task-master config
    is in place (via setup_taskmaster).
    """
    allow_list = [
        "Read(./**)",
        "Write(./**)",
        "Edit(./**)",
        "Glob(./**)",
        "Grep(./**)",
        "Bash(*)",
        *PUPPETEER_TOOLS,
        *TASKMASTER_TOOLS,
    ]
    settings = {
        "sandbox": {"enabled": True, "autoAllowBashIfSandboxed": True},
        "permissions": {"defaultMode": "acceptEdits", "allow": allow_list},
    }
    project_dir.mkdir(parents=True, exist_ok=True)
    setup_taskmaster(project_dir)
    settings_path = project_dir / ".claude_settings.json"
    with open(settings_path, "w") as f:
        json.dump(settings, f, indent=2)
    return settings_path
def create_client(
    project_dir: Path,
    model: str,
    resume: str | None = None,
    system_prompt: str = "You are an expert full-stack developer building a production-quality web application.",
) -> ClaudeSDKClient:
    """
    Create a Claude Agent SDK client.

    Pass resume=session_id to continue a previous session (e.g. sending
    test failure feedback back to the dev agent that built the feature).

    Args:
        project_dir: Working directory the agent is confined to (cwd).
        model: Claude model identifier to run.
        resume: Optional session id; when set, the setup banner is skipped
            and the SDK resumes that session instead of starting fresh.
        system_prompt: System prompt handed to the SDK session.

    Returns:
        A configured ClaudeSDKClient (not yet connected).
    """
    # Side effect: writes .claude_settings.json and .taskmaster/config.json.
    settings_file = _make_settings(project_dir)
    if not resume:
        # Only announce the security setup on a fresh session.
        from colors import style, SYSTEM, DIM
        print(style(f"Created security settings at {settings_file}", SYSTEM))
        print(style(" - Sandbox enabled (OS-level bash isolation)", DIM))
        print(style(f" - Filesystem restricted to: {project_dir.resolve()}", DIM))
        print(style(" - Bash commands restricted to allowlist (see security.py)", DIM))
        print(style(" - MCP servers: puppeteer, task-master-ai", DIM))
        print()
    return ClaudeSDKClient(
        options=ClaudeCodeOptions(
            model=model,
            system_prompt=system_prompt,
            # Builtins plus the two MCP tool families declared above.
            allowed_tools=[
                *BUILTIN_TOOLS,
                *PUPPETEER_TOOLS,
                *TASKMASTER_TOOLS,
            ],
            mcp_servers=_make_mcp_servers(),
            hooks={
                "PreToolUse": [
                    # Every Bash invocation is vetted by the security hook first.
                    HookMatcher(matcher="Bash", hooks=[bash_security_hook]),
                ],
            },
            max_turns=1000,
            cwd=str(project_dir.resolve()),
            settings=str(settings_file.resolve()),
            resume=resume,
        )
    )

View File

@@ -0,0 +1,116 @@
#!/usr/bin/env python3
"""
Autonomous Coding Agent Demo
============================
A minimal harness demonstrating long-running autonomous coding with Claude.
This script implements the two-agent pattern (initializer + coding agent) and
incorporates all the strategies from the long-running agents guide.
Example Usage:
python autonomous_agent_demo.py --project-dir ./claude_clone_demo
python autonomous_agent_demo.py --project-dir ./claude_clone_demo --max-iterations 5
"""
import argparse
import asyncio
from pathlib import Path
from agent import run_autonomous_agent
# Configuration
DEFAULT_MODEL = "claude-opus-4-6"
def parse_args() -> argparse.Namespace:
    """Parse command line arguments.

    Returns:
        Namespace with: project_dir (Path), max_iterations (int | None),
        concurrency (int), model (str).
    """
    parser = argparse.ArgumentParser(
        description="Autonomous Coding Agent Demo - Long-running agent harness",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Start fresh project
  python autonomous_agent_demo.py --project-dir ./claude_clone
  # Use a specific model
  python autonomous_agent_demo.py --project-dir ./claude_clone --model claude-sonnet-4-5-20250929
  # Limit iterations for testing
  python autonomous_agent_demo.py --project-dir ./claude_clone --max-iterations 5
  # Continue existing project
  python autonomous_agent_demo.py --project-dir ./claude_clone
Authentication:
  Run `claude login` to authenticate via OAuth (recommended).
  Or set ANTHROPIC_API_KEY for direct API key auth.
""",
    )
    # Where generated project files go; main() relocates relative paths
    # under generations/.
    parser.add_argument(
        "--project-dir",
        type=Path,
        default=Path("./autonomous_demo_project"),
        help="Directory for the project (default: generations/autonomous_demo_project). Relative paths automatically placed in generations/ directory.",
    )
    # Safety valve for testing; None means run until the tasks are done.
    parser.add_argument(
        "--max-iterations",
        type=int,
        default=None,
        help="Maximum number of agent iterations (default: unlimited)",
    )
    # How many tasks run in parallel agent sessions.
    parser.add_argument(
        "--concurrency",
        type=int,
        default=2,
        help="Number of tasks to execute in parallel (default: 2)",
    )
    parser.add_argument(
        "--model",
        type=str,
        default=DEFAULT_MODEL,
        help=f"Claude model to use (default: {DEFAULT_MODEL})",
    )
    return parser.parse_args()
def main() -> None:
    """CLI entry point: resolve the project directory and run the agent loop.

    Relative --project-dir paths are placed under generations/ so demo
    output stays in one place; absolute paths are honored as given.
    """
    args = parse_args()
    project_dir: Path = args.project_dir
    # Bug fix: the old check used str(path).startswith("generations/"),
    # which fails on Windows separators ("generations\\x") and would nest
    # the output as generations/generations/x. Compare path components
    # instead, and skip absolute paths entirely.
    if not project_dir.is_absolute() and project_dir.parts[:1] != ("generations",):
        project_dir = Path("generations") / project_dir
    # Run the agent
    try:
        asyncio.run(
            run_autonomous_agent(
                project_dir=project_dir,
                model=args.model,
                max_iterations=args.max_iterations,
                concurrency=args.concurrency,
            )
        )
    except KeyboardInterrupt:
        print("\n\nInterrupted by user")
        print("To resume, run the same command again")
    except Exception as e:
        print(f"\nFatal error: {e}")
        raise
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,124 @@
#!/usr/bin/env python3
"""
Autonomous Coding Agent Demo
============================
A minimal harness demonstrating long-running autonomous coding with Claude.
This script implements the two-agent pattern (initializer + coding agent) and
incorporates all the strategies from the long-running agents guide.
Example Usage:
python autonomous_agent_demo.py --project-dir ./claude_clone_demo
python autonomous_agent_demo.py --project-dir ./claude_clone_demo --max-iterations 5
"""
import argparse
import asyncio
from pathlib import Path
from agent import run_autonomous_agent
# Configuration
DEFAULT_MODEL = "claude-opus-4-6"
def parse_args() -> argparse.Namespace:
    """Parse command line arguments.

    Returns:
        Namespace with: project_dir (Path), max_iterations (int | None),
        concurrency (int), model (str), no_worktrees (bool).
    """
    parser = argparse.ArgumentParser(
        description="Autonomous Coding Agent Demo - Long-running agent harness",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Start fresh project
  python autonomous_agent_demo.py --project-dir ./claude_clone
  # Use a specific model
  python autonomous_agent_demo.py --project-dir ./claude_clone --model claude-sonnet-4-5-20250929
  # Limit iterations for testing
  python autonomous_agent_demo.py --project-dir ./claude_clone --max-iterations 5
  # Continue existing project
  python autonomous_agent_demo.py --project-dir ./claude_clone
Authentication:
  Run `claude login` to authenticate via OAuth (recommended).
  Or set ANTHROPIC_API_KEY for direct API key auth.
""",
    )
    # Where generated project files go; main() relocates relative paths
    # under generations/.
    parser.add_argument(
        "--project-dir",
        type=Path,
        default=Path("./autonomous_demo_project"),
        help="Directory for the project (default: generations/autonomous_demo_project). Relative paths automatically placed in generations/ directory.",
    )
    # Safety valve for testing; None means run until the tasks are done.
    parser.add_argument(
        "--max-iterations",
        type=int,
        default=None,
        help="Maximum number of agent iterations (default: unlimited)",
    )
    # How many tasks run in parallel agent sessions.
    parser.add_argument(
        "--concurrency",
        type=int,
        default=2,
        help="Number of tasks to execute in parallel (default: 2)",
    )
    parser.add_argument(
        "--model",
        type=str,
        default=DEFAULT_MODEL,
        help=f"Claude model to use (default: {DEFAULT_MODEL})",
    )
    # Opt-out switch: share one working directory instead of per-task worktrees.
    parser.add_argument(
        "--no-worktrees",
        action="store_true",
        default=False,
        help="Disable git worktrees (all agents share one directory — original behavior)",
    )
    return parser.parse_args()
def main() -> None:
    """Main entry point: parse arguments, place the project dir, run the agent."""
    args = parse_args()
    # Automatically place relative projects under generations/ unless they are
    # already there.  Compare path *components* instead of the raw string so
    # the check also works with OS-specific separators (e.g. "generations\\x"
    # on Windows) and with a bare "generations" argument; the old
    # str(...).startswith("generations/") test failed both cases.
    project_dir: Path = args.project_dir
    if not project_dir.is_absolute() and project_dir.parts[:1] != ("generations",):
        project_dir = Path("generations") / project_dir
    # Run the agent
    try:
        asyncio.run(
            run_autonomous_agent(
                project_dir=project_dir,
                model=args.model,
                max_iterations=args.max_iterations,
                concurrency=args.concurrency,
                use_worktrees=not args.no_worktrees,
            )
        )
    except KeyboardInterrupt:
        # Ctrl-C is a supported stop; state lives on disk, so rerunning the
        # same command resumes where it left off.
        print("\n\nInterrupted by user")
        print("To resume, run the same command again")
    except Exception as e:
        print(f"\nFatal error: {e}")
        raise

View File

@@ -0,0 +1,133 @@
#!/usr/bin/env python3
"""
Autonomous Coding Agent Demo
============================
A minimal harness demonstrating long-running autonomous coding with Claude.
This script implements the two-agent pattern (initializer + coding agent) and
incorporates all the strategies from the long-running agents guide.
Example Usage:
python autonomous_agent_demo.py --project-dir ./claude_clone_demo
python autonomous_agent_demo.py --project-dir ./claude_clone_demo --max-iterations 5
"""
import argparse
import asyncio
from pathlib import Path
from agent import run_autonomous_agent
# Configuration
DEFAULT_MODEL = "claude-opus-4-6"
DEFAULT_TESTER_MODEL = "claude-sonnet-4-6"
def parse_args() -> argparse.Namespace:
    """Parse command line arguments.

    Returns:
        argparse.Namespace with attributes: project_dir (Path),
        max_iterations (int | None), concurrency (int), model (str),
        tester_model (str), no_worktrees (bool).
    """
    # RawDescriptionHelpFormatter preserves the hand-formatted epilog below.
    parser = argparse.ArgumentParser(
        description="Autonomous Coding Agent Demo - Long-running agent harness",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Start fresh project
  python autonomous_agent_demo.py --project-dir ./claude_clone
  # Use a specific model
  python autonomous_agent_demo.py --project-dir ./claude_clone --model claude-sonnet-4-5-20250929
  # Limit iterations for testing
  python autonomous_agent_demo.py --project-dir ./claude_clone --max-iterations 5
  # Continue existing project
  python autonomous_agent_demo.py --project-dir ./claude_clone
Authentication:
  Run `claude login` to authenticate via OAuth (recommended).
  Or set ANTHROPIC_API_KEY for direct API key auth.
        """,
    )
    # NOTE: the stored default is ./autonomous_demo_project; main() relocates
    # relative paths under generations/, which is why the help text mentions it.
    parser.add_argument(
        "--project-dir",
        type=Path,
        default=Path("./autonomous_demo_project"),
        help="Directory for the project (default: generations/autonomous_demo_project). Relative paths automatically placed in generations/ directory.",
    )
    # None means "run until all tasks are done".
    parser.add_argument(
        "--max-iterations",
        type=int,
        default=None,
        help="Maximum number of agent iterations (default: unlimited)",
    )
    parser.add_argument(
        "--concurrency",
        type=int,
        default=2,
        help="Number of tasks to execute in parallel (default: 2)",
    )
    parser.add_argument(
        "--model",
        type=str,
        default=DEFAULT_MODEL,
        help=f"Claude model to use for coding agents (default: {DEFAULT_MODEL})",
    )
    # Tester agents can use a cheaper/faster model than the coding agents.
    parser.add_argument(
        "--tester-model",
        type=str,
        default=DEFAULT_TESTER_MODEL,
        help=f"Claude model to use for tester agents (default: {DEFAULT_TESTER_MODEL})",
    )
    # Boolean escape hatch: when set, all agents share one working directory.
    parser.add_argument(
        "--no-worktrees",
        action="store_true",
        default=False,
        help="Disable git worktrees (all agents share one directory — original behavior)",
    )
    return parser.parse_args()
def main() -> None:
    """Main entry point: parse arguments, place the project dir, run the agent."""
    args = parse_args()
    # Automatically place relative projects under generations/ unless they are
    # already there.  Compare path *components* instead of the raw string so
    # the check also works with OS-specific separators (e.g. "generations\\x"
    # on Windows) and with a bare "generations" argument; the old
    # str(...).startswith("generations/") test failed both cases.
    project_dir: Path = args.project_dir
    if not project_dir.is_absolute() and project_dir.parts[:1] != ("generations",):
        project_dir = Path("generations") / project_dir
    # Run the agent
    try:
        asyncio.run(
            run_autonomous_agent(
                project_dir=project_dir,
                model=args.model,
                tester_model=args.tester_model,
                max_iterations=args.max_iterations,
                concurrency=args.concurrency,
                use_worktrees=not args.no_worktrees,
            ),
        )
    except KeyboardInterrupt:
        # Ctrl-C is a supported stop; state lives on disk, so rerunning the
        # same command resumes where it left off.
        print("\n\nInterrupted by user")
        print("To resume, run the same command again")
    except Exception as e:
        print(f"\nFatal error: {e}")
        raise

View File

@@ -0,0 +1,169 @@
"""
Git Worktree Management
=======================
Creates isolated git worktrees for parallel coding agents.
Each task gets its own worktree with a dedicated branch, preventing
agents from stepping on each other's file changes.
Shared orchestration state (.taskmaster) is symlinked so all agents
see the same task definitions and status updates.
"""
import asyncio
import shutil
from pathlib import Path
async def run_git(cwd: Path, *args: str, check: bool = True) -> str:
    """Execute ``git *args`` in *cwd* and return its stripped stdout.

    Raises RuntimeError (with the command and stderr) when *check* is True
    and git exits non-zero.
    """
    process = await asyncio.create_subprocess_exec(
        "git",
        *args,
        cwd=str(cwd),
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    out, err = await process.communicate()
    if check and process.returncode != 0:
        cmd = " ".join(args)
        raise RuntimeError(
            f"git {cmd} failed (exit {process.returncode}): {err.decode().strip()}"
        )
    return out.decode().strip()
async def create_worktree(project_dir: Path, task_id: int) -> Path:
    """
    Create an isolated git worktree for a task.
    Creates branch 'task-{id}' from HEAD and checks it out in
    .worktrees/task-{id}. Shared orchestration state (.taskmaster)
    is symlinked so all agents share the same task data.
    Returns the worktree path.
    """
    worktrees_root = project_dir / ".worktrees"
    worktrees_root.mkdir(exist_ok=True)
    worktree_dir = worktrees_root / f"task-{task_id}"
    branch = f"task-{task_id}"
    # Clean up stale worktree/branch from a previous run
    # `git worktree remove` may itself fail (check=False), so fall back to
    # deleting the directory directly before force-deleting the old branch.
    if worktree_dir.exists():
        await run_git(project_dir, "worktree", "remove", "--force",
                      str(worktree_dir), check=False)
        if worktree_dir.exists():
            shutil.rmtree(worktree_dir)
    await run_git(project_dir, "branch", "-D", branch, check=False)
    # Create worktree with new branch based on current HEAD
    await run_git(
        project_dir, "worktree", "add", "-b", branch,
        str(worktree_dir), "HEAD",
    )
    # Replace tracked copies of orchestration state with symlinks
    _symlink_shared(project_dir, worktree_dir)
    print(f"[worktree] Created .worktrees/task-{task_id} on branch {branch}")
    return worktree_dir
async def merge_worktree(
    project_dir: Path,
    task_id: int,
    merge_lock: asyncio.Lock,
) -> bool:
    """
    Merge the task branch back into the main branch.
    Uses a lock so only one merge runs at a time (git operations on the
    same repo are not concurrency-safe).
    Returns True on success, False if there was a conflict.
    On conflict the merge is aborted and the branch is preserved.
    """
    branch = f"task-{task_id}"
    async with merge_lock:
        try:
            await run_git(
                project_dir, "merge", branch,
                "-m", f"Merge task-{task_id}",
            )
            print(f"[worktree] Merged {branch} into main")
            return True
        except RuntimeError as e:
            # run_git raises RuntimeError on any non-zero exit; for `git merge`
            # that means a conflict, so undo it and keep the branch around.
            print(f"[worktree] Merge conflict for {branch}: {e}")
            await run_git(project_dir, "merge", "--abort", check=False)
            return False
async def cleanup_worktree(
    project_dir: Path,
    task_id: int,
    delete_branch: bool = True,
) -> None:
    """
    Remove the worktree directory.
    If delete_branch is True (merge succeeded), also delete the task branch.
    If False (merge conflict), preserve the branch for manual resolution.
    """
    worktree_dir = project_dir / ".worktrees" / f"task-{task_id}"
    branch = f"task-{task_id}"
    # Remove symlinks first so git worktree remove doesn't follow them
    _remove_symlinks(worktree_dir)
    # Best-effort removal (check=False), with a direct rmtree fallback.
    if worktree_dir.exists():
        await run_git(project_dir, "worktree", "remove", "--force",
                      str(worktree_dir), check=False)
        if worktree_dir.exists():
            shutil.rmtree(worktree_dir)
    if delete_branch:
        await run_git(project_dir, "branch", "-D", branch, check=False)
        print(f"[worktree] Cleaned up {branch}")
    else:
        print(f"[worktree] Removed worktree but preserved branch {branch}")
# ---------------------------------------------------------------------------
# Shared state management
# ---------------------------------------------------------------------------
# Orchestration state that must be shared (not per-branch).
_SHARED_DIRS = [".taskmaster"]
_SHARED_FILES = ["app_spec.txt", "claude-progress.txt", "events.jsonl"]
def _symlink_shared(project_dir: Path, worktree_dir: Path) -> None:
    """Swap the worktree's copies of shared state for symlinks to the main project."""
    for name in _SHARED_DIRS:
        source = project_dir / name
        target = worktree_dir / name
        if not source.exists():
            continue
        # A real directory (from the checkout) needs a recursive delete;
        # anything else (file or stale symlink) is a single unlink.
        if target.is_dir() and not target.is_symlink():
            shutil.rmtree(target)
        elif target.is_symlink() or target.exists():
            target.unlink()
        target.symlink_to(source.resolve())
    for name in _SHARED_FILES:
        source = project_dir / name
        target = worktree_dir / name
        if not source.exists():
            continue
        if target.is_symlink() or target.exists():
            target.unlink()
        target.symlink_to(source.resolve())
def _remove_symlinks(worktree_dir: Path) -> None:
    """Unlink our shared-state symlinks so `git worktree remove` won't follow them."""
    if not worktree_dir.exists():
        return
    shared = _SHARED_DIRS + _SHARED_FILES
    for candidate in (worktree_dir / name for name in shared):
        if candidate.is_symlink():
            candidate.unlink()

View File

@@ -0,0 +1,172 @@
"""
Git Worktree Management
=======================
Creates isolated git worktrees for parallel coding agents.
Each task gets its own worktree with a dedicated branch, preventing
agents from stepping on each other's file changes.
Shared orchestration state (.taskmaster) is symlinked so all agents
see the same task definitions and status updates.
"""
import asyncio
import shutil
from pathlib import Path
from colors import style, SYSTEM, BRIGHT_GREEN, WARN, DIM, BOLD, task_color
async def run_git(cwd: Path, *args: str, check: bool = True) -> str:
    """Execute ``git *args`` in *cwd* and return its stripped stdout.

    Raises RuntimeError (with the command and stderr) when *check* is True
    and git exits non-zero.
    """
    process = await asyncio.create_subprocess_exec(
        "git",
        *args,
        cwd=str(cwd),
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    out, err = await process.communicate()
    if check and process.returncode != 0:
        cmd = " ".join(args)
        raise RuntimeError(
            f"git {cmd} failed (exit {process.returncode}): {err.decode().strip()}"
        )
    return out.decode().strip()
async def create_worktree(project_dir: Path, task_id: int) -> Path:
    """
    Create an isolated git worktree for a task.
    Creates branch 'task-{id}' from HEAD and checks it out in
    .worktrees/task-{id}. Shared orchestration state (.taskmaster)
    is symlinked so all agents share the same task data.
    Returns the worktree path.
    """
    worktrees_root = project_dir / ".worktrees"
    worktrees_root.mkdir(exist_ok=True)
    worktree_dir = worktrees_root / f"task-{task_id}"
    branch = f"task-{task_id}"
    # Clean up stale worktree/branch from a previous run
    # `git worktree remove` may itself fail (check=False), so fall back to
    # deleting the directory directly before force-deleting the old branch.
    if worktree_dir.exists():
        await run_git(project_dir, "worktree", "remove", "--force",
                      str(worktree_dir), check=False)
        if worktree_dir.exists():
            shutil.rmtree(worktree_dir)
    await run_git(project_dir, "branch", "-D", branch, check=False)
    # Create worktree with new branch based on current HEAD
    await run_git(
        project_dir, "worktree", "add", "-b", branch,
        str(worktree_dir), "HEAD",
    )
    # Replace tracked copies of orchestration state with symlinks
    _symlink_shared(project_dir, worktree_dir)
    # Per-task ANSI color keeps parallel agents' output distinguishable.
    tc = task_color(task_id)
    print(style(f"[worktree] Created .worktrees/task-{task_id}", SYSTEM) + " on branch " + style(branch, BOLD, tc))
    return worktree_dir
async def merge_worktree(
    project_dir: Path,
    task_id: int,
    merge_lock: asyncio.Lock,
) -> bool:
    """
    Merge the task branch back into the main branch.
    Uses a lock so only one merge runs at a time (git operations on the
    same repo are not concurrency-safe).
    Returns True on success, False if there was a conflict.
    On conflict the merge is aborted and the branch is preserved.
    """
    branch = f"task-{task_id}"
    async with merge_lock:
        try:
            await run_git(
                project_dir, "merge", branch,
                "-m", f"Merge task-{task_id}",
            )
            print(style(f"[worktree] Merged {branch}", SYSTEM) + " " + style("into main", BRIGHT_GREEN))
            return True
        except RuntimeError as e:
            # run_git raises RuntimeError on any non-zero exit; for `git merge`
            # that means a conflict, so undo it and keep the branch around.
            print(style(f"[worktree] Merge conflict for {branch}: {e}", WARN))
            await run_git(project_dir, "merge", "--abort", check=False)
            return False
async def cleanup_worktree(
    project_dir: Path,
    task_id: int,
    delete_branch: bool = True,
) -> None:
    """
    Remove the worktree directory.
    If delete_branch is True (merge succeeded), also delete the task branch.
    If False (merge conflict), preserve the branch for manual resolution.
    """
    worktree_dir = project_dir / ".worktrees" / f"task-{task_id}"
    branch = f"task-{task_id}"
    # Remove symlinks first so git worktree remove doesn't follow them
    _remove_symlinks(worktree_dir)
    # Best-effort removal (check=False), with a direct rmtree fallback.
    if worktree_dir.exists():
        await run_git(project_dir, "worktree", "remove", "--force",
                      str(worktree_dir), check=False)
        if worktree_dir.exists():
            shutil.rmtree(worktree_dir)
    if delete_branch:
        await run_git(project_dir, "branch", "-D", branch, check=False)
        print(style(f"[worktree] Cleaned up {branch}", SYSTEM))
    else:
        print(style(f"[worktree] Preserved branch {branch} for manual merge", WARN))
# ---------------------------------------------------------------------------
# Shared state management
# ---------------------------------------------------------------------------
# Orchestration state that must be shared (not per-branch).
_SHARED_DIRS = [".taskmaster"]
_SHARED_FILES = ["app_spec.txt", "claude-progress.txt", "events.jsonl"]
def _symlink_shared(project_dir: Path, worktree_dir: Path) -> None:
    """Swap the worktree's copies of shared state for symlinks to the main project."""
    for name in _SHARED_DIRS:
        source = project_dir / name
        target = worktree_dir / name
        if not source.exists():
            continue
        # A real directory (from the checkout) needs a recursive delete;
        # anything else (file or stale symlink) is a single unlink.
        if target.is_dir() and not target.is_symlink():
            shutil.rmtree(target)
        elif target.is_symlink() or target.exists():
            target.unlink()
        target.symlink_to(source.resolve())
    for name in _SHARED_FILES:
        source = project_dir / name
        target = worktree_dir / name
        if not source.exists():
            continue
        if target.is_symlink() or target.exists():
            target.unlink()
        target.symlink_to(source.resolve())
def _remove_symlinks(worktree_dir: Path) -> None:
    """Unlink our shared-state symlinks so `git worktree remove` won't follow them."""
    if not worktree_dir.exists():
        return
    shared = _SHARED_DIRS + _SHARED_FILES
    for candidate in (worktree_dir / name for name in shared):
        if candidate.is_symlink():
            candidate.unlink()

View File

@@ -0,0 +1,176 @@
"""
Git Worktree Management
=======================
Creates isolated git worktrees for parallel coding agents.
Each task gets its own worktree with a dedicated branch, preventing
agents from stepping on each other's file changes.
Shared orchestration state (.taskmaster) is symlinked so all agents
see the same task definitions and status updates.
"""
import asyncio
import shutil
from pathlib import Path
from colors import style, SYSTEM, BRIGHT_GREEN, WARN, DIM, BOLD, task_color
async def run_git(cwd: Path, *args: str, check: bool = True) -> str:
    """Execute ``git *args`` in *cwd* and return its stripped stdout.

    Raises RuntimeError (with the command and stderr) when *check* is True
    and git exits non-zero.
    """
    process = await asyncio.create_subprocess_exec(
        "git",
        *args,
        cwd=str(cwd),
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    out, err = await process.communicate()
    if check and process.returncode != 0:
        cmd = " ".join(args)
        raise RuntimeError(
            f"git {cmd} failed (exit {process.returncode}): {err.decode().strip()}"
        )
    return out.decode().strip()
async def create_worktree(project_dir: Path, task_id: int) -> Path:
    """
    Create an isolated git worktree for a task.
    Creates branch 'task-{id}' from HEAD and checks it out in
    .worktrees/task-{id}. Shared orchestration state (.taskmaster)
    is symlinked so all agents share the same task data.
    Returns the worktree path.
    """
    worktrees_root = project_dir / ".worktrees"
    worktrees_root.mkdir(exist_ok=True)
    worktree_dir = worktrees_root / f"task-{task_id}"
    branch = f"task-{task_id}"
    # Clean up stale worktree/branch from a previous run
    # `git worktree remove` may itself fail (check=False), so fall back to
    # deleting the directory directly before force-deleting the old branch.
    if worktree_dir.exists():
        await run_git(project_dir, "worktree", "remove", "--force",
                      str(worktree_dir), check=False)
        if worktree_dir.exists():
            shutil.rmtree(worktree_dir)
    await run_git(project_dir, "branch", "-D", branch, check=False)
    # Create worktree with new branch based on current HEAD
    await run_git(
        project_dir, "worktree", "add", "-b", branch,
        str(worktree_dir), "HEAD",
    )
    # Replace tracked copies of orchestration state with symlinks
    _symlink_shared(project_dir, worktree_dir)
    # Per-task ANSI color keeps parallel agents' output distinguishable.
    tc = task_color(task_id)
    print(style(f"[worktree] Created .worktrees/task-{task_id}", SYSTEM) + " on branch " + style(branch, BOLD, tc))
    return worktree_dir
async def merge_worktree(
    project_dir: Path,
    task_id: int,
    merge_lock: asyncio.Lock,
) -> bool:
    """
    Merge the task branch back into the main branch.
    Uses a lock so only one merge runs at a time (git operations on the
    same repo are not concurrency-safe).
    Returns True on success, False if there was a conflict.
    On conflict the merge is aborted and the branch is preserved.
    """
    branch = f"task-{task_id}"
    async with merge_lock:
        try:
            await run_git(
                project_dir, "merge", branch,
                "-m", f"Merge task-{task_id}",
            )
            print(style(f"[worktree] Merged {branch}", SYSTEM) + " " + style("into main", BRIGHT_GREEN))
            return True
        except RuntimeError as e:
            # run_git raises RuntimeError on any non-zero exit; for `git merge`
            # that means a conflict, so undo it and keep the branch around.
            print(style(f"[worktree] Merge conflict for {branch}: {e}", WARN))
            await run_git(project_dir, "merge", "--abort", check=False)
            return False
async def cleanup_worktree(
    project_dir: Path,
    task_id: int,
    delete_branch: bool = True,
) -> None:
    """
    Remove the worktree directory.
    If delete_branch is True (merge succeeded), also delete the task branch.
    If False (merge conflict), preserve the branch for manual resolution.
    """
    worktree_dir = project_dir / ".worktrees" / f"task-{task_id}"
    branch = f"task-{task_id}"
    # Remove symlinks first so git worktree remove doesn't follow them
    _remove_symlinks(worktree_dir)
    # Best-effort removal (check=False), with a direct rmtree fallback.
    if worktree_dir.exists():
        await run_git(project_dir, "worktree", "remove", "--force",
                      str(worktree_dir), check=False)
        if worktree_dir.exists():
            shutil.rmtree(worktree_dir)
    if delete_branch:
        await run_git(project_dir, "branch", "-D", branch, check=False)
        print(style(f"[worktree] Cleaned up {branch}", SYSTEM))
    else:
        print(style(f"[worktree] Preserved branch {branch} for manual merge", WARN))
# ---------------------------------------------------------------------------
# Shared state management
# ---------------------------------------------------------------------------
# Files that should be shared across worktrees (not per-branch).
# NOTE: .taskmaster is intentionally NOT shared. Each worktree keeps its
# own git-checkout copy so the agent can read task details. All status
# writes go through the orchestrator which targets the main project_dir,
# avoiding symlink/sandbox issues entirely.
_SHARED_DIRS: list[str] = []
_SHARED_FILES = ["app_spec.txt", "claude-progress.txt", "events.jsonl"]
def _symlink_shared(project_dir: Path, worktree_dir: Path) -> None:
    """Swap the worktree's copies of shared state for symlinks to the main project."""
    for name in _SHARED_DIRS:
        source = project_dir / name
        target = worktree_dir / name
        if not source.exists():
            continue
        # A real directory (from the checkout) needs a recursive delete;
        # anything else (file or stale symlink) is a single unlink.
        if target.is_dir() and not target.is_symlink():
            shutil.rmtree(target)
        elif target.is_symlink() or target.exists():
            target.unlink()
        target.symlink_to(source.resolve())
    for name in _SHARED_FILES:
        source = project_dir / name
        target = worktree_dir / name
        if not source.exists():
            continue
        if target.is_symlink() or target.exists():
            target.unlink()
        target.symlink_to(source.resolve())
def _remove_symlinks(worktree_dir: Path) -> None:
    """Unlink our shared-state symlinks so `git worktree remove` won't follow them."""
    if not worktree_dir.exists():
        return
    shared = _SHARED_DIRS + _SHARED_FILES
    for candidate in (worktree_dir / name for name in shared):
        if candidate.is_symlink():
            candidate.unlink()

View File

@@ -0,0 +1,204 @@
"""
Git Worktree Management
=======================
Creates isolated git worktrees for parallel coding agents.
Each task gets its own worktree with a dedicated branch, preventing
agents from stepping on each other's file changes.
Shared orchestration state (.taskmaster) is symlinked so all agents
see the same task definitions and status updates.
"""
import asyncio
import shutil
from pathlib import Path
from colors import style, SYSTEM, BRIGHT_GREEN, WARN, DIM, BOLD, task_color
async def run_git(cwd: Path, *args: str, check: bool = True) -> str:
    """Execute ``git *args`` in *cwd* and return its stripped stdout.

    Raises RuntimeError (with the command and stderr) when *check* is True
    and git exits non-zero.
    """
    process = await asyncio.create_subprocess_exec(
        "git",
        *args,
        cwd=str(cwd),
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    out, err = await process.communicate()
    if check and process.returncode != 0:
        cmd = " ".join(args)
        raise RuntimeError(
            f"git {cmd} failed (exit {process.returncode}): {err.decode().strip()}"
        )
    return out.decode().strip()
async def create_worktree(project_dir: Path, task_id: int) -> Path:
    """
    Create an isolated git worktree for a task.
    Creates branch 'task-{id}' from HEAD and checks it out in
    .worktrees/task-{id}. Shared orchestration state (.taskmaster)
    is symlinked so all agents share the same task data.
    Returns the worktree path.
    """
    worktrees_root = project_dir / ".worktrees"
    worktrees_root.mkdir(exist_ok=True)
    worktree_dir = worktrees_root / f"task-{task_id}"
    branch = f"task-{task_id}"
    # Clean up stale worktree/branch from a previous run
    # `git worktree remove` may itself fail (check=False), so fall back to
    # deleting the directory directly before force-deleting the old branch.
    if worktree_dir.exists():
        await run_git(project_dir, "worktree", "remove", "--force",
                      str(worktree_dir), check=False)
        if worktree_dir.exists():
            shutil.rmtree(worktree_dir)
    await run_git(project_dir, "branch", "-D", branch, check=False)
    # Create worktree with new branch based on current HEAD
    await run_git(
        project_dir, "worktree", "add", "-b", branch,
        str(worktree_dir), "HEAD",
    )
    # Replace tracked copies of orchestration state with symlinks
    _symlink_shared(project_dir, worktree_dir)
    # Per-task ANSI color keeps parallel agents' output distinguishable.
    tc = task_color(task_id)
    print(style(f"[worktree] Created .worktrees/task-{task_id}", SYSTEM) + " on branch " + style(branch, BOLD, tc))
    return worktree_dir
async def merge_worktree(
    project_dir: Path,
    task_id: int,
    merge_lock: asyncio.Lock,
) -> bool:
    """
    Merge the task branch back into the main branch.
    Uses a lock so only one merge runs at a time (git operations on the
    same repo are not concurrency-safe).
    .taskmaster/ conflicts are auto-resolved (keep ours) since task state
    is synced separately by the orchestrator. If real code conflicts
    remain, the merge is aborted and the branch preserved.
    Returns True on success, False if there was an unresolvable conflict.
    """
    branch = f"task-{task_id}"
    async with merge_lock:
        try:
            await run_git(
                project_dir, "merge", branch,
                "-m", f"Merge task-{task_id}",
            )
            print(style(f"[worktree] Merged {branch}", SYSTEM) + " " + style("into main", BRIGHT_GREEN))
            return True
        except RuntimeError:
            # Merge conflict — try to auto-resolve .taskmaster (task state
            # is already synced separately, so keep the main branch's copy).
            # Both commands are check=False: the paths may not be part of
            # the conflict at all.
            try:
                await run_git(project_dir, "checkout", "--ours", "--", ".taskmaster/", check=False)
                await run_git(project_dir, "add", ".taskmaster/", check=False)
                # Check if any conflicts remain after resolving .taskmaster
                # (--diff-filter=U lists only unmerged paths).
                unmerged = await run_git(project_dir, "diff", "--name-only", "--diff-filter=U", check=False)
                if unmerged.strip():
                    # Real code conflicts exist — abort
                    print(style(f"[worktree] Code conflict for {branch}: {unmerged.strip()}", WARN))
                    await run_git(project_dir, "merge", "--abort", check=False)
                    return False
                # All conflicts resolved — complete the merge
                await run_git(
                    project_dir, "commit", "--no-edit",
                )
                print(
                    style(f"[worktree] Merged {branch}", SYSTEM)
                    + " " + style("into main", BRIGHT_GREEN)
                    + style(" (auto-resolved .taskmaster)", DIM)
                )
                return True
            except RuntimeError as e2:
                # The commit itself failed — leave the repo clean and report.
                print(style(f"[worktree] Failed to resolve merge for {branch}: {e2}", WARN))
                await run_git(project_dir, "merge", "--abort", check=False)
                return False
async def cleanup_worktree(
    project_dir: Path,
    task_id: int,
    delete_branch: bool = True,
) -> None:
    """
    Remove the worktree directory.
    If delete_branch is True (merge succeeded), also delete the task branch.
    If False (merge conflict), preserve the branch for manual resolution.
    """
    worktree_dir = project_dir / ".worktrees" / f"task-{task_id}"
    branch = f"task-{task_id}"
    # Remove symlinks first so git worktree remove doesn't follow them
    _remove_symlinks(worktree_dir)
    # Best-effort removal (check=False), with a direct rmtree fallback.
    if worktree_dir.exists():
        await run_git(project_dir, "worktree", "remove", "--force",
                      str(worktree_dir), check=False)
        if worktree_dir.exists():
            shutil.rmtree(worktree_dir)
    if delete_branch:
        await run_git(project_dir, "branch", "-D", branch, check=False)
        print(style(f"[worktree] Cleaned up {branch}", SYSTEM))
    else:
        print(style(f"[worktree] Preserved branch {branch} for manual merge", WARN))
# ---------------------------------------------------------------------------
# Shared state management
# ---------------------------------------------------------------------------
# Files that should be shared across worktrees (not per-branch).
# NOTE: .taskmaster is intentionally NOT shared. Each worktree keeps its
# own git-checkout copy so the agent can read task details. All status
# writes go through the orchestrator which targets the main project_dir,
# avoiding symlink/sandbox issues entirely.
_SHARED_DIRS: list[str] = []
_SHARED_FILES = ["app_spec.txt", "claude-progress.txt", "events.jsonl"]
def _symlink_shared(project_dir: Path, worktree_dir: Path) -> None:
    """Swap the worktree's copies of shared state for symlinks to the main project."""
    for name in _SHARED_DIRS:
        source = project_dir / name
        target = worktree_dir / name
        if not source.exists():
            continue
        # A real directory (from the checkout) needs a recursive delete;
        # anything else (file or stale symlink) is a single unlink.
        if target.is_dir() and not target.is_symlink():
            shutil.rmtree(target)
        elif target.is_symlink() or target.exists():
            target.unlink()
        target.symlink_to(source.resolve())
    for name in _SHARED_FILES:
        source = project_dir / name
        target = worktree_dir / name
        if not source.exists():
            continue
        if target.is_symlink() or target.exists():
            target.unlink()
        target.symlink_to(source.resolve())
def _remove_symlinks(worktree_dir: Path) -> None:
    """Unlink our shared-state symlinks so `git worktree remove` won't follow them."""
    if not worktree_dir.exists():
        return
    shared = _SHARED_DIRS + _SHARED_FILES
    for candidate in (worktree_dir / name for name in shared):
        if candidate.is_symlink():
            candidate.unlink()

View File

@@ -0,0 +1,300 @@
"""
Agent Session Logic
===================
Core agent interaction functions for running autonomous coding sessions.
"""
import asyncio
import os
import shutil
from pathlib import Path
from typing import Optional
from claude_code_sdk import ClaudeSDKClient
from client import create_client
from events import EventBus
from progress import (
print_session_header,
print_progress_summary,
get_available_tasks,
all_tasks_done,
count_task_progress,
is_initialized,
)
from prompts import get_initializer_prompt, get_coding_prompt, get_tester_prompt, copy_spec_to_project
AUTO_CONTINUE_DELAY_SECONDS = 3
MAX_TEST_RETRIES = 3
async def taskmaster_init(project_dir: Path) -> None:
    """Bootstrap task-master by running `parse-prd` as a plain subprocess.

    Copies the bundled app spec into .taskmaster/docs/prd.txt and invokes
    `npx task-master-ai parse-prd` on it directly, which bypasses the MCP
    server and its API key validation.

    Raises:
        RuntimeError: if the parse-prd subprocess exits non-zero.
    """
    spec_source = Path(__file__).parent / "prompts" / "app_spec.txt"
    prd_path = project_dir / ".taskmaster" / "docs" / "prd.txt"
    prd_path.parent.mkdir(parents=True, exist_ok=True)
    shutil.copy(spec_source, prd_path)
    print(f"Copied spec to {prd_path}")
    child_env = {**os.environ}
    print("Running task-master parse-prd...")
    command = (
        "npx", "-y", "task-master-ai", "parse-prd",
        str(prd_path),
        "--project-root", str(project_dir),
        "--num-tasks", "25",
    )
    process = await asyncio.create_subprocess_exec(
        *command,
        cwd=str(project_dir),
        env=child_env,
    )
    exit_code = await process.wait()
    if exit_code != 0:
        raise RuntimeError(f"task-master parse-prd failed (exit {exit_code})")
async def run_agent_session(
    client: ClaudeSDKClient,
    message: str,
    project_dir: Path,
    label: str = "",
) -> tuple[str, str, str | None]:
    """
    Run a single agent session.
    Streams the agent's reply to stdout (prefixed with *label*) while
    accumulating assistant text. Message/block kinds are dispatched on the
    class *name* rather than isinstance, so this function does not need to
    import the SDK's message types.
    Returns:
        (status, response_text, session_id)
        status: "continue" | "error"
        session_id: the CLI session ID, used to resume with --resume
    """
    prefix = f"[{label}] " if label else ""
    print(f"{prefix}Sending prompt to Claude...\n")
    try:
        await client.query(message)
        response_text = ""
        session_id: str | None = None
        async for msg in client.receive_response():
            # NOTE(review): this throttles handling to one message per second;
            # presumably to pace console output — confirm it is intentional.
            await asyncio.sleep(1)
            msg_type = type(msg).__name__
            if msg_type == "ResultMessage":
                # Final message of the turn carries the resumable session id.
                session_id = getattr(msg, "session_id", None)
                continue
            if msg_type == "SystemMessage":
                if getattr(msg, "subtype", None) == "rate_limit_event":
                    print(f"\n{prefix}[Rate limited — pausing 5s]\n", flush=True)
                    await asyncio.sleep(5)
                continue
            if msg_type == "AssistantMessage" and hasattr(msg, "content"):
                for block in msg.content:
                    block_type = type(block).__name__
                    if block_type == "TextBlock" and hasattr(block, "text"):
                        response_text += block.text
                        print(block.text, end="", flush=True)
                    elif block_type == "ToolUseBlock" and hasattr(block, "name"):
                        print(f"\n{prefix}[Tool: {block.name}]", flush=True)
                        if hasattr(block, "input"):
                            # Truncate tool input so huge payloads don't flood the log.
                            input_str = str(block.input)
                            truncated = input_str[:200] + ("..." if len(input_str) > 200 else "")
                            print(f"  Input: {truncated}", flush=True)
            elif msg_type == "UserMessage" and hasattr(msg, "content"):
                for block in msg.content:
                    if type(block).__name__ == "ToolResultBlock":
                        result_content = getattr(block, "content", "")
                        is_error = getattr(block, "is_error", False)
                        # "blocked" in the result text signals a sandbox/policy denial.
                        if "blocked" in str(result_content).lower():
                            print(f"  {prefix}[BLOCKED] {result_content}", flush=True)
                        elif is_error:
                            print(f"  {prefix}[Error] {str(result_content)[:500]}", flush=True)
                        else:
                            print(f"  {prefix}[Done]", flush=True)
        print("\n" + "-" * 70 + "\n")
        return "continue", response_text, session_id
    except Exception as e:
        # Any failure is reported as status "error" with the message as text.
        print(f"{prefix}Error during session: {e}")
        return "error", str(e), None
async def run_coding_session(
    task_id: int,
    model: str,
    project_dir: Path,
    event_bus: EventBus,
) -> str | None:
    """Run one coding-agent session for *task_id*.

    Emits coding_start/coding_end events around the session and returns the
    CLI session id (or None) so the caller can later resume this agent.
    """
    await event_bus.emit("coding_start", task_id=task_id)
    agent = create_client(project_dir, model)
    task_prompt = get_coding_prompt(task_id)
    async with agent:
        status, _response, session_id = await run_agent_session(
            agent, task_prompt, project_dir, label=f"dev:{task_id}"
        )
    await event_bus.emit("coding_end", task_id=task_id, status=status, session_id=session_id)
    return session_id
async def run_tester_session(
    task_id: int,
    model: str,
    project_dir: Path,
    event_bus: EventBus,
) -> tuple[bool, str]:
    """Run the QA tester agent against *task_id*.

    Emits test_start/test_end events and returns (passed, response_text),
    where passed reflects the "TESTER_RESULT: PASS" sentinel in the reply.
    """
    await event_bus.emit("test_start", task_id=task_id)
    tester = create_client(
        project_dir,
        model,
        system_prompt="You are a meticulous QA engineer. Your only job is to test — never implement features.",
    )
    async with tester:
        _status, reply, _session = await run_agent_session(
            tester, get_tester_prompt(task_id), project_dir, label=f"test:{task_id}"
        )
    verdict = "TESTER_RESULT: PASS" in reply
    await event_bus.emit("test_end", task_id=task_id, passed=verdict)
    return verdict, reply
async def run_coding_worker(
    task_id: int,
    model: str,
    project_dir: Path,
    event_bus: EventBus,
) -> None:
    """
    Full coding + test cycle for one task.
    Flow:
    1. Coding agent implements the task
    2. Tester agent verifies it
    3. If tester fails, resume the dev session with feedback and retry
    4. Repeat up to MAX_TEST_RETRIES
    Returns None in all cases; outcomes are reported via event_bus
    ("task_complete" on pass, "task_max_retries" when retries run out).
    """
    print_session_header(f"task {task_id}: coding")
    dev_session_id = await run_coding_session(task_id, model, project_dir, event_bus)
    for attempt in range(1, MAX_TEST_RETRIES + 1):
        print_session_header(f"task {task_id}: testing (attempt {attempt}/{MAX_TEST_RETRIES})")
        passed, feedback = await run_tester_session(task_id, model, project_dir, event_bus)
        if passed:
            print(f"\n[Task {task_id}] PASSED — marked done by tester")
            await event_bus.emit("task_complete", task_id=task_id, attempts=attempt)
            return
        print(f"\n[Task {task_id}] FAILED — resuming dev with feedback")
        await event_bus.emit("test_failed", task_id=task_id, attempt=attempt)
        # Only resume when a session id exists and another test attempt
        # remains; a fix after the final failed test would go unverified.
        if dev_session_id and attempt < MAX_TEST_RETRIES:
            # Resume the original dev session with the failure details
            client = create_client(project_dir, model, resume=dev_session_id)
            fix_prompt = (
                f"The tester found issues with your implementation of task {task_id}.\n\n"
                f"Tester feedback:\n{feedback}\n\n"
                f"Please fix these issues and commit your changes."
            )
            async with client:
                # Keep the (possibly new) session id so the next retry
                # resumes the latest dev conversation.
                _, _, dev_session_id = await run_agent_session(
                    client, fix_prompt, project_dir, label=f"dev:{task_id}:fix{attempt}"
                )
    print(f"\n[Task {task_id}] Max retries reached — moving on")
    await event_bus.emit("task_max_retries", task_id=task_id)
async def run_autonomous_agent(
    project_dir: Path,
    model: str,
    max_iterations: Optional[int] = None,
    concurrency: int = 2,
) -> None:
    """
    Top-level orchestration loop.

    Phase 1 (once): an initializer agent reads the spec, uses task-master to
    turn it into a prioritized task graph, and sets up the project.

    Phase 2 (loop): dispatch up to `concurrency` coding workers in parallel,
    each owning one task through the full code → test → (retry) cycle.
    """
    banner = "=" * 70
    print("\n" + banner)
    print(" AUTONOMOUS CODING AGENT")
    print(banner)
    print(f"\nProject: {project_dir}")
    print(f"Model: {model}")
    print(f"Concurrency: {concurrency}")
    if max_iterations:
        print(f"Max iterations: {max_iterations}")
    print()

    project_dir.mkdir(parents=True, exist_ok=True)
    event_bus = EventBus(project_dir / "events.jsonl")
    await event_bus.emit("run_start", model=model, concurrency=concurrency)

    # Phase 1: one-time project initialization (or resume bookkeeping).
    if is_initialized(project_dir):
        done, total = count_task_progress(project_dir)
        print(f"Resuming — {done}/{total} tasks complete")
        print_progress_summary(project_dir)
    else:
        print("Fresh project — running initializer\n")
        copy_spec_to_project(project_dir)
        print_session_header("initializer")
        init_client = create_client(project_dir, model)
        async with init_client:
            await run_agent_session(
                init_client, get_initializer_prompt(), project_dir, label="init"
            )
        await event_bus.emit("initialized")

    # Phase 2: parallel coding loop.
    iteration = 0
    while True:
        iteration += 1
        if max_iterations and iteration > max_iterations:
            print(f"\nReached max iterations ({max_iterations})")
            break
        if all_tasks_done(project_dir):
            print("\nAll tasks complete!")
            break

        available = get_available_tasks(project_dir)
        if not available:
            # Everything runnable is either in flight or blocked; poll again soon.
            print("\nNo available tasks — waiting for in-progress tasks or blocked dependencies...")
            await asyncio.sleep(10)
            continue

        batch = available[:concurrency]
        task_ids = [entry["id"] for entry in batch]
        print(f"\nDispatching {len(batch)} task(s) in parallel: {task_ids}")
        await event_bus.emit("batch_start", task_ids=task_ids, iteration=iteration)
        await asyncio.gather(
            *(run_coding_worker(tid, model, project_dir, event_bus) for tid in task_ids)
        )
        await event_bus.emit("batch_end", task_ids=task_ids)
        print_progress_summary(project_dir)
        await asyncio.sleep(AUTO_CONTINUE_DELAY_SECONDS)

    done, total = count_task_progress(project_dir)
    print(f"\n{banner}")
    print(f" COMPLETE: {done}/{total} tasks done")
    print(f"{banner}")
    await event_bus.emit("run_end", done=done, total=total)

View File

@@ -0,0 +1,366 @@
"""
Agent Session Logic
===================
Core agent interaction functions for running autonomous coding sessions.
"""
import asyncio
import os
import shutil
from pathlib import Path
from typing import Optional
from claude_code_sdk import ClaudeSDKClient
from client import create_client
from events import EventBus
from progress import (
print_session_header,
print_progress_summary,
get_available_tasks,
all_tasks_done,
count_task_progress,
is_initialized,
reset_stale_tasks,
)
from prompts import get_initializer_prompt, get_coding_prompt, get_tester_prompt, copy_spec_to_project
from worktree import create_worktree, merge_worktree, cleanup_worktree
# Pause between orchestration loop iterations, in seconds.
AUTO_CONTINUE_DELAY_SECONDS = 3
# Maximum tester/fix cycles per task before the worker gives up.
MAX_TEST_RETRIES = 3
async def taskmaster_init(project_dir: Path) -> None:
    """
    Run task-master parse-prd directly as a subprocess.

    This avoids going through the MCP server and its API key validation.
    """
    # Stage the PRD where task-master expects to find it.
    spec_path = Path(__file__).parent / "prompts" / "app_spec.txt"
    prd_path = project_dir / ".taskmaster" / "docs" / "prd.txt"
    prd_path.parent.mkdir(parents=True, exist_ok=True)
    shutil.copy(spec_path, prd_path)
    print(f"Copied spec to {prd_path}")

    print("Running task-master parse-prd...")
    proc = await asyncio.create_subprocess_exec(
        "npx", "-y", "task-master-ai", "parse-prd",
        str(prd_path),
        "--project-root", str(project_dir),
        "--num-tasks", "25",
        cwd=str(project_dir),
        env={**os.environ},
    )
    rc = await proc.wait()
    if rc != 0:
        raise RuntimeError(f"task-master parse-prd failed (exit {rc})")
async def run_agent_session(
    client: ClaudeSDKClient,
    message: str,
    project_dir: Path,
    label: str = "",
) -> tuple[str, str, str | None]:
    """
    Run a single agent session: send one prompt and stream the response.

    Args:
        client: Connected SDK client to send the prompt through.
        message: The prompt text to send.
        project_dir: Project directory (unused here; kept for call-site symmetry).
        label: Short tag prefixed to console output for this session.

    Returns:
        (status, response_text, session_id)
        status: "continue" | "error"
        response_text: concatenated assistant text blocks
        session_id: the CLI session ID, used to resume with --resume
    """
    prefix = f"[{label}] " if label else ""
    print(f"{prefix}Sending prompt to Claude...\n")
    try:
        await client.query(message)
        response_text = ""
        session_id: str | None = None
        async for msg in client.receive_response():
            # FIX: removed the unconditional `await asyncio.sleep(1)` that ran
            # for every streamed message — it added a full second of latency
            # per message with no benefit (a later revision disables it too).
            msg_type = type(msg).__name__
            if msg_type == "ResultMessage":
                # Final message of the turn carries the resumable session id.
                session_id = getattr(msg, "session_id", None)
                continue
            if msg_type == "SystemMessage":
                # Back off briefly when the CLI reports a rate-limit event.
                if getattr(msg, "subtype", None) == "rate_limit_event":
                    print(f"\n{prefix}[Rate limited — pausing 5s]\n", flush=True)
                    await asyncio.sleep(5)
                continue
            if msg_type == "AssistantMessage" and hasattr(msg, "content"):
                for block in msg.content:
                    block_type = type(block).__name__
                    if block_type == "TextBlock" and hasattr(block, "text"):
                        response_text += block.text
                        print(block.text, end="", flush=True)
                    elif block_type == "ToolUseBlock" and hasattr(block, "name"):
                        print(f"\n{prefix}[Tool: {block.name}]", flush=True)
                        if hasattr(block, "input"):
                            input_str = str(block.input)
                            truncated = input_str[:200] + ("..." if len(input_str) > 200 else "")
                            print(f" Input: {truncated}", flush=True)
            elif msg_type == "UserMessage" and hasattr(msg, "content"):
                # Tool results come back as user-side messages.
                for block in msg.content:
                    if type(block).__name__ == "ToolResultBlock":
                        result_content = getattr(block, "content", "")
                        is_error = getattr(block, "is_error", False)
                        if "blocked" in str(result_content).lower():
                            print(f" {prefix}[BLOCKED] {result_content}", flush=True)
                        elif is_error:
                            print(f" {prefix}[Error] {str(result_content)[:500]}", flush=True)
                        else:
                            print(f" {prefix}[Done]", flush=True)
        print("\n" + "-" * 70 + "\n")
        return "continue", response_text, session_id
    except Exception as e:
        # Broad catch is deliberate: one failed session must not crash the run.
        print(f"{prefix}Error during session: {e}")
        return "error", str(e), None
async def run_coding_session(
    task_id: int,
    model: str,
    work_dir: Path,
    event_bus: EventBus,
) -> str | None:
    """Run one coding-agent session for a task and return its session id."""
    await event_bus.emit("coding_start", task_id=task_id)
    dev_client = create_client(work_dir, model)
    task_prompt = get_coding_prompt(task_id)
    async with dev_client:
        status, _response, session_id = await run_agent_session(
            dev_client,
            task_prompt,
            work_dir,
            label=f"dev:{task_id}",
        )
    await event_bus.emit(
        "coding_end", task_id=task_id, status=status, session_id=session_id
    )
    return session_id
async def run_tester_session(
    task_id: int,
    model: str,
    work_dir: Path,
    event_bus: EventBus,
) -> tuple[bool, str]:
    """Run the QA agent for a task and return (passed, response_text)."""
    await event_bus.emit("test_start", task_id=task_id)
    qa_client = create_client(
        work_dir,
        model,
        system_prompt=(
            "You are a meticulous QA engineer. "
            "Your only job is to test — never implement features."
        ),
    )
    async with qa_client:
        _status, response, _ = await run_agent_session(
            qa_client, get_tester_prompt(task_id), work_dir, label=f"test:{task_id}"
        )
    # The tester signals its verdict with a sentinel line in its output.
    passed = response.find("TESTER_RESULT: PASS") != -1
    await event_bus.emit("test_end", task_id=task_id, passed=passed)
    return passed, response
async def run_coding_worker(
    task_id: int,
    model: str,
    project_dir: Path,
    event_bus: EventBus,
    merge_lock: asyncio.Lock,
    use_worktrees: bool = True,
) -> None:
    """
    Drive one task through its full coding + test cycle.

    With use_worktrees enabled (the default) the agents operate inside an
    isolated git worktree so parallel workers don't touch each other's files;
    the task branch is merged back into main when the worker finishes.

    Flow:
        1. Create worktree (if enabled)
        2. Coding agent implements the task
        3. Tester agent verifies it
        4. If the tester fails, resume the dev session with feedback and retry
        5. Repeat up to MAX_TEST_RETRIES
        6. Merge worktree back into main and clean up
    """
    work_dir = project_dir
    if use_worktrees:
        try:
            work_dir = await create_worktree(project_dir, task_id)
        except RuntimeError as e:
            # Isolation is best-effort: degrade to the shared checkout.
            print(f"[Task {task_id}] Failed to create worktree: {e}")
            print(f"[Task {task_id}] Falling back to shared directory")
            work_dir = project_dir
            use_worktrees = False
    try:
        print_session_header(f"task {task_id}: coding")
        dev_session_id = await run_coding_session(task_id, model, work_dir, event_bus)

        attempt = 0
        while attempt < MAX_TEST_RETRIES:
            attempt += 1
            print_session_header(f"task {task_id}: testing (attempt {attempt}/{MAX_TEST_RETRIES})")
            ok, tester_notes = await run_tester_session(task_id, model, work_dir, event_bus)

            if ok:
                print(f"\n[Task {task_id}] PASSED — marked done by tester")
                await event_bus.emit("task_complete", task_id=task_id, attempts=attempt)
                return

            print(f"\n[Task {task_id}] FAILED — resuming dev with feedback")
            await event_bus.emit("test_failed", task_id=task_id, attempt=attempt)

            # Only resume the dev session when another test round remains.
            if dev_session_id and attempt < MAX_TEST_RETRIES:
                resume_client = create_client(work_dir, model, resume=dev_session_id)
                retry_message = (
                    f"The tester found issues with your implementation of task {task_id}.\n\n"
                    f"Tester feedback:\n{tester_notes}\n\n"
                    f"Please fix these issues and commit your changes."
                )
                async with resume_client:
                    _, _, dev_session_id = await run_agent_session(
                        resume_client, retry_message, work_dir, label=f"dev:{task_id}:fix{attempt}"
                    )

        print(f"\n[Task {task_id}] Max retries reached — moving on")
        await event_bus.emit("task_max_retries", task_id=task_id)
    finally:
        if use_worktrees:
            merged = await merge_worktree(project_dir, task_id, merge_lock)
            if not merged:
                print(f"[Task {task_id}] WARNING: merge conflict — branch task-{task_id} preserved")
                await event_bus.emit("merge_conflict", task_id=task_id)
            await cleanup_worktree(project_dir, task_id, delete_branch=merged)
async def run_autonomous_agent(
    project_dir: Path,
    model: str,
    max_iterations: Optional[int] = None,
    concurrency: int = 2,
    use_worktrees: bool = True,
) -> None:
    """
    Top-level orchestration loop.

    Phase 1 (once): an initializer agent reads the spec, uses task-master to
    turn it into a prioritized task graph, and sets up the project.

    Phase 2 (loop): dispatch up to `concurrency` coding workers in parallel,
    each owning one task through the full code → test → (retry) cycle.

    With use_worktrees enabled, each parallel task runs in an isolated git
    worktree so agents don't step on each other's files.
    """
    banner = "=" * 70
    print("\n" + banner)
    print(" AUTONOMOUS CODING AGENT")
    print(banner)
    print(f"\nProject: {project_dir}")
    print(f"Model: {model}")
    print(f"Concurrency: {concurrency}")
    print(f"Worktrees: {'enabled' if use_worktrees else 'disabled'}")
    if max_iterations:
        print(f"Max iterations: {max_iterations}")
    print()

    project_dir.mkdir(parents=True, exist_ok=True)
    event_bus = EventBus(project_dir / "events.jsonl")
    merge_lock = asyncio.Lock()
    await event_bus.emit("run_start", model=model, concurrency=concurrency, worktrees=use_worktrees)

    # Phase 1: one-time initialization (or resume bookkeeping).
    if is_initialized(project_dir):
        done, total = count_task_progress(project_dir)
        print(f"Resuming — {done}/{total} tasks complete")
        # Tasks left in-progress by an interrupted run must become pending again.
        reset_stale_tasks(project_dir)
        print_progress_summary(project_dir)
        if use_worktrees:
            _ensure_gitignore(project_dir)
    else:
        print("Fresh project — running initializer\n")
        copy_spec_to_project(project_dir)
        print_session_header("initializer")
        init_client = create_client(project_dir, model)
        async with init_client:
            await run_agent_session(
                init_client, get_initializer_prompt(), project_dir, label="init"
            )
        await event_bus.emit("initialized")
        # Keep .worktrees and orchestration artifacts out of the repo.
        if use_worktrees:
            _ensure_gitignore(project_dir)

    # Phase 2: parallel coding loop.
    iteration = 0
    while True:
        iteration += 1
        if max_iterations and iteration > max_iterations:
            print(f"\nReached max iterations ({max_iterations})")
            break
        if all_tasks_done(project_dir):
            print("\nAll tasks complete!")
            break

        available = get_available_tasks(project_dir)
        if not available:
            # Everything runnable is either in flight or blocked; poll again soon.
            print("\nNo available tasks — waiting for in-progress tasks or blocked dependencies...")
            await asyncio.sleep(10)
            continue

        batch = available[:concurrency]
        task_ids = [entry["id"] for entry in batch]
        print(f"\nDispatching {len(batch)} task(s) in parallel: {task_ids}")
        await event_bus.emit("batch_start", task_ids=task_ids, iteration=iteration)
        await asyncio.gather(
            *(
                run_coding_worker(tid, model, project_dir, event_bus, merge_lock, use_worktrees)
                for tid in task_ids
            )
        )
        await event_bus.emit("batch_end", task_ids=task_ids)
        print_progress_summary(project_dir)
        await asyncio.sleep(AUTO_CONTINUE_DELAY_SECONDS)

    done, total = count_task_progress(project_dir)
    print(f"\n{banner}")
    print(f" COMPLETE: {done}/{total} tasks done")
    print(f"{banner}")
    await event_bus.emit("run_end", done=done, total=total)
def _ensure_gitignore(project_dir: Path) -> None:
"""Ensure .worktrees and orchestration files are gitignored."""
gitignore = project_dir / ".gitignore"
entries = [".worktrees/", "events.jsonl", ".claude_settings.json"]
existing = ""
if gitignore.exists():
existing = gitignore.read_text()
to_add = [e for e in entries if e not in existing]
if to_add:
with open(gitignore, "a") as f:
if existing and not existing.endswith("\n"):
f.write("\n")
f.write("# Orchestration artifacts\n")
for entry in to_add:
f.write(entry + "\n")
print(f"[gitignore] Added: {', '.join(to_add)}")

View File

@@ -0,0 +1,380 @@
"""
Agent Session Logic
===================
Core agent interaction functions for running autonomous coding sessions.
"""
import asyncio
import os
import shutil
from pathlib import Path
from typing import Optional
from claude_code_sdk import ClaudeSDKClient
from client import create_client
from events import EventBus
from progress import (
print_session_header,
print_progress_summary,
get_available_tasks,
all_tasks_done,
count_task_progress,
is_initialized,
reset_stale_tasks,
set_task_status,
)
from prompts import get_initializer_prompt, get_coding_prompt, get_tester_prompt, copy_spec_to_project
from worktree import create_worktree, merge_worktree, cleanup_worktree
# Pause between orchestration loop iterations, in seconds.
AUTO_CONTINUE_DELAY_SECONDS = 3
# Maximum tester/fix cycles per task before the worker gives up.
MAX_TEST_RETRIES = 3
async def taskmaster_init(project_dir: Path) -> None:
    """
    Run task-master parse-prd directly as a subprocess.

    This avoids going through the MCP server and its API key validation.
    """
    # Stage the PRD where task-master expects to find it.
    spec_path = Path(__file__).parent / "prompts" / "app_spec.txt"
    prd_path = project_dir / ".taskmaster" / "docs" / "prd.txt"
    prd_path.parent.mkdir(parents=True, exist_ok=True)
    shutil.copy(spec_path, prd_path)
    print(f"Copied spec to {prd_path}")

    print("Running task-master parse-prd...")
    proc = await asyncio.create_subprocess_exec(
        "npx", "-y", "task-master-ai", "parse-prd",
        str(prd_path),
        "--project-root", str(project_dir),
        "--num-tasks", "25",
        cwd=str(project_dir),
        env={**os.environ},
    )
    rc = await proc.wait()
    if rc != 0:
        raise RuntimeError(f"task-master parse-prd failed (exit {rc})")
async def run_agent_session(
    client: ClaudeSDKClient,
    message: str,
    project_dir: Path,
    label: str = "",
) -> tuple[str, str, str | None]:
    """
    Run a single agent session: send one prompt and stream the response.

    Args:
        client: Connected SDK client to send the prompt through.
        message: The prompt text to send.
        project_dir: Project directory (unused here; kept for call-site symmetry).
        label: Short tag prefixed to console output for this session.

    Returns:
        (status, response_text, session_id)
        status: "continue" | "error"
        response_text: concatenated assistant text blocks
        session_id: the CLI session ID, used to resume with --resume
    """
    prefix = f"[{label}] " if label else ""
    print(f"{prefix}Sending prompt to Claude...\n")
    try:
        await client.query(message)
        response_text = ""
        session_id: str | None = None
        async for msg in client.receive_response():
            # FIX: removed the unconditional `await asyncio.sleep(1)` that ran
            # for every streamed message — it added a full second of latency
            # per message with no benefit (a later revision disables it too).
            msg_type = type(msg).__name__
            if msg_type == "ResultMessage":
                # Final message of the turn carries the resumable session id.
                session_id = getattr(msg, "session_id", None)
                continue
            if msg_type == "SystemMessage":
                # Back off briefly when the CLI reports a rate-limit event.
                if getattr(msg, "subtype", None) == "rate_limit_event":
                    print(f"\n{prefix}[Rate limited — pausing 5s]\n", flush=True)
                    await asyncio.sleep(5)
                continue
            if msg_type == "AssistantMessage" and hasattr(msg, "content"):
                for block in msg.content:
                    block_type = type(block).__name__
                    if block_type == "TextBlock" and hasattr(block, "text"):
                        response_text += block.text
                        print(block.text, end="", flush=True)
                    elif block_type == "ToolUseBlock" and hasattr(block, "name"):
                        print(f"\n{prefix}[Tool: {block.name}]", flush=True)
                        if hasattr(block, "input"):
                            input_str = str(block.input)
                            truncated = input_str[:200] + ("..." if len(input_str) > 200 else "")
                            print(f" Input: {truncated}", flush=True)
            elif msg_type == "UserMessage" and hasattr(msg, "content"):
                # Tool results come back as user-side messages.
                for block in msg.content:
                    if type(block).__name__ == "ToolResultBlock":
                        result_content = getattr(block, "content", "")
                        is_error = getattr(block, "is_error", False)
                        if "blocked" in str(result_content).lower():
                            print(f" {prefix}[BLOCKED] {result_content}", flush=True)
                        elif is_error:
                            print(f" {prefix}[Error] {str(result_content)[:500]}", flush=True)
                        else:
                            print(f" {prefix}[Done]", flush=True)
        print("\n" + "-" * 70 + "\n")
        return "continue", response_text, session_id
    except Exception as e:
        # Broad catch is deliberate: one failed session must not crash the run.
        print(f"{prefix}Error during session: {e}")
        return "error", str(e), None
async def run_coding_session(
    task_id: int,
    model: str,
    work_dir: Path,
    event_bus: EventBus,
) -> str | None:
    """Run one coding-agent session for a task and return its session id."""
    await event_bus.emit("coding_start", task_id=task_id)
    dev_client = create_client(work_dir, model)
    task_prompt = get_coding_prompt(task_id)
    async with dev_client:
        status, _response, session_id = await run_agent_session(
            dev_client,
            task_prompt,
            work_dir,
            label=f"dev:{task_id}",
        )
    await event_bus.emit(
        "coding_end", task_id=task_id, status=status, session_id=session_id
    )
    return session_id
async def run_tester_session(
    task_id: int,
    model: str,
    work_dir: Path,
    event_bus: EventBus,
) -> tuple[bool, str]:
    """Run the QA agent for a task and return (passed, response_text)."""
    await event_bus.emit("test_start", task_id=task_id)
    qa_client = create_client(
        work_dir,
        model,
        system_prompt=(
            "You are a meticulous QA engineer. "
            "Your only job is to test — never implement features."
        ),
    )
    async with qa_client:
        _status, response, _ = await run_agent_session(
            qa_client, get_tester_prompt(task_id), work_dir, label=f"test:{task_id}"
        )
    # The tester signals its verdict with a sentinel line in its output.
    passed = response.find("TESTER_RESULT: PASS") != -1
    await event_bus.emit("test_end", task_id=task_id, passed=passed)
    return passed, response
async def run_coding_worker(
    task_id: int,
    model: str,
    project_dir: Path,
    event_bus: EventBus,
    merge_lock: asyncio.Lock,
    use_worktrees: bool = True,
) -> None:
    """
    Drive one task through its full coding + test cycle.

    With use_worktrees enabled (the default) the agents operate inside an
    isolated git worktree so parallel workers don't touch each other's files;
    the task branch is merged back into main when the worker finishes.

    Flow:
        1. Create worktree (if enabled)
        2. Coding agent implements the task
        3. Tester agent verifies it
        4. If the tester fails, resume the dev session with feedback and retry
        5. Repeat up to MAX_TEST_RETRIES
        6. Merge worktree back into main and clean up
    """
    work_dir = project_dir
    task_done = False
    if use_worktrees:
        try:
            work_dir = await create_worktree(project_dir, task_id)
        except RuntimeError as e:
            # Isolation is best-effort: degrade to the shared checkout.
            print(f"[Task {task_id}] Failed to create worktree: {e}")
            print(f"[Task {task_id}] Falling back to shared directory")
            work_dir = project_dir
            use_worktrees = False

    # Mark in-progress from the orchestrator (don't rely on the agent doing it)
    set_task_status(project_dir, task_id, "in-progress")
    try:
        print_session_header(f"task {task_id}: coding")
        dev_session_id = await run_coding_session(task_id, model, work_dir, event_bus)

        attempt = 0
        while attempt < MAX_TEST_RETRIES:
            attempt += 1
            print_session_header(f"task {task_id}: testing (attempt {attempt}/{MAX_TEST_RETRIES})")
            ok, tester_notes = await run_tester_session(task_id, model, work_dir, event_bus)

            if ok:
                # Belt-and-suspenders: mark done from the orchestrator too,
                # in case the tester's MCP set_task_status call didn't fire.
                set_task_status(project_dir, task_id, "done")
                task_done = True
                print(f"\n[Task {task_id}] PASSED — marked done")
                await event_bus.emit("task_complete", task_id=task_id, attempts=attempt)
                return

            print(f"\n[Task {task_id}] FAILED — resuming dev with feedback")
            await event_bus.emit("test_failed", task_id=task_id, attempt=attempt)

            # Only resume the dev session when another test round remains.
            if dev_session_id and attempt < MAX_TEST_RETRIES:
                resume_client = create_client(work_dir, model, resume=dev_session_id)
                retry_message = (
                    f"The tester found issues with your implementation of task {task_id}.\n\n"
                    f"Tester feedback:\n{tester_notes}\n\n"
                    f"Please fix these issues and commit your changes."
                )
                async with resume_client:
                    _, _, dev_session_id = await run_agent_session(
                        resume_client, retry_message, work_dir, label=f"dev:{task_id}:fix{attempt}"
                    )

        print(f"\n[Task {task_id}] Max retries reached — moving on")
        await event_bus.emit("task_max_retries", task_id=task_id)
    finally:
        # Unfinished tasks go back to pending so a later batch can retry them.
        if not task_done:
            set_task_status(project_dir, task_id, "pending")
            print(f"[Task {task_id}] Reset to pending (was not marked done)")
        if use_worktrees:
            merged = await merge_worktree(project_dir, task_id, merge_lock)
            if not merged:
                print(f"[Task {task_id}] WARNING: merge conflict — branch task-{task_id} preserved")
                await event_bus.emit("merge_conflict", task_id=task_id)
            await cleanup_worktree(project_dir, task_id, delete_branch=merged)
async def run_autonomous_agent(
    project_dir: Path,
    model: str,
    max_iterations: Optional[int] = None,
    concurrency: int = 2,
    use_worktrees: bool = True,
) -> None:
    """
    Top-level orchestration loop.

    Phase 1 (once): an initializer agent reads the spec, uses task-master to
    turn it into a prioritized task graph, and sets up the project.

    Phase 2 (loop): dispatch up to `concurrency` coding workers in parallel,
    each owning one task through the full code → test → (retry) cycle.

    With use_worktrees enabled, each parallel task runs in an isolated git
    worktree so agents don't step on each other's files.
    """
    banner = "=" * 70
    print("\n" + banner)
    print(" AUTONOMOUS CODING AGENT")
    print(banner)
    print(f"\nProject: {project_dir}")
    print(f"Model: {model}")
    print(f"Concurrency: {concurrency}")
    print(f"Worktrees: {'enabled' if use_worktrees else 'disabled'}")
    if max_iterations:
        print(f"Max iterations: {max_iterations}")
    print()

    project_dir.mkdir(parents=True, exist_ok=True)
    event_bus = EventBus(project_dir / "events.jsonl")
    merge_lock = asyncio.Lock()
    await event_bus.emit("run_start", model=model, concurrency=concurrency, worktrees=use_worktrees)

    # Phase 1: one-time initialization (or resume bookkeeping).
    if is_initialized(project_dir):
        done, total = count_task_progress(project_dir)
        print(f"Resuming — {done}/{total} tasks complete")
        # Tasks left in-progress by an interrupted run must become pending again.
        reset_stale_tasks(project_dir)
        print_progress_summary(project_dir)
        if use_worktrees:
            _ensure_gitignore(project_dir)
    else:
        print("Fresh project — running initializer\n")
        copy_spec_to_project(project_dir)
        print_session_header("initializer")
        init_client = create_client(project_dir, model)
        async with init_client:
            await run_agent_session(
                init_client, get_initializer_prompt(), project_dir, label="init"
            )
        await event_bus.emit("initialized")
        # Keep .worktrees and orchestration artifacts out of the repo.
        if use_worktrees:
            _ensure_gitignore(project_dir)

    # Phase 2: parallel coding loop.
    iteration = 0
    while True:
        iteration += 1
        if max_iterations and iteration > max_iterations:
            print(f"\nReached max iterations ({max_iterations})")
            break
        if all_tasks_done(project_dir):
            print("\nAll tasks complete!")
            break

        available = get_available_tasks(project_dir)
        if not available:
            # Everything runnable is either in flight or blocked; poll again soon.
            print("\nNo available tasks — waiting for in-progress tasks or blocked dependencies...")
            await asyncio.sleep(10)
            continue

        batch = available[:concurrency]
        task_ids = [entry["id"] for entry in batch]
        print(f"\nDispatching {len(batch)} task(s) in parallel: {task_ids}")
        await event_bus.emit("batch_start", task_ids=task_ids, iteration=iteration)
        await asyncio.gather(
            *(
                run_coding_worker(tid, model, project_dir, event_bus, merge_lock, use_worktrees)
                for tid in task_ids
            )
        )
        await event_bus.emit("batch_end", task_ids=task_ids)
        print_progress_summary(project_dir)
        await asyncio.sleep(AUTO_CONTINUE_DELAY_SECONDS)

    done, total = count_task_progress(project_dir)
    print(f"\n{banner}")
    print(f" COMPLETE: {done}/{total} tasks done")
    print(f"{banner}")
    await event_bus.emit("run_end", done=done, total=total)
def _ensure_gitignore(project_dir: Path) -> None:
"""Ensure .worktrees and orchestration files are gitignored."""
gitignore = project_dir / ".gitignore"
entries = [".worktrees/", "events.jsonl", ".claude_settings.json"]
existing = ""
if gitignore.exists():
existing = gitignore.read_text()
to_add = [e for e in entries if e not in existing]
if to_add:
with open(gitignore, "a") as f:
if existing and not existing.endswith("\n"):
f.write("\n")
f.write("# Orchestration artifacts\n")
for entry in to_add:
f.write(entry + "\n")
print(f"[gitignore] Added: {', '.join(to_add)}")

View File

@@ -0,0 +1,380 @@
"""
Agent Session Logic
===================
Core agent interaction functions for running autonomous coding sessions.
"""
import asyncio
import os
import shutil
from pathlib import Path
from typing import Optional
from claude_code_sdk import ClaudeSDKClient
from client import create_client
from events import EventBus
from progress import (
print_session_header,
print_progress_summary,
get_available_tasks,
all_tasks_done,
count_task_progress,
is_initialized,
reset_stale_tasks,
set_task_status,
)
from prompts import get_initializer_prompt, get_coding_prompt, get_tester_prompt, copy_spec_to_project
from worktree import create_worktree, merge_worktree, cleanup_worktree
# Pause between orchestration loop iterations, in seconds.
AUTO_CONTINUE_DELAY_SECONDS = 3
# Maximum tester/fix cycles per task before the worker gives up.
MAX_TEST_RETRIES = 3
async def taskmaster_init(project_dir: Path) -> None:
    """
    Run task-master parse-prd directly as a subprocess.

    This avoids going through the MCP server and its API key validation.
    """
    # Stage the PRD where task-master expects to find it.
    spec_path = Path(__file__).parent / "prompts" / "app_spec.txt"
    prd_path = project_dir / ".taskmaster" / "docs" / "prd.txt"
    prd_path.parent.mkdir(parents=True, exist_ok=True)
    shutil.copy(spec_path, prd_path)
    print(f"Copied spec to {prd_path}")

    print("Running task-master parse-prd...")
    proc = await asyncio.create_subprocess_exec(
        "npx", "-y", "task-master-ai", "parse-prd",
        str(prd_path),
        "--project-root", str(project_dir),
        "--num-tasks", "25",
        cwd=str(project_dir),
        env={**os.environ},
    )
    rc = await proc.wait()
    if rc != 0:
        raise RuntimeError(f"task-master parse-prd failed (exit {rc})")
async def run_agent_session(
    client: ClaudeSDKClient,
    message: str,
    project_dir: Path,
    label: str = "",
) -> tuple[str, str, str | None]:
    """
    Run a single agent session: send one prompt and stream the response.

    Args:
        client: Connected SDK client to send the prompt through.
        message: The prompt text to send.
        project_dir: Project directory (unused here; kept for call-site symmetry).
        label: Short tag prefixed to console output for this session.

    Returns:
        (status, response_text, session_id)
        status: "continue" | "error"
        session_id: the CLI session ID, used to resume with --resume
    """
    prefix = f"[{label}] " if label else ""
    print(f"{prefix}Sending prompt to Claude...\n")
    try:
        await client.query(message)
        response_text = ""
        session_id: str | None = None
        # (Removed the commented-out per-message sleep that used to live here.)
        async for msg in client.receive_response():
            msg_type = type(msg).__name__
            if msg_type == "ResultMessage":
                # Final message of the turn carries the resumable session id.
                session_id = getattr(msg, "session_id", None)
                continue
            if msg_type == "SystemMessage":
                # Back off briefly when the CLI reports a rate-limit event.
                if getattr(msg, "subtype", None) == "rate_limit_event":
                    print(f"\n{prefix}[Rate limited — pausing 5s]\n", flush=True)
                    await asyncio.sleep(5)
                continue
            if msg_type == "AssistantMessage" and hasattr(msg, "content"):
                for block in msg.content:
                    block_type = type(block).__name__
                    if block_type == "TextBlock" and hasattr(block, "text"):
                        response_text += block.text
                        print(block.text, end="", flush=True)
                    elif block_type == "ToolUseBlock" and hasattr(block, "name"):
                        print(f"\n{prefix}[Tool: {block.name}]", flush=True)
                        if hasattr(block, "input"):
                            input_str = str(block.input)
                            truncated = input_str[:200] + ("..." if len(input_str) > 200 else "")
                            print(f" Input: {truncated}", flush=True)
            elif msg_type == "UserMessage" and hasattr(msg, "content"):
                # Tool results come back as user-side messages.
                for block in msg.content:
                    if type(block).__name__ == "ToolResultBlock":
                        result_content = getattr(block, "content", "")
                        is_error = getattr(block, "is_error", False)
                        if "blocked" in str(result_content).lower():
                            print(f" {prefix}[BLOCKED] {result_content}", flush=True)
                        elif is_error:
                            print(f" {prefix}[Error] {str(result_content)[:500]}", flush=True)
                        else:
                            print(f" {prefix}[Done]", flush=True)
        print("\n" + "-" * 70 + "\n")
        return "continue", response_text, session_id
    except Exception as e:
        # Broad catch is deliberate: one failed session must not crash the run.
        print(f"{prefix}Error during session: {e}")
        return "error", str(e), None
async def run_coding_session(
    task_id: int,
    model: str,
    work_dir: Path,
    event_bus: EventBus,
) -> str | None:
    """Run one coding-agent session for a task and return its session id."""
    await event_bus.emit("coding_start", task_id=task_id)
    dev_client = create_client(work_dir, model)
    task_prompt = get_coding_prompt(task_id)
    async with dev_client:
        status, _response, session_id = await run_agent_session(
            dev_client,
            task_prompt,
            work_dir,
            label=f"dev:{task_id}",
        )
    await event_bus.emit(
        "coding_end", task_id=task_id, status=status, session_id=session_id
    )
    return session_id
async def run_tester_session(
    task_id: int,
    model: str,
    work_dir: Path,
    event_bus: EventBus,
) -> tuple[bool, str]:
    """Run the QA agent for a task and return (passed, response_text)."""
    await event_bus.emit("test_start", task_id=task_id)
    qa_client = create_client(
        work_dir,
        model,
        system_prompt=(
            "You are a meticulous QA engineer. "
            "Your only job is to test — never implement features."
        ),
    )
    async with qa_client:
        _status, response, _ = await run_agent_session(
            qa_client, get_tester_prompt(task_id), work_dir, label=f"test:{task_id}"
        )
    # The tester signals its verdict with a sentinel line in its output.
    passed = response.find("TESTER_RESULT: PASS") != -1
    await event_bus.emit("test_end", task_id=task_id, passed=passed)
    return passed, response
async def run_coding_worker(
    task_id: int,
    model: str,
    project_dir: Path,
    event_bus: EventBus,
    merge_lock: asyncio.Lock,
    use_worktrees: bool = True,
) -> None:
    """
    Drive one task through its full coding + test cycle.

    With use_worktrees enabled (the default) the agents operate inside an
    isolated git worktree so parallel workers don't touch each other's files;
    the task branch is merged back into main when the worker finishes.

    Flow:
        1. Create worktree (if enabled)
        2. Coding agent implements the task
        3. Tester agent verifies it
        4. If the tester fails, resume the dev session with feedback and retry
        5. Repeat up to MAX_TEST_RETRIES
        6. Merge worktree back into main and clean up
    """
    work_dir = project_dir
    task_done = False
    if use_worktrees:
        try:
            work_dir = await create_worktree(project_dir, task_id)
        except RuntimeError as e:
            # Isolation is best-effort: degrade to the shared checkout.
            print(f"[Task {task_id}] Failed to create worktree: {e}")
            print(f"[Task {task_id}] Falling back to shared directory")
            work_dir = project_dir
            use_worktrees = False

    # Mark in-progress from the orchestrator (don't rely on the agent doing it)
    set_task_status(project_dir, task_id, "in-progress")
    try:
        print_session_header(f"task {task_id}: coding")
        dev_session_id = await run_coding_session(task_id, model, work_dir, event_bus)

        attempt = 0
        while attempt < MAX_TEST_RETRIES:
            attempt += 1
            print_session_header(f"task {task_id}: testing (attempt {attempt}/{MAX_TEST_RETRIES})")
            ok, tester_notes = await run_tester_session(task_id, model, work_dir, event_bus)

            if ok:
                # Belt-and-suspenders: mark done from the orchestrator too,
                # in case the tester's MCP set_task_status call didn't fire.
                set_task_status(project_dir, task_id, "done")
                task_done = True
                print(f"\n[Task {task_id}] PASSED — marked done")
                await event_bus.emit("task_complete", task_id=task_id, attempts=attempt)
                return

            print(f"\n[Task {task_id}] FAILED — resuming dev with feedback")
            await event_bus.emit("test_failed", task_id=task_id, attempt=attempt)

            # Only resume the dev session when another test round remains.
            if dev_session_id and attempt < MAX_TEST_RETRIES:
                resume_client = create_client(work_dir, model, resume=dev_session_id)
                retry_message = (
                    f"The tester found issues with your implementation of task {task_id}.\n\n"
                    f"Tester feedback:\n{tester_notes}\n\n"
                    f"Please fix these issues and commit your changes."
                )
                async with resume_client:
                    _, _, dev_session_id = await run_agent_session(
                        resume_client, retry_message, work_dir, label=f"dev:{task_id}:fix{attempt}"
                    )

        print(f"\n[Task {task_id}] Max retries reached — moving on")
        await event_bus.emit("task_max_retries", task_id=task_id)
    finally:
        # Unfinished tasks go back to pending so a later batch can retry them.
        if not task_done:
            set_task_status(project_dir, task_id, "pending")
            print(f"[Task {task_id}] Reset to pending (was not marked done)")
        if use_worktrees:
            merged = await merge_worktree(project_dir, task_id, merge_lock)
            if not merged:
                print(f"[Task {task_id}] WARNING: merge conflict — branch task-{task_id} preserved")
                await event_bus.emit("merge_conflict", task_id=task_id)
            await cleanup_worktree(project_dir, task_id, delete_branch=merged)
async def run_autonomous_agent(
    project_dir: Path,
    model: str,
    max_iterations: Optional[int] = None,
    concurrency: int = 2,
    use_worktrees: bool = True,
) -> None:
    """
    Main orchestration loop.

    Phase 1 (once): Initializer agent reads the spec, uses task-master to
    parse it into a prioritized task graph, and sets up the project.
    Phase 2 (loop): Dispatch up to `concurrency` coding workers in parallel,
    each owning one task through the full code → test → (retry) cycle.

    When use_worktrees is True, each parallel task runs in an isolated git
    worktree so agents don't step on each other's files.
    """
    print("\n" + "=" * 70)
    print(" AUTONOMOUS CODING AGENT")
    print("=" * 70)
    print(f"\nProject: {project_dir}")
    print(f"Model: {model}")
    print(f"Concurrency: {concurrency}")
    print(f"Worktrees: {'enabled' if use_worktrees else 'disabled'}")
    if max_iterations:
        print(f"Max iterations: {max_iterations}")
    print()
    project_dir.mkdir(parents=True, exist_ok=True)
    # One shared event log for the whole run, plus a lock that serializes
    # worktree merges back into main (workers run concurrently).
    event_bus = EventBus(project_dir / "events.jsonl")
    merge_lock = asyncio.Lock()
    await event_bus.emit("run_start", model=model, concurrency=concurrency, worktrees=use_worktrees)
    # Phase 1: Initialize
    if not is_initialized(project_dir):
        print("Fresh project — running initializer\n")
        copy_spec_to_project(project_dir)
        print_session_header("initializer")
        client = create_client(project_dir, model)
        async with client:
            await run_agent_session(
                client, get_initializer_prompt(), project_dir, label="init"
            )
        await event_bus.emit("initialized")
        # Ensure .worktrees and other orchestration artifacts are gitignored
        if use_worktrees:
            _ensure_gitignore(project_dir)
    else:
        done, total = count_task_progress(project_dir)
        print(f"Resuming — {done}/{total} tasks complete")
        # Reset tasks stuck as in-progress from a previous interrupted run
        reset_stale_tasks(project_dir)
        print_progress_summary(project_dir)
        if use_worktrees:
            _ensure_gitignore(project_dir)
    # Phase 2: Parallel coding loop
    iteration = 0
    while True:
        iteration += 1
        if max_iterations and iteration > max_iterations:
            print(f"\nReached max iterations ({max_iterations})")
            break
        if all_tasks_done(project_dir):
            print("\nAll tasks complete!")
            break
        available = get_available_tasks(project_dir)
        if not available:
            # Remaining tasks are blocked on unfinished dependencies;
            # poll until one becomes available.
            print("\nNo available tasks — waiting for in-progress tasks or blocked dependencies...")
            await asyncio.sleep(10)
            continue
        batch = available[:concurrency]
        task_ids = [t["id"] for t in batch]
        print(f"\nDispatching {len(batch)} task(s) in parallel: {task_ids}")
        await event_bus.emit("batch_start", task_ids=task_ids, iteration=iteration)
        await asyncio.gather(*[
            run_coding_worker(task_id, model, project_dir, event_bus, merge_lock, use_worktrees)
            for task_id in task_ids
        ])
        await event_bus.emit("batch_end", task_ids=task_ids)
        print_progress_summary(project_dir)
        await asyncio.sleep(AUTO_CONTINUE_DELAY_SECONDS)
    done, total = count_task_progress(project_dir)
    print(f"\n{'='*70}")
    print(f" COMPLETE: {done}/{total} tasks done")
    print(f"{'='*70}")
    await event_bus.emit("run_end", done=done, total=total)
def _ensure_gitignore(project_dir: Path) -> None:
"""Ensure .worktrees and orchestration files are gitignored."""
gitignore = project_dir / ".gitignore"
entries = [".worktrees/", "events.jsonl", ".claude_settings.json"]
existing = ""
if gitignore.exists():
existing = gitignore.read_text()
to_add = [e for e in entries if e not in existing]
if to_add:
with open(gitignore, "a") as f:
if existing and not existing.endswith("\n"):
f.write("\n")
f.write("# Orchestration artifacts\n")
for entry in to_add:
f.write(entry + "\n")
print(f"[gitignore] Added: {', '.join(to_add)}")

View File

@@ -0,0 +1,386 @@
"""
Agent Session Logic
===================
Core agent interaction functions for running autonomous coding sessions.
"""
import asyncio
import os
import shutil
from pathlib import Path
from typing import Optional
from claude_code_sdk import ClaudeSDKClient
from client import create_client
from colors import (
style, label_for, header_bar, progress_line,
BOLD, DIM, RESET,
TOOL_NAME, TOOL_INPUT, TOOL_DONE, TOOL_ERROR, TOOL_BLOCKED,
THINKING, RATE_LIMIT, SYSTEM, DISPATCH,
PASS, FAIL, WARN, LABEL_ORCH,
BRIGHT_GREEN, BRIGHT_RED,
task_color,
)
from events import EventBus
from progress import (
get_available_tasks,
all_tasks_done,
count_task_progress,
is_initialized,
reset_stale_tasks,
set_task_status,
)
from prompts import get_initializer_prompt, get_coding_prompt, get_tester_prompt, copy_spec_to_project
from worktree import create_worktree, merge_worktree, cleanup_worktree
AUTO_CONTINUE_DELAY_SECONDS = 3
MAX_TEST_RETRIES = 3
async def taskmaster_init(project_dir: Path) -> None:
    """
    Run task-master parse-prd directly as a subprocess.

    This avoids going through the MCP server and its API key validation.

    Copies the bundled app spec into the project's .taskmaster/docs
    directory, then shells out to `npx task-master-ai parse-prd` to turn
    it into a task graph.

    Raises:
        RuntimeError: if the parse-prd subprocess exits non-zero.
    """
    # The spec ships alongside this module; task-master expects to find it
    # inside the project at .taskmaster/docs/prd.txt.
    prd_src = Path(__file__).parent / "prompts" / "app_spec.txt"
    prd_dst = project_dir / ".taskmaster" / "docs" / "prd.txt"
    prd_dst.parent.mkdir(parents=True, exist_ok=True)
    shutil.copy(prd_src, prd_dst)
    print(f"Copied spec to {prd_dst}")
    # Pass the current environment through to the subprocess (as a copy).
    env = {**os.environ}
    print("Running task-master parse-prd...")
    proc = await asyncio.create_subprocess_exec(
        "npx", "-y", "task-master-ai", "parse-prd",
        str(prd_dst),
        "--project-root", str(project_dir),
        "--num-tasks", "25",
        cwd=str(project_dir),
        env=env,
    )
    returncode = await proc.wait()
    if returncode != 0:
        raise RuntimeError(f"task-master parse-prd failed (exit {returncode})")
async def run_agent_session(
    client: ClaudeSDKClient,
    message: str,
    project_dir: Path,
    label: str = "",
) -> tuple[str, str, str | None]:
    """
    Run a single agent session.

    Sends *message* to the client and streams the response to stdout as it
    arrives: assistant text, tool invocations (name + truncated input), and
    tool results are echoed live with color styling.

    Returns:
        (status, response_text, session_id)
        status: "continue" | "error"
        session_id: the CLI session ID, used to resume with --resume
    """
    tag = label_for(label)
    prefix = f"{tag} " if tag else ""
    print(f"{prefix}Sending prompt to Claude...\n")
    try:
        await client.query(message)
        response_text = ""
        session_id: str | None = None
        async for msg in client.receive_response():
            # Dispatch on the class name rather than isinstance so this
            # loop tolerates SDK message-type changes.
            msg_type = type(msg).__name__
            if msg_type == "ResultMessage":
                # The final message of the turn carries the resumable session ID.
                session_id = getattr(msg, "session_id", None)
                continue
            if msg_type == "SystemMessage":
                if getattr(msg, "subtype", None) == "rate_limit_event":
                    # Back off briefly, then keep consuming the stream.
                    print(f"\n{prefix}{style('[Rate limited — pausing 5s]', RATE_LIMIT)}\n", flush=True)
                    await asyncio.sleep(5)
                continue
            if msg_type == "AssistantMessage" and hasattr(msg, "content"):
                for block in msg.content:
                    block_type = type(block).__name__
                    if block_type == "TextBlock" and hasattr(block, "text"):
                        # Only assistant prose is accumulated into the returned
                        # transcript; tool chatter below is print-only.
                        response_text += block.text
                        print(style(block.text, THINKING), end="", flush=True)
                    elif block_type == "ToolUseBlock" and hasattr(block, "name"):
                        print(f"\n{prefix}{style('[Tool: ' + block.name + ']', TOOL_NAME)}", flush=True)
                        if hasattr(block, "input"):
                            input_str = str(block.input)
                            # Truncate tool input to keep the console readable.
                            truncated = input_str[:200] + ("..." if len(input_str) > 200 else "")
                            print(f" {style('Input: ' + truncated, TOOL_INPUT)}", flush=True)
            elif msg_type == "UserMessage" and hasattr(msg, "content"):
                # Tool results come back to the agent as user-role messages.
                for block in msg.content:
                    if type(block).__name__ == "ToolResultBlock":
                        result_content = getattr(block, "content", "")
                        is_error = getattr(block, "is_error", False)
                        if "blocked" in str(result_content).lower():
                            print(f" {prefix}{style('[BLOCKED] ' + str(result_content), TOOL_BLOCKED)}", flush=True)
                        elif is_error:
                            print(f" {prefix}{style('[Error] ' + str(result_content)[:500], TOOL_ERROR)}", flush=True)
                        else:
                            print(f" {prefix}{style('[Done]', TOOL_DONE)}", flush=True)
        print(f"\n{style('-' * 70, DIM)}\n")
        return "continue", response_text, session_id
    except Exception as e:
        # Report the failure to the caller instead of raising, so one bad
        # session doesn't kill the whole orchestration run.
        print(f"{prefix}{style('Error during session: ' + str(e), TOOL_ERROR)}")
        return "error", str(e), None
async def run_coding_session(
    task_id: int,
    model: str,
    work_dir: Path,
    event_bus: EventBus,
) -> str | None:
    """Drive one coding-agent session for *task_id*.

    Emits "coding_start"/"coding_end" events around the session and returns
    the CLI session id (used later to resume the dev session), or None.
    """
    await event_bus.emit("coding_start", task_id=task_id)
    sdk_client = create_client(work_dir, model)
    coding_prompt = get_coding_prompt(task_id)
    async with sdk_client:
        outcome, _transcript, session_id = await run_agent_session(
            sdk_client, coding_prompt, work_dir, label=f"dev:{task_id}"
        )
    await event_bus.emit("coding_end", task_id=task_id, status=outcome, session_id=session_id)
    return session_id
async def run_tester_session(
    task_id: int,
    model: str,
    work_dir: Path,
    event_bus: EventBus,
) -> tuple[bool, str]:
    """Run the QA/tester agent against a task's implementation.

    Returns a (passed, response_text) pair; ``passed`` is True only when
    the tester's transcript contains the PASS sentinel.
    """
    await event_bus.emit("test_start", task_id=task_id)
    qa_client = create_client(
        work_dir,
        model,
        system_prompt="You are a meticulous QA engineer. Your only job is to test — never implement features.",
    )
    tester_prompt = get_tester_prompt(task_id)
    async with qa_client:
        _status, transcript, _ = await run_agent_session(
            qa_client, tester_prompt, work_dir, label=f"test:{task_id}"
        )
    verdict = "TESTER_RESULT: PASS" in transcript
    await event_bus.emit("test_end", task_id=task_id, passed=verdict)
    return verdict, transcript
async def run_coding_worker(
    task_id: int,
    model: str,
    project_dir: Path,
    event_bus: EventBus,
    merge_lock: asyncio.Lock,
    use_worktrees: bool = True,
) -> None:
    """
    Full coding + test cycle for one task.

    When use_worktrees is True (default), the agent works in an isolated
    git worktree so parallel agents don't step on each other's files.
    On completion the task branch is merged back into main.

    Flow:
        1. Create worktree (if enabled)
        2. Coding agent implements the task
        3. Tester agent verifies it
        4. If tester fails, resume the dev session with feedback and retry
        5. Repeat up to MAX_TEST_RETRIES
        6. Merge worktree back into main and clean up
    """
    tc = task_color(task_id)
    task_tag = style(f"[Task {task_id}]", BOLD, tc)
    work_dir = project_dir
    task_done = False
    if use_worktrees:
        try:
            work_dir = await create_worktree(project_dir, task_id)
        except RuntimeError as e:
            # Worktree creation can fail; degrade gracefully to working in
            # the shared project directory for this task only.
            print(f"{task_tag} {style('Failed to create worktree: ' + str(e), WARN)}")
            print(f"{task_tag} {style('Falling back to shared directory', DIM)}")
            work_dir = project_dir
            use_worktrees = False
    # Mark in-progress from the orchestrator (don't rely on the agent doing it)
    set_task_status(project_dir, task_id, "in-progress")
    try:
        print(header_bar(f"task {task_id}: coding"))
        dev_session_id = await run_coding_session(task_id, model, work_dir, event_bus)
        for attempt in range(1, MAX_TEST_RETRIES + 1):
            print(header_bar(f"task {task_id}: testing (attempt {attempt}/{MAX_TEST_RETRIES})"))
            passed, feedback = await run_tester_session(task_id, model, work_dir, event_bus)
            if passed:
                # Belt-and-suspenders: mark done from the orchestrator too,
                # in case the tester's MCP set_task_status call didn't fire.
                set_task_status(project_dir, task_id, "done")
                task_done = True
                print(f"\n{task_tag} {style('PASSED', PASS)} — marked done")
                await event_bus.emit("task_complete", task_id=task_id, attempts=attempt)
                return
            print(f"\n{task_tag} {style('FAILED', FAIL)} — resuming dev with feedback")
            await event_bus.emit("test_failed", task_id=task_id, attempt=attempt)
            if dev_session_id and attempt < MAX_TEST_RETRIES:
                # Resume the original dev session with the failure details
                client = create_client(work_dir, model, resume=dev_session_id)
                fix_prompt = (
                    f"The tester found issues with your implementation of task {task_id}.\n\n"
                    f"Tester feedback:\n{feedback}\n\n"
                    f"Please fix these issues and commit your changes."
                )
                async with client:
                    _, _, dev_session_id = await run_agent_session(
                        client, fix_prompt, work_dir, label=f"dev:{task_id}:fix{attempt}"
                    )
        print(f"\n{task_tag} {style('Max retries reached — moving on', WARN)}")
        await event_bus.emit("task_max_retries", task_id=task_id)
    finally:
        # If the task wasn't completed, reset to pending so it can be retried
        if not task_done:
            set_task_status(project_dir, task_id, "pending")
            print(f"{task_tag} {style('Reset to pending (was not marked done)', WARN)}")
        if use_worktrees:
            merged = await merge_worktree(project_dir, task_id, merge_lock)
            if not merged:
                print(f"{task_tag} {style('WARNING: merge conflict — branch task-' + str(task_id) + ' preserved', WARN)}")
                await event_bus.emit("merge_conflict", task_id=task_id)
            # Delete the branch only when the merge succeeded; a conflicted
            # branch is kept for manual resolution.
            await cleanup_worktree(project_dir, task_id, delete_branch=merged)
async def run_autonomous_agent(
    project_dir: Path,
    model: str,
    max_iterations: Optional[int] = None,
    concurrency: int = 2,
    use_worktrees: bool = True,
) -> None:
    """
    Main orchestration loop.

    Phase 1 (once): Initializer agent reads the spec, uses task-master to
    parse it into a prioritized task graph, and sets up the project.
    Phase 2 (loop): Dispatch up to `concurrency` coding workers in parallel,
    each owning one task through the full code → test → (retry) cycle.

    When use_worktrees is True, each parallel task runs in an isolated git
    worktree so agents don't step on each other's files.
    """
    print(header_bar("AUTONOMOUS CODING AGENT"))
    print(f" {style('Project:', BOLD)} {project_dir}")
    print(f" {style('Model:', BOLD)} {model}")
    print(f" {style('Concurrency:', BOLD)} {concurrency}")
    print(f" {style('Worktrees:', BOLD)} {style('enabled', BRIGHT_GREEN) if use_worktrees else style('disabled', DIM)}")
    if max_iterations:
        print(f" {style('Max iter:', BOLD)} {max_iterations}")
    print()
    project_dir.mkdir(parents=True, exist_ok=True)
    # One shared event log for the run, plus a lock serializing worktree
    # merges back into main (workers run concurrently).
    event_bus = EventBus(project_dir / "events.jsonl")
    merge_lock = asyncio.Lock()
    await event_bus.emit("run_start", model=model, concurrency=concurrency, worktrees=use_worktrees)
    # Phase 1: Initialize
    if not is_initialized(project_dir):
        print(style("Fresh project — running initializer\n", LABEL_ORCH))
        copy_spec_to_project(project_dir)
        print(header_bar("initializer"))
        client = create_client(project_dir, model)
        async with client:
            await run_agent_session(
                client, get_initializer_prompt(), project_dir, label="init"
            )
        await event_bus.emit("initialized")
        # Ensure .worktrees and other orchestration artifacts are gitignored
        if use_worktrees:
            _ensure_gitignore(project_dir)
    else:
        done, total = count_task_progress(project_dir)
        print(style(f"Resuming — {done}/{total} tasks complete", LABEL_ORCH))
        # Reset tasks stuck as in-progress from a previous interrupted run
        reset_stale_tasks(project_dir)
        print(progress_line(done, total))
        if use_worktrees:
            _ensure_gitignore(project_dir)
    # Phase 2: Parallel coding loop
    iteration = 0
    while True:
        iteration += 1
        if max_iterations and iteration > max_iterations:
            print(f"\n{style(f'Reached max iterations ({max_iterations})', WARN)}")
            break
        if all_tasks_done(project_dir):
            print(f"\n{style('All tasks complete!', PASS)}")
            break
        available = get_available_tasks(project_dir)
        if not available:
            # Remaining tasks are blocked on unfinished dependencies;
            # poll until one becomes available.
            print(f"\n{style('No available tasks — waiting for in-progress tasks or blocked dependencies...', DIM)}")
            await asyncio.sleep(10)
            continue
        batch = available[:concurrency]
        task_ids = [t["id"] for t in batch]
        task_labels = " ".join(style(f"[{tid}]", BOLD, task_color(tid)) for tid in task_ids)
        print(f"\n{style(f'Dispatching {len(batch)} task(s):', DISPATCH)} {task_labels}")
        await event_bus.emit("batch_start", task_ids=task_ids, iteration=iteration)
        await asyncio.gather(*[
            run_coding_worker(task_id, model, project_dir, event_bus, merge_lock, use_worktrees)
            for task_id in task_ids
        ])
        await event_bus.emit("batch_end", task_ids=task_ids)
        done, total = count_task_progress(project_dir)
        print(progress_line(done, total))
        await asyncio.sleep(AUTO_CONTINUE_DELAY_SECONDS)
    done, total = count_task_progress(project_dir)
    print(header_bar(f"COMPLETE: {done}/{total} tasks done"))
def _ensure_gitignore(project_dir: Path) -> None:
    """Ensure .worktrees and orchestration files are gitignored.

    Appends any missing entries to the project's .gitignore, creating the
    file if it does not exist. Existing content is never modified or
    removed, and repeat calls are idempotent.
    """
    gitignore = project_dir / ".gitignore"
    entries = [".worktrees/", "events.jsonl", ".claude_settings.json"]
    existing = ""
    if gitignore.exists():
        existing = gitignore.read_text()
    # Compare against whole (stripped) lines, not substrings: a substring
    # test would treat an entry as already present when it merely appears
    # inside another line (e.g. a commented-out "# events.jsonl" or a path
    # like ".worktrees/foo"), and the entry would never be added.
    existing_lines = {line.strip() for line in existing.splitlines()}
    to_add = [e for e in entries if e not in existing_lines]
    if to_add:
        with open(gitignore, "a") as f:
            # Keep the appended section on its own lines even when the
            # existing file lacks a trailing newline.
            if existing and not existing.endswith("\n"):
                f.write("\n")
            f.write("# Orchestration artifacts\n")
            for entry in to_add:
                f.write(entry + "\n")
        print(style(f"[gitignore] Added: {', '.join(to_add)}", SYSTEM))

View File

@@ -0,0 +1,389 @@
"""
Agent Session Logic
===================
Core agent interaction functions for running autonomous coding sessions.
"""
import asyncio
import os
import shutil
from pathlib import Path
from typing import Optional
from claude_code_sdk import ClaudeSDKClient
from client import create_client
from colors import (
style, label_for, header_bar, progress_line,
BOLD, DIM, RESET,
TOOL_NAME, TOOL_INPUT, TOOL_DONE, TOOL_ERROR, TOOL_BLOCKED,
THINKING, RATE_LIMIT, SYSTEM, DISPATCH,
PASS, FAIL, WARN, LABEL_ORCH,
BRIGHT_GREEN, BRIGHT_RED,
task_color,
)
from events import EventBus
from progress import (
get_available_tasks,
all_tasks_done,
count_task_progress,
is_initialized,
reset_stale_tasks,
set_task_status,
)
from prompts import get_initializer_prompt, get_coding_prompt, get_tester_prompt, copy_spec_to_project
from worktree import create_worktree, merge_worktree, cleanup_worktree
AUTO_CONTINUE_DELAY_SECONDS = 3
MAX_TEST_RETRIES = 3
async def taskmaster_init(project_dir: Path) -> None:
    """
    Run task-master parse-prd directly as a subprocess.

    This avoids going through the MCP server and its API key validation.

    Copies the bundled app spec into the project's .taskmaster/docs
    directory, then shells out to `npx task-master-ai parse-prd` to turn
    it into a task graph.

    Raises:
        RuntimeError: if the parse-prd subprocess exits non-zero.
    """
    # The spec ships alongside this module; task-master expects to find it
    # inside the project at .taskmaster/docs/prd.txt.
    prd_src = Path(__file__).parent / "prompts" / "app_spec.txt"
    prd_dst = project_dir / ".taskmaster" / "docs" / "prd.txt"
    prd_dst.parent.mkdir(parents=True, exist_ok=True)
    shutil.copy(prd_src, prd_dst)
    print(f"Copied spec to {prd_dst}")
    # Pass the current environment through to the subprocess (as a copy).
    env = {**os.environ}
    print("Running task-master parse-prd...")
    proc = await asyncio.create_subprocess_exec(
        "npx", "-y", "task-master-ai", "parse-prd",
        str(prd_dst),
        "--project-root", str(project_dir),
        "--num-tasks", "25",
        cwd=str(project_dir),
        env=env,
    )
    returncode = await proc.wait()
    if returncode != 0:
        raise RuntimeError(f"task-master parse-prd failed (exit {returncode})")
async def run_agent_session(
    client: ClaudeSDKClient,
    message: str,
    project_dir: Path,
    label: str = "",
) -> tuple[str, str, str | None]:
    """
    Run a single agent session.

    Sends *message* to the client and streams the response to stdout as it
    arrives: assistant text, tool invocations (name + truncated input), and
    tool results are echoed live with color styling.

    Returns:
        (status, response_text, session_id)
        status: "continue" | "error"
        session_id: the CLI session ID, used to resume with --resume
    """
    tag = label_for(label)
    prefix = f"{tag} " if tag else ""
    print(f"{prefix}Sending prompt to Claude...\n")
    try:
        await client.query(message)
        response_text = ""
        session_id: str | None = None
        async for msg in client.receive_response():
            # Dispatch on the class name rather than isinstance so this
            # loop tolerates SDK message-type changes.
            msg_type = type(msg).__name__
            if msg_type == "ResultMessage":
                # The final message of the turn carries the resumable session ID.
                session_id = getattr(msg, "session_id", None)
                continue
            if msg_type == "SystemMessage":
                if getattr(msg, "subtype", None) == "rate_limit_event":
                    # Back off briefly, then keep consuming the stream.
                    print(f"\n{prefix}{style('[Rate limited — pausing 5s]', RATE_LIMIT)}\n", flush=True)
                    await asyncio.sleep(5)
                continue
            if msg_type == "AssistantMessage" and hasattr(msg, "content"):
                for block in msg.content:
                    block_type = type(block).__name__
                    if block_type == "TextBlock" and hasattr(block, "text"):
                        # Only assistant prose is accumulated into the returned
                        # transcript; tool chatter below is print-only.
                        response_text += block.text
                        print(style(block.text, THINKING), end="", flush=True)
                    elif block_type == "ToolUseBlock" and hasattr(block, "name"):
                        print(f"\n{prefix}{style('[Tool: ' + block.name + ']', TOOL_NAME)}", flush=True)
                        if hasattr(block, "input"):
                            input_str = str(block.input)
                            # Truncate tool input to keep the console readable.
                            truncated = input_str[:200] + ("..." if len(input_str) > 200 else "")
                            print(f" {style('Input: ' + truncated, TOOL_INPUT)}", flush=True)
            elif msg_type == "UserMessage" and hasattr(msg, "content"):
                # Tool results come back to the agent as user-role messages.
                for block in msg.content:
                    if type(block).__name__ == "ToolResultBlock":
                        result_content = getattr(block, "content", "")
                        is_error = getattr(block, "is_error", False)
                        if "blocked" in str(result_content).lower():
                            print(f" {prefix}{style('[BLOCKED] ' + str(result_content), TOOL_BLOCKED)}", flush=True)
                        elif is_error:
                            print(f" {prefix}{style('[Error] ' + str(result_content)[:500], TOOL_ERROR)}", flush=True)
                        else:
                            print(f" {prefix}{style('[Done]', TOOL_DONE)}", flush=True)
        print(f"\n{style('-' * 70, DIM)}\n")
        return "continue", response_text, session_id
    except Exception as e:
        # Report the failure to the caller instead of raising, so one bad
        # session doesn't kill the whole orchestration run.
        print(f"{prefix}{style('Error during session: ' + str(e), TOOL_ERROR)}")
        return "error", str(e), None
async def run_coding_session(
    task_id: int,
    model: str,
    work_dir: Path,
    event_bus: EventBus,
) -> str | None:
    """Drive one coding-agent session for *task_id*.

    Emits "coding_start"/"coding_end" events around the session and returns
    the CLI session id (used later to resume the dev session), or None.
    """
    await event_bus.emit("coding_start", task_id=task_id)
    sdk_client = create_client(work_dir, model)
    coding_prompt = get_coding_prompt(task_id)
    async with sdk_client:
        outcome, _transcript, session_id = await run_agent_session(
            sdk_client, coding_prompt, work_dir, label=f"dev:{task_id}"
        )
    await event_bus.emit("coding_end", task_id=task_id, status=outcome, session_id=session_id)
    return session_id
async def run_tester_session(
    task_id: int,
    model: str,
    work_dir: Path,
    event_bus: EventBus,
) -> tuple[bool, str]:
    """Run the QA/tester agent against a task's implementation.

    Returns a (passed, response_text) pair; ``passed`` is True only when
    the tester's transcript contains the PASS sentinel.
    """
    await event_bus.emit("test_start", task_id=task_id)
    qa_client = create_client(
        work_dir,
        model,
        system_prompt="You are a meticulous QA engineer. Your only job is to test — never implement features.",
    )
    tester_prompt = get_tester_prompt(task_id)
    async with qa_client:
        _status, transcript, _ = await run_agent_session(
            qa_client, tester_prompt, work_dir, label=f"test:{task_id}"
        )
    verdict = "TESTER_RESULT: PASS" in transcript
    await event_bus.emit("test_end", task_id=task_id, passed=verdict)
    return verdict, transcript
async def run_coding_worker(
    task_id: int,
    model: str,
    tester_model: str,
    project_dir: Path,
    event_bus: EventBus,
    merge_lock: asyncio.Lock,
    use_worktrees: bool = True,
) -> None:
    """
    Full coding + test cycle for one task.

    The dev agent runs with *model*; the tester agent runs with the
    (possibly different) *tester_model*.

    When use_worktrees is True (default), the agent works in an isolated
    git worktree so parallel agents don't step on each other's files.
    On completion the task branch is merged back into main.

    Flow:
        1. Create worktree (if enabled)
        2. Coding agent implements the task
        3. Tester agent verifies it
        4. If tester fails, resume the dev session with feedback and retry
        5. Repeat up to MAX_TEST_RETRIES
        6. Merge worktree back into main and clean up
    """
    tc = task_color(task_id)
    task_tag = style(f"[Task {task_id}]", BOLD, tc)
    work_dir = project_dir
    task_done = False
    if use_worktrees:
        try:
            work_dir = await create_worktree(project_dir, task_id)
        except RuntimeError as e:
            # Worktree creation can fail; degrade gracefully to working in
            # the shared project directory for this task only.
            print(f"{task_tag} {style('Failed to create worktree: ' + str(e), WARN)}")
            print(f"{task_tag} {style('Falling back to shared directory', DIM)}")
            work_dir = project_dir
            use_worktrees = False
    # Mark in-progress from the orchestrator (don't rely on the agent doing it)
    set_task_status(project_dir, task_id, "in-progress")
    try:
        print(header_bar(f"task {task_id}: coding"))
        dev_session_id = await run_coding_session(task_id, model, work_dir, event_bus)
        for attempt in range(1, MAX_TEST_RETRIES + 1):
            print(header_bar(f"task {task_id}: testing (attempt {attempt}/{MAX_TEST_RETRIES})"))
            passed, feedback = await run_tester_session(task_id, tester_model, work_dir, event_bus)
            if passed:
                # Belt-and-suspenders: mark done from the orchestrator too,
                # in case the tester's MCP set_task_status call didn't fire.
                set_task_status(project_dir, task_id, "done")
                task_done = True
                print(f"\n{task_tag} {style('PASSED', PASS)} — marked done")
                await event_bus.emit("task_complete", task_id=task_id, attempts=attempt)
                return
            print(f"\n{task_tag} {style('FAILED', FAIL)} — resuming dev with feedback")
            await event_bus.emit("test_failed", task_id=task_id, attempt=attempt)
            if dev_session_id and attempt < MAX_TEST_RETRIES:
                # Resume the original dev session with the failure details
                client = create_client(work_dir, model, resume=dev_session_id)
                fix_prompt = (
                    f"The tester found issues with your implementation of task {task_id}.\n\n"
                    f"Tester feedback:\n{feedback}\n\n"
                    f"Please fix these issues and commit your changes."
                )
                async with client:
                    _, _, dev_session_id = await run_agent_session(
                        client, fix_prompt, work_dir, label=f"dev:{task_id}:fix{attempt}"
                    )
        print(f"\n{task_tag} {style('Max retries reached — moving on', WARN)}")
        await event_bus.emit("task_max_retries", task_id=task_id)
    finally:
        # If the task wasn't completed, reset to pending so it can be retried
        if not task_done:
            set_task_status(project_dir, task_id, "pending")
            print(f"{task_tag} {style('Reset to pending (was not marked done)', WARN)}")
        if use_worktrees:
            merged = await merge_worktree(project_dir, task_id, merge_lock)
            if not merged:
                print(f"{task_tag} {style('WARNING: merge conflict — branch task-' + str(task_id) + ' preserved', WARN)}")
                await event_bus.emit("merge_conflict", task_id=task_id)
            # Delete the branch only when the merge succeeded; a conflicted
            # branch is kept for manual resolution.
            await cleanup_worktree(project_dir, task_id, delete_branch=merged)
async def run_autonomous_agent(
    project_dir: Path,
    model: str,
    tester_model: str = "claude-sonnet-4-6",
    max_iterations: Optional[int] = None,
    concurrency: int = 2,
    use_worktrees: bool = True,
) -> None:
    """
    Main orchestration loop.

    Phase 1 (once): Initializer agent reads the spec, uses task-master to
    parse it into a prioritized task graph, and sets up the project.
    Phase 2 (loop): Dispatch up to `concurrency` coding workers in parallel,
    each owning one task through the full code → test → (retry) cycle.

    The dev agents use *model*; tester agents use *tester_model*.

    When use_worktrees is True, each parallel task runs in an isolated git
    worktree so agents don't step on each other's files.
    """
    print(header_bar("AUTONOMOUS CODING AGENT"))
    print(f" {style('Project:', BOLD)} {project_dir}")
    print(f" {style('Dev model:', BOLD)} {model}")
    print(f" {style('Test model:', BOLD)} {tester_model}")
    print(f" {style('Concurrency:', BOLD)} {concurrency}")
    print(f" {style('Worktrees:', BOLD)} {style('enabled', BRIGHT_GREEN) if use_worktrees else style('disabled', DIM)}")
    if max_iterations:
        print(f" {style('Max iter:', BOLD)} {max_iterations}")
    print()
    project_dir.mkdir(parents=True, exist_ok=True)
    # One shared event log for the run, plus a lock serializing worktree
    # merges back into main (workers run concurrently).
    event_bus = EventBus(project_dir / "events.jsonl")
    merge_lock = asyncio.Lock()
    await event_bus.emit("run_start", model=model, tester_model=tester_model, concurrency=concurrency, worktrees=use_worktrees)
    # Phase 1: Initialize
    if not is_initialized(project_dir):
        print(style("Fresh project — running initializer\n", LABEL_ORCH))
        copy_spec_to_project(project_dir)
        print(header_bar("initializer"))
        client = create_client(project_dir, model)
        async with client:
            await run_agent_session(
                client, get_initializer_prompt(), project_dir, label="init"
            )
        await event_bus.emit("initialized")
        # Ensure .worktrees and other orchestration artifacts are gitignored
        if use_worktrees:
            _ensure_gitignore(project_dir)
    else:
        done, total = count_task_progress(project_dir)
        print(style(f"Resuming — {done}/{total} tasks complete", LABEL_ORCH))
        # Reset tasks stuck as in-progress from a previous interrupted run
        reset_stale_tasks(project_dir)
        print(progress_line(done, total))
        if use_worktrees:
            _ensure_gitignore(project_dir)
    # Phase 2: Parallel coding loop
    iteration = 0
    while True:
        iteration += 1
        if max_iterations and iteration > max_iterations:
            print(f"\n{style(f'Reached max iterations ({max_iterations})', WARN)}")
            break
        if all_tasks_done(project_dir):
            print(f"\n{style('All tasks complete!', PASS)}")
            break
        available = get_available_tasks(project_dir)
        if not available:
            # Remaining tasks are blocked on unfinished dependencies;
            # poll until one becomes available.
            print(f"\n{style('No available tasks — waiting for in-progress tasks or blocked dependencies...', DIM)}")
            await asyncio.sleep(10)
            continue
        batch = available[:concurrency]
        task_ids = [t["id"] for t in batch]
        task_labels = " ".join(style(f"[{tid}]", BOLD, task_color(tid)) for tid in task_ids)
        print(f"\n{style(f'Dispatching {len(batch)} task(s):', DISPATCH)} {task_labels}")
        await event_bus.emit("batch_start", task_ids=task_ids, iteration=iteration)
        await asyncio.gather(*[
            run_coding_worker(task_id, model, tester_model, project_dir, event_bus, merge_lock, use_worktrees)
            for task_id in task_ids
        ])
        await event_bus.emit("batch_end", task_ids=task_ids)
        done, total = count_task_progress(project_dir)
        print(progress_line(done, total))
        await asyncio.sleep(AUTO_CONTINUE_DELAY_SECONDS)
    done, total = count_task_progress(project_dir)
    print(header_bar(f"COMPLETE: {done}/{total} tasks done"))
def _ensure_gitignore(project_dir: Path) -> None:
    """Ensure .worktrees and orchestration files are gitignored.

    Appends any missing entries to the project's .gitignore, creating the
    file if it does not exist. Existing content is never modified or
    removed, and repeat calls are idempotent.
    """
    gitignore = project_dir / ".gitignore"
    entries = [".worktrees/", "events.jsonl", ".claude_settings.json"]
    existing = ""
    if gitignore.exists():
        existing = gitignore.read_text()
    # Compare against whole (stripped) lines, not substrings: a substring
    # test would treat an entry as already present when it merely appears
    # inside another line (e.g. a commented-out "# events.jsonl" or a path
    # like ".worktrees/foo"), and the entry would never be added.
    existing_lines = {line.strip() for line in existing.splitlines()}
    to_add = [e for e in entries if e not in existing_lines]
    if to_add:
        with open(gitignore, "a") as f:
            # Keep the appended section on its own lines even when the
            # existing file lacks a trailing newline.
            if existing and not existing.endswith("\n"):
                f.write("\n")
            f.write("# Orchestration artifacts\n")
            for entry in to_add:
                f.write(entry + "\n")
        print(style(f"[gitignore] Added: {', '.join(to_add)}", SYSTEM))

View File

@@ -0,0 +1,390 @@
"""
Agent Session Logic
===================
Core agent interaction functions for running autonomous coding sessions.
"""
import asyncio
import os
import shutil
from pathlib import Path
from typing import Optional
from claude_code_sdk import ClaudeSDKClient
from client import create_client
from colors import (
style, label_for, header_bar, progress_line,
BOLD, DIM, RESET,
TOOL_NAME, TOOL_INPUT, TOOL_DONE, TOOL_ERROR, TOOL_BLOCKED,
THINKING, RATE_LIMIT, SYSTEM, DISPATCH,
PASS, FAIL, WARN, LABEL_ORCH,
BRIGHT_GREEN, BRIGHT_RED,
task_color,
)
from events import EventBus
from progress import (
get_available_tasks,
all_tasks_done,
count_task_progress,
is_initialized,
reset_stale_tasks,
set_task_status,
sync_task_from_worktree,
)
from prompts import get_initializer_prompt, get_coding_prompt, get_tester_prompt, copy_spec_to_project
from worktree import create_worktree, merge_worktree, cleanup_worktree
AUTO_CONTINUE_DELAY_SECONDS = 3
MAX_TEST_RETRIES = 3
async def taskmaster_init(project_dir: Path) -> None:
    """
    Run task-master parse-prd directly as a subprocess.

    This avoids going through the MCP server and its API key validation.

    Copies the bundled app spec into the project's .taskmaster/docs
    directory, then shells out to `npx task-master-ai parse-prd` to turn
    it into a task graph.

    Raises:
        RuntimeError: if the parse-prd subprocess exits non-zero.
    """
    # The spec ships alongside this module; task-master expects to find it
    # inside the project at .taskmaster/docs/prd.txt.
    prd_src = Path(__file__).parent / "prompts" / "app_spec.txt"
    prd_dst = project_dir / ".taskmaster" / "docs" / "prd.txt"
    prd_dst.parent.mkdir(parents=True, exist_ok=True)
    shutil.copy(prd_src, prd_dst)
    print(f"Copied spec to {prd_dst}")
    # Pass the current environment through to the subprocess (as a copy).
    env = {**os.environ}
    print("Running task-master parse-prd...")
    proc = await asyncio.create_subprocess_exec(
        "npx", "-y", "task-master-ai", "parse-prd",
        str(prd_dst),
        "--project-root", str(project_dir),
        "--num-tasks", "25",
        cwd=str(project_dir),
        env=env,
    )
    returncode = await proc.wait()
    if returncode != 0:
        raise RuntimeError(f"task-master parse-prd failed (exit {returncode})")
async def run_agent_session(
    client: ClaudeSDKClient,
    message: str,
    project_dir: Path,
    label: str = "",
) -> tuple[str, str, str | None]:
    """
    Run a single agent session.

    Sends *message* to the client and streams the response to stdout as it
    arrives: assistant text, tool invocations (name + truncated input), and
    tool results are echoed live with color styling.

    Returns:
        (status, response_text, session_id)
        status: "continue" | "error"
        session_id: the CLI session ID, used to resume with --resume
    """
    tag = label_for(label)
    prefix = f"{tag} " if tag else ""
    print(f"{prefix}Sending prompt to Claude...\n")
    try:
        await client.query(message)
        response_text = ""
        session_id: str | None = None
        async for msg in client.receive_response():
            # Dispatch on the class name rather than isinstance so this
            # loop tolerates SDK message-type changes.
            msg_type = type(msg).__name__
            if msg_type == "ResultMessage":
                # The final message of the turn carries the resumable session ID.
                session_id = getattr(msg, "session_id", None)
                continue
            if msg_type == "SystemMessage":
                if getattr(msg, "subtype", None) == "rate_limit_event":
                    # Back off briefly, then keep consuming the stream.
                    print(f"\n{prefix}{style('[Rate limited — pausing 5s]', RATE_LIMIT)}\n", flush=True)
                    await asyncio.sleep(5)
                continue
            if msg_type == "AssistantMessage" and hasattr(msg, "content"):
                for block in msg.content:
                    block_type = type(block).__name__
                    if block_type == "TextBlock" and hasattr(block, "text"):
                        # Only assistant prose is accumulated into the returned
                        # transcript; tool chatter below is print-only.
                        response_text += block.text
                        print(style(block.text, THINKING), end="", flush=True)
                    elif block_type == "ToolUseBlock" and hasattr(block, "name"):
                        print(f"\n{prefix}{style('[Tool: ' + block.name + ']', TOOL_NAME)}", flush=True)
                        if hasattr(block, "input"):
                            input_str = str(block.input)
                            # Truncate tool input to keep the console readable.
                            truncated = input_str[:200] + ("..." if len(input_str) > 200 else "")
                            print(f" {style('Input: ' + truncated, TOOL_INPUT)}", flush=True)
            elif msg_type == "UserMessage" and hasattr(msg, "content"):
                # Tool results come back to the agent as user-role messages.
                for block in msg.content:
                    if type(block).__name__ == "ToolResultBlock":
                        result_content = getattr(block, "content", "")
                        is_error = getattr(block, "is_error", False)
                        if "blocked" in str(result_content).lower():
                            print(f" {prefix}{style('[BLOCKED] ' + str(result_content), TOOL_BLOCKED)}", flush=True)
                        elif is_error:
                            print(f" {prefix}{style('[Error] ' + str(result_content)[:500], TOOL_ERROR)}", flush=True)
                        else:
                            print(f" {prefix}{style('[Done]', TOOL_DONE)}", flush=True)
        print(f"\n{style('-' * 70, DIM)}\n")
        return "continue", response_text, session_id
    except Exception as e:
        # Report the failure to the caller instead of raising, so one bad
        # session doesn't kill the whole orchestration run.
        print(f"{prefix}{style('Error during session: ' + str(e), TOOL_ERROR)}")
        return "error", str(e), None
async def run_coding_session(
    task_id: int,
    model: str,
    work_dir: Path,
    event_bus: EventBus,
) -> str | None:
    """Drive one coding-agent session for *task_id* inside *work_dir*.

    Emits coding_start/coding_end events around the session and returns the
    CLI session ID (needed later to resume this session with tester
    feedback), or None if the agent reported no ID.
    """
    await event_bus.emit("coding_start", task_id=task_id)
    dev_client = create_client(work_dir, model)
    async with dev_client:
        status, _response, session_id = await run_agent_session(
            dev_client,
            get_coding_prompt(task_id),
            work_dir,
            label=f"dev:{task_id}",
        )
    await event_bus.emit("coding_end", task_id=task_id, status=status, session_id=session_id)
    return session_id
async def run_tester_session(
    task_id: int,
    model: str,
    work_dir: Path,
    event_bus: EventBus,
) -> tuple[bool, str]:
    """Run the QA agent against *task_id* and report the verdict.

    Returns (passed, response_text); `passed` is True only when the tester
    emitted the literal marker "TESTER_RESULT: PASS" in its transcript.
    """
    await event_bus.emit("test_start", task_id=task_id)
    qa_client = create_client(
        work_dir,
        model,
        system_prompt="You are a meticulous QA engineer. Your only job is to test — never implement features.",
    )
    async with qa_client:
        _status, report, _session = await run_agent_session(
            qa_client, get_tester_prompt(task_id), work_dir, label=f"test:{task_id}"
        )
    verdict = "TESTER_RESULT: PASS" in report
    await event_bus.emit("test_end", task_id=task_id, passed=verdict)
    return verdict, report
async def run_coding_worker(
    task_id: int,
    model: str,
    tester_model: str,
    project_dir: Path,
    event_bus: EventBus,
    merge_lock: asyncio.Lock,
    use_worktrees: bool = True,
) -> None:
    """
    Full coding + test cycle for one task.

    When use_worktrees is True (default), the agent works in an isolated
    git worktree so parallel agents don't step on each other's files.
    On completion the task branch is merged back into main.

    Args:
        task_id: The task to implement.
        model: Model name for the coding (dev) agent.
        tester_model: Model name for the QA agent.
        project_dir: Root of the main project checkout.
        event_bus: Sink for orchestration lifecycle events.
        merge_lock: Serializes merges back into main across parallel workers.
        use_worktrees: Isolate the task in its own git worktree when True.

    Flow:
      1. Create worktree (if enabled)
      2. Coding agent implements the task
      3. Tester agent verifies it
      4. If tester fails, resume the dev session with feedback and retry
      5. Repeat up to MAX_TEST_RETRIES
      6. Merge worktree back into main and clean up
    """
    tc = task_color(task_id)
    task_tag = style(f"[Task {task_id}]", BOLD, tc)
    work_dir = project_dir
    task_done = False  # flipped True only when the tester reports PASS
    if use_worktrees:
        try:
            work_dir = await create_worktree(project_dir, task_id)
        except RuntimeError as e:
            # Worktree creation can fail; degrade to the shared project
            # directory instead of aborting the whole task.
            print(f"{task_tag} {style('Failed to create worktree: ' + str(e), WARN)}")
            print(f"{task_tag} {style('Falling back to shared directory', DIM)}")
            work_dir = project_dir
            use_worktrees = False  # also disables sync/merge in `finally` below
    try:
        print(header_bar(f"task {task_id}: coding"))
        dev_session_id = await run_coding_session(task_id, model, work_dir, event_bus)
        for attempt in range(1, MAX_TEST_RETRIES + 1):
            print(header_bar(f"task {task_id}: testing (attempt {attempt}/{MAX_TEST_RETRIES})"))
            passed, feedback = await run_tester_session(task_id, tester_model, work_dir, event_bus)
            if passed:
                task_done = True
                print(f"\n{task_tag} {style('PASSED', PASS)}")
                await event_bus.emit("task_complete", task_id=task_id, attempts=attempt)
                return
            print(f"\n{task_tag} {style('FAILED', FAIL)} — resuming dev with feedback")
            await event_bus.emit("test_failed", task_id=task_id, attempt=attempt)
            # Skip the resume on the final attempt — there is no retry left
            # that would re-test the fix.
            if dev_session_id and attempt < MAX_TEST_RETRIES:
                # Resume the original dev session with the failure details
                client = create_client(work_dir, model, resume=dev_session_id)
                fix_prompt = (
                    f"The tester found issues with your implementation of task {task_id}.\n\n"
                    f"Tester feedback:\n{feedback}\n\n"
                    f"Please fix these issues and commit your changes."
                )
                async with client:
                    _, _, dev_session_id = await run_agent_session(
                        client, fix_prompt, work_dir, label=f"dev:{task_id}:fix{attempt}"
                    )
        print(f"\n{task_tag} {style('Max retries reached — moving on', WARN)}")
        await event_bus.emit("task_max_retries", task_id=task_id)
    finally:
        # Sync the agent's task/subtask status changes back to the main project.
        # The agent owns status — the orchestrator just propagates it.
        if use_worktrees:
            sync_task_from_worktree(project_dir, work_dir, task_id)
        # If the agent never marked the task done (crash, max retries, etc.),
        # reset to pending so it can be retried on the next run.
        if not task_done:
            set_task_status(project_dir, task_id, "pending")
            print(f"{task_tag} {style('Reset to pending (not marked done by agent)', WARN)}")
        if use_worktrees:
            merged = await merge_worktree(project_dir, task_id, merge_lock)
            if not merged:
                print(f"{task_tag} {style('WARNING: merge conflict — branch task-' + str(task_id) + ' preserved', WARN)}")
                await event_bus.emit("merge_conflict", task_id=task_id)
            # On a conflicted merge, keep the task branch for manual recovery.
            await cleanup_worktree(project_dir, task_id, delete_branch=merged)
async def run_autonomous_agent(
    project_dir: Path,
    model: str,
    tester_model: str = "claude-sonnet-4-6",
    max_iterations: Optional[int] = None,
    concurrency: int = 2,
    use_worktrees: bool = True,
) -> None:
    """
    Main orchestration loop.

    Phase 1 (once): Initializer agent reads the spec, uses task-master to
    parse it into a prioritized task graph, and sets up the project.
    Phase 2 (loop): Dispatch up to `concurrency` coding workers in parallel,
    each owning one task through the full code → test → (retry) cycle.

    When use_worktrees is True, each parallel task runs in an isolated git
    worktree so agents don't step on each other's files.

    Args:
        project_dir: Project root; created if missing.
        model: Model for the initializer and dev agents.
        tester_model: Model for the QA agent.
        max_iterations: Cap on dispatch iterations (None = run until done).
        concurrency: Max tasks worked on in parallel per batch.
        use_worktrees: Run each task in its own git worktree when True.
    """
    print(header_bar("AUTONOMOUS CODING AGENT"))
    print(f"  {style('Project:', BOLD)} {project_dir}")
    print(f"  {style('Dev model:', BOLD)} {model}")
    print(f"  {style('Test model:', BOLD)} {tester_model}")
    print(f"  {style('Concurrency:', BOLD)} {concurrency}")
    print(f"  {style('Worktrees:', BOLD)} {style('enabled', BRIGHT_GREEN) if use_worktrees else style('disabled', DIM)}")
    if max_iterations:
        print(f"  {style('Max iter:', BOLD)} {max_iterations}")
    print()
    project_dir.mkdir(parents=True, exist_ok=True)
    event_bus = EventBus(project_dir / "events.jsonl")
    merge_lock = asyncio.Lock()  # one merge into main at a time across workers
    await event_bus.emit("run_start", model=model, tester_model=tester_model, concurrency=concurrency, worktrees=use_worktrees)
    # Phase 1: Initialize
    if not is_initialized(project_dir):
        print(style("Fresh project — running initializer\n", LABEL_ORCH))
        copy_spec_to_project(project_dir)
        print(header_bar("initializer"))
        client = create_client(project_dir, model)
        async with client:
            await run_agent_session(
                client, get_initializer_prompt(), project_dir, label="init"
            )
        await event_bus.emit("initialized")
        # Ensure .worktrees and other orchestration artifacts are gitignored
        if use_worktrees:
            _ensure_gitignore(project_dir)
    else:
        done, total = count_task_progress(project_dir)
        print(style(f"Resuming — {done}/{total} tasks complete", LABEL_ORCH))
        # Reset tasks stuck as in-progress from a previous interrupted run
        reset_stale_tasks(project_dir)
        print(progress_line(done, total))
        if use_worktrees:
            _ensure_gitignore(project_dir)
    # Phase 2: Parallel coding loop
    iteration = 0
    while True:
        iteration += 1
        if max_iterations and iteration > max_iterations:
            print(f"\n{style(f'Reached max iterations ({max_iterations})', WARN)}")
            break
        if all_tasks_done(project_dir):
            print(f"\n{style('All tasks complete!', PASS)}")
            break
        available = get_available_tasks(project_dir)
        if not available:
            # Nothing dispatchable right now — poll until a dependency unblocks.
            print(f"\n{style('No available tasks — waiting for in-progress tasks or blocked dependencies...', DIM)}")
            await asyncio.sleep(10)
            continue
        batch = available[:concurrency]
        task_ids = [t["id"] for t in batch]
        task_labels = " ".join(style(f"[{tid}]", BOLD, task_color(tid)) for tid in task_ids)
        print(f"\n{style(f'Dispatching {len(batch)} task(s):', DISPATCH)} {task_labels}")
        await event_bus.emit("batch_start", task_ids=task_ids, iteration=iteration)
        # One worker per task; each worker owns its worktree, retries, and merge.
        await asyncio.gather(*[
            run_coding_worker(task_id, model, tester_model, project_dir, event_bus, merge_lock, use_worktrees)
            for task_id in task_ids
        ])
        await event_bus.emit("batch_end", task_ids=task_ids)
        done, total = count_task_progress(project_dir)
        print(progress_line(done, total))
        await asyncio.sleep(AUTO_CONTINUE_DELAY_SECONDS)
    done, total = count_task_progress(project_dir)
    print(header_bar(f"COMPLETE: {done}/{total} tasks done"))
def _ensure_gitignore(project_dir: Path) -> None:
    """Ensure .worktrees and orchestration files are gitignored.

    Appends any missing entries to `project_dir/.gitignore`, creating the
    file if needed. Existing content is never removed or reordered.

    Args:
        project_dir: Directory whose .gitignore should be updated.
    """
    gitignore = project_dir / ".gitignore"
    entries = [".worktrees/", "events.jsonl", ".claude_settings.json"]
    existing = gitignore.read_text() if gitignore.exists() else ""
    # Compare against whole (stripped) lines rather than raw substrings:
    # a line like "events.jsonl.bak" or a mention inside a comment must not
    # suppress adding the real entry.
    existing_lines = {line.strip() for line in existing.splitlines()}
    to_add = [e for e in entries if e not in existing_lines]
    if to_add:
        with open(gitignore, "a") as f:
            # Keep the appended section on its own line even when the file
            # doesn't end with a newline.
            if existing and not existing.endswith("\n"):
                f.write("\n")
            f.write("# Orchestration artifacts\n")
            for entry in to_add:
                f.write(entry + "\n")
        print(style(f"[gitignore] Added: {', '.join(to_add)}", SYSTEM))

View File

@@ -0,0 +1,57 @@
# Zman TCG Fulfillment — Development Services
#
# Usage:
#   docker compose up -d      Start all services
#   docker compose down       Stop all services
#
# PostgreSQL connection: postgresql://zman_tcg:zman_tcg_dev@localhost:5432/zman_tcg
# Redis is required for the BullMQ worker package.
services:
  postgres:
    image: postgres:16-alpine
    container_name: zman-tcg-postgres
    ports:
      - '5432:5432'
    environment:
      POSTGRES_USER: zman_tcg
      POSTGRES_PASSWORD: zman_tcg_dev
      POSTGRES_DB: zman_tcg
    volumes:
      - pgdata:/var/lib/postgresql/data
    healthcheck:
      test: ['CMD-SHELL', 'pg_isready -U zman_tcg -d zman_tcg']
      interval: 5s
      timeout: 5s
      retries: 5
  redis:
    image: redis:7-alpine
    container_name: zman-tcg-redis
    ports:
      - "6379:6379"
    volumes:
      - redis-data:/data
    command: redis-server --appendonly yes --maxmemory 256mb --maxmemory-policy allkeys-lru
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 3s
      retries: 5
    restart: unless-stopped
volumes:
  pgdata:
  redis-data:

View File

@@ -0,0 +1,45 @@
# Zman TCG Fulfillment — Development Services
#
# Usage:
# docker compose up -d Start all services
# docker compose down Stop all services
#
# PostgreSQL is required for the database.
# Redis is required for the BullMQ worker package.
services:
postgres:
image: postgres:16-alpine
container_name: zman-tcg-postgres
ports:
- '5432:5432'
environment:
POSTGRES_USER: zman_tcg
POSTGRES_PASSWORD: zman_tcg_dev
POSTGRES_DB: zman_tcg
volumes:
- pgdata:/var/lib/postgresql/data
healthcheck:
test: ['CMD-SHELL', 'pg_isready -U zman_tcg -d zman_tcg']
interval: 5s
timeout: 5s
retries: 5
redis:
image: redis:7-alpine
container_name: zman-tcg-redis
ports:
- "6379:6379"
volumes:
- redis-data:/data
command: redis-server --appendonly yes --maxmemory 256mb --maxmemory-policy allkeys-lru
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 10s
timeout: 3s
retries: 5
restart: unless-stopped
volumes:
pgdata:
redis-data:

View File

@@ -0,0 +1,146 @@
"""
Terminal Colors
===============
ANSI color helpers for distinguishing parallel agent output at a glance.
No external dependencies — uses raw escape codes that work in any modern terminal.
"""
import os

# Honor the NO_COLOR convention (https://no-color.org/); also disable color
# when stdout is not attached to a terminal.
_NO_COLOR = os.environ.get("NO_COLOR") is not None or not os.isatty(1)


def _ansi(code: str) -> str:
    """Return *code* verbatim, or the empty string when color is disabled."""
    return "" if _NO_COLOR else code


# ---------------------------------------------------------------------------
# ANSI escape codes
# ---------------------------------------------------------------------------
RESET = _ansi("\033[0m")
BOLD = _ansi("\033[1m")
DIM = _ansi("\033[2m")
ITALIC = _ansi("\033[3m")

# Foreground colors
BLACK = _ansi("\033[30m")
RED = _ansi("\033[31m")
GREEN = _ansi("\033[32m")
YELLOW = _ansi("\033[33m")
BLUE = _ansi("\033[34m")
MAGENTA = _ansi("\033[35m")
CYAN = _ansi("\033[36m")
WHITE = _ansi("\033[37m")

# Bright foreground colors
BRIGHT_RED = _ansi("\033[91m")
BRIGHT_GREEN = _ansi("\033[92m")
BRIGHT_YELLOW = _ansi("\033[93m")
BRIGHT_BLUE = _ansi("\033[94m")
BRIGHT_MAGENTA = _ansi("\033[95m")
BRIGHT_CYAN = _ansi("\033[96m")

# ---------------------------------------------------------------------------
# Semantic styles — single place to tweak the look
# ---------------------------------------------------------------------------
HEADER = BOLD + BRIGHT_CYAN         # session banners
PROGRESS_BAR = BOLD + GREEN         # "5/32 tasks done"
PROGRESS_PCT = BOLD + BRIGHT_GREEN  # "(15.6%)"
LABEL_INIT = BOLD + BRIGHT_MAGENTA  # [init]
LABEL_DEV = BOLD + BRIGHT_CYAN      # [dev:5]
LABEL_TEST = BOLD + BRIGHT_YELLOW   # [test:5]
LABEL_FIX = BOLD + YELLOW           # [dev:5:fix1]
LABEL_ORCH = BOLD + WHITE           # orchestrator-level messages
TOOL_NAME = BOLD + CYAN             # [Tool: Write]
TOOL_INPUT = DIM                    # Input: {...}
TOOL_DONE = GREEN                   # [Done]
TOOL_ERROR = RED                    # [Error]
TOOL_BLOCKED = BOLD + RED           # [BLOCKED]
PASS = BOLD + BRIGHT_GREEN          # PASSED
FAIL = BOLD + BRIGHT_RED            # FAILED
WARN = BOLD + YELLOW                # WARNING
THINKING = WHITE                    # agent text/thoughts
RATE_LIMIT = BOLD + YELLOW          # [Rate limited]
SYSTEM = DIM + BLUE                 # [worktree], [gitignore], etc.
DISPATCH = BOLD + BRIGHT_BLUE       # "Dispatching 3 task(s)"

# ---------------------------------------------------------------------------
# Per-task color rotation — each task_id gets a stable distinct color
# ---------------------------------------------------------------------------
_TASK_COLORS = [
    BRIGHT_CYAN,
    BRIGHT_MAGENTA,
    BRIGHT_YELLOW,
    BRIGHT_GREEN,
    BRIGHT_BLUE,
    CYAN,
    MAGENTA,
    YELLOW,
    GREEN,
    BLUE,
]


def task_color(task_id: int) -> str:
    """Pick a stable, distinct color for *task_id* from the rotation."""
    return _TASK_COLORS[task_id % len(_TASK_COLORS)]
# ---------------------------------------------------------------------------
# Convenience formatters
# ---------------------------------------------------------------------------
def style(text: str, *codes: str) -> str:
    """Surround *text* with the given ANSI codes and append a reset."""
    if not _NO_COLOR:
        text = "".join(codes) + text + RESET
    return text
def label_for(label: str) -> str:
    """Colorize a session label like 'dev:5' or 'test:12:fix2'."""
    if not label:
        return ""
    lowered = label.lower()
    if lowered.startswith("init"):
        return style(f"[{label}]", LABEL_INIT)
    # All remaining label shapes carry the task ID as the second ':' segment.
    segments = label.split(":")
    tid = int(segments[1]) if len(segments) > 1 and segments[1].isdigit() else 0
    if "fix" in lowered or lowered.startswith("dev"):
        return style(f"[{label}]", BOLD, task_color(tid))
    if lowered.startswith("test"):
        return style(f"[{label}]", BOLD, task_color(tid)) + style(" QA", DIM)
    return style(f"[{label}]", DIM)
def header_bar(text: str) -> str:
    """Render a 70-column, uppercased session banner around *text*."""
    rule = style("=" * 70, HEADER)
    title = style(text.upper(), HEADER)
    return f"\n{rule}\n  {title}\n{rule}\n"
def progress_line(done: int, total: int) -> str:
    """Build the one-line progress summary with a 20-cell mini bar."""
    if not total:
        return style("Progress: no tasks found (initializer not yet run?)", DIM)
    filled_cells = int(20 * done / total)
    bar = style("█" * filled_cells, BRIGHT_GREEN) + style("░" * (20 - filled_cells), DIM)
    count_part = style(f"{done}/{total}", PROGRESS_BAR)
    pct_part = style(f"({(done / total) * 100:.1f}%)", PROGRESS_PCT)
    return f"\n{style('Progress:', BOLD)} {bar} {count_part} tasks done {pct_part}"

View File

@@ -0,0 +1,146 @@
"""
Terminal Colors
===============
ANSI color helpers for distinguishing parallel agent output at a glance.
No external dependencies — uses raw escape codes that work in any modern terminal.
"""
import os
# Respect NO_COLOR convention (https://no-color.org/)
_NO_COLOR = os.environ.get("NO_COLOR") is not None or not os.isatty(1)
# ---------------------------------------------------------------------------
# ANSI escape codes
# ---------------------------------------------------------------------------
RESET = "" if _NO_COLOR else "\033[0m"
BOLD = "" if _NO_COLOR else "\033[1m"
DIM = "" if _NO_COLOR else "\033[2m"
ITALIC = "" if _NO_COLOR else "\033[3m"
# Foreground colors
BLACK = "" if _NO_COLOR else "\033[30m"
RED = "" if _NO_COLOR else "\033[31m"
GREEN = "" if _NO_COLOR else "\033[32m"
YELLOW = "" if _NO_COLOR else "\033[33m"
BLUE = "" if _NO_COLOR else "\033[34m"
MAGENTA = "" if _NO_COLOR else "\033[35m"
CYAN = "" if _NO_COLOR else "\033[36m"
WHITE = "" if _NO_COLOR else "\033[37m"
# Bright foreground colors
BRIGHT_RED = "" if _NO_COLOR else "\033[91m"
BRIGHT_GREEN = "" if _NO_COLOR else "\033[92m"
BRIGHT_YELLOW = "" if _NO_COLOR else "\033[93m"
BRIGHT_BLUE = "" if _NO_COLOR else "\033[94m"
BRIGHT_MAGENTA = "" if _NO_COLOR else "\033[95m"
BRIGHT_CYAN = "" if _NO_COLOR else "\033[96m"
# ---------------------------------------------------------------------------
# Semantic styles — single place to tweak the look
# ---------------------------------------------------------------------------
HEADER = BOLD + BRIGHT_CYAN # ═══ SESSION HEADERS ═══
PROGRESS_BAR = BOLD + GREEN # Progress: 5/32 tasks done
PROGRESS_PCT = BOLD + BRIGHT_GREEN # (15.6%)
LABEL_INIT = BOLD + BRIGHT_MAGENTA # [init]
LABEL_DEV = BOLD + BRIGHT_CYAN # [dev:5]
LABEL_TEST = BOLD + BRIGHT_YELLOW # [test:5]
LABEL_FIX = BOLD + YELLOW # [dev:5:fix1]
LABEL_ORCH = BOLD + WHITE # orchestrator-level messages
TOOL_NAME = BOLD + CYAN # [Tool: Write]
TOOL_INPUT = DIM # Input: {...}
TOOL_DONE = GREEN # [Done]
TOOL_ERROR = RED # [Error]
TOOL_BLOCKED = BOLD + RED # [BLOCKED]
PASS = BOLD + BRIGHT_GREEN # PASSED
FAIL = BOLD + BRIGHT_RED # FAILED
WARN = BOLD + YELLOW # WARNING
THINKING = WHITE # agent text/thoughts
RATE_LIMIT = BOLD + YELLOW # [Rate limited]
SYSTEM = DIM + BLUE # [worktree], [gitignore], etc.
DISPATCH = BOLD + BRIGHT_BLUE # Dispatching 3 task(s)
# ---------------------------------------------------------------------------
# Per-task color rotation — each task_id gets a stable distinct color
# ---------------------------------------------------------------------------
_TASK_COLORS = [
BRIGHT_CYAN,
BRIGHT_MAGENTA,
BRIGHT_YELLOW,
BRIGHT_GREEN,
BRIGHT_BLUE,
CYAN,
MAGENTA,
YELLOW,
GREEN,
BLUE,
]
def task_color(task_id: int | str) -> str:
"""Return a stable color for a given task ID."""
if isinstance(task_id, str):
# Handle dotted IDs like "1.2" — use the major part
task_id = int(str(task_id).split(".")[0])
return _TASK_COLORS[task_id % len(_TASK_COLORS)]
# ---------------------------------------------------------------------------
# Convenience formatters
# ---------------------------------------------------------------------------
def style(text: str, *codes: str) -> str:
    """Apply the given ANSI *codes* to *text*, terminated by a reset."""
    if _NO_COLOR:
        return text
    return "".join(codes) + text + RESET
def _extract_task_id(label: str) -> str:
    """Pull the task-ID segment out of labels like 'dev:5' or 'dev:1.2:fix1'."""
    _, sep, remainder = label.partition(":")
    return remainder.split(":", 1)[0] if sep else "0"
def label_for(label: str) -> str:
    """Colorize a session label like 'dev:5' or 'test:12:fix2'."""
    if not label:
        return ""
    normalized = label.lower()
    if normalized.startswith("init"):
        return style(f"[{label}]", LABEL_INIT)
    # dev, dev:fix, test, or anything else with a task ID
    colored = style(f"[{label}]", BOLD, task_color(_extract_task_id(label)))
    if normalized.startswith("test"):
        colored += style(" QA", DIM)
    return colored
def header_bar(text: str) -> str:
    """Format a full-width, uppercased session banner."""
    rule = "=" * 70
    parts = [
        "",
        style(rule, HEADER),
        f"  {style(text.upper(), HEADER)}",
        style(rule, HEADER),
        "",
    ]
    return "\n".join(parts)
def progress_line(done: int, total: int) -> str:
    """Format the progress summary line with a 20-cell mini bar."""
    if total == 0:
        return style("Progress: no tasks found (initializer not yet run?)", DIM)
    completed = int(20 * done / total)
    remaining = 20 - completed
    bar = style("█" * completed, BRIGHT_GREEN) + style("░" * remaining, DIM)
    counts = style(f"{done}/{total}", PROGRESS_BAR)
    percent = style(f"({(done / total) * 100:.1f}%)", PROGRESS_PCT)
    return f"\n{style('Progress:', BOLD)} {bar} {counts} tasks done {percent}"

View File

@@ -0,0 +1,115 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>🐱 Cattopia — The Great Mouse Chase!</title>
<link rel="stylesheet" href="/styles/style.css">
<link rel="stylesheet" href="/styles/rpg_style.css">
<link href="https://fonts.googleapis.com/css2?family=Fredoka+One&family=Nunito:wght@400;700;900&display=swap"
rel="stylesheet">
</head>
<body>
<div id="app">
<!-- INTRO SCREEN -->
<div id="intro-screen" class="screen active">
<canvas id="intro-canvas"></canvas>
<div id="intro-text">
<h1 class="glow-text">✨ CATTOPIA ✨</h1>
<p>Click the treasure chest to enter the magical world!</p>
</div>
</div>
<!-- CAT CREATOR SCREEN -->
<div id="creator-screen" class="screen">
<div class="creator-panel">
<h1>🐱 Create Your Cat!</h1>
<div class="creator-form">
<div class="form-group">
<label>Cat Name:</label>
<input type="text" id="cat-name" placeholder="What's your cat's name?" maxlength="15">
</div>
<div class="form-group">
<label>Color:</label>
<div class="color-picker">
<button class="color-btn selected" data-color="white" id="btn-white">
<div class="cat-preview white-cat"></div>
<span>White</span>
</button>
<button class="color-btn" data-color="orange" id="btn-orange">
<div class="cat-preview orange-cat"></div>
<span>Orange</span>
</button>
</div>
</div>
<button id="create-cat-btn" class="big-btn">🐾 Create Cat!</button>
<div id="cat-stats-reveal" class="hidden">
<h3>✨ Your cat's secret stats! ✨</h3>
<div id="stats-display"></div>
                        <button id="add-another-btn" class="big-btn secondary">➕ Add Another Cat</button>
<button id="start-game-btn" class="big-btn">🎮 Start the Hunt!</button>
</div>
</div>
<div id="created-cats-list">
<h3>Your Cats:</h3>
<div id="cats-roster"></div>
<div id="inventory-panel" class="hidden">
<h3>🎒 Party Inventory</h3>
<p class="inventory-hint">Click loot to choose which cat equips it!</p>
<div id="party-loot"></div>
</div>
</div>
</div>
</div>
<!-- GAME SCREEN -->
<div id="game-screen" class="screen">
<canvas id="game-canvas"></canvas>
<div id="game-hud">
<div id="scoreboard">
<h3>🏆 Scoreboard</h3>
<div id="scores"></div>
</div>
<div id="game-controls">
<button id="add-mouse-btn" class="hud-btn">🐭 Add Mouse!</button>
<button id="add-5-mice-btn" class="hud-btn">🐭x5 Add 5 Mice!</button>
<div id="mouse-count">Mice remaining: <span id="mice-left">0</span></div>
<div id="total-caught">Mice Caught: <span id="total-caught-count">0</span></div>
<div id="boss-progress">Next Boss in: <span id="mice-until-boss">10</span></div>
<div id="boss-warning" class="hidden">🚨 BOSS INCOMING! 🚨</div>
</div>
<div id="inventory-hud">
<h3>🎒 Loot</h3>
<div id="hud-loot-list"></div>
</div>
</div>
<div id="giga-dog-hud">
<span class="giga-key">SPACE</span> Summon Giga Dog
</div>
</div>
<!-- BOSS SCREEN OVERLAY -->
<div id="boss-overlay" class="hidden">
<h1 class="boss-title" id="boss-overlay-title">⚠️ BOSS APPEARS! ⚠️</h1>
<p id="boss-overlay-desc">All cats must work together!</p>
</div>
<!-- VICTORY SCREEN -->
<div id="victory-screen" class="screen">
<canvas id="victory-canvas"></canvas>
<div id="victory-content" class="hidden">
<h1 class="glow-text">🎉 VICTORY! 🎉</h1>
<h2>The treasure vault is open!</h2>
<div id="final-scoreboard"></div>
<div id="mvp-display"></div>
<button id="play-again-btn" class="big-btn">🔄 Play Again!</button>
</div>
</div>
</div>
<script type="module" src="/src/main.js"></script>
</body>
</html>

View File

@@ -0,0 +1,111 @@
import { Container } from 'pixi.js';
import Cat from '../entities/Cat.js';
import { gameState } from '../state/GameState.js';
import { GAME_PHASES } from '../constants.js';
export default class CreatorScene {
  /**
   * DOM-driven cat-creation screen.
   *
   * Unlike the canvas scenes, all UI here lives in plain HTML — the Pixi
   * container stays empty and exists only to satisfy the scene API.
   *
   * @param {object} app - Pixi application (not drawn to by this scene).
   * @param {object} sceneManager - Scene switcher used to enter the game scene.
   */
  constructor(app, sceneManager) {
    this.app = app;
    this.sceneManager = sceneManager;
    this.container = new Container(); // empty — this scene is DOM-only
    this.selectedColor = 'white'; // matches the pre-selected color button
    this._creatorScreen = document.getElementById('creator-screen');
    // Wire DOM events
    document.getElementById('btn-white').addEventListener('click', () => {
      this.selectedColor = 'white';
      document.getElementById('btn-white').classList.add('selected');
      document.getElementById('btn-orange').classList.remove('selected');
    });
    document.getElementById('btn-orange').addEventListener('click', () => {
      this.selectedColor = 'orange';
      document.getElementById('btn-orange').classList.add('selected');
      document.getElementById('btn-white').classList.remove('selected');
    });
    document.getElementById('create-cat-btn').addEventListener('click', () => this._createCat());
    document.getElementById('add-another-btn').addEventListener('click', () => {
      // Hide the reveal panel so the next cat gets a fresh stats animation.
      document.getElementById('cat-stats-reveal').classList.add('hidden');
      document.getElementById('cat-name').focus();
    });
    document.getElementById('start-game-btn').addEventListener('click', () => this._startGame());
  }
  /**
   * Roll random stats (each 1–3) for a new cat.
   * @returns {{speed: number, agility: number, hunting: number, stealth: number}}
   */
  _generateStats() {
    return {
      speed: Math.floor(Math.random() * 3) + 1,
      agility: Math.floor(Math.random() * 3) + 1,
      hunting: Math.floor(Math.random() * 3) + 1,
      stealth: Math.floor(Math.random() * 3) + 1
    };
  }
  /** Validate the name field, create the cat, and reveal its rolled stats. */
  _createCat() {
    const nameInput = document.getElementById('cat-name');
    const name = nameInput.value.trim();
    // Empty name: flag the input in red and bail without creating a cat.
    if (!name) { nameInput.focus(); nameInput.style.borderColor = '#ff4444'; return; }
    nameInput.style.borderColor = '';
    const stats = this._generateStats();
    const cat = new Cat({ name, color: this.selectedColor, stats });
    gameState.cats.push(cat);
    this._showStatsReveal(cat);
    this._updateRoster();
    nameInput.value = '';
  }
  /**
   * Show the stats panel for the newly created cat, animating each stat bar
   * from 0% to its value, and toggle the start button's visibility.
   * @param {object} cat - The cat whose stats are displayed.
   */
  _showStatsReveal(cat) {
    const reveal = document.getElementById('cat-stats-reveal');
    reveal.classList.remove('hidden');
    const display = document.getElementById('stats-display');
    display.innerHTML = '';
    const statNames = [
      { key: 'speed', label: '\u26A1 Speed', cls: 'speed' },
      { key: 'agility', label: '\u{1F300} Agility', cls: 'agility' },
      { key: 'hunting', label: '\u{1F3AF} Hunting', cls: 'hunting' },
      { key: 'stealth', label: '\u{1F47B} Stealth', cls: 'stealth' }
    ];
    statNames.forEach(s => {
      const val = cat.stats[s.key];
      const bar = document.createElement('div');
      bar.className = 'stat-bar';
      bar.innerHTML = `<span class="stat-label">${s.label}</span><div class="stat-track"><div class="stat-fill ${s.cls}" style="width:0%"></div></div><span class="stat-value">${val}</span>`;
      display.appendChild(bar);
      // Defer the width change so the CSS transition animates from 0.
      setTimeout(() => bar.querySelector('.stat-fill').style.width = `${val * 10}%`, 50);
    });
    document.getElementById('start-game-btn').style.display = gameState.cats.length >= 1 ? 'block' : 'none';
  }
  /** Re-render the roster list showing every created cat and its stats. */
  _updateRoster() {
    const roster = document.getElementById('cats-roster');
    roster.innerHTML = '';
    gameState.cats.forEach(cat => {
      const div = document.createElement('div');
      div.className = 'roster-cat';
      const bgColor = cat.color === 'white' ? '#fff' : '#ff9a3c';
      div.innerHTML = `<div class="roster-cat-icon" style="background:${bgColor}"></div><span class="roster-cat-name">${cat.name}</span><span class="roster-cat-stats">SPD:${cat.stats.speed} AGI:${cat.stats.agility} HNT:${cat.stats.hunting} STL:${cat.stats.stealth}</span>`;
      roster.appendChild(div);
    });
  }
  /** Hand control over to the game scene. */
  _startGame() {
    this.sceneManager.switchTo('game');
  }
  /** Scene API: show this screen and enter the CREATE phase. */
  enter() {
    this._creatorScreen.classList.add('active');
    gameState.phase = GAME_PHASES.CREATE;
  }
  update() {} // no per-frame logic — DOM-only scene
  render() {}
  /** Scene API: hide this screen. */
  exit() {
    this._creatorScreen.classList.remove('active');
  }
}

View File

@@ -0,0 +1,42 @@
import Mouse from '../entities/Mouse.js';
import ZombieMinion from '../entities/ZombieMinion.js';
import Kitten from '../entities/Kitten.js';
import GigaDog from '../entities/GigaDog.js';
import { GAME_PHASES } from '../constants.js';
export default class SpawnSystem {
  /**
   * Central factory for runtime entities — mice, zombie minions, kittens,
   * and the Giga Dog. New entities are pushed straight into the shared game
   * state, with matching particle / floating-text feedback where relevant.
   *
   * @param {object} gameState - Shared mutable game state.
   * @param {object} particleSystem - Particle effect emitter.
   * @param {object} floatingTextSystem - Floating combat-text emitter.
   */
  constructor(gameState, particleSystem, floatingTextSystem) {
    this.gameState = gameState;
    this.particles = particleSystem;
    this.floats = floatingTextSystem;
  }
  /** Spawn `count` mice at random positions inside `bounds` (hunt phase only). */
  spawnMice(count, bounds) {
    if (this.gameState.phase !== GAME_PHASES.HUNT) return;
    let remaining = count;
    while (remaining-- > 0) {
      const spawnX = Math.random() * (bounds.width - 100) + 50;
      const spawnY = Math.random() * (bounds.height - 100) + 50;
      this.gameState.mice.push(new Mouse({ x: spawnX, y: spawnY }));
    }
  }
  /** Spawn a zombie minion at the boss's position. */
  spawnZombieMinion(bossX, bossY) {
    const minion = new ZombieMinion({ x: bossX, y: bossY });
    this.gameState.zombieMinions.push(minion);
  }
  /** Spawn a kitten attached to `parent`, with celebratory feedback. */
  spawnKitten(parent) {
    this.gameState.kittens.push(new Kitten({ parent }));
    this.floats.add('Kitten summoned!', parent.x, parent.y - 30, 1.5, '#ffd700');
    this.particles.spawnCatchParticles(parent.x, parent.y);
  }
  /** Unleash the once-per-game Giga Dog at the center of the arena. */
  spawnGigaDog(bounds) {
    const centerX = bounds.width / 2;
    const centerY = bounds.height / 2;
    this.gameState.gigaDogUsed = true;
    this.gameState.gigaDog = new GigaDog({ x: centerX, y: centerY });
    this.floats.add('GIGA DOG UNLEASHED!', centerX, centerY - 60, 3, '#ff0');
    let bursts = 40;
    while (bursts--) this.particles.spawnCatchParticles(centerX, centerY);
  }
}

View File

@@ -0,0 +1,486 @@
import { Container } from 'pixi.js';
import { GAME_PHASES, MICE_GOAL, BOSS_HP_PER_CAT, BOSS_DEFS } from '../constants.js';
import { gameState } from '../state/GameState.js';
import { eventBus } from '../core/EventBus.js';
import Boss from '../entities/Boss.js';
import ParticleSystem from '../systems/ParticleSystem.js';
import FloatingTextSystem from '../systems/FloatingTextSystem.js';
import SpawnSystem from '../systems/SpawnSystem.js';
import ForestRenderer from '../renderers/ForestRenderer.js';
import CatRenderer from '../renderers/CatRenderer.js';
import MouseRenderer from '../renderers/MouseRenderer.js';
import BossRenderer from '../renderers/BossRenderer.js';
import ZombieMinionRenderer from '../renderers/ZombieMinionRenderer.js';
import KittenRenderer from '../renderers/KittenRenderer.js';
import GigaDogRenderer from '../renderers/GigaDogRenderer.js';
import ParticleRenderer from '../renderers/ParticleRenderer.js';
import FloatingTextRenderer from '../renderers/FloatingTextRenderer.js';
export default class GameScene {
constructor(app, sceneManager) {
this.app = app;
this.sceneManager = sceneManager;
this.container = new Container();
// Systems
this.particleSys = new ParticleSystem(gameState);
this.floatTextSys = new FloatingTextSystem(gameState);
this.spawnSys = new SpawnSystem(gameState, this.particleSys, this.floatTextSys);
// Renderers
this.forestRenderer = new ForestRenderer();
this.mouseRenderer = new MouseRenderer();
this.bossRenderer = new BossRenderer();
this.zombieMinionRenderer = new ZombieMinionRenderer();
this.kittenRenderer = new KittenRenderer();
this.catRenderer = new CatRenderer();
this.gigaDogRenderer = new GigaDogRenderer();
this.particleRenderer = new ParticleRenderer();
this.floatingTextRenderer = new FloatingTextRenderer();
// Add in render order
this.container.addChild(this.forestRenderer.container);
this.container.addChild(this.mouseRenderer.container);
this.container.addChild(this.bossRenderer.container);
this.container.addChild(this.zombieMinionRenderer.container);
this.container.addChild(this.kittenRenderer.container);
this.container.addChild(this.catRenderer.container);
this.container.addChild(this.gigaDogRenderer.container);
this.container.addChild(this.particleRenderer.container);
this.container.addChild(this.floatingTextRenderer.container);
// DOM refs
this._gameScreen = document.getElementById('game-screen');
// Wire DOM buttons
document.getElementById('add-mouse-btn').addEventListener('click', () => {
this.spawnSys.spawnMice(1, this._bounds());
this._updateHUD();
});
document.getElementById('add-5-mice-btn').addEventListener('click', () => {
this.spawnSys.spawnMice(5, this._bounds());
this._updateHUD();
});
document.getElementById('play-again-btn').addEventListener('click', () => location.reload());
// Spacebar: summon Giga Dog (once per game)
document.addEventListener('keydown', (e) => {
if (e.code === 'Space' && !gameState.gigaDogUsed &&
(gameState.phase === GAME_PHASES.HUNT || gameState.phase === GAME_PHASES.BOSS)) {
e.preventDefault();
this.spawnSys.spawnGigaDog(this._bounds());
this._updateGigaDogHUD();
}
});
}
_bounds() {
return { width: this.app.screen.width, height: this.app.screen.height };
}
    enter() {
        // Scene entry: show the DOM overlay, begin the hunt phase, and lay out
        // the world for the current screen size.
        this._gameScreen.classList.add('active');
        gameState.phase = GAME_PHASES.HUNT;
        this.forestRenderer.generateForest(this.app.screen.width, this.app.screen.height);
        // Place cats
        // Spread the cats in a horizontal row centered on the screen.
        gameState.cats.forEach((cat, i) => {
            cat.x = this.app.screen.width / 2 + (i - gameState.cats.length / 2) * 80;
            cat.y = this.app.screen.height / 2;
        });
        this._updateScoreboard();
        // NOTE(review): hard-codes the first boss goal; presumably enter() only
        // ever runs at game start with currentBossIndex === 0 — confirm.
        document.getElementById('mice-until-boss').textContent = MICE_GOAL[0];
    }
    update() {
        // Per-frame simulation step. Only the two active play phases simulate;
        // intro/create/victory phases are inert here.
        if (gameState.phase !== GAME_PHASES.HUNT && gameState.phase !== GAME_PHASES.BOSS) return;
        const bounds = this._bounds();
        // Update mice
        for (const mouse of gameState.mice) {
            mouse.update(gameState.cats, bounds);
        }
        // Drop mice caught this frame.
        gameState.mice = gameState.mice.filter(m => m.alive);
        // Update cats
        for (const cat of gameState.cats) {
            if (gameState.phase === GAME_PHASES.BOSS && gameState.bossRat) {
                const dist = cat.updateBossChase(gameState.bossRat);
                if (dist < 35) {
                    // Per-frame hit roll, scaled by the cat's hunting stat
                    // (3% at hunting=5).
                    if (Math.random() < 0.03 * (cat.stats.hunting / 5)) {
                        const dmg = cat.stats.hunting * 0.5 + cat.stats.speed * 0.2 + cat.level;
                        gameState.bossRat.hp -= dmg;
                        cat.gainXP(3, gameState.floatingTexts);
                        this.particleSys.spawnHitParticles(gameState.bossRat.x, gameState.bossRat.y);
                        if (gameState.bossRat.hp <= 0) {
                            gameState.bossRat.hp = 0;
                            // _defeatBoss() nulls bossRat, so later cats in this
                            // loop fall through the guard above — no double kill.
                            this._defeatBoss();
                        }
                    }
                }
            } else if (gameState.phase === GAME_PHASES.HUNT) {
                const result = cat.updateHunt(gameState.mice, bounds);
                if (result && result.dist < 20) {
                    // Catch chance combines hunting and agility stats.
                    const catchChance = cat.stats.hunting * 0.08 + cat.stats.agility * 0.05;
                    if (Math.random() < catchChance) {
                        result.closestMouse.alive = false;
                        cat.catches++;
                        gameState.totalCaught++;
                        cat.gainXP(5, gameState.floatingTexts);
                        this.particleSys.spawnCatchParticles(result.closestMouse.x, result.closestMouse.y);
                        this._updateScoreboard();
                        this._updateHUD();
                        if (this._shouldTriggerBoss()) this._triggerBoss();
                    }
                }
            }
            // Crown kitten spawning
            // The Rat King's Crown periodically spawns helper kittens.
            if (cat.hasCrown()) {
                cat.crownCooldown--;
                if (cat.crownCooldown <= 0) {
                    // 240 frames between spawns (~4s at 60fps — assumes a
                    // 60Hz fixed timestep; see FIXED_TIMESTEP).
                    cat.crownCooldown = 240;
                    this.spawnSys.spawnKitten(cat);
                }
            }
            cat.applyPhysics(bounds);
        }
        // Update boss
        if (gameState.phase === GAME_PHASES.BOSS && gameState.bossRat) {
            const bossActions = gameState.bossRat.update(bounds);
            this._handleBossActions(bossActions);
        }
        // Update zombie minions
        for (const minion of gameState.zombieMinions) {
            const result = minion.update(gameState.cats, bounds);
            if (result) {
                if (result.type === 'hit') this.particleSys.spawnHitParticles(minion.x, minion.y);
                if (result.type === 'killed') {
                    result.killer.gainXP(2, gameState.floatingTexts);
                    this.particleSys.spawnCatchParticles(minion.x, minion.y);
                }
            }
        }
        gameState.zombieMinions = gameState.zombieMinions.filter(m => m.alive);
        // Update kittens
        for (const kitten of gameState.kittens) {
            let actions = null;
            kitten.clampToBounds(bounds);
            if (gameState.phase === GAME_PHASES.BOSS && gameState.bossRat) {
                // During a boss fight kittens attack the boss and its minions.
                actions = kitten.updateBoss(gameState.bossRat, gameState.zombieMinions);
                if (actions) {
                    for (const a of actions) {
                        if (a.type === 'hitBoss') {
                            this.particleSys.spawnHitParticles(gameState.bossRat.x, gameState.bossRat.y);
                            if (gameState.bossRat.hp <= 0) {
                                gameState.bossRat.hp = 0;
                                this._defeatBoss();
                            }
                        }
                        if (a.type === 'hitMinion') {
                            this.particleSys.spawnHitParticles(a.minion.x, a.minion.y);
                            if (a.killed) this.particleSys.spawnCatchParticles(a.minion.x, a.minion.y);
                        }
                    }
                }
            } else if (gameState.phase === GAME_PHASES.HUNT) {
                const result = kitten.updateHunt(gameState.mice);
                if (result && result.type === 'caughtMouse') {
                    // Kitten catches are credited to the parent cat.
                    const parent = gameState.cats.find(c => c.id === kitten.parentId);
                    if (parent) {
                        parent.catches++;
                        gameState.totalCaught++;
                        parent.gainXP(5, gameState.floatingTexts);
                        this._updateScoreboard();
                        this._updateHUD();
                        if (this._shouldTriggerBoss()) this._triggerBoss();
                    }
                    this.particleSys.spawnCatchParticles(result.mouse.x, result.mouse.y);
                }
            }
        }
        // Kittens have a finite lifetime; cull the expired ones.
        gameState.kittens = gameState.kittens.filter(k => k.life > 0);
        // Update Giga Dog
        if (gameState.gigaDog) {
            const dogAction = gameState.gigaDog.update(
                gameState.bossRat, gameState.zombieMinions, gameState.mice, bounds
            );
            this._handleGigaDogAction(dogAction);
        }
        // Update systems
        this.particleSys.update();
        this.floatTextSys.update();
    }
render() {
const w = this.app.screen.width;
const h = this.app.screen.height;
const isBloodMoon = gameState.phase === GAME_PHASES.BOSS &&
gameState.bossRat && gameState.bossRat.defIndex === 4;
this.forestRenderer.sync(w, h, isBloodMoon);
this.mouseRenderer.sync(gameState.mice);
this.bossRenderer.sync(gameState.bossRat);
this.zombieMinionRenderer.sync(gameState.zombieMinions);
this.kittenRenderer.sync(gameState.kittens);
this.catRenderer.sync(gameState.cats);
this.gigaDogRenderer.sync(gameState.gigaDog);
this.particleRenderer.sync(gameState.particles);
this.floatingTextRenderer.sync(gameState.floatingTexts);
}
_handleBossActions(actions) {
if (!actions) return;
if (!Array.isArray(actions)) actions = [actions];
for (const action of actions) {
if (action.type === 'fireTrail') {
this.particleSys.spawnHitParticles(action.x, action.y);
} else if (action.type === 'spawnMinion') {
this.spawnSys.spawnZombieMinion(action.x, action.y);
} else if (action.type === 'wizardTeleport') {
if (gameState.cats.length > 0) {
const lowestCat = [...gameState.cats].sort((a, b) =>
(a.stats.speed + a.stats.agility + a.stats.hunting + a.stats.stealth) -
(b.stats.speed + b.stats.agility + b.stats.hunting + b.stats.stealth)
)[0];
this.floatTextSys.add('TELEPORTED TO MOON!', lowestCat.x, lowestCat.y, 3, '#f44');
gameState.bossRat.startWizardTeleport(lowestCat);
}
} else if (action.type === 'wizardReturn') {
const cat = action.cat;
cat.x = action.oldX;
cat.stats.speed += 3;
cat.stats.hunting += 3;
this.floatTextSys.add('MOON POWER AURA!', cat.x, cat.y, 3, '#0ff');
this.particleSys.spawnCatchParticles(cat.x, cat.y);
}
}
}
_handleGigaDogAction(action) {
if (!action) return;
if (action.type === 'expired') {
this.floatTextSys.add('Giga Dog departed!', gameState.gigaDog?.x || 0, gameState.gigaDog?.y || 0, 2, '#aaa');
gameState.gigaDog = null;
return;
}
if (action.type === 'hitBoss') {
this.particleSys.spawnHitParticles(action.target.x, action.target.y);
if (action.target.hp <= 0) {
action.target.hp = 0;
this._defeatBoss();
}
} else if (action.type === 'hitMinion') {
this.particleSys.spawnHitParticles(action.target.x, action.target.y);
if (action.killed) this.particleSys.spawnCatchParticles(action.target.x, action.target.y);
} else if (action.type === 'killedMouse') {
if (gameState.cats.length > 0) {
const cat = gameState.cats[Math.floor(Math.random() * gameState.cats.length)];
cat.catches++;
gameState.totalCaught++;
cat.gainXP(5, gameState.floatingTexts);
this._updateScoreboard();
this._updateHUD();
if (this._shouldTriggerBoss()) this._triggerBoss();
}
this.particleSys.spawnCatchParticles(action.target.x, action.target.y);
}
}
_shouldTriggerBoss() {
const currentGoal = MICE_GOAL[gameState.currentBossIndex];
return gameState.phase === GAME_PHASES.HUNT && gameState.totalCaught >= currentGoal;
}
    _triggerBoss() {
        // Switch into a boss fight: flash the warning overlay, hide the
        // mouse-spawn buttons, and spawn the boss for the current tier.
        // Guard: all bosses already defeated.
        if (gameState.currentBossIndex >= BOSS_DEFS.length) return;
        const def = BOSS_DEFS[gameState.currentBossIndex];
        gameState.phase = GAME_PHASES.BOSS;
        const overlay = document.getElementById('boss-overlay');
        document.getElementById('boss-overlay-title').textContent = `\u26A0\uFE0F ${def.name.toUpperCase()} APPEARS! \u26A0\uFE0F`;
        overlay.classList.remove('hidden');
        document.getElementById('boss-warning').classList.remove('hidden');
        document.getElementById('boss-warning').textContent = `\u{1F6A8} ${def.name}! \u{1F6A8}`;
        document.getElementById('add-mouse-btn').style.display = 'none';
        document.getElementById('add-5-mice-btn').style.display = 'none';
        // Boss HP scales with the number of cats in play.
        const maxHp = BOSS_HP_PER_CAT[gameState.currentBossIndex] * gameState.cats.length;
        gameState.bossRat = new Boss({
            defIndex: gameState.currentBossIndex,
            name: def.name,
            color: def.color,
            maxHp,
            x: this.app.screen.width / 2,
            y: this.app.screen.height / 3
        });
        // Dismiss the full-screen announcement after 2.5 seconds.
        setTimeout(() => overlay.classList.add('hidden'), 2500);
    }
    _defeatBoss() {
        // Resolve a boss kill: grant XP/loot, clear the battlefield, and either
        // return to the hunt phase or end the game after the final boss.
        const def = BOSS_DEFS[gameState.currentBossIndex];
        // XP reward scales with the boss tier.
        gameState.cats.forEach(c => c.gainXP(50 * (gameState.currentBossIndex + 1), gameState.floatingTexts));
        // Drop loot
        gameState.loot.push(def.loot);
        this._renderInventoryHUD();
        // Confetti explosion
        for (let i = 0; i < 30; i++) this.particleSys.spawnCatchParticles(gameState.bossRat.x, gameState.bossRat.y);
        gameState.currentBossIndex++;
        gameState.bossRat = null;
        gameState.zombieMinions = [];
        gameState.kittens = [];
        // Clear remaining mice and reset the catch counter for the next tier.
        gameState.mice.forEach(m => m.alive = false);
        gameState.totalCaught = 0;
        document.getElementById('boss-warning').classList.add('hidden');
        document.getElementById('add-mouse-btn').style.display = 'block';
        document.getElementById('add-5-mice-btn').style.display = 'block';
        this._updateHUD();
        if (gameState.currentBossIndex >= BOSS_DEFS.length) {
            // All bosses down — short pause, then the victory scene.
            gameState.phase = GAME_PHASES.VICTORY;
            setTimeout(() => this.sceneManager.switchTo('victory'), 1500);
        } else {
            gameState.phase = GAME_PHASES.HUNT;
        }
    }
    _updateScoreboard() {
        // Rebuild the per-cat scoreboard HTML, sorted by catch count
        // (highest first). Each entry shows name/level, catches, an XP bar,
        // and any equipped gear.
        const scores = document.getElementById('scores');
        const sorted = [...gameState.cats].sort((a, b) => b.catches - a.catches);
        scores.innerHTML = sorted.map(cat => {
            // White square vs orange square emoji matches the cat's coat.
            const icon = cat.color === 'white' ? '\u2B1C' : '\u{1F7E7}';
            const xpPct = (cat.xp / cat.maxXp) * 100;
            const gearHtml = cat.gear.map(g => `<div class="equipped-gear">\u2694\uFE0F ${g.name}</div>`).join('');
            return `<div class="score-entry" style="flex-direction: column; align-items: stretch; gap: 0.2rem;">
                <div style="display: flex; justify-content: space-between;">
                    <span class="score-name">${icon} ${cat.name} (Lv.${cat.level})</span>
                    <span class="score-count">${cat.catches} \u{1F42D}</span>
                </div>
                <div class="cat-level-info">
                    <span>XP</span>
                    <div class="xp-bar"><div class="xp-fill" style="width: ${xpPct}%"></div></div>
                </div>
                ${gearHtml}
            </div>`;
        }).join('');
    }
_updateHUD() {
const aliveMice = gameState.mice.filter(m => m.alive).length;
document.getElementById('mice-left').textContent = aliveMice;
document.getElementById('total-caught-count').textContent = gameState.totalCaught;
const toBoss = Math.max(0, (MICE_GOAL[gameState.currentBossIndex] || 0) - gameState.totalCaught);
document.getElementById('mice-until-boss').textContent = toBoss;
}
_updateGigaDogHUD() {
const el = document.getElementById('giga-dog-hud');
if (!el) return;
if (gameState.gigaDogUsed) {
el.innerHTML = '<span class="giga-key">SPACE</span> Giga Dog Summoned';
el.classList.add('used');
}
}
_renderInventoryHUD() {
const list = document.getElementById('hud-loot-list');
list.innerHTML = gameState.loot.map((item, idx) => `
<div class="loot-item" data-loot-index="${idx}">
<span class="loot-name">${item.icon} ${item.name}</span>
<span class="loot-stats">${this._formatStats(item.stats)}</span>
</div>
`).join('');
if (gameState.loot.length === 0) {
list.innerHTML = '<div style="color: #aaa; font-style: italic; font-size: 0.8rem;">Empty</div>';
}
// Delegated click handler for loot items
list.onclick = (e) => {
const lootItem = e.target.closest('.loot-item');
if (lootItem) {
const idx = parseInt(lootItem.dataset.lootIndex, 10);
this._showEquipPicker(idx);
}
};
}
_formatStats(stats) {
const s = [];
if (stats.speed) s.push(`+${stats.speed} SPD`);
if (stats.agility) s.push(`+${stats.agility} AGI`);
if (stats.hunting) s.push(`+${stats.hunting} HNT`);
if (stats.stealth) s.push(`+${stats.stealth} STL`);
return s.join(' | ');
}
    _showEquipPicker(lootIndex) {
        // Pop up a chooser listing every cat so the player can decide who
        // equips the selected loot item. Only one picker exists at a time.
        const existing = document.getElementById('equip-picker');
        if (existing) existing.remove();
        const item = gameState.loot[lootIndex];
        const picker = document.createElement('div');
        picker.id = 'equip-picker';
        picker.innerHTML = `
            <div class="equip-picker-header">
                <span>Equip ${item.icon} ${item.name} to:</span>
                <button class="equip-picker-close">&times;</button>
            </div>
            ${gameState.cats.map((cat, catIdx) => {
                const icon = cat.color === 'white' ? '\u2B1C' : '\u{1F7E7}';
                return `<div class="equip-picker-cat" data-cat-index="${catIdx}">
                    ${icon} ${cat.name} <span class="equip-picker-level">Lv.${cat.level}</span>
                </div>`;
            }).join('')}
        `;
        // Event delegation
        // One listener handles both the close button and every cat row.
        picker.addEventListener('click', (e) => {
            if (e.target.closest('.equip-picker-close')) {
                picker.remove();
                return;
            }
            const catEl = e.target.closest('.equip-picker-cat');
            if (catEl) {
                const catIdx = parseInt(catEl.dataset.catIndex, 10);
                this._equipLootToCat(lootIndex, catIdx);
            }
        });
        document.getElementById('inventory-hud').appendChild(picker);
    }
_equipLootToCat(lootIndex, catIndex) {
const item = gameState.loot[lootIndex];
const cat = gameState.cats[catIndex];
gameState.loot.splice(lootIndex, 1);
cat.equipItem(item);
this.floatTextSys.add(`${cat.name} equipped ${item.name}!`, cat.x, cat.y - 30, 2, '#ff6bac');
const picker = document.getElementById('equip-picker');
if (picker) picker.remove();
this._renderInventoryHUD();
this._updateScoreboard();
}
exit() {
this._gameScreen.classList.remove('active');
}
}

View File

@@ -0,0 +1,27 @@
import { GAME_PHASES } from '../constants.js';
class GameState {
    /**
     * Mutable bag of match state shared by every scene and system via the
     * exported singleton. A fresh instance starts in the reset (new-game) state.
     */
    constructor() {
        this.reset();
    }
    /** Restore every field to its new-game value. */
    reset() {
        // Flow control
        this.phase = GAME_PHASES.INTRO;
        this.currentBossIndex = 0;
        this.totalCaught = 0;
        // Entities
        this.cats = [];
        this.mice = [];
        this.bossRat = null;
        this.zombieMinions = [];
        this.kittens = [];
        this.gigaDog = null;
        this.gigaDogUsed = false;
        // Effects and rewards
        this.particles = [];
        this.sparkles = [];
        this.loot = [];
        this.floatingTexts = [];
    }
}
export const gameState = new GameState();
export default GameState;

View File

@@ -0,0 +1,21 @@
// Top-level game phases used by GameState.phase and the scenes.
export const GAME_PHASES = {
    INTRO: 'intro',
    CREATE: 'create',
    HUNT: 'hunt',
    BOSS: 'boss',
    VICTORY: 'victory'
};
// Mice that must be caught to trigger boss N (index = boss tier).
export const MICE_GOAL = [15, 20, 25, 30, 35];
// Boss max HP is this value multiplied by the number of cats in play.
export const BOSS_HP_PER_CAT = [40, 60, 200, 120, 180];
// The five boss tiers, in order, each with its display info and loot drop.
// NOTE: negative stat values (Fire Axe's stealth: -1) are intentional debuffs.
export const BOSS_DEFS = [
    { name: 'The Rat King', icon: '\u{1F400}', loot: { name: "Rat King's Crown", icon: '\u{1F451}', stats: { hunting: 3, agility: 1 } }, color: '#4a3a3a' },
    { name: 'Skeleton Warrior', icon: '\u{1F480}', loot: { name: "Fire Axe", icon: '\u{1FA93}', stats: { stealth: -1, hunting: 5, speed: 2 } }, color: '#ddd' },
    { name: 'Upside-Down Wizard', icon: '\u{1F9D9}\u200D\u2642\uFE0F', loot: { name: "Magic Hat", icon: '\u{1F3A9}', stats: { agility: 4, speed: 4, stealth: 2 } }, color: '#6b2fa0' },
    { name: 'Burning Zombie', icon: '\u{1F9DF}\u200D\u2642\uFE0F', loot: { name: "Burning Cape", icon: '\u{1F9B8}', stats: { speed: 5, hunting: 4 } }, color: '#3a7a40' },
    { name: 'The Blood Wolf', icon: '\u{1F43A}', loot: { name: "Blood Fang", icon: '\u{1F9B7}', stats: { hunting: 10, speed: 5, agility: 5, stealth: 5 } }, color: '#1a0505' }
];
// Fixed simulation step in milliseconds (60 updates per second).
export const FIXED_TIMESTEP = 1000 / 60;

View File

@@ -0,0 +1,125 @@
import { Container, Graphics, FillGradient } from 'pixi.js';
import gsap from 'gsap';
import { gameState } from '../state/GameState.js';
export default class VictoryScene {
    /**
     * End-of-game scene: confetti over a radial-gradient background plus a
     * DOM overlay with the final scoreboard and MVP call-out.
     */
    constructor(app, sceneManager) {
        this.app = app;
        this.sceneManager = sceneManager;
        this.container = new Container();
        this._bgGraphics = new Graphics();
        this._confetti = [];
        this._confettiGraphics = new Graphics();
        this.container.addChild(this._bgGraphics);
        this.container.addChild(this._confettiGraphics);
        // DOM overlay elements shown on top of the PixiJS canvas.
        this._victoryScreen = document.getElementById('victory-screen');
        this._victoryContent = document.getElementById('victory-content');
    }
    enter() {
        this._victoryScreen.classList.add('active');
        const w = this.app.screen.width;
        const h = this.app.screen.height;
        // Create confetti
        // Pieces start spread above the screen so they rain in.
        this._confetti = [];
        for (let i = 0; i < 150; i++) {
            this._confetti.push({
                x: Math.random() * w,
                y: Math.random() * h - h,
                vx: (Math.random() - 0.5) * 3,
                vy: Math.random() * 3 + 2,
                size: Math.random() * 8 + 4,
                rotation: Math.random() * 360,
                rotSpeed: (Math.random() - 0.5) * 10,
                color: `hsl(${Math.random() * 360}, 80%, 60%)`
            });
        }
        // GSAP: animate each confetti piece perpetually
        // The modifier wraps y back above the screen for an endless loop.
        // NOTE(review): render() also wraps y manually — the two wrap paths
        // appear redundant; confirm which one is intended to own the loop.
        for (const c of this._confetti) {
            gsap.to(c, {
                y: `+=${h + 20}`,
                duration: (h + 20) / (c.vy * 60),
                repeat: -1,
                ease: 'none',
                modifiers: {
                    y: (y) => {
                        const val = parseFloat(y);
                        return val > h + 10 ? -10 : val;
                    }
                }
            });
        }
        // Show content after delay with GSAP
        setTimeout(() => {
            this._victoryContent.classList.remove('hidden');
            gsap.from(this._victoryContent, { y: 50, opacity: 0, duration: 1, ease: 'power2.out' });
            // Build final scoreboard
            // Cats ranked by catches; top three get medal emojis.
            const sorted = [...gameState.cats].sort((a, b) => b.catches - a.catches);
            const fsb = document.getElementById('final-scoreboard');
            fsb.innerHTML = '<h3 style="color:#ffd700;margin-bottom:0.8rem">\u{1F3C6} Final Scores \u{1F3C6}</h3>' +
                sorted.map((cat, i) => {
                    const medal = i === 0 ? '\u{1F947}' : i === 1 ? '\u{1F948}' : i === 2 ? '\u{1F949}' : '\u{1F431}';
                    return `<div class="final-score-entry"><span class="final-rank">${medal}</span><span class="final-name">${cat.name}</span><span class="final-catches">${cat.catches} mice</span></div>`;
                }).join('');
            // MVP
            const mvp = sorted[0];
            const mvpDiv = document.getElementById('mvp-display');
            mvpDiv.innerHTML = `<h3>\u{1F451} MVP: ${mvp.name}!</h3><p>Caught ${mvp.catches} mice with stats \u2014 SPD: ${mvp.stats.speed} | AGI: ${mvp.stats.agility} | HNT: ${mvp.stats.hunting} | STL: ${mvp.stats.stealth}</p>`;
        }, 500);
    }
    update() {
        // confetti physics handled by GSAP
    }
    render() {
        const w = this.app.screen.width;
        const h = this.app.screen.height;
        // Background
        this._bgGraphics.clear();
        const bgGrad = new FillGradient({
            type: 'radial',
            center: { x: w / 2, y: h / 2 },
            innerRadius: 0,
            outerCenter: { x: w / 2, y: h / 2 },
            outerRadius: Math.max(w, h) * 0.7,
            colorStops: [
                { offset: 0, color: '#2d1b69' },
                { offset: 1, color: '#0a0a2e' }
            ]
        });
        this._bgGraphics.rect(0, 0, w, h).fill(bgGrad);
        this._confettiGraphics.clear();
        for (const c of this._confetti) {
            // Horizontal drift and spin are integrated here with a fixed
            // ~60fps dt; vertical motion comes from the GSAP tween.
            c.x += c.vx * 0.016; // approximate dt
            c.rotation += c.rotSpeed * 0.016;
            if (c.y > h + 10) { c.y = -10; c.x = Math.random() * w; }
            // Draw rotated rectangle
            const cos = Math.cos(c.rotation * Math.PI / 180);
            const sin = Math.sin(c.rotation * Math.PI / 180);
            const hw = c.size / 2;
            const hh = c.size / 4;
            this._confettiGraphics.poly([
                c.x + cos * (-hw) - sin * (-hh), c.y + sin * (-hw) + cos * (-hh),
                c.x + cos * (hw) - sin * (-hh), c.y + sin * (hw) + cos * (-hh),
                c.x + cos * (hw) - sin * (hh), c.y + sin * (hw) + cos * (hh),
                c.x + cos * (-hw) - sin * (hh), c.y + sin * (-hw) + cos * (hh)
            ]).fill(c.color);
        }
    }
    exit() {
        // Hide the DOM overlay and re-hide the content for a future visit.
        this._victoryScreen.classList.remove('active');
        this._victoryContent.classList.add('hidden');
    }
}

View File

@@ -0,0 +1,66 @@
import { Application } from 'pixi.js';
import GameLoop from './core/GameLoop.js';
import SceneManager from './core/SceneManager.js';
import IntroScene from './scenes/IntroScene.js';
import CreatorScene from './scenes/CreatorScene.js';
import GameScene from './scenes/GameScene.js';
import VictoryScene from './scenes/VictoryScene.js';
/**
 * Application entry point: initialize the PixiJS Application, mount its
 * canvas behind the DOM overlays, register all scenes, and start the
 * fixed-timestep game loop. Statement order matters: the canvas must be in
 * the DOM before scenes query screen dimensions.
 */
async function boot() {
    const app = new Application();
    await app.init({
        resizeTo: window,
        background: '#0a0a2e',
        antialias: true,
        autoDensity: true,
        resolution: window.devicePixelRatio || 1
    });
    // Replace canvas elements: insert PixiJS canvas as the game's single canvas
    // Pinned full-viewport at z-index 0 so the DOM HUD layers sit on top.
    const appEl = document.getElementById('app');
    app.canvas.style.position = 'fixed';
    app.canvas.style.top = '0';
    app.canvas.style.left = '0';
    app.canvas.style.width = '100%';
    app.canvas.style.height = '100%';
    app.canvas.style.zIndex = '0';
    appEl.insertBefore(app.canvas, appEl.firstChild);
    // Enable PixiJS event system on the stage
    app.stage.eventMode = 'static';
    app.stage.hitArea = app.screen;
    // Hide the old canvas elements (they remain in DOM for CSS but aren't used)
    const oldCanvases = ['intro-canvas', 'game-canvas', 'victory-canvas'];
    for (const id of oldCanvases) {
        const el = document.getElementById(id);
        if (el) el.style.display = 'none';
    }
    // Scene manager
    const sceneManager = new SceneManager(app);
    // Register scenes
    const introScene = new IntroScene(app, sceneManager);
    const creatorScene = new CreatorScene(app, sceneManager);
    const gameScene = new GameScene(app, sceneManager);
    const victoryScene = new VictoryScene(app, sceneManager);
    sceneManager.register('intro', introScene);
    sceneManager.register('creator', creatorScene);
    sceneManager.register('game', gameScene);
    sceneManager.register('victory', victoryScene);
    // Game loop: fixed-timestep update + render each frame
    const gameLoop = new GameLoop(app);
    gameLoop.onUpdate(() => sceneManager.update());
    gameLoop.start();
    // Render on each frame via ticker
    app.ticker.add(() => sceneManager.render());
    // Start with intro
    sceneManager.switchTo('intro');
}
// Surface any async init failure (e.g. WebGL unavailable) in the console.
boot().catch(console.error);

View File

@@ -0,0 +1,162 @@
import { Graphics, Container, FillGradient, Text } from 'pixi.js';
export default class ForestRenderer {
    /**
     * Draws the layered forest backdrop: sky gradient, ground, trees,
     * mushrooms, flowers, drifting sparkles, and an optional blood-moon
     * variant used for the final boss.
     *
     * Fix: sync() rebuilds the tree/mushroom/flower layers every frame with
     * freshly allocated Graphics, but previously cleared them with
     * removeChildren() alone — which removes display objects without
     * destroying them, so their geometry accumulated every frame. The
     * removed children are now explicitly destroy()ed.
     */
    constructor() {
        this.container = new Container();
        this.skyGraphics = new Graphics();
        this.groundGraphics = new Graphics();
        this.treeContainer = new Container();
        this.mushroomContainer = new Container();
        this.flowerContainer = new Container();
        this.sparkleGraphics = new Graphics();
        this.bloodMoonGraphics = new Graphics();
        // Add order defines back-to-front draw order.
        this.container.addChild(this.skyGraphics);
        this.container.addChild(this.groundGraphics);
        this.container.addChild(this.treeContainer);
        this.container.addChild(this.mushroomContainer);
        this.container.addChild(this.flowerContainer);
        this.container.addChild(this.sparkleGraphics);
        this.container.addChild(this.bloodMoonGraphics);
        this.trees = [];
        this.flowers = [];
        this.mushrooms = [];
    }
    /**
     * (Re)randomize scenery placement for a w x h playfield. Call on scene
     * entry; sync() then draws from these lists each frame.
     */
    generateForest(w, h) {
        this.trees = [];
        this.flowers = [];
        this.mushrooms = [];
        for (let i = 0; i < 18; i++) {
            this.trees.push({
                x: Math.random() * w,
                y: Math.random() * h * 0.85 + h * 0.1,
                size: Math.random() * 30 + 40,
                // Two foliage palettes: pinkish (hue 300) or bluish (hue 220).
                hue: Math.random() > 0.5 ? 300 : 220
            });
        }
        for (let i = 0; i < 35; i++) {
            this.flowers.push({
                x: Math.random() * w,
                y: Math.random() * h * 0.7 + h * 0.25,
                size: Math.random() * 6 + 3,
                hue: Math.random() * 60 + 280
            });
        }
        for (let i = 0; i < 10; i++) {
            this.mushrooms.push({
                x: Math.random() * w,
                y: Math.random() * h * 0.6 + h * 0.35,
                size: Math.random() * 10 + 8
            });
        }
    }
    /**
     * Redraw the whole backdrop for the current frame. isBloodMoon switches
     * to the red final-boss palette and hides mushrooms/flowers.
     */
    sync(width, height, isBloodMoon) {
        // Wall-clock time drives the sway/sparkle animation.
        const t = Date.now() / 1000;
        // Sky
        this.skyGraphics.clear();
        if (isBloodMoon) {
            const grad = new FillGradient({ type: 'linear', start: { x: 0, y: 0 }, end: { x: width, y: height },
                colorStops: [
                    { offset: 0, color: '#3a0000' }, { offset: 0.3, color: '#5a0000' },
                    { offset: 0.5, color: '#7a0000' }, { offset: 0.7, color: '#2a0000' }, { offset: 1, color: '#110000' }
                ]
            });
            this.skyGraphics.rect(0, 0, width, height).fill(grad);
        } else {
            const grad = new FillGradient({ type: 'linear', start: { x: 0, y: 0 }, end: { x: width, y: height },
                colorStops: [
                    { offset: 0, color: '#2a0845' }, { offset: 0.3, color: '#4a1a6b' },
                    { offset: 0.5, color: '#6b2fa0' }, { offset: 0.7, color: '#3a5a9f' }, { offset: 1, color: '#1a3a6a' }
                ]
            });
            this.skyGraphics.rect(0, 0, width, height).fill(grad);
        }
        // Blood moon orb
        this.bloodMoonGraphics.clear();
        if (isBloodMoon) {
            // Glow
            this.bloodMoonGraphics.circle(width * 0.8, height * 0.2, 100).fill({ color: '#ff0000', alpha: 0.15 });
            this.bloodMoonGraphics.circle(width * 0.8, height * 0.2, 80).fill('#ff0000');
        }
        // Ground
        this.groundGraphics.clear();
        if (isBloodMoon) {
            const groundGrad = new FillGradient({ type: 'linear', start: { x: 0, y: height * 0.7 }, end: { x: 0, y: height },
                colorStops: [
                    { offset: 0, color: 'rgba(80, 0, 0, 0.4)' }, { offset: 1, color: 'rgba(40, 0, 0, 0.6)' }
                ]
            });
            this.groundGraphics.rect(0, height * 0.7, width, height * 0.3).fill(groundGrad);
        } else {
            const groundGrad = new FillGradient({ type: 'linear', start: { x: 0, y: height * 0.7 }, end: { x: 0, y: height },
                colorStops: [
                    { offset: 0, color: 'rgba(100, 50, 150, 0.3)' }, { offset: 1, color: 'rgba(50, 100, 80, 0.5)' }
                ]
            });
            this.groundGraphics.rect(0, height * 0.7, width, height * 0.3).fill(groundGrad);
        }
        // Trees — destroy removed Graphics so per-frame rebuilds don't leak.
        for (const old of this.treeContainer.removeChildren()) old.destroy();
        for (const tree of this.trees) {
            const g = new Graphics();
            const sway = Math.sin(t + tree.x * 0.01) * 3;
            if (isBloodMoon) {
                g.rect(tree.x - 5, tree.y, 10, tree.size).fill('#0a0000');
            } else {
                const h1 = tree.hue;
                g.rect(tree.x - 5, tree.y, 10, tree.size).fill(`hsla(${h1}, 40%, 20%, 0.8)`);
                g.circle(tree.x + sway, tree.y - tree.size * 0.3, tree.size * 0.6).fill(`hsla(${h1}, 60%, 45%, 0.7)`);
                g.circle(tree.x + sway + 10, tree.y - tree.size * 0.5, tree.size * 0.35).fill(`hsla(${h1 + 30}, 70%, 55%, 0.5)`);
            }
            this.treeContainer.addChild(g);
        }
        // Mushrooms — same destroy-on-clear treatment.
        for (const old of this.mushroomContainer.removeChildren()) old.destroy();
        if (!isBloodMoon) {
            for (const m of this.mushrooms) {
                const g = new Graphics();
                g.rect(m.x - 2, m.y, 4, m.size * 0.6).fill('#e8d8c8');
                g.ellipse(m.x, m.y, m.size, m.size * 0.6).fill('#ff6b9d');
                g.circle(m.x - 3, m.y - 3, 2).fill({ color: '#ffffff', alpha: 0.6 });
                g.circle(m.x + 4, m.y - 5, 1.5).fill({ color: '#ffffff', alpha: 0.6 });
                this.mushroomContainer.addChild(g);
            }
        }
        // Flowers — same destroy-on-clear treatment.
        for (const old of this.flowerContainer.removeChildren()) old.destroy();
        if (!isBloodMoon) {
            for (const f of this.flowers) {
                const g = new Graphics();
                g.circle(f.x, f.y, f.size).fill(`hsla(${f.hue}, 80%, 70%, 0.8)`);
                g.circle(f.x, f.y, f.size * 0.4).fill('#ffd700');
                this.flowerContainer.addChild(g);
            }
        }
        // Sparkles
        this.sparkleGraphics.clear();
        for (let i = 0; i < 20; i++) {
            // Deterministic pseudo-random drift paths derived from i and t.
            const sx = (Math.sin(t * 0.7 + i * 47) * 0.5 + 0.5) * width;
            const sy = (Math.cos(t * 0.5 + i * 31) * 0.5 + 0.5) * height;
            const alpha = 0.3 + Math.sin(t * 3 + i) * 0.2;
            const hue = (i * 40 + t * 20) % 360;
            this.sparkleGraphics.circle(sx, sy, 1.5 + Math.sin(t * 4 + i) * 0.5)
                .fill({ color: `hsl(${hue}, 80%, 80%)`, alpha });
        }
    }
    /** Tear down all display objects owned by this renderer. */
    destroy() {
        this.container.destroy({ children: true });
    }
}

View File

@@ -0,0 +1,713 @@
/* ===== GLOBAL STYLES ===== */
* {
    margin: 0;
    padding: 0;
    box-sizing: border-box;
}
body {
    font-family: 'Nunito', sans-serif;
    overflow: hidden;
    width: 100vw;
    height: 100vh;
    background: #0a0a2e;
    cursor: default;
}
h1,
h2,
h3 {
    font-family: 'Fredoka One', cursive;
}
/* Full-viewport overlay panels; scenes toggle exactly one .active at a time. */
.screen {
    position: fixed;
    top: 0;
    left: 0;
    width: 100vw;
    height: 100vh;
    display: none;
    z-index: 1;
}
.screen.active {
    display: flex;
    align-items: center;
    justify-content: center;
}
/* Hard hide; !important so it wins over .screen.active's display: flex. */
.hidden {
    display: none !important;
}
/* ===== INTRO SCREEN ===== */
/* pointer-events: none lets clicks pass through to the PixiJS canvas below. */
#intro-screen {
    background: transparent;
    flex-direction: column;
    pointer-events: none;
}
/* Legacy canvas kept in the DOM but unused (PixiJS supplies its own). */
#intro-canvas {
    display: none;
}
#intro-text {
    position: relative;
    z-index: 2;
    text-align: center;
    pointer-events: none;
}
#intro-text h1 {
    font-size: 4rem;
    margin-bottom: 1rem;
}
#intro-text p {
    font-size: 1.3rem;
    color: #c0a0ff;
    animation: pulse 2s ease-in-out infinite;
}
/* Gradient-filled text with a pulsing drop-shadow glow. */
.glow-text {
    background: linear-gradient(135deg, #ff9ecd, #a78bfa, #60d5ff);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
    background-clip: text;
    filter: drop-shadow(0 0 20px rgba(167, 139, 250, 0.6));
    animation: glow 3s ease-in-out infinite alternate;
}
@keyframes glow {
    from {
        filter: drop-shadow(0 0 20px rgba(167, 139, 250, 0.4));
    }
    to {
        filter: drop-shadow(0 0 40px rgba(255, 158, 205, 0.8));
    }
}
@keyframes pulse {
    0%,
    100% {
        opacity: 0.6;
        transform: scale(1);
    }
    50% {
        opacity: 1;
        transform: scale(1.05);
    }
}
/* ===== CAT CREATOR SCREEN ===== */
#creator-screen {
    background: linear-gradient(135deg, #1a0a3e 0%, #2d1b69 50%, #1a0a3e 100%);
}
/* Frosted-glass card that holds the whole creation form. */
.creator-panel {
    background: rgba(255, 255, 255, 0.08);
    backdrop-filter: blur(20px);
    border: 1px solid rgba(255, 255, 255, 0.15);
    border-radius: 24px;
    padding: 2.5rem;
    max-width: 600px;
    width: 90%;
    max-height: 90vh;
    overflow-y: auto;
    color: white;
}
.creator-panel h1 {
    text-align: center;
    font-size: 2rem;
    margin-bottom: 1.5rem;
    background: linear-gradient(135deg, #ff9ecd, #ffd700);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
    background-clip: text;
}
.form-group {
    margin-bottom: 1.2rem;
}
.form-group label {
    display: block;
    font-weight: 700;
    margin-bottom: 0.5rem;
    font-size: 1.1rem;
    color: #c0a0ff;
}
#cat-name {
    width: 100%;
    padding: 0.8rem 1rem;
    border-radius: 12px;
    border: 2px solid rgba(167, 139, 250, 0.4);
    background: rgba(255, 255, 255, 0.1);
    color: white;
    font-size: 1.1rem;
    font-family: 'Nunito', sans-serif;
    outline: none;
    transition: border-color 0.3s;
}
#cat-name:focus {
    border-color: #a78bfa;
}
#cat-name::placeholder {
    color: rgba(255, 255, 255, 0.35);
}
/* White-vs-orange coat selector buttons. */
.color-picker {
    display: flex;
    gap: 1rem;
}
.color-btn {
    flex: 1;
    padding: 1rem;
    border-radius: 16px;
    border: 3px solid rgba(255, 255, 255, 0.15);
    background: rgba(255, 255, 255, 0.06);
    cursor: pointer;
    transition: all 0.3s;
    text-align: center;
    color: white;
    font-family: 'Nunito', sans-serif;
    font-weight: 700;
    font-size: 1rem;
}
.color-btn:hover {
    background: rgba(255, 255, 255, 0.12);
    transform: translateY(-2px);
}
.color-btn.selected {
    border-color: #ffd700;
    background: rgba(255, 215, 0, 0.15);
    box-shadow: 0 0 20px rgba(255, 215, 0, 0.2);
}
.cat-preview {
    width: 60px;
    height: 60px;
    margin: 0 auto 0.5rem;
    border-radius: 50%;
    position: relative;
}
.white-cat {
    background: radial-gradient(circle, #ffffff, #e8e0f0);
    box-shadow: 0 0 15px rgba(255, 255, 255, 0.4);
}
.orange-cat {
    background: radial-gradient(circle, #ff9a3c, #e07020);
    box-shadow: 0 0 15px rgba(255, 154, 60, 0.4);
}
/* Big button styles */
.big-btn {
    display: block;
    width: 100%;
    padding: 1rem;
    border-radius: 16px;
    border: none;
    font-family: 'Fredoka One', cursive;
    font-size: 1.3rem;
    cursor: pointer;
    transition: all 0.3s;
    margin-top: 1rem;
    background: linear-gradient(135deg, #ff6bac, #a78bfa);
    color: white;
    box-shadow: 0 4px 15px rgba(167, 139, 250, 0.4);
}
.big-btn:hover {
    transform: translateY(-3px);
    box-shadow: 0 8px 25px rgba(167, 139, 250, 0.6);
}
.big-btn:active {
    transform: translateY(0);
}
.big-btn.secondary {
    background: linear-gradient(135deg, #4a9eff, #60d5ff);
    box-shadow: 0 4px 15px rgba(96, 213, 255, 0.3);
}
.big-btn.secondary:hover {
    box-shadow: 0 8px 25px rgba(96, 213, 255, 0.5);
}
/* Stats reveal */
#cat-stats-reveal {
    margin-top: 1.5rem;
    padding: 1.5rem;
    background: rgba(255, 215, 0, 0.08);
    border: 1px solid rgba(255, 215, 0, 0.2);
    border-radius: 16px;
    animation: fadeIn 0.5s ease;
}
#cat-stats-reveal h3 {
    text-align: center;
    color: #ffd700;
    margin-bottom: 1rem;
    font-size: 1.2rem;
}
#stats-display {
    display: flex;
    flex-direction: column;
    gap: 0.6rem;
}
.stat-bar {
    display: flex;
    align-items: center;
    gap: 0.8rem;
}
.stat-label {
    width: 100px;
    font-weight: 700;
    font-size: 0.95rem;
    color: #c0a0ff;
}
.stat-track {
    flex: 1;
    height: 14px;
    background: rgba(255, 255, 255, 0.1);
    border-radius: 7px;
    overflow: hidden;
}
/* Fill width is set from JS; the transition animates the reveal. */
.stat-fill {
    height: 100%;
    border-radius: 7px;
    transition: width 1s ease;
    width: 0;
}
.stat-fill.speed {
    background: linear-gradient(90deg, #4af, #60d5ff);
}
.stat-fill.agility {
    background: linear-gradient(90deg, #4f4, #80ff80);
}
.stat-fill.hunting {
    background: linear-gradient(90deg, #fa4, #ffd700);
}
.stat-fill.stealth {
    background: linear-gradient(90deg, #a78bfa, #c0a0ff);
}
.stat-value {
    width: 30px;
    text-align: right;
    font-weight: 900;
    color: #fff;
}
/* Created cats roster */
#created-cats-list {
    margin-top: 1.5rem;
}
#created-cats-list h3 {
    color: #ff9ecd;
    margin-bottom: 0.8rem;
}
#cats-roster {
    display: flex;
    flex-direction: column;
    gap: 0.5rem;
}
.roster-cat {
    display: flex;
    align-items: center;
    gap: 0.8rem;
    padding: 0.6rem 1rem;
    background: rgba(255, 255, 255, 0.06);
    border-radius: 12px;
    border: 1px solid rgba(255, 255, 255, 0.1);
}
.roster-cat-icon {
    width: 30px;
    height: 30px;
    border-radius: 50%;
}
.roster-cat-name {
    font-weight: 700;
    flex: 1;
}
.roster-cat-stats {
    font-size: 0.8rem;
    color: #c0a0ff;
}
@keyframes fadeIn {
    from {
        opacity: 0;
        transform: translateY(10px);
    }
    to {
        opacity: 1;
        transform: translateY(0);
    }
}
/* ===== GAME SCREEN ===== */
#game-screen {
background: transparent;
pointer-events: none;
}
#game-screen > * {
pointer-events: auto;
}
#game-canvas {
display: none;
}
#game-hud {
position: absolute;
top: 0;
right: 0;
z-index: 10;
display: flex;
flex-direction: column;
gap: 0.8rem;
padding: 1rem;
max-width: 280px;
}
#scoreboard {
background: rgba(0, 0, 0, 0.6);
backdrop-filter: blur(10px);
border: 1px solid rgba(255, 255, 255, 0.1);
border-radius: 16px;
padding: 1rem;
color: white;
}
#scoreboard h3 {
text-align: center;
margin-bottom: 0.5rem;
font-size: 1rem;
color: #ffd700;
}
#scores {
display: flex;
flex-direction: column;
gap: 0.3rem;
font-size: 0.9rem;
}
.score-entry {
display: flex;
justify-content: space-between;
align-items: center;
padding: 0.3rem 0.5rem;
border-radius: 8px;
}
.score-entry:nth-child(1) {
background: rgba(255, 215, 0, 0.15);
}
.score-name {
font-weight: 700;
}
.score-count {
font-weight: 900;
color: #ffd700;
}
#game-controls {
background: rgba(0, 0, 0, 0.6);
/* Safari requires the -webkit- prefix for backdrop-filter */
-webkit-backdrop-filter: blur(10px);
backdrop-filter: blur(10px);
border: 1px solid rgba(255, 255, 255, 0.1);
border-radius: 16px;
padding: 1rem;
color: white;
display: flex;
flex-direction: column;
gap: 0.5rem;
}
.hud-btn {
padding: 0.7rem 1rem;
border-radius: 12px;
border: none;
font-family: 'Fredoka One', cursive;
font-size: 1rem;
cursor: pointer;
transition: all 0.2s;
background: linear-gradient(135deg, #ff6bac, #a78bfa);
color: white;
}
.hud-btn:hover {
transform: scale(1.05);
}
.hud-btn:active {
transform: scale(0.98);
}
#mouse-count,
#total-caught {
font-weight: 700;
font-size: 0.9rem;
text-align: center;
}
#boss-warning {
text-align: center;
font-family: 'Fredoka One', cursive;
font-size: 1.1rem;
color: #ff4444;
animation: pulse 0.5s ease-in-out infinite;
padding: 0.5rem;
background: rgba(255, 0, 0, 0.1);
border-radius: 8px;
border: 1px solid rgba(255, 0, 0, 0.3);
}
/* ===== BOSS OVERLAY ===== */
#boss-overlay {
position: fixed;
top: 0;
left: 0;
width: 100vw;
height: 100vh;
background: rgba(255, 0, 0, 0.15);
z-index: 100;
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
animation: bossFlash 0.5s ease-in-out;
pointer-events: none;
}
.boss-title {
font-size: 3rem;
color: #ff4444;
text-shadow: 0 0 30px rgba(255, 0, 0, 0.6);
animation: shake 0.5s ease-in-out;
}
#boss-overlay p {
font-size: 1.5rem;
color: white;
margin-top: 1rem;
}
@keyframes bossFlash {
0%,
100% {
background: rgba(255, 0, 0, 0.15);
}
50% {
background: rgba(255, 0, 0, 0.4);
}
}
@keyframes shake {
0%,
100% {
transform: translateX(0);
}
20% {
transform: translateX(-10px);
}
40% {
transform: translateX(10px);
}
60% {
transform: translateX(-5px);
}
80% {
transform: translateX(5px);
}
}
/* ===== VICTORY SCREEN ===== */
#victory-screen {
background: transparent;
flex-direction: column;
align-items: center;
justify-content: center;
pointer-events: none;
}
#victory-screen > * {
pointer-events: auto;
}
#victory-canvas {
display: none;
}
#victory-content {
position: relative;
z-index: 2;
text-align: center;
color: white;
padding: 2rem;
max-width: 600px;
animation: fadeIn 1s ease;
}
#victory-content h1 {
font-size: 3rem;
margin-bottom: 0.5rem;
}
#victory-content h2 {
font-size: 1.5rem;
color: #ffd700;
margin-bottom: 1.5rem;
}
#final-scoreboard {
background: rgba(255, 255, 255, 0.08);
border-radius: 16px;
padding: 1.5rem;
margin-bottom: 1rem;
border: 1px solid rgba(255, 215, 0, 0.3);
}
.final-score-entry {
display: flex;
justify-content: space-between;
align-items: center;
padding: 0.5rem 0;
border-bottom: 1px solid rgba(255, 255, 255, 0.08);
font-size: 1.1rem;
}
.final-score-entry:last-child {
border-bottom: none;
}
.final-rank {
width: 30px;
font-weight: 900;
color: #ffd700;
}
.final-name {
flex: 1;
font-weight: 700;
text-align: left;
margin-left: 0.5rem;
}
.final-catches {
font-weight: 900;
color: #60d5ff;
}
#mvp-display {
margin: 1rem 0;
padding: 1.2rem;
background: linear-gradient(135deg, rgba(255, 215, 0, 0.15), rgba(255, 154, 60, 0.15));
border-radius: 16px;
border: 2px solid rgba(255, 215, 0, 0.4);
}
#mvp-display h3 {
color: #ffd700;
margin-bottom: 0.3rem;
font-size: 1.3rem;
}
#mvp-display p {
color: #c0a0ff;
font-size: 0.95rem;
}
/* Scrollbar styling */
::-webkit-scrollbar {
width: 6px;
}
::-webkit-scrollbar-track {
background: transparent;
}
::-webkit-scrollbar-thumb {
background: rgba(167, 139, 250, 0.3);
border-radius: 3px;
}
::-webkit-scrollbar-thumb:hover {
background: rgba(167, 139, 250, 0.5);
}
/* Giga Dog HUD */
#giga-dog-hud {
position: absolute;
bottom: 20px;
left: 50%;
transform: translateX(-50%);
z-index: 10;
background: rgba(0, 0, 0, 0.7);
/* Safari requires the -webkit- prefix for backdrop-filter */
-webkit-backdrop-filter: blur(10px);
backdrop-filter: blur(10px);
border: 1px solid rgba(255, 215, 0, 0.4);
border-radius: 12px;
padding: 0.5rem 1.2rem;
color: white;
font-family: 'Fredoka One', cursive;
font-size: 1rem;
pointer-events: none;
animation: pulse 2s ease-in-out infinite;
}
#giga-dog-hud.used {
opacity: 0.3;
border-color: rgba(255, 255, 255, 0.1);
animation: none;
}
.giga-key {
display: inline-block;
background: rgba(255, 215, 0, 0.2);
border: 1px solid rgba(255, 215, 0, 0.5);
border-radius: 6px;
padding: 0.1rem 0.5rem;
font-size: 0.85rem;
color: #ffd700;
margin-right: 0.3rem;
}

View File

@@ -0,0 +1,28 @@
# FastAPI service image: slim Debian base, non-root runtime user,
# poppler for PDF rendering, uvicorn entry point on port 8000.
FROM python:3.12-slim
WORKDIR /app
# Install system dependencies (poppler for PDF-to-image conversion)
RUN apt-get update && apt-get install -y --no-install-recommends \
poppler-utils \
&& rm -rf /var/lib/apt/lists/*
# Install Python dependencies
# (requirements.txt is copied on its own first so this expensive layer is
# cached unless the dependency list itself changes)
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Copy application code
# NOTE(review): this places the build context under /app/app/ so that
# "app.main" in CMD resolves; the sibling Dockerfile in this commit uses
# `COPY . .` instead — confirm which one matches this repo's layout.
COPY . ./app/
# Create non-root user and directories for runtime data
RUN useradd --create-home appuser \
&& mkdir -p /app/data /app/labels /app/prints \
&& chown -R appuser:appuser /app
# Drop root privileges for the runtime process
USER appuser
EXPOSE 8000
# Liveness probe: hit the app's own /health endpoint from inside the container.
# NOTE(review): assumes httpx is present in requirements.txt — verify.
HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
CMD python -c "import httpx; httpx.get('http://localhost:8000/health').raise_for_status()" || exit 1
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]

View File

@@ -0,0 +1,28 @@
# FastAPI service image: slim Debian base, non-root runtime user,
# poppler for PDF rendering, uvicorn entry point on port 8000.
FROM python:3.12-slim
WORKDIR /app
# Install system dependencies (poppler for PDF-to-image conversion)
RUN apt-get update && apt-get install -y --no-install-recommends \
poppler-utils \
&& rm -rf /var/lib/apt/lists/*
# Install Python dependencies
# (requirements.txt is copied on its own first so this expensive layer is
# cached unless the dependency list itself changes)
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Copy application code
# NOTE(review): `COPY . .` expects the repo root to contain an app/ package
# so that "app.main" in CMD resolves — confirm against the repo layout.
COPY . .
# Create non-root user and directories for runtime data
RUN useradd --create-home appuser \
&& mkdir -p /app/data /app/labels /app/prints \
&& chown -R appuser:appuser /app
# Drop root privileges for the runtime process
USER appuser
EXPOSE 8000
# Liveness probe: hit the app's own /health endpoint from inside the container.
# NOTE(review): assumes httpx is present in requirements.txt — verify.
HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
CMD python -c "import httpx; httpx.get('http://localhost:8000/health').raise_for_status()" || exit 1
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]

View File

@@ -0,0 +1,29 @@
# ── Dev Mode ─────────────────────────────────────────────
# Swaps external clients (ManaPool, EasyPost) with mocks
# and auto-fills placeholder credentials
DEV_MODE=true
# ── Database ─────────────────────────────────────────────
# Default: local SQLite (no Postgres needed for dev)
DATABASE_URL=sqlite:///./data/sql_app.db
# ── TCGPlayer ────────────────────────────────────────────
# Placeholder only — never commit a real seller key; set the actual value
# in a local, untracked .env override
TCGPLAYER_SELLER_KEY=changeme
# NOTE: most dotenv loaders do not expand '~'; prefer an absolute path here
TCGPLAYER_COOKIE_FILE=~/.tcgplayer/credentials.json
# ── Label Printer ────────────────────────────────────────
LABEL_PRINTER_URL=http://localhost:8000
# ── File Storage ─────────────────────────────────────────
FILE_STORAGE_DIR=prints
# ── ETL Scheduler ────────────────────────────────────────
# Auto-disabled in DEV_MODE; manual triggers still work
# ETL_SCHEDULER_ENABLED=false
# ── Application ──────────────────────────────────────────
CORS_ORIGINS=http://localhost:5173,http://localhost:3000
DEBUG=true
LOG_LEVEL=DEBUG
HTTP_TIMEOUT=30
HTTP_MAX_RETRIES=3

View File

@@ -0,0 +1,679 @@
/* ===== GLOBAL STYLES ===== */
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
body {
font-family: 'Nunito', sans-serif;
overflow: hidden;
width: 100vw;
height: 100vh;
background: #0a0a2e;
cursor: default;
}
h1,
h2,
h3 {
font-family: 'Fredoka One', cursive;
}
.screen {
position: fixed;
top: 0;
left: 0;
width: 100vw;
height: 100vh;
display: none;
z-index: 1;
}
.screen.active {
display: flex;
align-items: center;
justify-content: center;
}
.hidden {
display: none !important;
}
/* ===== INTRO SCREEN ===== */
#intro-screen {
background: radial-gradient(ellipse at center, #1a0a3e 0%, #0a0a2e 100%);
flex-direction: column;
cursor: pointer;
}
#intro-canvas {
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
}
#intro-text {
position: relative;
z-index: 2;
text-align: center;
pointer-events: none;
}
#intro-text h1 {
font-size: 4rem;
margin-bottom: 1rem;
}
#intro-text p {
font-size: 1.3rem;
color: #c0a0ff;
animation: pulse 2s ease-in-out infinite;
}
.glow-text {
background: linear-gradient(135deg, #ff9ecd, #a78bfa, #60d5ff);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
background-clip: text;
filter: drop-shadow(0 0 20px rgba(167, 139, 250, 0.6));
animation: glow 3s ease-in-out infinite alternate;
}
@keyframes glow {
from {
filter: drop-shadow(0 0 20px rgba(167, 139, 250, 0.4));
}
to {
filter: drop-shadow(0 0 40px rgba(255, 158, 205, 0.8));
}
}
@keyframes pulse {
0%,
100% {
opacity: 0.6;
transform: scale(1);
}
50% {
opacity: 1;
transform: scale(1.05);
}
}
/* ===== CAT CREATOR SCREEN ===== */
#creator-screen {
background: linear-gradient(135deg, #1a0a3e 0%, #2d1b69 50%, #1a0a3e 100%);
}
.creator-panel {
background: rgba(255, 255, 255, 0.08);
/* Safari requires the -webkit- prefix for backdrop-filter */
-webkit-backdrop-filter: blur(20px);
backdrop-filter: blur(20px);
border: 1px solid rgba(255, 255, 255, 0.15);
border-radius: 24px;
padding: 2.5rem;
max-width: 600px;
width: 90%;
max-height: 90vh;
overflow-y: auto;
color: white;
}
.creator-panel h1 {
text-align: center;
font-size: 2rem;
margin-bottom: 1.5rem;
background: linear-gradient(135deg, #ff9ecd, #ffd700);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
background-clip: text;
}
.form-group {
margin-bottom: 1.2rem;
}
.form-group label {
display: block;
font-weight: 700;
margin-bottom: 0.5rem;
font-size: 1.1rem;
color: #c0a0ff;
}
#cat-name {
width: 100%;
padding: 0.8rem 1rem;
border-radius: 12px;
border: 2px solid rgba(167, 139, 250, 0.4);
background: rgba(255, 255, 255, 0.1);
color: white;
font-size: 1.1rem;
font-family: 'Nunito', sans-serif;
outline: none;
transition: border-color 0.3s;
}
#cat-name:focus {
border-color: #a78bfa;
}
#cat-name::placeholder {
color: rgba(255, 255, 255, 0.35);
}
.color-picker {
display: flex;
gap: 1rem;
}
.color-btn {
flex: 1;
padding: 1rem;
border-radius: 16px;
border: 3px solid rgba(255, 255, 255, 0.15);
background: rgba(255, 255, 255, 0.06);
cursor: pointer;
transition: all 0.3s;
text-align: center;
color: white;
font-family: 'Nunito', sans-serif;
font-weight: 700;
font-size: 1rem;
}
.color-btn:hover {
background: rgba(255, 255, 255, 0.12);
transform: translateY(-2px);
}
.color-btn.selected {
border-color: #ffd700;
background: rgba(255, 215, 0, 0.15);
box-shadow: 0 0 20px rgba(255, 215, 0, 0.2);
}
.cat-preview {
width: 60px;
height: 60px;
margin: 0 auto 0.5rem;
border-radius: 50%;
position: relative;
}
.white-cat {
background: radial-gradient(circle, #ffffff, #e8e0f0);
box-shadow: 0 0 15px rgba(255, 255, 255, 0.4);
}
.orange-cat {
background: radial-gradient(circle, #ff9a3c, #e07020);
box-shadow: 0 0 15px rgba(255, 154, 60, 0.4);
}
/* Big button styles */
.big-btn {
display: block;
width: 100%;
padding: 1rem;
border-radius: 16px;
border: none;
font-family: 'Fredoka One', cursive;
font-size: 1.3rem;
cursor: pointer;
transition: all 0.3s;
margin-top: 1rem;
background: linear-gradient(135deg, #ff6bac, #a78bfa);
color: white;
box-shadow: 0 4px 15px rgba(167, 139, 250, 0.4);
}
.big-btn:hover {
transform: translateY(-3px);
box-shadow: 0 8px 25px rgba(167, 139, 250, 0.6);
}
.big-btn:active {
transform: translateY(0);
}
.big-btn.secondary {
background: linear-gradient(135deg, #4a9eff, #60d5ff);
box-shadow: 0 4px 15px rgba(96, 213, 255, 0.3);
}
.big-btn.secondary:hover {
box-shadow: 0 8px 25px rgba(96, 213, 255, 0.5);
}
/* Stats reveal */
#cat-stats-reveal {
margin-top: 1.5rem;
padding: 1.5rem;
background: rgba(255, 215, 0, 0.08);
border: 1px solid rgba(255, 215, 0, 0.2);
border-radius: 16px;
animation: fadeIn 0.5s ease;
}
#cat-stats-reveal h3 {
text-align: center;
color: #ffd700;
margin-bottom: 1rem;
font-size: 1.2rem;
}
#stats-display {
display: flex;
flex-direction: column;
gap: 0.6rem;
}
.stat-bar {
display: flex;
align-items: center;
gap: 0.8rem;
}
.stat-label {
width: 100px;
font-weight: 700;
font-size: 0.95rem;
color: #c0a0ff;
}
.stat-track {
flex: 1;
height: 14px;
background: rgba(255, 255, 255, 0.1);
border-radius: 7px;
overflow: hidden;
}
.stat-fill {
height: 100%;
border-radius: 7px;
transition: width 1s ease;
width: 0;
}
.stat-fill.speed {
background: linear-gradient(90deg, #4af, #60d5ff);
}
.stat-fill.agility {
background: linear-gradient(90deg, #4f4, #80ff80);
}
.stat-fill.hunting {
background: linear-gradient(90deg, #fa4, #ffd700);
}
.stat-fill.stealth {
background: linear-gradient(90deg, #a78bfa, #c0a0ff);
}
.stat-value {
width: 30px;
text-align: right;
font-weight: 900;
color: #fff;
}
/* Created cats roster */
#created-cats-list {
margin-top: 1.5rem;
}
#created-cats-list h3 {
color: #ff9ecd;
margin-bottom: 0.8rem;
}
#cats-roster {
display: flex;
flex-direction: column;
gap: 0.5rem;
}
.roster-cat {
display: flex;
align-items: center;
gap: 0.8rem;
padding: 0.6rem 1rem;
background: rgba(255, 255, 255, 0.06);
border-radius: 12px;
border: 1px solid rgba(255, 255, 255, 0.1);
}
.roster-cat-icon {
width: 30px;
height: 30px;
border-radius: 50%;
}
.roster-cat-name {
font-weight: 700;
flex: 1;
}
.roster-cat-stats {
font-size: 0.8rem;
color: #c0a0ff;
}
@keyframes fadeIn {
from {
opacity: 0;
transform: translateY(10px);
}
to {
opacity: 1;
transform: translateY(0);
}
}
/* ===== GAME SCREEN ===== */
#game-screen {
background: #0a0a2e;
}
#game-canvas {
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
}
#game-hud {
position: absolute;
top: 0;
right: 0;
z-index: 10;
display: flex;
flex-direction: column;
gap: 0.8rem;
padding: 1rem;
max-width: 280px;
}
#scoreboard {
background: rgba(0, 0, 0, 0.6);
/* Safari requires the -webkit- prefix for backdrop-filter */
-webkit-backdrop-filter: blur(10px);
backdrop-filter: blur(10px);
border: 1px solid rgba(255, 255, 255, 0.1);
border-radius: 16px;
padding: 1rem;
color: white;
}
#scoreboard h3 {
text-align: center;
margin-bottom: 0.5rem;
font-size: 1rem;
color: #ffd700;
}
#scores {
display: flex;
flex-direction: column;
gap: 0.3rem;
font-size: 0.9rem;
}
.score-entry {
display: flex;
justify-content: space-between;
align-items: center;
padding: 0.3rem 0.5rem;
border-radius: 8px;
}
.score-entry:nth-child(1) {
background: rgba(255, 215, 0, 0.15);
}
.score-name {
font-weight: 700;
}
.score-count {
font-weight: 900;
color: #ffd700;
}
#game-controls {
background: rgba(0, 0, 0, 0.6);
/* Safari requires the -webkit- prefix for backdrop-filter */
-webkit-backdrop-filter: blur(10px);
backdrop-filter: blur(10px);
border: 1px solid rgba(255, 255, 255, 0.1);
border-radius: 16px;
padding: 1rem;
color: white;
display: flex;
flex-direction: column;
gap: 0.5rem;
}
.hud-btn {
padding: 0.7rem 1rem;
border-radius: 12px;
border: none;
font-family: 'Fredoka One', cursive;
font-size: 1rem;
cursor: pointer;
transition: all 0.2s;
background: linear-gradient(135deg, #ff6bac, #a78bfa);
color: white;
}
.hud-btn:hover {
transform: scale(1.05);
}
.hud-btn:active {
transform: scale(0.98);
}
#mouse-count,
#total-caught {
font-weight: 700;
font-size: 0.9rem;
text-align: center;
}
#boss-warning {
text-align: center;
font-family: 'Fredoka One', cursive;
font-size: 1.1rem;
color: #ff4444;
animation: pulse 0.5s ease-in-out infinite;
padding: 0.5rem;
background: rgba(255, 0, 0, 0.1);
border-radius: 8px;
border: 1px solid rgba(255, 0, 0, 0.3);
}
/* ===== BOSS OVERLAY ===== */
#boss-overlay {
position: fixed;
top: 0;
left: 0;
width: 100vw;
height: 100vh;
background: rgba(255, 0, 0, 0.15);
z-index: 100;
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
animation: bossFlash 0.5s ease-in-out;
pointer-events: none;
}
.boss-title {
font-size: 3rem;
color: #ff4444;
text-shadow: 0 0 30px rgba(255, 0, 0, 0.6);
animation: shake 0.5s ease-in-out;
}
#boss-overlay p {
font-size: 1.5rem;
color: white;
margin-top: 1rem;
}
@keyframes bossFlash {
0%,
100% {
background: rgba(255, 0, 0, 0.15);
}
50% {
background: rgba(255, 0, 0, 0.4);
}
}
@keyframes shake {
0%,
100% {
transform: translateX(0);
}
20% {
transform: translateX(-10px);
}
40% {
transform: translateX(10px);
}
60% {
transform: translateX(-5px);
}
80% {
transform: translateX(5px);
}
}
/* ===== VICTORY SCREEN ===== */
#victory-screen {
background: radial-gradient(ellipse at center, #2d1b69 0%, #0a0a2e 100%);
flex-direction: column;
align-items: center;
justify-content: center;
}
#victory-canvas {
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
}
#victory-content {
position: relative;
z-index: 2;
text-align: center;
color: white;
padding: 2rem;
max-width: 600px;
animation: fadeIn 1s ease;
}
#victory-content h1 {
font-size: 3rem;
margin-bottom: 0.5rem;
}
#victory-content h2 {
font-size: 1.5rem;
color: #ffd700;
margin-bottom: 1.5rem;
}
#final-scoreboard {
background: rgba(255, 255, 255, 0.08);
border-radius: 16px;
padding: 1.5rem;
margin-bottom: 1rem;
border: 1px solid rgba(255, 215, 0, 0.3);
}
.final-score-entry {
display: flex;
justify-content: space-between;
align-items: center;
padding: 0.5rem 0;
border-bottom: 1px solid rgba(255, 255, 255, 0.08);
font-size: 1.1rem;
}
.final-score-entry:last-child {
border-bottom: none;
}
.final-rank {
width: 30px;
font-weight: 900;
color: #ffd700;
}
.final-name {
flex: 1;
font-weight: 700;
text-align: left;
margin-left: 0.5rem;
}
.final-catches {
font-weight: 900;
color: #60d5ff;
}
#mvp-display {
margin: 1rem 0;
padding: 1.2rem;
background: linear-gradient(135deg, rgba(255, 215, 0, 0.15), rgba(255, 154, 60, 0.15));
border-radius: 16px;
border: 2px solid rgba(255, 215, 0, 0.4);
}
#mvp-display h3 {
color: #ffd700;
margin-bottom: 0.3rem;
font-size: 1.3rem;
}
#mvp-display p {
color: #c0a0ff;
font-size: 0.95rem;
}
/* Scrollbar styling */
::-webkit-scrollbar {
width: 6px;
}
::-webkit-scrollbar-track {
background: transparent;
}
::-webkit-scrollbar-thumb {
background: rgba(167, 139, 250, 0.3);
border-radius: 3px;
}
::-webkit-scrollbar-thumb:hover {
background: rgba(167, 139, 250, 0.5);
}

View File

@@ -0,0 +1,715 @@
/* ===== GLOBAL STYLES ===== */
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
body {
font-family: 'Nunito', sans-serif;
overflow: hidden;
width: 100vw;
height: 100vh;
background: #0a0a2e;
cursor: default;
}
h1,
h2,
h3 {
font-family: 'Fredoka One', cursive;
}
.screen {
position: fixed;
top: 0;
left: 0;
width: 100vw;
height: 100vh;
display: none;
z-index: 1;
}
.screen.active {
display: flex;
align-items: center;
justify-content: center;
}
.hidden {
display: none !important;
}
/* ===== INTRO SCREEN ===== */
#intro-screen {
background: radial-gradient(ellipse at center, #1a0a3e 0%, #0a0a2e 100%);
flex-direction: column;
cursor: pointer;
}
#intro-canvas {
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
}
#intro-text {
position: relative;
z-index: 2;
text-align: center;
pointer-events: none;
}
#intro-text h1 {
font-size: 4rem;
margin-bottom: 1rem;
}
#intro-text p {
font-size: 1.3rem;
color: #c0a0ff;
animation: pulse 2s ease-in-out infinite;
}
.glow-text {
background: linear-gradient(135deg, #ff9ecd, #a78bfa, #60d5ff);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
background-clip: text;
filter: drop-shadow(0 0 20px rgba(167, 139, 250, 0.6));
animation: glow 3s ease-in-out infinite alternate;
}
@keyframes glow {
from {
filter: drop-shadow(0 0 20px rgba(167, 139, 250, 0.4));
}
to {
filter: drop-shadow(0 0 40px rgba(255, 158, 205, 0.8));
}
}
@keyframes pulse {
0%,
100% {
opacity: 0.6;
transform: scale(1);
}
50% {
opacity: 1;
transform: scale(1.05);
}
}
/* ===== CAT CREATOR SCREEN ===== */
#creator-screen {
background: linear-gradient(135deg, #1a0a3e 0%, #2d1b69 50%, #1a0a3e 100%);
}
.creator-panel {
background: rgba(255, 255, 255, 0.08);
/* Safari requires the -webkit- prefix for backdrop-filter */
-webkit-backdrop-filter: blur(20px);
backdrop-filter: blur(20px);
border: 1px solid rgba(255, 255, 255, 0.15);
border-radius: 24px;
padding: 2.5rem;
max-width: 600px;
width: 90%;
max-height: 90vh;
overflow-y: auto;
color: white;
}
.creator-panel h1 {
text-align: center;
font-size: 2rem;
margin-bottom: 1.5rem;
background: linear-gradient(135deg, #ff9ecd, #ffd700);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
background-clip: text;
}
.form-group {
margin-bottom: 1.2rem;
}
.form-group label {
display: block;
font-weight: 700;
margin-bottom: 0.5rem;
font-size: 1.1rem;
color: #c0a0ff;
}
#cat-name {
width: 100%;
padding: 0.8rem 1rem;
border-radius: 12px;
border: 2px solid rgba(167, 139, 250, 0.4);
background: rgba(255, 255, 255, 0.1);
color: white;
font-size: 1.1rem;
font-family: 'Nunito', sans-serif;
outline: none;
transition: border-color 0.3s;
}
#cat-name:focus {
border-color: #a78bfa;
}
#cat-name::placeholder {
color: rgba(255, 255, 255, 0.35);
}
.color-picker {
display: flex;
gap: 1rem;
}
.color-btn {
flex: 1;
padding: 1rem;
border-radius: 16px;
border: 3px solid rgba(255, 255, 255, 0.15);
background: rgba(255, 255, 255, 0.06);
cursor: pointer;
transition: all 0.3s;
text-align: center;
color: white;
font-family: 'Nunito', sans-serif;
font-weight: 700;
font-size: 1rem;
}
.color-btn:hover {
background: rgba(255, 255, 255, 0.12);
transform: translateY(-2px);
}
.color-btn.selected {
border-color: #ffd700;
background: rgba(255, 215, 0, 0.15);
box-shadow: 0 0 20px rgba(255, 215, 0, 0.2);
}
.cat-preview {
width: 60px;
height: 60px;
margin: 0 auto 0.5rem;
border-radius: 50%;
position: relative;
}
.white-cat {
background: radial-gradient(circle, #ffffff, #e8e0f0);
box-shadow: 0 0 15px rgba(255, 255, 255, 0.4);
}
.orange-cat {
background: radial-gradient(circle, #ff9a3c, #e07020);
box-shadow: 0 0 15px rgba(255, 154, 60, 0.4);
}
/* Big button styles */
.big-btn {
display: block;
width: 100%;
padding: 1rem;
border-radius: 16px;
border: none;
font-family: 'Fredoka One', cursive;
font-size: 1.3rem;
cursor: pointer;
transition: all 0.3s;
margin-top: 1rem;
background: linear-gradient(135deg, #ff6bac, #a78bfa);
color: white;
box-shadow: 0 4px 15px rgba(167, 139, 250, 0.4);
}
.big-btn:hover {
transform: translateY(-3px);
box-shadow: 0 8px 25px rgba(167, 139, 250, 0.6);
}
.big-btn:active {
transform: translateY(0);
}
.big-btn.secondary {
background: linear-gradient(135deg, #4a9eff, #60d5ff);
box-shadow: 0 4px 15px rgba(96, 213, 255, 0.3);
}
.big-btn.secondary:hover {
box-shadow: 0 8px 25px rgba(96, 213, 255, 0.5);
}
/* Stats reveal */
#cat-stats-reveal {
margin-top: 1.5rem;
padding: 1.5rem;
background: rgba(255, 215, 0, 0.08);
border: 1px solid rgba(255, 215, 0, 0.2);
border-radius: 16px;
animation: fadeIn 0.5s ease;
}
#cat-stats-reveal h3 {
text-align: center;
color: #ffd700;
margin-bottom: 1rem;
font-size: 1.2rem;
}
#stats-display {
display: flex;
flex-direction: column;
gap: 0.6rem;
}
.stat-bar {
display: flex;
align-items: center;
gap: 0.8rem;
}
.stat-label {
width: 100px;
font-weight: 700;
font-size: 0.95rem;
color: #c0a0ff;
}
.stat-track {
flex: 1;
height: 14px;
background: rgba(255, 255, 255, 0.1);
border-radius: 7px;
overflow: hidden;
}
.stat-fill {
height: 100%;
border-radius: 7px;
transition: width 1s ease;
width: 0;
}
.stat-fill.speed {
background: linear-gradient(90deg, #4af, #60d5ff);
}
.stat-fill.agility {
background: linear-gradient(90deg, #4f4, #80ff80);
}
.stat-fill.hunting {
background: linear-gradient(90deg, #fa4, #ffd700);
}
.stat-fill.stealth {
background: linear-gradient(90deg, #a78bfa, #c0a0ff);
}
.stat-value {
width: 30px;
text-align: right;
font-weight: 900;
color: #fff;
}
/* Created cats roster */
#created-cats-list {
margin-top: 1.5rem;
}
#created-cats-list h3 {
color: #ff9ecd;
margin-bottom: 0.8rem;
}
#cats-roster {
display: flex;
flex-direction: column;
gap: 0.5rem;
}
.roster-cat {
display: flex;
align-items: center;
gap: 0.8rem;
padding: 0.6rem 1rem;
background: rgba(255, 255, 255, 0.06);
border-radius: 12px;
border: 1px solid rgba(255, 255, 255, 0.1);
}
.roster-cat-icon {
width: 30px;
height: 30px;
border-radius: 50%;
}
.roster-cat-name {
font-weight: 700;
flex: 1;
}
.roster-cat-stats {
font-size: 0.8rem;
color: #c0a0ff;
}
@keyframes fadeIn {
from {
opacity: 0;
transform: translateY(10px);
}
to {
opacity: 1;
transform: translateY(0);
}
}
/* ===== GAME SCREEN ===== */
#game-screen {
background: #0a0a2e;
}
#game-canvas {
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
}
#game-hud {
position: absolute;
top: 0;
right: 0;
z-index: 10;
display: flex;
flex-direction: column;
gap: 0.8rem;
padding: 1rem;
max-width: 280px;
}
#scoreboard {
background: rgba(0, 0, 0, 0.6);
/* Safari requires the -webkit- prefix for backdrop-filter */
-webkit-backdrop-filter: blur(10px);
backdrop-filter: blur(10px);
border: 1px solid rgba(255, 255, 255, 0.1);
border-radius: 16px;
padding: 1rem;
color: white;
}
#scoreboard h3 {
text-align: center;
margin-bottom: 0.5rem;
font-size: 1rem;
color: #ffd700;
}
#scores {
display: flex;
flex-direction: column;
gap: 0.3rem;
font-size: 0.9rem;
}
.score-entry {
display: flex;
justify-content: space-between;
align-items: center;
padding: 0.3rem 0.5rem;
border-radius: 8px;
}
.score-entry:nth-child(1) {
background: rgba(255, 215, 0, 0.15);
}
.score-name {
font-weight: 700;
}
.score-count {
font-weight: 900;
color: #ffd700;
}
#game-controls {
background: rgba(0, 0, 0, 0.6);
/* Safari requires the -webkit- prefix for backdrop-filter */
-webkit-backdrop-filter: blur(10px);
backdrop-filter: blur(10px);
border: 1px solid rgba(255, 255, 255, 0.1);
border-radius: 16px;
padding: 1rem;
color: white;
display: flex;
flex-direction: column;
gap: 0.5rem;
}
.hud-btn {
padding: 0.7rem 1rem;
border-radius: 12px;
border: none;
font-family: 'Fredoka One', cursive;
font-size: 1rem;
cursor: pointer;
transition: all 0.2s;
background: linear-gradient(135deg, #ff6bac, #a78bfa);
color: white;
}
.hud-btn:hover {
transform: scale(1.05);
}
.hud-btn:active {
transform: scale(0.98);
}
#mouse-count,
#total-caught {
font-weight: 700;
font-size: 0.9rem;
text-align: center;
}
#boss-warning {
text-align: center;
font-family: 'Fredoka One', cursive;
font-size: 1.1rem;
color: #ff4444;
animation: pulse 0.5s ease-in-out infinite;
padding: 0.5rem;
background: rgba(255, 0, 0, 0.1);
border-radius: 8px;
border: 1px solid rgba(255, 0, 0, 0.3);
}
/* ===== BOSS OVERLAY ===== */
#boss-overlay {
position: fixed;
top: 0;
left: 0;
width: 100vw;
height: 100vh;
background: rgba(255, 0, 0, 0.15);
z-index: 100;
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
animation: bossFlash 0.5s ease-in-out;
pointer-events: none;
}
.boss-title {
font-size: 3rem;
color: #ff4444;
text-shadow: 0 0 30px rgba(255, 0, 0, 0.6);
animation: shake 0.5s ease-in-out;
}
#boss-overlay p {
font-size: 1.5rem;
color: white;
margin-top: 1rem;
}
@keyframes bossFlash {
0%,
100% {
background: rgba(255, 0, 0, 0.15);
}
50% {
background: rgba(255, 0, 0, 0.4);
}
}
@keyframes shake {
0%,
100% {
transform: translateX(0);
}
20% {
transform: translateX(-10px);
}
40% {
transform: translateX(10px);
}
60% {
transform: translateX(-5px);
}
80% {
transform: translateX(5px);
}
}
/* ===== VICTORY SCREEN ===== */
#victory-screen {
background: radial-gradient(ellipse at center, #2d1b69 0%, #0a0a2e 100%);
flex-direction: column;
align-items: center;
justify-content: center;
}
#victory-canvas {
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
}
#victory-content {
position: relative;
z-index: 2;
text-align: center;
color: white;
padding: 2rem;
max-width: 600px;
animation: fadeIn 1s ease;
}
#victory-content h1 {
font-size: 3rem;
margin-bottom: 0.5rem;
}
#victory-content h2 {
font-size: 1.5rem;
color: #ffd700;
margin-bottom: 1.5rem;
}
#final-scoreboard {
background: rgba(255, 255, 255, 0.08);
border-radius: 16px;
padding: 1.5rem;
margin-bottom: 1rem;
border: 1px solid rgba(255, 215, 0, 0.3);
}
.final-score-entry {
display: flex;
justify-content: space-between;
align-items: center;
padding: 0.5rem 0;
border-bottom: 1px solid rgba(255, 255, 255, 0.08);
font-size: 1.1rem;
}
.final-score-entry:last-child {
border-bottom: none;
}
.final-rank {
width: 30px;
font-weight: 900;
color: #ffd700;
}
.final-name {
flex: 1;
font-weight: 700;
text-align: left;
margin-left: 0.5rem;
}
.final-catches {
font-weight: 900;
color: #60d5ff;
}
#mvp-display {
margin: 1rem 0;
padding: 1.2rem;
background: linear-gradient(135deg, rgba(255, 215, 0, 0.15), rgba(255, 154, 60, 0.15));
border-radius: 16px;
border: 2px solid rgba(255, 215, 0, 0.4);
}
#mvp-display h3 {
color: #ffd700;
margin-bottom: 0.3rem;
font-size: 1.3rem;
}
#mvp-display p {
color: #c0a0ff;
font-size: 0.95rem;
}
/* Scrollbar styling */
::-webkit-scrollbar {
width: 6px;
}
::-webkit-scrollbar-track {
background: transparent;
}
::-webkit-scrollbar-thumb {
background: rgba(167, 139, 250, 0.3);
border-radius: 3px;
}
::-webkit-scrollbar-thumb:hover {
background: rgba(167, 139, 250, 0.5);
}
/* Giga Dog HUD */
#giga-dog-hud {
position: absolute;
bottom: 20px;
left: 50%;
transform: translateX(-50%);
z-index: 10;
background: rgba(0, 0, 0, 0.7);
backdrop-filter: blur(10px);
border: 1px solid rgba(255, 215, 0, 0.4);
border-radius: 12px;
padding: 0.5rem 1.2rem;
color: white;
font-family: 'Fredoka One', cursive;
font-size: 1rem;
pointer-events: none;
animation: pulse 2s ease-in-out infinite;
}
#giga-dog-hud.used {
opacity: 0.3;
border-color: rgba(255, 255, 255, 0.1);
animation: none;
}
.giga-key {
display: inline-block;
background: rgba(255, 215, 0, 0.2);
border: 1px solid rgba(255, 215, 0, 0.5);
border-radius: 6px;
padding: 0.1rem 0.5rem;
font-size: 0.85rem;
color: #ffd700;
margin-right: 0.3rem;
}

View File

@@ -0,0 +1,104 @@
/* ===== RPG INVENTORY & UI STYLES ===== */
#inventory-hud {
background: rgba(0, 0, 0, 0.6);
/* Safari requires the -webkit- prefix for backdrop-filter */
-webkit-backdrop-filter: blur(10px);
backdrop-filter: blur(10px);
border: 1px solid rgba(255, 215, 0, 0.3);
border-radius: 16px;
padding: 1rem;
color: white;
margin-top: 0.8rem;
max-height: 200px;
overflow-y: auto;
}
#inventory-hud h3 {
color: #ffd700;
text-align: center;
margin-bottom: 0.5rem;
font-size: 1rem;
}
#hud-loot-list {
display: flex;
flex-direction: column;
gap: 0.5rem;
}
.loot-item {
background: linear-gradient(135deg, rgba(255, 215, 0, 0.1), rgba(255, 150, 0, 0.1));
border: 1px solid rgba(255, 215, 0, 0.4);
border-radius: 8px;
padding: 0.5rem;
font-size: 0.85rem;
cursor: pointer;
transition: all 0.2s;
}
.loot-item:hover {
transform: scale(1.02);
background: linear-gradient(135deg, rgba(255, 215, 0, 0.2), rgba(255, 150, 0, 0.2));
}
.loot-name {
font-weight: bold;
color: #ffd700;
display: block;
}
.loot-stats {
color: #c0a0ff;
font-size: 0.75rem;
}
.cat-level-info {
font-size: 0.8rem;
color: #c0a0ff;
margin-top: 0.2rem;
display: flex;
align-items: center;
gap: 0.5rem;
}
.xp-bar {
flex: 1;
height: 6px;
background: rgba(255, 255, 255, 0.1);
border-radius: 3px;
overflow: hidden;
}
.xp-fill {
height: 100%;
background: linear-gradient(90deg, #60d5ff, #a78bfa);
width: 0%;
transition: width 0.3s;
}
.level-up-text {
position: absolute;
color: #ffd700;
font-family: 'Fredoka One', cursive;
font-size: 1.5rem;
text-shadow: 0 0 10px #fa4;
pointer-events: none;
animation: floatUpFade 1.5s ease forwards;
z-index: 50;
}
@keyframes floatUpFade {
0% {
opacity: 1;
transform: translateY(0) scale(1.5);
}
100% {
opacity: 0;
transform: translateY(-40px) scale(1);
}
}
.equipped-gear {
font-size: 0.75rem;
color: #ffd700;
margin-top: 0.2rem;
}

View File

@@ -0,0 +1,154 @@
/* ===== RPG INVENTORY & UI STYLES ===== */
#inventory-hud {
background: rgba(0, 0, 0, 0.6);
/* Safari requires the -webkit- prefix for backdrop-filter */
-webkit-backdrop-filter: blur(10px);
backdrop-filter: blur(10px);
border: 1px solid rgba(255, 215, 0, 0.3);
border-radius: 16px;
padding: 1rem;
color: white;
margin-top: 0.8rem;
max-height: 300px;
overflow-y: auto;
}
#inventory-hud h3 {
color: #ffd700;
text-align: center;
margin-bottom: 0.5rem;
font-size: 1rem;
}
#hud-loot-list {
display: flex;
flex-direction: column;
gap: 0.5rem;
}
.loot-item {
background: linear-gradient(135deg, rgba(255, 215, 0, 0.1), rgba(255, 150, 0, 0.1));
border: 1px solid rgba(255, 215, 0, 0.4);
border-radius: 8px;
padding: 0.5rem;
font-size: 0.85rem;
cursor: pointer;
transition: all 0.2s;
}
.loot-item:hover {
transform: scale(1.02);
background: linear-gradient(135deg, rgba(255, 215, 0, 0.2), rgba(255, 150, 0, 0.2));
}
.loot-name {
font-weight: bold;
color: #ffd700;
display: block;
}
.loot-stats {
color: #c0a0ff;
font-size: 0.75rem;
}
.cat-level-info {
font-size: 0.8rem;
color: #c0a0ff;
margin-top: 0.2rem;
display: flex;
align-items: center;
gap: 0.5rem;
}
.xp-bar {
flex: 1;
height: 6px;
background: rgba(255, 255, 255, 0.1);
border-radius: 3px;
overflow: hidden;
}
.xp-fill {
height: 100%;
background: linear-gradient(90deg, #60d5ff, #a78bfa);
width: 0%;
transition: width 0.3s;
}
.level-up-text {
position: absolute;
color: #ffd700;
font-family: 'Fredoka One', cursive;
font-size: 1.5rem;
text-shadow: 0 0 10px #fa4;
pointer-events: none;
animation: floatUpFade 1.5s ease forwards;
z-index: 50;
}
@keyframes floatUpFade {
0% {
opacity: 1;
transform: translateY(0) scale(1.5);
}
100% {
opacity: 0;
transform: translateY(-40px) scale(1);
}
}
.equipped-gear {
font-size: 0.75rem;
color: #ffd700;
margin-top: 0.2rem;
}
/* Equip picker */
#equip-picker {
background: rgba(20, 10, 40, 0.95);
border: 1px solid rgba(255, 215, 0, 0.5);
border-radius: 8px;
padding: 0.5rem;
margin-top: 0.5rem;
}
.equip-picker-header {
display: flex;
justify-content: space-between;
align-items: center;
color: #ffd700;
font-size: 0.85rem;
font-weight: bold;
margin-bottom: 0.4rem;
}
.equip-picker-close {
background: none;
border: none;
color: #fff;
cursor: pointer;
font-size: 1.1rem;
padding: 0 0.3rem;
}
.equip-picker-close:hover {
color: #f44;
}
.equip-picker-cat {
padding: 0.4rem 0.5rem;
color: #fff;
cursor: pointer;
border-radius: 6px;
font-size: 0.85rem;
transition: background 0.15s;
}
.equip-picker-cat:hover {
background: rgba(255, 215, 0, 0.2);
}
.equip-picker-level {
color: #c0a0ff;
font-size: 0.75rem;
}

View File

@@ -0,0 +1,110 @@
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>🐱 Cattopia — The Great Mouse Chase!</title>
  <link rel="stylesheet" href="style.css">
  <link rel="stylesheet" href="rpg_style.css">
  <link href="https://fonts.googleapis.com/css2?family=Fredoka+One&family=Nunito:wght@400;700;900&display=swap"
    rel="stylesheet">
</head>
<body>
  <!-- INTRO SCREEN -->
  <div id="intro-screen" class="screen active">
    <canvas id="intro-canvas"></canvas>
    <div id="intro-text">
      <h1 class="glow-text">✨ CATTOPIA ✨</h1>
      <p>Click the treasure chest to enter the magical world!</p>
    </div>
  </div>
  <!-- CAT CREATOR SCREEN -->
  <div id="creator-screen" class="screen">
    <div class="creator-panel">
      <h1>🐱 Create Your Cat!</h1>
      <div class="creator-form">
        <div class="form-group">
          <label>Cat Name:</label>
          <input type="text" id="cat-name" placeholder="What's your cat's name?" maxlength="15">
        </div>
        <div class="form-group">
          <label>Color:</label>
          <div class="color-picker">
            <button class="color-btn selected" data-color="white" id="btn-white">
              <div class="cat-preview white-cat"></div>
              <span>White</span>
            </button>
            <button class="color-btn" data-color="orange" id="btn-orange">
              <div class="cat-preview orange-cat"></div>
              <span>Orange</span>
            </button>
          </div>
        </div>
        <button id="create-cat-btn" class="big-btn">🐾 Create Cat!</button>
        <div id="cat-stats-reveal" class="hidden">
          <h3>✨ Your cat's secret stats! ✨</h3>
          <div id="stats-display"></div>
          <!-- NOTE(review): leading space in the label below looks like a dropped emoji — confirm intended text -->
          <button id="add-another-btn" class="big-btn secondary"> Add Another Cat</button>
          <button id="start-game-btn" class="big-btn">🎮 Start the Hunt!</button>
        </div>
      </div>
      <div id="created-cats-list">
        <h3>Your Cats:</h3>
        <div id="cats-roster"></div>
        <div id="inventory-panel" class="hidden">
          <h3>🎒 Party Inventory</h3>
          <p class="inventory-hint">Click loot to equip it to the MVP!</p>
          <div id="party-loot"></div>
        </div>
      </div>
    </div>
  </div>
  <!-- GAME SCREEN -->
  <div id="game-screen" class="screen">
    <canvas id="game-canvas"></canvas>
    <div id="game-hud">
      <div id="scoreboard">
        <h3>🏆 Scoreboard</h3>
        <div id="scores"></div>
      </div>
      <div id="game-controls">
        <button id="add-mouse-btn" class="hud-btn">🐭 Add Mouse!</button>
        <button id="add-5-mice-btn" class="hud-btn">🐭x5 Add 5 Mice!</button>
        <div id="mouse-count">Mice remaining: <span id="mice-left">0</span></div>
        <div id="total-caught">Mice Caught: <span id="total-caught-count">0</span></div>
        <div id="boss-progress">Next Boss in: <span id="mice-until-boss">10</span></div>
        <div id="boss-warning" class="hidden">🚨 BOSS INCOMING! 🚨</div>
      </div>
      <div id="inventory-hud">
        <h3>🎒 Loot</h3>
        <div id="hud-loot-list"></div>
      </div>
    </div>
  </div>
  <!-- BOSS SCREEN OVERLAY -->
  <div id="boss-overlay" class="hidden">
    <h1 class="boss-title" id="boss-overlay-title">⚠️ BOSS APPEARS! ⚠️</h1>
    <p id="boss-overlay-desc">All cats must work together!</p>
  </div>
  <!-- VICTORY SCREEN -->
  <div id="victory-screen" class="screen">
    <canvas id="victory-canvas"></canvas>
    <div id="victory-content" class="hidden">
      <h1 class="glow-text">🎉 VICTORY! 🎉</h1>
      <h2>The treasure vault is open!</h2>
      <div id="final-scoreboard"></div>
      <div id="mvp-display"></div>
      <button id="play-again-btn" class="big-btn">🔄 Play Again!</button>
    </div>
  </div>
  <script src="game.js"></script>
</body>
</html>

View File

@@ -0,0 +1,110 @@
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>🐱 Cattopia — The Great Mouse Chase!</title>
  <link rel="stylesheet" href="style.css">
  <link rel="stylesheet" href="rpg_style.css">
  <link href="https://fonts.googleapis.com/css2?family=Fredoka+One&family=Nunito:wght@400;700;900&display=swap"
    rel="stylesheet">
</head>
<body>
  <!-- INTRO SCREEN -->
  <div id="intro-screen" class="screen active">
    <canvas id="intro-canvas"></canvas>
    <div id="intro-text">
      <h1 class="glow-text">✨ CATTOPIA ✨</h1>
      <p>Click the treasure chest to enter the magical world!</p>
    </div>
  </div>
  <!-- CAT CREATOR SCREEN -->
  <div id="creator-screen" class="screen">
    <div class="creator-panel">
      <h1>🐱 Create Your Cat!</h1>
      <div class="creator-form">
        <div class="form-group">
          <label>Cat Name:</label>
          <input type="text" id="cat-name" placeholder="What's your cat's name?" maxlength="15">
        </div>
        <div class="form-group">
          <label>Color:</label>
          <div class="color-picker">
            <button class="color-btn selected" data-color="white" id="btn-white">
              <div class="cat-preview white-cat"></div>
              <span>White</span>
            </button>
            <button class="color-btn" data-color="orange" id="btn-orange">
              <div class="cat-preview orange-cat"></div>
              <span>Orange</span>
            </button>
          </div>
        </div>
        <button id="create-cat-btn" class="big-btn">🐾 Create Cat!</button>
        <div id="cat-stats-reveal" class="hidden">
          <h3>✨ Your cat's secret stats! ✨</h3>
          <div id="stats-display"></div>
          <!-- NOTE(review): leading space in the label below looks like a dropped emoji — confirm intended text -->
          <button id="add-another-btn" class="big-btn secondary"> Add Another Cat</button>
          <button id="start-game-btn" class="big-btn">🎮 Start the Hunt!</button>
        </div>
      </div>
      <div id="created-cats-list">
        <h3>Your Cats:</h3>
        <div id="cats-roster"></div>
        <div id="inventory-panel" class="hidden">
          <h3>🎒 Party Inventory</h3>
          <p class="inventory-hint">Click loot to choose which cat equips it!</p>
          <div id="party-loot"></div>
        </div>
      </div>
    </div>
  </div>
  <!-- GAME SCREEN -->
  <div id="game-screen" class="screen">
    <canvas id="game-canvas"></canvas>
    <div id="game-hud">
      <div id="scoreboard">
        <h3>🏆 Scoreboard</h3>
        <div id="scores"></div>
      </div>
      <div id="game-controls">
        <button id="add-mouse-btn" class="hud-btn">🐭 Add Mouse!</button>
        <button id="add-5-mice-btn" class="hud-btn">🐭x5 Add 5 Mice!</button>
        <div id="mouse-count">Mice remaining: <span id="mice-left">0</span></div>
        <div id="total-caught">Mice Caught: <span id="total-caught-count">0</span></div>
        <div id="boss-progress">Next Boss in: <span id="mice-until-boss">10</span></div>
        <div id="boss-warning" class="hidden">🚨 BOSS INCOMING! 🚨</div>
      </div>
      <div id="inventory-hud">
        <h3>🎒 Loot</h3>
        <div id="hud-loot-list"></div>
      </div>
    </div>
  </div>
  <!-- BOSS SCREEN OVERLAY -->
  <div id="boss-overlay" class="hidden">
    <h1 class="boss-title" id="boss-overlay-title">⚠️ BOSS APPEARS! ⚠️</h1>
    <p id="boss-overlay-desc">All cats must work together!</p>
  </div>
  <!-- VICTORY SCREEN -->
  <div id="victory-screen" class="screen">
    <canvas id="victory-canvas"></canvas>
    <div id="victory-content" class="hidden">
      <h1 class="glow-text">🎉 VICTORY! 🎉</h1>
      <h2>The treasure vault is open!</h2>
      <div id="final-scoreboard"></div>
      <div id="mvp-display"></div>
      <button id="play-again-btn" class="big-btn">🔄 Play Again!</button>
    </div>
  </div>
  <script src="game.js"></script>
</body>
</html>

View File

@@ -0,0 +1,113 @@
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>🐱 Cattopia — The Great Mouse Chase!</title>
  <link rel="stylesheet" href="style.css">
  <link rel="stylesheet" href="rpg_style.css">
  <link href="https://fonts.googleapis.com/css2?family=Fredoka+One&family=Nunito:wght@400;700;900&display=swap"
    rel="stylesheet">
</head>
<body>
  <!-- INTRO SCREEN -->
  <div id="intro-screen" class="screen active">
    <canvas id="intro-canvas"></canvas>
    <div id="intro-text">
      <h1 class="glow-text">✨ CATTOPIA ✨</h1>
      <p>Click the treasure chest to enter the magical world!</p>
    </div>
  </div>
  <!-- CAT CREATOR SCREEN -->
  <div id="creator-screen" class="screen">
    <div class="creator-panel">
      <h1>🐱 Create Your Cat!</h1>
      <div class="creator-form">
        <div class="form-group">
          <label>Cat Name:</label>
          <input type="text" id="cat-name" placeholder="What's your cat's name?" maxlength="15">
        </div>
        <div class="form-group">
          <label>Color:</label>
          <div class="color-picker">
            <button class="color-btn selected" data-color="white" id="btn-white">
              <div class="cat-preview white-cat"></div>
              <span>White</span>
            </button>
            <button class="color-btn" data-color="orange" id="btn-orange">
              <div class="cat-preview orange-cat"></div>
              <span>Orange</span>
            </button>
          </div>
        </div>
        <button id="create-cat-btn" class="big-btn">🐾 Create Cat!</button>
        <div id="cat-stats-reveal" class="hidden">
          <h3>✨ Your cat's secret stats! ✨</h3>
          <div id="stats-display"></div>
          <!-- NOTE(review): leading space in the label below looks like a dropped emoji — confirm intended text -->
          <button id="add-another-btn" class="big-btn secondary"> Add Another Cat</button>
          <button id="start-game-btn" class="big-btn">🎮 Start the Hunt!</button>
        </div>
      </div>
      <div id="created-cats-list">
        <h3>Your Cats:</h3>
        <div id="cats-roster"></div>
        <div id="inventory-panel" class="hidden">
          <h3>🎒 Party Inventory</h3>
          <p class="inventory-hint">Click loot to choose which cat equips it!</p>
          <div id="party-loot"></div>
        </div>
      </div>
    </div>
  </div>
  <!-- GAME SCREEN -->
  <div id="game-screen" class="screen">
    <canvas id="game-canvas"></canvas>
    <div id="game-hud">
      <div id="scoreboard">
        <h3>🏆 Scoreboard</h3>
        <div id="scores"></div>
      </div>
      <div id="game-controls">
        <button id="add-mouse-btn" class="hud-btn">🐭 Add Mouse!</button>
        <button id="add-5-mice-btn" class="hud-btn">🐭x5 Add 5 Mice!</button>
        <div id="mouse-count">Mice remaining: <span id="mice-left">0</span></div>
        <div id="total-caught">Mice Caught: <span id="total-caught-count">0</span></div>
        <div id="boss-progress">Next Boss in: <span id="mice-until-boss">10</span></div>
        <div id="boss-warning" class="hidden">🚨 BOSS INCOMING! 🚨</div>
      </div>
      <div id="inventory-hud">
        <h3>🎒 Loot</h3>
        <div id="hud-loot-list"></div>
      </div>
    </div>
    <!-- Keyboard hint for the giga-dog summon (SPACE) — handler presumably in game.js; confirm -->
    <div id="giga-dog-hud">
      <span class="giga-key">SPACE</span> Summon Giga Dog
    </div>
  </div>
  <!-- BOSS SCREEN OVERLAY -->
  <div id="boss-overlay" class="hidden">
    <h1 class="boss-title" id="boss-overlay-title">⚠️ BOSS APPEARS! ⚠️</h1>
    <p id="boss-overlay-desc">All cats must work together!</p>
  </div>
  <!-- VICTORY SCREEN -->
  <div id="victory-screen" class="screen">
    <canvas id="victory-canvas"></canvas>
    <div id="victory-content" class="hidden">
      <h1 class="glow-text">🎉 VICTORY! 🎉</h1>
      <h2>The treasure vault is open!</h2>
      <div id="final-scoreboard"></div>
      <div id="mvp-display"></div>
      <button id="play-again-btn" class="big-btn">🔄 Play Again!</button>
    </div>
  </div>
  <script src="game.js"></script>
</body>
</html>

View File

@@ -0,0 +1,589 @@
import fs from 'node:fs';
import path from 'node:path';
import { DatabaseSync } from 'node:sqlite';
import {
createPostgresPool,
initializePostgresSchema,
runPostgresHealthcheck,
} from './dbPostgres.js';
import { openPostgresSyncBridge } from './dbPostgresSyncBridge.js';
import { parseBooleanFlag } from './utils.js';
// Default on-disk location for the sqlite database file.
const defaultDbPath = path.join(process.cwd(), 'data', 'gigagimbank.sqlite');
// The two supported DB_DRIVER identifiers.
const DB_DRIVER_SQLITE = 'sqlite';
const DB_DRIVER_POSTGRES = 'postgres';

/**
 * Coerce an arbitrary DB_DRIVER value to one of the supported driver names.
 * Anything other than "postgres" (trimmed, case-insensitive) maps to sqlite.
 */
function normalizeDbDriver(value) {
  const driver = String(value ?? '').trim().toLowerCase();
  return driver === DB_DRIVER_POSTGRES ? DB_DRIVER_POSTGRES : DB_DRIVER_SQLITE;
}
/**
 * Decide which database driver this process should actually run on.
 *
 * Reads DB_DRIVER plus the DEV_DB_SQLITE_FALLBACK escape hatch and returns a
 * descriptor of both the requested and the resolved driver. When postgres is
 * requested but the dev fallback flag is set, sqlite is used instead.
 */
export function resolveDatabaseRuntimeConfig(env = process.env) {
  const requested = normalizeDbDriver(env.DB_DRIVER);
  const devFallback = parseBooleanFlag(env.DEV_DB_SQLITE_FALLBACK);

  if (requested === DB_DRIVER_POSTGRES && devFallback) {
    // Developer override: record that postgres was requested but run on sqlite.
    return {
      requested_driver: requested,
      resolved_driver: DB_DRIVER_SQLITE,
      mode: 'sqlite_dev_fallback',
      sqlite_dev_fallback: true,
      postgres_adapter_available: true,
      postgres_store_compatibility: 'sqlite_fallback',
    };
  }

  const isSqlite = requested === DB_DRIVER_SQLITE;
  return {
    requested_driver: requested,
    resolved_driver: requested,
    mode: isSqlite ? 'sqlite_primary' : 'postgres_primary',
    sqlite_dev_fallback: devFallback,
    postgres_adapter_available: true,
    postgres_store_compatibility: isSqlite ? 'sqlite_primary' : 'sync_bridge',
  };
}
/**
 * Open the application database for the resolved runtime driver.
 *
 * Postgres resolves to the synchronous bridge; sqlite opens (creating the
 * parent directory if needed) a local file with WAL journaling and foreign
 * keys enabled.
 */
export function openDatabase(env = process.env) {
  const { resolved_driver: driver } = resolveDatabaseRuntimeConfig(env);
  if (driver === DB_DRIVER_POSTGRES) {
    return openPostgresSyncBridge(env);
  }

  const sqlitePath = env.DB_PATH || defaultDbPath;
  fs.mkdirSync(path.dirname(sqlitePath), { recursive: true });
  const database = new DatabaseSync(sqlitePath);
  database.exec('PRAGMA journal_mode = WAL;');
  database.exec('PRAGMA foreign_keys = ON;');
  return database;
}
/**
 * Build a postgres pool + runtime descriptor for migration tooling.
 * Throws unless DB_DRIVER=postgres was explicitly requested.
 */
export async function openPostgresMigrationAdapter(env = process.env) {
  const runtime = resolveDatabaseRuntimeConfig(env);
  if (runtime.requested_driver !== DB_DRIVER_POSTGRES) {
    throw new Error('openPostgresMigrationAdapter requires DB_DRIVER=postgres.');
  }

  const { pool, runtime: postgresRuntime } = createPostgresPool(env);
  return {
    pool,
    runtime: { ...runtime, postgres: postgresRuntime },
  };
}
/** Apply the postgres schema; thin delegation to dbPostgres.initializePostgresSchema. */
export async function initializePostgresMigrationSchema(pool) {
  await initializePostgresSchema(pool);
}
/** Run the postgres healthcheck; thin delegation to dbPostgres.runPostgresHealthcheck. */
export async function checkPostgresMigrationHealth(pool) {
  return runPostgresHealthcheck(pool);
}
/**
 * Create the full application schema on a fresh database and run in-place
 * migrations for databases created by older builds.
 *
 * Order matters: tables/indexes are created first (all IF NOT EXISTS, so
 * re-running is safe), missing columns are added next, and only then are
 * legacy rows backfilled and indexes rebuilt.
 *
 * @param db - open handle exposing exec()/prepare() (sqlite DatabaseSync or
 *   the postgres sync bridge — assumed API-compatible; confirm for postgres).
 */
export function initializeSchema(db) {
  // 1) Base schema. Idempotent: every statement uses IF NOT EXISTS.
  db.exec(`
    CREATE TABLE IF NOT EXISTS users (
      id TEXT PRIMARY KEY,
      discord_id TEXT,
      discord_avatar_url TEXT,
      runelite_account_hash TEXT UNIQUE NOT NULL,
      runelite_linked INTEGER NOT NULL DEFAULT 1,
      default_display_name TEXT NOT NULL,
      opt_out_hiscores INTEGER NOT NULL DEFAULT 0,
      opt_out_activity_feed INTEGER NOT NULL DEFAULT 0,
      last_seen_at TEXT,
      created_at TEXT NOT NULL
    );
    CREATE TABLE IF NOT EXISTS user_runelite_accounts (
      account_hash TEXT PRIMARY KEY,
      user_id TEXT NOT NULL,
      linked_at TEXT NOT NULL,
      is_active INTEGER NOT NULL DEFAULT 1,
      FOREIGN KEY (user_id) REFERENCES users (id)
    );
    CREATE INDEX IF NOT EXISTS idx_user_runelite_accounts_user_id
      ON user_runelite_accounts(user_id);
    CREATE TABLE IF NOT EXISTS oauth_sessions (
      id TEXT PRIMARY KEY,
      session_token TEXT UNIQUE NOT NULL,
      account_hash TEXT NOT NULL,
      expires_at TEXT NOT NULL,
      consumed_at TEXT
    );
    CREATE TABLE IF NOT EXISTS web_oauth_sessions (
      id TEXT PRIMARY KEY,
      session_token TEXT UNIQUE NOT NULL,
      expires_at TEXT NOT NULL,
      consumed_at TEXT
    );
    CREATE TABLE IF NOT EXISTS groups_table (
      id TEXT PRIMARY KEY,
      group_name TEXT NOT NULL,
      leader_user_id TEXT NOT NULL,
      join_code TEXT UNIQUE NOT NULL,
      join_code_expires_at TEXT NOT NULL,
      allow_open_invite_join INTEGER NOT NULL DEFAULT 0,
      open_invite_expires_at TEXT,
      opt_out_hiscores INTEGER NOT NULL DEFAULT 0,
      opt_out_activity_feed INTEGER NOT NULL DEFAULT 0,
      webhook_config_json TEXT NOT NULL DEFAULT '{}',
      created_at TEXT NOT NULL,
      FOREIGN KEY (leader_user_id) REFERENCES users (id)
    );
    CREATE TABLE IF NOT EXISTS group_members (
      id TEXT PRIMARY KEY,
      group_id TEXT NOT NULL,
      user_id TEXT,
      account_hash TEXT,
      expected_runescape_name TEXT NOT NULL,
      role TEXT NOT NULL,
      webhook_config_perms INTEGER NOT NULL DEFAULT 0,
      loadout_admin_perms INTEGER NOT NULL DEFAULT 0,
      joined_at TEXT NOT NULL,
      FOREIGN KEY (group_id) REFERENCES groups_table (id),
      FOREIGN KEY (user_id) REFERENCES users (id),
      FOREIGN KEY (account_hash) REFERENCES user_runelite_accounts (account_hash)
    );
    CREATE UNIQUE INDEX IF NOT EXISTS idx_group_members_claimed_group_user
      ON group_members(group_id, user_id)
      WHERE user_id IS NOT NULL;
    CREATE UNIQUE INDEX IF NOT EXISTS idx_group_members_account_hash
      ON group_members(account_hash)
      WHERE account_hash IS NOT NULL;
    CREATE TABLE IF NOT EXISTS group_join_requests (
      id TEXT PRIMARY KEY,
      group_id TEXT NOT NULL,
      requester_user_id TEXT NOT NULL,
      requester_account_hash TEXT,
      status TEXT NOT NULL,
      requested_at TEXT NOT NULL,
      resolved_at TEXT,
      resolved_by_user_id TEXT,
      FOREIGN KEY (group_id) REFERENCES groups_table (id),
      FOREIGN KEY (requester_user_id) REFERENCES users (id),
      FOREIGN KEY (requester_account_hash) REFERENCES user_runelite_accounts (account_hash),
      FOREIGN KEY (resolved_by_user_id) REFERENCES users (id)
    );
    CREATE INDEX IF NOT EXISTS idx_join_requests_group_status
      ON group_join_requests(group_id, status, requested_at DESC);
    CREATE INDEX IF NOT EXISTS idx_join_requests_requester
      ON group_join_requests(requester_user_id, requested_at DESC);
    CREATE TABLE IF NOT EXISTS storage_snapshots (
      id TEXT PRIMARY KEY,
      group_id TEXT NOT NULL,
      group_member_id TEXT,
      location_type TEXT NOT NULL,
      items_json TEXT NOT NULL,
      state_hash TEXT NOT NULL,
      updated_at TEXT NOT NULL,
      FOREIGN KEY (group_id) REFERENCES groups_table (id),
      FOREIGN KEY (group_member_id) REFERENCES group_members (id)
    );
    CREATE UNIQUE INDEX IF NOT EXISTS idx_storage_group_slot
      ON storage_snapshots(group_id, location_type)
      WHERE group_member_id IS NULL;
    CREATE UNIQUE INDEX IF NOT EXISTS idx_storage_member_slot
      ON storage_snapshots(group_id, group_member_id, location_type)
      WHERE group_member_id IS NOT NULL;
    CREATE TABLE IF NOT EXISTS audit_logs (
      id TEXT PRIMARY KEY,
      group_id TEXT NOT NULL,
      group_member_id TEXT,
      location_type TEXT NOT NULL,
      item_id INTEGER NOT NULL,
      quantity_delta INTEGER NOT NULL,
      created_at TEXT NOT NULL,
      FOREIGN KEY (group_id) REFERENCES groups_table (id),
      FOREIGN KEY (group_member_id) REFERENCES group_members (id)
    );
    CREATE INDEX IF NOT EXISTS idx_audit_group_created_at
      ON audit_logs(group_id, created_at DESC);
    CREATE TABLE IF NOT EXISTS activity_logs (
      id TEXT PRIMARY KEY,
      group_id TEXT NOT NULL,
      group_member_id TEXT,
      event_type TEXT NOT NULL,
      event_data_json TEXT NOT NULL,
      created_at TEXT NOT NULL,
      FOREIGN KEY (group_id) REFERENCES groups_table (id),
      FOREIGN KEY (group_member_id) REFERENCES group_members (id)
    );
    CREATE INDEX IF NOT EXISTS idx_activity_group_created_at
      ON activity_logs(group_id, created_at DESC);
    CREATE TABLE IF NOT EXISTS webhook_delivery_logs (
      id TEXT PRIMARY KEY,
      group_id TEXT NOT NULL,
      group_member_id TEXT,
      activity_id TEXT,
      event_type TEXT NOT NULL,
      delivery_status TEXT NOT NULL,
      attempt_count INTEGER NOT NULL DEFAULT 0,
      http_status INTEGER,
      error_message TEXT,
      webhook_host TEXT,
      payload_json TEXT NOT NULL,
      created_at TEXT NOT NULL,
      FOREIGN KEY (group_id) REFERENCES groups_table (id),
      FOREIGN KEY (group_member_id) REFERENCES group_members (id)
    );
    CREATE INDEX IF NOT EXISTS idx_webhook_delivery_group_created_at
      ON webhook_delivery_logs(group_id, created_at DESC);
    CREATE INDEX IF NOT EXISTS idx_webhook_delivery_status_created_at
      ON webhook_delivery_logs(delivery_status, created_at DESC);
    CREATE TABLE IF NOT EXISTS billing_customers (
      user_id TEXT PRIMARY KEY,
      stripe_customer_id TEXT UNIQUE NOT NULL,
      created_at TEXT NOT NULL,
      updated_at TEXT NOT NULL,
      FOREIGN KEY (user_id) REFERENCES users (id)
    );
    CREATE INDEX IF NOT EXISTS idx_billing_customers_customer_id
      ON billing_customers(stripe_customer_id);
    CREATE TABLE IF NOT EXISTS billing_subscriptions (
      stripe_subscription_id TEXT PRIMARY KEY,
      stripe_customer_id TEXT NOT NULL,
      status TEXT NOT NULL,
      price_id TEXT,
      current_period_end TEXT,
      cancel_at_period_end INTEGER NOT NULL DEFAULT 0,
      raw_json TEXT NOT NULL DEFAULT '{}',
      created_at TEXT NOT NULL,
      updated_at TEXT NOT NULL
    );
    CREATE INDEX IF NOT EXISTS idx_billing_subscriptions_customer_updated
      ON billing_subscriptions(stripe_customer_id, updated_at DESC);
    CREATE TABLE IF NOT EXISTS group_billing_subscriptions (
      stripe_subscription_id TEXT PRIMARY KEY,
      group_id TEXT NOT NULL,
      status TEXT NOT NULL,
      price_id TEXT,
      current_period_end TEXT,
      cancel_at_period_end INTEGER NOT NULL DEFAULT 0,
      raw_json TEXT NOT NULL DEFAULT '{}',
      created_at TEXT NOT NULL,
      updated_at TEXT NOT NULL,
      FOREIGN KEY (group_id) REFERENCES groups_table (id)
    );
    CREATE INDEX IF NOT EXISTS idx_group_billing_subscriptions_group_updated
      ON group_billing_subscriptions(group_id, updated_at DESC);
    CREATE TABLE IF NOT EXISTS user_boost_credits (
      user_id TEXT PRIMARY KEY,
      available_boosts INTEGER NOT NULL DEFAULT 0,
      updated_at TEXT NOT NULL,
      FOREIGN KEY (user_id) REFERENCES users (id)
    );
    CREATE TABLE IF NOT EXISTS group_manual_boost_allocations (
      user_id TEXT NOT NULL,
      group_id TEXT NOT NULL,
      boosts_assigned INTEGER NOT NULL,
      created_at TEXT NOT NULL,
      updated_at TEXT NOT NULL,
      PRIMARY KEY (user_id, group_id),
      FOREIGN KEY (user_id) REFERENCES users (id),
      FOREIGN KEY (group_id) REFERENCES groups_table (id)
    );
    CREATE INDEX IF NOT EXISTS idx_group_manual_boost_allocations_group
      ON group_manual_boost_allocations(group_id, updated_at DESC);
    CREATE TABLE IF NOT EXISTS user_subscription_boost_overrides (
      user_id TEXT NOT NULL,
      allocation_month TEXT NOT NULL,
      allocations_json TEXT NOT NULL DEFAULT '{}',
      configured_at TEXT NOT NULL,
      PRIMARY KEY (user_id, allocation_month),
      FOREIGN KEY (user_id) REFERENCES users (id)
    );
    CREATE TABLE IF NOT EXISTS item_catalog (
      item_id INTEGER PRIMARY KEY,
      name TEXT NOT NULL,
      ge_value INTEGER,
      is_tradeable INTEGER NOT NULL DEFAULT 1,
      icon_url TEXT,
      catalog_source TEXT,
      catalog_version TEXT,
      updated_at TEXT NOT NULL
    );
    CREATE INDEX IF NOT EXISTS idx_item_catalog_name
      ON item_catalog(name);
    CREATE TABLE IF NOT EXISTS item_catalog_snapshots (
      id TEXT PRIMARY KEY,
      source_name TEXT NOT NULL,
      source_version TEXT,
      checksum_sha256 TEXT NOT NULL,
      item_count INTEGER NOT NULL,
      created_at TEXT NOT NULL,
      notes TEXT
    );
    CREATE INDEX IF NOT EXISTS idx_item_catalog_snapshots_created_at
      ON item_catalog_snapshots(created_at DESC);
    CREATE TABLE IF NOT EXISTS item_catalog_snapshot_items (
      snapshot_id TEXT NOT NULL,
      item_id INTEGER NOT NULL,
      name TEXT NOT NULL,
      ge_value INTEGER,
      is_tradeable INTEGER NOT NULL DEFAULT 1,
      icon_url TEXT,
      PRIMARY KEY (snapshot_id, item_id),
      FOREIGN KEY (snapshot_id) REFERENCES item_catalog_snapshots (id) ON DELETE CASCADE
    );
    CREATE TABLE IF NOT EXISTS hiscores_fallback_state (
      group_member_id TEXT PRIMARY KEY,
      runescape_name TEXT NOT NULL,
      snapshot_json TEXT NOT NULL,
      updated_at TEXT NOT NULL,
      FOREIGN KEY (group_member_id) REFERENCES group_members (id)
    );
    CREATE TABLE IF NOT EXISTS group_wealth_snapshots (
      id TEXT PRIMARY KEY,
      group_id TEXT NOT NULL,
      total_value_gp INTEGER NOT NULL DEFAULT 0,
      captured_at TEXT NOT NULL,
      FOREIGN KEY (group_id) REFERENCES groups_table (id)
    );
    CREATE INDEX IF NOT EXISTS idx_group_wealth_snapshots_group_time
      ON group_wealth_snapshots(group_id, captured_at DESC);
    CREATE TABLE IF NOT EXISTS group_goals (
      id TEXT PRIMARY KEY,
      group_id TEXT NOT NULL,
      created_by_user_id TEXT NOT NULL,
      title TEXT NOT NULL,
      description TEXT,
      target_value_gp INTEGER NOT NULL,
      current_value_gp INTEGER NOT NULL DEFAULT 0,
      status TEXT NOT NULL DEFAULT 'ACTIVE',
      due_at TEXT,
      created_at TEXT NOT NULL,
      updated_at TEXT NOT NULL,
      FOREIGN KEY (group_id) REFERENCES groups_table (id),
      FOREIGN KEY (created_by_user_id) REFERENCES users (id)
    );
    CREATE INDEX IF NOT EXISTS idx_group_goals_group_status
      ON group_goals(group_id, status, updated_at DESC);
    CREATE TABLE IF NOT EXISTS group_loadouts (
      id TEXT PRIMARY KEY,
      group_id TEXT NOT NULL,
      owner_user_id TEXT NOT NULL,
      name TEXT NOT NULL,
      description TEXT,
      scope TEXT NOT NULL,
      created_at TEXT NOT NULL,
      updated_at TEXT NOT NULL,
      FOREIGN KEY (group_id) REFERENCES groups_table (id),
      FOREIGN KEY (owner_user_id) REFERENCES users (id)
    );
    CREATE INDEX IF NOT EXISTS idx_group_loadouts_group_scope_updated
      ON group_loadouts(group_id, scope, updated_at DESC);
    CREATE TABLE IF NOT EXISTS group_loadout_items (
      id TEXT PRIMARY KEY,
      loadout_id TEXT NOT NULL,
      item_id INTEGER NOT NULL,
      required_qty INTEGER NOT NULL,
      created_at TEXT NOT NULL,
      FOREIGN KEY (loadout_id) REFERENCES group_loadouts (id)
    );
    CREATE UNIQUE INDEX IF NOT EXISTS idx_group_loadout_items_unique
      ON group_loadout_items(loadout_id, item_id);
    CREATE TABLE IF NOT EXISTS feature_usage_events (
      id TEXT PRIMARY KEY,
      group_id TEXT NOT NULL,
      user_id TEXT,
      feature_key TEXT NOT NULL,
      action_key TEXT NOT NULL,
      created_at TEXT NOT NULL,
      FOREIGN KEY (group_id) REFERENCES groups_table (id),
      FOREIGN KEY (user_id) REFERENCES users (id)
    );
    CREATE INDEX IF NOT EXISTS idx_feature_usage_events_feature_time
      ON feature_usage_events(feature_key, action_key, created_at DESC);
    CREATE INDEX IF NOT EXISTS idx_feature_usage_events_group_time
      ON feature_usage_events(group_id, created_at DESC);
  `);
  // 2) Column-level migrations for databases created before these fields existed.
  addColumnIfMissing(db, 'groups_table', 'opt_out_hiscores INTEGER NOT NULL DEFAULT 0');
  addColumnIfMissing(db, 'groups_table', 'opt_out_activity_feed INTEGER NOT NULL DEFAULT 0');
  addColumnIfMissing(db, 'groups_table', 'allow_open_invite_join INTEGER NOT NULL DEFAULT 0');
  addColumnIfMissing(db, 'groups_table', "join_code_expires_at TEXT NOT NULL DEFAULT ''");
  addColumnIfMissing(db, 'groups_table', 'open_invite_expires_at TEXT');
  addColumnIfMissing(db, 'groups_table', "webhook_config_json TEXT NOT NULL DEFAULT '{}'");
  addColumnIfMissing(db, 'users', 'runelite_linked INTEGER NOT NULL DEFAULT 1');
  addColumnIfMissing(db, 'users', 'discord_avatar_url TEXT');
  addColumnIfMissing(db, 'users', 'last_seen_at TEXT');
  addColumnIfMissing(db, 'item_catalog', 'is_tradeable INTEGER NOT NULL DEFAULT 1');
  addColumnIfMissing(db, 'item_catalog', 'catalog_source TEXT');
  addColumnIfMissing(db, 'item_catalog', 'catalog_version TEXT');
  addColumnIfMissing(db, 'group_members', 'webhook_config_perms INTEGER NOT NULL DEFAULT 0');
  addColumnIfMissing(db, 'group_members', 'loadout_admin_perms INTEGER NOT NULL DEFAULT 0');
  addColumnIfMissing(db, 'group_members', 'account_hash TEXT');
  addColumnIfMissing(db, 'group_join_requests', 'requester_account_hash TEXT');
  // 3) Backfill: give legacy groups a join-code expiry one week out and a
  //    non-empty webhook config so NOT NULL expectations hold.
  const defaultInviteExpiry = new Date(Date.now() + 7 * 24 * 60 * 60 * 1000).toISOString();
  db.prepare(
    `UPDATE groups_table
       SET join_code_expires_at = ?
     WHERE join_code_expires_at IS NULL
        OR join_code_expires_at = ''`
  ).run(defaultInviteExpiry);
  db.prepare(
    `UPDATE groups_table
       SET webhook_config_json = '{}'
     WHERE webhook_config_json IS NULL
        OR webhook_config_json = ''`
  ).run();
  // 4) Seed the link table from the legacy users.runelite_account_hash column.
  db.prepare(
    `INSERT OR IGNORE INTO user_runelite_accounts (
       account_hash,
       user_id,
       linked_at,
       is_active
     )
     SELECT
       runelite_account_hash,
       id,
       created_at,
       CASE WHEN runelite_linked = 1 THEN 1 ELSE 0 END
     FROM users
     WHERE runelite_account_hash IS NOT NULL
       AND runelite_account_hash <> ''`
  ).run();
  // 5) Recompute users.runelite_linked and the denormalized
  //    users.runelite_account_hash from the link table just seeded.
  db.prepare(
    `UPDATE users
       SET runelite_linked = CASE
         WHEN EXISTS (
           SELECT 1
           FROM user_runelite_accounts ura
           WHERE ura.user_id = users.id
             AND ura.is_active = 1
         ) THEN 1
         ELSE 0
       END`
  ).run();
  db.prepare(
    `UPDATE users
       SET runelite_account_hash = COALESCE(
         (
           SELECT ura.account_hash
           FROM user_runelite_accounts ura
           WHERE ura.user_id = users.id
             AND ura.is_active = 1
           ORDER BY ura.linked_at DESC
           LIMIT 1
         ),
         runelite_account_hash
       )`
  ).run();
  // 6) Propagate account hashes onto memberships and pending join requests
  //    that predate the account_hash columns.
  db.prepare(
    `UPDATE group_members
       SET account_hash = (
         SELECT ura.account_hash
         FROM user_runelite_accounts ura
         WHERE ura.user_id = group_members.user_id
           AND ura.is_active = 1
         ORDER BY ura.linked_at DESC
         LIMIT 1
       )
     WHERE group_members.user_id IS NOT NULL
       AND (group_members.account_hash IS NULL OR group_members.account_hash = '')`
  ).run();
  db.prepare(
    `UPDATE group_join_requests
       SET requester_account_hash = (
         SELECT ura.account_hash
         FROM user_runelite_accounts ura
         WHERE ura.user_id = group_join_requests.requester_user_id
           AND ura.is_active = 1
         ORDER BY ura.linked_at DESC
         LIMIT 1
       )
     WHERE requester_account_hash IS NULL
        OR requester_account_hash = ''`
  ).run();
  // 7) Replace the old per-user unique index with the group-scoped and
  //    account-hash partial unique indexes.
  db.exec(`DROP INDEX IF EXISTS idx_group_members_claimed_user;`);
  db.exec(
    `CREATE UNIQUE INDEX IF NOT EXISTS idx_group_members_claimed_group_user
       ON group_members(group_id, user_id)
       WHERE user_id IS NOT NULL;`
  );
  db.exec(
    `CREATE UNIQUE INDEX IF NOT EXISTS idx_group_members_account_hash
       ON group_members(account_hash)
       WHERE account_hash IS NOT NULL;`
  );
}
/** Current wall-clock time as an ISO-8601 UTC timestamp string. */
export function nowIso() {
  const now = new Date();
  return now.toISOString();
}
/**
 * Best-effort ALTER TABLE ... ADD COLUMN.
 *
 * "Duplicate column" failures are expected on already-migrated databases and
 * are swallowed; any other error is re-thrown.
 */
function addColumnIfMissing(db, tableName, columnDefinition) {
  try {
    db.exec(`ALTER TABLE ${tableName} ADD COLUMN ${columnDefinition};`);
  } catch (error) {
    const message = String(error?.message ?? '').toLowerCase();
    if (!(message.includes('duplicate column name') || message.includes('already exists'))) {
      throw error;
    }
  }
}

View File

@@ -0,0 +1,615 @@
import fs from 'node:fs';
import path from 'node:path';
import { DatabaseSync } from 'node:sqlite';
import {
createPostgresPool,
initializePostgresSchema,
runPostgresHealthcheck,
} from './dbPostgres.js';
import { openPostgresSyncBridge } from './dbPostgresSyncBridge.js';
import { parseBooleanFlag } from './utils.js';
// Default on-disk location for the sqlite database file.
const defaultDbPath = path.join(process.cwd(), 'data', 'gigagimbank.sqlite');
// The two supported DB_DRIVER identifiers.
const DB_DRIVER_SQLITE = 'sqlite';
const DB_DRIVER_POSTGRES = 'postgres';
// Coerce an arbitrary DB_DRIVER value to a supported driver name; anything
// other than "postgres" (trimmed, case-insensitive) maps to sqlite.
function normalizeDbDriver(value) {
  const normalized = String(value ?? '').trim().toLowerCase();
  if (normalized === DB_DRIVER_POSTGRES) {
    return DB_DRIVER_POSTGRES;
  }
  return DB_DRIVER_SQLITE;
}
/**
 * Decide which database driver this process should actually run on.
 *
 * Reads DB_DRIVER plus the DEV_DB_SQLITE_FALLBACK escape hatch and returns a
 * descriptor of both the requested and the resolved driver. When postgres is
 * requested but the dev fallback flag is set, sqlite is used instead.
 */
export function resolveDatabaseRuntimeConfig(env = process.env) {
  const requestedDriver = normalizeDbDriver(env.DB_DRIVER);
  const sqliteDevFallback = parseBooleanFlag(env.DEV_DB_SQLITE_FALLBACK);
  if (requestedDriver === DB_DRIVER_POSTGRES && sqliteDevFallback) {
    // Developer override: record that postgres was requested but run on sqlite.
    return {
      requested_driver: requestedDriver,
      resolved_driver: DB_DRIVER_SQLITE,
      mode: 'sqlite_dev_fallback',
      sqlite_dev_fallback: true,
      postgres_adapter_available: true,
      postgres_store_compatibility: 'sqlite_fallback',
    };
  }
  return {
    requested_driver: requestedDriver,
    resolved_driver: requestedDriver,
    mode: requestedDriver === DB_DRIVER_SQLITE ? 'sqlite_primary' : 'postgres_primary',
    sqlite_dev_fallback: sqliteDevFallback,
    postgres_adapter_available: true,
    postgres_store_compatibility:
      requestedDriver === DB_DRIVER_POSTGRES ? 'sync_bridge' : 'sqlite_primary',
  };
}
/**
 * Open the application database for the resolved runtime driver.
 *
 * Postgres resolves to the synchronous bridge; sqlite opens (creating the
 * parent directory if needed) a local file with WAL journaling and foreign
 * keys enabled.
 */
export function openDatabase(env = process.env) {
  const runtime = resolveDatabaseRuntimeConfig(env);
  if (runtime.resolved_driver === DB_DRIVER_POSTGRES) {
    const bridge = openPostgresSyncBridge(env);
    return bridge;
  }
  const filePath = env.DB_PATH || defaultDbPath;
  fs.mkdirSync(path.dirname(filePath), { recursive: true });
  const db = new DatabaseSync(filePath);
  db.exec('PRAGMA journal_mode = WAL;');
  db.exec('PRAGMA foreign_keys = ON;');
  return db;
}
/**
 * Build a postgres pool + runtime descriptor for migration tooling.
 * Throws unless DB_DRIVER=postgres was explicitly requested.
 */
export async function openPostgresMigrationAdapter(env = process.env) {
  const runtime = resolveDatabaseRuntimeConfig(env);
  if (runtime.requested_driver !== DB_DRIVER_POSTGRES) {
    throw new Error('openPostgresMigrationAdapter requires DB_DRIVER=postgres.');
  }
  const { pool, runtime: postgresRuntime } = createPostgresPool(env);
  return {
    pool,
    runtime: {
      ...runtime,
      postgres: postgresRuntime,
    },
  };
}
/** Apply the postgres schema; thin delegation to dbPostgres.initializePostgresSchema. */
export async function initializePostgresMigrationSchema(pool) {
  await initializePostgresSchema(pool);
}
/** Run the postgres healthcheck; thin delegation to dbPostgres.runPostgresHealthcheck. */
export async function checkPostgresMigrationHealth(pool) {
  return runPostgresHealthcheck(pool);
}
// Create the full application schema idempotently and run the in-place
// migrations that bring older databases up to date. Safe to call on every
// boot: all DDL uses IF NOT EXISTS and column additions go through
// addColumnIfMissing(). `db` is a handle returned by openDatabase()
// (sqlite DatabaseSync, or the postgres sync bridge which exposes the same
// exec/prepare interface plus an `engine` tag).
export function initializeSchema(db) {
// One batch of CREATE TABLE / CREATE INDEX statements; every statement is
// idempotent, so re-running on an existing database is a no-op.
db.exec(`
CREATE TABLE IF NOT EXISTS users (
id TEXT PRIMARY KEY,
discord_id TEXT,
discord_avatar_url TEXT,
runelite_account_hash TEXT UNIQUE NOT NULL,
runelite_linked INTEGER NOT NULL DEFAULT 1,
default_display_name TEXT NOT NULL,
opt_out_hiscores INTEGER NOT NULL DEFAULT 0,
opt_out_activity_feed INTEGER NOT NULL DEFAULT 0,
last_seen_at TEXT,
created_at TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS user_runelite_accounts (
account_hash TEXT PRIMARY KEY,
user_id TEXT NOT NULL,
linked_at TEXT NOT NULL,
is_active INTEGER NOT NULL DEFAULT 1,
FOREIGN KEY (user_id) REFERENCES users (id)
);
CREATE INDEX IF NOT EXISTS idx_user_runelite_accounts_user_id
ON user_runelite_accounts(user_id);
CREATE TABLE IF NOT EXISTS oauth_sessions (
id TEXT PRIMARY KEY,
session_token TEXT UNIQUE NOT NULL,
account_hash TEXT NOT NULL,
expires_at TEXT NOT NULL,
consumed_at TEXT
);
CREATE TABLE IF NOT EXISTS web_oauth_sessions (
id TEXT PRIMARY KEY,
session_token TEXT UNIQUE NOT NULL,
expires_at TEXT NOT NULL,
consumed_at TEXT
);
CREATE TABLE IF NOT EXISTS groups_table (
id TEXT PRIMARY KEY,
group_name TEXT NOT NULL,
leader_user_id TEXT NOT NULL,
join_code TEXT UNIQUE NOT NULL,
join_code_expires_at TEXT NOT NULL,
allow_open_invite_join INTEGER NOT NULL DEFAULT 0,
open_invite_expires_at TEXT,
opt_out_hiscores INTEGER NOT NULL DEFAULT 0,
opt_out_activity_feed INTEGER NOT NULL DEFAULT 0,
webhook_config_json TEXT NOT NULL DEFAULT '{}',
created_at TEXT NOT NULL,
FOREIGN KEY (leader_user_id) REFERENCES users (id)
);
CREATE TABLE IF NOT EXISTS group_members (
id TEXT PRIMARY KEY,
group_id TEXT NOT NULL,
user_id TEXT,
account_hash TEXT,
expected_runescape_name TEXT NOT NULL,
role TEXT NOT NULL,
webhook_config_perms INTEGER NOT NULL DEFAULT 0,
loadout_admin_perms INTEGER NOT NULL DEFAULT 0,
joined_at TEXT NOT NULL,
FOREIGN KEY (group_id) REFERENCES groups_table (id),
FOREIGN KEY (user_id) REFERENCES users (id),
FOREIGN KEY (account_hash) REFERENCES user_runelite_accounts (account_hash)
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_group_members_claimed_group_user
ON group_members(group_id, user_id)
WHERE user_id IS NOT NULL;
CREATE UNIQUE INDEX IF NOT EXISTS idx_group_members_account_hash
ON group_members(account_hash)
WHERE account_hash IS NOT NULL;
CREATE TABLE IF NOT EXISTS group_join_requests (
id TEXT PRIMARY KEY,
group_id TEXT NOT NULL,
requester_user_id TEXT NOT NULL,
requester_account_hash TEXT,
status TEXT NOT NULL,
requested_at TEXT NOT NULL,
resolved_at TEXT,
resolved_by_user_id TEXT,
FOREIGN KEY (group_id) REFERENCES groups_table (id),
FOREIGN KEY (requester_user_id) REFERENCES users (id),
FOREIGN KEY (requester_account_hash) REFERENCES user_runelite_accounts (account_hash),
FOREIGN KEY (resolved_by_user_id) REFERENCES users (id)
);
CREATE INDEX IF NOT EXISTS idx_join_requests_group_status
ON group_join_requests(group_id, status, requested_at DESC);
CREATE INDEX IF NOT EXISTS idx_join_requests_requester
ON group_join_requests(requester_user_id, requested_at DESC);
CREATE TABLE IF NOT EXISTS storage_snapshots (
id TEXT PRIMARY KEY,
group_id TEXT NOT NULL,
group_member_id TEXT,
location_type TEXT NOT NULL,
items_json TEXT NOT NULL,
state_hash TEXT NOT NULL,
updated_at TEXT NOT NULL,
FOREIGN KEY (group_id) REFERENCES groups_table (id),
FOREIGN KEY (group_member_id) REFERENCES group_members (id)
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_storage_group_slot
ON storage_snapshots(group_id, location_type)
WHERE group_member_id IS NULL;
CREATE UNIQUE INDEX IF NOT EXISTS idx_storage_member_slot
ON storage_snapshots(group_id, group_member_id, location_type)
WHERE group_member_id IS NOT NULL;
CREATE TABLE IF NOT EXISTS audit_logs (
id TEXT PRIMARY KEY,
group_id TEXT NOT NULL,
group_member_id TEXT,
location_type TEXT NOT NULL,
item_id INTEGER NOT NULL,
quantity_delta INTEGER NOT NULL,
created_at TEXT NOT NULL,
FOREIGN KEY (group_id) REFERENCES groups_table (id),
FOREIGN KEY (group_member_id) REFERENCES group_members (id)
);
CREATE INDEX IF NOT EXISTS idx_audit_group_created_at
ON audit_logs(group_id, created_at DESC);
CREATE TABLE IF NOT EXISTS activity_logs (
id TEXT PRIMARY KEY,
group_id TEXT NOT NULL,
group_member_id TEXT,
event_type TEXT NOT NULL,
event_data_json TEXT NOT NULL,
created_at TEXT NOT NULL,
FOREIGN KEY (group_id) REFERENCES groups_table (id),
FOREIGN KEY (group_member_id) REFERENCES group_members (id)
);
CREATE INDEX IF NOT EXISTS idx_activity_group_created_at
ON activity_logs(group_id, created_at DESC);
CREATE TABLE IF NOT EXISTS webhook_delivery_logs (
id TEXT PRIMARY KEY,
group_id TEXT NOT NULL,
group_member_id TEXT,
activity_id TEXT,
event_type TEXT NOT NULL,
delivery_status TEXT NOT NULL,
attempt_count INTEGER NOT NULL DEFAULT 0,
http_status INTEGER,
error_message TEXT,
webhook_host TEXT,
payload_json TEXT NOT NULL,
created_at TEXT NOT NULL,
FOREIGN KEY (group_id) REFERENCES groups_table (id),
FOREIGN KEY (group_member_id) REFERENCES group_members (id)
);
CREATE INDEX IF NOT EXISTS idx_webhook_delivery_group_created_at
ON webhook_delivery_logs(group_id, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_webhook_delivery_status_created_at
ON webhook_delivery_logs(delivery_status, created_at DESC);
CREATE TABLE IF NOT EXISTS billing_customers (
user_id TEXT PRIMARY KEY,
stripe_customer_id TEXT UNIQUE NOT NULL,
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL,
FOREIGN KEY (user_id) REFERENCES users (id)
);
CREATE INDEX IF NOT EXISTS idx_billing_customers_customer_id
ON billing_customers(stripe_customer_id);
CREATE TABLE IF NOT EXISTS billing_subscriptions (
stripe_subscription_id TEXT PRIMARY KEY,
stripe_customer_id TEXT NOT NULL,
status TEXT NOT NULL,
price_id TEXT,
current_period_end TEXT,
cancel_at_period_end INTEGER NOT NULL DEFAULT 0,
raw_json TEXT NOT NULL DEFAULT '{}',
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_billing_subscriptions_customer_updated
ON billing_subscriptions(stripe_customer_id, updated_at DESC);
CREATE TABLE IF NOT EXISTS group_billing_subscriptions (
stripe_subscription_id TEXT PRIMARY KEY,
group_id TEXT NOT NULL,
status TEXT NOT NULL,
price_id TEXT,
current_period_end TEXT,
cancel_at_period_end INTEGER NOT NULL DEFAULT 0,
raw_json TEXT NOT NULL DEFAULT '{}',
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL,
FOREIGN KEY (group_id) REFERENCES groups_table (id)
);
CREATE INDEX IF NOT EXISTS idx_group_billing_subscriptions_group_updated
ON group_billing_subscriptions(group_id, updated_at DESC);
CREATE TABLE IF NOT EXISTS user_boost_credits (
user_id TEXT PRIMARY KEY,
available_boosts INTEGER NOT NULL DEFAULT 0,
updated_at TEXT NOT NULL,
FOREIGN KEY (user_id) REFERENCES users (id)
);
CREATE TABLE IF NOT EXISTS group_manual_boost_allocations (
user_id TEXT NOT NULL,
group_id TEXT NOT NULL,
boosts_assigned INTEGER NOT NULL,
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL,
PRIMARY KEY (user_id, group_id),
FOREIGN KEY (user_id) REFERENCES users (id),
FOREIGN KEY (group_id) REFERENCES groups_table (id)
);
CREATE INDEX IF NOT EXISTS idx_group_manual_boost_allocations_group
ON group_manual_boost_allocations(group_id, updated_at DESC);
CREATE TABLE IF NOT EXISTS user_subscription_boost_overrides (
user_id TEXT NOT NULL,
allocation_month TEXT NOT NULL,
allocations_json TEXT NOT NULL DEFAULT '{}',
configured_at TEXT NOT NULL,
PRIMARY KEY (user_id, allocation_month),
FOREIGN KEY (user_id) REFERENCES users (id)
);
CREATE TABLE IF NOT EXISTS item_catalog (
item_id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
ge_value INTEGER,
is_tradeable INTEGER NOT NULL DEFAULT 1,
icon_url TEXT,
catalog_source TEXT,
catalog_version TEXT,
updated_at TEXT NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_item_catalog_name
ON item_catalog(name);
CREATE TABLE IF NOT EXISTS item_catalog_snapshots (
id TEXT PRIMARY KEY,
source_name TEXT NOT NULL,
source_version TEXT,
checksum_sha256 TEXT NOT NULL,
item_count INTEGER NOT NULL,
created_at TEXT NOT NULL,
notes TEXT
);
CREATE INDEX IF NOT EXISTS idx_item_catalog_snapshots_created_at
ON item_catalog_snapshots(created_at DESC);
CREATE TABLE IF NOT EXISTS item_catalog_snapshot_items (
snapshot_id TEXT NOT NULL,
item_id INTEGER NOT NULL,
name TEXT NOT NULL,
ge_value INTEGER,
is_tradeable INTEGER NOT NULL DEFAULT 1,
icon_url TEXT,
PRIMARY KEY (snapshot_id, item_id),
FOREIGN KEY (snapshot_id) REFERENCES item_catalog_snapshots (id) ON DELETE CASCADE
);
CREATE TABLE IF NOT EXISTS hiscores_fallback_state (
group_member_id TEXT PRIMARY KEY,
runescape_name TEXT NOT NULL,
snapshot_json TEXT NOT NULL,
updated_at TEXT NOT NULL,
FOREIGN KEY (group_member_id) REFERENCES group_members (id)
);
CREATE TABLE IF NOT EXISTS group_wealth_snapshots (
id TEXT PRIMARY KEY,
group_id TEXT NOT NULL,
total_value_gp INTEGER NOT NULL DEFAULT 0,
captured_at TEXT NOT NULL,
FOREIGN KEY (group_id) REFERENCES groups_table (id)
);
CREATE INDEX IF NOT EXISTS idx_group_wealth_snapshots_group_time
ON group_wealth_snapshots(group_id, captured_at DESC);
CREATE TABLE IF NOT EXISTS group_goals (
id TEXT PRIMARY KEY,
group_id TEXT NOT NULL,
created_by_user_id TEXT NOT NULL,
title TEXT NOT NULL,
description TEXT,
target_value_gp INTEGER NOT NULL,
current_value_gp INTEGER NOT NULL DEFAULT 0,
status TEXT NOT NULL DEFAULT 'ACTIVE',
due_at TEXT,
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL,
FOREIGN KEY (group_id) REFERENCES groups_table (id),
FOREIGN KEY (created_by_user_id) REFERENCES users (id)
);
CREATE INDEX IF NOT EXISTS idx_group_goals_group_status
ON group_goals(group_id, status, updated_at DESC);
CREATE TABLE IF NOT EXISTS group_loadouts (
id TEXT PRIMARY KEY,
group_id TEXT NOT NULL,
owner_user_id TEXT NOT NULL,
name TEXT NOT NULL,
description TEXT,
scope TEXT NOT NULL,
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL,
FOREIGN KEY (group_id) REFERENCES groups_table (id),
FOREIGN KEY (owner_user_id) REFERENCES users (id)
);
CREATE INDEX IF NOT EXISTS idx_group_loadouts_group_scope_updated
ON group_loadouts(group_id, scope, updated_at DESC);
CREATE TABLE IF NOT EXISTS group_loadout_items (
id TEXT PRIMARY KEY,
loadout_id TEXT NOT NULL,
item_id INTEGER NOT NULL,
required_qty INTEGER NOT NULL,
created_at TEXT NOT NULL,
FOREIGN KEY (loadout_id) REFERENCES group_loadouts (id)
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_group_loadout_items_unique
ON group_loadout_items(loadout_id, item_id);
CREATE TABLE IF NOT EXISTS feature_usage_events (
id TEXT PRIMARY KEY,
group_id TEXT NOT NULL,
user_id TEXT,
feature_key TEXT NOT NULL,
action_key TEXT NOT NULL,
created_at TEXT NOT NULL,
FOREIGN KEY (group_id) REFERENCES groups_table (id),
FOREIGN KEY (user_id) REFERENCES users (id)
);
CREATE INDEX IF NOT EXISTS idx_feature_usage_events_feature_time
ON feature_usage_events(feature_key, action_key, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_feature_usage_events_group_time
ON feature_usage_events(group_id, created_at DESC);
`);
// --- Migrations: columns introduced after the original schema shipped. ---
addColumnIfMissing(db, 'groups_table', 'opt_out_hiscores INTEGER NOT NULL DEFAULT 0');
addColumnIfMissing(db, 'groups_table', 'opt_out_activity_feed INTEGER NOT NULL DEFAULT 0');
addColumnIfMissing(db, 'groups_table', 'allow_open_invite_join INTEGER NOT NULL DEFAULT 0');
addColumnIfMissing(db, 'groups_table', "join_code_expires_at TEXT NOT NULL DEFAULT ''");
addColumnIfMissing(db, 'groups_table', 'open_invite_expires_at TEXT');
addColumnIfMissing(db, 'groups_table', "webhook_config_json TEXT NOT NULL DEFAULT '{}'");
addColumnIfMissing(db, 'users', 'runelite_linked INTEGER NOT NULL DEFAULT 1');
addColumnIfMissing(db, 'users', 'discord_avatar_url TEXT');
addColumnIfMissing(db, 'users', 'last_seen_at TEXT');
addColumnIfMissing(db, 'item_catalog', 'is_tradeable INTEGER NOT NULL DEFAULT 1');
addColumnIfMissing(db, 'item_catalog', 'catalog_source TEXT');
addColumnIfMissing(db, 'item_catalog', 'catalog_version TEXT');
addColumnIfMissing(db, 'group_members', 'webhook_config_perms INTEGER NOT NULL DEFAULT 0');
addColumnIfMissing(db, 'group_members', 'loadout_admin_perms INTEGER NOT NULL DEFAULT 0');
addColumnIfMissing(db, 'group_members', 'account_hash TEXT');
addColumnIfMissing(db, 'group_join_requests', 'requester_account_hash TEXT');
// Backfill join_code_expires_at for pre-existing groups: give them a fresh
// 7-day expiry so old invites don't read as permanently expired/blank.
const defaultInviteExpiry = new Date(Date.now() + 7 * 24 * 60 * 60 * 1000).toISOString();
db.prepare(
`UPDATE groups_table
SET join_code_expires_at = ?
WHERE join_code_expires_at IS NULL
OR join_code_expires_at = ''`
).run(defaultInviteExpiry);
// Normalize missing webhook configuration to an empty JSON object.
db.prepare(
`UPDATE groups_table
SET webhook_config_json = '{}'
WHERE webhook_config_json IS NULL
OR webhook_config_json = ''`
).run();
// Seed user_runelite_accounts from the legacy users.runelite_account_hash
// column. Postgres has no INSERT OR IGNORE, so its variant uses
// ON CONFLICT ... DO NOTHING for the same keep-first semantics.
const insertOrIgnoreAccountsSql = db.engine === 'postgres'
? `INSERT INTO user_runelite_accounts (
account_hash,
user_id,
linked_at,
is_active
)
SELECT
runelite_account_hash,
id,
created_at,
CASE WHEN runelite_linked = 1 THEN 1 ELSE 0 END
FROM users
WHERE runelite_account_hash IS NOT NULL
AND runelite_account_hash <> ''
ON CONFLICT(account_hash) DO NOTHING`
: `INSERT OR IGNORE INTO user_runelite_accounts (
account_hash,
user_id,
linked_at,
is_active
)
SELECT
runelite_account_hash,
id,
created_at,
CASE WHEN runelite_linked = 1 THEN 1 ELSE 0 END
FROM users
WHERE runelite_account_hash IS NOT NULL
AND runelite_account_hash <> ''`;
db.prepare(insertOrIgnoreAccountsSql).run();
// Recompute users.runelite_linked from whether the user currently has any
// active account link.
db.prepare(
`UPDATE users
SET runelite_linked = CASE
WHEN EXISTS (
SELECT 1
FROM user_runelite_accounts ura
WHERE ura.user_id = users.id
AND ura.is_active = 1
) THEN 1
ELSE 0
END`
).run();
// Point the legacy users.runelite_account_hash column at the most recently
// linked active account (kept for backwards compatibility with old readers).
db.prepare(
`UPDATE users
SET runelite_account_hash = COALESCE(
(
SELECT ura.account_hash
FROM user_runelite_accounts ura
WHERE ura.user_id = users.id
AND ura.is_active = 1
ORDER BY ura.linked_at DESC
LIMIT 1
),
runelite_account_hash
)`
).run();
// Backfill group_members.account_hash for claimed members that predate the
// account_hash column.
db.prepare(
`UPDATE group_members
SET account_hash = (
SELECT ura.account_hash
FROM user_runelite_accounts ura
WHERE ura.user_id = group_members.user_id
AND ura.is_active = 1
ORDER BY ura.linked_at DESC
LIMIT 1
)
WHERE group_members.user_id IS NOT NULL
AND (group_members.account_hash IS NULL OR group_members.account_hash = '')`
).run();
// Same backfill for pending/resolved join requests.
db.prepare(
`UPDATE group_join_requests
SET requester_account_hash = (
SELECT ura.account_hash
FROM user_runelite_accounts ura
WHERE ura.user_id = group_join_requests.requester_user_id
AND ura.is_active = 1
ORDER BY ura.linked_at DESC
LIMIT 1
)
WHERE requester_account_hash IS NULL
OR requester_account_hash = ''`
).run();
// Replace the retired single-column claim index with the per-group and
// per-account unique indexes (also declared in the CREATE batch above for
// fresh databases).
db.exec(`DROP INDEX IF EXISTS idx_group_members_claimed_user;`);
db.exec(
`CREATE UNIQUE INDEX IF NOT EXISTS idx_group_members_claimed_group_user
ON group_members(group_id, user_id)
WHERE user_id IS NOT NULL;`
);
db.exec(
`CREATE UNIQUE INDEX IF NOT EXISTS idx_group_members_account_hash
ON group_members(account_hash)
WHERE account_hash IS NOT NULL;`
);
}
export function nowIso() {
return new Date().toISOString();
}
function addColumnIfMissing(db, tableName, columnDefinition) {
if (db.engine === 'postgres') {
try {
db.exec(`ALTER TABLE ${tableName} ADD COLUMN IF NOT EXISTS ${columnDefinition};`);
} catch (error) {
const message = String(error?.message ?? '').toLowerCase();
if (!message.includes('already exists')) {
throw error;
}
}
return;
}
try {
db.exec(`ALTER TABLE ${tableName} ADD COLUMN ${columnDefinition};`);
} catch (error) {
const message = String(error?.message ?? '').toLowerCase();
const duplicateColumn =
message.includes('duplicate column name') || message.includes('already exists');
if (!duplicateColumn) {
throw error;
}
}
}

View File

@@ -0,0 +1,79 @@
services:
app:
build: .
ports:
- "3000:3000"
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_healthy
environment:
PORT: 3000
DB_DRIVER: postgres
POSTGRES_URL: postgres://gigagimbank:gigagimbank@postgres:5432/gigagimbank
WEB_SESSION_SECRET: ${WEB_SESSION_SECRET:-change_me_in_production}
DISCORD_CLIENT_ID: ${DISCORD_CLIENT_ID:-}
DISCORD_CLIENT_SECRET: ${DISCORD_CLIENT_SECRET:-}
DISCORD_REDIRECT_URI: ${DISCORD_REDIRECT_URI:-http://localhost:3000/api/v1/auth/discord/callback}
POST_AUTH_REDIRECT_URL: ${POST_AUTH_REDIRECT_URL:-http://localhost:3000/}
WEB_POST_AUTH_REDIRECT_URL: ${WEB_POST_AUTH_REDIRECT_URL:-http://localhost:3000/}
ENFORCE_GROUP_AUTH: ${ENFORCE_GROUP_AUTH:-1}
INTERNAL_API_TOKEN: ${INTERNAL_API_TOKEN:-}
ACTIVITY_WEBHOOK_QUEUE_DRIVER: bullmq
ACTIVITY_WEBHOOK_QUEUE_ROLE: producer
ACTIVITY_WEBHOOK_QUEUE_REDIS_URL: redis://redis:6379
FEATURE_FLAGS: ${FEATURE_FLAGS:-}
STRIPE_SECRET_KEY: ${STRIPE_SECRET_KEY:-}
STRIPE_WEBHOOK_SECRET: ${STRIPE_WEBHOOK_SECRET:-}
DEV_MOCK_DISCORD_OAUTH: ${DEV_MOCK_DISCORD_OAUTH:-0}
restart: unless-stopped
worker:
build: .
command: ["node", "src/activityWebhookWorker.js"]
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_healthy
environment:
DB_DRIVER: postgres
POSTGRES_URL: postgres://gigagimbank:gigagimbank@postgres:5432/gigagimbank
ACTIVITY_WEBHOOK_QUEUE_DRIVER: bullmq
ACTIVITY_WEBHOOK_QUEUE_REDIS_URL: redis://redis:6379
restart: unless-stopped
postgres:
image: postgres:17-alpine
volumes:
- pgdata:/var/lib/postgresql/data
environment:
POSTGRES_USER: gigagimbank
POSTGRES_PASSWORD: gigagimbank
POSTGRES_DB: gigagimbank
ports:
- "5432:5432"
healthcheck:
test: ["CMD-SHELL", "pg_isready -U gigagimbank"]
interval: 5s
timeout: 3s
retries: 5
restart: unless-stopped
redis:
image: redis:7-alpine
volumes:
- redisdata:/data
ports:
- "6379:6379"
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 5s
timeout: 3s
retries: 5
restart: unless-stopped
volumes:
pgdata:
redisdata:

View File

@@ -0,0 +1,64 @@
services:
app:
build: .
ports:
- "3000:3000"
env_file: .env.docker
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_healthy
environment:
# Override DB/Redis hosts to use docker network names
POSTGRES_URL: postgres://gigagimbank:gigagimbank@postgres:5432/gigagimbank
ACTIVITY_WEBHOOK_QUEUE_REDIS_URL: redis://redis:6379
restart: unless-stopped
worker:
build: .
command: ["node", "src/activityWebhookWorker.js"]
env_file: .env.docker
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_healthy
environment:
POSTGRES_URL: postgres://gigagimbank:gigagimbank@postgres:5432/gigagimbank
ACTIVITY_WEBHOOK_QUEUE_REDIS_URL: redis://redis:6379
restart: unless-stopped
postgres:
image: postgres:17-alpine
volumes:
- pgdata:/var/lib/postgresql/data
environment:
POSTGRES_USER: gigagimbank
POSTGRES_PASSWORD: gigagimbank
POSTGRES_DB: gigagimbank
ports:
- "5432:5432"
healthcheck:
test: ["CMD-SHELL", "pg_isready -U gigagimbank"]
interval: 5s
timeout: 3s
retries: 5
restart: unless-stopped
redis:
image: redis:7-alpine
volumes:
- redisdata:/data
ports:
- "6379:6379"
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 5s
timeout: 3s
retries: 5
restart: unless-stopped
volumes:
pgdata:
redisdata:

View File

@@ -0,0 +1,45 @@
# === Docker Compose dev environment (postgres + redis) ===
# Discord OAuth — mock enabled, no real app needed
DISCORD_CLIENT_ID=
DISCORD_CLIENT_SECRET=
DISCORD_REDIRECT_URI=http://localhost:3000/api/v1/auth/discord/callback
POST_AUTH_REDIRECT_URL=http://localhost:3000/
WEB_POST_AUTH_REDIRECT_URL=http://localhost:3000/
DEV_MOCK_DISCORD_OAUTH=1
DEV_MOCK_DISCORD_ACCOUNTS=[{"id":"dev_qm","discord_id":"dev_discord_qm","display_name":"Dev Quartermaster"}]
# Server
PORT=3000
# Database — postgres via docker-compose
DB_DRIVER=postgres
POSTGRES_URL=postgres://gigagimbank:gigagimbank@localhost:5432/gigagimbank
# Auth / sessions
ENFORCE_GROUP_AUTH=0
WEB_SESSION_SECRET=docker_dev_session_secret
INTERNAL_API_TOKEN=docker_dev_internal_token
WEB_SESSION_INACTIVITY_DAYS=30
GROUP_INVITE_TTL_HOURS=168
OPEN_INVITE_TTL_HOURS=24
# Activity webhook queue — Redis via docker-compose
ACTIVITY_WEBHOOK_QUEUE_DRIVER=bullmq
ACTIVITY_WEBHOOK_QUEUE_ROLE=producer
ACTIVITY_WEBHOOK_QUEUE_REDIS_URL=redis://localhost:6379
ACTIVITY_WEBHOOK_QUEUE_NAME=ggb-activity-webhook
# Dev toggles
ALLOW_FALLBACK_ACCOUNT_HASH=0
SHOW_UNTRADEABLE_ITEMS=0
SHOW_UNKNOWN_ITEMS=0
DEV_WIPE_NON_TEST_DATA_ON_BOOT=1
DISABLE_BOOT_SYNC_JOBS=0
# Sync intervals
ITEM_CATALOG_SYNC_MS=86400000
HISCORES_SYNC_MS=900000
# Feature flags
FEATURE_FLAGS=

View File

@@ -0,0 +1,45 @@
# === Docker Compose dev environment (postgres + redis) ===
# Discord OAuth — mock enabled, no real app needed
DISCORD_CLIENT_ID=
DISCORD_CLIENT_SECRET=
DISCORD_REDIRECT_URI=http://app.gigagimbank.orb.local/api/v1/auth/discord/callback
POST_AUTH_REDIRECT_URL=http://app.gigagimbank.orb.local/
WEB_POST_AUTH_REDIRECT_URL=http://app.gigagimbank.orb.local/
DEV_MOCK_DISCORD_OAUTH=1
DEV_MOCK_DISCORD_ACCOUNTS=[{"id":"dev_qm","discord_id":"dev_discord_qm","display_name":"Dev Quartermaster"},{"id":"dev_mm","discord_id":"dev_discord_mm","display_name":"Dev Member"}]
# Server
PORT=3000
# Database — postgres via docker-compose
DB_DRIVER=postgres
POSTGRES_URL=postgres://gigagimbank:gigagimbank@localhost:5432/gigagimbank
# Auth / sessions
ENFORCE_GROUP_AUTH=0
WEB_SESSION_SECRET=docker_dev_session_secret
INTERNAL_API_TOKEN=docker_dev_internal_token
WEB_SESSION_INACTIVITY_DAYS=30
GROUP_INVITE_TTL_HOURS=168
OPEN_INVITE_TTL_HOURS=24
# Activity webhook queue — Redis via docker-compose
ACTIVITY_WEBHOOK_QUEUE_DRIVER=bullmq
ACTIVITY_WEBHOOK_QUEUE_ROLE=producer
ACTIVITY_WEBHOOK_QUEUE_REDIS_URL=redis://localhost:6379
ACTIVITY_WEBHOOK_QUEUE_NAME=ggb-activity-webhook
# Dev toggles
ALLOW_FALLBACK_ACCOUNT_HASH=1
SHOW_UNTRADEABLE_ITEMS=0
SHOW_UNKNOWN_ITEMS=0
DEV_WIPE_NON_TEST_DATA_ON_BOOT=1
DISABLE_BOOT_SYNC_JOBS=0
# Sync intervals
ITEM_CATALOG_SYNC_MS=86400000
HISCORES_SYNC_MS=900000
# Feature flags
FEATURE_FLAGS=

View File

@@ -0,0 +1,45 @@
# === Docker Compose dev environment (postgres + redis) ===
# Discord OAuth — mock enabled, no real app needed
DISCORD_CLIENT_ID=
DISCORD_CLIENT_SECRET=
DISCORD_REDIRECT_URI=http://app.gigagimbank.orb.local/api/v1/auth/discord/callback
POST_AUTH_REDIRECT_URL=http://app.gigagimbank.orb.local/
WEB_POST_AUTH_REDIRECT_URL=http://app.gigagimbank.orb.local/
DEV_MOCK_DISCORD_OAUTH=1
DEV_MOCK_DISCORD_ACCOUNTS=[{"id":"dev_qm","discord_id":"dev_discord_qm","display_name":"Dev Quartermaster"},{"id":"dev_mm","discord_id":"dev_discord_mm","display_name":"Dev Member"}]
# Server
PORT=3000
# Database — postgres via docker-compose
DB_DRIVER=postgres
POSTGRES_URL=postgres://gigagimbank:gigagimbank@localhost:5432/gigagimbank
# Auth / sessions
ENFORCE_GROUP_AUTH=0
WEB_SESSION_SECRET=docker_dev_session_secret
INTERNAL_API_TOKEN=docker_dev_internal_token
WEB_SESSION_INACTIVITY_DAYS=30
GROUP_INVITE_TTL_HOURS=168
OPEN_INVITE_TTL_HOURS=24
# Activity webhook queue — Redis via docker-compose
ACTIVITY_WEBHOOK_QUEUE_DRIVER=bullmq
ACTIVITY_WEBHOOK_QUEUE_ROLE=producer
ACTIVITY_WEBHOOK_QUEUE_REDIS_URL=redis://localhost:6379
ACTIVITY_WEBHOOK_QUEUE_NAME=ggb-activity-webhook
# Dev toggles
ALLOW_FALLBACK_ACCOUNT_HASH=1
SHOW_UNTRADEABLE_ITEMS=0
SHOW_UNKNOWN_ITEMS=0
DEV_WIPE_NON_TEST_DATA_ON_BOOT=1
DISABLE_BOOT_SYNC_JOBS=1
# Sync intervals
ITEM_CATALOG_SYNC_MS=86400000
HISCORES_SYNC_MS=900000
# Feature flags
FEATURE_FLAGS=

View File

@@ -0,0 +1,45 @@
# === Docker Compose dev environment (postgres + redis) ===
# Discord OAuth — mock enabled, no real app needed
DISCORD_CLIENT_ID=
DISCORD_CLIENT_SECRET=
DISCORD_REDIRECT_URI=http://app.gigagimbank.orb.local/api/v1/auth/discord/callback
POST_AUTH_REDIRECT_URL=http://app.gigagimbank.orb.local/
WEB_POST_AUTH_REDIRECT_URL=http://app.gigagimbank.orb.local/
DEV_MOCK_DISCORD_OAUTH=1
DEV_MOCK_DISCORD_ACCOUNTS=[{"id":"dev_qm","discord_id":"dev_discord_qm","display_name":"Dev Quartermaster"},{"id":"dev_mm","discord_id":"dev_discord_mm","display_name":"Dev Member"}]
# Server
PORT=3000
# Database — postgres via docker-compose
DB_DRIVER=postgres
POSTGRES_URL=postgres://gigagimbank:gigagimbank@localhost:5432/gigagimbank
# Auth / sessions
ENFORCE_GROUP_AUTH=0
WEB_SESSION_SECRET=docker_dev_session_secret
INTERNAL_API_TOKEN=docker_dev_internal_token
WEB_SESSION_INACTIVITY_DAYS=30
GROUP_INVITE_TTL_HOURS=168
OPEN_INVITE_TTL_HOURS=24
# Activity webhook queue — Redis via docker-compose
ACTIVITY_WEBHOOK_QUEUE_DRIVER=bullmq
ACTIVITY_WEBHOOK_QUEUE_ROLE=producer
ACTIVITY_WEBHOOK_QUEUE_REDIS_URL=redis://localhost:6379
ACTIVITY_WEBHOOK_QUEUE_NAME=ggb-activity-webhook
# Dev toggles
ALLOW_FALLBACK_ACCOUNT_HASH=1
SHOW_UNTRADEABLE_ITEMS=0
SHOW_UNKNOWN_ITEMS=0
DEV_WIPE_NON_TEST_DATA_ON_BOOT=0
DISABLE_BOOT_SYNC_JOBS=0
# Sync intervals
ITEM_CATALOG_SYNC_MS=86400000
HISCORES_SYNC_MS=900000
# Feature flags
FEATURE_FLAGS=

View File

@@ -0,0 +1,45 @@
# === Docker Compose dev environment (postgres + redis) ===
# Discord OAuth — mock enabled, no real app needed
DISCORD_CLIENT_ID=
DISCORD_CLIENT_SECRET=
DISCORD_REDIRECT_URI=http://app.gigagimbank.orb.local/api/v1/auth/discord/callback
POST_AUTH_REDIRECT_URL=http://app.gigagimbank.orb.local/
WEB_POST_AUTH_REDIRECT_URL=http://app.gigagimbank.orb.local/
DEV_MOCK_DISCORD_OAUTH=1
DEV_MOCK_DISCORD_ACCOUNTS=[{"id":"dev_qm","discord_id":"dev_discord_qm","display_name":"Dev Quartermaster"},{"id":"dev_mm","discord_id":"dev_discord_mm","display_name":"Dev Member"}]
# Server
PORT=3000
# Database — postgres via docker-compose
DB_DRIVER=postgres
POSTGRES_URL=postgres://gigagimbank:gigagimbank@localhost:5432/gigagimbank
# Auth / sessions
ENFORCE_GROUP_AUTH=0
WEB_SESSION_SECRET=docker_dev_session_secret
INTERNAL_API_TOKEN=docker_dev_internal_token
WEB_SESSION_INACTIVITY_DAYS=30
GROUP_INVITE_TTL_HOURS=168
OPEN_INVITE_TTL_HOURS=24
# Activity webhook queue — Redis via docker-compose
ACTIVITY_WEBHOOK_QUEUE_DRIVER=bullmq
ACTIVITY_WEBHOOK_QUEUE_ROLE=producer
ACTIVITY_WEBHOOK_QUEUE_REDIS_URL=redis://localhost:6379
ACTIVITY_WEBHOOK_QUEUE_NAME=ggb-activity-webhook
# Dev toggles
ALLOW_FALLBACK_ACCOUNT_HASH=1
SHOW_UNTRADEABLE_ITEMS=0
SHOW_UNKNOWN_ITEMS=0
DEV_WIPE_NON_TEST_DATA_ON_BOOT=1
DISABLE_BOOT_SYNC_JOBS=0
# Sync intervals
ITEM_CATALOG_SYNC_MS=86400000
HISCORES_SYNC_MS=900000
# Feature flags
FEATURE_FLAGS=

View File

@@ -0,0 +1,111 @@
import { parentPort, workerData } from 'node:worker_threads';
import { Client } from 'pg';
import { convertSqliteParamsToPostgres } from './dbPostgres.js';
// Worker-local mutable state: the MessagePort back to the parent thread,
// the lazily created pg Client, and whether that client is connected.
let port = null;
let client = null;
let connected = false;
function toSerializableError(error) {
return {
message: error instanceof Error ? error.message : String(error),
code: error?.code || null,
detail: error?.detail || null,
};
}
// Lazily open the shared Postgres client on first use.
//
// Fix: register an 'error' listener on the client. Without it, a
// connection-level failure (e.g. server restart) would both crash the
// worker with an unhandled 'error' event and leave `connected === true`
// pointing at a dead client; resetting the cached state here lets the next
// request reconnect cleanly. This matches the hardened variant of this
// worker elsewhere in the repo.
async function ensureConnected() {
  if (connected) {
    return;
  }
  client = new Client(workerData.postgresConfig);
  client.on('error', () => {
    // Drop the cached client so the next request reconnects instead of
    // querying a dead socket.
    connected = false;
    client = null;
  });
  await client.connect();
  connected = true;
}
// Close and forget the shared Postgres client, if one exists.
//
// Fix: wrap `client.end()` in a catch. The original let a close-time
// rejection escape, which would surface as an unhandled rejection from the
// 'beforeExit' hook; shutdown failures are harmless here because the
// connection is going away regardless (matches the hardened worker variant).
async function closeClient() {
  if (!client) {
    return;
  }
  try {
    await client.end();
  } catch {
    // Best-effort shutdown: ignore errors while closing.
  } finally {
    client = null;
    connected = false;
  }
}
async function runPrepared(mode, sqlText, params) {
  // Execute a prepared-style statement and shape the result like the
  // sqlite API the bridge emulates ('all' / 'get' / 'run').
  // '?' placeholders are rewritten to $1..$n only when params are bound.
  const queryText = params.length ? convertSqliteParamsToPostgres(sqlText) : sqlText;
  const outcome = await client.query(queryText, params);
  switch (mode) {
    case 'all':
      return outcome.rows || [];
    case 'get':
      return outcome.rows?.[0] ?? null;
    case 'run':
      // Mirror sqlite's run() result; Postgres has no rowid concept.
      return { changes: Number(outcome.rowCount || 0), lastInsertRowid: null };
    default:
      throw new Error(`Unsupported prepared mode: ${mode}`);
  }
}
async function handleRequest(message) {
  // Process one request from the parent thread and always answer it with a
  // matching { id, ok, ... } envelope. Requests without an integer id are
  // silently dropped (there is nothing to correlate a reply with).
  const requestId = message?.id;
  if (!Number.isInteger(requestId)) {
    return;
  }
  try {
    await ensureConnected();
    const op = message?.op;
    let payload = null;
    switch (op) {
      case 'exec':
        await client.query(String(message.sql || ''));
        break;
      case 'prepared':
        payload = await runPrepared(
          String(message.mode || ''),
          String(message.sql || ''),
          Array.isArray(message.params) ? message.params : []
        );
        break;
      case 'close':
        await closeClient();
        payload = { closed: true };
        break;
      default:
        throw new Error(`Unsupported op: ${String(op || '')}`);
    }
    port.postMessage({ id: requestId, ok: true, result: payload });
  } catch (error) {
    port.postMessage({ id: requestId, ok: false, error: toSerializableError(error) });
  }
}
// Handshake: the parent thread first sends an 'attach_port' message carrying
// a dedicated MessagePort; all subsequent DB requests arrive on that port.
parentPort.on('message', async (message) => {
if (message?.type !== 'attach_port') {
return;
}
port = message.port;
port.on('message', async (request) => {
await handleRequest(request);
});
// Signal readiness so the parent can start sending requests.
port.postMessage({
type: 'ready',
});
});
// Best-effort cleanup: close the Postgres connection when the worker's event
// loop is about to drain.
process.on('beforeExit', async () => {
await closeClient();
});

View File

@@ -0,0 +1,120 @@
import { parentPort, workerData } from 'node:worker_threads';
import { Client } from 'pg';
import { convertSqliteParamsToPostgres } from './dbPostgres.js';
// Worker-local mutable state: the MessagePort back to the parent thread,
// the lazily created pg Client, and whether that client is connected.
let port = null;
let client = null;
let connected = false;
function toSerializableError(error) {
return {
message: error instanceof Error ? error.message : String(error),
code: error?.code || null,
detail: error?.detail || null,
};
}
async function ensureConnected() {
  // Lazily open the shared Postgres client on first use.
  if (connected) {
    return;
  }
  const fresh = new Client(workerData.postgresConfig);
  fresh.on('error', (err) => {
    // A connection-level error invalidates the cached client; the next
    // request reconnects from scratch instead of using a dead socket.
    connected = false;
    client = null;
  });
  client = fresh;
  await fresh.connect();
  connected = true;
}
async function closeClient() {
  // Close and forget the shared Postgres client, if one exists.
  const active = client;
  if (!active) {
    return;
  }
  try {
    await active.end();
  } catch {
    // Ignore shutdown-time failures; the connection is going away anyway.
  } finally {
    client = null;
    connected = false;
  }
}
async function runPrepared(mode, sqlText, bindings) {
  // Execute a prepared-style statement and shape the result like the
  // sqlite API the bridge emulates ('all' / 'get' / 'run').
  // '?' placeholders are rewritten to $1..$n only when bindings exist.
  const text = bindings.length ? convertSqliteParamsToPostgres(sqlText) : sqlText;
  const outcome = await client.query(text, bindings);
  switch (mode) {
    case 'all':
      return outcome.rows || [];
    case 'get':
      return outcome.rows?.[0] ?? null;
    case 'run':
      // Mirror sqlite's run() result; Postgres has no rowid concept.
      return { changes: Number(outcome.rowCount || 0), lastInsertRowid: null };
    default:
      throw new Error(`Unsupported prepared mode: ${mode}`);
  }
}
async function handleRequest(message) {
  // Process one request from the parent thread, always replying with a
  // { id, ok, ... } envelope. Requests without an integer id are dropped
  // because no reply could be correlated with them.
  const reqId = message?.id;
  if (!Number.isInteger(reqId)) {
    return;
  }
  try {
    await ensureConnected();
    const operation = message?.op;
    let body = null;
    switch (operation) {
      case 'exec':
        await client.query(String(message.sql || ''));
        break;
      case 'prepared':
        body = await runPrepared(
          String(message.mode || ''),
          String(message.sql || ''),
          Array.isArray(message.params) ? message.params : []
        );
        break;
      case 'close':
        await closeClient();
        body = { closed: true };
        break;
      default:
        throw new Error(`Unsupported op: ${String(operation || '')}`);
    }
    port.postMessage({ id: reqId, ok: true, result: body });
  } catch (error) {
    port.postMessage({ id: reqId, ok: false, error: toSerializableError(error) });
  }
}
// Handshake: the parent thread first sends an 'attach_port' message carrying
// a dedicated MessagePort; all subsequent DB requests arrive on that port.
parentPort.on('message', async (message) => {
if (message?.type !== 'attach_port') {
return;
}
port = message.port;
// Keep the event loop alive while the port has pending work (transferred
// ports may arrive unref'd).
if (typeof port.ref === 'function') {
port.ref();
}
port.on('message', async (request) => {
await handleRequest(request);
});
// Signal readiness so the parent can start sending requests.
port.postMessage({
type: 'ready',
});
});
// Best-effort cleanup: close the Postgres connection when the worker's event
// loop is about to drain.
process.on('beforeExit', async () => {
await closeClient();
});

View File

@@ -0,0 +1,35 @@
FROM node:22-alpine AS base
WORKDIR /app
# Install dependencies
COPY package.json package-lock.json* ./
RUN npm ci --omit=dev
# Copy source
COPY src/ src/
COPY config/ config/
COPY public/ public/
COPY web/ web/
# Build the React frontend
FROM node:22-alpine AS web-build
WORKDIR /app
COPY package.json package-lock.json* ./
RUN npm ci
COPY web/ web/
RUN npx vite build --config web/vite.config.js
# Final image
FROM node:22-alpine
WORKDIR /app
COPY --from=base /app/node_modules node_modules/
COPY --from=base /app/package.json .
COPY --from=base /app/src/ src/
COPY --from=base /app/config/ config/
COPY --from=base /app/public/ public/
COPY --from=web-build /app/web/dist/ web/dist/
EXPOSE 3000
CMD ["node", "src/server.js"]

View File

@@ -0,0 +1,11 @@
node_modules
data
.git
.env
*.sqlite
runelite-plugin
e2e
test
.gitea
.claude
web/dist

View File

@@ -0,0 +1,437 @@
"""Tests for main.py entry point, error handling, and integration."""
import asyncio
import os
import signal
import sys
from datetime import datetime, timezone
from io import StringIO
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from main import (
AppFactoryError,
ClarificationTimeout,
ConfigurationError,
DockerDaemonError,
GracefulShutdown,
GitError,
MCPConnectionError,
main,
parse_args,
print_summary,
run_factory,
validate_environment,
)
# ---------------------------------------------------------------------------
# parse_args tests
# ---------------------------------------------------------------------------
class TestParseArgs:
    """CLI argument parsing behaviour of ``parse_args``."""

    def test_prompt_required(self):
        # argparse exits the process (SystemExit) when --prompt is absent.
        with pytest.raises(SystemExit):
            parse_args([])

    def test_prompt_only(self):
        args = parse_args(["--prompt", "Build a REST API"])
        assert args.prompt == "Build a REST API"
        assert args.repo_path == os.getcwd()
        assert args.max_concurrent_tasks == 5
        assert args.debug is False
        assert args.dry_run is False

    def test_all_options(self):
        cli = [
            "--prompt", "Build an app",
            "--repo-path", "/tmp/project",
            "--max-concurrent-tasks", "3",
            "--debug",
            "--dry-run",
        ]
        args = parse_args(cli)
        assert args.prompt == "Build an app"
        assert args.repo_path == "/tmp/project"
        assert args.max_concurrent_tasks == 3
        assert args.debug is True
        assert args.dry_run is True

    def test_max_concurrent_tasks_default(self):
        assert parse_args(["--prompt", "test"]).max_concurrent_tasks == 5

    def test_repo_path_default_is_cwd(self):
        assert parse_args(["--prompt", "test"]).repo_path == os.getcwd()
# ---------------------------------------------------------------------------
# validate_environment tests
# ---------------------------------------------------------------------------
class TestValidateEnvironment:
    """validate_environment checks: API key presence, docker daemon, git binary."""

    def test_valid_config(self):
        env = {
            "ANTHROPIC_API_KEY": "sk-test-key",
            "LANGSMITH_API_KEY": "ls-key",
            "LANGSMITH_PROJECT": "my-project",
        }
        # Stub the docker ping and git lookup so only env-var handling is tested.
        with patch.dict(os.environ, env, clear=False), \
                patch("main.subprocess.run") as mock_run, \
                patch("main.shutil.which", return_value="/usr/bin/git"):
            mock_run.return_value = MagicMock(returncode=0)
            config = validate_environment()
            assert config["api_key"] == "sk-test-key"
            assert config["langsmith_api_key"] == "ls-key"
            assert config["langsmith_project"] == "my-project"

    def test_missing_api_key(self):
        """An empty environment must raise ConfigurationError mentioning the key."""
        with patch.dict(os.environ, {}, clear=True):
            with pytest.raises(ConfigurationError, match="ANTHROPIC_API_KEY"):
                validate_environment()

    def test_docker_not_running(self):
        env = {"ANTHROPIC_API_KEY": "sk-test"}
        with patch.dict(os.environ, env, clear=False), \
                patch("main.subprocess.run") as mock_run:
            # Non-zero exit from the docker probe means the daemon is down.
            mock_run.return_value = MagicMock(returncode=1)
            with pytest.raises(DockerDaemonError, match="not running"):
                validate_environment()

    def test_docker_not_found(self):
        env = {"ANTHROPIC_API_KEY": "sk-test"}
        with patch.dict(os.environ, env, clear=False), \
                patch("main.subprocess.run", side_effect=FileNotFoundError):
            with pytest.raises(DockerDaemonError, match="not found"):
                validate_environment()

    def test_docker_timeout(self):
        import subprocess as sp
        env = {"ANTHROPIC_API_KEY": "sk-test"}
        with patch.dict(os.environ, env, clear=False), \
                patch("main.subprocess.run", side_effect=sp.TimeoutExpired("docker", 10)):
            with pytest.raises(DockerDaemonError, match="not responding"):
                validate_environment()

    def test_git_not_found(self):
        env = {"ANTHROPIC_API_KEY": "sk-test"}
        with patch.dict(os.environ, env, clear=False), \
                patch("main.subprocess.run") as mock_run, \
                patch("main.shutil.which", return_value=None):
            mock_run.return_value = MagicMock(returncode=0)
            with pytest.raises(GitError, match="git not found"):
                validate_environment()
# ---------------------------------------------------------------------------
# print_summary tests
# ---------------------------------------------------------------------------
class TestPrintSummary:
    """print_summary stdout formatting for various result shapes."""

    def test_basic_summary(self, capsys):
        result = {
            "completed_tasks": ["1", "2"],
            "tasks": [{"id": 1}, {"id": 2}, {"id": 3}],
            "errors": [],
            "iteration_count": 5,
        }
        start = datetime.now(timezone.utc)
        print_summary(result, start)
        captured = capsys.readouterr().out
        # Completed-over-total counter plus the iterations line.
        assert "2 / 3" in captured
        assert "Iterations" in captured

    def test_summary_with_errors(self, capsys):
        result = {
            "completed_tasks": [],
            "tasks": [{"id": 1}],
            "errors": ["Error one", "Error two"],
            "iteration_count": 1,
        }
        start = datetime.now(timezone.utc)
        print_summary(result, start)
        captured = capsys.readouterr().out
        assert "Errors" in captured
        assert "Error one" in captured

    def test_summary_truncates_many_errors(self, capsys):
        # 10 errors -> presumably only the first 5 are shown; verify the tail note.
        result = {
            "completed_tasks": [],
            "tasks": [],
            "errors": [f"Error {i}" for i in range(10)],
            "iteration_count": 0,
        }
        start = datetime.now(timezone.utc)
        print_summary(result, start)
        captured = capsys.readouterr().out
        assert "and 5 more" in captured

    def test_summary_with_langsmith(self, capsys):
        result = {
            "completed_tasks": [],
            "tasks": [],
            "errors": [],
            "iteration_count": 0,
        }
        start = datetime.now(timezone.utc)
        # LANGSMITH_PROJECT in the environment should surface in the summary.
        with patch.dict(os.environ, {"LANGSMITH_PROJECT": "test-proj"}):
            print_summary(result, start)
        captured = capsys.readouterr().out
        assert "test-proj" in captured

    def test_summary_empty_result(self, capsys):
        result = {
            "completed_tasks": [],
            "tasks": [],
            "errors": [],
            "iteration_count": 0,
        }
        start = datetime.now(timezone.utc)
        print_summary(result, start)
        captured = capsys.readouterr().out
        assert "0 / 0" in captured
# ---------------------------------------------------------------------------
# GracefulShutdown tests
# ---------------------------------------------------------------------------
class TestGracefulShutdown:
    """Signal handling: first SIGINT/SIGTERM requests shutdown, second force-exits."""

    def test_initial_state(self):
        with patch("main.signal.signal"):
            gs = GracefulShutdown()
        assert gs.shutdown_requested is False
        assert gs.workspace_manager is None

    def test_registers_signals(self):
        with patch("main.signal.signal") as mock_signal:
            gs = GracefulShutdown()
        # Both SIGINT and SIGTERM must be routed to the same handler.
        calls = [c[0] for c in mock_signal.call_args_list]
        assert (signal.SIGINT, gs._handler) in calls
        assert (signal.SIGTERM, gs._handler) in calls

    def test_first_signal_sets_flag(self):
        with patch("main.signal.signal"):
            gs = GracefulShutdown()
        # Simulate first signal
        with patch("builtins.print"):
            gs._handler(signal.SIGINT, None)
        assert gs.shutdown_requested is True

    def test_second_signal_force_exits(self):
        with patch("main.signal.signal"):
            gs = GracefulShutdown()
        # With shutdown already requested, a second signal must hard-exit.
        gs.shutdown_requested = True
        with patch("builtins.print"), pytest.raises(SystemExit):
            gs._handler(signal.SIGINT, None)

    def test_first_signal_triggers_cleanup(self):
        mock_ws = MagicMock()
        mock_ws.cleanup_all = AsyncMock()
        with patch("main.signal.signal"):
            gs = GracefulShutdown(workspace_manager=mock_ws)
        # Simulate handler with a running loop
        loop = asyncio.new_event_loop()

        async def _run():
            with patch("builtins.print"):
                gs._handler(signal.SIGINT, None)

        loop.run_until_complete(_run())
        loop.close()
        assert gs.shutdown_requested is True
# ---------------------------------------------------------------------------
# run_factory tests
# ---------------------------------------------------------------------------
class TestRunFactory:
    """run_factory wiring: every component constructed once, orchestrator run awaited."""

    @pytest.mark.asyncio
    async def test_initializes_all_components(self):
        mock_orchestrator_instance = MagicMock()
        mock_orchestrator_instance.run = AsyncMock(return_value={
            "completed_tasks": ["1"],
            "tasks": [{"id": 1}],
            "errors": [],
            "iteration_count": 1,
        })
        args = MagicMock()
        args.prompt = "Build a REST API"
        args.repo_path = "/tmp/test-repo"
        config = {
            "api_key": "sk-test-key",
            "langsmith_api_key": "",
            "langsmith_project": "app-factory",
        }
        # Patch each collaborator at its import site so no real clients are built.
        with patch("app_factory.core.observability.ObservabilityManager") as mock_obs, \
                patch("app_factory.core.workspace.WorkspaceManager") as mock_ws, \
                patch("app_factory.core.architecture_tracker.ArchitectureTracker") as mock_arch, \
                patch("app_factory.agents.pm_agent.PMAgent") as mock_pm, \
                patch("app_factory.agents.task_agent.TaskMasterAgent") as mock_task, \
                patch("app_factory.agents.dev_agent.DevAgentManager") as mock_dev, \
                patch("app_factory.agents.qa_agent.QAAgent") as mock_qa, \
                patch("app_factory.core.graph.AppFactoryOrchestrator") as mock_orch, \
                patch("main.GracefulShutdown", create=True):
            mock_orch.return_value = mock_orchestrator_instance
            mock_ws.return_value = MagicMock(docker_client=MagicMock())
            result = await run_factory(args, config)
            assert result["completed_tasks"] == ["1"]
            mock_obs.assert_called_once()
            mock_ws.assert_called_once_with(repo_path="/tmp/test-repo")
            mock_arch.assert_called_once_with(api_key="sk-test-key")
            mock_pm.assert_called_once_with(api_key="sk-test-key")
            mock_task.assert_called_once_with(project_root="/tmp/test-repo")
            mock_dev.assert_called_once()
            mock_qa.assert_called_once_with(repo_path="/tmp/test-repo", api_key="sk-test-key")
            mock_orch.assert_called_once()
            mock_orchestrator_instance.run.assert_awaited_once_with("Build a REST API")
# ---------------------------------------------------------------------------
# main() integration tests
# ---------------------------------------------------------------------------
class TestMainEntryPoint:
    """main() integration: dry-run path, every error class exits with code 1,
    debug flag configures logging, and a successful run prints a summary."""

    def test_dry_run_validates_without_executing(self):
        with patch("main.load_dotenv"), \
                patch("main.parse_args") as mock_args, \
                patch("main.validate_environment") as mock_validate, \
                patch("builtins.print") as mock_print:
            mock_args.return_value = MagicMock(
                prompt="test", debug=False, dry_run=True,
            )
            mock_validate.return_value = {"api_key": "sk-test"}
            main()
            # Dry-run must end at validation and report success.
            mock_print.assert_called_with(
                "Dry-run: configuration is valid. All checks passed."
            )

    def test_configuration_error_exits(self):
        with patch("main.load_dotenv"), \
                patch("main.parse_args") as mock_args, \
                patch("main.validate_environment", side_effect=ConfigurationError("no key")), \
                patch("builtins.print"), \
                pytest.raises(SystemExit) as exc_info:
            mock_args.return_value = MagicMock(debug=False, dry_run=False)
            main()
        assert exc_info.value.code == 1

    def test_docker_error_exits(self):
        with patch("main.load_dotenv"), \
                patch("main.parse_args") as mock_args, \
                patch("main.validate_environment", side_effect=DockerDaemonError("not running")), \
                patch("builtins.print"), \
                pytest.raises(SystemExit) as exc_info:
            mock_args.return_value = MagicMock(debug=False, dry_run=False)
            main()
        assert exc_info.value.code == 1

    def test_git_error_exits(self):
        with patch("main.load_dotenv"), \
                patch("main.parse_args") as mock_args, \
                patch("main.validate_environment", side_effect=GitError("no git")), \
                patch("builtins.print"), \
                pytest.raises(SystemExit) as exc_info:
            mock_args.return_value = MagicMock(debug=False, dry_run=False)
            main()
        assert exc_info.value.code == 1

    def test_clarification_timeout_exits(self):
        # Error raised from inside the async run (asyncio.run) must still exit 1.
        with patch("main.load_dotenv"), \
                patch("main.parse_args") as mock_args, \
                patch("main.validate_environment") as mock_validate, \
                patch("main.asyncio.run", side_effect=ClarificationTimeout("task 5")), \
                patch("builtins.print"), \
                pytest.raises(SystemExit) as exc_info:
            mock_args.return_value = MagicMock(
                prompt="test", debug=False, dry_run=False,
            )
            mock_validate.return_value = {"api_key": "sk-test"}
            main()
        assert exc_info.value.code == 1

    def test_generic_exception_exits(self):
        with patch("main.load_dotenv"), \
                patch("main.parse_args") as mock_args, \
                patch("main.validate_environment") as mock_validate, \
                patch("main.asyncio.run", side_effect=RuntimeError("boom")), \
                patch("builtins.print"), \
                pytest.raises(SystemExit) as exc_info:
            mock_args.return_value = MagicMock(
                prompt="test", debug=False, dry_run=False,
            )
            mock_validate.return_value = {"api_key": "sk-test"}
            main()
        assert exc_info.value.code == 1

    def test_debug_flag_sets_logging(self):
        with patch("main.load_dotenv"), \
                patch("main.parse_args") as mock_args, \
                patch("main.validate_environment") as mock_validate, \
                patch("main.logging.basicConfig") as mock_logging, \
                patch("builtins.print"):
            mock_args.return_value = MagicMock(
                prompt="test", debug=True, dry_run=True,
            )
            mock_validate.return_value = {"api_key": "sk-test"}
            main()
            mock_logging.assert_called_once()
            call_kwargs = mock_logging.call_args[1]
            assert call_kwargs["level"] == 10  # logging.DEBUG

    def test_successful_run(self):
        mock_result = {
            "completed_tasks": ["1"],
            "tasks": [{"id": 1}],
            "errors": [],
            "iteration_count": 3,
        }
        with patch("main.load_dotenv"), \
                patch("main.parse_args") as mock_args, \
                patch("main.validate_environment") as mock_validate, \
                patch("main.asyncio.run", return_value=mock_result), \
                patch("main.print_summary") as mock_summary:
            mock_args.return_value = MagicMock(
                prompt="test", debug=False, dry_run=False,
            )
            mock_validate.return_value = {"api_key": "sk-test"}
            main()
            mock_summary.assert_called_once()
            # Verify the result was passed to print_summary
            assert mock_summary.call_args[0][0] == mock_result
# ---------------------------------------------------------------------------
# Exception hierarchy tests
# ---------------------------------------------------------------------------
class TestExceptionHierarchy:
    """All domain errors derive from AppFactoryError, which derives from Exception."""

    def test_all_exceptions_inherit_from_base(self):
        domain_errors = (
            ClarificationTimeout,
            DockerDaemonError,
            GitError,
            MCPConnectionError,
            ConfigurationError,
        )
        for exc_type in domain_errors:
            assert issubclass(exc_type, AppFactoryError)

    def test_base_inherits_from_exception(self):
        assert issubclass(AppFactoryError, Exception)

View File

@@ -0,0 +1,433 @@
"""Tests for main.py entry point, error handling, and integration."""
import asyncio
import os
import signal
import sys
from datetime import datetime, timezone
from io import StringIO
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from main import (
AppFactoryError,
ClarificationTimeout,
ConfigurationError,
DockerDaemonError,
GracefulShutdown,
GitError,
MCPConnectionError,
main,
parse_args,
print_summary,
run_factory,
validate_environment,
)
# ---------------------------------------------------------------------------
# parse_args tests
# ---------------------------------------------------------------------------
class TestParseArgs:
    """CLI parsing: --prompt is mandatory; all other options have defaults."""

    def test_prompt_required(self):
        # argparse exits the process when the required flag is missing.
        with pytest.raises(SystemExit):
            parse_args([])

    def test_prompt_only(self):
        parsed = parse_args(["--prompt", "Build a REST API"])
        assert parsed.prompt == "Build a REST API"
        assert parsed.repo_path == os.getcwd()
        assert parsed.max_concurrent_tasks == 5
        assert parsed.debug is False
        assert parsed.dry_run is False

    def test_all_options(self):
        argv = [
            "--prompt", "Build an app",
            "--repo-path", "/tmp/project",
            "--max-concurrent-tasks", "3",
            "--debug",
            "--dry-run",
        ]
        parsed = parse_args(argv)
        assert parsed.prompt == "Build an app"
        assert parsed.repo_path == "/tmp/project"
        assert parsed.max_concurrent_tasks == 3
        assert parsed.debug is True
        assert parsed.dry_run is True

    def test_max_concurrent_tasks_default(self):
        assert parse_args(["--prompt", "test"]).max_concurrent_tasks == 5

    def test_repo_path_default_is_cwd(self):
        assert parse_args(["--prompt", "test"]).repo_path == os.getcwd()
# ---------------------------------------------------------------------------
# validate_environment tests
# ---------------------------------------------------------------------------
class TestValidateEnvironment:
    """validate_environment checks: optional API key, docker daemon, git binary."""

    def test_valid_config(self):
        env = {
            "ANTHROPIC_API_KEY": "sk-test-key",
            "LANGSMITH_API_KEY": "ls-key",
            "LANGSMITH_PROJECT": "my-project",
        }
        # Stub the docker ping and git lookup so only env-var handling is tested.
        with patch.dict(os.environ, env, clear=False), \
                patch("main.subprocess.run") as mock_run, \
                patch("main.shutil.which", return_value="/usr/bin/git"):
            mock_run.return_value = MagicMock(returncode=0)
            config = validate_environment()
            assert config["api_key"] == "sk-test-key"
            assert config["langsmith_api_key"] == "ls-key"
            assert config["langsmith_project"] == "my-project"

    def test_missing_api_key_still_works(self):
        """API key is optional (Claude Code OAuth supported)."""
        with patch.dict(os.environ, {}, clear=True), \
                patch("main.subprocess.run") as mock_run, \
                patch("main.shutil.which", return_value="/usr/bin/git"):
            mock_run.return_value = MagicMock(returncode=0)
            config = validate_environment()
            # With no key anywhere, validation passes and the key is empty.
            assert config["api_key"] == ""

    def test_docker_not_running(self):
        with patch("main.subprocess.run") as mock_run:
            # Non-zero exit from the docker probe means the daemon is down.
            mock_run.return_value = MagicMock(returncode=1)
            with pytest.raises(DockerDaemonError, match="not running"):
                validate_environment()

    def test_docker_not_found(self):
        with patch("main.subprocess.run", side_effect=FileNotFoundError):
            with pytest.raises(DockerDaemonError, match="not found"):
                validate_environment()

    def test_docker_timeout(self):
        import subprocess as sp
        with patch("main.subprocess.run", side_effect=sp.TimeoutExpired("docker", 10)):
            with pytest.raises(DockerDaemonError, match="not responding"):
                validate_environment()

    def test_git_not_found(self):
        with patch("main.subprocess.run") as mock_run, \
                patch("main.shutil.which", return_value=None):
            mock_run.return_value = MagicMock(returncode=0)
            with pytest.raises(GitError, match="git not found"):
                validate_environment()
# ---------------------------------------------------------------------------
# print_summary tests
# ---------------------------------------------------------------------------
class TestPrintSummary:
    """print_summary stdout formatting for various result shapes."""

    def test_basic_summary(self, capsys):
        result = {
            "completed_tasks": ["1", "2"],
            "tasks": [{"id": 1}, {"id": 2}, {"id": 3}],
            "errors": [],
            "iteration_count": 5,
        }
        start = datetime.now(timezone.utc)
        print_summary(result, start)
        captured = capsys.readouterr().out
        # Completed-over-total counter plus the iterations line.
        assert "2 / 3" in captured
        assert "Iterations" in captured

    def test_summary_with_errors(self, capsys):
        result = {
            "completed_tasks": [],
            "tasks": [{"id": 1}],
            "errors": ["Error one", "Error two"],
            "iteration_count": 1,
        }
        start = datetime.now(timezone.utc)
        print_summary(result, start)
        captured = capsys.readouterr().out
        assert "Errors" in captured
        assert "Error one" in captured

    def test_summary_truncates_many_errors(self, capsys):
        # 10 errors -> presumably only the first 5 are shown; verify the tail note.
        result = {
            "completed_tasks": [],
            "tasks": [],
            "errors": [f"Error {i}" for i in range(10)],
            "iteration_count": 0,
        }
        start = datetime.now(timezone.utc)
        print_summary(result, start)
        captured = capsys.readouterr().out
        assert "and 5 more" in captured

    def test_summary_with_langsmith(self, capsys):
        result = {
            "completed_tasks": [],
            "tasks": [],
            "errors": [],
            "iteration_count": 0,
        }
        start = datetime.now(timezone.utc)
        # LANGSMITH_PROJECT in the environment should surface in the summary.
        with patch.dict(os.environ, {"LANGSMITH_PROJECT": "test-proj"}):
            print_summary(result, start)
        captured = capsys.readouterr().out
        assert "test-proj" in captured

    def test_summary_empty_result(self, capsys):
        result = {
            "completed_tasks": [],
            "tasks": [],
            "errors": [],
            "iteration_count": 0,
        }
        start = datetime.now(timezone.utc)
        print_summary(result, start)
        captured = capsys.readouterr().out
        assert "0 / 0" in captured
# ---------------------------------------------------------------------------
# GracefulShutdown tests
# ---------------------------------------------------------------------------
class TestGracefulShutdown:
    """Signal handling: first SIGINT/SIGTERM requests shutdown, second force-exits."""

    def test_initial_state(self):
        with patch("main.signal.signal"):
            gs = GracefulShutdown()
        assert gs.shutdown_requested is False
        assert gs.workspace_manager is None

    def test_registers_signals(self):
        with patch("main.signal.signal") as mock_signal:
            gs = GracefulShutdown()
        # Both SIGINT and SIGTERM must be routed to the same handler.
        calls = [c[0] for c in mock_signal.call_args_list]
        assert (signal.SIGINT, gs._handler) in calls
        assert (signal.SIGTERM, gs._handler) in calls

    def test_first_signal_sets_flag(self):
        with patch("main.signal.signal"):
            gs = GracefulShutdown()
        # Simulate first signal
        with patch("builtins.print"):
            gs._handler(signal.SIGINT, None)
        assert gs.shutdown_requested is True

    def test_second_signal_force_exits(self):
        with patch("main.signal.signal"):
            gs = GracefulShutdown()
        # With shutdown already requested, a second signal must hard-exit.
        gs.shutdown_requested = True
        with patch("builtins.print"), pytest.raises(SystemExit):
            gs._handler(signal.SIGINT, None)

    def test_first_signal_triggers_cleanup(self):
        mock_ws = MagicMock()
        mock_ws.cleanup_all = AsyncMock()
        with patch("main.signal.signal"):
            gs = GracefulShutdown(workspace_manager=mock_ws)
        # Simulate handler with a running loop
        loop = asyncio.new_event_loop()

        async def _run():
            with patch("builtins.print"):
                gs._handler(signal.SIGINT, None)

        loop.run_until_complete(_run())
        loop.close()
        assert gs.shutdown_requested is True
# ---------------------------------------------------------------------------
# run_factory tests
# ---------------------------------------------------------------------------
class TestRunFactory:
    """run_factory wiring: every component constructed once, orchestrator run awaited."""

    @pytest.mark.asyncio
    async def test_initializes_all_components(self):
        mock_orchestrator_instance = MagicMock()
        mock_orchestrator_instance.run = AsyncMock(return_value={
            "completed_tasks": ["1"],
            "tasks": [{"id": 1}],
            "errors": [],
            "iteration_count": 1,
        })
        args = MagicMock()
        args.prompt = "Build a REST API"
        args.repo_path = "/tmp/test-repo"
        config = {
            "api_key": "sk-test-key",
            "langsmith_api_key": "",
            "langsmith_project": "app-factory",
        }
        # Patch each collaborator at its import site so no real clients are built.
        with patch("app_factory.core.observability.ObservabilityManager") as mock_obs, \
                patch("app_factory.core.workspace.WorkspaceManager") as mock_ws, \
                patch("app_factory.core.architecture_tracker.ArchitectureTracker") as mock_arch, \
                patch("app_factory.agents.pm_agent.PMAgent") as mock_pm, \
                patch("app_factory.agents.task_agent.TaskMasterAgent") as mock_task, \
                patch("app_factory.agents.dev_agent.DevAgentManager") as mock_dev, \
                patch("app_factory.agents.qa_agent.QAAgent") as mock_qa, \
                patch("app_factory.core.graph.AppFactoryOrchestrator") as mock_orch, \
                patch("main.GracefulShutdown", create=True):
            mock_orch.return_value = mock_orchestrator_instance
            mock_ws.return_value = MagicMock(docker_client=MagicMock())
            result = await run_factory(args, config)
            assert result["completed_tasks"] == ["1"]
            mock_obs.assert_called_once()
            mock_ws.assert_called_once_with(repo_path="/tmp/test-repo")
            mock_arch.assert_called_once_with(api_key="sk-test-key")
            mock_pm.assert_called_once_with(api_key="sk-test-key")
            mock_task.assert_called_once_with(project_root="/tmp/test-repo")
            mock_dev.assert_called_once()
            mock_qa.assert_called_once_with(repo_path="/tmp/test-repo", api_key="sk-test-key")
            mock_orch.assert_called_once()
            mock_orchestrator_instance.run.assert_awaited_once_with("Build a REST API")
# ---------------------------------------------------------------------------
# main() integration tests
# ---------------------------------------------------------------------------
class TestMainEntryPoint:
    """main() integration: dry-run path, every error class exits with code 1,
    debug flag configures logging, and a successful run prints a summary."""

    def test_dry_run_validates_without_executing(self):
        with patch("main.load_dotenv"), \
                patch("main.parse_args") as mock_args, \
                patch("main.validate_environment") as mock_validate, \
                patch("builtins.print") as mock_print:
            mock_args.return_value = MagicMock(
                prompt="test", debug=False, dry_run=True,
            )
            mock_validate.return_value = {"api_key": "sk-test"}
            main()
            # Dry-run must end at validation and report success.
            mock_print.assert_called_with(
                "Dry-run: configuration is valid. All checks passed."
            )

    def test_configuration_error_exits(self):
        with patch("main.load_dotenv"), \
                patch("main.parse_args") as mock_args, \
                patch("main.validate_environment", side_effect=ConfigurationError("no key")), \
                patch("builtins.print"), \
                pytest.raises(SystemExit) as exc_info:
            mock_args.return_value = MagicMock(debug=False, dry_run=False)
            main()
        assert exc_info.value.code == 1

    def test_docker_error_exits(self):
        with patch("main.load_dotenv"), \
                patch("main.parse_args") as mock_args, \
                patch("main.validate_environment", side_effect=DockerDaemonError("not running")), \
                patch("builtins.print"), \
                pytest.raises(SystemExit) as exc_info:
            mock_args.return_value = MagicMock(debug=False, dry_run=False)
            main()
        assert exc_info.value.code == 1

    def test_git_error_exits(self):
        with patch("main.load_dotenv"), \
                patch("main.parse_args") as mock_args, \
                patch("main.validate_environment", side_effect=GitError("no git")), \
                patch("builtins.print"), \
                pytest.raises(SystemExit) as exc_info:
            mock_args.return_value = MagicMock(debug=False, dry_run=False)
            main()
        assert exc_info.value.code == 1

    def test_clarification_timeout_exits(self):
        # Error raised from inside the async run (asyncio.run) must still exit 1.
        with patch("main.load_dotenv"), \
                patch("main.parse_args") as mock_args, \
                patch("main.validate_environment") as mock_validate, \
                patch("main.asyncio.run", side_effect=ClarificationTimeout("task 5")), \
                patch("builtins.print"), \
                pytest.raises(SystemExit) as exc_info:
            mock_args.return_value = MagicMock(
                prompt="test", debug=False, dry_run=False,
            )
            mock_validate.return_value = {"api_key": "sk-test"}
            main()
        assert exc_info.value.code == 1

    def test_generic_exception_exits(self):
        with patch("main.load_dotenv"), \
                patch("main.parse_args") as mock_args, \
                patch("main.validate_environment") as mock_validate, \
                patch("main.asyncio.run", side_effect=RuntimeError("boom")), \
                patch("builtins.print"), \
                pytest.raises(SystemExit) as exc_info:
            mock_args.return_value = MagicMock(
                prompt="test", debug=False, dry_run=False,
            )
            mock_validate.return_value = {"api_key": "sk-test"}
            main()
        assert exc_info.value.code == 1

    def test_debug_flag_sets_logging(self):
        with patch("main.load_dotenv"), \
                patch("main.parse_args") as mock_args, \
                patch("main.validate_environment") as mock_validate, \
                patch("main.logging.basicConfig") as mock_logging, \
                patch("builtins.print"):
            mock_args.return_value = MagicMock(
                prompt="test", debug=True, dry_run=True,
            )
            mock_validate.return_value = {"api_key": "sk-test"}
            main()
            mock_logging.assert_called_once()
            call_kwargs = mock_logging.call_args[1]
            assert call_kwargs["level"] == 10  # logging.DEBUG

    def test_successful_run(self):
        mock_result = {
            "completed_tasks": ["1"],
            "tasks": [{"id": 1}],
            "errors": [],
            "iteration_count": 3,
        }
        with patch("main.load_dotenv"), \
                patch("main.parse_args") as mock_args, \
                patch("main.validate_environment") as mock_validate, \
                patch("main.asyncio.run", return_value=mock_result), \
                patch("main.print_summary") as mock_summary:
            mock_args.return_value = MagicMock(
                prompt="test", debug=False, dry_run=False,
            )
            mock_validate.return_value = {"api_key": "sk-test"}
            main()
            mock_summary.assert_called_once()
            # Verify the result was passed to print_summary
            assert mock_summary.call_args[0][0] == mock_result
# ---------------------------------------------------------------------------
# Exception hierarchy tests
# ---------------------------------------------------------------------------
class TestExceptionHierarchy:
    """All domain errors derive from AppFactoryError, which derives from Exception."""

    def test_all_exceptions_inherit_from_base(self):
        domain_errors = (
            ClarificationTimeout,
            DockerDaemonError,
            GitError,
            MCPConnectionError,
            ConfigurationError,
        )
        for exc_type in domain_errors:
            assert issubclass(exc_type, AppFactoryError)

    def test_base_inherits_from_exception(self):
        assert issubclass(AppFactoryError, Exception)

View File

@@ -0,0 +1,105 @@
"""Project Manager Agent - Expands user prompts into structured PRDs and handles clarification requests."""
import os
from datetime import datetime, timezone
from pathlib import Path
import anthropic
class PMAgent:
"""Agent responsible for PRD generation, clarification handling, and project planning."""
def __init__(self, api_key: str = None, model: str = "claude-sonnet-4-20250514"):
self.model = model
self.input_tokens = 0
self.output_tokens = 0
self._prompts_dir = Path(__file__).resolve().parent.parent / "prompts"
resolved_key = api_key or os.environ.get("ANTHROPIC_API_KEY")
if not resolved_key:
raise ValueError(
"Anthropic API key required. Pass api_key or set ANTHROPIC_API_KEY."
)
self.client = anthropic.AsyncAnthropic(api_key=resolved_key)
def _load_template(self, template_name: str) -> str:
"""Load a prompt template file from app_factory/prompts/."""
path = self._prompts_dir / template_name
return path.read_text()
async def expand_prompt_to_prd(self, user_input: str) -> str:
"""Expand a user prompt into a structured PRD using Claude.
Returns markdown with sections: Objective, Core Requirements,
Technical Architecture, Tech Stack, Success Criteria, Non-Functional Requirements.
"""
system_prompt = self._load_template("pm_prd_expansion.txt")
response = await self.client.messages.create(
model=self.model,
max_tokens=4096,
system=system_prompt,
messages=[{"role": "user", "content": user_input}],
)
self.input_tokens += response.usage.input_tokens
self.output_tokens += response.usage.output_tokens
return response.content[0].text
async def handle_clarification_request(self, clarification: dict) -> str:
"""Handle a clarification request from a downstream agent.
Args:
clarification: dict with keys requesting_agent, task_id, question, context.
Returns:
Clarification response string. If the question requires human input,
prompts the user and returns their answer.
"""
template = self._load_template("pm_clarification.txt")
prompt = template.format(
requesting_agent=clarification.get("requesting_agent", "unknown"),
task_id=clarification.get("task_id", "N/A"),
question=clarification.get("question", ""),
context=clarification.get("context", ""),
)
response = await self.client.messages.create(
model=self.model,
max_tokens=2048,
messages=[{"role": "user", "content": prompt}],
)
self.input_tokens += response.usage.input_tokens
self.output_tokens += response.usage.output_tokens
answer = response.content[0].text.strip()
if "ESCALATE_TO_HUMAN" in answer:
human_answer = input(
f"[PMAgent] Clarification needed for {clarification.get('requesting_agent', 'agent')} "
f"(task {clarification.get('task_id', 'N/A')}): "
f"{clarification.get('question', '')}\n> "
)
return human_answer
return answer
def update_prd(self, prd_path: str, updates: str):
"""Append updates to an existing PRD file with a versioned header."""
timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
header = f"\n\n---\n## PRD Update - {timestamp}\n\n"
with open(prd_path, "a") as f:
f.write(header)
f.write(updates)
def get_token_usage(self) -> dict:
"""Return cumulative token usage."""
return {
"input_tokens": self.input_tokens,
"output_tokens": self.output_tokens,
"total_tokens": self.input_tokens + self.output_tokens,
}

View File

@@ -0,0 +1,105 @@
"""Project Manager Agent - Expands user prompts into structured PRDs and handles clarification requests."""
import os
from datetime import datetime, timezone
from pathlib import Path
import anthropic
class PMAgent:
"""Agent responsible for PRD generation, clarification handling, and project planning."""
def __init__(self, api_key: str = None, model: str = "claude-sonnet-4-20250514"):
self.model = model
self.input_tokens = 0
self.output_tokens = 0
self._prompts_dir = Path(__file__).resolve().parent.parent / "prompts"
resolved_key = api_key or os.environ.get("ANTHROPIC_API_KEY")
if resolved_key:
self.client = anthropic.AsyncAnthropic(api_key=resolved_key)
else:
# Try default client (picks up OAuth/environment auth automatically)
self.client = anthropic.AsyncAnthropic()
def _load_template(self, template_name: str) -> str:
"""Load a prompt template file from app_factory/prompts/."""
path = self._prompts_dir / template_name
return path.read_text()
async def expand_prompt_to_prd(self, user_input: str) -> str:
"""Expand a user prompt into a structured PRD using Claude.
Returns markdown with sections: Objective, Core Requirements,
Technical Architecture, Tech Stack, Success Criteria, Non-Functional Requirements.
"""
system_prompt = self._load_template("pm_prd_expansion.txt")
response = await self.client.messages.create(
model=self.model,
max_tokens=4096,
system=system_prompt,
messages=[{"role": "user", "content": user_input}],
)
self.input_tokens += response.usage.input_tokens
self.output_tokens += response.usage.output_tokens
return response.content[0].text
async def handle_clarification_request(self, clarification: dict) -> str:
"""Handle a clarification request from a downstream agent.
Args:
clarification: dict with keys requesting_agent, task_id, question, context.
Returns:
Clarification response string. If the question requires human input,
prompts the user and returns their answer.
"""
template = self._load_template("pm_clarification.txt")
prompt = template.format(
requesting_agent=clarification.get("requesting_agent", "unknown"),
task_id=clarification.get("task_id", "N/A"),
question=clarification.get("question", ""),
context=clarification.get("context", ""),
)
response = await self.client.messages.create(
model=self.model,
max_tokens=2048,
messages=[{"role": "user", "content": prompt}],
)
self.input_tokens += response.usage.input_tokens
self.output_tokens += response.usage.output_tokens
answer = response.content[0].text.strip()
if "ESCALATE_TO_HUMAN" in answer:
human_answer = input(
f"[PMAgent] Clarification needed for {clarification.get('requesting_agent', 'agent')} "
f"(task {clarification.get('task_id', 'N/A')}): "
f"{clarification.get('question', '')}\n> "
)
return human_answer
return answer
def update_prd(self, prd_path: str, updates: str):
"""Append updates to an existing PRD file with a versioned header."""
timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
header = f"\n\n---\n## PRD Update - {timestamp}\n\n"
with open(prd_path, "a") as f:
f.write(header)
f.write(updates)
def get_token_usage(self) -> dict:
"""Return cumulative token usage."""
return {
"input_tokens": self.input_tokens,
"output_tokens": self.output_tokens,
"total_tokens": self.input_tokens + self.output_tokens,
}

View File

@@ -0,0 +1,361 @@
"""QA Agent - Handles code review, testing, linting, and merge operations."""
import os
import re
import subprocess
from pathlib import Path
import anthropic
import git
class QAAgent:
    """Reviews code, runs tests, handles merge conflicts, merges worktrees to main."""

    def __init__(self, repo_path: str, api_key: str = None, max_retries: int = 3):
        """Initialize QAAgent.

        Args:
            repo_path: Path to the git repository.
            api_key: Anthropic API key. Falls back to ANTHROPIC_API_KEY env var.
            max_retries: Maximum QA-Dev bounce retries per task.

        Raises:
            ValueError: If neither api_key nor ANTHROPIC_API_KEY is provided.
        """
        self.repo = git.Repo(repo_path)
        self.repo_path = Path(repo_path).resolve()
        self.max_retries = max_retries
        # QA-Dev bounce counters, keyed by task_id.
        self._retry_counts: dict[str, int] = {}
        self._prompts_dir = Path(__file__).resolve().parent.parent / "prompts"
        resolved_key = api_key or os.environ.get("ANTHROPIC_API_KEY")
        if not resolved_key:
            raise ValueError(
                "Anthropic API key required. Pass api_key or set ANTHROPIC_API_KEY."
            )
        self.client = anthropic.AsyncAnthropic(api_key=resolved_key)

    async def review_and_merge(self, task_id: str, worktree_path: str, task: dict = None) -> dict:
        """Full QA pipeline: rebase, lint, test, review, merge.

        Returns:
            dict with status and details. Status is one of:
            'merged', 'rebase_failed', 'lint_failed', 'tests_failed',
            'review_failed', 'merge_failed'.
        """
        # 1. Rebase feature branch onto main
        rebase_result = await self.rebase_onto_main(worktree_path, task_id)
        if not rebase_result["success"]:
            self._increment_retry(task_id)
            return {
                "status": "rebase_failed",
                "conflicts": rebase_result.get("conflicts", []),
                "retry_count": self.get_retry_count(task_id),
            }
        # 2. Run linting
        lint_result = self.run_linter(worktree_path)
        if not lint_result["passed"]:
            self._increment_retry(task_id)
            return {
                "status": "lint_failed",
                "errors": lint_result["errors"],
                "warnings": lint_result["warnings"],
                "retry_count": self.get_retry_count(task_id),
            }
        # 3. Run tests
        test_result = self.run_tests(worktree_path)
        if not test_result["passed"]:
            self._increment_retry(task_id)
            return {
                "status": "tests_failed",
                "total": test_result["total"],
                "failures": test_result["failures"],
                "errors": test_result["errors"],
                "output": test_result["output"],
                "retry_count": self.get_retry_count(task_id),
            }
        # 4. Code review via Claude
        wt_repo = git.Repo(worktree_path)
        diff = wt_repo.git.diff("main", "--", ".")
        review_result = await self.code_review(diff, task=task)
        if not review_result["approved"]:
            self._increment_retry(task_id)
            return {
                "status": "review_failed",
                "issues": review_result["issues"],
                "summary": review_result["summary"],
                "retry_count": self.get_retry_count(task_id),
            }
        # 5. Merge to main (merge failure is terminal; it does not count
        # as a QA-Dev bounce, so no retry increment here).
        merge_result = self.merge_to_main(worktree_path, task_id)
        if not merge_result["success"]:
            return {
                "status": "merge_failed",
                "error": merge_result.get("error", "Unknown merge error"),
            }
        return {
            "status": "merged",
            "commit_sha": merge_result["commit_sha"],
            "review_summary": review_result["summary"],
        }

    async def rebase_onto_main(self, worktree_path: str, task_id: str) -> dict:
        """Rebase the feature branch in the worktree onto main.

        Returns:
            dict with success bool and conflicts list.
        """
        wt_repo = git.Repo(worktree_path)
        try:
            wt_repo.git.fetch("origin", "main")
        except git.GitCommandError:
            pass  # fetch may fail in local-only repos; continue with local main
        try:
            wt_repo.git.rebase("main")
            return {"success": True, "conflicts": []}
        except git.GitCommandError:
            # Rebase failed — check for conflicts
            conflicts = self._get_conflict_files(wt_repo)
            if conflicts and self.auto_resolve_conflicts(worktree_path):
                return {"success": True, "conflicts": []}
            # Abort the failed rebase so the worktree is left in a clean state.
            try:
                wt_repo.git.rebase("--abort")
            except git.GitCommandError:
                pass
            return {"success": False, "conflicts": conflicts}

    def run_linter(self, worktree_path: str) -> dict:
        """Run ruff linter on the worktree.

        Returns:
            dict with passed bool, errors list, and warnings list.
        """
        try:
            result = subprocess.run(
                ["ruff", "check", "."],
                cwd=worktree_path,
                capture_output=True,
                text=True,
                timeout=120,
            )
        except FileNotFoundError:
            # Missing linter is treated as a soft pass, not a failure.
            return {"passed": True, "errors": [], "warnings": ["ruff not found, skipping lint"]}
        except subprocess.TimeoutExpired:
            return {"passed": False, "errors": ["Linter timed out"], "warnings": []}
        errors = []
        warnings = []
        for line in result.stdout.splitlines():
            line = line.strip()
            if not line or line.startswith("Found") or line.startswith("All checks"):
                continue
            # ruff output lines contain error codes like E501, W291, etc.
            if re.search(r"\b[A-Z]\d{3,4}\b", line):
                errors.append(line)
            elif line:
                warnings.append(line)
        # Pass/fail is decided by ruff's exit code, not by our line parse.
        passed = result.returncode == 0
        return {"passed": passed, "errors": errors, "warnings": warnings}

    def run_tests(self, worktree_path: str) -> dict:
        """Run pytest in the worktree.

        Returns:
            dict with passed bool, total/failures/errors counts, and raw output.
        """
        try:
            result = subprocess.run(
                ["python", "-m", "pytest", "-v", "--tb=short"],
                cwd=worktree_path,
                capture_output=True,
                text=True,
                timeout=300,
            )
        except FileNotFoundError:
            return {"passed": False, "total": 0, "failures": 0, "errors": 1,
                    "output": "pytest not found"}
        except subprocess.TimeoutExpired:
            return {"passed": False, "total": 0, "failures": 0, "errors": 1,
                    "output": "Test execution timed out"}
        output = result.stdout + result.stderr
        parsed = self.parse_test_results(output)
        parsed["output"] = output
        return parsed

    async def code_review(self, diff: str, task: dict = None) -> dict:
        """Review a diff using Claude for quality and security issues.

        Returns:
            dict with approved bool, issues list, and summary string.
        """
        template = self._load_template("qa_review.txt")
        task_context = ""
        if task:
            task_context = (
                f"Task ID: {task.get('id', 'N/A')}\n"
                f"Title: {task.get('title', 'N/A')}\n"
                f"Description: {task.get('description', 'N/A')}"
            )
        prompt = template.format(task_context=task_context, diff=diff)
        response = await self.client.messages.create(
            model="claude-sonnet-4-20250514",
            max_tokens=4096,
            messages=[{"role": "user", "content": prompt}],
        )
        text = response.content[0].text
        return self._parse_review_response(text)

    def merge_to_main(self, worktree_path: str, task_id: str) -> dict:
        """Merge the feature branch into main with --no-ff.

        Returns:
            dict with success bool and commit_sha.
        """
        branch_name = f"feature/task-{task_id}"
        try:
            self.repo.git.checkout("main")
            self.repo.git.merge("--no-ff", branch_name, m=f"Merge {branch_name}")
            commit_sha = self.repo.head.commit.hexsha
            return {"success": True, "commit_sha": commit_sha}
        except git.GitCommandError as e:
            return {"success": False, "commit_sha": None, "error": str(e)}

    def auto_resolve_conflicts(self, worktree_path: str) -> bool:
        """Try to auto-resolve simple merge conflicts.

        Returns True if all conflicts were resolved.
        """
        wt_repo = git.Repo(worktree_path)
        unmerged = wt_repo.index.unmerged_blobs()
        if not unmerged:
            return True
        for path in unmerged:
            file_path = os.path.join(worktree_path, path)
            if not os.path.exists(file_path):
                continue
            try:
                with open(file_path) as f:
                    content = f.read()
                # Accept "theirs" (incoming) for simple conflicts
                if "<<<<<<< " in content and "=======" in content and ">>>>>>> " in content:
                    resolved = re.sub(
                        r"<<<<<<< [^\n]*\n.*?=======\n(.*?)>>>>>>> [^\n]*\n",
                        r"\1",
                        content,
                        flags=re.DOTALL,
                    )
                    with open(file_path, "w") as f:
                        f.write(resolved)
                    wt_repo.index.add([path])
                else:
                    return False
            except Exception:
                return False
        try:
            wt_repo.git.rebase("--continue")
            return True
        except git.GitCommandError:
            return False

    def parse_test_results(self, output: str) -> dict:
        """Parse pytest output into structured results.

        Returns:
            dict with passed bool, total int, failures int, errors int.
        """
        passed_count = 0
        failed_count = 0
        error_count = 0
        # pytest emits several "=...=" banner lines ("== test session
        # starts ==", "== FAILURES ==", ...); only the final summary banner
        # ("== 2 passed, 1 failed in 0.12s ==") carries the counts.
        # Searching for the first banner (as this method previously did)
        # always hit the session-start line and parsed zero counts, so scan
        # every banner and keep the counts from whichever line has them.
        for summary_line in re.findall(r"=+\s*(.*?)\s*=+\s*$", output, re.MULTILINE):
            p = re.search(r"(\d+)\s+passed", summary_line)
            f = re.search(r"(\d+)\s+failed", summary_line)
            e = re.search(r"(\d+)\s+error", summary_line)
            if p or f or e:
                passed_count = int(p.group(1)) if p else 0
                failed_count = int(f.group(1)) if f else 0
                error_count = int(e.group(1)) if e else 0
        total = passed_count + failed_count + error_count
        # "passed" requires at least one collected test and zero failures/errors.
        all_passed = failed_count == 0 and error_count == 0 and total > 0
        return {
            "passed": all_passed,
            "total": total,
            "failures": failed_count,
            "errors": error_count,
        }

    def get_retry_count(self, task_id: str) -> int:
        """Return QA retry count for a task."""
        return self._retry_counts.get(task_id, 0)

    def _increment_retry(self, task_id: str):
        """Increment the retry counter for a task."""
        self._retry_counts[task_id] = self._retry_counts.get(task_id, 0) + 1

    def _load_template(self, template_name: str) -> str:
        """Load a prompt template file from app_factory/prompts/."""
        path = self._prompts_dir / template_name
        return path.read_text()

    def _get_conflict_files(self, repo: git.Repo) -> list[str]:
        """Get list of conflicting files from a repo (porcelain UU/AA entries)."""
        try:
            status_output = repo.git.status("--porcelain")
            conflicts = []
            for line in status_output.splitlines():
                if line.startswith("UU ") or line.startswith("AA "):
                    conflicts.append(line[3:].strip())
            return conflicts
        except git.GitCommandError:
            return []

    def _parse_review_response(self, text: str) -> dict:
        """Parse Claude's review response into structured data."""
        approved = False
        issues = []
        summary = ""
        for line in text.splitlines():
            line = line.strip()
            if line.upper().startswith("APPROVED:"):
                value = line.split(":", 1)[1].strip().lower()
                approved = value in ("true", "yes")
            elif line.startswith("- ["):
                # Parse issue lines like "- [severity: critical] description"
                issue_match = re.match(
                    r"-\s*\[severity:\s*(critical|warning|info)\]\s*(.*)",
                    line,
                    re.IGNORECASE,
                )
                if issue_match:
                    issues.append({
                        "severity": issue_match.group(1).lower(),
                        "description": issue_match.group(2).strip(),
                    })
            elif line.upper().startswith("SUMMARY:"):
                summary = line.split(":", 1)[1].strip()
        return {"approved": approved, "issues": issues, "summary": summary}

View File

@@ -0,0 +1,361 @@
"""QA Agent - Handles code review, testing, linting, and merge operations."""
import os
import re
import subprocess
from pathlib import Path
import anthropic
import git
class QAAgent:
    """Reviews code, runs tests, handles merge conflicts, merges worktrees to main."""

    def __init__(self, repo_path: str, api_key: str = None, max_retries: int = 3):
        """Initialize QAAgent.

        Args:
            repo_path: Path to the git repository.
            api_key: Anthropic API key. Falls back to ANTHROPIC_API_KEY env var.
                When neither is set, a default client is created so ambient
                auth (OAuth/environment) can be picked up automatically.
            max_retries: Maximum QA-Dev bounce retries per task.
        """
        self.repo = git.Repo(repo_path)
        self.repo_path = Path(repo_path).resolve()
        self.max_retries = max_retries
        # QA-Dev bounce counters, keyed by task_id.
        self._retry_counts: dict[str, int] = {}
        self._prompts_dir = Path(__file__).resolve().parent.parent / "prompts"
        resolved_key = api_key or os.environ.get("ANTHROPIC_API_KEY")
        if resolved_key:
            self.client = anthropic.AsyncAnthropic(api_key=resolved_key)
        else:
            # Try default client (picks up OAuth/environment auth automatically)
            self.client = anthropic.AsyncAnthropic()

    async def review_and_merge(self, task_id: str, worktree_path: str, task: dict = None) -> dict:
        """Full QA pipeline: rebase, lint, test, review, merge.

        Returns:
            dict with status and details. Status is one of:
            'merged', 'rebase_failed', 'lint_failed', 'tests_failed',
            'review_failed', 'merge_failed'.
        """
        # 1. Rebase feature branch onto main
        rebase_result = await self.rebase_onto_main(worktree_path, task_id)
        if not rebase_result["success"]:
            self._increment_retry(task_id)
            return {
                "status": "rebase_failed",
                "conflicts": rebase_result.get("conflicts", []),
                "retry_count": self.get_retry_count(task_id),
            }
        # 2. Run linting
        lint_result = self.run_linter(worktree_path)
        if not lint_result["passed"]:
            self._increment_retry(task_id)
            return {
                "status": "lint_failed",
                "errors": lint_result["errors"],
                "warnings": lint_result["warnings"],
                "retry_count": self.get_retry_count(task_id),
            }
        # 3. Run tests
        test_result = self.run_tests(worktree_path)
        if not test_result["passed"]:
            self._increment_retry(task_id)
            return {
                "status": "tests_failed",
                "total": test_result["total"],
                "failures": test_result["failures"],
                "errors": test_result["errors"],
                "output": test_result["output"],
                "retry_count": self.get_retry_count(task_id),
            }
        # 4. Code review via Claude
        wt_repo = git.Repo(worktree_path)
        diff = wt_repo.git.diff("main", "--", ".")
        review_result = await self.code_review(diff, task=task)
        if not review_result["approved"]:
            self._increment_retry(task_id)
            return {
                "status": "review_failed",
                "issues": review_result["issues"],
                "summary": review_result["summary"],
                "retry_count": self.get_retry_count(task_id),
            }
        # 5. Merge to main (merge failure is terminal; it does not count
        # as a QA-Dev bounce, so no retry increment here).
        merge_result = self.merge_to_main(worktree_path, task_id)
        if not merge_result["success"]:
            return {
                "status": "merge_failed",
                "error": merge_result.get("error", "Unknown merge error"),
            }
        return {
            "status": "merged",
            "commit_sha": merge_result["commit_sha"],
            "review_summary": review_result["summary"],
        }

    async def rebase_onto_main(self, worktree_path: str, task_id: str) -> dict:
        """Rebase the feature branch in the worktree onto main.

        Returns:
            dict with success bool and conflicts list.
        """
        wt_repo = git.Repo(worktree_path)
        try:
            wt_repo.git.fetch("origin", "main")
        except git.GitCommandError:
            pass  # fetch may fail in local-only repos; continue with local main
        try:
            wt_repo.git.rebase("main")
            return {"success": True, "conflicts": []}
        except git.GitCommandError:
            # Rebase failed — check for conflicts
            conflicts = self._get_conflict_files(wt_repo)
            if conflicts and self.auto_resolve_conflicts(worktree_path):
                return {"success": True, "conflicts": []}
            # Abort the failed rebase so the worktree is left in a clean state.
            try:
                wt_repo.git.rebase("--abort")
            except git.GitCommandError:
                pass
            return {"success": False, "conflicts": conflicts}

    def run_linter(self, worktree_path: str) -> dict:
        """Run ruff linter on the worktree.

        Returns:
            dict with passed bool, errors list, and warnings list.
        """
        try:
            result = subprocess.run(
                ["ruff", "check", "."],
                cwd=worktree_path,
                capture_output=True,
                text=True,
                timeout=120,
            )
        except FileNotFoundError:
            # Missing linter is treated as a soft pass, not a failure.
            return {"passed": True, "errors": [], "warnings": ["ruff not found, skipping lint"]}
        except subprocess.TimeoutExpired:
            return {"passed": False, "errors": ["Linter timed out"], "warnings": []}
        errors = []
        warnings = []
        for line in result.stdout.splitlines():
            line = line.strip()
            if not line or line.startswith("Found") or line.startswith("All checks"):
                continue
            # ruff output lines contain error codes like E501, W291, etc.
            if re.search(r"\b[A-Z]\d{3,4}\b", line):
                errors.append(line)
            elif line:
                warnings.append(line)
        # Pass/fail is decided by ruff's exit code, not by our line parse.
        passed = result.returncode == 0
        return {"passed": passed, "errors": errors, "warnings": warnings}

    def run_tests(self, worktree_path: str) -> dict:
        """Run pytest in the worktree.

        Returns:
            dict with passed bool, total/failures/errors counts, and raw output.
        """
        try:
            result = subprocess.run(
                ["python", "-m", "pytest", "-v", "--tb=short"],
                cwd=worktree_path,
                capture_output=True,
                text=True,
                timeout=300,
            )
        except FileNotFoundError:
            return {"passed": False, "total": 0, "failures": 0, "errors": 1,
                    "output": "pytest not found"}
        except subprocess.TimeoutExpired:
            return {"passed": False, "total": 0, "failures": 0, "errors": 1,
                    "output": "Test execution timed out"}
        output = result.stdout + result.stderr
        parsed = self.parse_test_results(output)
        parsed["output"] = output
        return parsed

    async def code_review(self, diff: str, task: dict = None) -> dict:
        """Review a diff using Claude for quality and security issues.

        Returns:
            dict with approved bool, issues list, and summary string.
        """
        template = self._load_template("qa_review.txt")
        task_context = ""
        if task:
            task_context = (
                f"Task ID: {task.get('id', 'N/A')}\n"
                f"Title: {task.get('title', 'N/A')}\n"
                f"Description: {task.get('description', 'N/A')}"
            )
        prompt = template.format(task_context=task_context, diff=diff)
        response = await self.client.messages.create(
            model="claude-sonnet-4-20250514",
            max_tokens=4096,
            messages=[{"role": "user", "content": prompt}],
        )
        text = response.content[0].text
        return self._parse_review_response(text)

    def merge_to_main(self, worktree_path: str, task_id: str) -> dict:
        """Merge the feature branch into main with --no-ff.

        Returns:
            dict with success bool and commit_sha.
        """
        branch_name = f"feature/task-{task_id}"
        try:
            self.repo.git.checkout("main")
            self.repo.git.merge("--no-ff", branch_name, m=f"Merge {branch_name}")
            commit_sha = self.repo.head.commit.hexsha
            return {"success": True, "commit_sha": commit_sha}
        except git.GitCommandError as e:
            return {"success": False, "commit_sha": None, "error": str(e)}

    def auto_resolve_conflicts(self, worktree_path: str) -> bool:
        """Try to auto-resolve simple merge conflicts.

        Returns True if all conflicts were resolved.
        """
        wt_repo = git.Repo(worktree_path)
        unmerged = wt_repo.index.unmerged_blobs()
        if not unmerged:
            return True
        for path in unmerged:
            file_path = os.path.join(worktree_path, path)
            if not os.path.exists(file_path):
                continue
            try:
                with open(file_path) as f:
                    content = f.read()
                # Accept "theirs" (incoming) for simple conflicts
                if "<<<<<<< " in content and "=======" in content and ">>>>>>> " in content:
                    resolved = re.sub(
                        r"<<<<<<< [^\n]*\n.*?=======\n(.*?)>>>>>>> [^\n]*\n",
                        r"\1",
                        content,
                        flags=re.DOTALL,
                    )
                    with open(file_path, "w") as f:
                        f.write(resolved)
                    wt_repo.index.add([path])
                else:
                    return False
            except Exception:
                return False
        try:
            wt_repo.git.rebase("--continue")
            return True
        except git.GitCommandError:
            return False

    def parse_test_results(self, output: str) -> dict:
        """Parse pytest output into structured results.

        Returns:
            dict with passed bool, total int, failures int, errors int.
        """
        passed_count = 0
        failed_count = 0
        error_count = 0
        # pytest emits several "=...=" banner lines ("== test session
        # starts ==", "== FAILURES ==", ...); only the final summary banner
        # ("== 2 passed, 1 failed in 0.12s ==") carries the counts.
        # Searching for the first banner (as this method previously did)
        # always hit the session-start line and parsed zero counts, so scan
        # every banner and keep the counts from whichever line has them.
        for summary_line in re.findall(r"=+\s*(.*?)\s*=+\s*$", output, re.MULTILINE):
            p = re.search(r"(\d+)\s+passed", summary_line)
            f = re.search(r"(\d+)\s+failed", summary_line)
            e = re.search(r"(\d+)\s+error", summary_line)
            if p or f or e:
                passed_count = int(p.group(1)) if p else 0
                failed_count = int(f.group(1)) if f else 0
                error_count = int(e.group(1)) if e else 0
        total = passed_count + failed_count + error_count
        # "passed" requires at least one collected test and zero failures/errors.
        all_passed = failed_count == 0 and error_count == 0 and total > 0
        return {
            "passed": all_passed,
            "total": total,
            "failures": failed_count,
            "errors": error_count,
        }

    def get_retry_count(self, task_id: str) -> int:
        """Return QA retry count for a task."""
        return self._retry_counts.get(task_id, 0)

    def _increment_retry(self, task_id: str):
        """Increment the retry counter for a task."""
        self._retry_counts[task_id] = self._retry_counts.get(task_id, 0) + 1

    def _load_template(self, template_name: str) -> str:
        """Load a prompt template file from app_factory/prompts/."""
        path = self._prompts_dir / template_name
        return path.read_text()

    def _get_conflict_files(self, repo: git.Repo) -> list[str]:
        """Get list of conflicting files from a repo (porcelain UU/AA entries)."""
        try:
            status_output = repo.git.status("--porcelain")
            conflicts = []
            for line in status_output.splitlines():
                if line.startswith("UU ") or line.startswith("AA "):
                    conflicts.append(line[3:].strip())
            return conflicts
        except git.GitCommandError:
            return []

    def _parse_review_response(self, text: str) -> dict:
        """Parse Claude's review response into structured data."""
        approved = False
        issues = []
        summary = ""
        for line in text.splitlines():
            line = line.strip()
            if line.upper().startswith("APPROVED:"):
                value = line.split(":", 1)[1].strip().lower()
                approved = value in ("true", "yes")
            elif line.startswith("- ["):
                # Parse issue lines like "- [severity: critical] description"
                issue_match = re.match(
                    r"-\s*\[severity:\s*(critical|warning|info)\]\s*(.*)",
                    line,
                    re.IGNORECASE,
                )
                if issue_match:
                    issues.append({
                        "severity": issue_match.group(1).lower(),
                        "description": issue_match.group(2).strip(),
                    })
            elif line.upper().startswith("SUMMARY:"):
                summary = line.split(":", 1)[1].strip()
        return {"approved": approved, "issues": issues, "summary": summary}

View File

@@ -0,0 +1,254 @@
"""Tests for PMAgent."""
import os
import tempfile
from types import SimpleNamespace
from unittest.mock import AsyncMock, patch
import pytest
from app_factory.agents.pm_agent import PMAgent
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _make_api_response(text, input_tokens=10, output_tokens=20):
"""Build a fake Anthropic messages.create response."""
return SimpleNamespace(
content=[SimpleNamespace(text=text)],
usage=SimpleNamespace(input_tokens=input_tokens, output_tokens=output_tokens),
)
def _build_agent(**kwargs):
    """Create a PMAgent whose Anthropic client is replaced by an AsyncMock."""
    with patch("app_factory.agents.pm_agent.anthropic") as mock_anthropic:
        fake_client = AsyncMock()
        mock_anthropic.AsyncAnthropic.return_value = fake_client
        agent = PMAgent(api_key="test-key", **kwargs)
        # Re-assign explicitly so the agent keeps the mock after the patch exits.
        agent.client = fake_client
        return agent, fake_client
# ---------------------------------------------------------------------------
# Initialization
# ---------------------------------------------------------------------------
class TestInitialization:
    """PMAgent construction: API-key requirement and model selection."""
    def test_missing_api_key_raises(self):
        # Clear the environment so the ANTHROPIC_API_KEY fallback is absent.
        # NOTE(review): the inner patch.dict re-applies an already-empty env —
        # looks redundant; confirm before simplifying.
        with patch.dict(os.environ, {}, clear=True):
            env = os.environ.copy()
            env.pop("ANTHROPIC_API_KEY", None)
            with patch.dict(os.environ, env, clear=True):
                with pytest.raises(ValueError, match="API key required"):
                    PMAgent()
    def test_api_key_from_param(self):
        # Default model is used when none is passed to the constructor.
        agent, _ = _build_agent()
        assert agent.model == "claude-sonnet-4-20250514"
    def test_custom_model(self):
        # An explicit model keyword overrides the default.
        agent, _ = _build_agent(model="claude-opus-4-20250514")
        assert agent.model == "claude-opus-4-20250514"
# ---------------------------------------------------------------------------
# Template loading
# ---------------------------------------------------------------------------
class TestTemplateLoading:
    """Prompt templates load from the package prompts directory."""
    def test_load_prd_template(self):
        agent, _ = _build_agent()
        template = agent._load_template("pm_prd_expansion.txt")
        assert "Product Manager" in template
        assert "Objective" in template
    def test_load_clarification_template(self):
        agent, _ = _build_agent()
        template = agent._load_template("pm_clarification.txt")
        # Template must expose the format placeholder and the escalation
        # sentinel that handle_clarification_request checks for.
        assert "{requesting_agent}" in template
        assert "ESCALATE_TO_HUMAN" in template
    def test_load_missing_template_raises(self):
        agent, _ = _build_agent()
        with pytest.raises(FileNotFoundError):
            agent._load_template("nonexistent.txt")
# ---------------------------------------------------------------------------
# PRD expansion
# ---------------------------------------------------------------------------
class TestExpandPromptToPrd:
    """expand_prompt_to_prd returns PRD markdown and tracks token usage."""
    @pytest.mark.asyncio
    async def test_returns_prd_markdown(self):
        agent, mock_client = _build_agent()
        # Canned PRD containing every section heading the test asserts on.
        prd_text = (
            "# Objective\nBuild a todo app\n"
            "# Core Requirements\n1. Add tasks\n"
            "# Technical Architecture\nMonolith\n"
            "# Tech Stack\nPython, FastAPI\n"
            "# Success Criteria\nAll tests pass\n"
            "# Non-Functional Requirements\n<1s response"
        )
        mock_client.messages.create = AsyncMock(
            return_value=_make_api_response(prd_text, 15, 100)
        )
        result = await agent.expand_prompt_to_prd("Build a todo app")
        assert "Objective" in result
        assert "Core Requirements" in result
        assert "Technical Architecture" in result
        assert "Tech Stack" in result
        assert "Success Criteria" in result
        assert "Non-Functional Requirements" in result
        mock_client.messages.create.assert_awaited_once()
    @pytest.mark.asyncio
    async def test_tracks_token_usage(self):
        agent, mock_client = _build_agent()
        mock_client.messages.create = AsyncMock(
            return_value=_make_api_response("prd content", 50, 200)
        )
        await agent.expand_prompt_to_prd("some input")
        # Usage counters mirror the mocked response's usage fields.
        usage = agent.get_token_usage()
        assert usage["input_tokens"] == 50
        assert usage["output_tokens"] == 200
        assert usage["total_tokens"] == 250
# ---------------------------------------------------------------------------
# Clarification handling
# ---------------------------------------------------------------------------
class TestHandleClarification:
    """handle_clarification_request: auto-answer, escalation, token tracking."""
    @pytest.mark.asyncio
    async def test_auto_resolve(self):
        agent, mock_client = _build_agent()
        mock_client.messages.create = AsyncMock(
            return_value=_make_api_response("Use PostgreSQL for the database.")
        )
        result = await agent.handle_clarification_request({
            "requesting_agent": "dev_agent",
            "task_id": "2.1",
            "question": "Which database should I use?",
            "context": "PRD says relational DB",
        })
        # Model answer is returned verbatim when no escalation sentinel appears.
        assert result == "Use PostgreSQL for the database."
    @pytest.mark.asyncio
    async def test_escalate_to_human(self):
        agent, mock_client = _build_agent()
        mock_client.messages.create = AsyncMock(
            return_value=_make_api_response("ESCALATE_TO_HUMAN")
        )
        # The sentinel routes the question to input(); the human reply wins.
        with patch("builtins.input", return_value="Use MySQL") as mock_input:
            result = await agent.handle_clarification_request({
                "requesting_agent": "dev_agent",
                "task_id": "3.1",
                "question": "Which vendor should we pick?",
                "context": "",
            })
        assert result == "Use MySQL"
        mock_input.assert_called_once()
    @pytest.mark.asyncio
    async def test_tracks_tokens(self):
        agent, mock_client = _build_agent()
        mock_client.messages.create = AsyncMock(
            return_value=_make_api_response("answer", 5, 10)
        )
        await agent.handle_clarification_request({
            "requesting_agent": "qa",
            "task_id": "1.1",
            "question": "q",
            "context": "c",
        })
        usage = agent.get_token_usage()
        assert usage["input_tokens"] == 5
        assert usage["output_tokens"] == 10
# ---------------------------------------------------------------------------
# PRD updates
# ---------------------------------------------------------------------------
class TestUpdatePrd:
    """update_prd appends versioned update sections without losing content."""
    def test_appends_with_version_header(self):
        agent, _ = _build_agent()
        with tempfile.NamedTemporaryFile(mode="w", suffix=".md", delete=False) as f:
            f.write("# Original PRD\nSome content\n")
            prd_path = f.name
        try:
            agent.update_prd(prd_path, "Added authentication requirement.")
            with open(prd_path) as f:
                content = f.read()
            # Original content preserved; timestamped header plus update appended.
            assert "# Original PRD" in content
            assert "## PRD Update -" in content
            assert "Added authentication requirement." in content
        finally:
            os.unlink(prd_path)
    def test_multiple_updates_maintain_history(self):
        agent, _ = _build_agent()
        with tempfile.NamedTemporaryFile(mode="w", suffix=".md", delete=False) as f:
            f.write("# PRD v1\n")
            prd_path = f.name
        try:
            agent.update_prd(prd_path, "Update 1")
            agent.update_prd(prd_path, "Update 2")
            with open(prd_path) as f:
                content = f.read()
            # Each call adds its own header, so history accumulates.
            assert content.count("## PRD Update -") == 2
            assert "Update 1" in content
            assert "Update 2" in content
        finally:
            os.unlink(prd_path)
# ---------------------------------------------------------------------------
# Token usage
# ---------------------------------------------------------------------------
class TestTokenUsage:
    """get_token_usage starts at zero and accumulates across API calls."""
    def test_initial_usage_zero(self):
        agent, _ = _build_agent()
        usage = agent.get_token_usage()
        assert usage == {"input_tokens": 0, "output_tokens": 0, "total_tokens": 0}
    @pytest.mark.asyncio
    async def test_accumulates_across_calls(self):
        agent, mock_client = _build_agent()
        # Two distinct responses; counters should sum, not reset.
        mock_client.messages.create = AsyncMock(
            side_effect=[
                _make_api_response("r1", 10, 20),
                _make_api_response("r2", 30, 40),
            ]
        )
        await agent.expand_prompt_to_prd("first call")
        await agent.expand_prompt_to_prd("second call")
        usage = agent.get_token_usage()
        assert usage["input_tokens"] == 40
        assert usage["output_tokens"] == 60
        assert usage["total_tokens"] == 100

View File

@@ -0,0 +1,257 @@
"""Tests for PMAgent."""
import os
import tempfile
from types import SimpleNamespace
from unittest.mock import AsyncMock, patch
import pytest
from app_factory.agents.pm_agent import PMAgent
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _make_api_response(text, input_tokens=10, output_tokens=20):
"""Build a fake Anthropic messages.create response."""
return SimpleNamespace(
content=[SimpleNamespace(text=text)],
usage=SimpleNamespace(input_tokens=input_tokens, output_tokens=output_tokens),
)
def _build_agent(**kwargs):
    """Create a PMAgent whose Anthropic client is replaced by an AsyncMock."""
    with patch("app_factory.agents.pm_agent.anthropic") as mock_anthropic:
        fake_client = AsyncMock()
        mock_anthropic.AsyncAnthropic.return_value = fake_client
        agent = PMAgent(api_key="test-key", **kwargs)
        # Re-assign explicitly so the agent keeps the mock after the patch exits.
        agent.client = fake_client
        return agent, fake_client
# ---------------------------------------------------------------------------
# Initialization
# ---------------------------------------------------------------------------
class TestInitialization:
    """PMAgent construction: default-client fallback and model selection."""
    def test_no_api_key_uses_default_client(self):
        # Clear the environment so the ANTHROPIC_API_KEY fallback is absent.
        # NOTE(review): the inner patch.dict re-applies an already-empty env —
        # looks redundant; confirm before simplifying.
        with patch.dict(os.environ, {}, clear=True):
            env = os.environ.copy()
            env.pop("ANTHROPIC_API_KEY", None)
            with patch.dict(os.environ, env, clear=True), \
                    patch("app_factory.agents.pm_agent.anthropic") as mock_mod:
                mock_mod.AsyncAnthropic.return_value = AsyncMock()
                agent = PMAgent()
                # Should create default client without explicit key
                mock_mod.AsyncAnthropic.assert_called_once_with()
    def test_api_key_from_param(self):
        # Default model is used when none is passed to the constructor.
        agent, _ = _build_agent()
        assert agent.model == "claude-sonnet-4-20250514"
    def test_custom_model(self):
        # An explicit model keyword overrides the default.
        agent, _ = _build_agent(model="claude-opus-4-20250514")
        assert agent.model == "claude-opus-4-20250514"
# ---------------------------------------------------------------------------
# Template loading
# ---------------------------------------------------------------------------
class TestTemplateLoading:
    """Prompt templates load from the package prompts directory."""
    def test_load_prd_template(self):
        agent, _ = _build_agent()
        template = agent._load_template("pm_prd_expansion.txt")
        assert "Product Manager" in template
        assert "Objective" in template
    def test_load_clarification_template(self):
        agent, _ = _build_agent()
        template = agent._load_template("pm_clarification.txt")
        # Template must expose the format placeholder and the escalation
        # sentinel that handle_clarification_request checks for.
        assert "{requesting_agent}" in template
        assert "ESCALATE_TO_HUMAN" in template
    def test_load_missing_template_raises(self):
        agent, _ = _build_agent()
        with pytest.raises(FileNotFoundError):
            agent._load_template("nonexistent.txt")
# ---------------------------------------------------------------------------
# PRD expansion
# ---------------------------------------------------------------------------
class TestExpandPromptToPrd:
    """expand_prompt_to_prd returns PRD markdown and tracks token usage."""
    @pytest.mark.asyncio
    async def test_returns_prd_markdown(self):
        agent, mock_client = _build_agent()
        # Canned PRD containing every section heading the test asserts on.
        prd_text = (
            "# Objective\nBuild a todo app\n"
            "# Core Requirements\n1. Add tasks\n"
            "# Technical Architecture\nMonolith\n"
            "# Tech Stack\nPython, FastAPI\n"
            "# Success Criteria\nAll tests pass\n"
            "# Non-Functional Requirements\n<1s response"
        )
        mock_client.messages.create = AsyncMock(
            return_value=_make_api_response(prd_text, 15, 100)
        )
        result = await agent.expand_prompt_to_prd("Build a todo app")
        assert "Objective" in result
        assert "Core Requirements" in result
        assert "Technical Architecture" in result
        assert "Tech Stack" in result
        assert "Success Criteria" in result
        assert "Non-Functional Requirements" in result
        mock_client.messages.create.assert_awaited_once()
    @pytest.mark.asyncio
    async def test_tracks_token_usage(self):
        agent, mock_client = _build_agent()
        mock_client.messages.create = AsyncMock(
            return_value=_make_api_response("prd content", 50, 200)
        )
        await agent.expand_prompt_to_prd("some input")
        # Usage counters mirror the mocked response's usage fields.
        usage = agent.get_token_usage()
        assert usage["input_tokens"] == 50
        assert usage["output_tokens"] == 200
        assert usage["total_tokens"] == 250
# ---------------------------------------------------------------------------
# Clarification handling
# ---------------------------------------------------------------------------
class TestHandleClarification:
    """handle_clarification_request: auto-answer, escalation, token tracking."""
    @pytest.mark.asyncio
    async def test_auto_resolve(self):
        agent, mock_client = _build_agent()
        mock_client.messages.create = AsyncMock(
            return_value=_make_api_response("Use PostgreSQL for the database.")
        )
        result = await agent.handle_clarification_request({
            "requesting_agent": "dev_agent",
            "task_id": "2.1",
            "question": "Which database should I use?",
            "context": "PRD says relational DB",
        })
        # Model answer is returned verbatim when no escalation sentinel appears.
        assert result == "Use PostgreSQL for the database."
    @pytest.mark.asyncio
    async def test_escalate_to_human(self):
        agent, mock_client = _build_agent()
        mock_client.messages.create = AsyncMock(
            return_value=_make_api_response("ESCALATE_TO_HUMAN")
        )
        # The sentinel routes the question to input(); the human reply wins.
        with patch("builtins.input", return_value="Use MySQL") as mock_input:
            result = await agent.handle_clarification_request({
                "requesting_agent": "dev_agent",
                "task_id": "3.1",
                "question": "Which vendor should we pick?",
                "context": "",
            })
        assert result == "Use MySQL"
        mock_input.assert_called_once()
    @pytest.mark.asyncio
    async def test_tracks_tokens(self):
        agent, mock_client = _build_agent()
        mock_client.messages.create = AsyncMock(
            return_value=_make_api_response("answer", 5, 10)
        )
        await agent.handle_clarification_request({
            "requesting_agent": "qa",
            "task_id": "1.1",
            "question": "q",
            "context": "c",
        })
        usage = agent.get_token_usage()
        assert usage["input_tokens"] == 5
        assert usage["output_tokens"] == 10
# ---------------------------------------------------------------------------
# PRD updates
# ---------------------------------------------------------------------------
class TestUpdatePrd:
    """update_prd appends versioned update sections to an existing PRD file."""

    @staticmethod
    def _write_temp_prd(initial_text):
        # Create a throwaway PRD file on disk and return its path.
        with tempfile.NamedTemporaryFile(mode="w", suffix=".md", delete=False) as fh:
            fh.write(initial_text)
            return fh.name

    def test_appends_with_version_header(self):
        agent, _ = _build_agent()
        prd_path = self._write_temp_prd("# Original PRD\nSome content\n")
        try:
            agent.update_prd(prd_path, "Added authentication requirement.")
            with open(prd_path) as fh:
                content = fh.read()
            # Original text survives and the update is appended under a header.
            assert "# Original PRD" in content
            assert "## PRD Update -" in content
            assert "Added authentication requirement." in content
        finally:
            os.unlink(prd_path)

    def test_multiple_updates_maintain_history(self):
        agent, _ = _build_agent()
        prd_path = self._write_temp_prd("# PRD v1\n")
        try:
            for note in ("Update 1", "Update 2"):
                agent.update_prd(prd_path, note)
            with open(prd_path) as fh:
                content = fh.read()
            # Both updates are present, each with its own header.
            assert content.count("## PRD Update -") == 2
            assert "Update 1" in content
            assert "Update 2" in content
        finally:
            os.unlink(prd_path)
# ---------------------------------------------------------------------------
# Token usage
# ---------------------------------------------------------------------------
class TestTokenUsage:
    """Accounting of input/output token usage across agent calls."""

    def test_initial_usage_zero(self):
        agent, _ = _build_agent()
        assert agent.get_token_usage() == {
            "input_tokens": 0,
            "output_tokens": 0,
            "total_tokens": 0,
        }

    @pytest.mark.asyncio
    async def test_accumulates_across_calls(self):
        agent, client = _build_agent()
        client.messages.create = AsyncMock(
            side_effect=[
                _make_api_response("r1", 10, 20),
                _make_api_response("r2", 30, 40),
            ]
        )
        for prompt in ("first call", "second call"):
            await agent.expand_prompt_to_prd(prompt)
        # Totals are the sums across both calls.
        assert agent.get_token_usage() == {
            "input_tokens": 40,
            "output_tokens": 60,
            "total_tokens": 100,
        }

View File

@@ -0,0 +1,331 @@
"""App Factory - Autonomous multi-agent orchestration framework.
Usage:
python main.py --prompt "Build a video transcription service" --repo-path /path/to/project
python main.py --prompt "Build a REST API" --max-concurrent-tasks 3 --debug
python main.py --dry-run --prompt "Test project"
"""
import argparse
import asyncio
import logging
import os
import shutil
import signal
import subprocess
import sys
from datetime import datetime, timezone
from dotenv import load_dotenv
# ---------------------------------------------------------------------------
# Custom exceptions
# ---------------------------------------------------------------------------
class AppFactoryError(Exception):
    """Base exception for App Factory.

    All framework-specific errors below derive from this class, so callers
    can catch the whole family with a single ``except AppFactoryError``.
    """


class ClarificationTimeout(AppFactoryError):
    """A task needs human input but timed out waiting."""


class DockerDaemonError(AppFactoryError):
    """Docker daemon is not reachable."""


class GitError(AppFactoryError):
    """Git operation failed."""


class MCPConnectionError(AppFactoryError):
    """MCP server connection failed."""


class ConfigurationError(AppFactoryError):
    """Missing or invalid configuration."""


# Module-level logger; child components log under the "app_factory" namespace.
logger = logging.getLogger("app_factory")
# ---------------------------------------------------------------------------
# CLI
# ---------------------------------------------------------------------------
def parse_args(argv=None):
    """Build the CLI parser and parse *argv* (defaults to ``sys.argv[1:]``)."""
    parser = argparse.ArgumentParser(
        description="App Factory - Autonomous multi-agent orchestration framework.",
    )
    parser.add_argument(
        "--prompt",
        required=True,
        help="Project description for the App Factory to build.",
    )
    parser.add_argument(
        "--repo-path",
        default=os.getcwd(),
        help="Target repository path (default: current directory).",
    )
    parser.add_argument(
        "--max-concurrent-tasks",
        type=int,
        default=5,
        help="Maximum parallel dev agents (default: 5).",
    )
    # Boolean switches share identical wiring; register them in one pass.
    for flag, help_text in (
        ("--debug", "Enable verbose debug logging."),
        ("--dry-run", "Validate configuration without executing."),
    ):
        parser.add_argument(flag, action="store_true", help=help_text)
    return parser.parse_args(argv)
# ---------------------------------------------------------------------------
# Environment validation
# ---------------------------------------------------------------------------
def validate_environment():
    """Check required env vars, Docker daemon, and git availability.

    Returns:
        dict of validated config values: the API key plus LangSmith settings.

    Raises:
        ConfigurationError: If a required env var is missing.
        DockerDaemonError: If Docker is unreachable.
        GitError: If git is not available.
    """
    # API key
    api_key = os.environ.get("ANTHROPIC_API_KEY")
    if not api_key:
        raise ConfigurationError(
            "ANTHROPIC_API_KEY environment variable is required. "
            "Set it in your .env file or export it in your shell."
        )
    # Docker: `docker info` both verifies the CLI exists and that the daemon answers.
    try:
        result = subprocess.run(
            ["docker", "info"],
            capture_output=True,
            text=True,
            timeout=10,
        )
        if result.returncode != 0:
            raise DockerDaemonError(
                "Docker daemon not running. Start Docker Desktop and retry."
            )
    except FileNotFoundError as exc:
        # Chain the original error so tracebacks show the root cause.
        raise DockerDaemonError(
            "Docker not found on PATH. Install Docker and retry."
        ) from exc
    except subprocess.TimeoutExpired as exc:
        raise DockerDaemonError(
            "Docker daemon not responding. Start Docker Desktop and retry."
        ) from exc
    # Git
    if not shutil.which("git"):
        raise GitError("git not found on PATH. Install git and retry.")
    return {
        "api_key": api_key,
        "langsmith_api_key": os.environ.get("LANGSMITH_API_KEY", ""),
        "langsmith_project": os.environ.get("LANGSMITH_PROJECT", "app-factory"),
    }
# ---------------------------------------------------------------------------
# Orchestrator execution
# ---------------------------------------------------------------------------
async def run_factory(args, config: dict) -> dict:
    """Main execution flow: initialize all components and run the orchestrator.

    Args:
        args: Parsed CLI namespace (prompt, repo_path, max_concurrent_tasks, ...).
        config: Validated environment dict from ``validate_environment()``.

    Returns:
        Final state dict from the orchestrator.
    """
    # Deferred imports: `--dry-run` and validation failures never pay the
    # import cost (or import-time side effects) of the heavy agent stack.
    from app_factory.agents.dev_agent import DevAgentManager
    from app_factory.agents.pm_agent import PMAgent
    from app_factory.agents.qa_agent import QAAgent
    from app_factory.agents.task_agent import TaskMasterAgent
    from app_factory.core.architecture_tracker import ArchitectureTracker
    from app_factory.core.graph import AppFactoryOrchestrator
    from app_factory.core.observability import ObservabilityManager
    from app_factory.core.workspace import WorkspaceManager
    api_key = config["api_key"]
    # 1. Observability
    observability = ObservabilityManager(
        project_name=config.get("langsmith_project", "app-factory"),
    )
    # 2. Workspace
    workspace_manager = WorkspaceManager(repo_path=args.repo_path)
    # 3. Architecture tracker
    # NOTE(review): arch_tracker is constructed but never wired into the
    # orchestrator below — confirm whether it should be passed along.
    arch_tracker = ArchitectureTracker(api_key=api_key)
    # 4. Agents
    pm_agent = PMAgent(api_key=api_key)
    task_agent = TaskMasterAgent(project_root=args.repo_path)
    # NOTE(review): args.max_concurrent_tasks is parsed by the CLI but not
    # passed to DevAgentManager — verify the concurrency limit is applied.
    dev_manager = DevAgentManager(
        docker_client=workspace_manager.docker_client,
        max_retries=3,
    )
    qa_agent = QAAgent(repo_path=args.repo_path, api_key=api_key)
    # 5. Orchestrator
    orchestrator = AppFactoryOrchestrator(
        pm_agent=pm_agent,
        task_agent=task_agent,
        dev_manager=dev_manager,
        qa_agent=qa_agent,
        workspace_manager=workspace_manager,
        observability=observability,
    )
    # 6. Set up graceful shutdown (registers SIGINT/SIGTERM handlers)
    GracefulShutdown(workspace_manager=workspace_manager)
    # 7. Execute
    result = await orchestrator.run(args.prompt)
    return result
# ---------------------------------------------------------------------------
# Summary
# ---------------------------------------------------------------------------
def print_summary(result: dict, start_time: datetime):
    """Print the final execution summary banner to stdout."""
    total_seconds = int((datetime.now(timezone.utc) - start_time).total_seconds())
    minutes, seconds = divmod(total_seconds, 60)
    completed = result.get("completed_tasks", [])
    tasks = result.get("tasks", [])
    errors = result.get("errors", [])
    rule = "=" * 60
    print("\n" + rule)
    print("APP FACTORY - Execution Summary")
    print(rule)
    print(f" Tasks completed : {len(completed)} / {len(tasks)}")
    print(f" Execution time : {minutes}m {seconds}s")
    print(f" Iterations : {result.get('iteration_count', 0)}")
    if errors:
        print(f" Errors : {len(errors)}")
        # Show at most the first five errors, truncated to 120 chars each.
        for err in errors[:5]:
            print(f" - {err[:120]}")
        hidden = len(errors) - 5
        if hidden > 0:
            print(f" ... and {hidden} more")
    langsmith_project = os.environ.get("LANGSMITH_PROJECT", "")
    if langsmith_project:
        print(f" LangSmith : project={langsmith_project}")
    print(rule)
# ---------------------------------------------------------------------------
# Graceful shutdown
# ---------------------------------------------------------------------------
class GracefulShutdown:
    """Signal handler for SIGINT/SIGTERM with two-stage shutdown.

    The first signal requests a graceful shutdown (and schedules async
    workspace cleanup if an event loop is running); a second signal
    force-exits the process.
    """

    def __init__(self, workspace_manager=None):
        # True once the first shutdown signal has been received.
        self.shutdown_requested = False
        self.workspace_manager = workspace_manager
        # Reference to the in-flight cleanup task. Held so the event loop
        # does not garbage-collect the task before it finishes (asyncio only
        # keeps weak references to scheduled tasks).
        self._cleanup_task = None
        signal.signal(signal.SIGINT, self._handler)
        signal.signal(signal.SIGTERM, self._handler)

    def _handler(self, signum, frame):
        """Two-stage handler: graceful on the first signal, hard exit on the second."""
        if self.shutdown_requested:
            print("\nForce exit.")
            sys.exit(1)
        self.shutdown_requested = True
        print("\nShutting down gracefully... (press Ctrl+C again to force)")
        if self.workspace_manager:
            try:
                loop = asyncio.get_running_loop()
                self._cleanup_task = loop.create_task(
                    self.workspace_manager.cleanup_all()
                )
            except RuntimeError:
                # No running loop - run synchronously is not possible for async cleanup
                pass
# ---------------------------------------------------------------------------
# Entry point
# ---------------------------------------------------------------------------
def main():
    """Entry point: load env, parse args, validate, run factory.

    Exit codes: 0 on success or dry-run, 1 on configuration/Docker/git or
    fatal runtime errors, 130 on Ctrl+C (conventional SIGINT status).
    """
    load_dotenv()
    args = parse_args()
    # Logging
    level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(
        level=level,
        format="[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s",
        datefmt="%Y-%m-%dT%H:%M:%S",
    )
    # Validate or dry-run: each failure class gets its own message prefix.
    try:
        config = validate_environment()
    except ConfigurationError as e:
        print(f"Configuration error: {e}", file=sys.stderr)
        sys.exit(1)
    except DockerDaemonError as e:
        print(f"Docker error: {e}", file=sys.stderr)
        sys.exit(1)
    except GitError as e:
        print(f"Git error: {e}", file=sys.stderr)
        sys.exit(1)
    if args.dry_run:
        print("Dry-run: configuration is valid. All checks passed.")
        return
    # Run
    start_time = datetime.now(timezone.utc)
    try:
        result = asyncio.run(run_factory(args, config))
        print_summary(result, start_time)
    except ClarificationTimeout as e:
        # NOTE(review): the message mentions --interactive, but parse_args
        # defines no such flag — confirm the intended option name.
        print(
            f"Clarification timeout: {e}. "
            "Run with --interactive for manual resolution.",
            file=sys.stderr,
        )
        sys.exit(1)
    except KeyboardInterrupt:
        print("\nInterrupted.", file=sys.stderr)
        sys.exit(130)
    except Exception as e:
        # Last-resort guard: log the full traceback, print a short message.
        logger.exception("Fatal error")
        print(f"Fatal error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,326 @@
"""App Factory - Autonomous multi-agent orchestration framework.
Usage:
python main.py --prompt "Build a video transcription service" --repo-path /path/to/project
python main.py --prompt "Build a REST API" --max-concurrent-tasks 3 --debug
python main.py --dry-run --prompt "Test project"
"""
import argparse
import asyncio
import logging
import os
import shutil
import signal
import subprocess
import sys
from datetime import datetime, timezone
from dotenv import load_dotenv
# ---------------------------------------------------------------------------
# Custom exceptions
# ---------------------------------------------------------------------------
class AppFactoryError(Exception):
    """Base exception for App Factory.

    All framework-specific errors below derive from this class, so callers
    can catch the whole family with a single ``except AppFactoryError``.
    """


class ClarificationTimeout(AppFactoryError):
    """A task needs human input but timed out waiting."""


class DockerDaemonError(AppFactoryError):
    """Docker daemon is not reachable."""


class GitError(AppFactoryError):
    """Git operation failed."""


class MCPConnectionError(AppFactoryError):
    """MCP server connection failed."""


class ConfigurationError(AppFactoryError):
    """Missing or invalid configuration."""


# Module-level logger; child components log under the "app_factory" namespace.
logger = logging.getLogger("app_factory")
# ---------------------------------------------------------------------------
# CLI
# ---------------------------------------------------------------------------
def parse_args(argv=None):
    """Build the CLI parser and parse *argv* (defaults to ``sys.argv[1:]``)."""
    parser = argparse.ArgumentParser(
        description="App Factory - Autonomous multi-agent orchestration framework.",
    )
    parser.add_argument(
        "--prompt",
        required=True,
        help="Project description for the App Factory to build.",
    )
    parser.add_argument(
        "--repo-path",
        default=os.getcwd(),
        help="Target repository path (default: current directory).",
    )
    parser.add_argument(
        "--max-concurrent-tasks",
        type=int,
        default=5,
        help="Maximum parallel dev agents (default: 5).",
    )
    # Boolean switches share identical wiring; register them in one pass.
    for flag, help_text in (
        ("--debug", "Enable verbose debug logging."),
        ("--dry-run", "Validate configuration without executing."),
    ):
        parser.add_argument(flag, action="store_true", help=help_text)
    return parser.parse_args(argv)
# ---------------------------------------------------------------------------
# Environment validation
# ---------------------------------------------------------------------------
def validate_environment():
    """Check optional env vars, Docker daemon, and git availability.

    The API key is optional in this variant: Claude Code OAuth is also
    supported, so an empty ``api_key`` is a valid configuration.

    Returns:
        dict of validated config values: the (possibly empty) API key plus
        LangSmith settings.

    Raises:
        DockerDaemonError: If Docker is unreachable.
        GitError: If git is not available.
    """
    # API key (optional — Claude Code OAuth is also supported)
    api_key = os.environ.get("ANTHROPIC_API_KEY", "")
    # Docker: `docker info` both verifies the CLI exists and that the daemon answers.
    try:
        result = subprocess.run(
            ["docker", "info"],
            capture_output=True,
            text=True,
            timeout=10,
        )
        if result.returncode != 0:
            raise DockerDaemonError(
                "Docker daemon not running. Start Docker Desktop and retry."
            )
    except FileNotFoundError as exc:
        # Chain the original error so tracebacks show the root cause.
        raise DockerDaemonError(
            "Docker not found on PATH. Install Docker and retry."
        ) from exc
    except subprocess.TimeoutExpired as exc:
        raise DockerDaemonError(
            "Docker daemon not responding. Start Docker Desktop and retry."
        ) from exc
    # Git
    if not shutil.which("git"):
        raise GitError("git not found on PATH. Install git and retry.")
    return {
        "api_key": api_key,
        "langsmith_api_key": os.environ.get("LANGSMITH_API_KEY", ""),
        "langsmith_project": os.environ.get("LANGSMITH_PROJECT", "app-factory"),
    }
# ---------------------------------------------------------------------------
# Orchestrator execution
# ---------------------------------------------------------------------------
async def run_factory(args, config: dict) -> dict:
    """Main execution flow: initialize all components and run the orchestrator.

    Args:
        args: Parsed CLI namespace (prompt, repo_path, max_concurrent_tasks, ...).
        config: Validated environment dict from ``validate_environment()``.

    Returns:
        Final state dict from the orchestrator.
    """
    # Deferred imports: `--dry-run` and validation failures never pay the
    # import cost (or import-time side effects) of the heavy agent stack.
    from app_factory.agents.dev_agent import DevAgentManager
    from app_factory.agents.pm_agent import PMAgent
    from app_factory.agents.qa_agent import QAAgent
    from app_factory.agents.task_agent import TaskMasterAgent
    from app_factory.core.architecture_tracker import ArchitectureTracker
    from app_factory.core.graph import AppFactoryOrchestrator
    from app_factory.core.observability import ObservabilityManager
    from app_factory.core.workspace import WorkspaceManager
    # Normalize the empty-string OAuth case to None for the agent constructors.
    api_key = config.get("api_key") or None
    # 1. Observability
    observability = ObservabilityManager(
        project_name=config.get("langsmith_project", "app-factory"),
    )
    # 2. Workspace
    workspace_manager = WorkspaceManager(repo_path=args.repo_path)
    # 3. Architecture tracker
    # NOTE(review): arch_tracker is constructed but never wired into the
    # orchestrator below — confirm whether it should be passed along.
    arch_tracker = ArchitectureTracker(api_key=api_key)
    # 4. Agents
    pm_agent = PMAgent(api_key=api_key)
    task_agent = TaskMasterAgent(project_root=args.repo_path)
    # NOTE(review): args.max_concurrent_tasks is parsed by the CLI but not
    # passed to DevAgentManager — verify the concurrency limit is applied.
    dev_manager = DevAgentManager(
        docker_client=workspace_manager.docker_client,
        max_retries=3,
    )
    qa_agent = QAAgent(repo_path=args.repo_path, api_key=api_key)
    # 5. Orchestrator
    orchestrator = AppFactoryOrchestrator(
        pm_agent=pm_agent,
        task_agent=task_agent,
        dev_manager=dev_manager,
        qa_agent=qa_agent,
        workspace_manager=workspace_manager,
        observability=observability,
    )
    # 6. Set up graceful shutdown (registers SIGINT/SIGTERM handlers)
    GracefulShutdown(workspace_manager=workspace_manager)
    # 7. Execute
    result = await orchestrator.run(args.prompt)
    return result
# ---------------------------------------------------------------------------
# Summary
# ---------------------------------------------------------------------------
def print_summary(result: dict, start_time: datetime):
    """Print the final execution summary banner to stdout."""
    total_seconds = int((datetime.now(timezone.utc) - start_time).total_seconds())
    minutes, seconds = divmod(total_seconds, 60)
    completed = result.get("completed_tasks", [])
    tasks = result.get("tasks", [])
    errors = result.get("errors", [])
    rule = "=" * 60
    print("\n" + rule)
    print("APP FACTORY - Execution Summary")
    print(rule)
    print(f" Tasks completed : {len(completed)} / {len(tasks)}")
    print(f" Execution time : {minutes}m {seconds}s")
    print(f" Iterations : {result.get('iteration_count', 0)}")
    if errors:
        print(f" Errors : {len(errors)}")
        # Show at most the first five errors, truncated to 120 chars each.
        for err in errors[:5]:
            print(f" - {err[:120]}")
        hidden = len(errors) - 5
        if hidden > 0:
            print(f" ... and {hidden} more")
    langsmith_project = os.environ.get("LANGSMITH_PROJECT", "")
    if langsmith_project:
        print(f" LangSmith : project={langsmith_project}")
    print(rule)
# ---------------------------------------------------------------------------
# Graceful shutdown
# ---------------------------------------------------------------------------
class GracefulShutdown:
    """Signal handler for SIGINT/SIGTERM with two-stage shutdown.

    The first signal requests a graceful shutdown (and schedules async
    workspace cleanup if an event loop is running); a second signal
    force-exits the process.
    """

    def __init__(self, workspace_manager=None):
        # True once the first shutdown signal has been received.
        self.shutdown_requested = False
        self.workspace_manager = workspace_manager
        # Reference to the in-flight cleanup task. Held so the event loop
        # does not garbage-collect the task before it finishes (asyncio only
        # keeps weak references to scheduled tasks).
        self._cleanup_task = None
        signal.signal(signal.SIGINT, self._handler)
        signal.signal(signal.SIGTERM, self._handler)

    def _handler(self, signum, frame):
        """Two-stage handler: graceful on the first signal, hard exit on the second."""
        if self.shutdown_requested:
            print("\nForce exit.")
            sys.exit(1)
        self.shutdown_requested = True
        print("\nShutting down gracefully... (press Ctrl+C again to force)")
        if self.workspace_manager:
            try:
                loop = asyncio.get_running_loop()
                self._cleanup_task = loop.create_task(
                    self.workspace_manager.cleanup_all()
                )
            except RuntimeError:
                # No running loop - run synchronously is not possible for async cleanup
                pass
# ---------------------------------------------------------------------------
# Entry point
# ---------------------------------------------------------------------------
def main():
    """Entry point: load env, parse args, validate, run factory.

    Exit codes: 0 on success or dry-run, 1 on configuration/Docker/git or
    fatal runtime errors, 130 on Ctrl+C (conventional SIGINT status).
    """
    load_dotenv()
    args = parse_args()
    # Logging
    level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(
        level=level,
        format="[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s",
        datefmt="%Y-%m-%dT%H:%M:%S",
    )
    # Validate or dry-run: each failure class gets its own message prefix.
    try:
        config = validate_environment()
    except ConfigurationError as e:
        print(f"Configuration error: {e}", file=sys.stderr)
        sys.exit(1)
    except DockerDaemonError as e:
        print(f"Docker error: {e}", file=sys.stderr)
        sys.exit(1)
    except GitError as e:
        print(f"Git error: {e}", file=sys.stderr)
        sys.exit(1)
    if args.dry_run:
        print("Dry-run: configuration is valid. All checks passed.")
        return
    # Run
    start_time = datetime.now(timezone.utc)
    try:
        result = asyncio.run(run_factory(args, config))
        print_summary(result, start_time)
    except ClarificationTimeout as e:
        # NOTE(review): the message mentions --interactive, but parse_args
        # defines no such flag — confirm the intended option name.
        print(
            f"Clarification timeout: {e}. "
            "Run with --interactive for manual resolution.",
            file=sys.stderr,
        )
        sys.exit(1)
    except KeyboardInterrupt:
        print("\nInterrupted.", file=sys.stderr)
        sys.exit(130)
    except Exception as e:
        # Last-resort guard: log the full traceback, print a short message.
        logger.exception("Fatal error")
        print(f"Fatal error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,151 @@
# App Factory
Autonomous multi-agent orchestration framework. Give it a natural language prompt, get back a fully developed, QA-verified, and merged codebase.
## How It Works
```
User prompt
→ PM Agent (expands into structured PRD)
→ Task Agent (generates prioritized dependency graph via claude-task-master)
→ Dev Agents (concurrent, isolated Docker containers with Claude Code)
→ QA Agent (code review, tests, rebase, merge to main)
→ Done
```
If any agent gets blocked, the flow reverses through a **clarification loop** — Dev asks Task, Task asks PM, PM asks the human — while other agents keep working.
## Quick Start
### Prerequisites
- Python 3.11+
- Docker Desktop (running)
- Git
- [Anthropic API key](https://console.anthropic.com/)
### Setup
```bash
# Clone and enter project
git clone <repo-url> && cd ai_ops2
# Create venv and install dependencies
uv venv
uv pip install -r requirements.txt
# Configure environment
cp .env.example .env
# Edit .env and add your ANTHROPIC_API_KEY (required)
# Optionally add LANGSMITH_API_KEY for tracing
```
### Run
```bash
# Build a project from a prompt
python main.py --prompt "Build a video transcription service with Whisper and summarization"
# Limit concurrent dev agents
python main.py --prompt "Build a REST API" --max-concurrent-tasks 3
# Target a specific repo
python main.py --prompt "Add user authentication" --repo-path /path/to/project
# Validate config without executing
python main.py --dry-run --prompt "test"
# Verbose logging
python main.py --prompt "Build a CLI tool" --debug
```
## Architecture
### Agents
| Agent | File | Role |
|-------|------|------|
| **PMAgent** | `agents/pm_agent.py` | Expands prompts into PRDs, handles clarification requests |
| **TaskMasterAgent** | `agents/task_agent.py` | Bridges to claude-task-master for task graph management |
| **DevAgentManager** | `agents/dev_agent.py` | Spawns Claude Code in Docker containers via pexpect |
| **QAAgent** | `agents/qa_agent.py` | Code review, linting, testing, rebase, and merge |
### Core
| Component | File | Role |
|-----------|------|------|
| **AppFactoryOrchestrator** | `core/graph.py` | LangGraph state machine with conditional routing |
| **WorkspaceManager** | `core/workspace.py` | Git worktree + Docker container lifecycle |
| **ObservabilityManager** | `core/observability.py` | LangSmith tracing + structured logging |
| **ArchitectureTracker** | `core/architecture_tracker.py` | Prevents context starvation across dev agents |
### Project Structure
```
app_factory/
├── agents/
│ ├── pm_agent.py # PRD generation + clarification
│ ├── task_agent.py # claude-task-master interface
│ ├── dev_agent.py # Claude Code + Docker orchestration
│ └── qa_agent.py # Review, test, merge pipeline
├── core/
│ ├── graph.py # LangGraph state machine
│ ├── workspace.py # Git worktree + Docker isolation
│ ├── observability.py # LangSmith tracing + logging
│ └── architecture_tracker.py # Global architecture summary
├── prompts/ # Agent prompt templates
│ ├── pm_prd_expansion.txt
│ ├── pm_clarification.txt
│ ├── dev_task_execution.txt
│ └── qa_review.txt
└── data/ # Runtime state + architecture tracking
```
## Execution Phases
1. **Linear Planning** — User → PM Agent → Task Agent. Produces a prioritized DAG of tasks.
2. **Dynamic Concurrency** — Orchestrator spins up a WorkspaceManager + DevAgent for every unblocked task concurrently via `asyncio.gather()`.
3. **Clarification Loop** — Blocked agents route requests backward up the chain. Other agents continue uninterrupted.
4. **QA & Merge** — QA Agent rebases, lints, tests, reviews, and merges each completed task. Task Agent then unlocks downstream dependencies.
## Design Decisions
- **Context Starvation Prevention**: A read-only `ArchitectureTracker` summary is injected into every Dev Agent prompt so they know what other agents have built.
- **Merge Conflict Handling**: QA Agent rebases onto main before testing. Complex conflicts are kicked back to the Dev Agent automatically.
- **Infinite Loop Protection**: Max retry counter (3) per task at the LangGraph node level. Exceeded retries escalate to PM → human.
- **Claude Code Automation**: Dev agents interact with Claude Code via `pexpect` subprocess in headless mode inside Docker containers.
## Testing
```bash
# Run full test suite
python -m pytest tests/ -v
# Run specific test file
python -m pytest tests/test_graph.py -v
# Run with coverage
python -m pytest tests/ --cov=app_factory --cov-report=term-missing
```
**229 tests** across 9 test files covering all agents, core components, and integration.
## Configuration
### Required
| Variable | Description |
|----------|-------------|
| `ANTHROPIC_API_KEY` | Claude API key for PM, QA, and Dev agents |
### Optional
| Variable | Description |
|----------|-------------|
| `OPENAI_API_KEY` | Codex fallback for algorithmic generation |
| `LANGSMITH_API_KEY` | LangSmith tracing and observability |
| `LANGSMITH_PROJECT` | LangSmith project name (default: `app-factory`) |
## License
MIT

View File

@@ -0,0 +1,155 @@
# App Factory
Autonomous multi-agent orchestration framework. Give it a natural language prompt, get back a fully developed, QA-verified, and merged codebase.
## How It Works
```
User prompt
→ PM Agent (expands into structured PRD)
→ Task Agent (generates prioritized dependency graph via claude-task-master)
→ Dev Agents (concurrent, isolated Docker containers with Claude Code)
→ QA Agent (code review, tests, rebase, merge to main)
→ Done
```
If any agent gets blocked, the flow reverses through a **clarification loop** — Dev asks Task, Task asks PM, PM asks the human — while other agents keep working.
## Quick Start
### Prerequisites
- Python 3.11+
- Docker Desktop (running)
- Git
- Claude Code with OAuth **or** an [Anthropic API key](https://console.anthropic.com/)
### Setup
```bash
# Clone and enter project
git clone <repo-url> && cd ai_ops2
# Create venv and install dependencies
uv venv
uv pip install -r requirements.txt
# Configure environment (optional — not needed with Claude Code OAuth)
cp .env.example .env
# Edit .env to add API keys if not using OAuth
# Optionally add LANGSMITH_API_KEY for tracing
```
### Run
```bash
# Build a project from a prompt
python main.py --prompt "Build a video transcription service with Whisper and summarization"
# Limit concurrent dev agents
python main.py --prompt "Build a REST API" --max-concurrent-tasks 3
# Target a specific repo
python main.py --prompt "Add user authentication" --repo-path /path/to/project
# Validate config without executing
python main.py --dry-run --prompt "test"
# Verbose logging
python main.py --prompt "Build a CLI tool" --debug
```
## Architecture
### Agents
| Agent | File | Role |
|-------|------|------|
| **PMAgent** | `agents/pm_agent.py` | Expands prompts into PRDs, handles clarification requests |
| **TaskMasterAgent** | `agents/task_agent.py` | Bridges to claude-task-master for task graph management |
| **DevAgentManager** | `agents/dev_agent.py` | Spawns Claude Code in Docker containers via pexpect |
| **QAAgent** | `agents/qa_agent.py` | Code review, linting, testing, rebase, and merge |
### Core
| Component | File | Role |
|-----------|------|------|
| **AppFactoryOrchestrator** | `core/graph.py` | LangGraph state machine with conditional routing |
| **WorkspaceManager** | `core/workspace.py` | Git worktree + Docker container lifecycle |
| **ObservabilityManager** | `core/observability.py` | LangSmith tracing + structured logging |
| **ArchitectureTracker** | `core/architecture_tracker.py` | Prevents context starvation across dev agents |
### Project Structure
```
app_factory/
├── agents/
│ ├── pm_agent.py # PRD generation + clarification
│ ├── task_agent.py # claude-task-master interface
│ ├── dev_agent.py # Claude Code + Docker orchestration
│ └── qa_agent.py # Review, test, merge pipeline
├── core/
│ ├── graph.py # LangGraph state machine
│ ├── workspace.py # Git worktree + Docker isolation
│ ├── observability.py # LangSmith tracing + logging
│ └── architecture_tracker.py # Global architecture summary
├── prompts/ # Agent prompt templates
│ ├── pm_prd_expansion.txt
│ ├── pm_clarification.txt
│ ├── dev_task_execution.txt
│ └── qa_review.txt
└── data/ # Runtime state + architecture tracking
```
## Execution Phases
1. **Linear Planning** — User → PM Agent → Task Agent. Produces a prioritized DAG of tasks.
2. **Dynamic Concurrency** — Orchestrator spins up a WorkspaceManager + DevAgent for every unblocked task concurrently via `asyncio.gather()`.
3. **Clarification Loop** — Blocked agents route requests backward up the chain. Other agents continue uninterrupted.
4. **QA & Merge** — QA Agent rebases, lints, tests, reviews, and merges each completed task. Task Agent then unlocks downstream dependencies.
## Design Decisions
- **Context Starvation Prevention**: A read-only `ArchitectureTracker` summary is injected into every Dev Agent prompt so they know what other agents have built.
- **Merge Conflict Handling**: QA Agent rebases onto main before testing. Complex conflicts are kicked back to the Dev Agent automatically.
- **Infinite Loop Protection**: Max retry counter (3) per task at the LangGraph node level. Exceeded retries escalate to PM → human.
- **Claude Code Automation**: Dev agents interact with Claude Code via `pexpect` subprocess in headless mode inside Docker containers.
## Testing
```bash
# Run full test suite
python -m pytest tests/ -v
# Run specific test file
python -m pytest tests/test_graph.py -v
# Run with coverage
python -m pytest tests/ --cov=app_factory --cov-report=term-missing
```
**229 tests** across 9 test files covering all agents, core components, and integration.
## Configuration
### Authentication
App Factory supports two auth modes:
- **Claude Code OAuth** (default) — If you use Claude Code with OAuth, no API key is needed. The Anthropic SDK picks up your auth automatically.
- **API key** — Set `ANTHROPIC_API_KEY` in `.env` for direct API access.
### Environment Variables
| Variable | Required | Description |
|----------|----------|-------------|
| `ANTHROPIC_API_KEY` | No* | Claude API key. Not needed with Claude Code OAuth. |
| `OPENAI_API_KEY` | No | Codex fallback for algorithmic generation |
| `LANGSMITH_API_KEY` | No | LangSmith tracing and observability |
| `LANGSMITH_PROJECT` | No | LangSmith project name (default: `app-factory`) |
*Required only if not using Claude Code OAuth.*
## License
MIT

View File

@@ -0,0 +1,548 @@
"""Tests for QAAgent."""
import os
import subprocess
from types import SimpleNamespace
from unittest.mock import AsyncMock, MagicMock, patch
import git as gitmod
import pytest
from app_factory.agents.qa_agent import QAAgent
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _make_review_response(text, input_tokens=10, output_tokens=20):
"""Build a fake Anthropic messages.create response."""
return SimpleNamespace(
content=[SimpleNamespace(text=text)],
usage=SimpleNamespace(input_tokens=input_tokens, output_tokens=output_tokens),
)
def _build_agent(repo_path="/fake/repo", **kwargs):
    """Create a QAAgent with mocked git.Repo and Anthropic client."""
    with patch("app_factory.agents.qa_agent.git.Repo") as repo_cls, \
            patch("app_factory.agents.qa_agent.anthropic") as anthro_mod:
        fake_repo = MagicMock()
        repo_cls.return_value = fake_repo
        fake_client = AsyncMock()
        anthro_mod.AsyncAnthropic.return_value = fake_client
        agent = QAAgent(repo_path=repo_path, api_key="test-key", **kwargs)
        # Re-attach the fakes so callers can inspect them after construction.
        agent.client = fake_client
        agent.repo = fake_repo
        return agent, fake_repo, fake_client
# Canned model outputs in the APPROVED/ISSUES/SUMMARY format the QAAgent parses.
# An approval with a single informational issue.
APPROVED_REVIEW = """\
APPROVED: true
ISSUES:
- [severity: info] Minor style suggestion
SUMMARY: Code looks good overall."""

# A rejection carrying one critical and one warning issue.
REJECTED_REVIEW = """\
APPROVED: false
ISSUES:
- [severity: critical] SQL injection in query builder
- [severity: warning] Missing input validation
SUMMARY: Critical security issue found."""
# ---------------------------------------------------------------------------
# Initialization
# ---------------------------------------------------------------------------
class TestInitialization:
    """QAAgent construction and configuration."""

    def test_missing_api_key_raises(self):
        """Without an api_key argument or ANTHROPIC_API_KEY env var, init fails.

        The original version cleared the environment and then copied/popped/
        re-patched it — a no-op sequence; a single ``clear=True`` patch is
        sufficient to guarantee the env var is absent.
        """
        with patch("app_factory.agents.qa_agent.git.Repo"), \
                patch("app_factory.agents.qa_agent.anthropic"), \
                patch.dict(os.environ, {}, clear=True):
            with pytest.raises(ValueError, match="API key required"):
                QAAgent(repo_path="/fake")

    def test_creates_with_api_key(self):
        agent, mock_repo, _ = _build_agent()
        assert agent.max_retries == 3

    def test_custom_max_retries(self):
        agent, _, _ = _build_agent(max_retries=5)
        assert agent.max_retries == 5
# ---------------------------------------------------------------------------
# Rebase
# ---------------------------------------------------------------------------
class TestRebaseOntoMain:
    """rebase_onto_main: clean rebase, conflict handling, fetch failure."""
    @pytest.mark.asyncio
    async def test_rebase_success(self):
        """A clean rebase reports success with no conflicts."""
        agent, _, _ = _build_agent()
        mock_wt_repo = MagicMock()
        with patch("app_factory.agents.qa_agent.git.Repo", return_value=mock_wt_repo):
            result = await agent.rebase_onto_main("/worktree/path", "task-1")
        assert result["success"] is True
        assert result["conflicts"] == []
        mock_wt_repo.git.rebase.assert_called_once_with("main")
    @pytest.mark.asyncio
    async def test_rebase_conflict_unresolvable(self):
        """When auto-resolution fails, the conflicted files are surfaced."""
        agent, _, _ = _build_agent()
        mock_wt_repo = MagicMock()
        mock_wt_repo.git.rebase.side_effect = gitmod.GitCommandError("rebase", "CONFLICT")
        mock_wt_repo.git.status.return_value = "UU conflicted_file.py"
        with patch("app_factory.agents.qa_agent.git.Repo", return_value=mock_wt_repo), \
                patch.object(agent, "auto_resolve_conflicts", return_value=False):
            result = await agent.rebase_onto_main("/worktree/path", "task-1")
        assert result["success"] is False
        assert "conflicted_file.py" in result["conflicts"]
    @pytest.mark.asyncio
    async def test_rebase_conflict_auto_resolved(self):
        """A conflict that auto-resolution fixes still counts as success."""
        agent, _, _ = _build_agent()
        mock_wt_repo = MagicMock()
        mock_wt_repo.git.rebase.side_effect = gitmod.GitCommandError("rebase", "CONFLICT")
        mock_wt_repo.git.status.return_value = "UU file.py"
        with patch("app_factory.agents.qa_agent.git.Repo", return_value=mock_wt_repo), \
                patch.object(agent, "auto_resolve_conflicts", return_value=True):
            result = await agent.rebase_onto_main("/worktree/path", "task-1")
        assert result["success"] is True
    @pytest.mark.asyncio
    async def test_fetch_failure_continues(self):
        """If fetch fails (no remote), rebase should still be attempted."""
        agent, _, _ = _build_agent()
        mock_wt_repo = MagicMock()
        mock_wt_repo.git.fetch.side_effect = gitmod.GitCommandError("fetch", "No remote")
        with patch("app_factory.agents.qa_agent.git.Repo", return_value=mock_wt_repo):
            result = await agent.rebase_onto_main("/worktree/path", "task-1")
        assert result["success"] is True
        mock_wt_repo.git.rebase.assert_called_once_with("main")
# ---------------------------------------------------------------------------
# Linter
# ---------------------------------------------------------------------------
class TestRunLinter:
    """run_linter: ruff output parsing plus missing-tool and timeout paths."""
    def test_lint_passes(self):
        agent, _, _ = _build_agent()
        mock_result = subprocess.CompletedProcess(
            args=["ruff", "check", "."],
            returncode=0,
            stdout="All checks passed!\n",
            stderr="",
        )
        with patch("app_factory.agents.qa_agent.subprocess.run", return_value=mock_result):
            result = agent.run_linter("/worktree/path")
        assert result["passed"] is True
        assert result["errors"] == []
    def test_lint_fails_with_errors(self):
        """Each ruff diagnostic line becomes one entry in result["errors"]."""
        agent, _, _ = _build_agent()
        ruff_output = (
            "app/main.py:10:1: E501 Line too long (120 > 88 characters)\n"
            "app/main.py:15:5: F841 Local variable 'x' is assigned but never used\n"
            "Found 2 errors.\n"
        )
        mock_result = subprocess.CompletedProcess(
            args=["ruff", "check", "."],
            returncode=1,
            stdout=ruff_output,
            stderr="",
        )
        with patch("app_factory.agents.qa_agent.subprocess.run", return_value=mock_result):
            result = agent.run_linter("/worktree/path")
        assert result["passed"] is False
        assert len(result["errors"]) == 2
    def test_lint_ruff_not_found(self):
        # A missing ruff binary is a soft pass with a warning, not a failure.
        agent, _, _ = _build_agent()
        with patch("app_factory.agents.qa_agent.subprocess.run", side_effect=FileNotFoundError):
            result = agent.run_linter("/worktree/path")
        assert result["passed"] is True
        assert "ruff not found" in result["warnings"][0]
    def test_lint_timeout(self):
        # A hung linter is a hard failure.
        agent, _, _ = _build_agent()
        with patch("app_factory.agents.qa_agent.subprocess.run",
                   side_effect=subprocess.TimeoutExpired(cmd="ruff", timeout=120)):
            result = agent.run_linter("/worktree/path")
        assert result["passed"] is False
        assert "timed out" in result["errors"][0]
# ---------------------------------------------------------------------------
# Tests
# ---------------------------------------------------------------------------
class TestRunTests:
    """run_tests: pytest invocation and summary parsing."""
    def test_all_tests_pass(self):
        agent, _, _ = _build_agent()
        pytest_output = (
            "tests/test_foo.py::test_one PASSED\n"
            "tests/test_foo.py::test_two PASSED\n"
            "========================= 2 passed =========================\n"
        )
        mock_result = subprocess.CompletedProcess(
            args=["python", "-m", "pytest"],
            returncode=0,
            stdout=pytest_output,
            stderr="",
        )
        with patch("app_factory.agents.qa_agent.subprocess.run", return_value=mock_result):
            result = agent.run_tests("/worktree/path")
        assert result["passed"] is True
        assert result["total"] == 2
        assert result["failures"] == 0
        assert result["errors"] == 0
    def test_some_tests_fail(self):
        agent, _, _ = _build_agent()
        pytest_output = (
            "tests/test_foo.py::test_one PASSED\n"
            "tests/test_foo.py::test_two FAILED\n"
            "=================== 1 failed, 1 passed ====================\n"
        )
        mock_result = subprocess.CompletedProcess(
            args=["python", "-m", "pytest"],
            returncode=1,
            stdout=pytest_output,
            stderr="",
        )
        with patch("app_factory.agents.qa_agent.subprocess.run", return_value=mock_result):
            result = agent.run_tests("/worktree/path")
        assert result["passed"] is False
        assert result["total"] == 2
        assert result["failures"] == 1
    def test_pytest_not_found(self):
        # Unlike a missing ruff (soft pass in TestRunLinter), a missing
        # pytest is treated as a hard failure.
        agent, _, _ = _build_agent()
        with patch("app_factory.agents.qa_agent.subprocess.run", side_effect=FileNotFoundError):
            result = agent.run_tests("/worktree/path")
        assert result["passed"] is False
        assert "pytest not found" in result["output"]
    def test_pytest_timeout(self):
        agent, _, _ = _build_agent()
        with patch("app_factory.agents.qa_agent.subprocess.run",
                   side_effect=subprocess.TimeoutExpired(cmd="pytest", timeout=300)):
            result = agent.run_tests("/worktree/path")
        assert result["passed"] is False
        assert "timed out" in result["output"]
# ---------------------------------------------------------------------------
# Code Review
# ---------------------------------------------------------------------------
class TestCodeReview:
    """code_review: model call, prompt content, and response parsing."""
    @pytest.mark.asyncio
    async def test_review_approved(self):
        agent, _, mock_client = _build_agent()
        mock_client.messages.create = AsyncMock(
            return_value=_make_review_response(APPROVED_REVIEW)
        )
        result = await agent.code_review("diff content", task={"id": "1", "title": "Add feature"})
        assert result["approved"] is True
        assert len(result["issues"]) == 1
        assert result["issues"][0]["severity"] == "info"
        assert result["summary"] != ""
    @pytest.mark.asyncio
    async def test_review_rejected(self):
        agent, _, mock_client = _build_agent()
        mock_client.messages.create = AsyncMock(
            return_value=_make_review_response(REJECTED_REVIEW)
        )
        result = await agent.code_review("diff with issues")
        assert result["approved"] is False
        assert len(result["issues"]) == 2
        assert result["issues"][0]["severity"] == "critical"
        assert "security" in result["summary"].lower()
    @pytest.mark.asyncio
    async def test_review_no_task_context(self):
        """Review without task metadata still calls the model exactly once."""
        agent, _, mock_client = _build_agent()
        mock_client.messages.create = AsyncMock(
            return_value=_make_review_response(APPROVED_REVIEW)
        )
        result = await agent.code_review("diff content", task=None)
        assert result["approved"] is True
        mock_client.messages.create.assert_awaited_once()
    @pytest.mark.asyncio
    async def test_review_loads_template(self):
        """The prompt sent to the model contains the review-template text."""
        agent, _, mock_client = _build_agent()
        mock_client.messages.create = AsyncMock(
            return_value=_make_review_response(APPROVED_REVIEW)
        )
        await agent.code_review("some diff")
        call_args = mock_client.messages.create.call_args
        prompt_text = call_args.kwargs["messages"][0]["content"]
        assert "Review Checklist" in prompt_text
        assert "OWASP" in prompt_text
# ---------------------------------------------------------------------------
# Merge to main
# ---------------------------------------------------------------------------
class TestMergeToMain:
    """merge_to_main: --no-ff merge of the task's feature branch."""
    def test_merge_success(self):
        agent, mock_repo, _ = _build_agent()
        mock_repo.head.commit.hexsha = "abc123def456"
        result = agent.merge_to_main("/worktree/path", "42")
        assert result["success"] is True
        assert result["commit_sha"] == "abc123def456"
        mock_repo.git.checkout.assert_called_once_with("main")
        mock_repo.git.merge.assert_called_once_with(
            "--no-ff", "feature/task-42", m="Merge feature/task-42"
        )
    def test_merge_failure(self):
        # A conflicting merge reports failure and no commit sha.
        agent, mock_repo, _ = _build_agent()
        mock_repo.git.merge.side_effect = gitmod.GitCommandError("merge", "conflict")
        result = agent.merge_to_main("/worktree/path", "42")
        assert result["success"] is False
        assert result["commit_sha"] is None
# ---------------------------------------------------------------------------
# Full pipeline: review_and_merge
# ---------------------------------------------------------------------------
class TestReviewAndMerge:
    """Full pipeline: rebase -> lint -> tests -> review -> merge, with each
    stage's failure short-circuiting to a distinct status."""
    @pytest.mark.asyncio
    async def test_happy_path(self):
        """Every stage passes, so the task merges and the sha is reported."""
        agent, mock_repo, _ = _build_agent()
        mock_repo.head.commit.hexsha = "merged123"
        mock_wt_repo = MagicMock()
        mock_wt_repo.git.diff.return_value = "diff --git a/file.py"
        with patch.object(agent, "rebase_onto_main", new_callable=AsyncMock,
                          return_value={"success": True, "conflicts": []}), \
                patch.object(agent, "run_linter",
                             return_value={"passed": True, "errors": [], "warnings": []}), \
                patch.object(agent, "run_tests",
                             return_value={"passed": True, "total": 5, "failures": 0, "errors": 0, "output": "ok"}), \
                patch.object(agent, "code_review", new_callable=AsyncMock,
                             return_value={"approved": True, "issues": [], "summary": "All good"}), \
                patch.object(agent, "merge_to_main",
                             return_value={"success": True, "commit_sha": "merged123"}), \
                patch("app_factory.agents.qa_agent.git.Repo", return_value=mock_wt_repo):
            result = await agent.review_and_merge("task-1", "/worktree/path")
        assert result["status"] == "merged"
        assert result["commit_sha"] == "merged123"
    @pytest.mark.asyncio
    async def test_rebase_failure(self):
        agent, _, _ = _build_agent()
        with patch.object(agent, "rebase_onto_main", new_callable=AsyncMock,
                          return_value={"success": False, "conflicts": ["file.py"]}):
            result = await agent.review_and_merge("task-2", "/worktree/path")
        assert result["status"] == "rebase_failed"
        assert "file.py" in result["conflicts"]
        assert result["retry_count"] == 1
    @pytest.mark.asyncio
    async def test_lint_failure(self):
        agent, _, _ = _build_agent()
        with patch.object(agent, "rebase_onto_main", new_callable=AsyncMock,
                          return_value={"success": True, "conflicts": []}), \
                patch.object(agent, "run_linter",
                             return_value={"passed": False, "errors": ["E501 line too long"], "warnings": []}):
            result = await agent.review_and_merge("task-3", "/worktree/path")
        assert result["status"] == "lint_failed"
        assert len(result["errors"]) == 1
    @pytest.mark.asyncio
    async def test_test_failure(self):
        # Note: no worktree repo mock is needed here because the pipeline
        # stops before the diff is taken (the original created an unused one).
        agent, _, _ = _build_agent()
        with patch.object(agent, "rebase_onto_main", new_callable=AsyncMock,
                          return_value={"success": True, "conflicts": []}), \
                patch.object(agent, "run_linter",
                             return_value={"passed": True, "errors": [], "warnings": []}), \
                patch.object(agent, "run_tests",
                             return_value={"passed": False, "total": 3, "failures": 1, "errors": 0, "output": "FAILED"}):
            result = await agent.review_and_merge("task-4", "/worktree/path")
        assert result["status"] == "tests_failed"
        assert result["failures"] == 1
    @pytest.mark.asyncio
    async def test_review_failure(self):
        agent, _, _ = _build_agent()
        mock_wt_repo = MagicMock()
        mock_wt_repo.git.diff.return_value = "diff"
        with patch.object(agent, "rebase_onto_main", new_callable=AsyncMock,
                          return_value={"success": True, "conflicts": []}), \
                patch.object(agent, "run_linter",
                             return_value={"passed": True, "errors": [], "warnings": []}), \
                patch.object(agent, "run_tests",
                             return_value={"passed": True, "total": 3, "failures": 0, "errors": 0, "output": "ok"}), \
                patch.object(agent, "code_review", new_callable=AsyncMock,
                             return_value={"approved": False, "issues": [{"severity": "critical", "description": "vuln"}], "summary": "Bad"}), \
                patch("app_factory.agents.qa_agent.git.Repo", return_value=mock_wt_repo):
            result = await agent.review_and_merge("task-5", "/worktree/path")
        assert result["status"] == "review_failed"
        assert len(result["issues"]) == 1
# ---------------------------------------------------------------------------
# Parse test results
# ---------------------------------------------------------------------------
class TestParseTestResults:
    """parse_test_results: counting passed/failed/error from pytest summaries."""
    def test_all_passed(self):
        agent, _, _ = _build_agent()
        output = "========================= 5 passed =========================\n"
        result = agent.parse_test_results(output)
        assert result["passed"] is True
        assert result["total"] == 5
        assert result["failures"] == 0
        assert result["errors"] == 0
    def test_mixed_results(self):
        # total is the sum of failed + passed + error counts.
        agent, _, _ = _build_agent()
        output = "================ 1 failed, 4 passed, 1 error ================\n"
        result = agent.parse_test_results(output)
        assert result["passed"] is False
        assert result["total"] == 6
        assert result["failures"] == 1
        assert result["errors"] == 1
    def test_all_failed(self):
        agent, _, _ = _build_agent()
        output = "========================= 3 failed =========================\n"
        result = agent.parse_test_results(output)
        assert result["passed"] is False
        assert result["total"] == 3
        assert result["failures"] == 3
    def test_no_tests(self):
        # "no tests ran" must not count as a pass.
        agent, _, _ = _build_agent()
        output = "no tests ran\n"
        result = agent.parse_test_results(output)
        assert result["passed"] is False
        assert result["total"] == 0
    def test_errors_only(self):
        agent, _, _ = _build_agent()
        output = "========================= 2 error =========================\n"
        result = agent.parse_test_results(output)
        assert result["passed"] is False
        assert result["errors"] == 2
# ---------------------------------------------------------------------------
# Retry counter
# ---------------------------------------------------------------------------
class TestRetryCounter:
    """Per-task retry bookkeeping (_increment_retry / get_retry_count)."""
    def test_initial_count_zero(self):
        agent, _, _ = _build_agent()
        assert agent.get_retry_count("task-1") == 0
    def test_increment_and_get(self):
        agent, _, _ = _build_agent()
        agent._increment_retry("task-1")
        assert agent.get_retry_count("task-1") == 1
        agent._increment_retry("task-1")
        assert agent.get_retry_count("task-1") == 2
    def test_separate_task_counters(self):
        """Counters are tracked independently per task id."""
        agent, _, _ = _build_agent()
        agent._increment_retry("task-1")
        agent._increment_retry("task-1")
        agent._increment_retry("task-2")
        assert agent.get_retry_count("task-1") == 2
        assert agent.get_retry_count("task-2") == 1
    @pytest.mark.asyncio
    async def test_pipeline_failure_increments_counter(self):
        """Each failed review_and_merge run bumps the task's retry count."""
        agent, _, _ = _build_agent()
        with patch.object(agent, "rebase_onto_main", new_callable=AsyncMock,
                          return_value={"success": False, "conflicts": ["a.py"]}):
            await agent.review_and_merge("task-99", "/wt")
        assert agent.get_retry_count("task-99") == 1
        with patch.object(agent, "rebase_onto_main", new_callable=AsyncMock,
                          return_value={"success": False, "conflicts": ["a.py"]}):
            await agent.review_and_merge("task-99", "/wt")
        assert agent.get_retry_count("task-99") == 2
# ---------------------------------------------------------------------------
# Review response parsing
# ---------------------------------------------------------------------------
class TestParseReviewResponse:
    """_parse_review_response: structured fields from the raw model reply."""
    def test_approved_with_info(self):
        agent, _, _ = _build_agent()
        result = agent._parse_review_response(APPROVED_REVIEW)
        assert result["approved"] is True
        assert len(result["issues"]) == 1
        assert result["issues"][0]["severity"] == "info"
        assert "good" in result["summary"].lower()
    def test_rejected_with_critical(self):
        # Issues keep the order they appear in the reply.
        agent, _, _ = _build_agent()
        result = agent._parse_review_response(REJECTED_REVIEW)
        assert result["approved"] is False
        assert len(result["issues"]) == 2
        assert result["issues"][0]["severity"] == "critical"
        assert result["issues"][1]["severity"] == "warning"
    def test_empty_response(self):
        # An empty reply must parse to a safe "not approved" default.
        agent, _, _ = _build_agent()
        result = agent._parse_review_response("")
        assert result["approved"] is False
        assert result["issues"] == []
        assert result["summary"] == ""

# --- scrape artifact: a second copy of this test module follows below ---
# (original web-UI markers: "View File" / "@@ -0,0 +1,550 @@")
"""Tests for QAAgent."""
import os
import subprocess
from types import SimpleNamespace
from unittest.mock import AsyncMock, MagicMock, patch
import git as gitmod
import pytest
from app_factory.agents.qa_agent import QAAgent
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _make_review_response(text, input_tokens=10, output_tokens=20):
    """Build a fake Anthropic messages.create response.

    Provides only the attributes the agent reads: ``content[0].text`` and
    ``usage.input_tokens`` / ``usage.output_tokens``.
    """
    return SimpleNamespace(
        content=[SimpleNamespace(text=text)],
        usage=SimpleNamespace(input_tokens=input_tokens, output_tokens=output_tokens),
    )
def _build_agent(repo_path="/fake/repo", **kwargs):
    """Create a QAAgent with mocked git.Repo and Anthropic client.

    Returns (agent, mock_repo, mock_client); the mocks are re-attached to
    the agent so tests can inspect calls made through them.
    """
    with patch("app_factory.agents.qa_agent.git.Repo") as mock_repo_cls, \
            patch("app_factory.agents.qa_agent.anthropic") as mock_anthro:
        mock_repo = MagicMock()
        mock_repo_cls.return_value = mock_repo
        mock_client = AsyncMock()
        mock_anthro.AsyncAnthropic.return_value = mock_client
        agent = QAAgent(repo_path=repo_path, api_key="test-key", **kwargs)
        agent.client = mock_client
        agent.repo = mock_repo
        return agent, mock_repo, mock_client
# Canned model replies in the APPROVED/ISSUES/SUMMARY layout consumed by
# QAAgent._parse_review_response.
APPROVED_REVIEW = """\
APPROVED: true
ISSUES:
- [severity: info] Minor style suggestion
SUMMARY: Code looks good overall."""
REJECTED_REVIEW = """\
APPROVED: false
ISSUES:
- [severity: critical] SQL injection in query builder
- [severity: warning] Missing input validation
SUMMARY: Critical security issue found."""
# ---------------------------------------------------------------------------
# Initialization
# ---------------------------------------------------------------------------
class TestInitialization:
    """Constructor behaviour: default-client fallback and retry configuration."""
    def test_no_api_key_uses_default_client(self):
        # A single patch.dict clears the environment, so ANTHROPIC_API_KEY is
        # guaranteed absent (the original cleared it, then redundantly copied
        # and re-patched the already-empty environment).
        with patch("app_factory.agents.qa_agent.git.Repo"), \
                patch("app_factory.agents.qa_agent.anthropic") as mock_anthro, \
                patch.dict(os.environ, {}, clear=True):
            mock_anthro.AsyncAnthropic.return_value = AsyncMock()
            QAAgent(repo_path="/fake")
            # Should create default client without explicit key
            mock_anthro.AsyncAnthropic.assert_called_once_with()
    def test_creates_with_api_key(self):
        agent, _, _ = _build_agent()
        assert agent.max_retries == 3
    def test_custom_max_retries(self):
        agent, _, _ = _build_agent(max_retries=5)
        assert agent.max_retries == 5
# ---------------------------------------------------------------------------
# Rebase
# ---------------------------------------------------------------------------
class TestRebaseOntoMain:
    """rebase_onto_main: clean rebase, conflict handling, fetch failure."""
    @pytest.mark.asyncio
    async def test_rebase_success(self):
        agent, _, _ = _build_agent()
        mock_wt_repo = MagicMock()
        with patch("app_factory.agents.qa_agent.git.Repo", return_value=mock_wt_repo):
            result = await agent.rebase_onto_main("/worktree/path", "task-1")
        assert result["success"] is True
        assert result["conflicts"] == []
        mock_wt_repo.git.rebase.assert_called_once_with("main")
    @pytest.mark.asyncio
    async def test_rebase_conflict_unresolvable(self):
        """When auto-resolution fails, the conflicted files are surfaced."""
        agent, _, _ = _build_agent()
        mock_wt_repo = MagicMock()
        mock_wt_repo.git.rebase.side_effect = gitmod.GitCommandError("rebase", "CONFLICT")
        mock_wt_repo.git.status.return_value = "UU conflicted_file.py"
        with patch("app_factory.agents.qa_agent.git.Repo", return_value=mock_wt_repo), \
                patch.object(agent, "auto_resolve_conflicts", return_value=False):
            result = await agent.rebase_onto_main("/worktree/path", "task-1")
        assert result["success"] is False
        assert "conflicted_file.py" in result["conflicts"]
    @pytest.mark.asyncio
    async def test_rebase_conflict_auto_resolved(self):
        """A conflict that auto-resolution fixes still counts as success."""
        agent, _, _ = _build_agent()
        mock_wt_repo = MagicMock()
        mock_wt_repo.git.rebase.side_effect = gitmod.GitCommandError("rebase", "CONFLICT")
        mock_wt_repo.git.status.return_value = "UU file.py"
        with patch("app_factory.agents.qa_agent.git.Repo", return_value=mock_wt_repo), \
                patch.object(agent, "auto_resolve_conflicts", return_value=True):
            result = await agent.rebase_onto_main("/worktree/path", "task-1")
        assert result["success"] is True
    @pytest.mark.asyncio
    async def test_fetch_failure_continues(self):
        """If fetch fails (no remote), rebase should still be attempted."""
        agent, _, _ = _build_agent()
        mock_wt_repo = MagicMock()
        mock_wt_repo.git.fetch.side_effect = gitmod.GitCommandError("fetch", "No remote")
        with patch("app_factory.agents.qa_agent.git.Repo", return_value=mock_wt_repo):
            result = await agent.rebase_onto_main("/worktree/path", "task-1")
        assert result["success"] is True
        mock_wt_repo.git.rebase.assert_called_once_with("main")
# ---------------------------------------------------------------------------
# Linter
# ---------------------------------------------------------------------------
class TestRunLinter:
    """run_linter: ruff output parsing plus missing-tool and timeout paths."""
    def test_lint_passes(self):
        agent, _, _ = _build_agent()
        mock_result = subprocess.CompletedProcess(
            args=["ruff", "check", "."],
            returncode=0,
            stdout="All checks passed!\n",
            stderr="",
        )
        with patch("app_factory.agents.qa_agent.subprocess.run", return_value=mock_result):
            result = agent.run_linter("/worktree/path")
        assert result["passed"] is True
        assert result["errors"] == []
    def test_lint_fails_with_errors(self):
        """Each ruff diagnostic line becomes one entry in result["errors"]."""
        agent, _, _ = _build_agent()
        ruff_output = (
            "app/main.py:10:1: E501 Line too long (120 > 88 characters)\n"
            "app/main.py:15:5: F841 Local variable 'x' is assigned but never used\n"
            "Found 2 errors.\n"
        )
        mock_result = subprocess.CompletedProcess(
            args=["ruff", "check", "."],
            returncode=1,
            stdout=ruff_output,
            stderr="",
        )
        with patch("app_factory.agents.qa_agent.subprocess.run", return_value=mock_result):
            result = agent.run_linter("/worktree/path")
        assert result["passed"] is False
        assert len(result["errors"]) == 2
    def test_lint_ruff_not_found(self):
        # A missing ruff binary is a soft pass with a warning, not a failure.
        agent, _, _ = _build_agent()
        with patch("app_factory.agents.qa_agent.subprocess.run", side_effect=FileNotFoundError):
            result = agent.run_linter("/worktree/path")
        assert result["passed"] is True
        assert "ruff not found" in result["warnings"][0]
    def test_lint_timeout(self):
        # A hung linter is a hard failure.
        agent, _, _ = _build_agent()
        with patch("app_factory.agents.qa_agent.subprocess.run",
                   side_effect=subprocess.TimeoutExpired(cmd="ruff", timeout=120)):
            result = agent.run_linter("/worktree/path")
        assert result["passed"] is False
        assert "timed out" in result["errors"][0]
# ---------------------------------------------------------------------------
# Tests
# ---------------------------------------------------------------------------
class TestRunTests:
    """run_tests: pytest invocation and summary parsing."""
    def test_all_tests_pass(self):
        agent, _, _ = _build_agent()
        pytest_output = (
            "tests/test_foo.py::test_one PASSED\n"
            "tests/test_foo.py::test_two PASSED\n"
            "========================= 2 passed =========================\n"
        )
        mock_result = subprocess.CompletedProcess(
            args=["python", "-m", "pytest"],
            returncode=0,
            stdout=pytest_output,
            stderr="",
        )
        with patch("app_factory.agents.qa_agent.subprocess.run", return_value=mock_result):
            result = agent.run_tests("/worktree/path")
        assert result["passed"] is True
        assert result["total"] == 2
        assert result["failures"] == 0
        assert result["errors"] == 0
    def test_some_tests_fail(self):
        agent, _, _ = _build_agent()
        pytest_output = (
            "tests/test_foo.py::test_one PASSED\n"
            "tests/test_foo.py::test_two FAILED\n"
            "=================== 1 failed, 1 passed ====================\n"
        )
        mock_result = subprocess.CompletedProcess(
            args=["python", "-m", "pytest"],
            returncode=1,
            stdout=pytest_output,
            stderr="",
        )
        with patch("app_factory.agents.qa_agent.subprocess.run", return_value=mock_result):
            result = agent.run_tests("/worktree/path")
        assert result["passed"] is False
        assert result["total"] == 2
        assert result["failures"] == 1
    def test_pytest_not_found(self):
        # Unlike a missing ruff (soft pass in TestRunLinter), a missing
        # pytest is treated as a hard failure.
        agent, _, _ = _build_agent()
        with patch("app_factory.agents.qa_agent.subprocess.run", side_effect=FileNotFoundError):
            result = agent.run_tests("/worktree/path")
        assert result["passed"] is False
        assert "pytest not found" in result["output"]
    def test_pytest_timeout(self):
        agent, _, _ = _build_agent()
        with patch("app_factory.agents.qa_agent.subprocess.run",
                   side_effect=subprocess.TimeoutExpired(cmd="pytest", timeout=300)):
            result = agent.run_tests("/worktree/path")
        assert result["passed"] is False
        assert "timed out" in result["output"]
# ---------------------------------------------------------------------------
# Code Review
# ---------------------------------------------------------------------------
class TestCodeReview:
    """code_review: model call, prompt content, and response parsing."""
    @pytest.mark.asyncio
    async def test_review_approved(self):
        agent, _, mock_client = _build_agent()
        mock_client.messages.create = AsyncMock(
            return_value=_make_review_response(APPROVED_REVIEW)
        )
        result = await agent.code_review("diff content", task={"id": "1", "title": "Add feature"})
        assert result["approved"] is True
        assert len(result["issues"]) == 1
        assert result["issues"][0]["severity"] == "info"
        assert result["summary"] != ""
    @pytest.mark.asyncio
    async def test_review_rejected(self):
        agent, _, mock_client = _build_agent()
        mock_client.messages.create = AsyncMock(
            return_value=_make_review_response(REJECTED_REVIEW)
        )
        result = await agent.code_review("diff with issues")
        assert result["approved"] is False
        assert len(result["issues"]) == 2
        assert result["issues"][0]["severity"] == "critical"
        assert "security" in result["summary"].lower()
    @pytest.mark.asyncio
    async def test_review_no_task_context(self):
        """Review without task metadata still calls the model exactly once."""
        agent, _, mock_client = _build_agent()
        mock_client.messages.create = AsyncMock(
            return_value=_make_review_response(APPROVED_REVIEW)
        )
        result = await agent.code_review("diff content", task=None)
        assert result["approved"] is True
        mock_client.messages.create.assert_awaited_once()
    @pytest.mark.asyncio
    async def test_review_loads_template(self):
        """The prompt sent to the model contains the review-template text."""
        agent, _, mock_client = _build_agent()
        mock_client.messages.create = AsyncMock(
            return_value=_make_review_response(APPROVED_REVIEW)
        )
        await agent.code_review("some diff")
        call_args = mock_client.messages.create.call_args
        prompt_text = call_args.kwargs["messages"][0]["content"]
        assert "Review Checklist" in prompt_text
        assert "OWASP" in prompt_text
# ---------------------------------------------------------------------------
# Merge to main
# ---------------------------------------------------------------------------
class TestMergeToMain:
    """merge_to_main: --no-ff merge of the task's feature branch."""
    def test_merge_success(self):
        agent, mock_repo, _ = _build_agent()
        mock_repo.head.commit.hexsha = "abc123def456"
        result = agent.merge_to_main("/worktree/path", "42")
        assert result["success"] is True
        assert result["commit_sha"] == "abc123def456"
        mock_repo.git.checkout.assert_called_once_with("main")
        mock_repo.git.merge.assert_called_once_with(
            "--no-ff", "feature/task-42", m="Merge feature/task-42"
        )
    def test_merge_failure(self):
        # A conflicting merge reports failure and no commit sha.
        agent, mock_repo, _ = _build_agent()
        mock_repo.git.merge.side_effect = gitmod.GitCommandError("merge", "conflict")
        result = agent.merge_to_main("/worktree/path", "42")
        assert result["success"] is False
        assert result["commit_sha"] is None
# ---------------------------------------------------------------------------
# Full pipeline: review_and_merge
# ---------------------------------------------------------------------------
class TestReviewAndMerge:
    """Full pipeline: rebase -> lint -> tests -> review -> merge, with each
    stage's failure short-circuiting to a distinct status."""
    @pytest.mark.asyncio
    async def test_happy_path(self):
        """Every stage passes, so the task merges and the sha is reported."""
        agent, mock_repo, _ = _build_agent()
        mock_repo.head.commit.hexsha = "merged123"
        mock_wt_repo = MagicMock()
        mock_wt_repo.git.diff.return_value = "diff --git a/file.py"
        with patch.object(agent, "rebase_onto_main", new_callable=AsyncMock,
                          return_value={"success": True, "conflicts": []}), \
                patch.object(agent, "run_linter",
                             return_value={"passed": True, "errors": [], "warnings": []}), \
                patch.object(agent, "run_tests",
                             return_value={"passed": True, "total": 5, "failures": 0, "errors": 0, "output": "ok"}), \
                patch.object(agent, "code_review", new_callable=AsyncMock,
                             return_value={"approved": True, "issues": [], "summary": "All good"}), \
                patch.object(agent, "merge_to_main",
                             return_value={"success": True, "commit_sha": "merged123"}), \
                patch("app_factory.agents.qa_agent.git.Repo", return_value=mock_wt_repo):
            result = await agent.review_and_merge("task-1", "/worktree/path")
        assert result["status"] == "merged"
        assert result["commit_sha"] == "merged123"
    @pytest.mark.asyncio
    async def test_rebase_failure(self):
        agent, _, _ = _build_agent()
        with patch.object(agent, "rebase_onto_main", new_callable=AsyncMock,
                          return_value={"success": False, "conflicts": ["file.py"]}):
            result = await agent.review_and_merge("task-2", "/worktree/path")
        assert result["status"] == "rebase_failed"
        assert "file.py" in result["conflicts"]
        assert result["retry_count"] == 1
    @pytest.mark.asyncio
    async def test_lint_failure(self):
        agent, _, _ = _build_agent()
        with patch.object(agent, "rebase_onto_main", new_callable=AsyncMock,
                          return_value={"success": True, "conflicts": []}), \
                patch.object(agent, "run_linter",
                             return_value={"passed": False, "errors": ["E501 line too long"], "warnings": []}):
            result = await agent.review_and_merge("task-3", "/worktree/path")
        assert result["status"] == "lint_failed"
        assert len(result["errors"]) == 1
    @pytest.mark.asyncio
    async def test_test_failure(self):
        # Note: no worktree repo mock is needed here because the pipeline
        # stops before the diff is taken (the original created an unused one).
        agent, _, _ = _build_agent()
        with patch.object(agent, "rebase_onto_main", new_callable=AsyncMock,
                          return_value={"success": True, "conflicts": []}), \
                patch.object(agent, "run_linter",
                             return_value={"passed": True, "errors": [], "warnings": []}), \
                patch.object(agent, "run_tests",
                             return_value={"passed": False, "total": 3, "failures": 1, "errors": 0, "output": "FAILED"}):
            result = await agent.review_and_merge("task-4", "/worktree/path")
        assert result["status"] == "tests_failed"
        assert result["failures"] == 1
    @pytest.mark.asyncio
    async def test_review_failure(self):
        agent, _, _ = _build_agent()
        mock_wt_repo = MagicMock()
        mock_wt_repo.git.diff.return_value = "diff"
        with patch.object(agent, "rebase_onto_main", new_callable=AsyncMock,
                          return_value={"success": True, "conflicts": []}), \
                patch.object(agent, "run_linter",
                             return_value={"passed": True, "errors": [], "warnings": []}), \
                patch.object(agent, "run_tests",
                             return_value={"passed": True, "total": 3, "failures": 0, "errors": 0, "output": "ok"}), \
                patch.object(agent, "code_review", new_callable=AsyncMock,
                             return_value={"approved": False, "issues": [{"severity": "critical", "description": "vuln"}], "summary": "Bad"}), \
                patch("app_factory.agents.qa_agent.git.Repo", return_value=mock_wt_repo):
            result = await agent.review_and_merge("task-5", "/worktree/path")
        assert result["status"] == "review_failed"
        assert len(result["issues"]) == 1
# ---------------------------------------------------------------------------
# Parse test results
# ---------------------------------------------------------------------------
class TestParseTestResults:
    """parse_test_results: counting passed/failed/error from pytest summaries."""
    def test_all_passed(self):
        agent, _, _ = _build_agent()
        output = "========================= 5 passed =========================\n"
        result = agent.parse_test_results(output)
        assert result["passed"] is True
        assert result["total"] == 5
        assert result["failures"] == 0
        assert result["errors"] == 0
    def test_mixed_results(self):
        # total is the sum of failed + passed + error counts.
        agent, _, _ = _build_agent()
        output = "================ 1 failed, 4 passed, 1 error ================\n"
        result = agent.parse_test_results(output)
        assert result["passed"] is False
        assert result["total"] == 6
        assert result["failures"] == 1
        assert result["errors"] == 1
    def test_all_failed(self):
        agent, _, _ = _build_agent()
        output = "========================= 3 failed =========================\n"
        result = agent.parse_test_results(output)
        assert result["passed"] is False
        assert result["total"] == 3
        assert result["failures"] == 3
    def test_no_tests(self):
        # "no tests ran" must not count as a pass.
        agent, _, _ = _build_agent()
        output = "no tests ran\n"
        result = agent.parse_test_results(output)
        assert result["passed"] is False
        assert result["total"] == 0
    def test_errors_only(self):
        agent, _, _ = _build_agent()
        output = "========================= 2 error =========================\n"
        result = agent.parse_test_results(output)
        assert result["passed"] is False
        assert result["errors"] == 2
# ---------------------------------------------------------------------------
# Retry counter
# ---------------------------------------------------------------------------
class TestRetryCounter:
    """Tests for the agent's per-task retry bookkeeping."""

    def test_initial_count_zero(self):
        qa, _, _ = _build_agent()
        assert qa.get_retry_count("task-1") == 0

    def test_increment_and_get(self):
        qa, _, _ = _build_agent()
        for expected in (1, 2):
            qa._increment_retry("task-1")
            assert qa.get_retry_count("task-1") == expected

    def test_separate_task_counters(self):
        qa, _, _ = _build_agent()
        qa._increment_retry("task-1")
        qa._increment_retry("task-1")
        qa._increment_retry("task-2")
        assert qa.get_retry_count("task-1") == 2
        assert qa.get_retry_count("task-2") == 1

    @pytest.mark.asyncio
    async def test_pipeline_failure_increments_counter(self):
        qa, _, _ = _build_agent()
        # Each failed rebase pass through review_and_merge bumps the counter.
        for expected in (1, 2):
            with patch.object(qa, "rebase_onto_main", new_callable=AsyncMock,
                              return_value={"success": False, "conflicts": ["a.py"]}):
                await qa.review_and_merge("task-99", "/wt")
            assert qa.get_retry_count("task-99") == expected
# ---------------------------------------------------------------------------
# Review response parsing
# ---------------------------------------------------------------------------
class TestParseReviewResponse:
    """Tests for the agent's code-review response parser."""

    def test_approved_with_info(self):
        qa, _, _ = _build_agent()
        parsed = qa._parse_review_response(APPROVED_REVIEW)
        assert parsed["approved"] is True
        assert len(parsed["issues"]) == 1
        assert parsed["issues"][0]["severity"] == "info"
        assert "good" in parsed["summary"].lower()

    def test_rejected_with_critical(self):
        qa, _, _ = _build_agent()
        parsed = qa._parse_review_response(REJECTED_REVIEW)
        assert parsed["approved"] is False
        severities = [issue["severity"] for issue in parsed["issues"]]
        assert severities == ["critical", "warning"]

    def test_empty_response(self):
        qa, _, _ = _build_agent()
        parsed = qa._parse_review_response("")
        assert parsed["approved"] is False
        assert parsed["issues"] == []
        assert parsed["summary"] == ""

View File

@@ -0,0 +1,13 @@
package com.gigagimbank.runelite;

import net.runelite.client.RuneLite;
import net.runelite.client.externalplugins.ExternalPluginManager;

/**
 * Development entry point: boots a local RuneLite client with the
 * GigaGIMBank plugin side-loaded, so the plugin can be exercised without
 * going through the plugin hub.
 */
public class GigaGIMBankPluginTest
{
	// loadBuiltin is a generic varargs method, so every call site emits an
	// unchecked-generic-array warning; suppress it, matching the sibling
	// launcher class in this repository.
	@SuppressWarnings("unchecked")
	public static void main(String[] args) throws Exception
	{
		ExternalPluginManager.loadBuiltin(GigaGIMBankPlugin.class);
		RuneLite.main(args);
	}
}

View File

@@ -0,0 +1,14 @@
package com.gigagimbank.runelite;

import net.runelite.client.RuneLite;
import net.runelite.client.externalplugins.ExternalPluginManager;

/**
 * Development entry point: boots a local RuneLite client with the
 * GigaGIMBank plugin side-loaded for manual testing.
 */
public class GigaGIMBankPluginTest
{
	// loadBuiltin is a generic varargs method; suppress the resulting
	// unchecked-generic-array warning at this call site.
	@SuppressWarnings("unchecked")
	public static void main(String[] args) throws Exception
	{
		ExternalPluginManager.loadBuiltin(GigaGIMBankPlugin.class);
		RuneLite.main(args);
	}
}

View File

@@ -0,0 +1,615 @@
import fs from 'node:fs';
import path from 'node:path';
import { DatabaseSync } from 'node:sqlite';
import {
createPostgresPool,
initializePostgresSchema,
runPostgresHealthcheck,
} from './dbPostgres.js';
import { openPostgresSyncBridge } from './dbPostgresSyncBridge.js';
import { parseBooleanFlag } from './utils.js';
// Default on-disk location for the sqlite database file.
const defaultDbPath = path.join(process.cwd(), 'data', 'gigagimbank.sqlite');

const DB_DRIVER_SQLITE = 'sqlite';
const DB_DRIVER_POSTGRES = 'postgres';

/**
 * Coerce an arbitrary env value to a supported driver name.
 * Anything other than a case-insensitive "postgres" falls back to sqlite.
 */
function normalizeDbDriver(value) {
  const candidate = String(value ?? '').trim().toLowerCase();
  return candidate === DB_DRIVER_POSTGRES ? DB_DRIVER_POSTGRES : DB_DRIVER_SQLITE;
}
/**
 * Decide which database driver will actually be used for this process.
 *
 * The requested driver comes from DB_DRIVER; DEV_DB_SQLITE_FALLBACK can
 * force a postgres request back onto sqlite for local development.
 * Returns a descriptor consumed by openDatabase and the health endpoints.
 */
export function resolveDatabaseRuntimeConfig(env = process.env) {
  const requested = normalizeDbDriver(env.DB_DRIVER);
  const devFallback = parseBooleanFlag(env.DEV_DB_SQLITE_FALLBACK);

  if (requested === DB_DRIVER_POSTGRES && devFallback) {
    // Postgres was asked for, but the dev escape hatch wins.
    return {
      requested_driver: requested,
      resolved_driver: DB_DRIVER_SQLITE,
      mode: 'sqlite_dev_fallback',
      sqlite_dev_fallback: true,
      postgres_adapter_available: true,
      postgres_store_compatibility: 'sqlite_fallback',
    };
  }

  const isSqlite = requested === DB_DRIVER_SQLITE;
  return {
    requested_driver: requested,
    resolved_driver: requested,
    mode: isSqlite ? 'sqlite_primary' : 'postgres_primary',
    sqlite_dev_fallback: devFallback,
    postgres_adapter_available: true,
    postgres_store_compatibility: isSqlite ? 'sqlite_primary' : 'sync_bridge',
  };
}
/**
 * Open the process-wide database handle according to the resolved driver.
 * Postgres mode returns the synchronous bridge (sqlite-like API); sqlite
 * mode creates the data directory if needed and opens the file with WAL
 * and foreign keys enabled.
 */
export function openDatabase(env = process.env) {
  const { resolved_driver: driver } = resolveDatabaseRuntimeConfig(env);

  if (driver === DB_DRIVER_POSTGRES) {
    return openPostgresSyncBridge(env);
  }

  const sqlitePath = env.DB_PATH || defaultDbPath;
  fs.mkdirSync(path.dirname(sqlitePath), { recursive: true });

  const db = new DatabaseSync(sqlitePath);
  db.exec('PRAGMA journal_mode = WAL;');
  db.exec('PRAGMA foreign_keys = ON;');
  return db;
}
/**
 * Create the pg Pool used by migration tooling.
 * Refuses to run unless DB_DRIVER explicitly requests postgres, so a
 * sqlite-configured process cannot accidentally touch the postgres DB.
 */
export async function openPostgresMigrationAdapter(env = process.env) {
  const runtime = resolveDatabaseRuntimeConfig(env);
  if (runtime.requested_driver !== DB_DRIVER_POSTGRES) {
    throw new Error('openPostgresMigrationAdapter requires DB_DRIVER=postgres.');
  }

  const { pool, runtime: postgresRuntime } = createPostgresPool(env);
  return {
    pool,
    runtime: { ...runtime, postgres: postgresRuntime },
  };
}
/** Apply the postgres schema via the shared bootstrap in dbPostgres.js. */
export async function initializePostgresMigrationSchema(pool) {
await initializePostgresSchema(pool);
}
/** Thin wrapper over the shared postgres healthcheck (SELECT 1 probe). */
export async function checkPostgresMigrationHealth(pool) {
return runPostgresHealthcheck(pool);
}
/**
 * Create all tables/indexes (idempotent via IF NOT EXISTS) and then run
 * in-place migrations: add columns introduced after the initial schema,
 * backfill defaults, mirror users.runelite_account_hash into the
 * user_runelite_accounts link table, and rebuild the group_members
 * uniqueness indexes. Order matters: columns must exist before the
 * backfill UPDATEs run, and the account backfill must precede the
 * account_hash propagation into group_members / group_join_requests.
 *
 * `db` is either a node:sqlite DatabaseSync or the postgres sync bridge
 * (distinguished by db.engine === 'postgres').
 */
export function initializeSchema(db) {
// Base schema: every statement is IF NOT EXISTS, so re-running is safe.
db.exec(`
CREATE TABLE IF NOT EXISTS users (
id TEXT PRIMARY KEY,
discord_id TEXT,
discord_avatar_url TEXT,
runelite_account_hash TEXT UNIQUE NOT NULL,
runelite_linked INTEGER NOT NULL DEFAULT 1,
default_display_name TEXT NOT NULL,
opt_out_hiscores INTEGER NOT NULL DEFAULT 0,
opt_out_activity_feed INTEGER NOT NULL DEFAULT 0,
last_seen_at TEXT,
created_at TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS user_runelite_accounts (
account_hash TEXT PRIMARY KEY,
user_id TEXT NOT NULL,
linked_at TEXT NOT NULL,
is_active INTEGER NOT NULL DEFAULT 1,
FOREIGN KEY (user_id) REFERENCES users (id)
);
CREATE INDEX IF NOT EXISTS idx_user_runelite_accounts_user_id
ON user_runelite_accounts(user_id);
CREATE TABLE IF NOT EXISTS oauth_sessions (
id TEXT PRIMARY KEY,
session_token TEXT UNIQUE NOT NULL,
account_hash TEXT NOT NULL,
expires_at TEXT NOT NULL,
consumed_at TEXT
);
CREATE TABLE IF NOT EXISTS web_oauth_sessions (
id TEXT PRIMARY KEY,
session_token TEXT UNIQUE NOT NULL,
expires_at TEXT NOT NULL,
consumed_at TEXT
);
CREATE TABLE IF NOT EXISTS groups_table (
id TEXT PRIMARY KEY,
group_name TEXT NOT NULL,
leader_user_id TEXT NOT NULL,
join_code TEXT UNIQUE NOT NULL,
join_code_expires_at TEXT NOT NULL,
allow_open_invite_join INTEGER NOT NULL DEFAULT 0,
open_invite_expires_at TEXT,
opt_out_hiscores INTEGER NOT NULL DEFAULT 0,
opt_out_activity_feed INTEGER NOT NULL DEFAULT 0,
webhook_config_json TEXT NOT NULL DEFAULT '{}',
created_at TEXT NOT NULL,
FOREIGN KEY (leader_user_id) REFERENCES users (id)
);
CREATE TABLE IF NOT EXISTS group_members (
id TEXT PRIMARY KEY,
group_id TEXT NOT NULL,
user_id TEXT,
account_hash TEXT,
expected_runescape_name TEXT NOT NULL,
role TEXT NOT NULL,
webhook_config_perms INTEGER NOT NULL DEFAULT 0,
loadout_admin_perms INTEGER NOT NULL DEFAULT 0,
joined_at TEXT NOT NULL,
FOREIGN KEY (group_id) REFERENCES groups_table (id),
FOREIGN KEY (user_id) REFERENCES users (id),
FOREIGN KEY (account_hash) REFERENCES user_runelite_accounts (account_hash)
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_group_members_claimed_group_user
ON group_members(group_id, user_id)
WHERE user_id IS NOT NULL;
CREATE UNIQUE INDEX IF NOT EXISTS idx_group_members_account_hash
ON group_members(account_hash)
WHERE account_hash IS NOT NULL;
CREATE TABLE IF NOT EXISTS group_join_requests (
id TEXT PRIMARY KEY,
group_id TEXT NOT NULL,
requester_user_id TEXT NOT NULL,
requester_account_hash TEXT,
status TEXT NOT NULL,
requested_at TEXT NOT NULL,
resolved_at TEXT,
resolved_by_user_id TEXT,
FOREIGN KEY (group_id) REFERENCES groups_table (id),
FOREIGN KEY (requester_user_id) REFERENCES users (id),
FOREIGN KEY (requester_account_hash) REFERENCES user_runelite_accounts (account_hash),
FOREIGN KEY (resolved_by_user_id) REFERENCES users (id)
);
CREATE INDEX IF NOT EXISTS idx_join_requests_group_status
ON group_join_requests(group_id, status, requested_at DESC);
CREATE INDEX IF NOT EXISTS idx_join_requests_requester
ON group_join_requests(requester_user_id, requested_at DESC);
CREATE TABLE IF NOT EXISTS storage_snapshots (
id TEXT PRIMARY KEY,
group_id TEXT NOT NULL,
group_member_id TEXT,
location_type TEXT NOT NULL,
items_json TEXT NOT NULL,
state_hash TEXT NOT NULL,
updated_at TEXT NOT NULL,
FOREIGN KEY (group_id) REFERENCES groups_table (id),
FOREIGN KEY (group_member_id) REFERENCES group_members (id)
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_storage_group_slot
ON storage_snapshots(group_id, location_type)
WHERE group_member_id IS NULL;
CREATE UNIQUE INDEX IF NOT EXISTS idx_storage_member_slot
ON storage_snapshots(group_id, group_member_id, location_type)
WHERE group_member_id IS NOT NULL;
CREATE TABLE IF NOT EXISTS audit_logs (
id TEXT PRIMARY KEY,
group_id TEXT NOT NULL,
group_member_id TEXT,
location_type TEXT NOT NULL,
item_id INTEGER NOT NULL,
quantity_delta INTEGER NOT NULL,
created_at TEXT NOT NULL,
FOREIGN KEY (group_id) REFERENCES groups_table (id),
FOREIGN KEY (group_member_id) REFERENCES group_members (id)
);
CREATE INDEX IF NOT EXISTS idx_audit_group_created_at
ON audit_logs(group_id, created_at DESC);
CREATE TABLE IF NOT EXISTS activity_logs (
id TEXT PRIMARY KEY,
group_id TEXT NOT NULL,
group_member_id TEXT,
event_type TEXT NOT NULL,
event_data_json TEXT NOT NULL,
created_at TEXT NOT NULL,
FOREIGN KEY (group_id) REFERENCES groups_table (id),
FOREIGN KEY (group_member_id) REFERENCES group_members (id)
);
CREATE INDEX IF NOT EXISTS idx_activity_group_created_at
ON activity_logs(group_id, created_at DESC);
CREATE TABLE IF NOT EXISTS webhook_delivery_logs (
id TEXT PRIMARY KEY,
group_id TEXT NOT NULL,
group_member_id TEXT,
activity_id TEXT,
event_type TEXT NOT NULL,
delivery_status TEXT NOT NULL,
attempt_count INTEGER NOT NULL DEFAULT 0,
http_status INTEGER,
error_message TEXT,
webhook_host TEXT,
payload_json TEXT NOT NULL,
created_at TEXT NOT NULL,
FOREIGN KEY (group_id) REFERENCES groups_table (id),
FOREIGN KEY (group_member_id) REFERENCES group_members (id)
);
CREATE INDEX IF NOT EXISTS idx_webhook_delivery_group_created_at
ON webhook_delivery_logs(group_id, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_webhook_delivery_status_created_at
ON webhook_delivery_logs(delivery_status, created_at DESC);
CREATE TABLE IF NOT EXISTS billing_customers (
user_id TEXT PRIMARY KEY,
stripe_customer_id TEXT UNIQUE NOT NULL,
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL,
FOREIGN KEY (user_id) REFERENCES users (id)
);
CREATE INDEX IF NOT EXISTS idx_billing_customers_customer_id
ON billing_customers(stripe_customer_id);
CREATE TABLE IF NOT EXISTS billing_subscriptions (
stripe_subscription_id TEXT PRIMARY KEY,
stripe_customer_id TEXT NOT NULL,
status TEXT NOT NULL,
price_id TEXT,
current_period_end TEXT,
cancel_at_period_end INTEGER NOT NULL DEFAULT 0,
raw_json TEXT NOT NULL DEFAULT '{}',
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_billing_subscriptions_customer_updated
ON billing_subscriptions(stripe_customer_id, updated_at DESC);
CREATE TABLE IF NOT EXISTS group_billing_subscriptions (
stripe_subscription_id TEXT PRIMARY KEY,
group_id TEXT NOT NULL,
status TEXT NOT NULL,
price_id TEXT,
current_period_end TEXT,
cancel_at_period_end INTEGER NOT NULL DEFAULT 0,
raw_json TEXT NOT NULL DEFAULT '{}',
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL,
FOREIGN KEY (group_id) REFERENCES groups_table (id)
);
CREATE INDEX IF NOT EXISTS idx_group_billing_subscriptions_group_updated
ON group_billing_subscriptions(group_id, updated_at DESC);
CREATE TABLE IF NOT EXISTS user_boost_credits (
user_id TEXT PRIMARY KEY,
available_boosts INTEGER NOT NULL DEFAULT 0,
updated_at TEXT NOT NULL,
FOREIGN KEY (user_id) REFERENCES users (id)
);
CREATE TABLE IF NOT EXISTS group_manual_boost_allocations (
user_id TEXT NOT NULL,
group_id TEXT NOT NULL,
boosts_assigned INTEGER NOT NULL,
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL,
PRIMARY KEY (user_id, group_id),
FOREIGN KEY (user_id) REFERENCES users (id),
FOREIGN KEY (group_id) REFERENCES groups_table (id)
);
CREATE INDEX IF NOT EXISTS idx_group_manual_boost_allocations_group
ON group_manual_boost_allocations(group_id, updated_at DESC);
CREATE TABLE IF NOT EXISTS user_subscription_boost_overrides (
user_id TEXT NOT NULL,
allocation_month TEXT NOT NULL,
allocations_json TEXT NOT NULL DEFAULT '{}',
configured_at TEXT NOT NULL,
PRIMARY KEY (user_id, allocation_month),
FOREIGN KEY (user_id) REFERENCES users (id)
);
CREATE TABLE IF NOT EXISTS item_catalog (
item_id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
ge_value INTEGER,
is_tradeable INTEGER NOT NULL DEFAULT 1,
icon_url TEXT,
catalog_source TEXT,
catalog_version TEXT,
updated_at TEXT NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_item_catalog_name
ON item_catalog(name);
CREATE TABLE IF NOT EXISTS item_catalog_snapshots (
id TEXT PRIMARY KEY,
source_name TEXT NOT NULL,
source_version TEXT,
checksum_sha256 TEXT NOT NULL,
item_count INTEGER NOT NULL,
created_at TEXT NOT NULL,
notes TEXT
);
CREATE INDEX IF NOT EXISTS idx_item_catalog_snapshots_created_at
ON item_catalog_snapshots(created_at DESC);
CREATE TABLE IF NOT EXISTS item_catalog_snapshot_items (
snapshot_id TEXT NOT NULL,
item_id INTEGER NOT NULL,
name TEXT NOT NULL,
ge_value INTEGER,
is_tradeable INTEGER NOT NULL DEFAULT 1,
icon_url TEXT,
PRIMARY KEY (snapshot_id, item_id),
FOREIGN KEY (snapshot_id) REFERENCES item_catalog_snapshots (id) ON DELETE CASCADE
);
CREATE TABLE IF NOT EXISTS hiscores_fallback_state (
group_member_id TEXT PRIMARY KEY,
runescape_name TEXT NOT NULL,
snapshot_json TEXT NOT NULL,
updated_at TEXT NOT NULL,
FOREIGN KEY (group_member_id) REFERENCES group_members (id)
);
CREATE TABLE IF NOT EXISTS group_wealth_snapshots (
id TEXT PRIMARY KEY,
group_id TEXT NOT NULL,
total_value_gp INTEGER NOT NULL DEFAULT 0,
captured_at TEXT NOT NULL,
FOREIGN KEY (group_id) REFERENCES groups_table (id)
);
CREATE INDEX IF NOT EXISTS idx_group_wealth_snapshots_group_time
ON group_wealth_snapshots(group_id, captured_at DESC);
CREATE TABLE IF NOT EXISTS group_goals (
id TEXT PRIMARY KEY,
group_id TEXT NOT NULL,
created_by_user_id TEXT NOT NULL,
title TEXT NOT NULL,
description TEXT,
target_value_gp INTEGER NOT NULL,
current_value_gp INTEGER NOT NULL DEFAULT 0,
status TEXT NOT NULL DEFAULT 'ACTIVE',
due_at TEXT,
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL,
FOREIGN KEY (group_id) REFERENCES groups_table (id),
FOREIGN KEY (created_by_user_id) REFERENCES users (id)
);
CREATE INDEX IF NOT EXISTS idx_group_goals_group_status
ON group_goals(group_id, status, updated_at DESC);
CREATE TABLE IF NOT EXISTS group_loadouts (
id TEXT PRIMARY KEY,
group_id TEXT NOT NULL,
owner_user_id TEXT NOT NULL,
name TEXT NOT NULL,
description TEXT,
scope TEXT NOT NULL,
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL,
FOREIGN KEY (group_id) REFERENCES groups_table (id),
FOREIGN KEY (owner_user_id) REFERENCES users (id)
);
CREATE INDEX IF NOT EXISTS idx_group_loadouts_group_scope_updated
ON group_loadouts(group_id, scope, updated_at DESC);
CREATE TABLE IF NOT EXISTS group_loadout_items (
id TEXT PRIMARY KEY,
loadout_id TEXT NOT NULL,
item_id INTEGER NOT NULL,
required_qty INTEGER NOT NULL,
created_at TEXT NOT NULL,
FOREIGN KEY (loadout_id) REFERENCES group_loadouts (id)
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_group_loadout_items_unique
ON group_loadout_items(loadout_id, item_id);
CREATE TABLE IF NOT EXISTS feature_usage_events (
id TEXT PRIMARY KEY,
group_id TEXT NOT NULL,
user_id TEXT,
feature_key TEXT NOT NULL,
action_key TEXT NOT NULL,
created_at TEXT NOT NULL,
FOREIGN KEY (group_id) REFERENCES groups_table (id),
FOREIGN KEY (user_id) REFERENCES users (id)
);
CREATE INDEX IF NOT EXISTS idx_feature_usage_events_feature_time
ON feature_usage_events(feature_key, action_key, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_feature_usage_events_group_time
ON feature_usage_events(group_id, created_at DESC);
`);
// Migration: columns added after the initial schema shipped. These are
// no-ops for fresh databases (the CREATE TABLE above already has them)
// but upgrade older database files in place.
addColumnIfMissing(db, 'groups_table', 'opt_out_hiscores INTEGER NOT NULL DEFAULT 0');
addColumnIfMissing(db, 'groups_table', 'opt_out_activity_feed INTEGER NOT NULL DEFAULT 0');
addColumnIfMissing(db, 'groups_table', 'allow_open_invite_join INTEGER NOT NULL DEFAULT 0');
addColumnIfMissing(db, 'groups_table', "join_code_expires_at TEXT NOT NULL DEFAULT ''");
addColumnIfMissing(db, 'groups_table', 'open_invite_expires_at TEXT');
addColumnIfMissing(db, 'groups_table', "webhook_config_json TEXT NOT NULL DEFAULT '{}'");
addColumnIfMissing(db, 'users', 'runelite_linked INTEGER NOT NULL DEFAULT 1');
addColumnIfMissing(db, 'users', 'discord_avatar_url TEXT');
addColumnIfMissing(db, 'users', 'last_seen_at TEXT');
addColumnIfMissing(db, 'item_catalog', 'is_tradeable INTEGER NOT NULL DEFAULT 1');
addColumnIfMissing(db, 'item_catalog', 'catalog_source TEXT');
addColumnIfMissing(db, 'item_catalog', 'catalog_version TEXT');
addColumnIfMissing(db, 'group_members', 'webhook_config_perms INTEGER NOT NULL DEFAULT 0');
addColumnIfMissing(db, 'group_members', 'loadout_admin_perms INTEGER NOT NULL DEFAULT 0');
addColumnIfMissing(db, 'group_members', 'account_hash TEXT');
addColumnIfMissing(db, 'group_join_requests', 'requester_account_hash TEXT');
// Backfill: groups migrated with an empty expiry get a join code valid
// for 7 days from now; empty webhook configs become the '{}' default.
const defaultInviteExpiry = new Date(Date.now() + 7 * 24 * 60 * 60 * 1000).toISOString();
db.prepare(
`UPDATE groups_table
SET join_code_expires_at = ?
WHERE join_code_expires_at IS NULL
OR join_code_expires_at = ''`
).run(defaultInviteExpiry);
db.prepare(
`UPDATE groups_table
SET webhook_config_json = '{}'
WHERE webhook_config_json IS NULL
OR webhook_config_json = ''`
).run();
// Migration: mirror users.runelite_account_hash into the
// user_runelite_accounts link table. Postgres has no INSERT OR IGNORE,
// so the conflict-ignoring form differs per engine.
const insertOrIgnoreAccountsSql = db.engine === 'postgres'
? `INSERT INTO user_runelite_accounts (
account_hash,
user_id,
linked_at,
is_active
)
SELECT
runelite_account_hash,
id,
created_at,
CASE WHEN runelite_linked = 1 THEN 1 ELSE 0 END
FROM users
WHERE runelite_account_hash IS NOT NULL
AND runelite_account_hash <> ''
ON CONFLICT(account_hash) DO NOTHING`
: `INSERT OR IGNORE INTO user_runelite_accounts (
account_hash,
user_id,
linked_at,
is_active
)
SELECT
runelite_account_hash,
id,
created_at,
CASE WHEN runelite_linked = 1 THEN 1 ELSE 0 END
FROM users
WHERE runelite_account_hash IS NOT NULL
AND runelite_account_hash <> ''`;
db.prepare(insertOrIgnoreAccountsSql).run();
// Recompute users.runelite_linked from the presence of an active account
// in the link table, now that the table is populated.
db.prepare(
`UPDATE users
SET runelite_linked = CASE
WHEN EXISTS (
SELECT 1
FROM user_runelite_accounts ura
WHERE ura.user_id = users.id
AND ura.is_active = 1
) THEN 1
ELSE 0
END`
).run();
// Keep users.runelite_account_hash pointing at the most recently linked
// active account (COALESCE preserves the old value when none exists).
db.prepare(
`UPDATE users
SET runelite_account_hash = COALESCE(
(
SELECT ura.account_hash
FROM user_runelite_accounts ura
WHERE ura.user_id = users.id
AND ura.is_active = 1
ORDER BY ura.linked_at DESC
LIMIT 1
),
runelite_account_hash
)`
).run();
// Propagate account hashes onto claimed group memberships that predate
// the account_hash column.
db.prepare(
`UPDATE group_members
SET account_hash = (
SELECT ura.account_hash
FROM user_runelite_accounts ura
WHERE ura.user_id = group_members.user_id
AND ura.is_active = 1
ORDER BY ura.linked_at DESC
LIMIT 1
)
WHERE group_members.user_id IS NOT NULL
AND (group_members.account_hash IS NULL OR group_members.account_hash = '')`
).run();
// Same propagation for pending/old join requests.
db.prepare(
`UPDATE group_join_requests
SET requester_account_hash = (
SELECT ura.account_hash
FROM user_runelite_accounts ura
WHERE ura.user_id = group_join_requests.requester_user_id
AND ura.is_active = 1
ORDER BY ura.linked_at DESC
LIMIT 1
)
WHERE requester_account_hash IS NULL
OR requester_account_hash = ''`
).run();
// Migration: replace the old per-user uniqueness index with the
// (group_id, user_id) and account_hash partial unique indexes.
db.exec(`DROP INDEX IF EXISTS idx_group_members_claimed_user;`);
db.exec(
`CREATE UNIQUE INDEX IF NOT EXISTS idx_group_members_claimed_group_user
ON group_members(group_id, user_id)
WHERE user_id IS NOT NULL;`
);
db.exec(
`CREATE UNIQUE INDEX IF NOT EXISTS idx_group_members_account_hash
ON group_members(account_hash)
WHERE account_hash IS NOT NULL;`
);
}
/**
 * Current wall-clock time as an ISO-8601 UTC string; used for every
 * created_at / updated_at value written by this module.
 */
export function nowIso() {
  const now = new Date();
  return now.toISOString();
}
/**
 * Idempotently add a column to an existing table.
 * Postgres uses its native ADD COLUMN IF NOT EXISTS; sqlite has no such
 * clause, so we attempt the ALTER and swallow only the duplicate-column
 * failure. Any other error is re-thrown.
 */
function addColumnIfMissing(db, tableName, columnDefinition) {
  if (db.engine === 'postgres') {
    try {
      db.exec(`ALTER TABLE ${tableName} ADD COLUMN IF NOT EXISTS ${columnDefinition};`);
    } catch (error) {
      const details = String(error?.message ?? '').toLowerCase();
      if (!details.includes('already exists')) {
        throw error;
      }
    }
    return;
  }

  try {
    db.exec(`ALTER TABLE ${tableName} ADD COLUMN ${columnDefinition};`);
  } catch (error) {
    const details = String(error?.message ?? '').toLowerCase();
    const isDuplicate =
      details.includes('duplicate column name') || details.includes('already exists');
    if (!isDuplicate) {
      throw error;
    }
  }
}

View File

@@ -0,0 +1,171 @@
import { getGroupWebhookConfig, recordWebhookDeliveryLog } from './store.js';
import { deliverWebhookWithRetry } from './webhookDelivery.js';
import { shouldDispatchWebhookEvent } from './webhookPolicy.js';
/** Stringify and clip to maxLength characters, appending '...' when clipped. */
function truncateText(value, maxLength) {
  const text = String(value ?? '');
  return text.length > maxLength ? `${text.slice(0, maxLength)}...` : text;
}
/** Uppercase, trim, and cap at 64 chars; empty input becomes 'UNKNOWN'. */
function normalizeWebhookEventType(value) {
  const normalized = String(value || '').trim().toUpperCase().slice(0, 64);
  return normalized || 'UNKNOWN';
}
/**
 * JSON-encode event data, capped at 1000 characters. Unserializable
 * values (circular structures, BigInt, ...) degrade to '{}'.
 */
function serializeWebhookEventData(eventData) {
  try {
    return truncateText(JSON.stringify(eventData || {}), 1_000);
  } catch {
    return '{}';
  }
}
/**
 * Build the Discord webhook body for an activity event.
 * Mentions are explicitly disabled and the content is capped at 1900
 * characters (under Discord's 2000-char limit).
 */
function buildDiscordWebhookPayload({
  membership,
  eventType,
  eventData,
}) {
  const actorName =
    membership?.user?.default_display_name ||
    membership?.member?.expected_runescape_name ||
    'Unknown Member';
  const groupLabel = membership?.group?.group_name || 'Unknown Group';
  const kind = normalizeWebhookEventType(eventType);
  const details = serializeWebhookEventData(eventData);

  const headline = `[${groupLabel}] ${actorName} triggered ${kind}`;
  const body = details === '{}' ? headline : `${headline} | ${details}`;

  return {
    username: 'GIGAGIMBank',
    content: truncateText(body, 1_900),
    allowed_mentions: {
      parse: [],
    },
  };
}
/**
 * Build the async processor that delivers one activity event to the
 * group's configured Discord webhook and records the outcome in
 * webhook_delivery_logs. Options: db (required), webhookMaxAttempts,
 * webhookInitialBackoffMs, webhookRequestTimeoutMs, fetchImpl, logger.
 */
export function createActivityWebhookDeliveryProcessor(options = {}) {
  const db = options.db;
  if (!db) {
    throw new Error('createActivityWebhookDeliveryProcessor requires db');
  }

  // Each tunable only takes effect when given a positive integer.
  const positiveIntOr = (value, fallback) =>
    Number.isInteger(value) && value > 0 ? value : fallback;
  const maxAttempts = positiveIntOr(options.webhookMaxAttempts, 3);
  const initialBackoffMs = positiveIntOr(options.webhookInitialBackoffMs, 750);
  const requestTimeoutMs = positiveIntOr(options.webhookRequestTimeoutMs, 5_000);
  const doFetch = typeof options.fetchImpl === 'function' ? options.fetchImpl : fetch;
  const log = options.logger && typeof options.logger === 'object' ? options.logger : console;

  return async function deliverActivityWebhook({
    membership,
    eventType,
    eventData,
    activityResult,
  }) {
    const groupId = membership?.group?.id;
    const groupMemberId = membership?.member?.id;
    if (!groupId || !groupMemberId) {
      log.error?.('activity-webhook-delivery: missing membership context for delivery payload');
      return;
    }

    // No config row, or webhooks disabled, means nothing to deliver.
    const configRow = getGroupWebhookConfig(db, groupId);
    if (!configRow) {
      return;
    }
    const webhookConfig = configRow.webhook_config;
    if (!webhookConfig.enabled) {
      return;
    }

    const normalizedEventType = normalizeWebhookEventType(eventType);
    const activityId = activityResult?.activity_id || null;
    const decision = shouldDispatchWebhookEvent(
      webhookConfig,
      normalizedEventType,
      eventData
    );

    // Filtered events are logged as SKIPPED so operators can audit the policy.
    if (!decision.allow) {
      recordWebhookDeliveryLog(db, {
        groupId,
        groupMemberId,
        activityId,
        eventType: normalizedEventType,
        deliveryStatus: 'SKIPPED',
        attemptCount: 0,
        httpStatus: null,
        errorMessage: `filtered: ${decision.reason}`,
        webhookUrl: webhookConfig.primary_url,
        payload: {
          skipped: true,
          reason: decision.reason,
          event_type: decision.event_type,
        },
      });
      return;
    }

    const targetUrl = String(webhookConfig.primary_url || '').trim();
    const payload = buildDiscordWebhookPayload({
      membership,
      eventType: normalizedEventType,
      eventData,
    });

    // Enabled config without a URL is a misconfiguration: log as FAILED.
    if (!targetUrl) {
      recordWebhookDeliveryLog(db, {
        groupId,
        groupMemberId,
        activityId,
        eventType: normalizedEventType,
        deliveryStatus: 'FAILED',
        attemptCount: 0,
        httpStatus: null,
        errorMessage: 'webhook_missing_primary_url',
        webhookUrl: null,
        payload,
      });
      return;
    }

    const delivery = await deliverWebhookWithRetry({
      url: targetUrl,
      payload,
      maxAttempts,
      initialBackoffMs,
      fetchImpl: (url, fetchOptions) =>
        doFetch(url, {
          ...fetchOptions,
          // Bound each attempt so a hung webhook cannot stall the caller.
          signal: AbortSignal.timeout(requestTimeoutMs),
        }),
    });

    const logged = recordWebhookDeliveryLog(db, {
      groupId,
      groupMemberId,
      activityId,
      eventType: normalizedEventType,
      deliveryStatus: delivery.ok ? 'SENT' : 'FAILED',
      attemptCount: delivery.attempt_count,
      httpStatus: delivery.status_code,
      errorMessage: delivery.error_message || delivery.response_body,
      webhookUrl: targetUrl,
      payload,
    });
    if (!logged.ok) {
      log.error?.('Failed to persist webhook delivery log:', logged.reason);
    }
  };
}

View File

@@ -0,0 +1,702 @@
import { Pool } from 'pg';
import { parseBooleanFlag, parsePositiveInteger } from './utils.js';
/** Restrict POSTGRES_SSLMODE to a known value; unknown input → 'prefer'. */
function normalizeSslMode(value) {
  const mode = String(value || '').trim().toLowerCase();
  switch (mode) {
    case 'disable':
    case 'allow':
    case 'prefer':
    case 'require':
      return mode;
    default:
      return 'prefer';
  }
}
/**
 * Translate a normalized sslmode into a pg `ssl` option:
 * 'disable' → false; 'require' → object whose cert verification can be
 * relaxed via POSTGRES_SSL_INSECURE_SKIP_VERIFY; otherwise undefined so
 * pg applies its default negotiation.
 */
function buildSslConfig(mode, env = process.env) {
  switch (mode) {
    case 'disable':
      return false;
    case 'require':
      return {
        rejectUnauthorized: !parseBooleanFlag(env.POSTGRES_SSL_INSECURE_SKIP_VERIFY),
      };
    default:
      return undefined;
  }
}
/**
 * Read postgres connection settings from the environment.
 * Accepts either a single URL (POSTGRES_URL / DATABASE_URL) or discrete
 * POSTGRES_HOST + POSTGRES_USER + POSTGRES_DB fields; throws when
 * neither form is complete. Returns { config, redacted } where
 * `redacted` is safe to log (no password, no raw URL).
 */
export function getPostgresConfigFromEnv(env = process.env) {
  const clean = (value) => String(value || '').trim();

  const connectionString = clean(env.POSTGRES_URL || env.DATABASE_URL || '');
  const host = clean(env.POSTGRES_HOST);
  const user = clean(env.POSTGRES_USER);
  const database = clean(env.POSTGRES_DB);
  const password = clean(env.POSTGRES_PASSWORD);
  const port = parsePositiveInteger(env.POSTGRES_PORT, 5432);
  const sslmode = normalizeSslMode(env.POSTGRES_SSLMODE);
  const maxPoolSize = parsePositiveInteger(env.POSTGRES_POOL_MAX, 10);
  const idleTimeoutMs = parsePositiveInteger(env.POSTGRES_IDLE_TIMEOUT_MS, 30_000);
  const connectionTimeoutMs = parsePositiveInteger(env.POSTGRES_CONNECTION_TIMEOUT_MS, 10_000);

  if (!connectionString && !(host && user && database)) {
    throw new Error(
      'Postgres config missing. Set POSTGRES_URL (or DATABASE_URL), or POSTGRES_HOST + POSTGRES_USER + POSTGRES_DB.'
    );
  }

  // When a URL is present, it wins and the discrete fields are nulled out.
  const usingUrl = Boolean(connectionString);
  const config = {
    connectionString: connectionString || null,
    host: usingUrl ? null : host,
    user: usingUrl ? null : user,
    database: usingUrl ? null : database,
    password: usingUrl ? null : password || null,
    port: usingUrl ? null : port,
    sslmode,
    ssl: buildSslConfig(sslmode, env),
    maxPoolSize,
    idleTimeoutMs,
    connectionTimeoutMs,
  };

  const redacted = {
    mode: usingUrl ? 'url' : 'discrete',
    host: usingUrl ? null : host,
    database: usingUrl ? null : database,
    user: usingUrl ? null : user,
    port: usingUrl ? null : port,
    sslmode,
    max_pool_size: maxPoolSize,
    idle_timeout_ms: idleTimeoutMs,
    connection_timeout_ms: connectionTimeoutMs,
  };

  return {
    config,
    redacted,
  };
}
/**
 * Rewrite sqlite-style '?' placeholders to postgres '$1', '$2', ...
 * A small lexer tracks single-quoted strings (with '' escapes),
 * double-quoted identifiers, '--' line comments, and slash-star block
 * comments so that '?' characters inside them are left untouched.
 */
export function convertSqliteParamsToPostgres(sqlText) {
  let out = '';
  let placeholder = 1;
  let state = 'code'; // 'code' | 'single' | 'double' | 'line' | 'block'
  let i = 0;

  while (i < sqlText.length) {
    const ch = sqlText[i];
    const ahead = sqlText[i + 1] || '';

    switch (state) {
      case 'line':
        out += ch;
        if (ch === '\n') {
          state = 'code';
        }
        i += 1;
        break;
      case 'block':
        out += ch;
        if (ch === '*' && ahead === '/') {
          out += ahead;
          i += 2;
          state = 'code';
        } else {
          i += 1;
        }
        break;
      case 'single':
        out += ch;
        if (ch === "'") {
          if (ahead === "'") {
            // '' is an escaped quote inside the literal.
            out += ahead;
            i += 2;
          } else {
            state = 'code';
            i += 1;
          }
        } else {
          i += 1;
        }
        break;
      case 'double':
        out += ch;
        if (ch === '"') {
          state = 'code';
        }
        i += 1;
        break;
      default: // plain SQL
        if (ch === '-' && ahead === '-') {
          out += '--';
          i += 2;
          state = 'line';
        } else if (ch === '/' && ahead === '*') {
          out += '/*';
          i += 2;
          state = 'block';
        } else if (ch === "'") {
          out += ch;
          state = 'single';
          i += 1;
        } else if (ch === '"') {
          out += ch;
          state = 'double';
          i += 1;
        } else if (ch === '?') {
          out += `$${placeholder}`;
          placeholder += 1;
          i += 1;
        } else {
          out += ch;
          i += 1;
        }
    }
  }
  return out;
}
/**
 * Construct a pg Pool from environment configuration.
 * Returns { pool, runtime } where `runtime` is the redacted,
 * log-safe summary from getPostgresConfigFromEnv.
 */
export function createPostgresPool(env = process.env) {
  const { config, redacted } = getPostgresConfigFromEnv(env);

  const poolSettings = {
    connectionString: config.connectionString || undefined,
    host: config.host || undefined,
    user: config.user || undefined,
    database: config.database || undefined,
    password: config.password || undefined,
    port: config.port || undefined,
    ssl: config.ssl,
    max: config.maxPoolSize,
    idleTimeoutMillis: config.idleTimeoutMs,
    connectionTimeoutMillis: config.connectionTimeoutMs,
    allowExitOnIdle: parseBooleanFlag(env.POSTGRES_ALLOW_EXIT_ON_IDLE),
  };

  return {
    pool: new Pool(poolSettings),
    runtime: redacted,
  };
}
/** Drain and dispose every client in the pool; resolves when closed. */
export async function closePostgresPool(pool) {
await pool.end();
}
/** Probe the pool with SELECT 1; true only when the probe row comes back. */
export async function runPostgresHealthcheck(pool) {
  const { rows } = await pool.query('SELECT 1 AS ok');
  return rows?.[0]?.ok === 1;
}
/**
 * Run a query and return all rows ([] when none).
 * '?' placeholders are rewritten to $n only when params are supplied.
 */
export async function postgresAll(pool, sqlText, params = []) {
  const text = params.length === 0 ? sqlText : convertSqliteParamsToPostgres(sqlText);
  const result = await pool.query(text, params);
  return result.rows || [];
}
/** Run a query and return the first row, or null when there are none. */
export async function postgresGet(pool, sqlText, params = []) {
  const [first] = await postgresAll(pool, sqlText, params);
  return first || null;
}
/**
 * Run a mutation and report { rowCount }.
 * '?' placeholders are rewritten to $n only when params are supplied.
 */
export async function postgresRun(pool, sqlText, params = []) {
  const text = params.length === 0 ? sqlText : convertSqliteParamsToPostgres(sqlText);
  const result = await pool.query(text, params);
  return {
    rowCount: Number(result.rowCount || 0),
  };
}
/** Run raw SQL with no parameters and report { rowCount }. */
export async function postgresExec(pool, sqlText) {
  const { rowCount } = await pool.query(sqlText);
  return {
    rowCount: Number(rowCount || 0),
  };
}
/**
 * Bind a pool into the { all, get, run, exec } helper shape used by the
 * rest of the store layer.
 */
export function createPostgresQueryHelpers(pool) {
  return {
    all(sqlText, params = []) {
      return postgresAll(pool, sqlText, params);
    },
    get(sqlText, params = []) {
      return postgresGet(pool, sqlText, params);
    },
    run(sqlText, params = []) {
      return postgresRun(pool, sqlText, params);
    },
    exec(sqlText) {
      return postgresExec(pool, sqlText);
    },
  };
}
/**
 * Idempotent base DDL for the whole application schema (all statements use
 * IF NOT EXISTS). Executed inside a transaction by initializePostgresSchema,
 * which then applies additive column migrations and data backfills on top.
 * NOTE(review): the table is named "groups_table" rather than "groups" —
 * presumably to avoid a reserved word; confirm before renaming.
 */
const POSTGRES_SCHEMA_SQL = `
CREATE TABLE IF NOT EXISTS users (
  id TEXT PRIMARY KEY,
  discord_id TEXT,
  discord_avatar_url TEXT,
  runelite_account_hash TEXT UNIQUE NOT NULL,
  runelite_linked INTEGER NOT NULL DEFAULT 1,
  default_display_name TEXT NOT NULL,
  opt_out_hiscores INTEGER NOT NULL DEFAULT 0,
  opt_out_activity_feed INTEGER NOT NULL DEFAULT 0,
  last_seen_at TEXT,
  created_at TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS user_runelite_accounts (
  account_hash TEXT PRIMARY KEY,
  user_id TEXT NOT NULL REFERENCES users(id),
  linked_at TEXT NOT NULL,
  is_active INTEGER NOT NULL DEFAULT 1
);
CREATE INDEX IF NOT EXISTS idx_user_runelite_accounts_user_id
  ON user_runelite_accounts(user_id);
CREATE TABLE IF NOT EXISTS oauth_sessions (
  id TEXT PRIMARY KEY,
  session_token TEXT UNIQUE NOT NULL,
  account_hash TEXT NOT NULL,
  expires_at TEXT NOT NULL,
  consumed_at TEXT
);
CREATE TABLE IF NOT EXISTS web_oauth_sessions (
  id TEXT PRIMARY KEY,
  session_token TEXT UNIQUE NOT NULL,
  expires_at TEXT NOT NULL,
  consumed_at TEXT
);
CREATE TABLE IF NOT EXISTS groups_table (
  id TEXT PRIMARY KEY,
  group_name TEXT NOT NULL,
  leader_user_id TEXT NOT NULL REFERENCES users(id),
  join_code TEXT UNIQUE NOT NULL,
  join_code_expires_at TEXT NOT NULL,
  allow_open_invite_join INTEGER NOT NULL DEFAULT 0,
  open_invite_expires_at TEXT,
  opt_out_hiscores INTEGER NOT NULL DEFAULT 0,
  opt_out_activity_feed INTEGER NOT NULL DEFAULT 0,
  webhook_config_json TEXT NOT NULL DEFAULT '{}',
  created_at TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS group_members (
  id TEXT PRIMARY KEY,
  group_id TEXT NOT NULL REFERENCES groups_table(id),
  user_id TEXT REFERENCES users(id),
  account_hash TEXT REFERENCES user_runelite_accounts(account_hash),
  expected_runescape_name TEXT NOT NULL,
  role TEXT NOT NULL,
  webhook_config_perms INTEGER NOT NULL DEFAULT 0,
  loadout_admin_perms INTEGER NOT NULL DEFAULT 0,
  joined_at TEXT NOT NULL
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_group_members_claimed_group_user
  ON group_members(group_id, user_id)
  WHERE user_id IS NOT NULL;
CREATE UNIQUE INDEX IF NOT EXISTS idx_group_members_account_hash
  ON group_members(account_hash)
  WHERE account_hash IS NOT NULL;
CREATE TABLE IF NOT EXISTS group_join_requests (
  id TEXT PRIMARY KEY,
  group_id TEXT NOT NULL REFERENCES groups_table(id),
  requester_user_id TEXT NOT NULL REFERENCES users(id),
  requester_account_hash TEXT REFERENCES user_runelite_accounts(account_hash),
  status TEXT NOT NULL,
  requested_at TEXT NOT NULL,
  resolved_at TEXT,
  resolved_by_user_id TEXT REFERENCES users(id)
);
CREATE INDEX IF NOT EXISTS idx_join_requests_group_status
  ON group_join_requests(group_id, status, requested_at DESC);
CREATE INDEX IF NOT EXISTS idx_join_requests_requester
  ON group_join_requests(requester_user_id, requested_at DESC);
CREATE TABLE IF NOT EXISTS storage_snapshots (
  id TEXT PRIMARY KEY,
  group_id TEXT NOT NULL REFERENCES groups_table(id),
  group_member_id TEXT REFERENCES group_members(id),
  location_type TEXT NOT NULL,
  items_json TEXT NOT NULL,
  state_hash TEXT NOT NULL,
  updated_at TEXT NOT NULL
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_storage_group_slot
  ON storage_snapshots(group_id, location_type)
  WHERE group_member_id IS NULL;
CREATE UNIQUE INDEX IF NOT EXISTS idx_storage_member_slot
  ON storage_snapshots(group_id, group_member_id, location_type)
  WHERE group_member_id IS NOT NULL;
CREATE TABLE IF NOT EXISTS audit_logs (
  id TEXT PRIMARY KEY,
  group_id TEXT NOT NULL REFERENCES groups_table(id),
  group_member_id TEXT REFERENCES group_members(id),
  location_type TEXT NOT NULL,
  item_id INTEGER NOT NULL,
  quantity_delta INTEGER NOT NULL,
  created_at TEXT NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_audit_group_created_at
  ON audit_logs(group_id, created_at DESC);
CREATE TABLE IF NOT EXISTS activity_logs (
  id TEXT PRIMARY KEY,
  group_id TEXT NOT NULL REFERENCES groups_table(id),
  group_member_id TEXT REFERENCES group_members(id),
  event_type TEXT NOT NULL,
  event_data_json TEXT NOT NULL,
  created_at TEXT NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_activity_group_created_at
  ON activity_logs(group_id, created_at DESC);
CREATE TABLE IF NOT EXISTS webhook_delivery_logs (
  id TEXT PRIMARY KEY,
  group_id TEXT NOT NULL REFERENCES groups_table(id),
  group_member_id TEXT REFERENCES group_members(id),
  activity_id TEXT,
  event_type TEXT NOT NULL,
  delivery_status TEXT NOT NULL,
  attempt_count INTEGER NOT NULL DEFAULT 0,
  http_status INTEGER,
  error_message TEXT,
  webhook_host TEXT,
  payload_json TEXT NOT NULL,
  created_at TEXT NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_webhook_delivery_group_created_at
  ON webhook_delivery_logs(group_id, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_webhook_delivery_status_created_at
  ON webhook_delivery_logs(delivery_status, created_at DESC);
CREATE TABLE IF NOT EXISTS billing_customers (
  user_id TEXT PRIMARY KEY REFERENCES users(id),
  stripe_customer_id TEXT UNIQUE NOT NULL,
  created_at TEXT NOT NULL,
  updated_at TEXT NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_billing_customers_customer_id
  ON billing_customers(stripe_customer_id);
CREATE TABLE IF NOT EXISTS billing_subscriptions (
  stripe_subscription_id TEXT PRIMARY KEY,
  stripe_customer_id TEXT NOT NULL,
  status TEXT NOT NULL,
  price_id TEXT,
  current_period_end TEXT,
  cancel_at_period_end INTEGER NOT NULL DEFAULT 0,
  raw_json TEXT NOT NULL DEFAULT '{}',
  created_at TEXT NOT NULL,
  updated_at TEXT NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_billing_subscriptions_customer_updated
  ON billing_subscriptions(stripe_customer_id, updated_at DESC);
CREATE TABLE IF NOT EXISTS group_billing_subscriptions (
  stripe_subscription_id TEXT PRIMARY KEY,
  group_id TEXT NOT NULL REFERENCES groups_table(id),
  status TEXT NOT NULL,
  price_id TEXT,
  current_period_end TEXT,
  cancel_at_period_end INTEGER NOT NULL DEFAULT 0,
  raw_json TEXT NOT NULL DEFAULT '{}',
  created_at TEXT NOT NULL,
  updated_at TEXT NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_group_billing_subscriptions_group_updated
  ON group_billing_subscriptions(group_id, updated_at DESC);
CREATE TABLE IF NOT EXISTS user_boost_credits (
  user_id TEXT PRIMARY KEY REFERENCES users(id),
  available_boosts INTEGER NOT NULL DEFAULT 0,
  updated_at TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS group_manual_boost_allocations (
  user_id TEXT NOT NULL REFERENCES users(id),
  group_id TEXT NOT NULL REFERENCES groups_table(id),
  boosts_assigned INTEGER NOT NULL,
  created_at TEXT NOT NULL,
  updated_at TEXT NOT NULL,
  PRIMARY KEY (user_id, group_id)
);
CREATE INDEX IF NOT EXISTS idx_group_manual_boost_allocations_group
  ON group_manual_boost_allocations(group_id, updated_at DESC);
CREATE TABLE IF NOT EXISTS user_subscription_boost_overrides (
  user_id TEXT NOT NULL REFERENCES users(id),
  allocation_month TEXT NOT NULL,
  allocations_json TEXT NOT NULL DEFAULT '{}',
  configured_at TEXT NOT NULL,
  PRIMARY KEY (user_id, allocation_month)
);
CREATE TABLE IF NOT EXISTS item_catalog (
  item_id INTEGER PRIMARY KEY,
  name TEXT NOT NULL,
  ge_value INTEGER,
  is_tradeable INTEGER NOT NULL DEFAULT 1,
  icon_url TEXT,
  catalog_source TEXT,
  catalog_version TEXT,
  updated_at TEXT NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_item_catalog_name
  ON item_catalog(name);
CREATE TABLE IF NOT EXISTS item_catalog_snapshots (
  id TEXT PRIMARY KEY,
  source_name TEXT NOT NULL,
  source_version TEXT,
  checksum_sha256 TEXT NOT NULL,
  item_count INTEGER NOT NULL,
  created_at TEXT NOT NULL,
  notes TEXT
);
CREATE INDEX IF NOT EXISTS idx_item_catalog_snapshots_created_at
  ON item_catalog_snapshots(created_at DESC);
CREATE TABLE IF NOT EXISTS item_catalog_snapshot_items (
  snapshot_id TEXT NOT NULL REFERENCES item_catalog_snapshots(id) ON DELETE CASCADE,
  item_id INTEGER NOT NULL,
  name TEXT NOT NULL,
  ge_value INTEGER,
  is_tradeable INTEGER NOT NULL DEFAULT 1,
  icon_url TEXT,
  PRIMARY KEY (snapshot_id, item_id)
);
CREATE TABLE IF NOT EXISTS hiscores_fallback_state (
  group_member_id TEXT PRIMARY KEY REFERENCES group_members(id),
  runescape_name TEXT NOT NULL,
  snapshot_json TEXT NOT NULL,
  updated_at TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS group_wealth_snapshots (
  id TEXT PRIMARY KEY,
  group_id TEXT NOT NULL REFERENCES groups_table(id),
  total_value_gp INTEGER NOT NULL DEFAULT 0,
  captured_at TEXT NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_group_wealth_snapshots_group_time
  ON group_wealth_snapshots(group_id, captured_at DESC);
CREATE TABLE IF NOT EXISTS group_goals (
  id TEXT PRIMARY KEY,
  group_id TEXT NOT NULL REFERENCES groups_table(id),
  created_by_user_id TEXT NOT NULL REFERENCES users(id),
  title TEXT NOT NULL,
  description TEXT,
  target_value_gp INTEGER NOT NULL,
  current_value_gp INTEGER NOT NULL DEFAULT 0,
  status TEXT NOT NULL DEFAULT 'ACTIVE',
  due_at TEXT,
  created_at TEXT NOT NULL,
  updated_at TEXT NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_group_goals_group_status
  ON group_goals(group_id, status, updated_at DESC);
CREATE TABLE IF NOT EXISTS group_loadouts (
  id TEXT PRIMARY KEY,
  group_id TEXT NOT NULL REFERENCES groups_table(id),
  owner_user_id TEXT NOT NULL REFERENCES users(id),
  name TEXT NOT NULL,
  description TEXT,
  scope TEXT NOT NULL,
  created_at TEXT NOT NULL,
  updated_at TEXT NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_group_loadouts_group_scope_updated
  ON group_loadouts(group_id, scope, updated_at DESC);
CREATE TABLE IF NOT EXISTS group_loadout_items (
  id TEXT PRIMARY KEY,
  loadout_id TEXT NOT NULL REFERENCES group_loadouts(id),
  item_id INTEGER NOT NULL,
  required_qty INTEGER NOT NULL,
  created_at TEXT NOT NULL
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_group_loadout_items_unique
  ON group_loadout_items(loadout_id, item_id);
CREATE TABLE IF NOT EXISTS feature_usage_events (
  id TEXT PRIMARY KEY,
  group_id TEXT NOT NULL REFERENCES groups_table(id),
  user_id TEXT REFERENCES users(id),
  feature_key TEXT NOT NULL,
  action_key TEXT NOT NULL,
  created_at TEXT NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_feature_usage_events_feature_time
  ON feature_usage_events(feature_key, action_key, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_feature_usage_events_group_time
  ON feature_usage_events(group_id, created_at DESC);
`;
/**
 * Create or upgrade the Postgres schema inside a single transaction.
 *
 * Order matters: base DDL first, then additive column migrations (ALTER
 * TABLE ... ADD COLUMN IF NOT EXISTS), then data backfills that depend on
 * those columns, and finally index rebuilds that depend on the backfilled
 * data. Every statement is idempotent, so this is safe to re-run.
 * Rolls back and rethrows on any failure.
 *
 * @param {Pool} pool
 * @returns {Promise<void>}
 */
export async function initializePostgresSchema(pool) {
  // NOTE(review): statements run on the pool, not a dedicated client —
  // assumes the pool serializes these onto one connection for BEGIN/COMMIT
  // to bracket them correctly; confirm pool sizing/behavior.
  await pool.query('BEGIN');
  try {
    // 1. Base tables and indexes (all IF NOT EXISTS).
    await pool.query(POSTGRES_SCHEMA_SQL);
    // 2. Additive column migrations for databases created before these
    //    columns existed in the base DDL.
    await pool.query(
      `ALTER TABLE users
         ADD COLUMN IF NOT EXISTS discord_avatar_url TEXT`
    );
    await pool.query(
      `ALTER TABLE users
         ADD COLUMN IF NOT EXISTS last_seen_at TEXT`
    );
    await pool.query(
      `ALTER TABLE group_members
         ADD COLUMN IF NOT EXISTS webhook_config_perms INTEGER NOT NULL DEFAULT 0`
    );
    await pool.query(
      `ALTER TABLE group_members
         ADD COLUMN IF NOT EXISTS loadout_admin_perms INTEGER NOT NULL DEFAULT 0`
    );
    await pool.query(
      `ALTER TABLE group_members
         ADD COLUMN IF NOT EXISTS account_hash TEXT`
    );
    await pool.query(
      `ALTER TABLE group_join_requests
         ADD COLUMN IF NOT EXISTS requester_account_hash TEXT`
    );
    await pool.query(
      `ALTER TABLE item_catalog
         ADD COLUMN IF NOT EXISTS catalog_source TEXT`
    );
    await pool.query(
      `ALTER TABLE item_catalog
         ADD COLUMN IF NOT EXISTS catalog_version TEXT`
    );
    // 3. Data repairs: stamp required-but-missing values on legacy rows.
    await pool.query(
      `UPDATE groups_table
          SET join_code_expires_at = NOW()::text
        WHERE join_code_expires_at IS NULL
           OR join_code_expires_at = ''`
    );
    await pool.query(
      `UPDATE groups_table
          SET webhook_config_json = '{}'
        WHERE webhook_config_json IS NULL
           OR webhook_config_json = ''`
    );
    // 4. Backfill: migrate the legacy single-account-per-user column into
    //    the user_runelite_accounts table (one row per linked account).
    await pool.query(
      `INSERT INTO user_runelite_accounts (
         account_hash,
         user_id,
         linked_at,
         is_active
       )
       SELECT
         runelite_account_hash,
         id,
         created_at,
         CASE WHEN runelite_linked = 1 THEN 1 ELSE 0 END
       FROM users
       WHERE runelite_account_hash IS NOT NULL
         AND runelite_account_hash <> ''
       ON CONFLICT(account_hash) DO NOTHING`
    );
    // Keep the legacy users.runelite_linked flag consistent with whether
    // the user has any active account row.
    await pool.query(
      `UPDATE users
          SET runelite_linked = CASE
            WHEN EXISTS (
              SELECT 1
              FROM user_runelite_accounts ura
              WHERE ura.user_id = users.id
                AND ura.is_active = 1
            ) THEN 1
            ELSE 0
          END`
    );
    // Point the legacy users.runelite_account_hash at the most recently
    // linked active account (left unchanged when the user has none).
    await pool.query(
      `UPDATE users
          SET runelite_account_hash = COALESCE(
            (
              SELECT ura.account_hash
              FROM user_runelite_accounts ura
              WHERE ura.user_id = users.id
                AND ura.is_active = 1
              ORDER BY ura.linked_at DESC
              LIMIT 1
            ),
            runelite_account_hash
          )`
    );
    // Backfill group_members.account_hash from the member's most recently
    // linked active account, only where it is still unset.
    await pool.query(
      `UPDATE group_members
          SET account_hash = subquery.account_hash
         FROM (
           SELECT
             gm.id AS group_member_id,
             (
               SELECT ura.account_hash
               FROM user_runelite_accounts ura
               WHERE ura.user_id = gm.user_id
                 AND ura.is_active = 1
               ORDER BY ura.linked_at DESC
               LIMIT 1
             ) AS account_hash
           FROM group_members gm
           WHERE gm.user_id IS NOT NULL
             AND (gm.account_hash IS NULL OR gm.account_hash = '')
         ) AS subquery
        WHERE group_members.id = subquery.group_member_id
          AND subquery.account_hash IS NOT NULL`
    );
    // Same backfill for join requests, keyed on the requester's account.
    await pool.query(
      `UPDATE group_join_requests
          SET requester_account_hash = subquery.account_hash
         FROM (
           SELECT
             jr.id AS join_request_id,
             (
               SELECT ura.account_hash
               FROM user_runelite_accounts ura
               WHERE ura.user_id = jr.requester_user_id
                 AND ura.is_active = 1
               ORDER BY ura.linked_at DESC
               LIMIT 1
             ) AS account_hash
           FROM group_join_requests jr
           WHERE jr.requester_account_hash IS NULL
              OR jr.requester_account_hash = ''
         ) AS subquery
        WHERE group_join_requests.id = subquery.join_request_id
          AND subquery.account_hash IS NOT NULL`
    );
    // 5. Replace the obsolete membership uniqueness index with the current
    //    pair of partial unique indexes (depends on step 4's backfill).
    await pool.query(`DROP INDEX IF EXISTS idx_group_members_claimed_user`);
    await pool.query(
      `CREATE UNIQUE INDEX IF NOT EXISTS idx_group_members_claimed_group_user
         ON group_members(group_id, user_id)
         WHERE user_id IS NOT NULL`
    );
    await pool.query(
      `CREATE UNIQUE INDEX IF NOT EXISTS idx_group_members_account_hash
         ON group_members(account_hash)
         WHERE account_hash IS NOT NULL`
    );
    await pool.query('COMMIT');
  } catch (error) {
    await pool.query('ROLLBACK');
    throw error;
  }
}

View File

@@ -0,0 +1,57 @@
"""
Progress Tracking Utilities
===========================
Functions for tracking and displaying progress of the autonomous coding agent.
"""
import json
from pathlib import Path
def count_passing_tests(project_dir: Path) -> tuple[int, int]:
    """
    Count passing and total tests in feature_list.json.

    Args:
        project_dir: Directory containing feature_list.json

    Returns:
        (passing_count, total_count); (0, 0) when the file is missing,
        unreadable, not valid JSON, or not a JSON list.
    """
    tests_file = project_dir / "feature_list.json"
    try:
        tests = json.loads(tests_file.read_text())
    except (OSError, json.JSONDecodeError):
        # Missing/unreadable file or invalid JSON: report no progress
        # instead of crashing the progress display.
        return 0, 0
    if not isinstance(tests, list):
        # Defensive: a malformed (non-list) document previously raised an
        # uncaught AttributeError when iterated; treat it as "no tests".
        return 0, 0
    passing = sum(
        1 for test in tests if isinstance(test, dict) and test.get("passes", False)
    )
    return passing, len(tests)
def print_session_header(session_num: int, is_initializer: bool) -> None:
    """Display a banner announcing the session number and agent role."""
    role = "INITIALIZER" if is_initializer else "CODING AGENT"
    bar = "=" * 70
    print(f"\n{bar}")
    print(f" SESSION {session_num}: {role}")
    print(bar)
    print()
def print_progress_summary(project_dir: Path) -> None:
    """Report how many feature tests currently pass, with a percentage."""
    passing, total = count_passing_tests(project_dir)
    if not total:
        print("\nProgress: feature_list.json not yet created")
        return
    percentage = passing / total * 100
    print(f"\nProgress: {passing}/{total} tests passing ({percentage:.1f}%)")

View File

@@ -0,0 +1,71 @@
"""
Progress Tracking Utilities
===========================
Functions for tracking and displaying progress using task-master's task list.
"""
import json
from pathlib import Path
# Path (relative to the project root) of task-master's task database.
TASKS_FILE = ".taskmaster/tasks/tasks.json"


def _load_tasks(project_dir: Path) -> list[dict]:
    """Read the task list from tasks.json, returning [] on any failure."""
    tasks_path = project_dir / TASKS_FILE
    if not tasks_path.exists():
        return []
    try:
        data = json.loads(tasks_path.read_text())
    except (json.JSONDecodeError, OSError):
        # Unreadable or malformed file is treated the same as no tasks.
        return []
    return data.get("tasks", [])
def get_available_tasks(project_dir: Path) -> list[dict]:
    """Return tasks that are pending with all dependencies done."""
    tasks = _load_tasks(project_dir)
    completed_ids = {task["id"] for task in tasks if task.get("status") == "done"}
    available = []
    for task in tasks:
        if task.get("status") != "pending":
            continue
        # A task is runnable only once every dependency has completed.
        if all(dep in completed_ids for dep in task.get("dependencies", [])):
            available.append(task)
    return available
def all_tasks_done(project_dir: Path) -> bool:
    """Return True if every task is done."""
    tasks = _load_tasks(project_dir)
    if not tasks:
        # No tasks at all means "not finished", not "vacuously finished".
        return False
    return all(task.get("status") == "done" for task in tasks)
def count_task_progress(project_dir: Path) -> tuple[int, int]:
    """Return (done_count, total_count)."""
    tasks = _load_tasks(project_dir)
    completed = [task for task in tasks if task.get("status") == "done"]
    return len(completed), len(tasks)
def is_initialized(project_dir: Path) -> bool:
    """Return True if task-master has been initialized with tasks."""
    tasks_path = project_dir / TASKS_FILE
    return tasks_path.exists()
def print_session_header(label: str) -> None:
    """Print a 70-column banner containing *label* in upper case."""
    rule = "=" * 70
    print(f"\n{rule}")
    print(f" {label.upper()}")
    print(rule)
    print()
def print_progress_summary(project_dir: Path) -> None:
    """Print a summary of current task progress."""
    done, total = count_task_progress(project_dir)
    if not total:
        print("\nProgress: no tasks found (initializer not yet run?)")
        return
    pct = done / total * 100
    print(f"\nProgress: {done}/{total} tasks done ({pct:.1f}%)")

View File

@@ -0,0 +1,122 @@
"""
Claude SDK Client Configuration
===============================
Functions for creating and configuring the Claude Agent SDK client.
"""
import json
import os
from pathlib import Path
from claude_code_sdk import ClaudeCodeOptions, ClaudeSDKClient
from claude_code_sdk.types import HookMatcher
from security import bash_security_hook
# Puppeteer MCP tools for browser automation.
# The "mcp__<server>__<tool>" form is the SDK's MCP tool naming scheme;
# presumably these must match the tools the puppeteer MCP server registers
# (see the mcp_servers config in create_client) — verify against that server.
PUPPETEER_TOOLS = [
    "mcp__puppeteer__puppeteer_navigate",
    "mcp__puppeteer__puppeteer_screenshot",
    "mcp__puppeteer__puppeteer_click",
    "mcp__puppeteer__puppeteer_fill",
    "mcp__puppeteer__puppeteer_select",
    "mcp__puppeteer__puppeteer_hover",
    "mcp__puppeteer__puppeteer_evaluate",
]
# Built-in Claude Code tools granted to the agent: file read/write/edit,
# file search (Glob/Grep), and shell access (Bash — gated separately by the
# security hook configured in create_client).
BUILTIN_TOOLS = [
    "Read",
    "Write",
    "Edit",
    "Glob",
    "Grep",
    "Bash",
]
def create_client(project_dir: Path, model: str) -> ClaudeSDKClient:
    """
    Create a Claude Agent SDK client with multi-layered security.

    Args:
        project_dir: Directory for the project (created if missing; becomes
            the client's working directory and the filesystem sandbox root)
        model: Claude model to use

    Returns:
        Configured ClaudeSDKClient

    Raises:
        ValueError: If the ANTHROPIC_API_KEY environment variable is unset.

    Security layers (defense in depth):
    1. Sandbox - OS-level bash command isolation prevents filesystem escape
    2. Permissions - File operations restricted to project_dir only
    3. Security hooks - Bash commands validated against an allowlist
       (see security.py for ALLOWED_COMMANDS)

    Side effects: creates project_dir, writes .claude_settings.json inside
    it, and prints a summary of the security configuration to stdout.
    """
    # The key is only *validated* here; it is never passed to the client
    # explicitly — presumably the SDK reads it from the environment itself
    # (TODO confirm against claude_code_sdk docs).
    api_key = os.environ.get("ANTHROPIC_API_KEY")
    if not api_key:
        raise ValueError(
            "ANTHROPIC_API_KEY environment variable not set.\n"
            "Get your API key from: https://console.anthropic.com/"
        )
    # Create comprehensive security settings
    # Note: Using relative paths ("./**") restricts access to project directory
    # since cwd is set to project_dir
    security_settings = {
        "sandbox": {"enabled": True, "autoAllowBashIfSandboxed": True},
        "permissions": {
            "defaultMode": "acceptEdits",  # Auto-approve edits within allowed directories
            "allow": [
                # Allow all file operations within the project directory
                "Read(./**)",
                "Write(./**)",
                "Edit(./**)",
                "Glob(./**)",
                "Grep(./**)",
                # Bash permission granted here, but actual commands are validated
                # by the bash_security_hook (see security.py for allowed commands)
                "Bash(*)",
                # Allow Puppeteer MCP tools for browser automation
                *PUPPETEER_TOOLS,
            ],
        },
    }
    # Ensure project directory exists before creating settings file
    project_dir.mkdir(parents=True, exist_ok=True)
    # Write settings to a file in the project directory
    settings_file = project_dir / ".claude_settings.json"
    with open(settings_file, "w") as f:
        json.dump(security_settings, f, indent=2)
    # Human-readable summary of the security posture configured above.
    print(f"Created security settings at {settings_file}")
    print("   - Sandbox enabled (OS-level bash isolation)")
    print(f"   - Filesystem restricted to: {project_dir.resolve()}")
    print("   - Bash commands restricted to allowlist (see security.py)")
    print("   - MCP servers: puppeteer (browser automation)")
    print()
    return ClaudeSDKClient(
        options=ClaudeCodeOptions(
            model=model,
            system_prompt="You are an expert full-stack developer building a production-quality web application.",
            allowed_tools=[
                *BUILTIN_TOOLS,
                *PUPPETEER_TOOLS,
            ],
            # The puppeteer MCP server is launched on demand via npx.
            mcp_servers={
                "puppeteer": {"command": "npx", "args": ["puppeteer-mcp-server"]}
            },
            # Layer 3: every Bash invocation passes through the allowlist
            # hook before execution.
            hooks={
                "PreToolUse": [
                    HookMatcher(matcher="Bash", hooks=[bash_security_hook]),
                ],
            },
            # High turn cap: sessions are long-running autonomous coding runs.
            max_turns=1000,
            cwd=str(project_dir.resolve()),
            settings=str(settings_file.resolve()),  # Use absolute path
        )
    )

Some files were not shown because too many files have changed in this diff Show More