diff --git a/cloudflare-gastown/src/db/tables/town-events.table.ts b/cloudflare-gastown/src/db/tables/town-events.table.ts index 436aaceca..3d163ddc3 100644 --- a/cloudflare-gastown/src/db/tables/town-events.table.ts +++ b/cloudflare-gastown/src/db/tables/town-events.table.ts @@ -35,6 +35,7 @@ export const TownEventRecord = z.object({ .pipe(z.record(z.string(), z.unknown())), created_at: z.string(), processed_at: z.string().nullable(), + retry_count: z.coerce.number().default(0), }); export type TownEventRecord = z.output<typeof TownEventRecord>; @@ -50,9 +51,17 @@ export function createTableTownEvents(): string { payload: `text not null default '{}'`, created_at: `text not null`, processed_at: `text`, + retry_count: `integer not null default 0`, }); } +/** Idempotent ALTER statements for existing databases. 
*/ +export function migrateTownEvents(): string[] { + return [ + `ALTER TABLE ${town_events} ADD COLUMN ${town_events.columns.retry_count} integer not null default 0`, + ]; +} + export function getIndexesTownEvents(): string[] { return [ `CREATE INDEX IF NOT EXISTS idx_town_events_pending ON ${town_events}(${town_events.columns.created_at}) WHERE ${town_events.columns.processed_at} IS NULL`, diff --git a/cloudflare-gastown/src/dos/Town.do.ts b/cloudflare-gastown/src/dos/Town.do.ts index 382749b84..41768af91 100644 --- a/cloudflare-gastown/src/dos/Town.do.ts +++ b/cloudflare-gastown/src/dos/Town.do.ts @@ -3460,6 +3460,9 @@ export class TownDO extends DurableObject { }; // Phase 0: Drain events and apply state transitions + // Events may return actions (e.g. stop_agent on bead cancellation) + // that are merged into the Phase 1 action list. + const eventActions: Action[] = []; try { const pending = events.drainEvents(this.sql); metrics.eventsDrained = pending.length; @@ -3467,19 +3470,65 @@ export class TownDO extends DurableObject { logger.info('reconciler: draining events', { count: pending.length }); } for (const event of pending) { + // Skip poison events that have already exceeded the retry limit + if (event.retry_count >= events.MAX_EVENT_RETRIES) { + logger.error('reconciler: marking poison event as processed', { + eventId: event.event_id, + eventType: event.event_type, + retryCount: event.retry_count, + payload: event.payload, + }); + Sentry.captureException( + new Error(`Poison event skipped: ${event.event_type} (${event.event_id})`), + { + extra: { + eventId: event.event_id, + eventType: event.event_type, + retryCount: event.retry_count, + payload: event.payload, + }, + } + ); + events.markPoisoned(this.sql, event.event_id); + continue; + } + try { - reconciler.applyEvent(this.sql, event); + const actions = reconciler.applyEvent(this.sql, event); + eventActions.push(...actions); events.markProcessed(this.sql, event.event_id); } catch (err) { + const 
retryCount = events.incrementRetryCount(this.sql, event.event_id); + const errorMessage = err instanceof Error ? err.message : String(err); logger.error('reconciler: applyEvent failed', { eventId: event.event_id, eventType: event.event_type, - error: err instanceof Error ? err.message : String(err), + retryCount, + maxRetries: events.MAX_EVENT_RETRIES, + error: errorMessage, }); - // Event stays unprocessed — will be retried on the next alarm tick. - // Mark it processed anyway after 3 consecutive failures to prevent - // a poison event from blocking the entire queue forever. - // For now, we skip it and let the next tick retry. + if (retryCount >= events.MAX_EVENT_RETRIES) { + logger.error('reconciler: event exceeded retry limit, marking as poison', { + eventId: event.event_id, + eventType: event.event_type, + retryCount, + payload: event.payload, + }); + Sentry.captureException( + new Error( + `Poison event after ${retryCount} failures: ${event.event_type} (${event.event_id}) — ${errorMessage}` + ), + { + extra: { + eventId: event.event_id, + eventType: event.event_type, + retryCount, + payload: event.payload, + }, + } + ); + events.markPoisoned(this.sql, event.event_id); + } } } } catch (err) { @@ -3510,10 +3559,13 @@ export class TownDO extends DurableObject { const sideEffects: Array<() => Promise> = []; try { const townConfig = await this.getTownConfig(); - const actions = reconciler.reconcile(this.sql, { - draining: this._draining, - refineryCodeReview: townConfig.refinery?.code_review ?? true, - }); + const actions = [ + ...eventActions, + ...reconciler.reconcile(this.sql, { + draining: this._draining, + refineryCodeReview: townConfig.refinery?.code_review ?? true, + }), + ]; metrics.actionsEmitted = actions.length; for (const a of actions) { metrics.actionsByType[a.type] = (metrics.actionsByType[a.type] ?? 
0) + 1; @@ -4038,6 +4090,7 @@ export class TownDO extends DurableObject { const token = await this.resolveGitHubToken(townConfig); if (!token) { console.warn(`${TOWN_LOG} checkPRStatus: no GitHub token available, cannot poll ${prUrl}`); + return null; } @@ -4743,15 +4796,19 @@ export class TownDO extends DurableObject { ]); // Apply each event to reconstruct state transitions + const replayEventActions: Action[] = []; for (const event of rangeEvents) { - reconciler.applyEvent(this.sql, event); + replayEventActions.push(...reconciler.applyEvent(this.sql, event)); } // Run reconciler against the resulting state const tc = await this.getTownConfig(); - const actions = reconciler.reconcile(this.sql, { - refineryCodeReview: tc.refinery?.code_review ?? true, - }); + const actions = [ + ...replayEventActions, + ...reconciler.reconcile(this.sql, { + refineryCodeReview: tc.refinery?.code_review ?? true, + }), + ]; // Capture a state snapshot before rollback const agentSnapshot = [ @@ -4824,16 +4881,20 @@ export class TownDO extends DurableObject { try { // Phase 0: Drain and apply pending events (same as real alarm loop) const pending = events.drainEvents(this.sql); + const dryRunEventActions: Action[] = []; for (const event of pending) { - reconciler.applyEvent(this.sql, event); + dryRunEventActions.push(...reconciler.applyEvent(this.sql, event)); events.markProcessed(this.sql, event.event_id); } // Phase 1: Reconcile against now-current state const tc2 = await this.getTownConfig(); - const actions = reconciler.reconcile(this.sql, { - refineryCodeReview: tc2.refinery?.code_review ?? true, - }); + const actions = [ + ...dryRunEventActions, + ...reconciler.reconcile(this.sql, { + refineryCodeReview: tc2.refinery?.code_review ?? 
true, + }), + ]; const pendingEventCount = events.pendingEventCount(this.sql); const actionsByType: Record = {}; for (const a of actions) { diff --git a/cloudflare-gastown/src/dos/town/actions.ts b/cloudflare-gastown/src/dos/town/actions.ts index 323de6755..b84a49e16 100644 --- a/cloudflare-gastown/src/dos/town/actions.ts +++ b/cloudflare-gastown/src/dos/town/actions.ts @@ -317,6 +317,19 @@ export function applyAction(ctx: ApplyActionContext, action: Action): (() => Pro // ── Bead mutations ────────────────────────────────────────── case 'transition_bead': { + // When `from` is specified, verify the bead is in the expected state + // before transitioning. This guards against concurrent rule firings + // producing unexpected transitions. (H3, S1 — reconciliation-spec §4) + if (action.from !== null) { + const currentBead = beadOps.getBead(sql, action.bead_id); + if (currentBead && currentBead.status !== action.from) { + console.warn( + `${LOG} transition_bead: expected from=${action.from} but bead is ${currentBead.status}, skipping (bead=${action.bead_id})` + ); + return null; + } + } + try { const failureReason = action.to === 'failed' @@ -558,16 +571,25 @@ export function applyAction(ctx: ApplyActionContext, action: Action): (() => Pro const capturedAgentId = agentId; return async () => { - // Best-effort dispatch. If it fails, the agent stays 'working' - // and the bead stays 'in_progress'. The reconciler detects the - // mismatch on the next tick (idle agent hooked to in_progress - // bead) and retries dispatch. - await ctx.dispatchAgent(capturedAgentId, beadId, rigId).catch(err => { + // Best-effort dispatch. If it fails (thrown error or resolved + // false), roll the agent back to 'idle' so the reconciler can + // retry on the next tick. The bead stays 'in_progress' — no + // separate recovery needed (§5.4). 
+ let accepted = false; + try { + accepted = await ctx.dispatchAgent(capturedAgentId, beadId, rigId); + } catch (err) { console.warn( - `${LOG} dispatch_agent: container start failed for agent=${capturedAgentId} bead=${beadId}`, + `${LOG} dispatch_agent: container start threw for agent=${capturedAgentId} bead=${beadId}, rolling back to idle`, err ); - }); + } + if (!accepted) { + console.warn( + `${LOG} dispatch_agent: container did not accept start for agent=${capturedAgentId} bead=${beadId}, rolling back to idle` + ); + agentOps.updateAgentStatus(sql, capturedAgentId, 'idle'); + } }; } diff --git a/cloudflare-gastown/src/dos/town/events.ts b/cloudflare-gastown/src/dos/town/events.ts index ea95388be..f45f3054d 100644 --- a/cloudflare-gastown/src/dos/town/events.ts +++ b/cloudflare-gastown/src/dos/town/events.ts @@ -11,6 +11,7 @@ import { TownEventRecord, createTableTownEvents, getIndexesTownEvents, + migrateTownEvents, } from '../../db/tables/town-events.table'; import type { TownEventType } from '../../db/tables/town-events.table'; import { query } from '../../util/query.util'; @@ -26,6 +27,16 @@ function now(): string { /** Create the town_events table and indexes. Idempotent. */ export function initTownEventsTable(sql: SqlStorage): void { query(sql, createTableTownEvents(), []); + + // Migrations: add columns to existing tables (idempotent) + for (const stmt of migrateTownEvents()) { + try { + query(sql, stmt, []); + } catch { + // Column already exists — expected after first run + } + } + for (const idx of getIndexesTownEvents()) { query(sql, idx, []); } @@ -138,6 +149,9 @@ export function upsertContainerStatus( }); } +/** Maximum number of times an event is retried before being marked as poison. */ +export const MAX_EVENT_RETRIES = 3; + /** * Drain all unprocessed events, ordered by creation time. * Returns events with processed_at = NULL, oldest first. 
@@ -150,7 +164,7 @@ export function drainEvents(sql: SqlStorage): TownEventRecord[] { SELECT ${town_events.event_id}, ${town_events.event_type}, ${town_events.agent_id}, ${town_events.bead_id}, ${town_events.payload}, ${town_events.created_at}, - ${town_events.processed_at} + ${town_events.processed_at}, ${town_events.retry_count} FROM ${town_events} WHERE ${town_events.processed_at} IS NULL ORDER BY ${town_events.created_at} ASC @@ -174,6 +188,44 @@ export function markProcessed(sql: SqlStorage, eventId: string): void { ); } +/** + * Increment the retry count for an event that failed processing. + * Returns the new retry count. + */ +export function incrementRetryCount(sql: SqlStorage, eventId: string): number { + const rows = [ + ...query( + sql, + /* sql */ ` + UPDATE ${town_events} + SET ${town_events.columns.retry_count} = ${town_events.columns.retry_count} + 1 + WHERE ${town_events.event_id} = ? + RETURNING ${town_events.retry_count} + `, + [eventId] + ), + ]; + const row = rows[0]; + return typeof row?.retry_count === 'number' ? row.retry_count : 1; +} + +/** + * Mark an event as a poison event — it has exceeded the retry limit and + * is being removed from the queue to unblock processing. Sets processed_at + * so drainEvents no longer returns it. + */ +export function markPoisoned(sql: SqlStorage, eventId: string): void { + query( + sql, + /* sql */ ` + UPDATE ${town_events} + SET ${town_events.columns.processed_at} = ? + WHERE ${town_events.event_id} = ? + `, + [now(), eventId] + ); +} + /** * Delete old processed events beyond the retention window. 
* Only deletes events that have been processed (processed_at IS NOT NULL) diff --git a/cloudflare-gastown/src/dos/town/reconciler.ts b/cloudflare-gastown/src/dos/town/reconciler.ts index 27b1e9f84..975f0fbed 100644 --- a/cloudflare-gastown/src/dos/town/reconciler.ts +++ b/cloudflare-gastown/src/dos/town/reconciler.ts @@ -52,6 +52,7 @@ const CIRCUIT_BREAKER_WINDOW_MINUTES = 30; * beads that eventually succeeded (status = 'closed'). */ function checkDispatchCircuitBreaker(sql: SqlStorage): Action[] { + const cutoff = new Date(Date.now() - CIRCUIT_BREAKER_WINDOW_MINUTES * 60_000).toISOString(); const rows = z .object({ failure_count: z.number() }) .array() @@ -61,11 +62,11 @@ function checkDispatchCircuitBreaker(sql: SqlStorage): Action[] { /* sql */ ` SELECT count(*) as failure_count FROM ${beads} - WHERE ${beads.last_dispatch_attempt_at} > strftime('%Y-%m-%dT%H:%M:%fZ', 'now', '-${CIRCUIT_BREAKER_WINDOW_MINUTES} minutes') + WHERE ${beads.last_dispatch_attempt_at} > ? AND ${beads.dispatch_attempts} > 0 AND ${beads.status} != 'closed' `, - [] + [cutoff] ), ]); @@ -199,40 +200,40 @@ type ConvoyRow = z.infer; * * See reconciliation-spec.md §5.2. */ -export function applyEvent(sql: SqlStorage, event: TownEventRecord): void { +export function applyEvent(sql: SqlStorage, event: TownEventRecord): Action[] { const payload = event.payload; switch (event.event_type) { case 'agent_done': { if (!event.agent_id) { console.warn(`${LOG} applyEvent: agent_done missing agent_id`); - return; + return []; } const branch = typeof payload.branch === 'string' ? payload.branch : ''; const pr_url = typeof payload.pr_url === 'string' ? payload.pr_url : undefined; const summary = typeof payload.summary === 'string' ? 
payload.summary : undefined; reviewQueue.agentDone(sql, event.agent_id, { branch, pr_url, summary }); - return; + return []; } case 'agent_completed': { if (!event.agent_id) { console.warn(`${LOG} applyEvent: agent_completed missing agent_id`); - return; + return []; } const status = payload.status === 'completed' || payload.status === 'failed' ? payload.status : 'failed'; const reason = typeof payload.reason === 'string' ? payload.reason : undefined; reviewQueue.agentCompleted(sql, event.agent_id, { status, reason }); - return; + return []; } case 'pr_status_changed': { if (!event.bead_id) { console.warn(`${LOG} applyEvent: pr_status_changed missing bead_id`); - return; + return []; } const pr_state = payload.pr_state; if (pr_state === 'merged') { @@ -248,19 +249,19 @@ export function applyEvent(sql: SqlStorage, event: TownEventRecord): void { message: 'PR closed without merge', }); } - return; + return []; } case 'bead_created': { // No state change needed — bead already exists in DB. // Reconciler will pick it up as unassigned on next pass. - return; + return []; } case 'bead_cancelled': { if (!event.bead_id) { console.warn(`${LOG} applyEvent: bead_cancelled missing bead_id`); - return; + return []; } const cancelStatus = payload.cancel_status === 'closed' || payload.cancel_status === 'failed' @@ -269,32 +270,44 @@ export function applyEvent(sql: SqlStorage, event: TownEventRecord): void { beadOps.updateBeadStatus(sql, event.bead_id, cancelStatus, 'system'); - // Unhook any agent hooked to this bead + // Unhook any agent hooked to this bead, then stop working agents const hookedAgentRows = z - .object({ bead_id: z.string() }) + .object({ bead_id: z.string(), status: z.string() }) .array() .parse([ ...query( sql, /* sql */ ` - SELECT ${agent_metadata.bead_id} + SELECT ${agent_metadata.bead_id}, ${agent_metadata.status} FROM ${agent_metadata} WHERE ${agent_metadata.current_hook_bead_id} = ? 
`, [event.bead_id] ), ]); + + const stopActions: Action[] = []; for (const row of hookedAgentRows) { + const wasWorking = row.status === 'working' || row.status === 'stalled'; agents.unhookBead(sql, row.bead_id); + // Stop the agent's container if it was actively working — + // avoids waiting 90s for the heartbeat timeout. + if (wasWorking) { + stopActions.push({ + type: 'stop_agent', + agent_id: row.bead_id, + reason: `bead ${event.bead_id} cancelled`, + }); + } } - return; + return stopActions; } case 'convoy_started': { const convoyId = typeof payload.convoy_id === 'string' ? payload.convoy_id : null; if (!convoyId) { console.warn(`${LOG} applyEvent: convoy_started missing convoy_id`); - return; + return []; } query( sql, @@ -305,15 +318,15 @@ export function applyEvent(sql: SqlStorage, event: TownEventRecord): void { `, [convoyId] ); - return; + return []; } case 'container_status': { - if (!event.agent_id) return; + if (!event.agent_id) return []; const containerStatus = payload.status as string; const agent = agents.getAgent(sql, event.agent_id); - if (!agent) return; + if (!agent) return []; // Only act on working/stalled agents whose container has stopped. // For 'not_found': skip if the agent was dispatched recently (#1358). @@ -324,7 +337,7 @@ export function applyEvent(sql: SqlStorage, event: TownEventRecord): void { // agents are caught by reconcileAgents after 90s of no heartbeats. if (containerStatus === 'not_found' && agent.last_activity_at) { const ageSec = (Date.now() - new Date(agent.last_activity_at).getTime()) / 1000; - if (ageSec < 180) return; // 3-minute grace for cold starts + if (ageSec < 180) return []; // 3-minute grace for cold starts } if ( @@ -354,34 +367,34 @@ export function applyEvent(sql: SqlStorage, event: TownEventRecord): void { agents.updateAgentStatus(sql, event.agent_id, 'idle'); } } - return; + return []; } case 'container_eviction': { // Draining flag is managed by the TownDO via KV storage. 
// The reconciler reads it from there; no SQL state change needed here. // The event is recorded for audit trail. - return; + return []; } case 'nudge_timeout': { // GUPP violations are handled by reconcileGUPP on the next pass. // The event just records the fact for audit trail. - return; + return []; } case 'pr_feedback_detected': { const mrBeadId = typeof payload.mr_bead_id === 'string' ? payload.mr_bead_id : null; if (!mrBeadId) { console.warn(`${LOG} applyEvent: pr_feedback_detected missing mr_bead_id`); - return; + return []; } const mrBead = beadOps.getBead(sql, mrBeadId); - if (!mrBead || mrBead.status === 'closed' || mrBead.status === 'failed') return; + if (!mrBead || mrBead.status === 'closed' || mrBead.status === 'failed') return []; // Check for existing non-terminal feedback bead to prevent duplicates - if (hasExistingPrFeedbackBead(sql, mrBeadId)) return; + if (hasExistingPrFeedbackBead(sql, mrBeadId)) return []; const prUrl = typeof payload.pr_url === 'string' ? payload.pr_url : ''; const prNumber = typeof payload.pr_number === 'number' ? payload.pr_number : 0; @@ -420,18 +433,18 @@ export function applyEvent(sql: SqlStorage, event: TownEventRecord): void { // Feedback bead blocks the MR bead (same pattern as rework beads) beadOps.insertDependency(sql, mrBeadId, feedbackBead.bead_id, 'blocks'); - return; + return []; } case 'pr_auto_merge': { const mrBeadId = typeof payload.mr_bead_id === 'string' ? payload.mr_bead_id : null; if (!mrBeadId) { console.warn(`${LOG} applyEvent: pr_auto_merge missing mr_bead_id`); - return; + return []; } const mrBead = beadOps.getBead(sql, mrBeadId); - if (!mrBead || mrBead.status === 'closed' || mrBead.status === 'failed') return; + if (!mrBead || mrBead.status === 'closed' || mrBead.status === 'failed') return []; // The actual merge is handled by the merge_pr side effect generated by // the reconciler on the next tick when it sees this event has been processed. 
@@ -446,11 +459,12 @@ export function applyEvent(sql: SqlStorage, event: TownEventRecord): void { `, [new Date().toISOString(), mrBeadId] ); - return; + return []; } default: { console.warn(`${LOG} applyEvent: unknown event type: ${event.event_type}`); + return []; } } } @@ -465,10 +479,20 @@ export function reconcile( ): Action[] { const draining = opts?.draining ?? false; const actions: Action[] = []; + + // Evaluate the dispatch circuit breaker once per reconcile tick and pass + // the result to sub-functions that need it, avoiding duplicate COUNT queries. + const circuitBreakerActions = checkDispatchCircuitBreaker(sql); + const circuitBreakerOpen = circuitBreakerActions.length > 0; + actions.push(...reconcileAgents(sql, { draining })); - actions.push(...reconcileBeads(sql, { draining })); + actions.push(...reconcileBeads(sql, { draining, circuitBreakerOpen, circuitBreakerActions })); actions.push( - ...reconcileReviewQueue(sql, { draining, refineryCodeReview: opts?.refineryCodeReview }) + ...reconcileReviewQueue(sql, { + draining, + refineryCodeReview: opts?.refineryCodeReview, + circuitBreakerOpen, + }) ); actions.push(...reconcileConvoys(sql)); actions.push(...reconcileGUPP(sql, { draining })); @@ -621,14 +645,21 @@ export function reconcileAgents(sql: SqlStorage, opts?: { draining?: boolean }): // reconcileBeads — handle unassigned beads, lost agents, stale reviews // ════════════════════════════════════════════════════════════════════ -export function reconcileBeads(sql: SqlStorage, opts?: { draining?: boolean }): Action[] { +export function reconcileBeads( + sql: SqlStorage, + opts?: { + draining?: boolean; + circuitBreakerOpen?: boolean; + circuitBreakerActions?: Action[]; + } +): Action[] { const draining = opts?.draining ?? false; const actions: Action[] = []; - // Town-level circuit breaker: if too many dispatch failures in the - // window, skip all dispatch_agent actions and escalate to mayor. 
- const circuitBreakerActions = checkDispatchCircuitBreaker(sql); - const circuitBreakerOpen = circuitBreakerActions.length > 0; + // Circuit breaker state is hoisted to reconcile() and passed in to + // avoid duplicate COUNT queries per tick. + const circuitBreakerActions = opts?.circuitBreakerActions ?? checkDispatchCircuitBreaker(sql); + const circuitBreakerOpen = opts?.circuitBreakerOpen ?? circuitBreakerActions.length > 0; // Rule 1: Open issue beads with no assignee, no blockers, not staged, not triage const unassigned = BeadRow.array().parse([ @@ -1031,14 +1062,16 @@ export function reconcileBeads(sql: SqlStorage, opts?: { draining?: boolean }): export function reconcileReviewQueue( sql: SqlStorage, - opts?: { draining?: boolean; refineryCodeReview?: boolean } + opts?: { draining?: boolean; refineryCodeReview?: boolean; circuitBreakerOpen?: boolean } ): Action[] { const draining = opts?.draining ?? false; const refineryCodeReview = opts?.refineryCodeReview ?? true; const actions: Action[] = []; - // Town-level circuit breaker - const circuitBreakerOpen = checkDispatchCircuitBreaker(sql).length > 0; + // Circuit breaker state is hoisted to reconcile() and passed in to + // avoid duplicate COUNT queries per tick. + const circuitBreakerOpen = + opts?.circuitBreakerOpen ?? checkDispatchCircuitBreaker(sql).length > 0; // Get all MR beads that need attention const mrBeads = MrBeadRow.array().parse([ @@ -1138,7 +1171,8 @@ export function reconcileReviewQueue( } // Rule 4: PR-strategy MR beads orphaned (refinery dispatched then died, stale >30min) - // Only in_progress — open beads are just waiting for the refinery to pop them. + // Only in_progress — open beads with a pr_url are still waiting in the normal + // review queue and haven't been claimed by a refinery yet, so they aren't orphaned. // Skip when refinery code review is disabled: poll_pr keeps the bead alive via // updated_at touches, and no refinery is expected to be working on it. 
if ( diff --git a/cloudflare-gastown/src/prompts/polecat-system.prompt.ts b/cloudflare-gastown/src/prompts/polecat-system.prompt.ts index 8815dea0e..76b1f95d0 100644 --- a/cloudflare-gastown/src/prompts/polecat-system.prompt.ts +++ b/cloudflare-gastown/src/prompts/polecat-system.prompt.ts @@ -96,6 +96,7 @@ When your hooked bead has the \`gt:pr-fixup\` label, you are fixing an existing Do NOT create a new PR. Push to the existing branch. + ## Commit & Push Hygiene - Commit after every meaningful unit of work (new function, passing test, config change).