Skip to content
Closed
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -1152,7 +1152,7 @@ export function KiloclawInstanceDetail({ instanceId }: { instanceId: string }) {

const { data, isLoading, error } = useQuery({
...trpc.admin.kiloclawInstances.get.queryOptions({ id: instanceId }),
refetchInterval: awaitingRestartCompletion || awaitingRestoreCompletion ? 3000 : false,
refetchInterval: awaitingRestoreCompletion ? 3000 : false,
});

const userId = data?.user_id;
Expand Down Expand Up @@ -1262,39 +1262,37 @@ export function KiloclawInstanceDetail({ instanceId }: { instanceId: string }) {
'2026.2.26'
);

// After a restart/upgrade, poll the machine status until it returns to "running",
// then invalidate controllerVersion so supportsConfigRestore reflects the new build.
const prevMachineStatus = useRef(data?.workerStatus?.status);
useEffect(() => {
const status = data?.workerStatus?.status;
const wasRestarting = prevMachineStatus.current !== 'running';
prevMachineStatus.current = status;
// After a restart/upgrade, poll the machine's get query until it returns to "running",
// then re-invalidate controllerVersion so supportsConfigRestore reflects the new build.
useQuery({
queryKey: ['machine-restart-poll', instanceId, awaitingRestartCompletion],
queryFn: async () => {
void queryClient.invalidateQueries({
queryKey: trpc.admin.kiloclawInstances.get.queryKey(),
});
return { ts: Date.now() };
},
enabled: awaitingRestartCompletion,
refetchInterval: awaitingRestartCompletion ? 3000 : false,
});

if (awaitingRestartCompletion && status === 'running' && wasRestarting) {
setAwaitingRestartCompletion(false);
if (data?.user_id && data?.id) {
void queryClient.invalidateQueries({
queryKey: trpc.admin.kiloclawInstances.controllerVersion.queryKey({
userId: data.user_id,
instanceId: data.id,
}),
});
void queryClient.invalidateQueries({
queryKey: trpc.admin.kiloclawInstances.gatewayStatus.queryKey({
userId: data.user_id,
instanceId: data.id,
}),
});
}
if (awaitingRestartCompletion && data?.workerStatus?.status === 'running') {
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

WARNING: This can stop polling before the restart actually begins

awaitingRestartCompletion is set to true in the mutation's onSuccess handler, before the invalidated get query has had a chance to observe the machine leaving the 'running' state. On the first rerender after that state update, data?.workerStatus?.status is often still the pre-restart cached value ('running'), so this branch clears awaitingRestartCompletion immediately and re-invalidates controllerVersion/gatewayStatus against the old (pre-restart) machine state. The previous prevMachineStatus guard avoided this false positive; this version needs to observe the status as non-'running' at least once before treating a subsequent 'running' as evidence that the restart has completed.

setAwaitingRestartCompletion(false);
if (data.user_id && data.id) {
void queryClient.invalidateQueries({
queryKey: trpc.admin.kiloclawInstances.controllerVersion.queryKey({
userId: data.user_id,
instanceId: data.id,
}),
});
void queryClient.invalidateQueries({
queryKey: trpc.admin.kiloclawInstances.gatewayStatus.queryKey({
userId: data.user_id,
instanceId: data.id,
}),
});
}
}, [
data?.workerStatus?.status,
data?.user_id,
data?.id,
awaitingRestartCompletion,
queryClient,
trpc,
]);
}

// Stop polling when restore completes (status transitions from 'restoring' to something else).
// Track whether we've seen 'restoring' to avoid false positives when the mutation succeeds
Expand Down
Loading