diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 62fd05ca..ec67c83f 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -73,3 +73,7 @@ jobs:
- name: Run tests
run: cargo test -p clawpal-core
working-directory: src-tauri
+
+ - name: Run perf metrics tests
+ run: cargo test -p clawpal --test perf_metrics -- --nocapture
+ working-directory: src-tauri
diff --git a/.github/workflows/home-perf-e2e.yml b/.github/workflows/home-perf-e2e.yml
index 75b57c1b..b0673732 100644
--- a/.github/workflows/home-perf-e2e.yml
+++ b/.github/workflows/home-perf-e2e.yml
@@ -70,14 +70,6 @@ jobs:
echo '⚠️ E2E run failed before probe collection. Check workflow logs.' >> tests/e2e/perf/report.md
fi
- - name: Post / update PR performance report
- if: always() && github.event_name == 'pull_request'
- uses: marocchino/sticky-pull-request-comment@v2
- with:
- header: home-perf-e2e
- path: tests/e2e/perf/report.md
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
- name: Cleanup
if: always()
run: docker rm -f oc-perf 2>/dev/null || true
diff --git a/.github/workflows/metrics.yml b/.github/workflows/metrics.yml
new file mode 100644
index 00000000..94c9e8ca
--- /dev/null
+++ b/.github/workflows/metrics.yml
@@ -0,0 +1,519 @@
+name: Metrics Gate
+
+on:
+ pull_request:
+ branches: [develop, main]
+
+permissions:
+ contents: read
+ pull-requests: write
+
+concurrency:
+ group: metrics-${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ metrics:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Setup Bun
+ uses: oven-sh/setup-bun@v2
+
+ - name: Install frontend dependencies
+ run: bun install --frozen-lockfile
+
+ # ── Gate 1: Commit size ≤ 500 lines ──
+ - name: Check commit sizes
+ id: commit_size
+ run: |
+ MAX_LINES=500
+ BASE="${{ github.event.pull_request.base.sha }}"
+ HEAD="${{ github.sha }}"
+ FAIL=0
+ FAIL_COUNT=0
+ MAX_SEEN=0
+ DETAILS=""
+
+ for COMMIT in $(git rev-list $BASE..$HEAD); do
+ # Skip merge commits (GitHub auto-generated)
+ PARENTS=$(git rev-list --parents -1 $COMMIT | wc -w)
+ if [ "$PARENTS" -gt 2 ]; then
+ continue
+ fi
+ # Skip style-only commits (rustfmt, prettier, etc.)
+ SUBJECT=$(git log --format=%s -1 $COMMIT)
+ if echo "$SUBJECT" | grep -qiE '^style(\(|:)'; then
+ continue
+ fi
+ SHORT=$(git rev-parse --short $COMMIT)
+ SUBJECT=$(git log --format=%s -1 $COMMIT)
+ STAT=$(git diff --shortstat ${COMMIT}^..${COMMIT} 2>/dev/null || echo "0")
+ ADDS=$(echo "$STAT" | grep -oP '\d+ insertion' | grep -oP '\d+' || echo 0)
+ DELS=$(echo "$STAT" | grep -oP '\d+ deletion' | grep -oP '\d+' || echo 0)
+ TOTAL=$(( ${ADDS:-0} + ${DELS:-0} ))
+ if [ "$TOTAL" -gt "$MAX_SEEN" ]; then MAX_SEEN=$TOTAL; fi
+
+ if [ "$TOTAL" -gt "$MAX_LINES" ]; then
+ DETAILS="${DETAILS}| \`${SHORT}\` | ${TOTAL} | ≤ ${MAX_LINES} | ❌ | ${SUBJECT} |\n"
+ FAIL=1
+ FAIL_COUNT=$(( FAIL_COUNT + 1 ))
+ else
+ DETAILS="${DETAILS}| \`${SHORT}\` | ${TOTAL} | ≤ ${MAX_LINES} | ✅ | ${SUBJECT} |\n"
+ fi
+ done
+
+ TOTAL_COMMITS=$(git rev-list --no-merges $BASE..$HEAD | wc -l)
+ PASSED_COMMITS=$(( TOTAL_COMMITS - FAIL_COUNT ))
+
+ echo "fail=${FAIL}" >> "$GITHUB_OUTPUT"
+ echo "total=${TOTAL_COMMITS}" >> "$GITHUB_OUTPUT"
+ echo "passed=${PASSED_COMMITS}" >> "$GITHUB_OUTPUT"
+ echo "max_seen=${MAX_SEEN}" >> "$GITHUB_OUTPUT"
+ printf "%b" "$DETAILS" > /tmp/commit_details.txt
+ echo "max_lines=${MAX_LINES}" >> "$GITHUB_OUTPUT"
+
+ # ── Gate 2: Frontend bundle size ≤ 512 KB (gzip) ──
+ - name: Check bundle size
+ id: bundle_size
+ run: |
+ bun run build
+ BUNDLE_BYTES=$(find dist/assets -name '*.js' -exec cat {} + | wc -c)
+ BUNDLE_KB=$(( BUNDLE_BYTES / 1024 ))
+
+ GZIP_BYTES=0
+ for f in dist/assets/*.js; do
+ GZ=$(gzip -c "$f" | wc -c)
+ GZIP_BYTES=$(( GZIP_BYTES + GZ ))
+ done
+ GZIP_KB=$(( GZIP_BYTES / 1024 ))
+
+ LIMIT_KB=512
+ if [ "$GZIP_KB" -gt "$LIMIT_KB" ]; then
+ PASS="false"
+ else
+ PASS="true"
+ fi
+
+ # Measure initial-load chunks (exclude lazy page/component chunks)
+ INIT_GZIP=0
+ for f in dist/assets/*.js; do
+ BN=$(basename "$f")
+ case "$BN" in
+ index-*|vendor-react-*|vendor-ui-*|vendor-i18n-*|vendor-icons-*)
+ GZ_INIT=$(gzip -c "$f" | wc -c)
+ INIT_GZIP=$((INIT_GZIP + GZ_INIT))
+ ;;
+ esac
+ done
+ INIT_KB=$((INIT_GZIP / 1024))
+
+ echo "raw_kb=${BUNDLE_KB}" >> "$GITHUB_OUTPUT"
+ echo "gzip_kb=${GZIP_KB}" >> "$GITHUB_OUTPUT"
+ echo "init_gzip_kb=${INIT_KB}" >> "$GITHUB_OUTPUT"
+ echo "limit_kb=${LIMIT_KB}" >> "$GITHUB_OUTPUT"
+ echo "pass=${PASS}" >> "$GITHUB_OUTPUT"
+
+ # ── Gate 3: Perf metrics E2E ──
+ - name: Install system dependencies
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y \
+ libwebkit2gtk-4.1-dev \
+ libappindicator3-dev \
+ librsvg2-dev \
+ patchelf \
+ libssl-dev \
+ libgtk-3-dev \
+ libsoup-3.0-dev \
+ libjavascriptcoregtk-4.1-dev
+
+ - name: Setup Rust
+ uses: dtolnay/rust-toolchain@stable
+
+ - name: Cache Rust dependencies
+ uses: Swatinem/rust-cache@v2
+ with:
+ workspaces: src-tauri
+
+ - name: Run perf metrics tests
+ id: perf_tests
+ working-directory: src-tauri
+ run: |
+ set +e
+ OUTPUT=$(cargo test -p clawpal --test perf_metrics -- --nocapture 2>&1)
+ EXIT_CODE=$?
+ echo "$OUTPUT"
+
+ # Parse test results
+ PASSED=$(echo "$OUTPUT" | grep -oP '\d+ passed' | grep -oP '\d+' || echo 0)
+ FAILED=$(echo "$OUTPUT" | grep -oP '\d+ failed' | grep -oP '\d+' || echo 0)
+
+ # Extract structured metrics from METRIC: lines
+ RSS_MB=$(echo "$OUTPUT" | grep -oP 'METRIC:rss_mb=\K[0-9.]+' || echo "N/A")
+ VMS_MB=$(echo "$OUTPUT" | grep -oP 'METRIC:vms_mb=\K[0-9.]+' || echo "N/A")
+ CMD_P50=$(echo "$OUTPUT" | grep -oP 'METRIC:cmd_p50_ms=\K[0-9]+' || echo "N/A")
+ CMD_P95=$(echo "$OUTPUT" | grep -oP 'METRIC:cmd_p95_ms=\K[0-9]+' || echo "N/A")
+ CMD_MAX=$(echo "$OUTPUT" | grep -oP 'METRIC:cmd_max_ms=\K[0-9]+' || echo "N/A")
+ UPTIME=$(echo "$OUTPUT" | grep -oP 'METRIC:uptime_secs=\K[0-9.]+' || echo "N/A")
+
+ echo "passed=${PASSED}" >> "$GITHUB_OUTPUT"
+ echo "failed=${FAILED}" >> "$GITHUB_OUTPUT"
+ echo "exit_code=${EXIT_CODE}" >> "$GITHUB_OUTPUT"
+ echo "rss_mb=${RSS_MB}" >> "$GITHUB_OUTPUT"
+ echo "vms_mb=${VMS_MB}" >> "$GITHUB_OUTPUT"
+ echo "cmd_p50=${CMD_P50}" >> "$GITHUB_OUTPUT"
+ echo "cmd_p95=${CMD_P95}" >> "$GITHUB_OUTPUT"
+ echo "cmd_max=${CMD_MAX}" >> "$GITHUB_OUTPUT"
+ echo "uptime=${UPTIME}" >> "$GITHUB_OUTPUT"
+
+ if [ "$EXIT_CODE" -ne 0 ]; then
+ echo "pass=false" >> "$GITHUB_OUTPUT"
+ else
+ echo "pass=true" >> "$GITHUB_OUTPUT"
+ fi
+
+ # ── Gate 4: Large file check (informational) ──
+ - name: Check large files
+ id: large_files
+ run: |
+ MOD_LINES=$(wc -l < src-tauri/src/commands/mod.rs 2>/dev/null || echo 0)
+ APP_LINES=$(wc -l < src/App.tsx 2>/dev/null || echo 0)
+
+ DETAILS="| \`commands/mod.rs\` | ${MOD_LINES} | ≤ 2000 |"
+ if [ "$MOD_LINES" -gt 2000 ]; then
+ DETAILS="${DETAILS} ⚠️ |"
+ else
+ DETAILS="${DETAILS} ✅ |"
+ fi
+
+ DETAILS="${DETAILS}\n| \`App.tsx\` | ${APP_LINES} | ≤ 500 |"
+ if [ "$APP_LINES" -gt 500 ]; then
+ DETAILS="${DETAILS} ⚠️ |"
+ else
+ DETAILS="${DETAILS} ✅ |"
+ fi
+
+ LARGE_COUNT=$(find src/ src-tauri/src/ \( -name '*.ts' -o -name '*.tsx' -o -name '*.rs' \) -exec wc -l {} + 2>/dev/null | \
+ grep -v total | awk '$1 > 500 {count++} END {print count+0}')
+
+ printf "%b" "$DETAILS" > /tmp/large_file_details.txt
+ echo "mod_lines=${MOD_LINES}" >> "$GITHUB_OUTPUT"
+ echo "app_lines=${APP_LINES}" >> "$GITHUB_OUTPUT"
+ echo "large_count=${LARGE_COUNT}" >> "$GITHUB_OUTPUT"
+
+ # ── Gate 4b: Command perf E2E (local) ──
+ - name: Run command perf E2E
+ id: cmd_perf
+ working-directory: src-tauri
+ run: |
+ set +e
+ OUTPUT=$(cargo test -p clawpal --test command_perf_e2e -- --nocapture 2>&1)
+ EXIT_CODE=$?
+ echo "$OUTPUT"
+
+ PASSED=$(echo "$OUTPUT" | grep -oP '\d+ passed' | grep -oP '\d+' || echo 0)
+ FAILED=$(echo "$OUTPUT" | grep -oP '\d+ failed' | grep -oP '\d+' || echo 0)
+
+ # Extract LOCAL_CMD lines
+ echo "$OUTPUT" | grep '^LOCAL_CMD:' > /tmp/local_cmd_perf.txt || true
+ CMD_COUNT=$(wc -l < /tmp/local_cmd_perf.txt)
+
+ # Extract process metrics
+ PROC_RSS=$(echo "$OUTPUT" | grep -oP 'PROCESS:rss_mb=\K[0-9.]+' || echo "N/A")
+
+ echo "passed=${PASSED}" >> "$GITHUB_OUTPUT"
+ echo "failed=${FAILED}" >> "$GITHUB_OUTPUT"
+ echo "cmd_count=${CMD_COUNT}" >> "$GITHUB_OUTPUT"
+ echo "proc_rss=${PROC_RSS}" >> "$GITHUB_OUTPUT"
+
+ if [ "$EXIT_CODE" -ne 0 ]; then
+ echo "pass=false" >> "$GITHUB_OUTPUT"
+ else
+ echo "pass=true" >> "$GITHUB_OUTPUT"
+ fi
+
+ # ── Gate 4c: Command perf E2E (remote via SSH Docker) ──
+ - name: Install sshpass (for SSH perf tests)
+ run: sudo apt-get install -y sshpass
+
+ - name: Build Docker OpenClaw container (for remote perf)
+ run: docker build -t clawpal-perf-e2e -f tests/e2e/perf/Dockerfile .
+
+ - name: Start SSH container
+ run: |
+ docker run -d --name oc-remote-perf -p 2299:22 clawpal-perf-e2e
+ for i in $(seq 1 15); do
+ sshpass -p clawpal-perf-e2e ssh -o StrictHostKeyChecking=no -p 2299 root@localhost echo ok 2>/dev/null && break
+ sleep 1
+ done
+
+ - name: Run remote command timing via SSH
+ id: remote_perf
+ run: |
+ set +e
+ SSH="sshpass -p clawpal-perf-e2e ssh -o StrictHostKeyChecking=no -p 2299 root@localhost"
+
+ # Exercise remote OpenClaw commands and measure timing
+ CMDS=(
+ "openclaw status --json"
+ "cat /root/.openclaw/openclaw.json"
+ "openclaw gateway status --json"
+ "openclaw cron list --json"
+ "openclaw agent list --json"
+ )
+
+ echo "REMOTE_PERF_START" > /tmp/remote_perf.txt
+ for CMD in "${CMDS[@]}"; do
+ SHORT=$(echo "$CMD" | awk '{print $1"_"$2}' | tr '/' '_')
+ for i in $(seq 1 3); do
+ START=$(date +%s%N)
+ $SSH "$CMD" > /dev/null 2>&1
+ END=$(date +%s%N)
+ MS=$(( (END - START) / 1000000 ))
+ echo "REMOTE_CMD:${SHORT}:run${i}:${MS}ms" | tee -a /tmp/remote_perf.txt
+ done
+ done
+ echo "REMOTE_PERF_END" >> /tmp/remote_perf.txt
+
+ # Parse medians
+ DETAILS=""
+ for CMD in "${CMDS[@]}"; do
+ SHORT=$(echo "$CMD" | awk '{print $1"_"$2}' | tr '/' '_')
+ TIMES=$(grep "REMOTE_CMD:${SHORT}:" /tmp/remote_perf.txt | grep -oP '\d+(?=ms)' | sort -n)
+ MEDIAN=$(echo "$TIMES" | sed -n '2p')
+ MAX=$(echo "$TIMES" | tail -1)
+ DETAILS="${DETAILS}${SHORT}:median=${MEDIAN:-0}:max=${MAX:-0}\n"
+ done
+ printf "%b" "$DETAILS" > /tmp/remote_perf_summary.txt
+
+ # Also measure a batch command (single SSH hop)
+ BATCH_CMD="openclaw status --json && openclaw gateway status --json && openclaw cron list --json"
+ for i in $(seq 1 3); do
+ START=$(date +%s%N)
+ $SSH "$BATCH_CMD" > /dev/null 2>&1
+ END=$(date +%s%N)
+ MS=$(( (END - START) / 1000000 ))
+ echo "REMOTE_CMD:batch_all:run${i}:${MS}ms" | tee -a /tmp/remote_perf.txt
+ done
+
+ echo "pass=true" >> "$GITHUB_OUTPUT"
+
+ - name: Cleanup remote container
+ if: always()
+ run: docker rm -f oc-remote-perf 2>/dev/null || true
+
+ # ── Gate 5: Home page render probes ──
+ - name: Cache Playwright browsers
+ id: playwright-cache
+ uses: actions/cache@v4
+ with:
+ path: ~/.cache/ms-playwright
+ key: playwright-${{ runner.os }}-${{ hashFiles('package.json') }}
+
+ - name: Install Playwright
+ run: |
+ bun add -d @playwright/test
+ npx playwright install chromium --with-deps
+ timeout-minutes: 5
+
+ - name: Install sshpass
+ run: sudo apt-get install -y sshpass
+
+ - name: Start container (reuses image from remote perf step)
+ run: |
+ docker run -d --name oc-perf -p 2299:22 clawpal-perf-e2e
+ for i in $(seq 1 15); do
+ sshpass -p clawpal-perf-e2e ssh -o StrictHostKeyChecking=no -p 2299 root@localhost echo ok 2>/dev/null && break
+ sleep 1
+ done
+
+ - name: Extract fixtures from container
+ run: node tests/e2e/perf/extract-fixtures.mjs
+ env:
+ CLAWPAL_PERF_SSH_PORT: "2299"
+
+ - name: Start Vite dev server
+ run: |
+ bun run dev &
+ for i in $(seq 1 20); do
+ curl -s http://localhost:1420 > /dev/null 2>&1 && break
+ sleep 1
+ done
+
+ - name: Run render probe E2E
+ id: home_perf
+ run: |
+ set +e
+ npx playwright test --config tests/e2e/perf/playwright.config.mjs 2>&1
+ EXIT_CODE=$?
+
+ # Parse report.md for probe values
+ if [ -f tests/e2e/perf/report.md ]; then
+ STATUS_MS=$(grep -oP '\| status \| \K[0-9]+' tests/e2e/perf/report.md || echo "N/A")
+ VERSION_MS=$(grep -oP '\| version \| \K[0-9]+' tests/e2e/perf/report.md || echo "N/A")
+ AGENTS_MS=$(grep -oP '\| agents \| \K[0-9]+' tests/e2e/perf/report.md || echo "N/A")
+ MODELS_MS=$(grep -oP '\| models \| \K[0-9]+' tests/e2e/perf/report.md || echo "N/A")
+ SETTLED_MS=$(grep -oP '\| settled \| \K[0-9]+' tests/e2e/perf/report.md || echo "N/A")
+ else
+ STATUS_MS="N/A"; VERSION_MS="N/A"; AGENTS_MS="N/A"; MODELS_MS="N/A"; SETTLED_MS="N/A"
+ fi
+
+ echo "status_ms=${STATUS_MS}" >> "$GITHUB_OUTPUT"
+ echo "version_ms=${VERSION_MS}" >> "$GITHUB_OUTPUT"
+ echo "agents_ms=${AGENTS_MS}" >> "$GITHUB_OUTPUT"
+ echo "models_ms=${MODELS_MS}" >> "$GITHUB_OUTPUT"
+ echo "settled_ms=${SETTLED_MS}" >> "$GITHUB_OUTPUT"
+
+ if [ "$EXIT_CODE" -ne 0 ]; then
+ echo "pass=false" >> "$GITHUB_OUTPUT"
+ else
+ echo "pass=true" >> "$GITHUB_OUTPUT"
+ fi
+ env:
+ PERF_MOCK_LATENCY_MS: "50"
+ PERF_SETTLED_GATE_MS: "5000"
+
+ - name: Cleanup container
+ if: always()
+ run: docker rm -f oc-perf 2>/dev/null || true
+
+ # ── Post / update PR comment ──
+ - name: Generate metrics comment
+ id: metrics_body
+ run: |
+ LARGE_FILE_DETAILS=$(cat /tmp/large_file_details.txt)
+
+ GATE_FAIL=0
+ OVERALL="✅ All gates passed"
+
+ # Commit size is a soft gate (reported but not blocking)
+ # if [ "${{ steps.commit_size.outputs.fail }}" = "1" ]; then
+ # OVERALL="❌ Some gates failed"; GATE_FAIL=1
+ # fi
+ if [ "${{ steps.bundle_size.outputs.pass }}" = "false" ]; then
+ OVERALL="❌ Some gates failed"; GATE_FAIL=1
+ fi
+ if [ "${{ steps.perf_tests.outputs.pass }}" = "false" ]; then
+ OVERALL="❌ Some gates failed"; GATE_FAIL=1
+ fi
+ if [ "${{ steps.cmd_perf.outputs.pass }}" = "false" ]; then
+ OVERALL="❌ Some gates failed"; GATE_FAIL=1
+ fi
+ if [ "${{ steps.home_perf.outputs.pass }}" = "false" ]; then
+ OVERALL="❌ Some gates failed"; GATE_FAIL=1
+ fi
+
+ BUNDLE_ICON=$( [ "${{ steps.bundle_size.outputs.pass }}" = "true" ] && echo "✅" || echo "❌" )
+ COMMIT_ICON=$( [ "${{ steps.commit_size.outputs.fail }}" = "0" ] && echo "✅" || echo "❌" )
+
+ cat > /tmp/metrics_comment.md << COMMENTEOF
+
+ ## 📏 Metrics Gate Report
+
+ **Status**: ${OVERALL}
+
+ ### Commit Size ${COMMIT_ICON}
+
+ | Metric | Value | Limit | Status |
+ |--------|-------|-------|--------|
+ | Commits checked | ${{ steps.commit_size.outputs.total }} | — | — |
+ | All within limit | ${{ steps.commit_size.outputs.passed }}/${{ steps.commit_size.outputs.total }} | ≤ ${{ steps.commit_size.outputs.max_lines }} lines | ${COMMIT_ICON} |
+ | Largest commit | ${{ steps.commit_size.outputs.max_seen }} lines | ≤ ${{ steps.commit_size.outputs.max_lines }} | $( [ "${{ steps.commit_size.outputs.max_seen }}" -le "${{ steps.commit_size.outputs.max_lines }}" ] && echo "✅" || echo "❌" ) |
+
+ ### Bundle Size ${BUNDLE_ICON}
+
+ | Metric | Value | Limit | Status |
+ |--------|-------|-------|--------|
+ | JS bundle (raw) | ${{ steps.bundle_size.outputs.raw_kb }} KB | — | — |
+ | JS bundle (gzip) | ${{ steps.bundle_size.outputs.gzip_kb }} KB | ≤ ${{ steps.bundle_size.outputs.limit_kb }} KB | ${BUNDLE_ICON} |
+ | JS initial load (gzip) | ${{ steps.bundle_size.outputs.init_gzip_kb }} KB | — | ℹ️ |
+
+ ### Perf Metrics E2E $( [ "${{ steps.perf_tests.outputs.pass }}" = "true" ] && echo "✅" || echo "❌" )
+
+ | Metric | Value | Limit | Status |
+ |--------|-------|-------|--------|
+ | Tests | ${{ steps.perf_tests.outputs.passed }} passed, ${{ steps.perf_tests.outputs.failed }} failed | 0 failures | $( [ "${{ steps.perf_tests.outputs.failed }}" = "0" ] && echo "✅" || echo "❌" ) |
+ | RSS (test process) | ${{ steps.perf_tests.outputs.rss_mb }} MB | ≤ 80 MB | $( echo "${{ steps.perf_tests.outputs.rss_mb }}" | awk '{print ($1 <= 80) ? "✅" : "❌"}' ) |
+ | VMS (test process) | ${{ steps.perf_tests.outputs.vms_mb }} MB | — | ℹ️ |
+ | Command P50 latency | ${{ steps.perf_tests.outputs.cmd_p50 }} ms | — | ℹ️ |
+ | Command P95 latency | ${{ steps.perf_tests.outputs.cmd_p95 }} ms | ≤ 100 ms | $( echo "${{ steps.perf_tests.outputs.cmd_p95 }}" | awk '{print ($1 <= 100) ? "✅" : "❌"}' ) |
+ | Command max latency | ${{ steps.perf_tests.outputs.cmd_max }} ms | — | ℹ️ |
+
+ ### Command Perf (local) $( [ "${{ steps.cmd_perf.outputs.pass }}" = "true" ] && echo "✅" || echo "❌" )
+
+ | Metric | Value | Status |
+ |--------|-------|--------|
+ | Tests | ${{ steps.cmd_perf.outputs.passed }} passed, ${{ steps.cmd_perf.outputs.failed }} failed | $( [ "${{ steps.cmd_perf.outputs.failed }}" = "0" ] && echo "✅" || echo "❌" ) |
+ | Commands measured | ${{ steps.cmd_perf.outputs.cmd_count }} | ℹ️ |
+ | RSS (test process) | ${{ steps.cmd_perf.outputs.proc_rss }} MB | ℹ️ |
+
+ Local command timings
+
+ | Command | P50 | P95 | Max |
+ |---------|-----|-----|-----|
+ $(cat /tmp/local_cmd_perf.txt 2>/dev/null | awk -F: '{printf "| %s | %s | %s | %s |\n", $2, $4, $5, $6}' | sed 's/p50=//;s/p95=//;s/max=//;s/avg=[0-9]*//;s/count=[0-9]*://' || echo "| N/A | N/A | N/A | N/A |")
+
+
+
+ ### Command Perf (remote SSH) ✅
+
+ Remote command timings (via Docker SSH)
+
+ | Command | Median | Max |
+ |---------|--------|-----|
+ $(cat /tmp/remote_perf_summary.txt 2>/dev/null | awk -F: '{printf "| %s | %s ms | %s ms |\n", $1, $2, $3}' | sed 's/median=//;s/max=//' || echo "| N/A | N/A | N/A |")
+
+
+
+ ### Home Page Render Probes $( [ "${{ steps.home_perf.outputs.pass }}" = "true" ] && echo "✅" || echo "❌" )
+
+ | Probe | Value | Limit | Status |
+ |-------|-------|-------|--------|
+ | status | ${{ steps.home_perf.outputs.status_ms }} ms | — | ℹ️ |
+ | version | ${{ steps.home_perf.outputs.version_ms }} ms | — | ℹ️ |
+ | agents | ${{ steps.home_perf.outputs.agents_ms }} ms | — | ℹ️ |
+ | models | ${{ steps.home_perf.outputs.models_ms }} ms | — | ℹ️ |
+ | settled | ${{ steps.home_perf.outputs.settled_ms }} ms | < 5000 ms | $( echo "${{ steps.home_perf.outputs.settled_ms }}" | awk '{print ($1 != "N/A" && $1 < 5000) ? "✅" : "❌"}' ) |
+
+ ### Code Readability (informational)
+
+ | File | Lines | Target | Status |
+ |------|-------|--------|--------|
+ ${LARGE_FILE_DETAILS}
+ | Files > 500 lines | ${{ steps.large_files.outputs.large_count }} | trend ↓ | ℹ️ |
+
+ ---
+ > 📊 Metrics defined in [\`docs/architecture/metrics.md\`](../blob/${{ github.head_ref }}/docs/architecture/metrics.md)
+ COMMENTEOF
+
+ # Remove leading whitespace from heredoc
+ sed -i 's/^ //' /tmp/metrics_comment.md
+
+ echo "gate_fail=${GATE_FAIL}" >> "$GITHUB_OUTPUT"
+
+ - name: Find existing metrics comment
+ uses: peter-evans/find-comment@v3
+ id: fc
+ with:
+ issue-number: ${{ github.event.pull_request.number }}
+ comment-author: 'github-actions[bot]'
+ body-includes: 'Metrics Gate Report'
+
+ - name: Create or update metrics comment
+ uses: peter-evans/create-or-update-comment@v4
+ with:
+ comment-id: ${{ steps.fc.outputs.comment-id }}
+ issue-number: ${{ github.event.pull_request.number }}
+ body-path: /tmp/metrics_comment.md
+ edit-mode: replace
+
+ - name: Fail if gates not met
+ if: steps.metrics_body.outputs.gate_fail == '1'
+ run: |
+ echo "::error::Metrics gate failed — check the PR comment for details."
+ exit 1
diff --git a/docs/architecture/metrics.md b/docs/architecture/metrics.md
new file mode 100644
index 00000000..738c8c95
--- /dev/null
+++ b/docs/architecture/metrics.md
@@ -0,0 +1,265 @@
+# ClawPal 量化指标体系
+
+本文档定义 ClawPal 项目的量化指标、当前基线、目标值和量化方式。
+
+指标分为三类:
+1. **工程健康度** — PR、CI、测试、文档(来自 Harness Engineering 基线文档)
+2. **运行时性能** — 启动、内存、command 耗时、包体积
+3. **Tauri 专项** — command 漂移、打包验证、全平台构建
+
+## 1. 工程健康度
+
+### 1.1 Commit / PR 质量
+
+| 指标 | 基线值 (2026-03-17) | 目标 | 量化方式 | CI Gate |
+|------|---------------------|------|----------|---------|
+| 单 commit 变更行数 | 未追踪 | ≤ 500 行 | `git diff --stat` | ✅ |
+| PR 中位生命周期 | 1.0h | ≤ 4h | GitHub API | — |
+
+### 1.2 CI 稳定性
+
+| 指标 | 基线值 | 目标 | 量化方式 | CI Gate |
+|------|--------|------|----------|---------|
+| CI 成功率 | 75% | ≥ 90% | workflow run 统计 | — |
+| CI 失败中环境问题占比 | 未追踪 | 趋势下降 | 手动分类 | — |
+
+### 1.3 测试覆盖率
+
+| 指标 | 基线值 | 目标 | 量化方式 | CI Gate |
+|------|--------|------|----------|---------|
+| 行覆盖率 (core + cli) | 74.4% | ≥ 80% | `cargo llvm-cov` | ✅ 不得下降 |
+| 函数覆盖率 | 68.9% | ≥ 75% | `cargo llvm-cov` | ✅ 不得下降 |
+
+### 1.4 代码可读性
+
+| 指标 | 基线值 | 目标 | 量化方式 | CI Gate |
+|------|--------|------|----------|---------|
+| commands/mod.rs 行数 | 8,842 | ≤ 2,000 | `wc -l` | — |
+| App.tsx 行数 | 1,787 | ≤ 500 | `wc -l` | — |
+| 单文件 > 500 行数量 | 未统计 | 趋势下降 | 脚本统计 | — |
+
+## 2. 运行时性能
+
+### 2.1 启动与加载
+
+| 指标 | 基线值 | 目标 | 量化方式 | CI Gate |
+|------|--------|------|----------|---------|
+| 冷启动到首屏渲染 | 待埋点 | ≤ 2s | `performance.now()` 差值 | ✅ |
+| 首个 command 响应时间 | 待埋点 | ≤ 500ms | 首次 invoke 到返回的耗时 | ✅ |
+| 页面路由切换时间 | 待埋点 | ≤ 200ms | React Suspense fallback 持续时间 | — |
+
+**埋点方案**:
+
+前端(`src/App.tsx`):
+```typescript
+// 在模块顶部记录启动时间
+const APP_START = performance.now();
+
+// 在 App() 首次渲染完成的 useEffect 中
+useEffect(() => {
+ const ttfr = performance.now() - APP_START;
+ console.log(`[perf] time-to-first-render: ${ttfr.toFixed(0)}ms`);
+ invoke("log_app_event", {
+ event: "perf_ttfr",
+ data: JSON.stringify({ ttfr_ms: Math.round(ttfr) })
+ });
+}, []);
+```
+
+### 2.2 内存
+
+| 指标 | 基线值 | 目标 | 量化方式 | CI Gate |
+|------|--------|------|----------|---------|
+| 空闲内存占用(Rust 进程) | 待埋点 | ≤ 80MB | `sysinfo` crate 或 OS API | ✅ |
+| 空闲内存占用(WebView) | 待埋点 | ≤ 120MB | `performance.memory` (Chromium) | — |
+| SSH 长连接内存增长 | 待埋点 | ≤ 5MB/h | 连接后定期采样 | — |
+
+**埋点方案**:
+
+Rust 侧(`src-tauri/src/commands/overview.rs` 或新建 `perf.rs`):
+```rust
+#[tauri::command]
+pub fn get_process_metrics() -> Result<ProcessMetrics, String> {
+ let pid = std::process::id();
+ // 读取 /proc/{pid}/status (Linux) 或 mach_task_info (macOS)
+ // 返回 RSS, VmSize 等
+}
+```
+
+### 2.3 构建产物
+
+| 指标 | 基线值 | 目标 | 量化方式 | CI Gate |
+|------|--------|------|----------|---------|
+| macOS ARM64 包体积 | 12.6 MB | ≤ 15 MB | CI build artifact | ✅ |
+| macOS x64 包体积 | 13.3 MB | ≤ 15 MB | CI build artifact | ✅ |
+| Windows x64 包体积 | 16.3 MB | ≤ 20 MB | CI build artifact | ✅ |
+| Linux x64 包体积 | 103.8 MB | ≤ 110 MB | CI build artifact | ✅ |
+| 前端 JS bundle 大小 (gzip) | 待统计 | ≤ 512 KB | `vite build` + `gzip -k` | ✅ |
+
+**CI Gate 方案**:
+
+在 `ci.yml` 的 frontend job 中添加:
+```yaml
+- name: Check bundle size
+ run: |
+ bun run build
+ BUNDLE_SIZE=$(du -sb dist/assets/*.js | awk '{sum+=$1} END {print sum}')
+ BUNDLE_KB=$((BUNDLE_SIZE / 1024))
+ echo "Bundle size: ${BUNDLE_KB}KB"
+ if [ "$BUNDLE_KB" -gt 512 ]; then
+ echo "::error::Bundle size ${BUNDLE_KB}KB exceeds 512KB limit"
+ exit 1
+ fi
+```
+
+在 `pr-build.yml` 中添加包体积检查:
+```yaml
+- name: Check artifact size
+ run: |
+ # 平台对应的限制值 (bytes)
+ case "${{ matrix.platform }}" in
+ macos-latest) LIMIT=$((15 * 1024 * 1024)) ;;
+ windows-latest) LIMIT=$((20 * 1024 * 1024)) ;;
+ ubuntu-latest) LIMIT=$((110 * 1024 * 1024)) ;;
+ esac
+ ARTIFACT_SIZE=$(du -sb target/release/bundle/ | awk '{print $1}')
+ if [ "$ARTIFACT_SIZE" -gt "$LIMIT" ]; then
+ echo "::error::Artifact size exceeds limit"
+ exit 1
+ fi
+```
+
+### 2.4 Command 性能
+
+| 指标 | 基线值 | 目标 | 量化方式 | CI Gate |
+|------|--------|------|----------|---------|
+| 本地 command P95 耗时 | 待埋点 | ≤ 100ms | Rust `Instant::now()` | ✅ |
+| SSH command P95 耗时 | 待埋点 | ≤ 2s | 含网络 RTT | — |
+| Doctor 全量诊断耗时 | 待埋点 | ≤ 5s | 端到端计时 | — |
+| 配置文件读写耗时 | 待埋点 | ≤ 50ms | `Instant::now()` | — |
+
+**埋点方案**:
+
+在 command 层添加统一计时 wrapper(`src-tauri/src/commands/mod.rs`):
+```rust
+use std::time::Instant;
+use tracing::{info, warn};
+
+/// 记录 command 执行耗时,超过阈值发出 warning
+pub fn trace_command<F, T>(name: &str, threshold_ms: u64, f: F) -> T
+where
+ F: FnOnce() -> T,
+{
+ let start = Instant::now();
+ let result = f();
+ let elapsed = start.elapsed();
+ let ms = elapsed.as_millis() as u64;
+ if ms > threshold_ms {
+ warn!(command = name, elapsed_ms = ms, "command exceeded threshold");
+ } else {
+ info!(command = name, elapsed_ms = ms, "command completed");
+ }
+ result
+}
+```
+
+## 3. Tauri 专项
+
+| 指标 | 基线值 | 目标 | 量化方式 | CI Gate |
+|------|--------|------|----------|---------|
+| Command 前后端漂移次数 | 未追踪 | 0 | contract test | ✅ (Phase 3 延后项) |
+| Packaged app smoke 通过率 | 无 smoke test | 100% | packaged smoke CI | ✅ (Phase 3 延后项) |
+| 全平台构建通过率 | 100% | ≥ 95% | PR build matrix | ✅ |
+
+## 4. CI Gate 实施计划
+
+### 阶段 1: 立即可加(本 PR 后续 commit)
+
+1. **单 commit 变更行数 gate** — PR 中每个 commit 不超过 500 行(additions + deletions)
+2. **前端 bundle 大小 gate** — `ci.yml` frontend job 增加 `du` 检查
+3. **覆盖率不得下降 gate** — 已有 `coverage.yml`,确认 delta ≥ 0 时 fail
+
+**Commit 大小检查脚本**(加入 `ci.yml`):
+```yaml
+- name: Check commit sizes
+ run: |
+ MAX_LINES=500
+ BASE="${{ github.event.pull_request.base.sha }}"
+ HEAD="${{ github.sha }}"
+ FAIL=0
+ for COMMIT in $(git rev-list $BASE..$HEAD); do
+ SHORT=$(git rev-parse --short $COMMIT)
+ SUBJECT=$(git log --format=%s -1 $COMMIT)
+ STAT=$(git diff --shortstat ${COMMIT}^..${COMMIT} 2>/dev/null || echo "0")
+ ADDS=$(echo "$STAT" | grep -oP '\d+ insertion' | grep -oP '\d+' || echo 0)
+ DELS=$(echo "$STAT" | grep -oP '\d+ deletion' | grep -oP '\d+' || echo 0)
+ TOTAL=$((${ADDS:-0} + ${DELS:-0}))
+ echo "$SHORT ($TOTAL lines): $SUBJECT"
+ if [ "$TOTAL" -gt "$MAX_LINES" ]; then
+ echo "::error::Commit $SHORT exceeds $MAX_LINES line limit ($TOTAL lines): $SUBJECT"
+ FAIL=1
+ fi
+ done
+ if [ "$FAIL" -eq 1 ]; then
+ echo "::error::One or more commits exceed the $MAX_LINES line limit. Split into smaller commits."
+ exit 1
+ fi
+```
+
+### 阶段 2: 埋点后可加
+
+4. **冷启动时间 gate** — 前端埋点 + E2E 测试中采集
+5. **command 耗时 gate** — Rust wrapper + 单元测试中断言
+6. **内存占用 gate** — `get_process_metrics` command + E2E 测试中采集
+
+### 阶段 3: 基础设施完善后
+
+7. **包体积 gate** — `pr-build.yml` 中按平台检查
+8. **Packaged app smoke gate** — 需要 headless 桌面环境或 Xvfb
+
+## 5. 指标记录与趋势
+
+每周熵治理时记录到 `docs/runbooks/entropy-governance.md` 的指标表中。
+
+建议每月输出一次指标趋势报告,重点关注:
+- 覆盖率是否稳步上升
+- PR 粒度是否持续减小
+- CI 成功率是否稳定在 90% 以上
+- 包体积是否异常增长
+- 新增 command 是否有对应的 contract test
+
+## Optimization Log
+
+### JS Bundle Size
+
+**Baseline**: 910 KB raw / 285 KB gzip (2026-03-17)
+
+**Optimization 1: Vendor chunk splitting** (vite.config.ts)
+- Split large vendor dependencies into separate chunks:
+ - `vendor-react`: react, react-dom (~140KB raw)
+ - `vendor-i18n`: i18next ecosystem (~80KB raw)
+ - `vendor-ui`: radix-ui, cmdk, CVA, clsx, tailwind-merge (~200KB raw)
+ - `vendor-icons`: lucide-react (~150KB raw)
+ - `vendor-diff`: react-diff-viewer-continued (lazy, ~100KB raw)
+- Expected impact: Better tree-shaking, smaller initial load, parallel chunk loading
+- Note: Total gzip may increase slightly due to less cross-chunk compression,
+ but initial load waterfall improves significantly
+
+### Remote SSH Command Latency
+
+**Baseline**: `openclaw status` 1981ms, `openclaw cron list` 1935ms (2026-03-17)
+
+The ~2s latency is dominated by OpenClaw CLI cold start (Node.js process spawn + module load).
+This is inherent to the CLI architecture and cannot be optimized in ClawPal.
+
+Potential future optimization: persistent SSH connection + daemon mode.
+
+### Home Page Models Probe
+
+**Baseline**: 106ms with 50ms mock latency (2026-03-17)
+
+The models probe measures time from mount to `modelProfiles` state population.
+With localStorage cache seeding (readPersistedReadCache), real-app first render is near-instant.
+The 106ms in E2E is the 50ms mock latency + React re-render cycle.
+
+Optimization: Not actionable — the real bottleneck (CLI call) is already cached client-side.
diff --git a/src-tauri/src/commands/agent.rs b/src-tauri/src/commands/agent.rs
index be9722b6..c8a4e53d 100644
--- a/src-tauri/src/commands/agent.rs
+++ b/src-tauri/src/commands/agent.rs
@@ -8,47 +8,49 @@ pub async fn remote_setup_agent_identity(
name: String,
emoji: Option<String>,
) -> Result<bool, String> {
- let agent_id = agent_id.trim().to_string();
- let name = name.trim().to_string();
- if agent_id.is_empty() {
- return Err("Agent ID is required".into());
- }
- if name.is_empty() {
- return Err("Name is required".into());
- }
+ timed_async!("remote_setup_agent_identity", {
+ let agent_id = agent_id.trim().to_string();
+ let name = name.trim().to_string();
+ if agent_id.is_empty() {
+ return Err("Agent ID is required".into());
+ }
+ if name.is_empty() {
+ return Err("Name is required".into());
+ }
- // Read remote config to find agent workspace
- let (_config_path, _raw, cfg) = remote_read_openclaw_config_text_and_json(&pool, &host_id)
- .await
- .map_err(|e| format!("Failed to parse config: {e}"))?;
+ // Read remote config to find agent workspace
+ let (_config_path, _raw, cfg) = remote_read_openclaw_config_text_and_json(&pool, &host_id)
+ .await
+ .map_err(|e| format!("Failed to parse config: {e}"))?;
- let workspace = clawpal_core::doctor::resolve_agent_workspace_from_config(
- &cfg,
- &agent_id,
- Some("~/.openclaw/agents"),
- )?;
+ let workspace = clawpal_core::doctor::resolve_agent_workspace_from_config(
+ &cfg,
+ &agent_id,
+ Some("~/.openclaw/agents"),
+ )?;
- // Build IDENTITY.md content
- let mut content = format!("- Name: {}\n", name);
- if let Some(ref e) = emoji {
- let e = e.trim();
- if !e.is_empty() {
- content.push_str(&format!("- Emoji: {}\n", e));
+ // Build IDENTITY.md content
+ let mut content = format!("- Name: {}\n", name);
+ if let Some(ref e) = emoji {
+ let e = e.trim();
+ if !e.is_empty() {
+ content.push_str(&format!("- Emoji: {}\n", e));
+ }
}
- }
- // Write via SSH
- let ws = if workspace.starts_with("~/") {
- workspace.to_string()
- } else {
- format!("~/{workspace}")
- };
- pool.exec(&host_id, &format!("mkdir -p {}", shell_escape(&ws)))
- .await?;
- let identity_path = format!("{}/IDENTITY.md", ws);
- pool.sftp_write(&host_id, &identity_path, &content).await?;
+ // Write via SSH
+ let ws = if workspace.starts_with("~/") {
+ workspace.to_string()
+ } else {
+ format!("~/{workspace}")
+ };
+ pool.exec(&host_id, &format!("mkdir -p {}", shell_escape(&ws)))
+ .await?;
+ let identity_path = format!("{}/IDENTITY.md", ws);
+ pool.sftp_write(&host_id, &identity_path, &content).await?;
- Ok(true)
+ Ok(true)
+ })
}
#[tauri::command]
@@ -59,34 +61,36 @@ pub async fn remote_chat_via_openclaw(
message: String,
session_id: Option<String>,
) -> Result<serde_json::Value, String> {
- let escaped_msg = message.replace('\'', "'\\''");
- let escaped_agent = agent_id.replace('\'', "'\\''");
- let mut cmd = format!(
- "openclaw agent --local --agent '{}' --message '{}' --json --no-color",
- escaped_agent, escaped_msg
- );
- if let Some(sid) = session_id {
- let escaped_sid = sid.replace('\'', "'\\''");
- cmd.push_str(&format!(" --session-id '{}'", escaped_sid));
- }
- let result = pool.exec_login(&host_id, &cmd).await?;
- // Try to extract JSON from stdout first — even on non-zero exit the
- // command may have produced valid output (e.g. bash job-control warnings
- // in stderr cause exit 1 but the actual command succeeded).
- if let Some(json_str) = clawpal_core::doctor::extract_json_from_output(&result.stdout) {
- return serde_json::from_str(json_str)
- .map_err(|e| format!("Failed to parse remote chat response: {e}"));
- }
- if result.exit_code != 0 {
- return Err(format!(
- "Remote chat failed (exit {}): {}",
- result.exit_code, result.stderr
- ));
- }
- Err(format!(
- "No JSON in remote openclaw output: {}",
- result.stdout
- ))
+ timed_async!("remote_chat_via_openclaw", {
+ let escaped_msg = message.replace('\'', "'\\''");
+ let escaped_agent = agent_id.replace('\'', "'\\''");
+ let mut cmd = format!(
+ "openclaw agent --local --agent '{}' --message '{}' --json --no-color",
+ escaped_agent, escaped_msg
+ );
+ if let Some(sid) = session_id {
+ let escaped_sid = sid.replace('\'', "'\\''");
+ cmd.push_str(&format!(" --session-id '{}'", escaped_sid));
+ }
+ let result = pool.exec_login(&host_id, &cmd).await?;
+ // Try to extract JSON from stdout first — even on non-zero exit the
+ // command may have produced valid output (e.g. bash job-control warnings
+ // in stderr cause exit 1 but the actual command succeeded).
+ if let Some(json_str) = clawpal_core::doctor::extract_json_from_output(&result.stdout) {
+ return serde_json::from_str(json_str)
+ .map_err(|e| format!("Failed to parse remote chat response: {e}"));
+ }
+ if result.exit_code != 0 {
+ return Err(format!(
+ "Remote chat failed (exit {}): {}",
+ result.exit_code, result.stderr
+ ));
+ }
+ Err(format!(
+ "No JSON in remote openclaw output: {}",
+ result.stdout
+ ))
+ })
}
#[tauri::command]
@@ -95,123 +99,129 @@ pub fn create_agent(
model_value: Option<String>,
independent: Option<bool>,
) -> Result<AgentOverview, String> {
- let agent_id = agent_id.trim().to_string();
- if agent_id.is_empty() {
- return Err("Agent ID is required".into());
- }
- if !agent_id
- .chars()
- .all(|c| c.is_alphanumeric() || c == '-' || c == '_')
- {
- return Err("Agent ID may only contain letters, numbers, hyphens, and underscores".into());
- }
+ timed_sync!("create_agent", {
+ let agent_id = agent_id.trim().to_string();
+ if agent_id.is_empty() {
+ return Err("Agent ID is required".into());
+ }
+ if !agent_id
+ .chars()
+ .all(|c| c.is_alphanumeric() || c == '-' || c == '_')
+ {
+ return Err(
+ "Agent ID may only contain letters, numbers, hyphens, and underscores".into(),
+ );
+ }
- let paths = resolve_paths();
- let mut cfg = read_openclaw_config(&paths)?;
- let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?;
+ let paths = resolve_paths();
+ let mut cfg = read_openclaw_config(&paths)?;
+ let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?;
- let existing_ids = collect_agent_ids(&cfg);
- if existing_ids
- .iter()
- .any(|id| id.eq_ignore_ascii_case(&agent_id))
- {
- return Err(format!("Agent '{}' already exists", agent_id));
- }
+ let existing_ids = collect_agent_ids(&cfg);
+ if existing_ids
+ .iter()
+ .any(|id| id.eq_ignore_ascii_case(&agent_id))
+ {
+ return Err(format!("Agent '{}' already exists", agent_id));
+ }
- let model_display = model_value
- .map(|v| v.trim().to_string())
- .filter(|v| !v.is_empty());
+ let model_display = model_value
+ .map(|v| v.trim().to_string())
+ .filter(|v| !v.is_empty());
- // If independent, create a dedicated workspace directory;
- // otherwise inherit the default workspace so the gateway doesn't auto-create one.
- let workspace = if independent.unwrap_or(false) {
- let ws_dir = paths.base_dir.join("workspaces").join(&agent_id);
- fs::create_dir_all(&ws_dir).map_err(|e| e.to_string())?;
- let ws_path = ws_dir.to_string_lossy().to_string();
- Some(ws_path)
- } else {
- cfg.pointer("/agents/defaults/workspace")
- .or_else(|| cfg.pointer("/agents/default/workspace"))
- .and_then(Value::as_str)
- .map(|s| s.to_string())
- };
+ // If independent, create a dedicated workspace directory;
+ // otherwise inherit the default workspace so the gateway doesn't auto-create one.
+ let workspace = if independent.unwrap_or(false) {
+ let ws_dir = paths.base_dir.join("workspaces").join(&agent_id);
+ fs::create_dir_all(&ws_dir).map_err(|e| e.to_string())?;
+ let ws_path = ws_dir.to_string_lossy().to_string();
+ Some(ws_path)
+ } else {
+ cfg.pointer("/agents/defaults/workspace")
+ .or_else(|| cfg.pointer("/agents/default/workspace"))
+ .and_then(Value::as_str)
+ .map(|s| s.to_string())
+ };
- // Build agent entry
- let mut agent_obj = serde_json::Map::new();
- agent_obj.insert("id".into(), Value::String(agent_id.clone()));
- if let Some(ref model_str) = model_display {
- agent_obj.insert("model".into(), Value::String(model_str.clone()));
- }
- if let Some(ref ws) = workspace {
- agent_obj.insert("workspace".into(), Value::String(ws.clone()));
- }
+ // Build agent entry
+ let mut agent_obj = serde_json::Map::new();
+ agent_obj.insert("id".into(), Value::String(agent_id.clone()));
+ if let Some(ref model_str) = model_display {
+ agent_obj.insert("model".into(), Value::String(model_str.clone()));
+ }
+ if let Some(ref ws) = workspace {
+ agent_obj.insert("workspace".into(), Value::String(ws.clone()));
+ }
- let agents = cfg
- .as_object_mut()
- .ok_or("config is not an object")?
- .entry("agents")
- .or_insert_with(|| Value::Object(serde_json::Map::new()))
- .as_object_mut()
- .ok_or("agents is not an object")?;
- let list = agents
- .entry("list")
- .or_insert_with(|| Value::Array(Vec::new()))
- .as_array_mut()
- .ok_or("agents.list is not an array")?;
- list.push(Value::Object(agent_obj));
+ let agents = cfg
+ .as_object_mut()
+ .ok_or("config is not an object")?
+ .entry("agents")
+ .or_insert_with(|| Value::Object(serde_json::Map::new()))
+ .as_object_mut()
+ .ok_or("agents is not an object")?;
+ let list = agents
+ .entry("list")
+ .or_insert_with(|| Value::Array(Vec::new()))
+ .as_array_mut()
+ .ok_or("agents.list is not an array")?;
+ list.push(Value::Object(agent_obj));
- write_config_with_snapshot(&paths, &current, &cfg, "create-agent")?;
- Ok(AgentOverview {
- id: agent_id,
- name: None,
- emoji: None,
- model: model_display,
- channels: vec![],
- online: false,
- workspace,
+ write_config_with_snapshot(&paths, &current, &cfg, "create-agent")?;
+ Ok(AgentOverview {
+ id: agent_id,
+ name: None,
+ emoji: None,
+ model: model_display,
+ channels: vec![],
+ online: false,
+ workspace,
+ })
})
}
#[tauri::command]
pub fn delete_agent(agent_id: String) -> Result<bool, String> {
- let agent_id = agent_id.trim().to_string();
- if agent_id.is_empty() {
- return Err("Agent ID is required".into());
- }
- if agent_id == "main" {
- return Err("Cannot delete the main agent".into());
- }
+ timed_sync!("delete_agent", {
+ let agent_id = agent_id.trim().to_string();
+ if agent_id.is_empty() {
+ return Err("Agent ID is required".into());
+ }
+ if agent_id == "main" {
+ return Err("Cannot delete the main agent".into());
+ }
- let paths = resolve_paths();
- let mut cfg = read_openclaw_config(&paths)?;
- let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?;
+ let paths = resolve_paths();
+ let mut cfg = read_openclaw_config(&paths)?;
+ let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?;
- let list = cfg
- .pointer_mut("/agents/list")
- .and_then(Value::as_array_mut)
- .ok_or("agents.list not found")?;
+ let list = cfg
+ .pointer_mut("/agents/list")
+ .and_then(Value::as_array_mut)
+ .ok_or("agents.list not found")?;
- let before = list.len();
- list.retain(|agent| agent.get("id").and_then(Value::as_str) != Some(&agent_id));
+ let before = list.len();
+ list.retain(|agent| agent.get("id").and_then(Value::as_str) != Some(&agent_id));
- if list.len() == before {
- return Err(format!("Agent '{}' not found", agent_id));
- }
+ if list.len() == before {
+ return Err(format!("Agent '{}' not found", agent_id));
+ }
- // Reset any bindings that reference this agent back to "main" (default)
- // so the channel doesn't lose its binding entry entirely.
- if let Some(bindings) = cfg.pointer_mut("/bindings").and_then(Value::as_array_mut) {
- for b in bindings.iter_mut() {
- if b.get("agentId").and_then(Value::as_str) == Some(&agent_id) {
- if let Some(obj) = b.as_object_mut() {
- obj.insert("agentId".into(), Value::String("main".into()));
+ // Reset any bindings that reference this agent back to "main" (default)
+ // so the channel doesn't lose its binding entry entirely.
+ if let Some(bindings) = cfg.pointer_mut("/bindings").and_then(Value::as_array_mut) {
+ for b in bindings.iter_mut() {
+ if b.get("agentId").and_then(Value::as_str) == Some(&agent_id) {
+ if let Some(obj) = b.as_object_mut() {
+ obj.insert("agentId".into(), Value::String("main".into()));
+ }
}
}
}
- }
- write_config_with_snapshot(&paths, &current, &cfg, "delete-agent")?;
- Ok(true)
+ write_config_with_snapshot(&paths, &current, &cfg, "delete-agent")?;
+ Ok(true)
+ })
}
#[tauri::command]
@@ -220,38 +230,41 @@ pub fn setup_agent_identity(
name: String,
emoji: Option<String>,
) -> Result<bool, String> {
- let agent_id = agent_id.trim().to_string();
- let name = name.trim().to_string();
- if agent_id.is_empty() {
- return Err("Agent ID is required".into());
- }
- if name.is_empty() {
- return Err("Name is required".into());
- }
+ timed_sync!("setup_agent_identity", {
+ let agent_id = agent_id.trim().to_string();
+ let name = name.trim().to_string();
+ if agent_id.is_empty() {
+ return Err("Agent ID is required".into());
+ }
+ if name.is_empty() {
+ return Err("Name is required".into());
+ }
- let paths = resolve_paths();
- let cfg = read_openclaw_config(&paths)?;
+ let paths = resolve_paths();
+ let cfg = read_openclaw_config(&paths)?;
- let workspace =
- clawpal_core::doctor::resolve_agent_workspace_from_config(&cfg, &agent_id, None)
- .map(|s| expand_tilde(&s))?;
+ let workspace =
+ clawpal_core::doctor::resolve_agent_workspace_from_config(&cfg, &agent_id, None)
+ .map(|s| expand_tilde(&s))?;
- // Build IDENTITY.md content
- let mut content = format!("- Name: {}\n", name);
- if let Some(ref e) = emoji {
- let e = e.trim();
- if !e.is_empty() {
- content.push_str(&format!("- Emoji: {}\n", e));
+ // Build IDENTITY.md content
+ let mut content = format!("- Name: {}\n", name);
+ if let Some(ref e) = emoji {
+ let e = e.trim();
+ if !e.is_empty() {
+ content.push_str(&format!("- Emoji: {}\n", e));
+ }
}
- }
- let ws_path = std::path::Path::new(&workspace);
- fs::create_dir_all(ws_path).map_err(|e| format!("Failed to create workspace dir: {}", e))?;
- let identity_path = ws_path.join("IDENTITY.md");
- fs::write(&identity_path, &content)
- .map_err(|e| format!("Failed to write IDENTITY.md: {}", e))?;
+ let ws_path = std::path::Path::new(&workspace);
+ fs::create_dir_all(ws_path)
+ .map_err(|e| format!("Failed to create workspace dir: {}", e))?;
+ let identity_path = ws_path.join("IDENTITY.md");
+ fs::write(&identity_path, &content)
+ .map_err(|e| format!("Failed to write IDENTITY.md: {}", e))?;
- Ok(true)
+ Ok(true)
+ })
}
#[tauri::command]
@@ -260,32 +273,35 @@ pub async fn chat_via_openclaw(
message: String,
session_id: Option<String>,
) -> Result<serde_json::Value, String> {
- tauri::async_runtime::spawn_blocking(move || {
- let paths = resolve_paths();
- if let Err(err) = sync_main_auth_for_active_config(&paths) {
- eprintln!("Warning: pre-chat main auth sync failed: {err}");
- }
- let mut args = vec![
- "agent".to_string(),
- "--local".to_string(),
- "--agent".to_string(),
- agent_id,
- "--message".to_string(),
- message,
- "--json".to_string(),
- "--no-color".to_string(),
- ];
- if let Some(sid) = session_id {
- args.push("--session-id".to_string());
- args.push(sid);
- }
+ timed_async!("chat_via_openclaw", {
+ tauri::async_runtime::spawn_blocking(move || {
+ let paths = resolve_paths();
+ if let Err(err) = sync_main_auth_for_active_config(&paths) {
+ eprintln!("Warning: pre-chat main auth sync failed: {err}");
+ }
+ let mut args = vec![
+ "agent".to_string(),
+ "--local".to_string(),
+ "--agent".to_string(),
+ agent_id,
+ "--message".to_string(),
+ message,
+ "--json".to_string(),
+ "--no-color".to_string(),
+ ];
+ if let Some(sid) = session_id {
+ args.push("--session-id".to_string());
+ args.push(sid);
+ }
- let arg_refs: Vec<&str> = args.iter().map(|s| s.as_str()).collect();
- let output = run_openclaw_raw(&arg_refs)?;
- let json_str = clawpal_core::doctor::extract_json_from_output(&output.stdout)
- .ok_or_else(|| format!("No JSON in openclaw output: {}", output.stdout))?;
- serde_json::from_str(json_str).map_err(|e| format!("Parse openclaw response failed: {}", e))
+ let arg_refs: Vec<&str> = args.iter().map(|s| s.as_str()).collect();
+ let output = run_openclaw_raw(&arg_refs)?;
+ let json_str = clawpal_core::doctor::extract_json_from_output(&output.stdout)
+ .ok_or_else(|| format!("No JSON in openclaw output: {}", output.stdout))?;
+ serde_json::from_str(json_str)
+ .map_err(|e| format!("Parse openclaw response failed: {}", e))
+ })
+ .await
+ .map_err(|e| format!("Task join failed: {}", e))?
})
- .await
- .map_err(|e| format!("Task join failed: {}", e))?
}
diff --git a/src-tauri/src/commands/app_logs.rs b/src-tauri/src/commands/app_logs.rs
index 1311f0af..e65797f2 100644
--- a/src-tauri/src/commands/app_logs.rs
+++ b/src-tauri/src/commands/app_logs.rs
@@ -9,44 +9,56 @@ fn clamp_log_lines(lines: Option<usize>) -> usize {
#[tauri::command]
pub fn read_app_log(lines: Option<usize>) -> Result<String, String> {
- crate::logging::read_log_tail("app.log", clamp_log_lines(lines))
+ timed_sync!("read_app_log", {
+ crate::logging::read_log_tail("app.log", clamp_log_lines(lines))
+ })
}
#[tauri::command]
pub fn read_error_log(lines: Option<usize>) -> Result<String, String> {
- crate::logging::read_log_tail("error.log", clamp_log_lines(lines))
+ timed_sync!("read_error_log", {
+ crate::logging::read_log_tail("error.log", clamp_log_lines(lines))
+ })
}
#[tauri::command]
pub fn read_helper_log(lines: Option<usize>) -> Result<String, String> {
- crate::logging::read_log_tail("helper.log", clamp_log_lines(lines))
+ timed_sync!("read_helper_log", {
+ crate::logging::read_log_tail("helper.log", clamp_log_lines(lines))
+ })
}
#[tauri::command]
pub fn log_app_event(message: String) -> Result<bool, String> {
- let trimmed = message.trim();
- if !trimmed.is_empty() {
- crate::logging::log_info(trimmed);
- }
- Ok(true)
+ timed_sync!("log_app_event", {
+ let trimmed = message.trim();
+ if !trimmed.is_empty() {
+ crate::logging::log_info(trimmed);
+ }
+ Ok(true)
+ })
}
#[tauri::command]
pub fn read_gateway_log(lines: Option<usize>) -> Result<String, String> {
- let paths = crate::models::resolve_paths();
- let path = paths.openclaw_dir.join("logs/gateway.log");
- if !path.exists() {
- return Ok(String::new());
- }
- crate::logging::read_path_tail(&path, clamp_log_lines(lines))
+ timed_sync!("read_gateway_log", {
+ let paths = crate::models::resolve_paths();
+ let path = paths.openclaw_dir.join("logs/gateway.log");
+ if !path.exists() {
+ return Ok(String::new());
+ }
+ crate::logging::read_path_tail(&path, clamp_log_lines(lines))
+ })
}
#[tauri::command]
pub fn read_gateway_error_log(lines: Option<usize>) -> Result<String, String> {
- let paths = crate::models::resolve_paths();
- let path = paths.openclaw_dir.join("logs/gateway.err.log");
- if !path.exists() {
- return Ok(String::new());
- }
- crate::logging::read_path_tail(&path, clamp_log_lines(lines))
+ timed_sync!("read_gateway_error_log", {
+ let paths = crate::models::resolve_paths();
+ let path = paths.openclaw_dir.join("logs/gateway.err.log");
+ if !path.exists() {
+ return Ok(String::new());
+ }
+ crate::logging::read_path_tail(&path, clamp_log_lines(lines))
+ })
}
diff --git a/src-tauri/src/commands/backup.rs b/src-tauri/src/commands/backup.rs
index 283d7acf..70d74461 100644
--- a/src-tauri/src/commands/backup.rs
+++ b/src-tauri/src/commands/backup.rs
@@ -5,41 +5,43 @@ pub async fn remote_backup_before_upgrade(
pool: State<'_, SshConnectionPool>,
host_id: String,
) -> Result<BackupInfo, String> {
- let now_secs = unix_timestamp_secs();
- let now_dt = chrono::DateTime::<chrono::Utc>::from_timestamp(now_secs as i64, 0);
- let name = now_dt
- .map(|dt| dt.format("%Y-%m-%d_%H%M%S").to_string())
- .unwrap_or_else(|| format!("{now_secs}"));
-
- let escaped_name = shell_escape(&name);
- let cmd = format!(
- concat!(
- "set -e; ",
- "BDIR=\"$HOME/.clawpal/backups/\"{name}; ",
- "mkdir -p \"$BDIR\"; ",
- "cp \"$HOME/.openclaw/openclaw.json\" \"$BDIR/\" 2>/dev/null || true; ",
- "cp -r \"$HOME/.openclaw/agents\" \"$BDIR/\" 2>/dev/null || true; ",
- "cp -r \"$HOME/.openclaw/memory\" \"$BDIR/\" 2>/dev/null || true; ",
- "du -sk \"$BDIR\" 2>/dev/null | awk '{{print $1 * 1024}}' || echo 0"
- ),
- name = escaped_name
- );
-
- let result = pool.exec_login(&host_id, &cmd).await?;
- if result.exit_code != 0 {
- return Err(format!(
- "Remote backup failed (exit {}): {}",
- result.exit_code, result.stderr
- ));
- }
-
- let size_bytes = clawpal_core::backup::parse_backup_result(&result.stdout).size_bytes;
-
- Ok(BackupInfo {
- name,
- path: String::new(),
- created_at: format_timestamp_from_unix(now_secs),
- size_bytes,
+ timed_async!("remote_backup_before_upgrade", {
+ let now_secs = unix_timestamp_secs();
+ let now_dt = chrono::DateTime::<chrono::Utc>::from_timestamp(now_secs as i64, 0);
+ let name = now_dt
+ .map(|dt| dt.format("%Y-%m-%d_%H%M%S").to_string())
+ .unwrap_or_else(|| format!("{now_secs}"));
+
+ let escaped_name = shell_escape(&name);
+ let cmd = format!(
+ concat!(
+ "set -e; ",
+ "BDIR=\"$HOME/.clawpal/backups/\"{name}; ",
+ "mkdir -p \"$BDIR\"; ",
+ "cp \"$HOME/.openclaw/openclaw.json\" \"$BDIR/\" 2>/dev/null || true; ",
+ "cp -r \"$HOME/.openclaw/agents\" \"$BDIR/\" 2>/dev/null || true; ",
+ "cp -r \"$HOME/.openclaw/memory\" \"$BDIR/\" 2>/dev/null || true; ",
+ "du -sk \"$BDIR\" 2>/dev/null | awk '{{print $1 * 1024}}' || echo 0"
+ ),
+ name = escaped_name
+ );
+
+ let result = pool.exec_login(&host_id, &cmd).await?;
+ if result.exit_code != 0 {
+ return Err(format!(
+ "Remote backup failed (exit {}): {}",
+ result.exit_code, result.stderr
+ ));
+ }
+
+ let size_bytes = clawpal_core::backup::parse_backup_result(&result.stdout).size_bytes;
+
+ Ok(BackupInfo {
+ name,
+ path: String::new(),
+ created_at: format_timestamp_from_unix(now_secs),
+ size_bytes,
+ })
})
}
@@ -48,69 +50,71 @@ pub async fn remote_list_backups(
pool: State<'_, SshConnectionPool>,
host_id: String,
) -> Result<Vec<BackupInfo>, String> {
- // Migrate remote data from legacy path ~/.openclaw/.clawpal → ~/.clawpal
- let _ = pool
- .exec_login(
- &host_id,
- concat!(
- "if [ -d \"$HOME/.openclaw/.clawpal\" ]; then ",
- "mkdir -p \"$HOME/.clawpal\"; ",
- "cp -a \"$HOME/.openclaw/.clawpal/.\" \"$HOME/.clawpal/\" 2>/dev/null; ",
- "rm -rf \"$HOME/.openclaw/.clawpal\"; ",
- "fi"
- ),
- )
- .await;
-
- // List backup directory names
- let list_result = pool
- .exec_login(
- &host_id,
- "ls -1d \"$HOME/.clawpal/backups\"/*/ 2>/dev/null || true",
- )
- .await?;
-
- let dirs: Vec<String> = list_result
- .stdout
- .lines()
- .filter(|l| !l.trim().is_empty())
- .map(|l| l.trim().trim_end_matches('/').to_string())
- .collect();
-
- if dirs.is_empty() {
- return Ok(Vec::new());
- }
-
- // Build a single command to get sizes for all backup dirs (du -sk is POSIX portable)
- let du_parts: Vec<String> = dirs
- .iter()
- .map(|d| format!("du -sk '{}' 2>/dev/null || echo '0\t{}'", d, d))
- .collect();
- let du_cmd = du_parts.join("; ");
- let du_result = pool.exec_login(&host_id, &du_cmd).await?;
-
- let size_entries = clawpal_core::backup::parse_backup_list(&du_result.stdout);
- let size_map: std::collections::HashMap<String, u64> = size_entries
- .into_iter()
- .map(|e| (e.path, e.size_bytes))
- .collect();
-
- let mut backups: Vec<BackupInfo> = dirs
- .iter()
- .map(|d| {
- let name = d.rsplit('/').next().unwrap_or(d).to_string();
- let size_bytes = size_map.get(d.trim_end_matches('/')).copied().unwrap_or(0);
- BackupInfo {
- name: name.clone(),
- path: d.clone(),
- created_at: name.clone(), // Name is the timestamp
- size_bytes,
- }
- })
- .collect();
+ timed_async!("remote_list_backups", {
+ // Migrate remote data from legacy path ~/.openclaw/.clawpal → ~/.clawpal
+ let _ = pool
+ .exec_login(
+ &host_id,
+ concat!(
+ "if [ -d \"$HOME/.openclaw/.clawpal\" ]; then ",
+ "mkdir -p \"$HOME/.clawpal\"; ",
+ "cp -a \"$HOME/.openclaw/.clawpal/.\" \"$HOME/.clawpal/\" 2>/dev/null; ",
+ "rm -rf \"$HOME/.openclaw/.clawpal\"; ",
+ "fi"
+ ),
+ )
+ .await;
+
+ // List backup directory names
+ let list_result = pool
+ .exec_login(
+ &host_id,
+ "ls -1d \"$HOME/.clawpal/backups\"/*/ 2>/dev/null || true",
+ )
+ .await?;
+
+ let dirs: Vec<String> = list_result
+ .stdout
+ .lines()
+ .filter(|l| !l.trim().is_empty())
+ .map(|l| l.trim().trim_end_matches('/').to_string())
+ .collect();
+
+ if dirs.is_empty() {
+ return Ok(Vec::new());
+ }
- backups.sort_by(|a, b| b.name.cmp(&a.name));
- Ok(backups)
+ // Build a single command to get sizes for all backup dirs (du -sk is POSIX portable)
+ let du_parts: Vec<String> = dirs
+ .iter()
+ .map(|d| format!("du -sk '{}' 2>/dev/null || echo '0\t{}'", d, d))
+ .collect();
+ let du_cmd = du_parts.join("; ");
+ let du_result = pool.exec_login(&host_id, &du_cmd).await?;
+
+ let size_entries = clawpal_core::backup::parse_backup_list(&du_result.stdout);
+ let size_map: std::collections::HashMap<String, u64> = size_entries
+ .into_iter()
+ .map(|e| (e.path, e.size_bytes))
+ .collect();
+
+ let mut backups: Vec<BackupInfo> = dirs
+ .iter()
+ .map(|d| {
+ let name = d.rsplit('/').next().unwrap_or(d).to_string();
+ let size_bytes = size_map.get(d.trim_end_matches('/')).copied().unwrap_or(0);
+ BackupInfo {
+ name: name.clone(),
+ path: d.clone(),
+ created_at: name.clone(), // Name is the timestamp
+ size_bytes,
+ }
+ })
+ .collect();
+
+ backups.sort_by(|a, b| b.name.cmp(&a.name));
+ Ok(backups)
+ })
}
#[tauri::command]
@@ -119,26 +123,28 @@ pub async fn remote_restore_from_backup(
host_id: String,
backup_name: String,
) -> Result<String, String> {
- let escaped_name = shell_escape(&backup_name);
- let cmd = format!(
- concat!(
- "set -e; ",
- "BDIR=\"$HOME/.clawpal/backups/\"{name}; ",
- "[ -d \"$BDIR\" ] || {{ echo 'Backup not found'; exit 1; }}; ",
- "cp \"$BDIR/openclaw.json\" \"$HOME/.openclaw/openclaw.json\" 2>/dev/null || true; ",
- "[ -d \"$BDIR/agents\" ] && cp -r \"$BDIR/agents\" \"$HOME/.openclaw/\" 2>/dev/null || true; ",
- "[ -d \"$BDIR/memory\" ] && cp -r \"$BDIR/memory\" \"$HOME/.openclaw/\" 2>/dev/null || true; ",
- "echo 'Restored from backup '{name}"
- ),
- name = escaped_name
- );
-
- let result = pool.exec_login(&host_id, &cmd).await?;
- if result.exit_code != 0 {
- return Err(format!("Remote restore failed: {}", result.stderr));
- }
-
- Ok(format!("Restored from backup '{}'", backup_name))
+ timed_async!("remote_restore_from_backup", {
+ let escaped_name = shell_escape(&backup_name);
+ let cmd = format!(
+ concat!(
+ "set -e; ",
+ "BDIR=\"$HOME/.clawpal/backups/\"{name}; ",
+ "[ -d \"$BDIR\" ] || {{ echo 'Backup not found'; exit 1; }}; ",
+ "cp \"$BDIR/openclaw.json\" \"$HOME/.openclaw/openclaw.json\" 2>/dev/null || true; ",
+ "[ -d \"$BDIR/agents\" ] && cp -r \"$BDIR/agents\" \"$HOME/.openclaw/\" 2>/dev/null || true; ",
+ "[ -d \"$BDIR/memory\" ] && cp -r \"$BDIR/memory\" \"$HOME/.openclaw/\" 2>/dev/null || true; ",
+ "echo 'Restored from backup '{name}"
+ ),
+ name = escaped_name
+ );
+
+ let result = pool.exec_login(&host_id, &cmd).await?;
+ if result.exit_code != 0 {
+ return Err(format!("Remote restore failed: {}", result.stderr));
+ }
+
+ Ok(format!("Restored from backup '{}'", backup_name))
+ })
}
#[tauri::command]
@@ -146,44 +152,49 @@ pub async fn remote_run_openclaw_upgrade(
pool: State<'_, SshConnectionPool>,
host_id: String,
) -> Result<String, String> {
- // Use the official install script with --no-prompt for non-interactive SSH.
- // The script handles npm prefix/permissions, bin links, and PATH fixups
- // that plain `npm install -g` misses (e.g. stale /usr/bin/openclaw symlinks).
- let version_before = pool
- .exec_login(&host_id, "openclaw --version 2>/dev/null || true")
- .await
- .map(|r| r.stdout.trim().to_string())
- .unwrap_or_default();
-
- let install_cmd = "curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install.sh | bash -s -- --no-prompt --no-onboard 2>&1";
- let result = pool.exec_login(&host_id, install_cmd).await?;
- let combined = if result.stderr.is_empty() {
- result.stdout.clone()
- } else {
- format!("{}\n{}", result.stdout, result.stderr)
- };
-
- if result.exit_code != 0 {
- return Err(combined);
- }
-
- // Restart gateway after successful upgrade (best-effort)
- let _ = pool
- .exec_login(&host_id, "openclaw gateway restart 2>/dev/null || true")
- .await;
-
- // Verify version actually changed
- let version_after = pool
- .exec_login(&host_id, "openclaw --version 2>/dev/null || true")
- .await
- .map(|r| r.stdout.trim().to_string())
- .unwrap_or_default();
- let _upgrade_info = clawpal_core::backup::parse_upgrade_result(&combined);
- if !version_before.is_empty() && !version_after.is_empty() && version_before == version_after {
- return Err(format!("{combined}\n\nWarning: version unchanged after upgrade ({version_before}). Check PATH or npm prefix."));
- }
-
- Ok(combined)
+ timed_async!("remote_run_openclaw_upgrade", {
+ // Use the official install script with --no-prompt for non-interactive SSH.
+ // The script handles npm prefix/permissions, bin links, and PATH fixups
+ // that plain `npm install -g` misses (e.g. stale /usr/bin/openclaw symlinks).
+ let version_before = pool
+ .exec_login(&host_id, "openclaw --version 2>/dev/null || true")
+ .await
+ .map(|r| r.stdout.trim().to_string())
+ .unwrap_or_default();
+
+ let install_cmd = "curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install.sh | bash -s -- --no-prompt --no-onboard 2>&1";
+ let result = pool.exec_login(&host_id, install_cmd).await?;
+ let combined = if result.stderr.is_empty() {
+ result.stdout.clone()
+ } else {
+ format!("{}\n{}", result.stdout, result.stderr)
+ };
+
+ if result.exit_code != 0 {
+ return Err(combined);
+ }
+
+ // Restart gateway after successful upgrade (best-effort)
+ let _ = pool
+ .exec_login(&host_id, "openclaw gateway restart 2>/dev/null || true")
+ .await;
+
+ // Verify version actually changed
+ let version_after = pool
+ .exec_login(&host_id, "openclaw --version 2>/dev/null || true")
+ .await
+ .map(|r| r.stdout.trim().to_string())
+ .unwrap_or_default();
+ let _upgrade_info = clawpal_core::backup::parse_upgrade_result(&combined);
+ if !version_before.is_empty()
+ && !version_after.is_empty()
+ && version_before == version_after
+ {
+ return Err(format!("{combined}\n\nWarning: version unchanged after upgrade ({version_before}). Check PATH or npm prefix."));
+ }
+
+ Ok(combined)
+ })
}
#[tauri::command]
@@ -191,137 +202,149 @@ pub async fn remote_check_openclaw_update(
pool: State<'_, SshConnectionPool>,
host_id: String,
) -> Result<serde_json::Value, String> {
- // Get installed version and extract clean semver — don't fail if binary not found
- let installed_version = match pool.exec_login(&host_id, "openclaw --version").await {
- Ok(r) => extract_version_from_text(r.stdout.trim())
- .unwrap_or_else(|| r.stdout.trim().to_string()),
- Err(_) => String::new(),
- };
-
- let paths = resolve_paths();
- let cache = tokio::task::spawn_blocking(move || {
- resolve_openclaw_latest_release_cached(&paths, false).ok()
+ timed_async!("remote_check_openclaw_update", {
+ // Get installed version and extract clean semver — don't fail if binary not found
+ let installed_version = match pool.exec_login(&host_id, "openclaw --version").await {
+ Ok(r) => extract_version_from_text(r.stdout.trim())
+ .unwrap_or_else(|| r.stdout.trim().to_string()),
+ Err(_) => String::new(),
+ };
+
+ let paths = resolve_paths();
+ let cache = tokio::task::spawn_blocking(move || {
+ resolve_openclaw_latest_release_cached(&paths, false).ok()
+ })
+ .await
+ .unwrap_or(None);
+ let latest_version = cache.and_then(|entry| entry.latest_version);
+ let upgrade = latest_version
+ .as_ref()
+ .is_some_and(|latest| compare_semver(&installed_version, Some(latest.as_str())));
+ Ok(serde_json::json!({
+ "upgradeAvailable": upgrade,
+ "latestVersion": latest_version,
+ "installedVersion": installed_version,
+ }))
})
- .await
- .unwrap_or(None);
- let latest_version = cache.and_then(|entry| entry.latest_version);
- let upgrade = latest_version
- .as_ref()
- .is_some_and(|latest| compare_semver(&installed_version, Some(latest.as_str())));
- Ok(serde_json::json!({
- "upgradeAvailable": upgrade,
- "latestVersion": latest_version,
- "installedVersion": installed_version,
- }))
}
#[tauri::command]
pub fn backup_before_upgrade() -> Result<BackupInfo, String> {
- let paths = resolve_paths();
- let backups_dir = paths.clawpal_dir.join("backups");
- fs::create_dir_all(&backups_dir).map_err(|e| format!("Failed to create backups dir: {e}"))?;
-
- let now_secs = unix_timestamp_secs();
- let now_dt = chrono::DateTime::<chrono::Utc>::from_timestamp(now_secs as i64, 0);
- let name = now_dt
- .map(|dt| dt.format("%Y-%m-%d_%H%M%S").to_string())
- .unwrap_or_else(|| format!("{now_secs}"));
- let backup_dir = backups_dir.join(&name);
- fs::create_dir_all(&backup_dir).map_err(|e| format!("Failed to create backup dir: {e}"))?;
-
- let mut total_bytes = 0u64;
-
- // Copy config file
- if paths.config_path.exists() {
- let dest = backup_dir.join("openclaw.json");
- fs::copy(&paths.config_path, &dest).map_err(|e| format!("Failed to copy config: {e}"))?;
- total_bytes += fs::metadata(&dest).map(|m| m.len()).unwrap_or(0);
- }
-
- // Copy directories, excluding sessions and archive
- let skip_dirs: HashSet<&str> = ["sessions", "archive", ".clawpal"]
- .iter()
- .copied()
- .collect();
- copy_dir_recursive(&paths.base_dir, &backup_dir, &skip_dirs, &mut total_bytes)?;
-
- Ok(BackupInfo {
- name: name.clone(),
- path: backup_dir.to_string_lossy().to_string(),
- created_at: format_timestamp_from_unix(now_secs),
- size_bytes: total_bytes,
+ timed_sync!("backup_before_upgrade", {
+ let paths = resolve_paths();
+ let backups_dir = paths.clawpal_dir.join("backups");
+ fs::create_dir_all(&backups_dir)
+ .map_err(|e| format!("Failed to create backups dir: {e}"))?;
+
+ let now_secs = unix_timestamp_secs();
+ let now_dt = chrono::DateTime::<chrono::Utc>::from_timestamp(now_secs as i64, 0);
+ let name = now_dt
+ .map(|dt| dt.format("%Y-%m-%d_%H%M%S").to_string())
+ .unwrap_or_else(|| format!("{now_secs}"));
+ let backup_dir = backups_dir.join(&name);
+ fs::create_dir_all(&backup_dir).map_err(|e| format!("Failed to create backup dir: {e}"))?;
+
+ let mut total_bytes = 0u64;
+
+ // Copy config file
+ if paths.config_path.exists() {
+ let dest = backup_dir.join("openclaw.json");
+ fs::copy(&paths.config_path, &dest)
+ .map_err(|e| format!("Failed to copy config: {e}"))?;
+ total_bytes += fs::metadata(&dest).map(|m| m.len()).unwrap_or(0);
+ }
+
+ // Copy directories, excluding sessions and archive
+ let skip_dirs: HashSet<&str> = ["sessions", "archive", ".clawpal"]
+ .iter()
+ .copied()
+ .collect();
+ copy_dir_recursive(&paths.base_dir, &backup_dir, &skip_dirs, &mut total_bytes)?;
+
+ Ok(BackupInfo {
+ name: name.clone(),
+ path: backup_dir.to_string_lossy().to_string(),
+ created_at: format_timestamp_from_unix(now_secs),
+ size_bytes: total_bytes,
+ })
})
}
#[tauri::command]
pub fn list_backups() -> Result<Vec<BackupInfo>, String> {
- let paths = resolve_paths();
- let backups_dir = paths.clawpal_dir.join("backups");
- if !backups_dir.exists() {
- return Ok(Vec::new());
- }
- let mut backups = Vec::new();
- let entries = fs::read_dir(&backups_dir).map_err(|e| e.to_string())?;
- for entry in entries {
- let entry = entry.map_err(|e| e.to_string())?;
- if !entry.file_type().map(|t| t.is_dir()).unwrap_or(false) {
- continue;
+ timed_sync!("list_backups", {
+ let paths = resolve_paths();
+ let backups_dir = paths.clawpal_dir.join("backups");
+ if !backups_dir.exists() {
+ return Ok(Vec::new());
}
- let name = entry.file_name().to_string_lossy().to_string();
- let path = entry.path();
- let size = dir_size(&path);
- let created_at = fs::metadata(&path)
- .and_then(|m| m.created())
- .map(|t| {
- let secs = t.duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
- format_timestamp_from_unix(secs)
- })
- .unwrap_or_else(|_| name.clone());
- backups.push(BackupInfo {
- name,
- path: path.to_string_lossy().to_string(),
- created_at,
- size_bytes: size,
- });
- }
- backups.sort_by(|a, b| b.name.cmp(&a.name));
- Ok(backups)
+ let mut backups = Vec::new();
+ let entries = fs::read_dir(&backups_dir).map_err(|e| e.to_string())?;
+ for entry in entries {
+ let entry = entry.map_err(|e| e.to_string())?;
+ if !entry.file_type().map(|t| t.is_dir()).unwrap_or(false) {
+ continue;
+ }
+ let name = entry.file_name().to_string_lossy().to_string();
+ let path = entry.path();
+ let size = dir_size(&path);
+ let created_at = fs::metadata(&path)
+ .and_then(|m| m.created())
+ .map(|t| {
+ let secs = t.duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
+ format_timestamp_from_unix(secs)
+ })
+ .unwrap_or_else(|_| name.clone());
+ backups.push(BackupInfo {
+ name,
+ path: path.to_string_lossy().to_string(),
+ created_at,
+ size_bytes: size,
+ });
+ }
+ backups.sort_by(|a, b| b.name.cmp(&a.name));
+ Ok(backups)
+ })
}
#[tauri::command]
pub fn restore_from_backup(backup_name: String) -> Result<String, String> {
- let paths = resolve_paths();
- let backup_dir = paths.clawpal_dir.join("backups").join(&backup_name);
- if !backup_dir.exists() {
- return Err(format!("Backup '{}' not found", backup_name));
- }
-
- // Restore config file
- let backup_config = backup_dir.join("openclaw.json");
- if backup_config.exists() {
- fs::copy(&backup_config, &paths.config_path)
- .map_err(|e| format!("Failed to restore config: {e}"))?;
- }
-
- // Restore other directories (agents except sessions/archive, memory, etc.)
- let skip_dirs: HashSet<&str> = ["sessions", "archive", ".clawpal"]
- .iter()
- .copied()
- .collect();
- restore_dir_recursive(&backup_dir, &paths.base_dir, &skip_dirs)?;
-
- Ok(format!("Restored from backup '{}'", backup_name))
+ timed_sync!("restore_from_backup", {
+ let paths = resolve_paths();
+ let backup_dir = paths.clawpal_dir.join("backups").join(&backup_name);
+ if !backup_dir.exists() {
+ return Err(format!("Backup '{}' not found", backup_name));
+ }
+
+ // Restore config file
+ let backup_config = backup_dir.join("openclaw.json");
+ if backup_config.exists() {
+ fs::copy(&backup_config, &paths.config_path)
+ .map_err(|e| format!("Failed to restore config: {e}"))?;
+ }
+
+ // Restore other directories (agents except sessions/archive, memory, etc.)
+ let skip_dirs: HashSet<&str> = ["sessions", "archive", ".clawpal"]
+ .iter()
+ .copied()
+ .collect();
+ restore_dir_recursive(&backup_dir, &paths.base_dir, &skip_dirs)?;
+
+ Ok(format!("Restored from backup '{}'", backup_name))
+ })
}
#[tauri::command]
pub fn delete_backup(backup_name: String) -> Result<bool, String> {
- let paths = resolve_paths();
- let backup_dir = paths.clawpal_dir.join("backups").join(&backup_name);
- if !backup_dir.exists() {
- return Ok(false);
- }
- fs::remove_dir_all(&backup_dir).map_err(|e| format!("Failed to delete backup: {e}"))?;
- Ok(true)
+ timed_sync!("delete_backup", {
+ let paths = resolve_paths();
+ let backup_dir = paths.clawpal_dir.join("backups").join(&backup_name);
+ if !backup_dir.exists() {
+ return Ok(false);
+ }
+ fs::remove_dir_all(&backup_dir).map_err(|e| format!("Failed to delete backup: {e}"))?;
+ Ok(true)
+ })
}
#[tauri::command]
@@ -330,18 +353,22 @@ pub async fn remote_delete_backup(
host_id: String,
backup_name: String,
) -> Result {
- let escaped_name = shell_escape(&backup_name);
- let cmd = format!(
- "BDIR=\"$HOME/.clawpal/backups/\"{name}; [ -d \"$BDIR\" ] && rm -rf \"$BDIR\" && echo 'deleted' || echo 'not_found'",
- name = escaped_name
- );
-
- let result = pool.exec_login(&host_id, &cmd).await?;
- Ok(result.stdout.trim() == "deleted")
+ timed_async!("remote_delete_backup", {
+ let escaped_name = shell_escape(&backup_name);
+ let cmd = format!(
+ "BDIR=\"$HOME/.clawpal/backups/\"{name}; [ -d \"$BDIR\" ] && rm -rf \"$BDIR\" && echo 'deleted' || echo 'not_found'",
+ name = escaped_name
+ );
+
+ let result = pool.exec_login(&host_id, &cmd).await?;
+ Ok(result.stdout.trim() == "deleted")
+ })
}
#[tauri::command]
pub fn check_openclaw_update() -> Result {
- let paths = resolve_paths();
- check_openclaw_update_cached(&paths, true)
+ timed_sync!("check_openclaw_update", {
+ let paths = resolve_paths();
+ check_openclaw_update_cached(&paths, true)
+ })
}
diff --git a/src-tauri/src/commands/config.rs b/src-tauri/src/commands/config.rs
index 9182d872..1074846d 100644
--- a/src-tauri/src/commands/config.rs
+++ b/src-tauri/src/commands/config.rs
@@ -5,10 +5,12 @@ pub async fn remote_read_raw_config(
pool: State<'_, SshConnectionPool>,
host_id: String,
) -> Result {
- // openclaw config get requires a path — there's no way to dump the full config via CLI.
- // Use sftp_read directly since this function's purpose is returning the entire raw config.
- let config_path = remote_resolve_openclaw_config_path(&pool, &host_id).await?;
- pool.sftp_read(&host_id, &config_path).await
+ timed_async!("remote_read_raw_config", {
+ // openclaw config get requires a path — there's no way to dump the full config via CLI.
+ // Use sftp_read directly since this function's purpose is returning the entire raw config.
+ let config_path = remote_resolve_openclaw_config_path(&pool, &host_id).await?;
+ pool.sftp_read(&host_id, &config_path).await
+ })
}
#[tauri::command]
@@ -17,18 +19,27 @@ pub async fn remote_write_raw_config(
host_id: String,
content: String,
) -> Result {
- // Validate it's valid config JSON using core module
- let next = clawpal_core::config::validate_config_json(&content)
- .map_err(|e| format!("Invalid JSON: {e}"))?;
- // Read current for snapshot
- let config_path = remote_resolve_openclaw_config_path(&pool, &host_id).await?;
- let current = pool
- .sftp_read(&host_id, &config_path)
- .await
- .unwrap_or_default();
- remote_write_config_with_snapshot(&pool, &host_id, &config_path, &current, &next, "raw-edit")
+ timed_async!("remote_write_raw_config", {
+ // Validate it's valid config JSON using core module
+ let next = clawpal_core::config::validate_config_json(&content)
+ .map_err(|e| format!("Invalid JSON: {e}"))?;
+ // Read current for snapshot
+ let config_path = remote_resolve_openclaw_config_path(&pool, &host_id).await?;
+ let current = pool
+ .sftp_read(&host_id, &config_path)
+ .await
+ .unwrap_or_default();
+ remote_write_config_with_snapshot(
+ &pool,
+ &host_id,
+ &config_path,
+ &current,
+ &next,
+ "raw-edit",
+ )
.await?;
- Ok(true)
+ Ok(true)
+ })
}
#[tauri::command]
@@ -38,29 +49,31 @@ pub async fn remote_apply_config_patch(
patch_template: String,
params: Map,
) -> Result {
- let (config_path, current_text, current) =
- remote_read_openclaw_config_text_and_json(&pool, &host_id).await?;
+ timed_async!("remote_apply_config_patch", {
+ let (config_path, current_text, current) =
+ remote_read_openclaw_config_text_and_json(&pool, &host_id).await?;
- // Use core function to build candidate config
- let (candidate, _changes) =
- clawpal_core::config::build_candidate_config(&current, &patch_template, &params)?;
+ // Use core function to build candidate config
+ let (candidate, _changes) =
+ clawpal_core::config::build_candidate_config(&current, &patch_template, &params)?;
- remote_write_config_with_snapshot(
- &pool,
- &host_id,
- &config_path,
- &current_text,
- &candidate,
- "config-patch",
- )
- .await?;
- Ok(ApplyResult {
- ok: true,
- snapshot_id: None,
- config_path,
- backup_path: None,
- warnings: Vec::new(),
- errors: Vec::new(),
+ remote_write_config_with_snapshot(
+ &pool,
+ &host_id,
+ &config_path,
+ &current_text,
+ &candidate,
+ "config-patch",
+ )
+ .await?;
+ Ok(ApplyResult {
+ ok: true,
+ snapshot_id: None,
+ config_path,
+ backup_path: None,
+ warnings: Vec::new(),
+ errors: Vec::new(),
+ })
})
}
@@ -69,41 +82,43 @@ pub async fn remote_list_history(
pool: State<'_, SshConnectionPool>,
host_id: String,
) -> Result {
- // Ensure dir exists
- pool.exec(&host_id, "mkdir -p ~/.clawpal/snapshots").await?;
- let entries = pool.sftp_list(&host_id, "~/.clawpal/snapshots").await?;
- let mut items: Vec = Vec::new();
- for entry in entries {
- if entry.name.starts_with('.') || entry.is_dir {
- continue;
+ timed_async!("remote_list_history", {
+ // Ensure dir exists
+ pool.exec(&host_id, "mkdir -p ~/.clawpal/snapshots").await?;
+ let entries = pool.sftp_list(&host_id, "~/.clawpal/snapshots").await?;
+ let mut items: Vec = Vec::new();
+ for entry in entries {
+ if entry.name.starts_with('.') || entry.is_dir {
+ continue;
+ }
+ // Parse filename: {unix_ts}-{source}-{summary}.json
+ let stem = entry.name.trim_end_matches(".json");
+ let parts: Vec<&str> = stem.splitn(3, '-').collect();
+ let ts_str = parts.first().unwrap_or(&"0");
+ let source = parts.get(1).unwrap_or(&"unknown");
+ let recipe_id = parts.get(2).map(|s| s.to_string());
+ let created_at = ts_str.parse::<i64>().unwrap_or(0);
+ // Convert Unix timestamp to ISO 8601 format for frontend compatibility
+ let created_at_iso = chrono::DateTime::from_timestamp(created_at, 0)
+ .map(|dt| dt.format("%Y-%m-%dT%H:%M:%SZ").to_string())
+ .unwrap_or_else(|| created_at.to_string());
+ let is_rollback = *source == "rollback";
+ items.push(serde_json::json!({
+ "id": entry.name,
+ "recipeId": recipe_id,
+ "createdAt": created_at_iso,
+ "source": source,
+ "canRollback": !is_rollback,
+ }));
}
- // Parse filename: {unix_ts}-{source}-{summary}.json
- let stem = entry.name.trim_end_matches(".json");
- let parts: Vec<&str> = stem.splitn(3, '-').collect();
- let ts_str = parts.first().unwrap_or(&"0");
- let source = parts.get(1).unwrap_or(&"unknown");
- let recipe_id = parts.get(2).map(|s| s.to_string());
- let created_at = ts_str.parse::<i64>().unwrap_or(0);
- // Convert Unix timestamp to ISO 8601 format for frontend compatibility
- let created_at_iso = chrono::DateTime::from_timestamp(created_at, 0)
- .map(|dt| dt.format("%Y-%m-%dT%H:%M:%SZ").to_string())
- .unwrap_or_else(|| created_at.to_string());
- let is_rollback = *source == "rollback";
- items.push(serde_json::json!({
- "id": entry.name,
- "recipeId": recipe_id,
- "createdAt": created_at_iso,
- "source": source,
- "canRollback": !is_rollback,
- }));
- }
- // Sort newest first
- items.sort_by(|a, b| {
- let ta = a["createdAt"].as_str().unwrap_or("");
- let tb = b["createdAt"].as_str().unwrap_or("");
- tb.cmp(ta)
- });
- Ok(serde_json::json!({ "items": items }))
+ // Sort newest first
+ items.sort_by(|a, b| {
+ let ta = a["createdAt"].as_str().unwrap_or("");
+ let tb = b["createdAt"].as_str().unwrap_or("");
+ tb.cmp(ta)
+ });
+ Ok(serde_json::json!({ "items": items }))
+ })
}
#[tauri::command]
@@ -112,28 +127,30 @@ pub async fn remote_preview_rollback(
host_id: String,
snapshot_id: String,
) -> Result {
- let snapshot_path = format!("~/.clawpal/snapshots/{snapshot_id}");
- let snapshot_text = pool.sftp_read(&host_id, &snapshot_path).await?;
- let target = clawpal_core::config::validate_config_json(&snapshot_text)
- .map_err(|e| format!("Failed to parse snapshot: {e}"))?;
+ timed_async!("remote_preview_rollback", {
+ let snapshot_path = format!("~/.clawpal/snapshots/{snapshot_id}");
+ let snapshot_text = pool.sftp_read(&host_id, &snapshot_path).await?;
+ let target = clawpal_core::config::validate_config_json(&snapshot_text)
+ .map_err(|e| format!("Failed to parse snapshot: {e}"))?;
- let (_config_path, _current_text, current) =
- remote_read_openclaw_config_text_and_json(&pool, &host_id).await?;
+ let (_config_path, _current_text, current) =
+ remote_read_openclaw_config_text_and_json(&pool, &host_id).await?;
- let before = clawpal_core::config::format_config_diff(&current, &current);
- let after = clawpal_core::config::format_config_diff(&target, &target);
- let diff = clawpal_core::config::format_config_diff(&current, &target);
+ let before = clawpal_core::config::format_config_diff(&current, &current);
+ let after = clawpal_core::config::format_config_diff(&target, &target);
+ let diff = clawpal_core::config::format_config_diff(&current, &target);
- Ok(PreviewResult {
- recipe_id: "rollback".into(),
- diff,
- config_before: before,
- config_after: after,
- changes: Vec::new(), // Core module doesn't expose change paths directly
- overwrites_existing: true,
- can_rollback: true,
- impact_level: "medium".into(),
- warnings: vec!["Rollback will replace current configuration".into()],
+ Ok(PreviewResult {
+ recipe_id: "rollback".into(),
+ diff,
+ config_before: before,
+ config_after: after,
+ changes: Vec::new(), // Core module doesn't expose change paths directly
+ overwrites_existing: true,
+ can_rollback: true,
+ impact_level: "medium".into(),
+ warnings: vec!["Rollback will replace current configuration".into()],
+ })
})
}
@@ -143,38 +160,42 @@ pub async fn remote_rollback(
host_id: String,
snapshot_id: String,
) -> Result {
- let snapshot_path = format!("~/.clawpal/snapshots/{snapshot_id}");
- let target_text = pool.sftp_read(&host_id, &snapshot_path).await?;
- let target = clawpal_core::config::validate_config_json(&target_text)
- .map_err(|e| format!("Failed to parse snapshot: {e}"))?;
+ timed_async!("remote_rollback", {
+ let snapshot_path = format!("~/.clawpal/snapshots/{snapshot_id}");
+ let target_text = pool.sftp_read(&host_id, &snapshot_path).await?;
+ let target = clawpal_core::config::validate_config_json(&target_text)
+ .map_err(|e| format!("Failed to parse snapshot: {e}"))?;
- let (config_path, current_text, _current) =
- remote_read_openclaw_config_text_and_json(&pool, &host_id).await?;
- remote_write_config_with_snapshot(
- &pool,
- &host_id,
- &config_path,
- &current_text,
- &target,
- "rollback",
- )
- .await?;
+ let (config_path, current_text, _current) =
+ remote_read_openclaw_config_text_and_json(&pool, &host_id).await?;
+ remote_write_config_with_snapshot(
+ &pool,
+ &host_id,
+ &config_path,
+ &current_text,
+ &target,
+ "rollback",
+ )
+ .await?;
- Ok(ApplyResult {
- ok: true,
- snapshot_id: Some(snapshot_id),
- config_path,
- backup_path: None,
- warnings: vec!["rolled back".into()],
- errors: Vec::new(),
+ Ok(ApplyResult {
+ ok: true,
+ snapshot_id: Some(snapshot_id),
+ config_path,
+ backup_path: None,
+ warnings: vec!["rolled back".into()],
+ errors: Vec::new(),
+ })
})
}
#[tauri::command]
pub fn read_raw_config() -> Result {
- let paths = resolve_paths();
- let cfg = read_openclaw_config(&paths)?;
- serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())
+ timed_sync!("read_raw_config", {
+ let paths = resolve_paths();
+ let cfg = read_openclaw_config(&paths)?;
+ serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())
+ })
}
#[tauri::command]
@@ -182,120 +203,128 @@ pub fn apply_config_patch(
patch_template: String,
params: Map,
) -> Result {
- let paths = resolve_paths();
- ensure_dirs(&paths)?;
- let current = read_openclaw_config(&paths)?;
- let current_text = serde_json::to_string_pretty(&current).map_err(|e| e.to_string())?;
- let snapshot = add_snapshot(
- &paths.history_dir,
- &paths.metadata_path,
- Some("config-patch".into()),
- "apply",
- true,
- &current_text,
- None,
- )?;
- let (candidate, _changes) =
- build_candidate_config_from_template(&current, &patch_template, &params)?;
- write_json(&paths.config_path, &candidate)?;
- let mut warnings = Vec::new();
- if let Err(err) = sync_main_auth_for_config(&paths, &candidate) {
- warnings.push(format!("main auth sync skipped: {err}"));
- }
- Ok(ApplyResult {
- ok: true,
- snapshot_id: Some(snapshot.id),
- config_path: paths.config_path.to_string_lossy().to_string(),
- backup_path: Some(snapshot.config_path),
- warnings,
- errors: Vec::new(),
+ timed_sync!("apply_config_patch", {
+ let paths = resolve_paths();
+ ensure_dirs(&paths)?;
+ let current = read_openclaw_config(&paths)?;
+ let current_text = serde_json::to_string_pretty(&current).map_err(|e| e.to_string())?;
+ let snapshot = add_snapshot(
+ &paths.history_dir,
+ &paths.metadata_path,
+ Some("config-patch".into()),
+ "apply",
+ true,
+ &current_text,
+ None,
+ )?;
+ let (candidate, _changes) =
+ build_candidate_config_from_template(&current, &patch_template, &params)?;
+ write_json(&paths.config_path, &candidate)?;
+ let mut warnings = Vec::new();
+ if let Err(err) = sync_main_auth_for_config(&paths, &candidate) {
+ warnings.push(format!("main auth sync skipped: {err}"));
+ }
+ Ok(ApplyResult {
+ ok: true,
+ snapshot_id: Some(snapshot.id),
+ config_path: paths.config_path.to_string_lossy().to_string(),
+ backup_path: Some(snapshot.config_path),
+ warnings,
+ errors: Vec::new(),
+ })
})
}
#[tauri::command]
pub fn list_history(limit: usize, offset: usize) -> Result {
- let paths = resolve_paths();
- let index = list_snapshots(&paths.metadata_path)?;
- let items = index
- .items
- .into_iter()
- .skip(offset)
- .take(limit)
- .map(|item| HistoryItem {
- id: item.id,
- recipe_id: item.recipe_id,
- created_at: item.created_at,
- source: item.source,
- can_rollback: item.can_rollback,
- rollback_of: item.rollback_of,
- })
- .collect();
- Ok(HistoryPage { items })
+ timed_sync!("list_history", {
+ let paths = resolve_paths();
+ let index = list_snapshots(&paths.metadata_path)?;
+ let items = index
+ .items
+ .into_iter()
+ .skip(offset)
+ .take(limit)
+ .map(|item| HistoryItem {
+ id: item.id,
+ recipe_id: item.recipe_id,
+ created_at: item.created_at,
+ source: item.source,
+ can_rollback: item.can_rollback,
+ rollback_of: item.rollback_of,
+ })
+ .collect();
+ Ok(HistoryPage { items })
+ })
}
#[tauri::command]
pub fn preview_rollback(snapshot_id: String) -> Result {
- let paths = resolve_paths();
- let index = list_snapshots(&paths.metadata_path)?;
- let target = index
- .items
- .into_iter()
- .find(|s| s.id == snapshot_id)
- .ok_or_else(|| "snapshot not found".to_string())?;
- if !target.can_rollback {
- return Err("snapshot is not rollbackable".to_string());
- }
+ timed_sync!("preview_rollback", {
+ let paths = resolve_paths();
+ let index = list_snapshots(&paths.metadata_path)?;
+ let target = index
+ .items
+ .into_iter()
+ .find(|s| s.id == snapshot_id)
+ .ok_or_else(|| "snapshot not found".to_string())?;
+ if !target.can_rollback {
+ return Err("snapshot is not rollbackable".to_string());
+ }
- let current = read_openclaw_config(&paths)?;
- let target_text = read_snapshot(&target.config_path)?;
- let target_json = clawpal_core::doctor::parse_json5_document_or_default(&target_text);
- let before_text = serde_json::to_string_pretty(&current).unwrap_or_else(|_| "{}".into());
- let after_text = serde_json::to_string_pretty(&target_json).unwrap_or_else(|_| "{}".into());
- Ok(PreviewResult {
- recipe_id: "rollback".into(),
- diff: format_diff(&current, &target_json),
- config_before: before_text,
- config_after: after_text,
- changes: collect_change_paths(&current, &target_json),
- overwrites_existing: true,
- can_rollback: true,
- impact_level: "medium".into(),
- warnings: vec!["Rollback will replace current configuration".into()],
+ let current = read_openclaw_config(&paths)?;
+ let target_text = read_snapshot(&target.config_path)?;
+ let target_json = clawpal_core::doctor::parse_json5_document_or_default(&target_text);
+ let before_text = serde_json::to_string_pretty(&current).unwrap_or_else(|_| "{}".into());
+ let after_text = serde_json::to_string_pretty(&target_json).unwrap_or_else(|_| "{}".into());
+ Ok(PreviewResult {
+ recipe_id: "rollback".into(),
+ diff: format_diff(&current, &target_json),
+ config_before: before_text,
+ config_after: after_text,
+ changes: collect_change_paths(&current, &target_json),
+ overwrites_existing: true,
+ can_rollback: true,
+ impact_level: "medium".into(),
+ warnings: vec!["Rollback will replace current configuration".into()],
+ })
})
}
#[tauri::command]
pub fn rollback(snapshot_id: String) -> Result {
- let paths = resolve_paths();
- ensure_dirs(&paths)?;
- let index = list_snapshots(&paths.metadata_path)?;
- let target = index
- .items
- .into_iter()
- .find(|s| s.id == snapshot_id)
- .ok_or_else(|| "snapshot not found".to_string())?;
- if !target.can_rollback {
- return Err("snapshot is not rollbackable".to_string());
- }
- let target_text = read_snapshot(&target.config_path)?;
- let backup = read_openclaw_config(&paths)?;
- let backup_text = serde_json::to_string_pretty(&backup).map_err(|e| e.to_string())?;
- let _ = add_snapshot(
- &paths.history_dir,
- &paths.metadata_path,
- target.recipe_id.clone(),
- "rollback",
- true,
- &backup_text,
- Some(target.id.clone()),
- )?;
- write_text(&paths.config_path, &target_text)?;
- Ok(ApplyResult {
- ok: true,
- snapshot_id: Some(target.id),
- config_path: paths.config_path.to_string_lossy().to_string(),
- backup_path: None,
- warnings: vec!["rolled back".into()],
- errors: Vec::new(),
+ timed_sync!("rollback", {
+ let paths = resolve_paths();
+ ensure_dirs(&paths)?;
+ let index = list_snapshots(&paths.metadata_path)?;
+ let target = index
+ .items
+ .into_iter()
+ .find(|s| s.id == snapshot_id)
+ .ok_or_else(|| "snapshot not found".to_string())?;
+ if !target.can_rollback {
+ return Err("snapshot is not rollbackable".to_string());
+ }
+ let target_text = read_snapshot(&target.config_path)?;
+ let backup = read_openclaw_config(&paths)?;
+ let backup_text = serde_json::to_string_pretty(&backup).map_err(|e| e.to_string())?;
+ let _ = add_snapshot(
+ &paths.history_dir,
+ &paths.metadata_path,
+ target.recipe_id.clone(),
+ "rollback",
+ true,
+ &backup_text,
+ Some(target.id.clone()),
+ )?;
+ write_text(&paths.config_path, &target_text)?;
+ Ok(ApplyResult {
+ ok: true,
+ snapshot_id: Some(target.id),
+ config_path: paths.config_path.to_string_lossy().to_string(),
+ backup_path: None,
+ warnings: vec!["rolled back".into()],
+ errors: Vec::new(),
+ })
})
}
diff --git a/src-tauri/src/commands/cron.rs b/src-tauri/src/commands/cron.rs
index 0a7b0978..51ebfe35 100644
--- a/src-tauri/src/commands/cron.rs
+++ b/src-tauri/src/commands/cron.rs
@@ -5,11 +5,13 @@ pub async fn remote_list_cron_jobs(
pool: State<'_, SshConnectionPool>,
host_id: String,
) -> Result {
- let raw = pool.sftp_read(&host_id, "~/.openclaw/cron/jobs.json").await;
- match raw {
- Ok(text) => Ok(parse_cron_jobs(&text)),
- Err(_) => Ok(Value::Array(vec![])),
- }
+ timed_async!("remote_list_cron_jobs", {
+ let raw = pool.sftp_read(&host_id, "~/.openclaw/cron/jobs.json").await;
+ match raw {
+ Ok(text) => Ok(parse_cron_jobs(&text)),
+ Err(_) => Ok(Value::Array(vec![])),
+ }
+ })
}
#[tauri::command]
@@ -19,17 +21,19 @@ pub async fn remote_get_cron_runs(
job_id: String,
limit: Option,
) -> Result, String> {
- let path = format!("~/.openclaw/cron/runs/{}.jsonl", job_id);
- let raw = pool.sftp_read(&host_id, &path).await;
- match raw {
- Ok(text) => {
- let mut runs = clawpal_core::cron::parse_cron_runs(&text)?;
- let limit = limit.unwrap_or(10);
- runs.truncate(limit);
- Ok(runs)
+ timed_async!("remote_get_cron_runs", {
+ let path = format!("~/.openclaw/cron/runs/{}.jsonl", job_id);
+ let raw = pool.sftp_read(&host_id, &path).await;
+ match raw {
+ Ok(text) => {
+ let mut runs = clawpal_core::cron::parse_cron_runs(&text)?;
+ let limit = limit.unwrap_or(10);
+ runs.truncate(limit);
+ Ok(runs)
+ }
+ Err(_) => Ok(vec![]),
}
- Err(_) => Ok(vec![]),
- }
+ })
}
#[tauri::command]
@@ -38,17 +42,19 @@ pub async fn remote_trigger_cron_job(
host_id: String,
job_id: String,
) -> Result {
- let result = pool
- .exec_login(
- &host_id,
- &format!("openclaw cron run {}", shell_escape(&job_id)),
- )
- .await?;
- if result.exit_code == 0 {
- Ok(result.stdout)
- } else {
- Err(format!("{}\n{}", result.stdout, result.stderr))
- }
+ timed_async!("remote_trigger_cron_job", {
+ let result = pool
+ .exec_login(
+ &host_id,
+ &format!("openclaw cron run {}", shell_escape(&job_id)),
+ )
+ .await?;
+ if result.exit_code == 0 {
+ Ok(result.stdout)
+ } else {
+ Err(format!("{}\n{}", result.stdout, result.stderr))
+ }
+ })
}
#[tauri::command]
@@ -57,53 +63,88 @@ pub async fn remote_delete_cron_job(
host_id: String,
job_id: String,
) -> Result {
- let result = pool
- .exec_login(
- &host_id,
- &format!("openclaw cron remove {}", shell_escape(&job_id)),
- )
- .await?;
- if result.exit_code == 0 {
- Ok(result.stdout)
- } else {
- Err(format!("{}\n{}", result.stdout, result.stderr))
- }
+ timed_async!("remote_delete_cron_job", {
+ let result = pool
+ .exec_login(
+ &host_id,
+ &format!("openclaw cron remove {}", shell_escape(&job_id)),
+ )
+ .await?;
+ if result.exit_code == 0 {
+ Ok(result.stdout)
+ } else {
+ Err(format!("{}\n{}", result.stdout, result.stderr))
+ }
+ })
}
#[tauri::command]
pub fn list_cron_jobs() -> Result {
- let paths = resolve_paths();
- let jobs_path = paths.base_dir.join("cron").join("jobs.json");
- if !jobs_path.exists() {
- return Ok(Value::Array(vec![]));
- }
- let text = std::fs::read_to_string(&jobs_path).map_err(|e| e.to_string())?;
- Ok(parse_cron_jobs(&text))
+ timed_sync!("list_cron_jobs", {
+ let paths = resolve_paths();
+ let jobs_path = paths.base_dir.join("cron").join("jobs.json");
+ if !jobs_path.exists() {
+ return Ok(Value::Array(vec![]));
+ }
+ let text = std::fs::read_to_string(&jobs_path).map_err(|e| e.to_string())?;
+ Ok(parse_cron_jobs(&text))
+ })
}
#[tauri::command]
pub fn get_cron_runs(job_id: String, limit: Option) -> Result, String> {
- let paths = resolve_paths();
- let runs_path = paths
- .base_dir
- .join("cron")
- .join("runs")
- .join(format!("{}.jsonl", job_id));
- if !runs_path.exists() {
- return Ok(vec![]);
- }
- let text = std::fs::read_to_string(&runs_path).map_err(|e| e.to_string())?;
- let mut runs = clawpal_core::cron::parse_cron_runs(&text)?;
- let limit = limit.unwrap_or(10);
- runs.truncate(limit);
- Ok(runs)
+ timed_sync!("get_cron_runs", {
+ let paths = resolve_paths();
+ let runs_path = paths
+ .base_dir
+ .join("cron")
+ .join("runs")
+ .join(format!("{}.jsonl", job_id));
+ if !runs_path.exists() {
+ return Ok(vec![]);
+ }
+ let text = std::fs::read_to_string(&runs_path).map_err(|e| e.to_string())?;
+ let mut runs = clawpal_core::cron::parse_cron_runs(&text)?;
+ let limit = limit.unwrap_or(10);
+ runs.truncate(limit);
+ Ok(runs)
+ })
}
#[tauri::command]
pub async fn trigger_cron_job(job_id: String) -> Result {
- tauri::async_runtime::spawn_blocking(move || {
+ timed_async!("trigger_cron_job", {
+ tauri::async_runtime::spawn_blocking(move || {
+ let mut cmd =
+ std::process::Command::new(clawpal_core::openclaw::resolve_openclaw_bin());
+ cmd.args(["cron", "run", &job_id]);
+ if let Some(path) = crate::cli_runner::get_active_openclaw_home_override() {
+ cmd.env("OPENCLAW_HOME", path);
+ }
+ let output = cmd
+ .output()
+ .map_err(|e| format!("Failed to run openclaw: {e}"))?;
+ let stdout = String::from_utf8_lossy(&output.stdout).to_string();
+ let stderr = String::from_utf8_lossy(&output.stderr).to_string();
+ if output.status.success() {
+ Ok(stdout)
+ } else {
+ // Extract meaningful error lines, skip Doctor warning banners
+ let error_msg =
+ clawpal_core::doctor::strip_doctor_banner(&format!("{stdout}\n{stderr}"));
+ Err(error_msg)
+ }
+ })
+ .await
+ .map_err(|e| format!("Task failed: {e}"))?
+ })
+}
+
+#[tauri::command]
+pub fn delete_cron_job(job_id: String) -> Result {
+ timed_sync!("delete_cron_job", {
let mut cmd = std::process::Command::new(clawpal_core::openclaw::resolve_openclaw_bin());
- cmd.args(["cron", "run", &job_id]);
+ cmd.args(["cron", "remove", &job_id]);
if let Some(path) = crate::cli_runner::get_active_openclaw_home_override() {
cmd.env("OPENCLAW_HOME", path);
}
@@ -115,31 +156,7 @@ pub async fn trigger_cron_job(job_id: String) -> Result {
if output.status.success() {
Ok(stdout)
} else {
- // Extract meaningful error lines, skip Doctor warning banners
- let error_msg =
- clawpal_core::doctor::strip_doctor_banner(&format!("{stdout}\n{stderr}"));
- Err(error_msg)
+ Err(format!("{stdout}\n{stderr}"))
}
})
- .await
- .map_err(|e| format!("Task failed: {e}"))?
-}
-
-#[tauri::command]
-pub fn delete_cron_job(job_id: String) -> Result {
- let mut cmd = std::process::Command::new(clawpal_core::openclaw::resolve_openclaw_bin());
- cmd.args(["cron", "remove", &job_id]);
- if let Some(path) = crate::cli_runner::get_active_openclaw_home_override() {
- cmd.env("OPENCLAW_HOME", path);
- }
- let output = cmd
- .output()
- .map_err(|e| format!("Failed to run openclaw: {e}"))?;
- let stdout = String::from_utf8_lossy(&output.stdout).to_string();
- let stderr = String::from_utf8_lossy(&output.stderr).to_string();
- if output.status.success() {
- Ok(stdout)
- } else {
- Err(format!("{stdout}\n{stderr}"))
- }
}
diff --git a/src-tauri/src/commands/discover_local.rs b/src-tauri/src/commands/discover_local.rs
index 3df602b6..7d7f70dd 100644
--- a/src-tauri/src/commands/discover_local.rs
+++ b/src-tauri/src/commands/discover_local.rs
@@ -45,9 +45,11 @@ fn slug_from_name(name: &str) -> String {
/// or exist as data directories under `~/.clawpal/`.
#[tauri::command]
pub async fn discover_local_instances() -> Result, String> {
- tauri::async_runtime::spawn_blocking(|| discover_blocking())
- .await
- .map_err(|e| e.to_string())?
+ timed_async!("discover_local_instances", {
+ tauri::async_runtime::spawn_blocking(|| discover_blocking())
+ .await
+ .map_err(|e| e.to_string())?
+ })
}
fn discover_blocking() -> Result, String> {
diff --git a/src-tauri/src/commands/discovery.rs b/src-tauri/src/commands/discovery.rs
index 5ba0ebbd..dc3fd7f0 100644
--- a/src-tauri/src/commands/discovery.rs
+++ b/src-tauri/src/commands/discovery.rs
@@ -5,282 +5,284 @@ pub async fn remote_list_discord_guild_channels(
pool: State<'_, SshConnectionPool>,
host_id: String,
) -> Result, String> {
- let output = crate::cli_runner::run_openclaw_remote(
- &pool,
- &host_id,
- &["config", "get", "channels.discord", "--json"],
- )
- .await?;
- let discord_section = if output.exit_code == 0 {
- crate::cli_runner::parse_json_output(&output).unwrap_or(Value::Null)
- } else {
- Value::Null
- };
- let bindings_output = crate::cli_runner::run_openclaw_remote(
- &pool,
- &host_id,
- &["config", "get", "bindings", "--json"],
- )
- .await?;
- let bindings_section = if bindings_output.exit_code == 0 {
- crate::cli_runner::parse_json_output(&bindings_output)
- .unwrap_or_else(|_| Value::Array(Vec::new()))
- } else {
- Value::Array(Vec::new())
- };
- // Wrap to match existing code expectations (rest of function uses cfg.get("channels").and_then(|c| c.get("discord")))
- let cfg = serde_json::json!({
- "channels": { "discord": discord_section },
- "bindings": bindings_section
- });
-
- let discord_cfg = cfg.get("channels").and_then(|c| c.get("discord"));
- let configured_single_guild_id = discord_cfg
- .and_then(|d| d.get("guilds"))
- .and_then(Value::as_object)
- .and_then(|guilds| {
- if guilds.len() == 1 {
- guilds.keys().next().cloned()
- } else {
- None
- }
+ timed_async!("remote_list_discord_guild_channels", {
+ let output = crate::cli_runner::run_openclaw_remote(
+ &pool,
+ &host_id,
+ &["config", "get", "channels.discord", "--json"],
+ )
+ .await?;
+ let discord_section = if output.exit_code == 0 {
+ crate::cli_runner::parse_json_output(&output).unwrap_or(Value::Null)
+ } else {
+ Value::Null
+ };
+ let bindings_output = crate::cli_runner::run_openclaw_remote(
+ &pool,
+ &host_id,
+ &["config", "get", "bindings", "--json"],
+ )
+ .await?;
+ let bindings_section = if bindings_output.exit_code == 0 {
+ crate::cli_runner::parse_json_output(&bindings_output)
+ .unwrap_or_else(|_| Value::Array(Vec::new()))
+ } else {
+ Value::Array(Vec::new())
+ };
+ // Wrap to match existing code expectations (rest of function uses cfg.get("channels").and_then(|c| c.get("discord")))
+ let cfg = serde_json::json!({
+ "channels": { "discord": discord_section },
+ "bindings": bindings_section
});
- // Extract bot token: top-level first, then fall back to first account token
- let bot_token = discord_cfg
- .and_then(|d| d.get("botToken").or_else(|| d.get("token")))
- .and_then(Value::as_str)
- .map(|s| s.to_string())
- .or_else(|| {
- discord_cfg
- .and_then(|d| d.get("accounts"))
- .and_then(Value::as_object)
- .and_then(|accounts| {
- accounts.values().find_map(|acct| {
- acct.get("token")
- .and_then(Value::as_str)
- .filter(|s| !s.is_empty())
- .map(|s| s.to_string())
+ let discord_cfg = cfg.get("channels").and_then(|c| c.get("discord"));
+ let configured_single_guild_id = discord_cfg
+ .and_then(|d| d.get("guilds"))
+ .and_then(Value::as_object)
+ .and_then(|guilds| {
+ if guilds.len() == 1 {
+ guilds.keys().next().cloned()
+ } else {
+ None
+ }
+ });
+
+ // Extract bot token: top-level first, then fall back to first account token
+ let bot_token = discord_cfg
+ .and_then(|d| d.get("botToken").or_else(|| d.get("token")))
+ .and_then(Value::as_str)
+ .map(|s| s.to_string())
+ .or_else(|| {
+ discord_cfg
+ .and_then(|d| d.get("accounts"))
+ .and_then(Value::as_object)
+ .and_then(|accounts| {
+ accounts.values().find_map(|acct| {
+ acct.get("token")
+ .and_then(Value::as_str)
+ .filter(|s| !s.is_empty())
+ .map(|s| s.to_string())
+ })
})
- })
- });
- let mut guild_name_fallback_map = pool
- .sftp_read(&host_id, "~/.clawpal/discord-guild-channels.json")
- .await
- .ok()
- .map(|text| parse_discord_cache_guild_name_fallbacks(&text))
- .unwrap_or_default();
- guild_name_fallback_map.extend(collect_discord_config_guild_name_fallbacks(discord_cfg));
-
- let core_channels = clawpal_core::discovery::parse_guild_channels(&cfg.to_string())?;
- let mut entries: Vec = core_channels
- .iter()
- .map(|c| DiscordGuildChannel {
- guild_id: c.guild_id.clone(),
- guild_name: c.guild_name.clone(),
- channel_id: c.channel_id.clone(),
- channel_name: c.channel_name.clone(),
- default_agent_id: None,
- })
- .collect();
- let mut channel_ids: Vec = entries.iter().map(|e| e.channel_id.clone()).collect();
- let mut unresolved_guild_ids: Vec = entries
- .iter()
- .filter(|e| e.guild_name == e.guild_id)
- .map(|e| e.guild_id.clone())
- .collect();
- unresolved_guild_ids.sort();
- unresolved_guild_ids.dedup();
-
- // Fallback A: if we have token + guild ids, fetch channels from Discord REST directly.
- // This avoids hard-failing when CLI rejects config due non-critical schema drift.
- if channel_ids.is_empty() {
- let configured_guild_ids = collect_discord_config_guild_ids(discord_cfg);
- if let Some(token) = bot_token.clone() {
- let rest_entries = tokio::task::spawn_blocking(move || {
- let mut out: Vec = Vec::new();
- for guild_id in configured_guild_ids {
- if let Ok(channels) = fetch_discord_guild_channels(&token, &guild_id) {
- for (channel_id, channel_name) in channels {
- if out
- .iter()
- .any(|e| e.guild_id == guild_id && e.channel_id == channel_id)
- {
- continue;
+ });
+ let mut guild_name_fallback_map = pool
+ .sftp_read(&host_id, "~/.clawpal/discord-guild-channels.json")
+ .await
+ .ok()
+ .map(|text| parse_discord_cache_guild_name_fallbacks(&text))
+ .unwrap_or_default();
+ guild_name_fallback_map.extend(collect_discord_config_guild_name_fallbacks(discord_cfg));
+
+ let core_channels = clawpal_core::discovery::parse_guild_channels(&cfg.to_string())?;
+ let mut entries: Vec = core_channels
+ .iter()
+ .map(|c| DiscordGuildChannel {
+ guild_id: c.guild_id.clone(),
+ guild_name: c.guild_name.clone(),
+ channel_id: c.channel_id.clone(),
+ channel_name: c.channel_name.clone(),
+ default_agent_id: None,
+ })
+ .collect();
+ let mut channel_ids: Vec = entries.iter().map(|e| e.channel_id.clone()).collect();
+ let mut unresolved_guild_ids: Vec = entries
+ .iter()
+ .filter(|e| e.guild_name == e.guild_id)
+ .map(|e| e.guild_id.clone())
+ .collect();
+ unresolved_guild_ids.sort();
+ unresolved_guild_ids.dedup();
+
+ // Fallback A: if we have token + guild ids, fetch channels from Discord REST directly.
+ // This avoids hard-failing when CLI rejects config due non-critical schema drift.
+ if channel_ids.is_empty() {
+ let configured_guild_ids = collect_discord_config_guild_ids(discord_cfg);
+ if let Some(token) = bot_token.clone() {
+ let rest_entries = tokio::task::spawn_blocking(move || {
+ let mut out: Vec = Vec::new();
+ for guild_id in configured_guild_ids {
+ if let Ok(channels) = fetch_discord_guild_channels(&token, &guild_id) {
+ for (channel_id, channel_name) in channels {
+ if out
+ .iter()
+ .any(|e| e.guild_id == guild_id && e.channel_id == channel_id)
+ {
+ continue;
+ }
+ out.push(DiscordGuildChannel {
+ guild_id: guild_id.clone(),
+ guild_name: guild_id.clone(),
+ channel_id,
+ channel_name,
+ default_agent_id: None,
+ });
}
- out.push(DiscordGuildChannel {
- guild_id: guild_id.clone(),
- guild_name: guild_id.clone(),
- channel_id,
- channel_name,
- default_agent_id: None,
- });
}
}
- }
- out
- })
- .await
- .unwrap_or_default();
- for entry in rest_entries {
- if entries
- .iter()
- .any(|e| e.guild_id == entry.guild_id && e.channel_id == entry.channel_id)
- {
- continue;
- }
- channel_ids.push(entry.channel_id.clone());
- entries.push(entry);
- }
- }
- }
-
- // Fallback B: query channel ids from directory and keep compatibility
- // with existing cache shape when config has no explicit channel map.
- if channel_ids.is_empty() {
- let cmd = "openclaw directory groups list --channel discord --json";
- if let Ok(r) = pool.exec_login(&host_id, cmd).await {
- if r.exit_code == 0 && !r.stdout.trim().is_empty() {
- for channel_id in parse_directory_group_channel_ids(&r.stdout) {
- if entries.iter().any(|e| e.channel_id == channel_id) {
+ out
+ })
+ .await
+ .unwrap_or_default();
+ for entry in rest_entries {
+ if entries
+ .iter()
+ .any(|e| e.guild_id == entry.guild_id && e.channel_id == entry.channel_id)
+ {
continue;
}
- let (guild_id, guild_name) =
- if let Some(gid) = configured_single_guild_id.clone() {
- (gid.clone(), gid)
- } else {
- ("discord".to_string(), "Discord".to_string())
- };
- channel_ids.push(channel_id.clone());
- entries.push(DiscordGuildChannel {
- guild_id,
- guild_name,
- channel_id: channel_id.clone(),
- channel_name: channel_id,
- default_agent_id: None,
- });
+ channel_ids.push(entry.channel_id.clone());
+ entries.push(entry);
}
}
}
- }
-
- // Resolve channel names via openclaw CLI on remote
- if !channel_ids.is_empty() {
- let ids_arg = channel_ids.join(" ");
- let cmd = format!(
- "openclaw channels resolve --json --channel discord --kind auto {}",
- ids_arg
- );
- if let Ok(r) = pool.exec_login(&host_id, &cmd).await {
- if r.exit_code == 0 && !r.stdout.trim().is_empty() {
- if let Some(name_map) = parse_resolve_name_map(&r.stdout) {
- for entry in &mut entries {
- if let Some(name) = name_map.get(&entry.channel_id) {
- entry.channel_name = name.clone();
+
+ // Fallback B: query channel ids from directory and keep compatibility
+ // with existing cache shape when config has no explicit channel map.
+ if channel_ids.is_empty() {
+ let cmd = "openclaw directory groups list --channel discord --json";
+ if let Ok(r) = pool.exec_login(&host_id, cmd).await {
+ if r.exit_code == 0 && !r.stdout.trim().is_empty() {
+ for channel_id in parse_directory_group_channel_ids(&r.stdout) {
+ if entries.iter().any(|e| e.channel_id == channel_id) {
+ continue;
}
+ let (guild_id, guild_name) =
+ if let Some(gid) = configured_single_guild_id.clone() {
+ (gid.clone(), gid)
+ } else {
+ ("discord".to_string(), "Discord".to_string())
+ };
+ channel_ids.push(channel_id.clone());
+ entries.push(DiscordGuildChannel {
+ guild_id,
+ guild_name,
+ channel_id: channel_id.clone(),
+ channel_name: channel_id,
+ default_agent_id: None,
+ });
}
}
}
}
- }
-
- // Resolve guild names via Discord REST API (guild names can't be resolved by openclaw CLI)
- // Must use spawn_blocking because reqwest::blocking panics in async context
- if let Some(token) = bot_token {
- if !unresolved_guild_ids.is_empty() {
- let guild_name_map = tokio::task::spawn_blocking(move || {
- let mut map = std::collections::HashMap::new();
- for gid in &unresolved_guild_ids {
- if let Ok(name) = fetch_discord_guild_name(&token, gid) {
- map.insert(gid.clone(), name);
+
+ // Resolve channel names via openclaw CLI on remote
+ if !channel_ids.is_empty() {
+ let ids_arg = channel_ids.join(" ");
+ let cmd = format!(
+ "openclaw channels resolve --json --channel discord --kind auto {}",
+ ids_arg
+ );
+ if let Ok(r) = pool.exec_login(&host_id, &cmd).await {
+ if r.exit_code == 0 && !r.stdout.trim().is_empty() {
+ if let Some(name_map) = parse_resolve_name_map(&r.stdout) {
+ for entry in &mut entries {
+ if let Some(name) = name_map.get(&entry.channel_id) {
+ entry.channel_name = name.clone();
+ }
+ }
}
}
- map
- })
- .await
- .unwrap_or_default();
- for entry in &mut entries {
- if let Some(name) = guild_name_map.get(&entry.guild_id) {
- entry.guild_name = name.clone();
- }
}
}
- }
- for entry in &mut entries {
- if entry.guild_name == entry.guild_id {
- if let Some(name) = guild_name_fallback_map.get(&entry.guild_id) {
- entry.guild_name = name.clone();
+
+ // Resolve guild names via Discord REST API (guild names can't be resolved by openclaw CLI)
+ // Must use spawn_blocking because reqwest::blocking panics in async context
+ if let Some(token) = bot_token {
+ if !unresolved_guild_ids.is_empty() {
+ let guild_name_map = tokio::task::spawn_blocking(move || {
+ let mut map = std::collections::HashMap::new();
+ for gid in &unresolved_guild_ids {
+ if let Ok(name) = fetch_discord_guild_name(&token, gid) {
+ map.insert(gid.clone(), name);
+ }
+ }
+ map
+ })
+ .await
+ .unwrap_or_default();
+ for entry in &mut entries {
+ if let Some(name) = guild_name_map.get(&entry.guild_id) {
+ entry.guild_name = name.clone();
+ }
+ }
}
}
- }
-
- // Resolve default agent per guild from account config + bindings (remote)
- {
- // Build account_id -> default agent_id from bindings (account-level, no peer)
- let mut account_agent_map: std::collections::HashMap =
- std::collections::HashMap::new();
- if let Some(bindings) = cfg.get("bindings").and_then(Value::as_array) {
- for b in bindings {
- let m = match b.get("match") {
- Some(m) => m,
- None => continue,
- };
- if m.get("channel").and_then(Value::as_str) != Some("discord") {
- continue;
- }
- let account_id = match m.get("accountId").and_then(Value::as_str) {
- Some(s) => s,
- None => continue,
- };
- if m.get("peer").and_then(|p| p.get("id")).is_some() {
- continue;
- } // skip channel-specific
- if let Some(agent_id) = b.get("agentId").and_then(Value::as_str) {
- account_agent_map
- .entry(account_id.to_string())
- .or_insert_with(|| agent_id.to_string());
+ for entry in &mut entries {
+ if entry.guild_name == entry.guild_id {
+ if let Some(name) = guild_name_fallback_map.get(&entry.guild_id) {
+ entry.guild_name = name.clone();
}
}
}
- // Build guild_id -> default agent from account->guild mapping
- let mut guild_default_agent: std::collections::HashMap =
- std::collections::HashMap::new();
- if let Some(accounts) = discord_cfg
- .and_then(|d| d.get("accounts"))
- .and_then(Value::as_object)
+
+ // Resolve default agent per guild from account config + bindings (remote)
{
- for (account_id, account_val) in accounts {
- let agent = account_agent_map
- .get(account_id)
- .cloned()
- .unwrap_or_else(|| account_id.clone());
- if let Some(guilds) = account_val.get("guilds").and_then(Value::as_object) {
- for guild_id in guilds.keys() {
- guild_default_agent
- .entry(guild_id.clone())
- .or_insert(agent.clone());
+ // Build account_id -> default agent_id from bindings (account-level, no peer)
+ let mut account_agent_map: std::collections::HashMap =
+ std::collections::HashMap::new();
+ if let Some(bindings) = cfg.get("bindings").and_then(Value::as_array) {
+ for b in bindings {
+ let m = match b.get("match") {
+ Some(m) => m,
+ None => continue,
+ };
+ if m.get("channel").and_then(Value::as_str) != Some("discord") {
+ continue;
+ }
+ let account_id = match m.get("accountId").and_then(Value::as_str) {
+ Some(s) => s,
+ None => continue,
+ };
+ if m.get("peer").and_then(|p| p.get("id")).is_some() {
+ continue;
+ } // skip channel-specific
+ if let Some(agent_id) = b.get("agentId").and_then(Value::as_str) {
+ account_agent_map
+ .entry(account_id.to_string())
+ .or_insert_with(|| agent_id.to_string());
}
}
}
- }
- for entry in &mut entries {
- if entry.default_agent_id.is_none() {
- if let Some(agent_id) = guild_default_agent.get(&entry.guild_id) {
- entry.default_agent_id = Some(agent_id.clone());
+ // Build guild_id -> default agent from account->guild mapping
+ let mut guild_default_agent: std::collections::HashMap =
+ std::collections::HashMap::new();
+ if let Some(accounts) = discord_cfg
+ .and_then(|d| d.get("accounts"))
+ .and_then(Value::as_object)
+ {
+ for (account_id, account_val) in accounts {
+ let agent = account_agent_map
+ .get(account_id)
+ .cloned()
+ .unwrap_or_else(|| account_id.clone());
+ if let Some(guilds) = account_val.get("guilds").and_then(Value::as_object) {
+ for guild_id in guilds.keys() {
+ guild_default_agent
+ .entry(guild_id.clone())
+ .or_insert(agent.clone());
+ }
+ }
+ }
+ }
+ for entry in &mut entries {
+ if entry.default_agent_id.is_none() {
+ if let Some(agent_id) = guild_default_agent.get(&entry.guild_id) {
+ entry.default_agent_id = Some(agent_id.clone());
+ }
}
}
}
- }
- // Persist to remote cache
- if !entries.is_empty() {
- let json = serde_json::to_string_pretty(&entries).map_err(|e| e.to_string())?;
- let _ = pool
- .sftp_write(&host_id, "~/.clawpal/discord-guild-channels.json", &json)
- .await;
- }
+ // Persist to remote cache
+ if !entries.is_empty() {
+ let json = serde_json::to_string_pretty(&entries).map_err(|e| e.to_string())?;
+ let _ = pool
+ .sftp_write(&host_id, "~/.clawpal/discord-guild-channels.json", &json)
+ .await;
+ }
- Ok(entries)
+ Ok(entries)
+ })
}
#[tauri::command]
@@ -288,21 +290,23 @@ pub async fn remote_list_bindings(
pool: State<'_, SshConnectionPool>,
host_id: String,
) -> Result, String> {
- let output = crate::cli_runner::run_openclaw_remote(
- &pool,
- &host_id,
- &["config", "get", "bindings", "--json"],
- )
- .await?;
- // "bindings" may not exist yet — treat non-zero exit with "not found" as empty
- if output.exit_code != 0 {
- let msg = format!("{} {}", output.stderr, output.stdout).to_lowercase();
- if msg.contains("not found") {
- return Ok(Vec::new());
+ timed_async!("remote_list_bindings", {
+ let output = crate::cli_runner::run_openclaw_remote(
+ &pool,
+ &host_id,
+ &["config", "get", "bindings", "--json"],
+ )
+ .await?;
+ // "bindings" may not exist yet — treat non-zero exit with "not found" as empty
+ if output.exit_code != 0 {
+ let msg = format!("{} {}", output.stderr, output.stdout).to_lowercase();
+ if msg.contains("not found") {
+ return Ok(Vec::new());
+ }
}
- }
- let json = crate::cli_runner::parse_json_output(&output)?;
- clawpal_core::discovery::parse_bindings(&json.to_string())
+ let json = crate::cli_runner::parse_json_output(&output)?;
+ clawpal_core::discovery::parse_bindings(&json.to_string())
+ })
}
#[tauri::command]
@@ -310,27 +314,29 @@ pub async fn remote_list_channels_minimal(
pool: State<'_, SshConnectionPool>,
host_id: String,
) -> Result, String> {
- let output = crate::cli_runner::run_openclaw_remote(
- &pool,
- &host_id,
- &["config", "get", "channels", "--json"],
- )
- .await?;
- // channels key might not exist yet
- if output.exit_code != 0 {
- let msg = format!("{} {}", output.stderr, output.stdout).to_lowercase();
- if msg.contains("not found") {
- return Ok(Vec::new());
+ timed_async!("remote_list_channels_minimal", {
+ let output = crate::cli_runner::run_openclaw_remote(
+ &pool,
+ &host_id,
+ &["config", "get", "channels", "--json"],
+ )
+ .await?;
+ // channels key might not exist yet
+ if output.exit_code != 0 {
+ let msg = format!("{} {}", output.stderr, output.stdout).to_lowercase();
+ if msg.contains("not found") {
+ return Ok(Vec::new());
+ }
+ return Err(format!(
+ "openclaw config get channels failed: {}",
+ output.stderr
+ ));
}
- return Err(format!(
- "openclaw config get channels failed: {}",
- output.stderr
- ));
- }
- let channels_val = crate::cli_runner::parse_json_output(&output).unwrap_or(Value::Null);
- // Wrap in top-level object with "channels" key so collect_channel_nodes works
- let cfg = serde_json::json!({ "channels": channels_val });
- Ok(collect_channel_nodes(&cfg))
+ let channels_val = crate::cli_runner::parse_json_output(&output).unwrap_or(Value::Null);
+ // Wrap in top-level object with "channels" key so collect_channel_nodes works
+ let cfg = serde_json::json!({ "channels": channels_val });
+ Ok(collect_channel_nodes(&cfg))
+ })
}
#[tauri::command]
@@ -338,518 +344,535 @@ pub async fn remote_list_agents_overview(
pool: State<'_, SshConnectionPool>,
host_id: String,
) -> Result, String> {
- let output =
- run_openclaw_remote_with_autofix(&pool, &host_id, &["agents", "list", "--json"]).await?;
- if output.exit_code != 0 {
- let details = format!("{}\n{}", output.stderr.trim(), output.stdout.trim());
- return Err(format!(
- "openclaw agents list failed ({}): {}",
- output.exit_code,
- details.trim()
- ));
- }
- let json = crate::cli_runner::parse_json_output(&output)?;
- // Check which agents have sessions remotely (single command, batch check)
- // Lists agents whose sessions.json is larger than 2 bytes (not just "{}")
- let online_set = match pool.exec_login(
- &host_id,
- "for d in ~/.openclaw/agents/*/sessions/sessions.json; do [ -f \"$d\" ] && [ $(wc -c < \"$d\") -gt 2 ] && basename $(dirname $(dirname \"$d\")); done",
- ).await {
- Ok(result) => {
- result.stdout.lines()
- .map(|l| l.trim().to_string())
- .filter(|l| !l.is_empty())
- .collect::>()
+ timed_async!("remote_list_agents_overview", {
+ let output =
+ run_openclaw_remote_with_autofix(&pool, &host_id, &["agents", "list", "--json"])
+ .await?;
+ if output.exit_code != 0 {
+ let details = format!("{}\n{}", output.stderr.trim(), output.stdout.trim());
+ return Err(format!(
+ "openclaw agents list failed ({}): {}",
+ output.exit_code,
+ details.trim()
+ ));
}
- Err(_) => std::collections::HashSet::new(), // fallback: all offline
- };
- parse_agents_cli_output(&json, Some(&online_set))
+ let json = crate::cli_runner::parse_json_output(&output)?;
+ // Check which agents have sessions remotely (single command, batch check)
+ // Lists agents whose sessions.json is larger than 2 bytes (not just "{}")
+ let online_set = match pool.exec_login(
+ &host_id,
+ "for d in ~/.openclaw/agents/*/sessions/sessions.json; do [ -f \"$d\" ] && [ $(wc -c < \"$d\") -gt 2 ] && basename $(dirname $(dirname \"$d\")); done",
+ ).await {
+ Ok(result) => {
+ result.stdout.lines()
+ .map(|l| l.trim().to_string())
+ .filter(|l| !l.is_empty())
+ .collect::>()
+ }
+ Err(_) => std::collections::HashSet::new(), // fallback: all offline
+ };
+ parse_agents_cli_output(&json, Some(&online_set))
+ })
}
#[tauri::command]
pub async fn list_channels() -> Result, String> {
- tauri::async_runtime::spawn_blocking(|| {
- let paths = resolve_paths();
- let cfg = read_openclaw_config(&paths)?;
- let mut nodes = collect_channel_nodes(&cfg);
- enrich_channel_display_names(&paths, &cfg, &mut nodes)?;
- Ok(nodes)
+ timed_async!("list_channels", {
+ tauri::async_runtime::spawn_blocking(|| {
+ let paths = resolve_paths();
+ let cfg = read_openclaw_config(&paths)?;
+ let mut nodes = collect_channel_nodes(&cfg);
+ enrich_channel_display_names(&paths, &cfg, &mut nodes)?;
+ Ok(nodes)
+ })
+ .await
+ .map_err(|e| e.to_string())?
})
- .await
- .map_err(|e| e.to_string())?
}
#[tauri::command]
pub async fn list_channels_minimal(
cache: tauri::State<'_, crate::cli_runner::CliCache>,
) -> Result, String> {
- let cache_key = local_cli_cache_key("channels-minimal");
- let ttl = Some(std::time::Duration::from_secs(30));
- if let Some(cached) = cache.get(&cache_key, ttl) {
- return serde_json::from_str(&cached).map_err(|e| e.to_string());
- }
- let cache = cache.inner().clone();
- let cache_key_cloned = cache_key.clone();
- tauri::async_runtime::spawn_blocking(move || {
- let output = crate::cli_runner::run_openclaw(&["config", "get", "channels", "--json"])
- .map_err(|e| format!("Failed to run openclaw: {e}"))?;
- if output.exit_code != 0 {
- let msg = format!("{} {}", output.stderr, output.stdout).to_lowercase();
- if msg.contains("not found") {
- return Ok(Vec::new());
+ timed_async!("list_channels_minimal", {
+ let cache_key = local_cli_cache_key("channels-minimal");
+ let ttl = Some(std::time::Duration::from_secs(30));
+ if let Some(cached) = cache.get(&cache_key, ttl) {
+ return serde_json::from_str(&cached).map_err(|e| e.to_string());
+ }
+ let cache = cache.inner().clone();
+ let cache_key_cloned = cache_key.clone();
+ tauri::async_runtime::spawn_blocking(move || {
+ let output = crate::cli_runner::run_openclaw(&["config", "get", "channels", "--json"])
+ .map_err(|e| format!("Failed to run openclaw: {e}"))?;
+ if output.exit_code != 0 {
+ let msg = format!("{} {}", output.stderr, output.stdout).to_lowercase();
+ if msg.contains("not found") {
+ return Ok(Vec::new());
+ }
+ // Fallback: direct read
+ let paths = resolve_paths();
+ let cfg = read_openclaw_config(&paths)?;
+ let result = collect_channel_nodes(&cfg);
+ if let Ok(serialized) = serde_json::to_string(&result) {
+ cache.set(cache_key_cloned, serialized);
+ }
+ return Ok(result);
}
- // Fallback: direct read
- let paths = resolve_paths();
- let cfg = read_openclaw_config(&paths)?;
+ let channels_val = crate::cli_runner::parse_json_output(&output).unwrap_or(Value::Null);
+ let cfg = serde_json::json!({ "channels": channels_val });
let result = collect_channel_nodes(&cfg);
if let Ok(serialized) = serde_json::to_string(&result) {
cache.set(cache_key_cloned, serialized);
}
- return Ok(result);
- }
- let channels_val = crate::cli_runner::parse_json_output(&output).unwrap_or(Value::Null);
- let cfg = serde_json::json!({ "channels": channels_val });
- let result = collect_channel_nodes(&cfg);
- if let Ok(serialized) = serde_json::to_string(&result) {
- cache.set(cache_key_cloned, serialized);
- }
- Ok(result)
+ Ok(result)
+ })
+ .await
+ .map_err(|e| e.to_string())?
})
- .await
- .map_err(|e| e.to_string())?
}
#[tauri::command]
pub fn list_discord_guild_channels() -> Result, String> {
- let paths = resolve_paths();
- let cache_file = paths.clawpal_dir.join("discord-guild-channels.json");
- if cache_file.exists() {
- let text = fs::read_to_string(&cache_file).map_err(|e| e.to_string())?;
- let entries: Vec = serde_json::from_str(&text).unwrap_or_default();
- return Ok(entries);
- }
- Ok(Vec::new())
+ timed_sync!("list_discord_guild_channels", {
+ let paths = resolve_paths();
+ let cache_file = paths.clawpal_dir.join("discord-guild-channels.json");
+ if cache_file.exists() {
+ let text = fs::read_to_string(&cache_file).map_err(|e| e.to_string())?;
+ let entries: Vec = serde_json::from_str(&text).unwrap_or_default();
+ return Ok(entries);
+ }
+ Ok(Vec::new())
+ })
}
#[tauri::command]
pub async fn refresh_discord_guild_channels() -> Result, String> {
- tauri::async_runtime::spawn_blocking(move || {
- let paths = resolve_paths();
- ensure_dirs(&paths)?;
- let cfg = read_openclaw_config(&paths)?;
+ timed_async!("refresh_discord_guild_channels", {
+ tauri::async_runtime::spawn_blocking(move || {
+ let paths = resolve_paths();
+ ensure_dirs(&paths)?;
+ let cfg = read_openclaw_config(&paths)?;
- let discord_cfg = cfg.get("channels").and_then(|c| c.get("discord"));
- let configured_single_guild_id = discord_cfg
- .and_then(|d| d.get("guilds"))
- .and_then(Value::as_object)
- .and_then(|guilds| {
- if guilds.len() == 1 {
- guilds.keys().next().cloned()
- } else {
- None
- }
- });
+ let discord_cfg = cfg.get("channels").and_then(|c| c.get("discord"));
+ let configured_single_guild_id = discord_cfg
+ .and_then(|d| d.get("guilds"))
+ .and_then(Value::as_object)
+ .and_then(|guilds| {
+ if guilds.len() == 1 {
+ guilds.keys().next().cloned()
+ } else {
+ None
+ }
+ });
- // Extract bot token: top-level first, then fall back to first account token
- let bot_token = discord_cfg
- .and_then(|d| d.get("botToken").or_else(|| d.get("token")))
- .and_then(Value::as_str)
- .map(|s| s.to_string())
- .or_else(|| {
- discord_cfg
- .and_then(|d| d.get("accounts"))
- .and_then(Value::as_object)
- .and_then(|accounts| {
- accounts.values().find_map(|acct| {
- acct.get("token")
- .and_then(Value::as_str)
- .filter(|s| !s.is_empty())
- .map(|s| s.to_string())
+ // Extract bot token: top-level first, then fall back to first account token
+ let bot_token = discord_cfg
+ .and_then(|d| d.get("botToken").or_else(|| d.get("token")))
+ .and_then(Value::as_str)
+ .map(|s| s.to_string())
+ .or_else(|| {
+ discord_cfg
+ .and_then(|d| d.get("accounts"))
+ .and_then(Value::as_object)
+ .and_then(|accounts| {
+ accounts.values().find_map(|acct| {
+ acct.get("token")
+ .and_then(Value::as_str)
+ .filter(|s| !s.is_empty())
+ .map(|s| s.to_string())
+ })
})
- })
- });
- let cache_file = paths.clawpal_dir.join("discord-guild-channels.json");
- let mut guild_name_fallback_map = fs::read_to_string(&cache_file)
- .ok()
- .map(|text| parse_discord_cache_guild_name_fallbacks(&text))
- .unwrap_or_default();
- guild_name_fallback_map.extend(collect_discord_config_guild_name_fallbacks(discord_cfg));
+ });
+ let cache_file = paths.clawpal_dir.join("discord-guild-channels.json");
+ let mut guild_name_fallback_map = fs::read_to_string(&cache_file)
+ .ok()
+ .map(|text| parse_discord_cache_guild_name_fallbacks(&text))
+ .unwrap_or_default();
+ guild_name_fallback_map
+ .extend(collect_discord_config_guild_name_fallbacks(discord_cfg));
+
+ let mut entries: Vec = Vec::new();
+ let mut channel_ids: Vec = Vec::new();
+ let mut unresolved_guild_ids: Vec = Vec::new();
+
+ // Helper: collect guilds from a guilds object
+ let mut collect_guilds = |guilds: &serde_json::Map| {
+ for (guild_id, guild_val) in guilds {
+ let guild_name = guild_val
+ .get("slug")
+ .or_else(|| guild_val.get("name"))
+ .and_then(Value::as_str)
+ .map(|s| s.trim().to_string())
+ .filter(|s| !s.is_empty())
+ .unwrap_or_else(|| guild_id.clone());
- let mut entries: Vec = Vec::new();
- let mut channel_ids: Vec = Vec::new();
- let mut unresolved_guild_ids: Vec = Vec::new();
-
- // Helper: collect guilds from a guilds object
- let mut collect_guilds = |guilds: &serde_json::Map| {
- for (guild_id, guild_val) in guilds {
- let guild_name = guild_val
- .get("slug")
- .or_else(|| guild_val.get("name"))
- .and_then(Value::as_str)
- .map(|s| s.trim().to_string())
- .filter(|s| !s.is_empty())
- .unwrap_or_else(|| guild_id.clone());
-
- if guild_name == *guild_id && !unresolved_guild_ids.contains(guild_id) {
- unresolved_guild_ids.push(guild_id.clone());
- }
+ if guild_name == *guild_id && !unresolved_guild_ids.contains(guild_id) {
+ unresolved_guild_ids.push(guild_id.clone());
+ }
- if let Some(channels) = guild_val.get("channels").and_then(Value::as_object) {
- for (channel_id, _channel_val) in channels {
- // Skip glob/wildcard patterns (e.g. "*") — not real channel IDs
- if channel_id.contains('*') || channel_id.contains('?') {
- continue;
- }
- if entries
- .iter()
- .any(|e| e.guild_id == *guild_id && e.channel_id == *channel_id)
- {
- continue;
+ if let Some(channels) = guild_val.get("channels").and_then(Value::as_object) {
+ for (channel_id, _channel_val) in channels {
+ // Skip glob/wildcard patterns (e.g. "*") — not real channel IDs
+ if channel_id.contains('*') || channel_id.contains('?') {
+ continue;
+ }
+ if entries
+ .iter()
+ .any(|e| e.guild_id == *guild_id && e.channel_id == *channel_id)
+ {
+ continue;
+ }
+ channel_ids.push(channel_id.clone());
+ entries.push(DiscordGuildChannel {
+ guild_id: guild_id.clone(),
+ guild_name: guild_name.clone(),
+ channel_id: channel_id.clone(),
+ channel_name: channel_id.clone(),
+ default_agent_id: None,
+ });
}
- channel_ids.push(channel_id.clone());
- entries.push(DiscordGuildChannel {
- guild_id: guild_id.clone(),
- guild_name: guild_name.clone(),
- channel_id: channel_id.clone(),
- channel_name: channel_id.clone(),
- default_agent_id: None,
- });
}
}
- }
- };
+ };
- // Collect from channels.discord.guilds (top-level structured config)
- if let Some(guilds) = discord_cfg
- .and_then(|d| d.get("guilds"))
- .and_then(Value::as_object)
- {
- collect_guilds(guilds);
- }
+ // Collect from channels.discord.guilds (top-level structured config)
+ if let Some(guilds) = discord_cfg
+ .and_then(|d| d.get("guilds"))
+ .and_then(Value::as_object)
+ {
+ collect_guilds(guilds);
+ }
- // Collect from channels.discord.accounts..guilds (multi-account config)
- if let Some(accounts) = discord_cfg
- .and_then(|d| d.get("accounts"))
- .and_then(Value::as_object)
- {
- for (_account_id, account_val) in accounts {
- if let Some(guilds) = account_val.get("guilds").and_then(Value::as_object) {
- collect_guilds(guilds);
+ // Collect from channels.discord.accounts..guilds (multi-account config)
+ if let Some(accounts) = discord_cfg
+ .and_then(|d| d.get("accounts"))
+ .and_then(Value::as_object)
+ {
+ for (_account_id, account_val) in accounts {
+ if let Some(guilds) = account_val.get("guilds").and_then(Value::as_object) {
+ collect_guilds(guilds);
+ }
}
}
- }
- drop(collect_guilds); // Release mutable borrows before bindings section
-
- // Also collect from bindings array (users may only have bindings, no guilds map)
- if let Some(bindings) = cfg.get("bindings").and_then(Value::as_array) {
- for b in bindings {
- let m = match b.get("match") {
- Some(m) => m,
- None => continue,
- };
- if m.get("channel").and_then(Value::as_str) != Some("discord") {
- continue;
- }
- let guild_id = match m.get("guildId") {
- Some(Value::String(s)) => s.clone(),
- Some(Value::Number(n)) => n.to_string(),
- _ => continue,
- };
- let channel_id = match m.pointer("/peer/id") {
- Some(Value::String(s)) => s.clone(),
- Some(Value::Number(n)) => n.to_string(),
- _ => continue,
- };
- // Skip if already collected from guilds map
- if entries
- .iter()
- .any(|e| e.guild_id == guild_id && e.channel_id == channel_id)
- {
- continue;
- }
- if !unresolved_guild_ids.contains(&guild_id) {
- unresolved_guild_ids.push(guild_id.clone());
+ drop(collect_guilds); // Release mutable borrows before bindings section
+
+ // Also collect from bindings array (users may only have bindings, no guilds map)
+ if let Some(bindings) = cfg.get("bindings").and_then(Value::as_array) {
+ for b in bindings {
+ let m = match b.get("match") {
+ Some(m) => m,
+ None => continue,
+ };
+ if m.get("channel").and_then(Value::as_str) != Some("discord") {
+ continue;
+ }
+ let guild_id = match m.get("guildId") {
+ Some(Value::String(s)) => s.clone(),
+ Some(Value::Number(n)) => n.to_string(),
+ _ => continue,
+ };
+ let channel_id = match m.pointer("/peer/id") {
+ Some(Value::String(s)) => s.clone(),
+ Some(Value::Number(n)) => n.to_string(),
+ _ => continue,
+ };
+ // Skip if already collected from guilds map
+ if entries
+ .iter()
+ .any(|e| e.guild_id == guild_id && e.channel_id == channel_id)
+ {
+ continue;
+ }
+ if !unresolved_guild_ids.contains(&guild_id) {
+ unresolved_guild_ids.push(guild_id.clone());
+ }
+ channel_ids.push(channel_id.clone());
+ entries.push(DiscordGuildChannel {
+ guild_id: guild_id.clone(),
+ guild_name: guild_id.clone(),
+ channel_id: channel_id.clone(),
+ channel_name: channel_id.clone(),
+ default_agent_id: None,
+ });
}
- channel_ids.push(channel_id.clone());
- entries.push(DiscordGuildChannel {
- guild_id: guild_id.clone(),
- guild_name: guild_id.clone(),
- channel_id: channel_id.clone(),
- channel_name: channel_id.clone(),
- default_agent_id: None,
- });
}
- }
- // Fallback A: fetch channels from Discord REST for guilds that have no entries yet.
- // Build a guild_id -> token mapping so each guild uses the correct bot token.
- {
- let mut guild_token_map: std::collections::HashMap =
- std::collections::HashMap::new();
-
- // Map guilds from accounts to their respective tokens
- if let Some(accounts) = discord_cfg
- .and_then(|d| d.get("accounts"))
- .and_then(Value::as_object)
+ // Fallback A: fetch channels from Discord REST for guilds that have no entries yet.
+ // Build a guild_id -> token mapping so each guild uses the correct bot token.
{
- for (_acct_id, acct_val) in accounts {
- let acct_token = acct_val
- .get("token")
- .and_then(Value::as_str)
- .filter(|s| !s.is_empty())
- .map(|s| s.to_string());
- if let Some(token) = acct_token {
- if let Some(guilds) = acct_val.get("guilds").and_then(Value::as_object) {
- for guild_id in guilds.keys() {
- guild_token_map
- .entry(guild_id.clone())
- .or_insert_with(|| token.clone());
+ let mut guild_token_map: std::collections::HashMap =
+ std::collections::HashMap::new();
+
+ // Map guilds from accounts to their respective tokens
+ if let Some(accounts) = discord_cfg
+ .and_then(|d| d.get("accounts"))
+ .and_then(Value::as_object)
+ {
+ for (_acct_id, acct_val) in accounts {
+ let acct_token = acct_val
+ .get("token")
+ .and_then(Value::as_str)
+ .filter(|s| !s.is_empty())
+ .map(|s| s.to_string());
+ if let Some(token) = acct_token {
+ if let Some(guilds) = acct_val.get("guilds").and_then(Value::as_object)
+ {
+ for guild_id in guilds.keys() {
+ guild_token_map
+ .entry(guild_id.clone())
+ .or_insert_with(|| token.clone());
+ }
}
}
}
}
- }
- // Also map top-level guilds to the top-level bot token
- if let Some(token) = &bot_token {
- let configured_guild_ids = collect_discord_config_guild_ids(discord_cfg);
- for guild_id in &configured_guild_ids {
- guild_token_map
- .entry(guild_id.clone())
- .or_insert_with(|| token.clone());
+ // Also map top-level guilds to the top-level bot token
+ if let Some(token) = &bot_token {
+ let configured_guild_ids = collect_discord_config_guild_ids(discord_cfg);
+ for guild_id in &configured_guild_ids {
+ guild_token_map
+ .entry(guild_id.clone())
+ .or_insert_with(|| token.clone());
+ }
}
- }
- for (guild_id, token) in &guild_token_map {
- // Skip guilds that already have entries from config/bindings
- if entries.iter().any(|e| e.guild_id == *guild_id) {
- continue;
+ for (guild_id, token) in &guild_token_map {
+ // Skip guilds that already have entries from config/bindings
+ if entries.iter().any(|e| e.guild_id == *guild_id) {
+ continue;
+ }
+ if let Ok(channels) = fetch_discord_guild_channels(token, guild_id) {
+ for (channel_id, channel_name) in channels {
+ if entries
+ .iter()
+ .any(|e| e.guild_id == *guild_id && e.channel_id == channel_id)
+ {
+ continue;
+ }
+ channel_ids.push(channel_id.clone());
+ entries.push(DiscordGuildChannel {
+ guild_id: guild_id.clone(),
+ guild_name: guild_id.clone(),
+ channel_id,
+ channel_name,
+ default_agent_id: None,
+ });
+ }
+ }
}
- if let Ok(channels) = fetch_discord_guild_channels(token, guild_id) {
- for (channel_id, channel_name) in channels {
- if entries
- .iter()
- .any(|e| e.guild_id == *guild_id && e.channel_id == channel_id)
- {
+ }
+
+ // Fallback B: query channel ids from directory and keep compatibility
+ // with existing cache shape when config has no explicit channel map.
+ if channel_ids.is_empty() {
+ if let Ok(output) = run_openclaw_raw(&[
+ "directory",
+ "groups",
+ "list",
+ "--channel",
+ "discord",
+ "--json",
+ ]) {
+ for channel_id in parse_directory_group_channel_ids(&output.stdout) {
+ if entries.iter().any(|e| e.channel_id == channel_id) {
continue;
}
+ let (guild_id, guild_name) =
+ if let Some(gid) = configured_single_guild_id.clone() {
+ (gid.clone(), gid)
+ } else {
+ ("discord".to_string(), "Discord".to_string())
+ };
channel_ids.push(channel_id.clone());
entries.push(DiscordGuildChannel {
- guild_id: guild_id.clone(),
- guild_name: guild_id.clone(),
- channel_id,
- channel_name,
+ guild_id,
+ guild_name,
+ channel_id: channel_id.clone(),
+ channel_name: channel_id,
default_agent_id: None,
});
}
}
}
- }
- // Fallback B: query channel ids from directory and keep compatibility
- // with existing cache shape when config has no explicit channel map.
- if channel_ids.is_empty() {
- if let Ok(output) = run_openclaw_raw(&[
- "directory",
- "groups",
- "list",
- "--channel",
- "discord",
- "--json",
- ]) {
- for channel_id in parse_directory_group_channel_ids(&output.stdout) {
- if entries.iter().any(|e| e.channel_id == channel_id) {
- continue;
- }
- let (guild_id, guild_name) =
- if let Some(gid) = configured_single_guild_id.clone() {
- (gid.clone(), gid)
- } else {
- ("discord".to_string(), "Discord".to_string())
- };
- channel_ids.push(channel_id.clone());
- entries.push(DiscordGuildChannel {
- guild_id,
- guild_name,
- channel_id: channel_id.clone(),
- channel_name: channel_id,
- default_agent_id: None,
- });
- }
+ if entries.is_empty() {
+ return Ok(Vec::new());
}
- }
- if entries.is_empty() {
- return Ok(Vec::new());
- }
-
- // Resolve channel names via openclaw CLI
- if !channel_ids.is_empty() {
- let mut args = vec![
- "channels",
- "resolve",
- "--json",
- "--channel",
- "discord",
- "--kind",
- "auto",
- ];
- let id_refs: Vec<&str> = channel_ids.iter().map(String::as_str).collect();
- args.extend_from_slice(&id_refs);
-
- if let Ok(output) = run_openclaw_raw(&args) {
- if let Some(name_map) = parse_resolve_name_map(&output.stdout) {
- for entry in &mut entries {
- if let Some(name) = name_map.get(&entry.channel_id) {
- entry.channel_name = name.clone();
+ // Resolve channel names via openclaw CLI
+ if !channel_ids.is_empty() {
+ let mut args = vec![
+ "channels",
+ "resolve",
+ "--json",
+ "--channel",
+ "discord",
+ "--kind",
+ "auto",
+ ];
+ let id_refs: Vec<&str> = channel_ids.iter().map(String::as_str).collect();
+ args.extend_from_slice(&id_refs);
+
+ if let Ok(output) = run_openclaw_raw(&args) {
+ if let Some(name_map) = parse_resolve_name_map(&output.stdout) {
+ for entry in &mut entries {
+ if let Some(name) = name_map.get(&entry.channel_id) {
+ entry.channel_name = name.clone();
+ }
}
}
}
}
- }
- // Resolve guild names via Discord REST API
- if let Some(token) = &bot_token {
- if !unresolved_guild_ids.is_empty() {
- let mut guild_name_map: std::collections::HashMap =
- std::collections::HashMap::new();
- for gid in &unresolved_guild_ids {
- if let Ok(name) = fetch_discord_guild_name(token, gid) {
- guild_name_map.insert(gid.clone(), name);
+ // Resolve guild names via Discord REST API
+ if let Some(token) = &bot_token {
+ if !unresolved_guild_ids.is_empty() {
+ let mut guild_name_map: std::collections::HashMap =
+ std::collections::HashMap::new();
+ for gid in &unresolved_guild_ids {
+ if let Ok(name) = fetch_discord_guild_name(token, gid) {
+ guild_name_map.insert(gid.clone(), name);
+ }
}
- }
- for entry in &mut entries {
- if let Some(name) = guild_name_map.get(&entry.guild_id) {
- entry.guild_name = name.clone();
+ for entry in &mut entries {
+ if let Some(name) = guild_name_map.get(&entry.guild_id) {
+ entry.guild_name = name.clone();
+ }
}
}
}
- }
- for entry in &mut entries {
- if entry.guild_name == entry.guild_id {
- if let Some(name) = guild_name_fallback_map.get(&entry.guild_id) {
- entry.guild_name = name.clone();
+ for entry in &mut entries {
+ if entry.guild_name == entry.guild_id {
+ if let Some(name) = guild_name_fallback_map.get(&entry.guild_id) {
+ entry.guild_name = name.clone();
+ }
}
}
- }
- // Resolve default agent per guild from account config + bindings
- {
- // Build account_id -> default agent_id from bindings (account-level, no peer)
- let mut account_agent_map: std::collections::HashMap =
- std::collections::HashMap::new();
- if let Some(bindings) = cfg.get("bindings").and_then(Value::as_array) {
- for b in bindings {
- let m = match b.get("match") {
- Some(m) => m,
- None => continue,
- };
- if m.get("channel").and_then(Value::as_str) != Some("discord") {
- continue;
- }
- let account_id = match m.get("accountId").and_then(Value::as_str) {
- Some(s) => s,
- None => continue,
- };
- if m.get("peer").and_then(|p| p.get("id")).is_some() {
- continue;
- }
- if let Some(agent_id) = b.get("agentId").and_then(Value::as_str) {
- account_agent_map
- .entry(account_id.to_string())
- .or_insert_with(|| agent_id.to_string());
+ // Resolve default agent per guild from account config + bindings
+ {
+ // Build account_id -> default agent_id from bindings (account-level, no peer)
+ let mut account_agent_map: std::collections::HashMap =
+ std::collections::HashMap::new();
+ if let Some(bindings) = cfg.get("bindings").and_then(Value::as_array) {
+ for b in bindings {
+ let m = match b.get("match") {
+ Some(m) => m,
+ None => continue,
+ };
+ if m.get("channel").and_then(Value::as_str) != Some("discord") {
+ continue;
+ }
+ let account_id = match m.get("accountId").and_then(Value::as_str) {
+ Some(s) => s,
+ None => continue,
+ };
+ if m.get("peer").and_then(|p| p.get("id")).is_some() {
+ continue;
+ }
+ if let Some(agent_id) = b.get("agentId").and_then(Value::as_str) {
+ account_agent_map
+ .entry(account_id.to_string())
+ .or_insert_with(|| agent_id.to_string());
+ }
}
}
- }
- let mut guild_default_agent: std::collections::HashMap =
- std::collections::HashMap::new();
- if let Some(accounts) = discord_cfg
- .and_then(|d| d.get("accounts"))
- .and_then(Value::as_object)
- {
- for (account_id, account_val) in accounts {
- let agent = account_agent_map
- .get(account_id)
- .cloned()
- .unwrap_or_else(|| account_id.clone());
- if let Some(guilds) = account_val.get("guilds").and_then(Value::as_object) {
- for guild_id in guilds.keys() {
- guild_default_agent
- .entry(guild_id.clone())
- .or_insert(agent.clone());
+ let mut guild_default_agent: std::collections::HashMap =
+ std::collections::HashMap::new();
+ if let Some(accounts) = discord_cfg
+ .and_then(|d| d.get("accounts"))
+ .and_then(Value::as_object)
+ {
+ for (account_id, account_val) in accounts {
+ let agent = account_agent_map
+ .get(account_id)
+ .cloned()
+ .unwrap_or_else(|| account_id.clone());
+ if let Some(guilds) = account_val.get("guilds").and_then(Value::as_object) {
+ for guild_id in guilds.keys() {
+ guild_default_agent
+ .entry(guild_id.clone())
+ .or_insert(agent.clone());
+ }
}
}
}
- }
- for entry in &mut entries {
- if entry.default_agent_id.is_none() {
- if let Some(agent_id) = guild_default_agent.get(&entry.guild_id) {
- entry.default_agent_id = Some(agent_id.clone());
+ for entry in &mut entries {
+ if entry.default_agent_id.is_none() {
+ if let Some(agent_id) = guild_default_agent.get(&entry.guild_id) {
+ entry.default_agent_id = Some(agent_id.clone());
+ }
}
}
}
- }
- // Persist to cache
- let json = serde_json::to_string_pretty(&entries).map_err(|e| e.to_string())?;
- write_text(&cache_file, &json)?;
+ // Persist to cache
+ let json = serde_json::to_string_pretty(&entries).map_err(|e| e.to_string())?;
+ write_text(&cache_file, &json)?;
- Ok(entries)
+ Ok(entries)
+ })
+ .await
+ .map_err(|e| e.to_string())?
})
- .await
- .map_err(|e| e.to_string())?
}
#[tauri::command]
pub async fn list_bindings(
cache: tauri::State<'_, crate::cli_runner::CliCache>,
) -> Result, String> {
- let cache_key = local_cli_cache_key("bindings");
- if let Some(cached) = cache.get(&cache_key, None) {
- return serde_json::from_str(&cached).map_err(|e| e.to_string());
- }
- let cache = cache.inner().clone();
- let cache_key_cloned = cache_key.clone();
- tauri::async_runtime::spawn_blocking(move || {
- let output = crate::cli_runner::run_openclaw(&["config", "get", "bindings", "--json"])?;
- // "bindings" may not exist yet — treat "not found" as empty
- if output.exit_code != 0 {
- let msg = format!("{} {}", output.stderr, output.stdout).to_lowercase();
- if msg.contains("not found") {
- return Ok(Vec::new());
- }
+ timed_async!("list_bindings", {
+ let cache_key = local_cli_cache_key("bindings");
+ if let Some(cached) = cache.get(&cache_key, None) {
+ return serde_json::from_str(&cached).map_err(|e| e.to_string());
}
- let json = crate::cli_runner::parse_json_output(&output)?;
- let result = json.as_array().cloned().unwrap_or_default();
- if let Ok(serialized) = serde_json::to_string(&result) {
- cache.set(cache_key_cloned, serialized);
- }
- Ok(result)
+ let cache = cache.inner().clone();
+ let cache_key_cloned = cache_key.clone();
+ tauri::async_runtime::spawn_blocking(move || {
+ let output = crate::cli_runner::run_openclaw(&["config", "get", "bindings", "--json"])?;
+ // "bindings" may not exist yet — treat "not found" as empty
+ if output.exit_code != 0 {
+ let msg = format!("{} {}", output.stderr, output.stdout).to_lowercase();
+ if msg.contains("not found") {
+ return Ok(Vec::new());
+ }
+ }
+ let json = crate::cli_runner::parse_json_output(&output)?;
+ let result = json.as_array().cloned().unwrap_or_default();
+ if let Ok(serialized) = serde_json::to_string(&result) {
+ cache.set(cache_key_cloned, serialized);
+ }
+ Ok(result)
+ })
+ .await
+ .map_err(|e| e.to_string())?
})
- .await
- .map_err(|e| e.to_string())?
}
#[tauri::command]
pub async fn list_agents_overview(
cache: tauri::State<'_, crate::cli_runner::CliCache>,
) -> Result, String> {
- let cache_key = local_cli_cache_key("agents-list");
- if let Some(cached) = cache.get(&cache_key, None) {
- return serde_json::from_str(&cached).map_err(|e| e.to_string());
- }
- let cache = cache.inner().clone();
- let cache_key_cloned = cache_key.clone();
- tauri::async_runtime::spawn_blocking(move || {
- let output = crate::cli_runner::run_openclaw(&["agents", "list", "--json"])?;
- let json = crate::cli_runner::parse_json_output(&output)?;
- let result = parse_agents_cli_output(&json, None)?;
- if let Ok(serialized) = serde_json::to_string(&result) {
- cache.set(cache_key_cloned, serialized);
+ timed_async!("list_agents_overview", {
+ let cache_key = local_cli_cache_key("agents-list");
+ if let Some(cached) = cache.get(&cache_key, None) {
+ return serde_json::from_str(&cached).map_err(|e| e.to_string());
}
- Ok(result)
+ let cache = cache.inner().clone();
+ let cache_key_cloned = cache_key.clone();
+ tauri::async_runtime::spawn_blocking(move || {
+ let output = crate::cli_runner::run_openclaw(&["agents", "list", "--json"])?;
+ let json = crate::cli_runner::parse_json_output(&output)?;
+ let result = parse_agents_cli_output(&json, None)?;
+ if let Ok(serialized) = serde_json::to_string(&result) {
+ cache.set(cache_key_cloned, serialized);
+ }
+ Ok(result)
+ })
+ .await
+ .map_err(|e| e.to_string())?
})
- .await
- .map_err(|e| e.to_string())?
}
diff --git a/src-tauri/src/commands/doctor.rs b/src-tauri/src/commands/doctor.rs
index c837dd28..ad65b1b3 100644
--- a/src-tauri/src/commands/doctor.rs
+++ b/src-tauri/src/commands/doctor.rs
@@ -762,23 +762,25 @@ pub async fn remote_run_doctor(
pool: State<'_, SshConnectionPool>,
host_id: String,
) -> Result {
- let result = pool
- .exec_login(
- &host_id,
- "openclaw doctor --json 2>/dev/null || openclaw doctor 2>&1",
- )
- .await?;
- // Try to parse as JSON first
- if let Ok(json) = serde_json::from_str::(&result.stdout) {
- return Ok(json);
- }
- // Fallback: return raw output as a simple report
- Ok(serde_json::json!({
- "ok": result.exit_code == 0,
- "score": if result.exit_code == 0 { 100 } else { 0 },
- "issues": [],
- "rawOutput": result.stdout,
- }))
+ timed_async!("remote_run_doctor", {
+ let result = pool
+ .exec_login(
+ &host_id,
+ "openclaw doctor --json 2>/dev/null || openclaw doctor 2>&1",
+ )
+ .await?;
+ // Try to parse as JSON first
+ if let Ok(json) = serde_json::from_str::(&result.stdout) {
+ return Ok(json);
+ }
+ // Fallback: return raw output as a simple report
+ Ok(serde_json::json!({
+ "ok": result.exit_code == 0,
+ "score": if result.exit_code == 0 { 100 } else { 0 },
+ "issues": [],
+ "rawOutput": result.stdout,
+ }))
+ })
}
#[tauri::command]
@@ -787,21 +789,30 @@ pub async fn remote_fix_issues(
host_id: String,
ids: Vec,
) -> Result {
- let (config_path, raw, _cfg) =
- remote_read_openclaw_config_text_and_json(&pool, &host_id).await?;
- let mut cfg = clawpal_core::doctor::parse_json5_document_or_default(&raw);
- let applied = clawpal_core::doctor::apply_issue_fixes(&mut cfg, &ids)?;
-
- if !applied.is_empty() {
- remote_write_config_with_snapshot(&pool, &host_id, &config_path, &raw, &cfg, "doctor-fix")
+ timed_async!("remote_fix_issues", {
+ let (config_path, raw, _cfg) =
+ remote_read_openclaw_config_text_and_json(&pool, &host_id).await?;
+ let mut cfg = clawpal_core::doctor::parse_json5_document_or_default(&raw);
+ let applied = clawpal_core::doctor::apply_issue_fixes(&mut cfg, &ids)?;
+
+ if !applied.is_empty() {
+ remote_write_config_with_snapshot(
+ &pool,
+ &host_id,
+ &config_path,
+ &raw,
+ &cfg,
+ "doctor-fix",
+ )
.await?;
- }
+ }
- let remaining: Vec = ids.into_iter().filter(|id| !applied.contains(id)).collect();
- Ok(FixResult {
- ok: true,
- applied,
- remaining_issues: remaining,
+ let remaining: Vec = ids.into_iter().filter(|id| !applied.contains(id)).collect();
+ Ok(FixResult {
+ ok: true,
+ applied,
+ remaining_issues: remaining,
+ })
})
}
@@ -810,81 +821,88 @@ pub async fn remote_get_system_status(
pool: State<'_, SshConnectionPool>,
host_id: String,
) -> Result {
- // Tier 1: fast, essential — health check + config + real agent list.
- let (config_res, agents_res, pgrep_res) = tokio::join!(
- run_openclaw_remote_with_autofix(&pool, &host_id, &["config", "get", "agents", "--json"]),
- run_openclaw_remote_with_autofix(&pool, &host_id, &["agents", "list", "--json"]),
- pool.exec(&host_id, "pgrep -f '[o]penclaw-gateway' >/dev/null 2>&1"),
- );
+ timed_async!("remote_get_system_status", {
+ // Tier 1: fast, essential — health check + config + real agent list.
+ let (config_res, agents_res, pgrep_res) = tokio::join!(
+ run_openclaw_remote_with_autofix(
+ &pool,
+ &host_id,
+ &["config", "get", "agents", "--json"]
+ ),
+ run_openclaw_remote_with_autofix(&pool, &host_id, &["agents", "list", "--json"]),
+ pool.exec(&host_id, "pgrep -f '[o]penclaw-gateway' >/dev/null 2>&1"),
+ );
- let config_ok = matches!(&config_res, Ok(output) if output.exit_code == 0);
- let ssh_diagnostic = match (&config_res, &agents_res, &pgrep_res) {
- (Err(error), _, _) => Some(from_any_error(
- SshStage::RemoteExec,
- SshIntent::HealthCheck,
- error.clone(),
- )),
- (_, Err(error), _) => Some(from_any_error(
- SshStage::RemoteExec,
- SshIntent::HealthCheck,
- error.clone(),
- )),
- (_, _, Err(error)) => Some(from_any_error(
- SshStage::RemoteExec,
- SshIntent::HealthCheck,
- error.clone(),
- )),
- _ => None,
- };
+ let config_ok = matches!(&config_res, Ok(output) if output.exit_code == 0);
+ let ssh_diagnostic = match (&config_res, &agents_res, &pgrep_res) {
+ (Err(error), _, _) => Some(from_any_error(
+ SshStage::RemoteExec,
+ SshIntent::HealthCheck,
+ error.clone(),
+ )),
+ (_, Err(error), _) => Some(from_any_error(
+ SshStage::RemoteExec,
+ SshIntent::HealthCheck,
+ error.clone(),
+ )),
+ (_, _, Err(error)) => Some(from_any_error(
+ SshStage::RemoteExec,
+ SshIntent::HealthCheck,
+ error.clone(),
+ )),
+ _ => None,
+ };
- let active_agents = match &agents_res {
- Ok(output) if output.exit_code == 0 => {
- let json = crate::cli_runner::parse_json_output(output).unwrap_or(Value::Null);
- count_agent_entries_from_cli_json(&json).unwrap_or(0)
- }
- _ => 0,
- };
+ let active_agents = match &agents_res {
+ Ok(output) if output.exit_code == 0 => {
+ let json = crate::cli_runner::parse_json_output(output).unwrap_or(Value::Null);
+ count_agent_entries_from_cli_json(&json).unwrap_or(0)
+ }
+ _ => 0,
+ };
- let (global_default_model, fallback_models) = match config_res {
- Ok(ref output) if output.exit_code == 0 => {
- let cfg: Value = crate::cli_runner::parse_json_output(output).unwrap_or(Value::Null);
- let model = cfg
- .pointer("/defaults/model")
- .and_then(|v| read_model_value(v))
- .or_else(|| {
- cfg.pointer("/default/model")
- .and_then(|v| read_model_value(v))
- });
- let fallbacks = cfg
- .pointer("/defaults/model/fallbacks")
- .and_then(Value::as_array)
- .map(|arr| {
- arr.iter()
- .filter_map(Value::as_str)
- .map(String::from)
- .collect()
- })
- .unwrap_or_default();
- (model, fallbacks)
- }
- _ => (None, Vec::new()),
- };
+ let (global_default_model, fallback_models) = match config_res {
+ Ok(ref output) if output.exit_code == 0 => {
+ let cfg: Value =
+ crate::cli_runner::parse_json_output(output).unwrap_or(Value::Null);
+ let model = cfg
+ .pointer("/defaults/model")
+ .and_then(|v| read_model_value(v))
+ .or_else(|| {
+ cfg.pointer("/default/model")
+ .and_then(|v| read_model_value(v))
+ });
+ let fallbacks = cfg
+ .pointer("/defaults/model/fallbacks")
+ .and_then(Value::as_array)
+ .map(|arr| {
+ arr.iter()
+ .filter_map(Value::as_str)
+ .map(String::from)
+ .collect()
+ })
+ .unwrap_or_default();
+ (model, fallbacks)
+ }
+ _ => (None, Vec::new()),
+ };
- // Avoid false negatives from transient SSH exec failures:
- // if health probe fails but config fetch in the same cycle succeeded,
- // keep health as true instead of flipping to unhealthy.
- let healthy = match pgrep_res {
- Ok(r) => r.exit_code == 0,
- Err(_) if config_ok => true,
- Err(_) => false,
- };
+ // Avoid false negatives from transient SSH exec failures:
+ // if health probe fails but config fetch in the same cycle succeeded,
+ // keep health as true instead of flipping to unhealthy.
+ let healthy = match pgrep_res {
+ Ok(r) => r.exit_code == 0,
+ Err(_) if config_ok => true,
+ Err(_) => false,
+ };
- Ok(StatusLight {
- healthy,
- active_agents,
- global_default_model,
- fallback_models,
- ssh_diagnostic,
+ Ok(StatusLight {
+ healthy,
+ active_agents,
+ global_default_model,
+ fallback_models,
+ ssh_diagnostic,
+ })
})
}
@@ -895,27 +913,29 @@ pub async fn probe_ssh_connection_profile(
request_id: String,
app: AppHandle,
) -> Result {
- let emitter = ProbeEmitter {
- app,
- host_id: host_id.clone(),
- request_id,
- current_stage: Arc::new(Mutex::new("connect".to_string())),
- };
+ timed_async!("probe_ssh_connection_profile", {
+ let emitter = ProbeEmitter {
+ app,
+ host_id: host_id.clone(),
+ request_id,
+ current_stage: Arc::new(Mutex::new("connect".to_string())),
+ };
- match timeout(
- Duration::from_secs(SSH_PROBE_TOTAL_TIMEOUT_SECS),
- probe_ssh_connection_profile_impl(&pool, &host_id, Some(emitter.clone())),
- )
- .await
- {
- Ok(result) => result,
- Err(_) => {
- let current_stage = emitter.current_stage();
- let message = format!("ssh probe timed out during {current_stage}");
- emitter.emit(¤t_stage, "failed", None, Some(message.clone()));
- Err(message)
+ match timeout(
+ Duration::from_secs(SSH_PROBE_TOTAL_TIMEOUT_SECS),
+ probe_ssh_connection_profile_impl(&pool, &host_id, Some(emitter.clone())),
+ )
+ .await
+ {
+ Ok(result) => result,
+ Err(_) => {
+ let current_stage = emitter.current_stage();
+ let message = format!("ssh probe timed out during {current_stage}");
+ emitter.emit(¤t_stage, "failed", None, Some(message.clone()));
+ Err(message)
+ }
}
- }
+ })
}
#[tauri::command]
@@ -923,12 +943,14 @@ pub async fn remote_get_ssh_connection_profile(
pool: State<'_, SshConnectionPool>,
host_id: String,
) -> Result {
- timeout(
- Duration::from_secs(SSH_PROBE_TOTAL_TIMEOUT_SECS),
- probe_ssh_connection_profile_impl(&pool, &host_id, None),
- )
- .await
- .map_err(|_| "ssh probe timed out".to_string())?
+ timed_async!("remote_get_ssh_connection_profile", {
+ timeout(
+ Duration::from_secs(SSH_PROBE_TOTAL_TIMEOUT_SECS),
+ probe_ssh_connection_profile_impl(&pool, &host_id, None),
+ )
+ .await
+ .map_err(|_| "ssh probe timed out".to_string())?
+ })
}
#[tauri::command]
@@ -936,199 +958,211 @@ pub async fn remote_get_status_extra(
pool: State<'_, SshConnectionPool>,
host_id: String,
) -> Result {
- let detect_duplicates_script = concat!(
- "seen=''; for p in $(which -a openclaw 2>/dev/null) ",
- "\"$HOME/.npm-global/bin/openclaw\" \"/usr/local/bin/openclaw\" \"/opt/homebrew/bin/openclaw\"; do ",
- "[ -x \"$p\" ] || continue; ",
- "rp=$(readlink -f \"$p\" 2>/dev/null || echo \"$p\"); ",
- "echo \"$seen\" | grep -qF \"$rp\" && continue; ",
- "seen=\"$seen $rp\"; ",
- "v=$($p --version 2>/dev/null || echo 'unknown'); ",
- "echo \"$p: $v\"; ",
- "done"
- );
+ timed_async!("remote_get_status_extra", {
+ let detect_duplicates_script = concat!(
+ "seen=''; for p in $(which -a openclaw 2>/dev/null) ",
+ "\"$HOME/.npm-global/bin/openclaw\" \"/usr/local/bin/openclaw\" \"/opt/homebrew/bin/openclaw\"; do ",
+ "[ -x \"$p\" ] || continue; ",
+ "rp=$(readlink -f \"$p\" 2>/dev/null || echo \"$p\"); ",
+ "echo \"$seen\" | grep -qF \"$rp\" && continue; ",
+ "seen=\"$seen $rp\"; ",
+ "v=$($p --version 2>/dev/null || echo 'unknown'); ",
+ "echo \"$p: $v\"; ",
+ "done"
+ );
- let (version_res, dup_res) = tokio::join!(
- pool.exec_login(&host_id, "openclaw --version"),
- pool.exec_login(&host_id, detect_duplicates_script),
- );
+ let (version_res, dup_res) = tokio::join!(
+ pool.exec_login(&host_id, "openclaw --version"),
+ pool.exec_login(&host_id, detect_duplicates_script),
+ );
- let openclaw_version = match version_res {
- Ok(r) if r.exit_code == 0 => Some(r.stdout.trim().to_string()),
- Ok(r) => {
- let trimmed = r.stdout.trim().to_string();
- if trimmed.is_empty() {
- None
- } else {
- Some(trimmed)
+ let openclaw_version = match version_res {
+ Ok(r) if r.exit_code == 0 => Some(r.stdout.trim().to_string()),
+ Ok(r) => {
+ let trimmed = r.stdout.trim().to_string();
+ if trimmed.is_empty() {
+ None
+ } else {
+ Some(trimmed)
+ }
}
- }
- Err(_) => None,
- };
+ Err(_) => None,
+ };
- let duplicate_installs = match dup_res {
- Ok(r) => {
- let entries: Vec = r
- .stdout
- .lines()
- .map(|l| l.trim().to_string())
- .filter(|l| !l.is_empty())
- .collect();
- if entries.len() > 1 {
- entries
- } else {
- Vec::new()
+ let duplicate_installs = match dup_res {
+ Ok(r) => {
+ let entries: Vec = r
+ .stdout
+ .lines()
+ .map(|l| l.trim().to_string())
+ .filter(|l| !l.is_empty())
+ .collect();
+ if entries.len() > 1 {
+ entries
+ } else {
+ Vec::new()
+ }
}
- }
- Err(_) => Vec::new(),
- };
+ Err(_) => Vec::new(),
+ };
- Ok(StatusExtra {
- openclaw_version,
- duplicate_installs,
+ Ok(StatusExtra {
+ openclaw_version,
+ duplicate_installs,
+ })
})
}
#[tauri::command]
pub async fn get_status_light() -> Result {
- tauri::async_runtime::spawn_blocking(|| {
- let paths = resolve_paths();
- let cfg = read_openclaw_config(&paths)?;
- let local_health = clawpal_core::health::check_instance(&local_health_instance())
- .map_err(|e| e.to_string())?;
- let active_agents = crate::cli_runner::run_openclaw(&["agents", "list", "--json"])
- .ok()
- .and_then(|output| crate::cli_runner::parse_json_output(&output).ok())
- .and_then(|json| count_agent_entries_from_cli_json(&json).ok())
- .unwrap_or(0);
- let global_default_model = cfg
- .pointer("/agents/defaults/model")
- .and_then(read_model_value)
- .or_else(|| {
- cfg.pointer("/agents/default/model")
- .and_then(read_model_value)
- });
+ timed_async!("get_status_light", {
+ tauri::async_runtime::spawn_blocking(|| {
+ let paths = resolve_paths();
+ let cfg = read_openclaw_config(&paths)?;
+ let local_health = clawpal_core::health::check_instance(&local_health_instance())
+ .map_err(|e| e.to_string())?;
+ let active_agents = crate::cli_runner::run_openclaw(&["agents", "list", "--json"])
+ .ok()
+ .and_then(|output| crate::cli_runner::parse_json_output(&output).ok())
+ .and_then(|json| count_agent_entries_from_cli_json(&json).ok())
+ .unwrap_or(0);
+ let global_default_model = cfg
+ .pointer("/agents/defaults/model")
+ .and_then(read_model_value)
+ .or_else(|| {
+ cfg.pointer("/agents/default/model")
+ .and_then(read_model_value)
+ });
- let fallback_models = cfg
- .pointer("/agents/defaults/model/fallbacks")
- .and_then(Value::as_array)
- .map(|arr| {
- arr.iter()
- .filter_map(Value::as_str)
- .map(String::from)
- .collect()
- })
- .unwrap_or_default();
+ let fallback_models = cfg
+ .pointer("/agents/defaults/model/fallbacks")
+ .and_then(Value::as_array)
+ .map(|arr| {
+ arr.iter()
+ .filter_map(Value::as_str)
+ .map(String::from)
+ .collect()
+ })
+ .unwrap_or_default();
- Ok(StatusLight {
- healthy: local_health.healthy,
- active_agents,
- global_default_model,
- fallback_models,
- ssh_diagnostic: None,
+ Ok(StatusLight {
+ healthy: local_health.healthy,
+ active_agents,
+ global_default_model,
+ fallback_models,
+ ssh_diagnostic: None,
+ })
})
+ .await
+ .map_err(|e| e.to_string())?
})
- .await
- .map_err(|e| e.to_string())?
}
#[tauri::command]
pub async fn get_status_extra() -> Result {
- tauri::async_runtime::spawn_blocking(|| {
- let openclaw_version = {
- let mut cache = OPENCLAW_VERSION_CACHE.lock().unwrap();
- if cache.is_none() {
- let version = clawpal_core::health::check_instance(&local_health_instance())
- .ok()
- .and_then(|status| status.version);
- *cache = Some(version);
- }
- cache.as_ref().unwrap().clone()
- };
- Ok(StatusExtra {
- openclaw_version,
- duplicate_installs: Vec::new(),
+ timed_async!("get_status_extra", {
+ tauri::async_runtime::spawn_blocking(|| {
+ let openclaw_version = {
+ let mut cache = OPENCLAW_VERSION_CACHE.lock().unwrap();
+ if cache.is_none() {
+ let version = clawpal_core::health::check_instance(&local_health_instance())
+ .ok()
+ .and_then(|status| status.version);
+ *cache = Some(version);
+ }
+ cache.as_ref().unwrap().clone()
+ };
+ Ok(StatusExtra {
+ openclaw_version,
+ duplicate_installs: Vec::new(),
+ })
})
+ .await
+ .map_err(|e| e.to_string())?
})
- .await
- .map_err(|e| e.to_string())?
}
#[tauri::command]
pub fn get_system_status() -> Result {
- let paths = resolve_paths();
- ensure_dirs(&paths)?;
- let cfg = read_openclaw_config(&paths)?;
- let active_agents = cfg
- .get("agents")
- .and_then(|a| a.get("list"))
- .and_then(|a| a.as_array())
- .map(|a| a.len() as u32)
- .unwrap_or(0);
- let snapshots = list_snapshots(&paths.metadata_path)
- .unwrap_or_default()
- .items
- .len();
- let model_summary = collect_model_summary(&cfg);
- let channel_summary = collect_channel_summary(&cfg);
- let memory = collect_memory_overview(&paths.base_dir);
- let sessions = collect_session_overview(&paths.base_dir);
- let openclaw_version = resolve_openclaw_version();
- let openclaw_update =
- check_openclaw_update_cached(&paths, false).unwrap_or_else(|_| OpenclawUpdateCheck {
- installed_version: openclaw_version.clone(),
- latest_version: None,
- upgrade_available: false,
- channel: None,
- details: Some("update status unavailable".into()),
- source: "unknown".into(),
- checked_at: format_timestamp_from_unix(unix_timestamp_secs()),
- });
- Ok(SystemStatus {
- healthy: true,
- config_path: paths.config_path.to_string_lossy().to_string(),
- openclaw_dir: paths.openclaw_dir.to_string_lossy().to_string(),
- clawpal_dir: paths.clawpal_dir.to_string_lossy().to_string(),
- openclaw_version,
- active_agents,
- snapshots,
- channels: channel_summary,
- models: model_summary,
- memory,
- sessions,
- openclaw_update,
+ timed_sync!("get_system_status", {
+ let paths = resolve_paths();
+ ensure_dirs(&paths)?;
+ let cfg = read_openclaw_config(&paths)?;
+ let active_agents = cfg
+ .get("agents")
+ .and_then(|a| a.get("list"))
+ .and_then(|a| a.as_array())
+ .map(|a| a.len() as u32)
+ .unwrap_or(0);
+ let snapshots = list_snapshots(&paths.metadata_path)
+ .unwrap_or_default()
+ .items
+ .len();
+ let model_summary = collect_model_summary(&cfg);
+ let channel_summary = collect_channel_summary(&cfg);
+ let memory = collect_memory_overview(&paths.base_dir);
+ let sessions = collect_session_overview(&paths.base_dir);
+ let openclaw_version = resolve_openclaw_version();
+ let openclaw_update =
+ check_openclaw_update_cached(&paths, false).unwrap_or_else(|_| OpenclawUpdateCheck {
+ installed_version: openclaw_version.clone(),
+ latest_version: None,
+ upgrade_available: false,
+ channel: None,
+ details: Some("update status unavailable".into()),
+ source: "unknown".into(),
+ checked_at: format_timestamp_from_unix(unix_timestamp_secs()),
+ });
+ Ok(SystemStatus {
+ healthy: true,
+ config_path: paths.config_path.to_string_lossy().to_string(),
+ openclaw_dir: paths.openclaw_dir.to_string_lossy().to_string(),
+ clawpal_dir: paths.clawpal_dir.to_string_lossy().to_string(),
+ openclaw_version,
+ active_agents,
+ snapshots,
+ channels: channel_summary,
+ models: model_summary,
+ memory,
+ sessions,
+ openclaw_update,
+ })
})
}
#[tauri::command]
pub fn run_doctor_command() -> Result {
- let paths = resolve_paths();
- Ok(run_doctor(&paths))
+ timed_sync!("run_doctor_command", {
+ let paths = resolve_paths();
+ Ok(run_doctor(&paths))
+ })
}
#[tauri::command]
pub fn fix_issues(ids: Vec) -> Result {
- let paths = resolve_paths();
- let issues = run_doctor(&paths);
- let mut fixable = Vec::new();
- for issue in issues.issues {
- if ids.contains(&issue.id) && issue.auto_fixable {
- fixable.push(issue.id);
+ timed_sync!("fix_issues", {
+ let paths = resolve_paths();
+ let issues = run_doctor(&paths);
+ let mut fixable = Vec::new();
+ for issue in issues.issues {
+ if ids.contains(&issue.id) && issue.auto_fixable {
+ fixable.push(issue.id);
+ }
}
- }
- let auto_applied = apply_auto_fixes(&paths, &fixable);
- let mut remaining = Vec::new();
- let mut applied = Vec::new();
- for id in ids {
- if fixable.contains(&id) && auto_applied.iter().any(|x| x == &id) {
- applied.push(id);
- } else {
- remaining.push(id);
+ let auto_applied = apply_auto_fixes(&paths, &fixable);
+ let mut remaining = Vec::new();
+ let mut applied = Vec::new();
+ for id in ids {
+ if fixable.contains(&id) && auto_applied.iter().any(|x| x == &id) {
+ applied.push(id);
+ } else {
+ remaining.push(id);
+ }
}
- }
- Ok(FixResult {
- ok: true,
- applied,
- remaining_issues: remaining,
+ Ok(FixResult {
+ ok: true,
+ applied,
+ remaining_issues: remaining,
+ })
})
}
diff --git a/src-tauri/src/commands/doctor_assistant.rs b/src-tauri/src/commands/doctor_assistant.rs
index bac699e0..2e4bc2b7 100644
--- a/src-tauri/src/commands/doctor_assistant.rs
+++ b/src-tauri/src/commands/doctor_assistant.rs
@@ -4292,12 +4292,14 @@ fn build_temp_gateway_record(
pub async fn diagnose_doctor_assistant(
app: AppHandle,
) -> Result {
- let run_id = Uuid::new_v4().to_string();
- tauri::async_runtime::spawn_blocking(move || {
- diagnose_doctor_assistant_local_impl(&app, &run_id, DOCTOR_ASSISTANT_TARGET_PROFILE)
+ timed_async!("diagnose_doctor_assistant", {
+ let run_id = Uuid::new_v4().to_string();
+ tauri::async_runtime::spawn_blocking(move || {
+ diagnose_doctor_assistant_local_impl(&app, &run_id, DOCTOR_ASSISTANT_TARGET_PROFILE)
+ })
+ .await
+ .map_err(|error| error.to_string())?
})
- .await
- .map_err(|error| error.to_string())?
}
#[tauri::command]
@@ -4306,15 +4308,17 @@ pub async fn remote_diagnose_doctor_assistant(
host_id: String,
app: AppHandle,
) -> Result {
- let run_id = Uuid::new_v4().to_string();
- diagnose_doctor_assistant_remote_impl(
- &pool,
- &host_id,
- &app,
- &run_id,
- DOCTOR_ASSISTANT_TARGET_PROFILE,
- )
- .await
+ timed_async!("remote_diagnose_doctor_assistant", {
+ let run_id = Uuid::new_v4().to_string();
+ diagnose_doctor_assistant_remote_impl(
+ &pool,
+ &host_id,
+ &app,
+ &run_id,
+ DOCTOR_ASSISTANT_TARGET_PROFILE,
+ )
+ .await
+ })
}
#[tauri::command]
@@ -4323,16 +4327,373 @@ pub async fn repair_doctor_assistant(
temp_provider_profile_id: Option,
app: AppHandle,
) -> Result {
- let run_id = Uuid::new_v4().to_string();
- tauri::async_runtime::spawn_blocking(move || -> Result {
+ timed_async!("repair_doctor_assistant", {
+ let run_id = Uuid::new_v4().to_string();
+ tauri::async_runtime::spawn_blocking(
+ move || -> Result {
+ let paths = resolve_paths();
+ let before = match current_diagnosis {
+ Some(diagnosis) => diagnosis,
+ None => diagnose_doctor_assistant_local_impl(
+ &app,
+ &run_id,
+ DOCTOR_ASSISTANT_TARGET_PROFILE,
+ )?,
+ };
+ let attempted_at = format_timestamp_from_unix(unix_timestamp_secs());
+ let (selected_issue_ids, skipped_issue_ids) = collect_repairable_primary_issue_ids(
+ &before,
+ &before.summary.selected_fix_issue_ids,
+ );
+ let mut applied_issue_ids = Vec::new();
+ let mut failed_issue_ids = Vec::new();
+ let mut steps = Vec::new();
+ let mut current = before.clone();
+
+ if diagnose_doctor_assistant_status(&before) {
+ append_step(
+ &mut steps,
+ "repair.noop",
+ "No automatic repairs needed",
+ true,
+ "The primary gateway is already healthy",
+ None,
+ );
+ return Ok(doctor_assistant_completed_result(
+ attempted_at,
+ "temporary".into(),
+ selected_issue_ids,
+ applied_issue_ids,
+ skipped_issue_ids,
+ failed_issue_ids,
+ steps,
+ before.clone(),
+ before,
+ ));
+ }
+
+ if !diagnose_doctor_assistant_status(&current) {
+ let temp_profile = choose_temp_gateway_profile_name();
+ let temp_port =
+ choose_temp_gateway_port(resolve_main_port_from_diagnosis(&current));
+ emit_doctor_assistant_progress(
+ &app,
+ &run_id,
+ "bootstrap_temp_gateway",
+ "Bootstrapping temporary gateway",
+ 0.56,
+ 0,
+ None,
+ None,
+ );
+ upsert_doctor_temp_gateway_record(
+ &paths,
+ build_temp_gateway_record(
+ DOCTOR_ASSISTANT_TEMP_SCOPE_LOCAL,
+ &temp_profile,
+ temp_port,
+ "bootstrapping",
+ resolve_main_port_from_diagnosis(&current),
+ Some("bootstrap".into()),
+ ),
+ )?;
+
+ let temp_flow = (|| -> Result<(), String> {
+ run_local_temp_gateway_action(
+ RescueBotAction::Set,
+ &temp_profile,
+ temp_port,
+ true,
+ &mut steps,
+ "temp.setup",
+ )?;
+ write_local_temp_gateway_marker(
+ &paths.openclaw_dir,
+ DOCTOR_ASSISTANT_TEMP_SCOPE_LOCAL,
+ &temp_profile,
+ )?;
+ emit_doctor_assistant_progress(
+ &app,
+ &run_id,
+ "bootstrap_temp_gateway",
+ "Syncing provider configuration into temporary gateway",
+ 0.58,
+ 0,
+ None,
+ None,
+ );
+ let (provider, model) = sync_local_temp_gateway_provider_context(
+ &temp_profile,
+ temp_provider_profile_id.as_deref(),
+ &mut steps,
+ )?;
+ emit_doctor_assistant_progress(
+ &app,
+ &run_id,
+ "bootstrap_temp_gateway",
+ format!("Temporary gateway ready: {provider}/{model}"),
+ 0.64,
+ 0,
+ None,
+ None,
+ );
+ upsert_doctor_temp_gateway_record(
+ &paths,
+ build_temp_gateway_record(
+ DOCTOR_ASSISTANT_TEMP_SCOPE_LOCAL,
+ &temp_profile,
+ temp_port,
+ "repairing",
+ resolve_main_port_from_diagnosis(&current),
+ Some("repair".into()),
+ ),
+ )?;
+
+ for round in 1..=DOCTOR_ASSISTANT_TEMP_REPAIR_ROUNDS {
+ run_local_temp_gateway_agent_repair_round(
+ &app,
+ &run_id,
+ &temp_profile,
+ &current,
+ round,
+ &mut steps,
+ )?;
+ let next = diagnose_doctor_assistant_local_impl(
+ &app,
+ &run_id,
+ DOCTOR_ASSISTANT_TARGET_PROFILE,
+ )?;
+ for (issue_id, label) in collect_resolved_issues(&current, &next) {
+ merge_issue_lists(
+ &mut applied_issue_ids,
+ std::iter::once(issue_id.clone()),
+ );
+ emit_doctor_assistant_progress(
+ &app,
+ &run_id,
+ "agent_repair",
+ format!("{label} fixed"),
+ 0.6 + (round as f32 * 0.03),
+ round,
+ Some(issue_id),
+ Some(label),
+ );
+ }
+ current = next;
+ if diagnose_doctor_assistant_status(&current) {
+ break;
+ }
+ }
+ Ok(())
+ })();
+ let temp_flow_error = temp_flow.as_ref().err().cloned();
+ let pending_reason = temp_flow_error.as_ref().and_then(|error| {
+ doctor_assistant_extract_temp_provider_setup_reason(error)
+ });
+
+ emit_doctor_assistant_progress(
+ &app,
+ &run_id,
+ "cleanup",
+ "Cleaning up temporary gateway",
+ 0.94,
+ 0,
+ None,
+ None,
+ );
+ let cleanup_result = run_local_temp_gateway_action(
+ RescueBotAction::Unset,
+ &temp_profile,
+ temp_port,
+ false,
+ &mut steps,
+ "temp.cleanup",
+ );
+ let _ = remove_doctor_temp_gateway_record(
+ &paths,
+ DOCTOR_ASSISTANT_TEMP_SCOPE_LOCAL,
+ &temp_profile,
+ );
+ match cleanup_result {
+ Ok(()) => match prune_local_temp_gateway_profile_roots(&paths.openclaw_dir)
+ {
+ Ok(removed) => append_step(
+ &mut steps,
+ "temp.cleanup.roots",
+ "Delete temporary gateway profiles",
+ true,
+ if removed.is_empty() {
+ "No temporary gateway profiles remained on disk".into()
+ } else {
+ format!(
+ "Removed {} temporary gateway profile directorie(s)",
+ removed.len()
+ )
+ },
+ None,
+ ),
+ Err(error) => append_step(
+ &mut steps,
+ "temp.cleanup.roots",
+ "Delete temporary gateway profiles",
+ false,
+ error,
+ None,
+ ),
+ },
+ Err(error) => append_step(
+ &mut steps,
+ "temp.cleanup.error",
+ "Cleanup temporary gateway",
+ false,
+ error,
+ None,
+ ),
+ }
+ if temp_flow_error.is_some() || !diagnose_doctor_assistant_status(&current) {
+ let fallback_reason = pending_reason
+ .clone()
+ .or(temp_flow_error.clone())
+ .unwrap_or_else(|| {
+ "Temporary gateway repair finished with remaining issues".into()
+ });
+ match fallback_restore_local_primary_config(
+ &app,
+ &run_id,
+ &mut steps,
+ &fallback_reason,
+ ) {
+ Ok(Some(next)) => {
+ for (issue_id, label) in collect_resolved_issues(&current, &next) {
+ merge_issue_lists(
+ &mut applied_issue_ids,
+ std::iter::once(issue_id.clone()),
+ );
+ emit_doctor_assistant_progress(
+ &app,
+ &run_id,
+ "cleanup",
+ format!("{label} fixed"),
+ 0.94,
+ 0,
+ Some(issue_id),
+ Some(label),
+ );
+ }
+ current = next
+ }
+ Ok(None) => {}
+ Err(error) => append_step(
+ &mut steps,
+ "repair.fallback.error",
+ "Fallback restore primary config",
+ false,
+ error,
+ None,
+ ),
+ }
+ }
+ if let Some(reason) = pending_reason {
+ if !diagnose_doctor_assistant_status(&current) {
+ emit_doctor_assistant_progress(
+ &app, &run_id, "cleanup", &reason, 0.96, 0, None, None,
+ );
+ return Ok(doctor_assistant_pending_temp_provider_result(
+ attempted_at,
+ temp_profile,
+ selected_issue_ids.clone(),
+ applied_issue_ids.clone(),
+ skipped_issue_ids.clone(),
+ selected_issue_ids
+ .iter()
+ .filter(|id| !applied_issue_ids.contains(id))
+ .cloned()
+ .collect(),
+ steps,
+ before,
+ current,
+ temp_provider_profile_id,
+ reason,
+ ));
+ }
+ }
+ }
+
+ let after = diagnose_doctor_assistant_local_impl(
+ &app,
+ &run_id,
+ DOCTOR_ASSISTANT_TARGET_PROFILE,
+ )?;
+ for (issue_id, _label) in collect_resolved_issues(&current, &after) {
+ merge_issue_lists(&mut applied_issue_ids, std::iter::once(issue_id));
+ }
+ let remaining = after
+ .issues
+ .iter()
+ .map(|issue| issue.id.clone())
+ .collect::>();
+ failed_issue_ids = selected_issue_ids
+ .iter()
+ .filter(|id| remaining.contains(id))
+ .cloned()
+ .collect();
+
+ emit_doctor_assistant_progress(
+ &app,
+ &run_id,
+ "cleanup",
+ if diagnose_doctor_assistant_status(&after) {
+ "Repair complete"
+ } else {
+ "Repair finished with remaining issues"
+ },
+ 1.0,
+ 0,
+ None,
+ None,
+ );
+
+ Ok(doctor_assistant_completed_result(
+ attempted_at,
+ current.rescue_profile.clone(),
+ selected_issue_ids,
+ applied_issue_ids,
+ skipped_issue_ids,
+ failed_issue_ids,
+ steps,
+ before,
+ after,
+ ))
+ },
+ )
+ .await
+ .map_err(|error| error.to_string())?
+ })
+}
+
+#[tauri::command]
+pub async fn remote_repair_doctor_assistant(
+ pool: State<'_, SshConnectionPool>,
+ host_id: String,
+ current_diagnosis: Option,
+ temp_provider_profile_id: Option,
+ app: AppHandle,
+) -> Result {
+ timed_async!("remote_repair_doctor_assistant", {
+ let run_id = Uuid::new_v4().to_string();
let paths = resolve_paths();
let before = match current_diagnosis {
Some(diagnosis) => diagnosis,
- None => diagnose_doctor_assistant_local_impl(
- &app,
- &run_id,
- DOCTOR_ASSISTANT_TARGET_PROFILE,
- )?,
+ None => {
+ diagnose_doctor_assistant_remote_impl(
+ &pool,
+ &host_id,
+ &app,
+ &run_id,
+ DOCTOR_ASSISTANT_TARGET_PROFILE,
+ )
+ .await?
+ }
};
let attempted_at = format_timestamp_from_unix(unix_timestamp_secs());
let (selected_issue_ids, skipped_issue_ids) =
@@ -4380,7 +4741,7 @@ pub async fn repair_doctor_assistant(
upsert_doctor_temp_gateway_record(
&paths,
build_temp_gateway_record(
- DOCTOR_ASSISTANT_TEMP_SCOPE_LOCAL,
+ &host_id,
&temp_profile,
temp_port,
"bootstrapping",
@@ -4389,20 +4750,37 @@ pub async fn repair_doctor_assistant(
),
)?;
- let temp_flow = (|| -> Result<(), String> {
- run_local_temp_gateway_action(
+ let mut temp_flow = async {
+ run_remote_temp_gateway_action(
+ &pool,
+ &host_id,
RescueBotAction::Set,
&temp_profile,
temp_port,
true,
&mut steps,
"temp.setup",
- )?;
- write_local_temp_gateway_marker(
- &paths.openclaw_dir,
- DOCTOR_ASSISTANT_TEMP_SCOPE_LOCAL,
+ )
+ .await?;
+ let main_root = resolve_remote_main_root(&pool, &host_id).await;
+ if let Err(error) = write_remote_temp_gateway_marker(
+ &pool,
+ &host_id,
+ &main_root,
+ &host_id,
&temp_profile,
- )?;
+ )
+ .await
+ {
+ append_step(
+ &mut steps,
+ "temp.marker",
+ "Mark temporary gateway ownership",
+ false,
+ error,
+ None,
+ );
+ }
emit_doctor_assistant_progress(
&app,
&run_id,
@@ -4413,25 +4791,84 @@ pub async fn repair_doctor_assistant(
None,
None,
);
- let (provider, model) = sync_local_temp_gateway_provider_context(
+ let (main_root, temp_root, donor_cfg) = sync_remote_temp_gateway_provider_context(
+ &pool,
+ &host_id,
&temp_profile,
temp_provider_profile_id.as_deref(),
&mut steps,
- )?;
- emit_doctor_assistant_progress(
- &app,
- &run_id,
- "bootstrap_temp_gateway",
- format!("Temporary gateway ready: {provider}/{model}"),
- 0.64,
- 0,
- None,
- None,
- );
+ )
+ .await?;
+ let mut provider_identity = None;
+ if let Err(error) = probe_remote_temp_gateway_agent_smoke(
+ &pool,
+ &host_id,
+ &temp_profile,
+ &mut steps,
+ )
+ .await
+ {
+ let should_retry_from_remote_auth_store = temp_provider_profile_id.is_none()
+ && doctor_assistant_extract_temp_provider_setup_reason(&error).is_some();
+ if !should_retry_from_remote_auth_store {
+ return Err(error);
+ }
+ emit_doctor_assistant_progress(
+ &app,
+ &run_id,
+ "bootstrap_temp_gateway",
+ "Rebuilding temporary gateway provider from remote auth store",
+ 0.62,
+ 0,
+ None,
+ None,
+ );
+ rebuild_remote_temp_gateway_provider_context_from_auth_store(
+ &pool,
+ &host_id,
+ &main_root,
+ &temp_root,
+ &donor_cfg,
+ &mut steps,
+ )
+ .await?;
+ probe_remote_temp_gateway_agent_smoke(
+ &pool,
+ &host_id,
+ &temp_profile,
+ &mut steps,
+ )
+ .await
+ .map(|identity| provider_identity = Some(identity))?;
+ } else {
+ provider_identity = steps
+ .iter()
+ .rev()
+ .find(|step| step.id == "temp.probe.agent.identity")
+ .and_then(|step| {
+ let detail = step.detail.trim();
+ detail
+ .strip_prefix("Temporary gateway replied using ")
+ .and_then(|value| value.split_once('/'))
+ .map(|(provider, model)| (provider.to_string(), model.to_string()))
+ });
+ }
+ if let Some((provider, model)) = provider_identity.as_ref() {
+ emit_doctor_assistant_progress(
+ &app,
+ &run_id,
+ "bootstrap_temp_gateway",
+ format!("Temporary gateway ready: {provider}/{model}"),
+ 0.64,
+ 0,
+ None,
+ None,
+ );
+ }
upsert_doctor_temp_gateway_record(
&paths,
build_temp_gateway_record(
- DOCTOR_ASSISTANT_TEMP_SCOPE_LOCAL,
+ &host_id,
&temp_profile,
temp_port,
"repairing",
@@ -4440,43 +4877,74 @@ pub async fn repair_doctor_assistant(
),
)?;
- for round in 1..=DOCTOR_ASSISTANT_TEMP_REPAIR_ROUNDS {
- run_local_temp_gateway_agent_repair_round(
- &app,
- &run_id,
- &temp_profile,
- &current,
- round,
+ if DOCTOR_ASSISTANT_REMOTE_SKIP_AGENT_REPAIR {
+ append_step(
&mut steps,
- )?;
- let next = diagnose_doctor_assistant_local_impl(
- &app,
- &run_id,
- DOCTOR_ASSISTANT_TARGET_PROFILE,
- )?;
- for (issue_id, label) in collect_resolved_issues(&current, &next) {
- merge_issue_lists(
- &mut applied_issue_ids,
- std::iter::once(issue_id.clone()),
- );
- emit_doctor_assistant_progress(
+ "temp.debug.skip_agent_repair",
+ "Skip temporary gateway repair loop",
+ true,
+ "Remote Doctor debug mode leaves the primary gateway unchanged after temp bootstrap so the temporary gateway configuration can be inspected in isolation.",
+ None,
+ );
+ } else {
+ for round in 1..=DOCTOR_ASSISTANT_TEMP_REPAIR_ROUNDS {
+ run_remote_temp_gateway_agent_repair_round(
+ &pool,
+ &host_id,
&app,
&run_id,
- "agent_repair",
- format!("{label} fixed"),
- 0.6 + (round as f32 * 0.03),
+ &temp_profile,
+ &current,
round,
- Some(issue_id),
- Some(label),
- );
+ &mut steps,
+ )
+ .await?;
+ let next = diagnose_doctor_assistant_remote_impl(
+ &pool,
+ &host_id,
+ &app,
+ &run_id,
+ DOCTOR_ASSISTANT_TARGET_PROFILE,
+ )
+ .await?;
+ for (issue_id, label) in collect_resolved_issues(&current, &next) {
+ merge_issue_lists(&mut applied_issue_ids, std::iter::once(issue_id.clone()));
+ emit_doctor_assistant_progress(
+ &app,
+ &run_id,
+ "agent_repair",
+ format!("{label} fixed"),
+ 0.6 + (round as f32 * 0.03),
+ round,
+ Some(issue_id),
+ Some(label),
+ );
+ }
+ current = next;
+ if diagnose_doctor_assistant_status(&current) {
+ break;
+ }
}
- current = next;
- if diagnose_doctor_assistant_status(&current) {
- break;
+ }
+ Ok::<(), String>(())
+ }
+ .await;
+ if let Err(error) = temp_flow.as_ref() {
+ if doctor_assistant_is_remote_exec_timeout(error) {
+ let recovered = remote_wait_for_primary_gateway_recovery_after_timeout(
+ &pool, &host_id, &app, &run_id, &mut steps,
+ )
+ .await?;
+ if recovered {
+ temp_flow = Ok(());
+ } else {
+ temp_flow = Err(
+ "Temporary gateway repair timed out before health could be confirmed. Open Gateway Logs and inspect the latest repair output."
+ .into(),
+ );
}
}
- Ok(())
- })();
+ }
let temp_flow_error = temp_flow.as_ref().err().cloned();
let pending_reason = temp_flow_error
.as_ref()
@@ -4492,67 +4960,71 @@ pub async fn repair_doctor_assistant(
None,
None,
);
- let cleanup_result = run_local_temp_gateway_action(
+ let cleanup_result = run_remote_temp_gateway_action(
+ &pool,
+ &host_id,
RescueBotAction::Unset,
&temp_profile,
temp_port,
false,
&mut steps,
"temp.cleanup",
- );
- let _ = remove_doctor_temp_gateway_record(
- &paths,
- DOCTOR_ASSISTANT_TEMP_SCOPE_LOCAL,
- &temp_profile,
- );
- match cleanup_result {
- Ok(()) => match prune_local_temp_gateway_profile_roots(&paths.openclaw_dir) {
- Ok(removed) => append_step(
- &mut steps,
- "temp.cleanup.roots",
- "Delete temporary gateway profiles",
- true,
- if removed.is_empty() {
- "No temporary gateway profiles remained on disk".into()
- } else {
- format!(
- "Removed {} temporary gateway profile directorie(s)",
- removed.len()
- )
- },
- None,
- ),
- Err(error) => append_step(
- &mut steps,
- "temp.cleanup.roots",
- "Delete temporary gateway profiles",
- false,
- error,
- None,
- ),
- },
- Err(error) => append_step(
+ )
+ .await;
+ let _ = remove_doctor_temp_gateway_record(&paths, &host_id, &temp_profile);
+ if let Err(error) = cleanup_result {
+ append_step(
&mut steps,
"temp.cleanup.error",
"Cleanup temporary gateway",
false,
error,
None,
- ),
+ );
}
- if temp_flow_error.is_some() || !diagnose_doctor_assistant_status(&current) {
- let fallback_reason = pending_reason
- .clone()
- .or(temp_flow_error.clone())
- .unwrap_or_else(|| {
- "Temporary gateway repair finished with remaining issues".into()
- });
- match fallback_restore_local_primary_config(
- &app,
+ let main_root = resolve_remote_main_root(&pool, &host_id).await;
+ match prune_remote_temp_gateway_profile_roots(&pool, &host_id, &main_root).await {
+ Ok(removed) => append_step(
+ &mut steps,
+ "temp.cleanup.roots",
+ "Delete temporary gateway profiles",
+ true,
+ if removed.is_empty() {
+ "No temporary gateway profiles remained on disk".into()
+ } else {
+ format!(
+ "Removed {} temporary gateway profile directorie(s)",
+ removed.len()
+ )
+ },
+ None,
+ ),
+ Err(error) => append_step(
+ &mut steps,
+ "temp.cleanup.roots",
+ "Delete temporary gateway profiles",
+ false,
+ error,
+ None,
+ ),
+ }
+ if temp_flow_error.is_some() || !diagnose_doctor_assistant_status(&current) {
+ let fallback_reason = pending_reason
+ .clone()
+ .or(temp_flow_error.clone())
+ .unwrap_or_else(|| {
+ "Temporary gateway repair finished with remaining issues".into()
+ });
+ match fallback_restore_remote_primary_config(
+ &pool,
+ &host_id,
+ &app,
&run_id,
&mut steps,
&fallback_reason,
- ) {
+ )
+ .await
+ {
Ok(Some(next)) => {
for (issue_id, label) in collect_resolved_issues(&current, &next) {
merge_issue_lists(
@@ -4609,8 +5081,14 @@ pub async fn repair_doctor_assistant(
}
}
- let after =
- diagnose_doctor_assistant_local_impl(&app, &run_id, DOCTOR_ASSISTANT_TARGET_PROFILE)?;
+ let after = diagnose_doctor_assistant_remote_impl(
+ &pool,
+ &host_id,
+ &app,
+ &run_id,
+ DOCTOR_ASSISTANT_TARGET_PROFILE,
+ )
+ .await?;
for (issue_id, _label) in collect_resolved_issues(&current, &after) {
merge_issue_lists(&mut applied_issue_ids, std::iter::once(issue_id));
}
@@ -4652,467 +5130,6 @@ pub async fn repair_doctor_assistant(
after,
))
})
- .await
- .map_err(|error| error.to_string())?
-}
-
-#[tauri::command]
-pub async fn remote_repair_doctor_assistant(
- pool: State<'_, SshConnectionPool>,
- host_id: String,
- current_diagnosis: Option,
- temp_provider_profile_id: Option,
- app: AppHandle,
-) -> Result {
- let run_id = Uuid::new_v4().to_string();
- let paths = resolve_paths();
- let before = match current_diagnosis {
- Some(diagnosis) => diagnosis,
- None => {
- diagnose_doctor_assistant_remote_impl(
- &pool,
- &host_id,
- &app,
- &run_id,
- DOCTOR_ASSISTANT_TARGET_PROFILE,
- )
- .await?
- }
- };
- let attempted_at = format_timestamp_from_unix(unix_timestamp_secs());
- let (selected_issue_ids, skipped_issue_ids) =
- collect_repairable_primary_issue_ids(&before, &before.summary.selected_fix_issue_ids);
- let mut applied_issue_ids = Vec::new();
- let mut failed_issue_ids = Vec::new();
- let mut steps = Vec::new();
- let mut current = before.clone();
-
- if diagnose_doctor_assistant_status(&before) {
- append_step(
- &mut steps,
- "repair.noop",
- "No automatic repairs needed",
- true,
- "The primary gateway is already healthy",
- None,
- );
- return Ok(doctor_assistant_completed_result(
- attempted_at,
- "temporary".into(),
- selected_issue_ids,
- applied_issue_ids,
- skipped_issue_ids,
- failed_issue_ids,
- steps,
- before.clone(),
- before,
- ));
- }
-
- if !diagnose_doctor_assistant_status(&current) {
- let temp_profile = choose_temp_gateway_profile_name();
- let temp_port = choose_temp_gateway_port(resolve_main_port_from_diagnosis(&current));
- emit_doctor_assistant_progress(
- &app,
- &run_id,
- "bootstrap_temp_gateway",
- "Bootstrapping temporary gateway",
- 0.56,
- 0,
- None,
- None,
- );
- upsert_doctor_temp_gateway_record(
- &paths,
- build_temp_gateway_record(
- &host_id,
- &temp_profile,
- temp_port,
- "bootstrapping",
- resolve_main_port_from_diagnosis(&current),
- Some("bootstrap".into()),
- ),
- )?;
-
- let mut temp_flow = async {
- run_remote_temp_gateway_action(
- &pool,
- &host_id,
- RescueBotAction::Set,
- &temp_profile,
- temp_port,
- true,
- &mut steps,
- "temp.setup",
- )
- .await?;
- let main_root = resolve_remote_main_root(&pool, &host_id).await;
- if let Err(error) = write_remote_temp_gateway_marker(
- &pool,
- &host_id,
- &main_root,
- &host_id,
- &temp_profile,
- )
- .await
- {
- append_step(
- &mut steps,
- "temp.marker",
- "Mark temporary gateway ownership",
- false,
- error,
- None,
- );
- }
- emit_doctor_assistant_progress(
- &app,
- &run_id,
- "bootstrap_temp_gateway",
- "Syncing provider configuration into temporary gateway",
- 0.58,
- 0,
- None,
- None,
- );
- let (main_root, temp_root, donor_cfg) = sync_remote_temp_gateway_provider_context(
- &pool,
- &host_id,
- &temp_profile,
- temp_provider_profile_id.as_deref(),
- &mut steps,
- )
- .await?;
- let mut provider_identity = None;
- if let Err(error) = probe_remote_temp_gateway_agent_smoke(
- &pool,
- &host_id,
- &temp_profile,
- &mut steps,
- )
- .await
- {
- let should_retry_from_remote_auth_store = temp_provider_profile_id.is_none()
- && doctor_assistant_extract_temp_provider_setup_reason(&error).is_some();
- if !should_retry_from_remote_auth_store {
- return Err(error);
- }
- emit_doctor_assistant_progress(
- &app,
- &run_id,
- "bootstrap_temp_gateway",
- "Rebuilding temporary gateway provider from remote auth store",
- 0.62,
- 0,
- None,
- None,
- );
- rebuild_remote_temp_gateway_provider_context_from_auth_store(
- &pool,
- &host_id,
- &main_root,
- &temp_root,
- &donor_cfg,
- &mut steps,
- )
- .await?;
- probe_remote_temp_gateway_agent_smoke(
- &pool,
- &host_id,
- &temp_profile,
- &mut steps,
- )
- .await
- .map(|identity| provider_identity = Some(identity))?;
- } else {
- provider_identity = steps
- .iter()
- .rev()
- .find(|step| step.id == "temp.probe.agent.identity")
- .and_then(|step| {
- let detail = step.detail.trim();
- detail
- .strip_prefix("Temporary gateway replied using ")
- .and_then(|value| value.split_once('/'))
- .map(|(provider, model)| (provider.to_string(), model.to_string()))
- });
- }
- if let Some((provider, model)) = provider_identity.as_ref() {
- emit_doctor_assistant_progress(
- &app,
- &run_id,
- "bootstrap_temp_gateway",
- format!("Temporary gateway ready: {provider}/{model}"),
- 0.64,
- 0,
- None,
- None,
- );
- }
- upsert_doctor_temp_gateway_record(
- &paths,
- build_temp_gateway_record(
- &host_id,
- &temp_profile,
- temp_port,
- "repairing",
- resolve_main_port_from_diagnosis(&current),
- Some("repair".into()),
- ),
- )?;
-
- if DOCTOR_ASSISTANT_REMOTE_SKIP_AGENT_REPAIR {
- append_step(
- &mut steps,
- "temp.debug.skip_agent_repair",
- "Skip temporary gateway repair loop",
- true,
- "Remote Doctor debug mode leaves the primary gateway unchanged after temp bootstrap so the temporary gateway configuration can be inspected in isolation.",
- None,
- );
- } else {
- for round in 1..=DOCTOR_ASSISTANT_TEMP_REPAIR_ROUNDS {
- run_remote_temp_gateway_agent_repair_round(
- &pool,
- &host_id,
- &app,
- &run_id,
- &temp_profile,
- &current,
- round,
- &mut steps,
- )
- .await?;
- let next = diagnose_doctor_assistant_remote_impl(
- &pool,
- &host_id,
- &app,
- &run_id,
- DOCTOR_ASSISTANT_TARGET_PROFILE,
- )
- .await?;
- for (issue_id, label) in collect_resolved_issues(&current, &next) {
- merge_issue_lists(&mut applied_issue_ids, std::iter::once(issue_id.clone()));
- emit_doctor_assistant_progress(
- &app,
- &run_id,
- "agent_repair",
- format!("{label} fixed"),
- 0.6 + (round as f32 * 0.03),
- round,
- Some(issue_id),
- Some(label),
- );
- }
- current = next;
- if diagnose_doctor_assistant_status(&current) {
- break;
- }
- }
- }
- Ok::<(), String>(())
- }
- .await;
- if let Err(error) = temp_flow.as_ref() {
- if doctor_assistant_is_remote_exec_timeout(error) {
- let recovered = remote_wait_for_primary_gateway_recovery_after_timeout(
- &pool, &host_id, &app, &run_id, &mut steps,
- )
- .await?;
- if recovered {
- temp_flow = Ok(());
- } else {
- temp_flow = Err(
- "Temporary gateway repair timed out before health could be confirmed. Open Gateway Logs and inspect the latest repair output."
- .into(),
- );
- }
- }
- }
- let temp_flow_error = temp_flow.as_ref().err().cloned();
- let pending_reason = temp_flow_error
- .as_ref()
- .and_then(|error| doctor_assistant_extract_temp_provider_setup_reason(error));
-
- emit_doctor_assistant_progress(
- &app,
- &run_id,
- "cleanup",
- "Cleaning up temporary gateway",
- 0.94,
- 0,
- None,
- None,
- );
- let cleanup_result = run_remote_temp_gateway_action(
- &pool,
- &host_id,
- RescueBotAction::Unset,
- &temp_profile,
- temp_port,
- false,
- &mut steps,
- "temp.cleanup",
- )
- .await;
- let _ = remove_doctor_temp_gateway_record(&paths, &host_id, &temp_profile);
- if let Err(error) = cleanup_result {
- append_step(
- &mut steps,
- "temp.cleanup.error",
- "Cleanup temporary gateway",
- false,
- error,
- None,
- );
- }
- let main_root = resolve_remote_main_root(&pool, &host_id).await;
- match prune_remote_temp_gateway_profile_roots(&pool, &host_id, &main_root).await {
- Ok(removed) => append_step(
- &mut steps,
- "temp.cleanup.roots",
- "Delete temporary gateway profiles",
- true,
- if removed.is_empty() {
- "No temporary gateway profiles remained on disk".into()
- } else {
- format!(
- "Removed {} temporary gateway profile directorie(s)",
- removed.len()
- )
- },
- None,
- ),
- Err(error) => append_step(
- &mut steps,
- "temp.cleanup.roots",
- "Delete temporary gateway profiles",
- false,
- error,
- None,
- ),
- }
- if temp_flow_error.is_some() || !diagnose_doctor_assistant_status(&current) {
- let fallback_reason = pending_reason
- .clone()
- .or(temp_flow_error.clone())
- .unwrap_or_else(|| {
- "Temporary gateway repair finished with remaining issues".into()
- });
- match fallback_restore_remote_primary_config(
- &pool,
- &host_id,
- &app,
- &run_id,
- &mut steps,
- &fallback_reason,
- )
- .await
- {
- Ok(Some(next)) => {
- for (issue_id, label) in collect_resolved_issues(&current, &next) {
- merge_issue_lists(
- &mut applied_issue_ids,
- std::iter::once(issue_id.clone()),
- );
- emit_doctor_assistant_progress(
- &app,
- &run_id,
- "cleanup",
- format!("{label} fixed"),
- 0.94,
- 0,
- Some(issue_id),
- Some(label),
- );
- }
- current = next
- }
- Ok(None) => {}
- Err(error) => append_step(
- &mut steps,
- "repair.fallback.error",
- "Fallback restore primary config",
- false,
- error,
- None,
- ),
- }
- }
- if let Some(reason) = pending_reason {
- if !diagnose_doctor_assistant_status(&current) {
- emit_doctor_assistant_progress(
- &app, &run_id, "cleanup", &reason, 0.96, 0, None, None,
- );
- return Ok(doctor_assistant_pending_temp_provider_result(
- attempted_at,
- temp_profile,
- selected_issue_ids.clone(),
- applied_issue_ids.clone(),
- skipped_issue_ids.clone(),
- selected_issue_ids
- .iter()
- .filter(|id| !applied_issue_ids.contains(id))
- .cloned()
- .collect(),
- steps,
- before,
- current,
- temp_provider_profile_id,
- reason,
- ));
- }
- }
- }
-
- let after = diagnose_doctor_assistant_remote_impl(
- &pool,
- &host_id,
- &app,
- &run_id,
- DOCTOR_ASSISTANT_TARGET_PROFILE,
- )
- .await?;
- for (issue_id, _label) in collect_resolved_issues(&current, &after) {
- merge_issue_lists(&mut applied_issue_ids, std::iter::once(issue_id));
- }
- let remaining = after
- .issues
- .iter()
- .map(|issue| issue.id.clone())
- .collect::>();
- failed_issue_ids = selected_issue_ids
- .iter()
- .filter(|id| remaining.contains(id))
- .cloned()
- .collect();
-
- emit_doctor_assistant_progress(
- &app,
- &run_id,
- "cleanup",
- if diagnose_doctor_assistant_status(&after) {
- "Repair complete"
- } else {
- "Repair finished with remaining issues"
- },
- 1.0,
- 0,
- None,
- None,
- );
-
- Ok(doctor_assistant_completed_result(
- attempted_at,
- current.rescue_profile.clone(),
- selected_issue_ids,
- applied_issue_ids,
- skipped_issue_ids,
- failed_issue_ids,
- steps,
- before,
- after,
- ))
}
fn resolve_main_port_from_diagnosis(diagnosis: &RescuePrimaryDiagnosisResult) -> u16 {
diff --git a/src-tauri/src/commands/gateway.rs b/src-tauri/src/commands/gateway.rs
index ce38ceeb..e75dd4fe 100644
--- a/src-tauri/src/commands/gateway.rs
+++ b/src-tauri/src/commands/gateway.rs
@@ -5,17 +5,21 @@ pub async fn remote_restart_gateway(
pool: State<'_, SshConnectionPool>,
host_id: String,
) -> Result {
- pool.exec_login(&host_id, "openclaw gateway restart")
- .await?;
- Ok(true)
+ timed_async!("remote_restart_gateway", {
+ pool.exec_login(&host_id, "openclaw gateway restart")
+ .await?;
+ Ok(true)
+ })
}
#[tauri::command]
pub async fn restart_gateway() -> Result {
- tauri::async_runtime::spawn_blocking(move || {
- run_openclaw_raw(&["gateway", "restart"])?;
- Ok(true)
+ timed_async!("restart_gateway", {
+ tauri::async_runtime::spawn_blocking(move || {
+ run_openclaw_raw(&["gateway", "restart"])?;
+ Ok(true)
+ })
+ .await
+ .map_err(|e| e.to_string())?
})
- .await
- .map_err(|e| e.to_string())?
}
diff --git a/src-tauri/src/commands/instance.rs b/src-tauri/src/commands/instance.rs
index 421c903e..080dd83e 100644
--- a/src-tauri/src/commands/instance.rs
+++ b/src-tauri/src/commands/instance.rs
@@ -2,70 +2,80 @@ use super::*;
#[tauri::command]
pub fn set_active_openclaw_home(path: Option) -> Result {
- crate::cli_runner::set_active_openclaw_home_override(path)?;
- Ok(true)
+ timed_sync!("set_active_openclaw_home", {
+ crate::cli_runner::set_active_openclaw_home_override(path)?;
+ Ok(true)
+ })
}
#[tauri::command]
pub fn set_active_clawpal_data_dir(path: Option) -> Result {
- crate::cli_runner::set_active_clawpal_data_override(path)?;
- Ok(true)
+ timed_sync!("set_active_clawpal_data_dir", {
+ crate::cli_runner::set_active_clawpal_data_override(path)?;
+ Ok(true)
+ })
}
#[tauri::command]
pub fn local_openclaw_config_exists(openclaw_home: String) -> Result {
- let home = openclaw_home.trim();
- if home.is_empty() {
- return Ok(false);
- }
- let expanded = shellexpand::tilde(home).to_string();
- let config_path = PathBuf::from(expanded)
- .join(".openclaw")
- .join("openclaw.json");
- Ok(config_path.exists())
+ timed_sync!("local_openclaw_config_exists", {
+ let home = openclaw_home.trim();
+ if home.is_empty() {
+ return Ok(false);
+ }
+ let expanded = shellexpand::tilde(home).to_string();
+ let config_path = PathBuf::from(expanded)
+ .join(".openclaw")
+ .join("openclaw.json");
+ Ok(config_path.exists())
+ })
}
#[tauri::command]
pub fn local_openclaw_cli_available() -> Result {
- Ok(run_openclaw_raw(&["--version"]).is_ok())
+ timed_sync!("local_openclaw_cli_available", {
+ Ok(run_openclaw_raw(&["--version"]).is_ok())
+ })
}
#[tauri::command]
pub fn delete_local_instance_home(openclaw_home: String) -> Result {
- let home = openclaw_home.trim();
- if home.is_empty() {
- return Err("openclaw_home is required".to_string());
- }
- let expanded = shellexpand::tilde(home).to_string();
- let target = PathBuf::from(expanded);
- if !target.exists() {
- return Ok(true);
- }
+ timed_sync!("delete_local_instance_home", {
+ let home = openclaw_home.trim();
+ if home.is_empty() {
+ return Err("openclaw_home is required".to_string());
+ }
+ let expanded = shellexpand::tilde(home).to_string();
+ let target = PathBuf::from(expanded);
+ if !target.exists() {
+ return Ok(true);
+ }
- let canonical_target = target
- .canonicalize()
- .map_err(|e| format!("failed to resolve target path: {e}"))?;
- let user_home =
- dirs::home_dir().ok_or_else(|| "failed to resolve HOME directory".to_string())?;
- let allowed_root = user_home.join(".clawpal");
- let canonical_allowed_root = allowed_root
- .canonicalize()
- .map_err(|e| format!("failed to resolve ~/.clawpal path: {e}"))?;
-
- if !canonical_target.starts_with(&canonical_allowed_root) {
- return Err("refuse to delete path outside ~/.clawpal".to_string());
- }
- if canonical_target == canonical_allowed_root {
- return Err("refuse to delete ~/.clawpal root".to_string());
- }
+ let canonical_target = target
+ .canonicalize()
+ .map_err(|e| format!("failed to resolve target path: {e}"))?;
+ let user_home =
+ dirs::home_dir().ok_or_else(|| "failed to resolve HOME directory".to_string())?;
+ let allowed_root = user_home.join(".clawpal");
+ let canonical_allowed_root = allowed_root
+ .canonicalize()
+ .map_err(|e| format!("failed to resolve ~/.clawpal path: {e}"))?;
+
+ if !canonical_target.starts_with(&canonical_allowed_root) {
+ return Err("refuse to delete path outside ~/.clawpal".to_string());
+ }
+ if canonical_target == canonical_allowed_root {
+ return Err("refuse to delete ~/.clawpal root".to_string());
+ }
- fs::remove_dir_all(&canonical_target).map_err(|e| {
- format!(
- "failed to delete '{}': {e}",
- canonical_target.to_string_lossy()
- )
- })?;
- Ok(true)
+ fs::remove_dir_all(&canonical_target).map_err(|e| {
+ format!(
+ "failed to delete '{}': {e}",
+ canonical_target.to_string_lossy()
+ )
+ })?;
+ Ok(true)
+ })
}
#[derive(Debug, Serialize, Deserialize)]
@@ -137,7 +147,9 @@ pub async fn ensure_access_profile(
instance_id: String,
transport: String,
) -> Result {
- ensure_access_profile_impl(instance_id, transport).await
+ timed_async!("ensure_access_profile", {
+ ensure_access_profile_impl(instance_id, transport).await
+ })
}
pub async fn ensure_access_profile_for_test(
@@ -165,64 +177,71 @@ pub async fn record_install_experience(
goal: String,
store: State<'_, InstallSessionStore>,
) -> Result {
- let id = session_id.trim();
- if id.is_empty() {
- return Err("session_id is required".to_string());
- }
- let session = store
- .get(id)?
- .ok_or_else(|| format!("install session not found: {id}"))?;
- if !matches!(session.state, InstallState::Ready) {
- return Err(format!(
- "install session is not ready: {}",
- session.state.as_str()
- ));
- }
+ timed_async!("record_install_experience", {
+ let id = session_id.trim();
+ if id.is_empty() {
+ return Err("session_id is required".to_string());
+ }
+ let session = store
+ .get(id)?
+ .ok_or_else(|| format!("install session not found: {id}"))?;
+ if !matches!(session.state, InstallState::Ready) {
+ return Err(format!(
+ "install session is not ready: {}",
+ session.state.as_str()
+ ));
+ }
- let transport = session.method.as_str().to_string();
- let paths = resolve_paths();
- let discovery_store = AccessDiscoveryStore::new(paths.clawpal_dir.join("access-discovery"));
- let profile = discovery_store.load_profile(&instance_id)?;
- let successful_chain = profile.map(|p| p.working_chain).unwrap_or_default();
- let commands = value_array_as_strings(session.artifacts.get("executed_commands"));
-
- let experience = ExecutionExperience {
- instance_id: instance_id.clone(),
- goal,
- transport,
- method: session.method.as_str().to_string(),
- commands,
- successful_chain,
- recorded_at: unix_timestamp_secs(),
- };
- let total_count = discovery_store.save_experience(experience)?;
- Ok(RecordInstallExperienceResult {
- saved: true,
- total_count,
+ let transport = session.method.as_str().to_string();
+ let paths = resolve_paths();
+ let discovery_store = AccessDiscoveryStore::new(paths.clawpal_dir.join("access-discovery"));
+ let profile = discovery_store.load_profile(&instance_id)?;
+ let successful_chain = profile.map(|p| p.working_chain).unwrap_or_default();
+ let commands = value_array_as_strings(session.artifacts.get("executed_commands"));
+
+ let experience = ExecutionExperience {
+ instance_id: instance_id.clone(),
+ goal,
+ transport,
+ method: session.method.as_str().to_string(),
+ commands,
+ successful_chain,
+ recorded_at: unix_timestamp_secs(),
+ };
+ let total_count = discovery_store.save_experience(experience)?;
+ Ok(RecordInstallExperienceResult {
+ saved: true,
+ total_count,
+ })
})
}
#[tauri::command]
pub fn list_registered_instances() -> Result, String> {
- let registry = clawpal_core::instance::InstanceRegistry::load().map_err(|e| e.to_string())?;
- // Best-effort self-heal: persist normalized instance ids (e.g., legacy empty SSH ids).
- let _ = registry.save();
- Ok(registry.list())
+ timed_sync!("list_registered_instances", {
+ let registry =
+ clawpal_core::instance::InstanceRegistry::load().map_err(|e| e.to_string())?;
+ // Best-effort self-heal: persist normalized instance ids (e.g., legacy empty SSH ids).
+ let _ = registry.save();
+ Ok(registry.list())
+ })
}
#[tauri::command]
pub fn delete_registered_instance(instance_id: String) -> Result {
- let id = instance_id.trim();
- if id.is_empty() || id == "local" {
- return Ok(false);
- }
- let mut registry =
- clawpal_core::instance::InstanceRegistry::load().map_err(|e| e.to_string())?;
- let removed = registry.remove(id).is_some();
- if removed {
- registry.save().map_err(|e| e.to_string())?;
- }
- Ok(removed)
+ timed_sync!("delete_registered_instance", {
+ let id = instance_id.trim();
+ if id.is_empty() || id == "local" {
+ return Ok(false);
+ }
+ let mut registry =
+ clawpal_core::instance::InstanceRegistry::load().map_err(|e| e.to_string())?;
+ let removed = registry.remove(id).is_some();
+ if removed {
+ registry.save().map_err(|e| e.to_string())?;
+ }
+ Ok(removed)
+ })
}
#[tauri::command]
@@ -231,9 +250,11 @@ pub async fn connect_docker_instance(
label: Option,
instance_id: Option,
) -> Result {
- clawpal_core::connect::connect_docker(&home, label.as_deref(), instance_id.as_deref())
- .await
- .map_err(|e| e.to_string())
+ timed_async!("connect_docker_instance", {
+ clawpal_core::connect::connect_docker(&home, label.as_deref(), instance_id.as_deref())
+ .await
+ .map_err(|e| e.to_string())
+ })
}
#[tauri::command]
@@ -242,36 +263,40 @@ pub async fn connect_local_instance(
label: Option,
instance_id: Option,
) -> Result {
- clawpal_core::connect::connect_local(&home, label.as_deref(), instance_id.as_deref())
- .await
- .map_err(|e| e.to_string())
+ timed_async!("connect_local_instance", {
+ clawpal_core::connect::connect_local(&home, label.as_deref(), instance_id.as_deref())
+ .await
+ .map_err(|e| e.to_string())
+ })
}
#[tauri::command]
pub async fn connect_ssh_instance(
host_id: String,
) -> Result {
- let hosts = read_hosts_from_registry()?;
- let host = hosts
- .into_iter()
- .find(|h| h.id == host_id)
- .ok_or_else(|| format!("No SSH host config with id: {host_id}"))?;
- // Register the SSH host as an instance in the instance registry
- // (skip the actual SSH connectivity probe — the caller already connected)
- let instance = clawpal_core::instance::Instance {
- id: host.id.clone(),
- instance_type: clawpal_core::instance::InstanceType::RemoteSsh,
- label: host.label.clone(),
- openclaw_home: None,
- clawpal_data_dir: None,
- ssh_host_config: Some(host),
- };
- let mut registry =
- clawpal_core::instance::InstanceRegistry::load().map_err(|e| e.to_string())?;
- let _ = registry.remove(&instance.id);
- registry.add(instance.clone()).map_err(|e| e.to_string())?;
- registry.save().map_err(|e| e.to_string())?;
- Ok(instance)
+ timed_async!("connect_ssh_instance", {
+ let hosts = read_hosts_from_registry()?;
+ let host = hosts
+ .into_iter()
+ .find(|h| h.id == host_id)
+ .ok_or_else(|| format!("No SSH host config with id: {host_id}"))?;
+ // Register the SSH host as an instance in the instance registry
+ // (skip the actual SSH connectivity probe — the caller already connected)
+ let instance = clawpal_core::instance::Instance {
+ id: host.id.clone(),
+ instance_type: clawpal_core::instance::InstanceType::RemoteSsh,
+ label: host.label.clone(),
+ openclaw_home: None,
+ clawpal_data_dir: None,
+ ssh_host_config: Some(host),
+ };
+ let mut registry =
+ clawpal_core::instance::InstanceRegistry::load().map_err(|e| e.to_string())?;
+ let _ = registry.remove(&instance.id);
+ registry.add(instance.clone()).map_err(|e| e.to_string())?;
+ registry.save().map_err(|e| e.to_string())?;
+ Ok(instance)
+ })
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -363,112 +388,114 @@ pub fn migrate_legacy_instances(
legacy_docker_instances: Vec,
legacy_open_tab_ids: Vec,
) -> Result {
- let paths = resolve_paths();
- let mut registry =
- clawpal_core::instance::InstanceRegistry::load().map_err(|e| e.to_string())?;
-
- // Ensure local instance exists for old users.
- if registry.get("local").is_none() {
- upsert_registry_instance(
- &mut registry,
- clawpal_core::instance::Instance {
- id: "local".to_string(),
- instance_type: clawpal_core::instance::InstanceType::Local,
- label: "Local".to_string(),
- openclaw_home: None,
- clawpal_data_dir: None,
- ssh_host_config: None,
- },
- )?;
- }
-
- let imported_ssh_hosts = migrate_legacy_ssh_file(&paths, &mut registry)?;
-
- let mut imported_docker_instances = 0usize;
- for docker in legacy_docker_instances {
- let id = docker.id.trim();
- if id.is_empty() {
- continue;
- }
- let label = if docker.label.trim().is_empty() {
- fallback_label_from_instance_id(id)
- } else {
- docker.label.clone()
- };
- upsert_registry_instance(
- &mut registry,
- clawpal_core::instance::Instance {
- id: id.to_string(),
- instance_type: clawpal_core::instance::InstanceType::Docker,
- label,
- openclaw_home: docker.openclaw_home.clone(),
- clawpal_data_dir: docker.clawpal_data_dir.clone(),
- ssh_host_config: None,
- },
- )?;
- imported_docker_instances += 1;
- }
+ timed_sync!("migrate_legacy_instances", {
+ let paths = resolve_paths();
+ let mut registry =
+ clawpal_core::instance::InstanceRegistry::load().map_err(|e| e.to_string())?;
- let mut imported_open_tab_instances = 0usize;
- for tab_id in legacy_open_tab_ids {
- let id = tab_id.trim();
- if id.is_empty() {
- continue;
- }
- if registry.get(id).is_some() {
- continue;
- }
- if id == "local" {
- continue;
- }
- if id.starts_with("docker:") {
+ // Ensure local instance exists for old users.
+ if registry.get("local").is_none() {
upsert_registry_instance(
&mut registry,
clawpal_core::instance::Instance {
- id: id.to_string(),
- instance_type: clawpal_core::instance::InstanceType::Docker,
- label: fallback_label_from_instance_id(id),
+ id: "local".to_string(),
+ instance_type: clawpal_core::instance::InstanceType::Local,
+ label: "Local".to_string(),
openclaw_home: None,
clawpal_data_dir: None,
ssh_host_config: None,
},
)?;
- imported_open_tab_instances += 1;
- continue;
}
- if id.starts_with("ssh:") {
- let host_alias = id.strip_prefix("ssh:").unwrap_or("").to_string();
+
+ let imported_ssh_hosts = migrate_legacy_ssh_file(&paths, &mut registry)?;
+
+ let mut imported_docker_instances = 0usize;
+ for docker in legacy_docker_instances {
+ let id = docker.id.trim();
+ if id.is_empty() {
+ continue;
+ }
+ let label = if docker.label.trim().is_empty() {
+ fallback_label_from_instance_id(id)
+ } else {
+ docker.label.clone()
+ };
upsert_registry_instance(
&mut registry,
clawpal_core::instance::Instance {
id: id.to_string(),
- instance_type: clawpal_core::instance::InstanceType::RemoteSsh,
- label: fallback_label_from_instance_id(id),
- openclaw_home: None,
- clawpal_data_dir: None,
- ssh_host_config: Some(clawpal_core::instance::SshHostConfig {
- id: id.to_string(),
- label: fallback_label_from_instance_id(id),
- host: host_alias,
- port: 22,
- username: String::new(),
- auth_method: "ssh_config".to_string(),
- key_path: None,
- password: None,
- passphrase: None,
- }),
+ instance_type: clawpal_core::instance::InstanceType::Docker,
+ label,
+ openclaw_home: docker.openclaw_home.clone(),
+ clawpal_data_dir: docker.clawpal_data_dir.clone(),
+ ssh_host_config: None,
},
)?;
- imported_open_tab_instances += 1;
+ imported_docker_instances += 1;
+ }
+
+ let mut imported_open_tab_instances = 0usize;
+ for tab_id in legacy_open_tab_ids {
+ let id = tab_id.trim();
+ if id.is_empty() {
+ continue;
+ }
+ if registry.get(id).is_some() {
+ continue;
+ }
+ if id == "local" {
+ continue;
+ }
+ if id.starts_with("docker:") {
+ upsert_registry_instance(
+ &mut registry,
+ clawpal_core::instance::Instance {
+ id: id.to_string(),
+ instance_type: clawpal_core::instance::InstanceType::Docker,
+ label: fallback_label_from_instance_id(id),
+ openclaw_home: None,
+ clawpal_data_dir: None,
+ ssh_host_config: None,
+ },
+ )?;
+ imported_open_tab_instances += 1;
+ continue;
+ }
+ if id.starts_with("ssh:") {
+ let host_alias = id.strip_prefix("ssh:").unwrap_or("").to_string();
+ upsert_registry_instance(
+ &mut registry,
+ clawpal_core::instance::Instance {
+ id: id.to_string(),
+ instance_type: clawpal_core::instance::InstanceType::RemoteSsh,
+ label: fallback_label_from_instance_id(id),
+ openclaw_home: None,
+ clawpal_data_dir: None,
+ ssh_host_config: Some(clawpal_core::instance::SshHostConfig {
+ id: id.to_string(),
+ label: fallback_label_from_instance_id(id),
+ host: host_alias,
+ port: 22,
+ username: String::new(),
+ auth_method: "ssh_config".to_string(),
+ key_path: None,
+ password: None,
+ passphrase: None,
+ }),
+ },
+ )?;
+ imported_open_tab_instances += 1;
+ }
}
- }
- registry.save().map_err(|e| e.to_string())?;
- let total_instances = registry.list().len();
- Ok(LegacyMigrationResult {
- imported_ssh_hosts,
- imported_docker_instances,
- imported_open_tab_instances,
- total_instances,
+ registry.save().map_err(|e| e.to_string())?;
+ let total_instances = registry.list().len();
+ Ok(LegacyMigrationResult {
+ imported_ssh_hosts,
+ imported_docker_instances,
+ imported_open_tab_instances,
+ total_instances,
+ })
})
}
diff --git a/src-tauri/src/commands/logs.rs b/src-tauri/src/commands/logs.rs
index 4b5b5ee5..cf88facf 100644
--- a/src-tauri/src/commands/logs.rs
+++ b/src-tauri/src/commands/logs.rs
@@ -70,18 +70,20 @@ pub async fn remote_read_app_log(
host_id: String,
lines: Option,
) -> Result {
- let n = clamp_lines(lines);
- let cmd = clawpal_core::doctor::remote_clawpal_log_tail_script(n, "app");
- log_debug(&format!(
- "remote_read_app_log start host_id={host_id} lines={n} cmd={cmd}"
- ));
- let result = pool.exec(&host_id, &cmd).await.map_err(|error| {
+ timed_async!("remote_read_app_log", {
+ let n = clamp_lines(lines);
+ let cmd = clawpal_core::doctor::remote_clawpal_log_tail_script(n, "app");
log_debug(&format!(
- "remote_read_app_log failed host_id={host_id} error={error}"
+ "remote_read_app_log start host_id={host_id} lines={n} cmd={cmd}"
));
- error
- })?;
- Ok(result.stdout)
+ let result = pool.exec(&host_id, &cmd).await.map_err(|error| {
+ log_debug(&format!(
+ "remote_read_app_log failed host_id={host_id} error={error}"
+ ));
+ error
+ })?;
+ Ok(result.stdout)
+ })
}
#[tauri::command]
@@ -90,18 +92,20 @@ pub async fn remote_read_error_log(
host_id: String,
lines: Option,
) -> Result {
- let n = clamp_lines(lines);
- let cmd = clawpal_core::doctor::remote_clawpal_log_tail_script(n, "error");
- log_debug(&format!(
- "remote_read_error_log start host_id={host_id} lines={n} cmd={cmd}"
- ));
- let result = pool.exec(&host_id, &cmd).await.map_err(|error| {
+ timed_async!("remote_read_error_log", {
+ let n = clamp_lines(lines);
+ let cmd = clawpal_core::doctor::remote_clawpal_log_tail_script(n, "error");
log_debug(&format!(
- "remote_read_error_log failed host_id={host_id} error={error}"
+ "remote_read_error_log start host_id={host_id} lines={n} cmd={cmd}"
));
- error
- })?;
- Ok(result.stdout)
+ let result = pool.exec(&host_id, &cmd).await.map_err(|error| {
+ log_debug(&format!(
+ "remote_read_error_log failed host_id={host_id} error={error}"
+ ));
+ error
+ })?;
+ Ok(result.stdout)
+ })
}
#[tauri::command]
@@ -110,18 +114,20 @@ pub async fn remote_read_helper_log(
host_id: String,
lines: Option,
) -> Result {
- let n = clamp_lines(lines);
- let cmd = clawpal_core::doctor::remote_clawpal_log_tail_script(n, "helper");
- log_debug(&format!(
- "remote_read_helper_log start host_id={host_id} lines={n} cmd={cmd}"
- ));
- let result = pool.exec(&host_id, &cmd).await.map_err(|error| {
+ timed_async!("remote_read_helper_log", {
+ let n = clamp_lines(lines);
+ let cmd = clawpal_core::doctor::remote_clawpal_log_tail_script(n, "helper");
log_debug(&format!(
- "remote_read_helper_log failed host_id={host_id} error={error}"
+ "remote_read_helper_log start host_id={host_id} lines={n} cmd={cmd}"
));
- error
- })?;
- Ok(result.stdout)
+ let result = pool.exec(&host_id, &cmd).await.map_err(|error| {
+ log_debug(&format!(
+ "remote_read_helper_log failed host_id={host_id} error={error}"
+ ));
+ error
+ })?;
+ Ok(result.stdout)
+ })
}
#[tauri::command]
@@ -130,18 +136,20 @@ pub async fn remote_read_gateway_log(
host_id: String,
lines: Option,
) -> Result {
- let n = clamp_lines(lines);
- let cmd = remote_gateway_log_command(n);
- log_debug(&format!(
- "remote_read_gateway_log start host_id={host_id} lines={n} cmd={cmd}"
- ));
- let result = pool.exec(&host_id, &cmd).await.map_err(|error| {
+ timed_async!("remote_read_gateway_log", {
+ let n = clamp_lines(lines);
+ let cmd = remote_gateway_log_command(n);
log_debug(&format!(
- "remote_read_gateway_log failed host_id={host_id} error={error}"
+ "remote_read_gateway_log start host_id={host_id} lines={n} cmd={cmd}"
));
- error
- })?;
- Ok(result.stdout)
+ let result = pool.exec(&host_id, &cmd).await.map_err(|error| {
+ log_debug(&format!(
+ "remote_read_gateway_log failed host_id={host_id} error={error}"
+ ));
+ error
+ })?;
+ Ok(result.stdout)
+ })
}
#[tauri::command]
@@ -150,16 +158,18 @@ pub async fn remote_read_gateway_error_log(
host_id: String,
lines: Option,
) -> Result {
- let n = clamp_lines(lines);
- let cmd = clawpal_core::doctor::remote_gateway_error_log_tail_script(n);
- log_debug(&format!(
- "remote_read_gateway_error_log start host_id={host_id} lines={n} cmd={cmd}"
- ));
- let result = pool.exec(&host_id, &cmd).await.map_err(|error| {
+ timed_async!("remote_read_gateway_error_log", {
+ let n = clamp_lines(lines);
+ let cmd = clawpal_core::doctor::remote_gateway_error_log_tail_script(n);
log_debug(&format!(
- "remote_read_gateway_error_log failed host_id={host_id} error={error}"
+ "remote_read_gateway_error_log start host_id={host_id} lines={n} cmd={cmd}"
));
- error
- })?;
- Ok(result.stdout)
+ let result = pool.exec(&host_id, &cmd).await.map_err(|error| {
+ log_debug(&format!(
+ "remote_read_gateway_error_log failed host_id={host_id} error={error}"
+ ));
+ error
+ })?;
+ Ok(result.stdout)
+ })
}
diff --git a/src-tauri/src/commands/mod.rs b/src-tauri/src/commands/mod.rs
index 6a35c54a..eb0b4849 100644
--- a/src-tauri/src/commands/mod.rs
+++ b/src-tauri/src/commands/mod.rs
@@ -1,3 +1,25 @@
+/// Macro for wrapping synchronous command bodies with timing.
+macro_rules! timed_sync {
+ ($name:expr, $body:block) => {{
+ let __start = std::time::Instant::now();
+ let __result = $body;
+ let __elapsed_ms = __start.elapsed().as_millis() as u64;
+ crate::commands::perf::record_timing($name, __elapsed_ms);
+ __result
+ }};
+}
+
+/// Macro for wrapping async command bodies with timing.
+macro_rules! timed_async {
+ ($name:expr, $body:block) => {{
+ let __start = std::time::Instant::now();
+ let __result = $body;
+ let __elapsed_ms = __start.elapsed().as_millis() as u64;
+ crate::commands::perf::record_timing($name, __elapsed_ms);
+ __result
+ }};
+}
+
use serde::{Deserialize, Serialize};
use serde_json::{json, Map, Value};
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet, VecDeque};
@@ -44,6 +66,7 @@ pub mod instance;
pub mod logs;
pub mod model;
pub mod overview;
+pub mod perf;
pub mod precheck;
pub mod preferences;
pub mod profiles;
@@ -85,6 +108,8 @@ pub use model::*;
#[allow(unused_imports)]
pub use overview::*;
#[allow(unused_imports)]
+pub use perf::*;
+#[allow(unused_imports)]
pub use precheck::*;
#[allow(unused_imports)]
pub use preferences::*;
diff --git a/src-tauri/src/commands/model.rs b/src-tauri/src/commands/model.rs
index 70a4ab38..26c8b3a6 100644
--- a/src-tauri/src/commands/model.rs
+++ b/src-tauri/src/commands/model.rs
@@ -9,119 +9,131 @@ pub fn update_channel_config(
allowlist: Vec,
model: Option,
) -> Result {
- if path.trim().is_empty() {
- return Err("channel path is required".into());
- }
- let paths = resolve_paths();
- let mut cfg = read_openclaw_config(&paths)?;
- let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?;
- set_nested_value(
- &mut cfg,
- &format!("{path}.type"),
- channel_type.map(Value::String),
- )?;
- set_nested_value(&mut cfg, &format!("{path}.mode"), mode.map(Value::String))?;
- let allowlist_values = allowlist.into_iter().map(Value::String).collect::>();
- set_nested_value(
- &mut cfg,
- &format!("{path}.allowlist"),
- Some(Value::Array(allowlist_values)),
- )?;
- set_nested_value(&mut cfg, &format!("{path}.model"), model.map(Value::String))?;
- write_config_with_snapshot(&paths, ¤t, &cfg, "update-channel")?;
- Ok(true)
+ timed_sync!("update_channel_config", {
+ if path.trim().is_empty() {
+ return Err("channel path is required".into());
+ }
+ let paths = resolve_paths();
+ let mut cfg = read_openclaw_config(&paths)?;
+ let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?;
+ set_nested_value(
+ &mut cfg,
+ &format!("{path}.type"),
+ channel_type.map(Value::String),
+ )?;
+ set_nested_value(&mut cfg, &format!("{path}.mode"), mode.map(Value::String))?;
+ let allowlist_values = allowlist.into_iter().map(Value::String).collect::>();
+ set_nested_value(
+ &mut cfg,
+ &format!("{path}.allowlist"),
+ Some(Value::Array(allowlist_values)),
+ )?;
+ set_nested_value(&mut cfg, &format!("{path}.model"), model.map(Value::String))?;
+ write_config_with_snapshot(&paths, ¤t, &cfg, "update-channel")?;
+ Ok(true)
+ })
}
/// List current channel→agent bindings from config.
#[tauri::command]
pub fn delete_channel_node(path: String) -> Result {
- if path.trim().is_empty() {
- return Err("channel path is required".into());
- }
- let paths = resolve_paths();
- let mut cfg = read_openclaw_config(&paths)?;
- let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?;
- let before = cfg.to_string();
- set_nested_value(&mut cfg, &path, None)?;
- if cfg.to_string() == before {
- return Ok(false);
- }
- write_config_with_snapshot(&paths, ¤t, &cfg, "delete-channel")?;
- Ok(true)
+ timed_sync!("delete_channel_node", {
+ if path.trim().is_empty() {
+ return Err("channel path is required".into());
+ }
+ let paths = resolve_paths();
+ let mut cfg = read_openclaw_config(&paths)?;
+ let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?;
+ let before = cfg.to_string();
+ set_nested_value(&mut cfg, &path, None)?;
+ if cfg.to_string() == before {
+ return Ok(false);
+ }
+ write_config_with_snapshot(&paths, ¤t, &cfg, "delete-channel")?;
+ Ok(true)
+ })
}
#[tauri::command]
pub fn set_global_model(model_value: Option) -> Result {
- let paths = resolve_paths();
- let mut cfg = read_openclaw_config(&paths)?;
- let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?;
- let model = model_value
- .map(|v| v.trim().to_string())
- .filter(|v| !v.is_empty());
- // If existing model is an object (has fallbacks etc.), only update "primary" inside it
- if let Some(existing) = cfg.pointer_mut("/agents/defaults/model") {
- if let Some(model_obj) = existing.as_object_mut() {
- let sync_model_value = match model.clone() {
- Some(v) => {
- model_obj.insert("primary".into(), Value::String(v.clone()));
- Some(v)
- }
- None => {
- model_obj.remove("primary");
- None
- }
- };
- write_config_with_snapshot(&paths, ¤t, &cfg, "set-global-model")?;
- maybe_sync_main_auth_for_model_value(&paths, sync_model_value)?;
- return Ok(true);
+ timed_sync!("set_global_model", {
+ let paths = resolve_paths();
+ let mut cfg = read_openclaw_config(&paths)?;
+ let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?;
+ let model = model_value
+ .map(|v| v.trim().to_string())
+ .filter(|v| !v.is_empty());
+ // If existing model is an object (has fallbacks etc.), only update "primary" inside it
+ if let Some(existing) = cfg.pointer_mut("/agents/defaults/model") {
+ if let Some(model_obj) = existing.as_object_mut() {
+ let sync_model_value = match model.clone() {
+ Some(v) => {
+ model_obj.insert("primary".into(), Value::String(v.clone()));
+ Some(v)
+ }
+ None => {
+ model_obj.remove("primary");
+ None
+ }
+ };
+ write_config_with_snapshot(&paths, ¤t, &cfg, "set-global-model")?;
+ maybe_sync_main_auth_for_model_value(&paths, sync_model_value)?;
+ return Ok(true);
+ }
}
- }
- // Fallback: plain string or missing — set the whole value
- set_nested_value(&mut cfg, "agents.defaults.model", model.map(Value::String))?;
- write_config_with_snapshot(&paths, ¤t, &cfg, "set-global-model")?;
- let model_to_sync = cfg
- .pointer("/agents/defaults/model")
- .and_then(read_model_value);
- maybe_sync_main_auth_for_model_value(&paths, model_to_sync)?;
- Ok(true)
+ // Fallback: plain string or missing — set the whole value
+ set_nested_value(&mut cfg, "agents.defaults.model", model.map(Value::String))?;
+ write_config_with_snapshot(&paths, ¤t, &cfg, "set-global-model")?;
+ let model_to_sync = cfg
+ .pointer("/agents/defaults/model")
+ .and_then(read_model_value);
+ maybe_sync_main_auth_for_model_value(&paths, model_to_sync)?;
+ Ok(true)
+ })
}
#[tauri::command]
pub fn set_agent_model(agent_id: String, model_value: Option) -> Result {
- if agent_id.trim().is_empty() {
- return Err("agent id is required".into());
- }
- let paths = resolve_paths();
- let mut cfg = read_openclaw_config(&paths)?;
- let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?;
- let value = model_value
- .map(|v| v.trim().to_string())
- .filter(|v| !v.is_empty());
- set_agent_model_value(&mut cfg, &agent_id, value)?;
- write_config_with_snapshot(&paths, ¤t, &cfg, "set-agent-model")?;
- Ok(true)
+ timed_sync!("set_agent_model", {
+ if agent_id.trim().is_empty() {
+ return Err("agent id is required".into());
+ }
+ let paths = resolve_paths();
+ let mut cfg = read_openclaw_config(&paths)?;
+ let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?;
+ let value = model_value
+ .map(|v| v.trim().to_string())
+ .filter(|v| !v.is_empty());
+ set_agent_model_value(&mut cfg, &agent_id, value)?;
+ write_config_with_snapshot(&paths, ¤t, &cfg, "set-agent-model")?;
+ Ok(true)
+ })
}
#[tauri::command]
pub fn set_channel_model(path: String, model_value: Option) -> Result {
- if path.trim().is_empty() {
- return Err("channel path is required".into());
- }
- let paths = resolve_paths();
- let mut cfg = read_openclaw_config(&paths)?;
- let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?;
- let value = model_value
- .map(|v| v.trim().to_string())
- .filter(|v| !v.is_empty());
- set_nested_value(&mut cfg, &format!("{path}.model"), value.map(Value::String))?;
- write_config_with_snapshot(&paths, ¤t, &cfg, "set-channel-model")?;
- Ok(true)
+ timed_sync!("set_channel_model", {
+ if path.trim().is_empty() {
+ return Err("channel path is required".into());
+ }
+ let paths = resolve_paths();
+ let mut cfg = read_openclaw_config(&paths)?;
+ let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?;
+ let value = model_value
+ .map(|v| v.trim().to_string())
+ .filter(|v| !v.is_empty());
+ set_nested_value(&mut cfg, &format!("{path}.model"), value.map(Value::String))?;
+ write_config_with_snapshot(&paths, ¤t, &cfg, "set-channel-model")?;
+ Ok(true)
+ })
}
#[tauri::command]
pub fn list_model_bindings() -> Result, String> {
- let paths = resolve_paths();
- let cfg = read_openclaw_config(&paths)?;
- let profiles = load_model_profiles(&paths);
- Ok(collect_model_bindings(&cfg, &profiles))
+ timed_sync!("list_model_bindings", {
+ let paths = resolve_paths();
+ let cfg = read_openclaw_config(&paths)?;
+ let profiles = load_model_profiles(&paths);
+ Ok(collect_model_bindings(&cfg, &profiles))
+ })
}
diff --git a/src-tauri/src/commands/overview.rs b/src-tauri/src/commands/overview.rs
index e5a3e93c..c8f8c16b 100644
--- a/src-tauri/src/commands/overview.rs
+++ b/src-tauri/src/commands/overview.rs
@@ -292,12 +292,14 @@ async fn remote_channels_runtime_snapshot_impl(
#[tauri::command]
pub async fn get_instance_config_snapshot() -> Result {
- tauri::async_runtime::spawn_blocking(|| {
- let cfg = read_openclaw_config(&resolve_paths())?;
- Ok(extract_instance_config_snapshot(&cfg))
+ timed_async!("get_instance_config_snapshot", {
+ tauri::async_runtime::spawn_blocking(|| {
+ let cfg = read_openclaw_config(&resolve_paths())?;
+ Ok(extract_instance_config_snapshot(&cfg))
+ })
+ .await
+ .map_err(|error| error.to_string())?
})
- .await
- .map_err(|error| error.to_string())?
}
#[tauri::command]
@@ -305,21 +307,25 @@ pub async fn remote_get_instance_config_snapshot(
pool: State<'_, SshConnectionPool>,
host_id: String,
) -> Result {
- let (_, _, cfg) = remote_read_openclaw_config_text_and_json(&pool, &host_id).await?;
- Ok(extract_instance_config_snapshot(&cfg))
+ timed_async!("remote_get_instance_config_snapshot", {
+ let (_, _, cfg) = remote_read_openclaw_config_text_and_json(&pool, &host_id).await?;
+ Ok(extract_instance_config_snapshot(&cfg))
+ })
}
#[tauri::command]
pub async fn get_instance_runtime_snapshot(
cache: tauri::State<'_, crate::cli_runner::CliCache>,
) -> Result {
- let status = get_status_light().await?;
- let agents = list_agents_overview(cache).await?;
- Ok(InstanceRuntimeSnapshot {
- global_default_model: status.global_default_model.clone(),
- fallback_models: status.fallback_models.clone(),
- status,
- agents,
+ timed_async!("get_instance_runtime_snapshot", {
+ let status = get_status_light().await?;
+ let agents = list_agents_overview(cache).await?;
+ Ok(InstanceRuntimeSnapshot {
+ global_default_model: status.global_default_model.clone(),
+ fallback_models: status.fallback_models.clone(),
+ status,
+ agents,
+ })
})
}
@@ -328,17 +334,21 @@ pub async fn remote_get_instance_runtime_snapshot(
pool: State<'_, SshConnectionPool>,
host_id: String,
) -> Result {
- remote_instance_runtime_snapshot_impl(&pool, &host_id).await
+ timed_async!("remote_get_instance_runtime_snapshot", {
+ remote_instance_runtime_snapshot_impl(&pool, &host_id).await
+ })
}
#[tauri::command]
pub async fn get_channels_config_snapshot() -> Result {
- tauri::async_runtime::spawn_blocking(|| {
- let cfg = read_openclaw_config(&resolve_paths())?;
- extract_channels_config_snapshot(&cfg)
+ timed_async!("get_channels_config_snapshot", {
+ tauri::async_runtime::spawn_blocking(|| {
+ let cfg = read_openclaw_config(&resolve_paths())?;
+ extract_channels_config_snapshot(&cfg)
+ })
+ .await
+ .map_err(|error| error.to_string())?
})
- .await
- .map_err(|error| error.to_string())?
}
#[tauri::command]
@@ -346,26 +356,30 @@ pub async fn remote_get_channels_config_snapshot(
pool: State<'_, SshConnectionPool>,
host_id: String,
) -> Result {
- let (_, _, cfg) = remote_read_openclaw_config_text_and_json(&pool, &host_id).await?;
- extract_channels_config_snapshot(&cfg)
+ timed_async!("remote_get_channels_config_snapshot", {
+ let (_, _, cfg) = remote_read_openclaw_config_text_and_json(&pool, &host_id).await?;
+ extract_channels_config_snapshot(&cfg)
+ })
}
#[tauri::command]
pub async fn get_channels_runtime_snapshot(
cache: tauri::State<'_, crate::cli_runner::CliCache>,
) -> Result {
- let channels = list_channels_minimal(cache.clone()).await?;
- let bindings = list_bindings(cache.clone()).await?;
- let agents = list_agents_overview(cache).await?;
- let bindings = serde_json::to_value(bindings)
- .map_err(|error| error.to_string())?
- .as_array()
- .cloned()
- .unwrap_or_default();
- Ok(ChannelsRuntimeSnapshot {
- channels,
- bindings,
- agents,
+ timed_async!("get_channels_runtime_snapshot", {
+ let channels = list_channels_minimal(cache.clone()).await?;
+ let bindings = list_bindings(cache.clone()).await?;
+ let agents = list_agents_overview(cache).await?;
+ let bindings = serde_json::to_value(bindings)
+ .map_err(|error| error.to_string())?
+ .as_array()
+ .cloned()
+ .unwrap_or_default();
+ Ok(ChannelsRuntimeSnapshot {
+ channels,
+ bindings,
+ agents,
+ })
})
}
@@ -374,14 +388,18 @@ pub async fn remote_get_channels_runtime_snapshot(
pool: State<'_, SshConnectionPool>,
host_id: String,
) -> Result {
- remote_channels_runtime_snapshot_impl(&pool, &host_id).await
+ timed_async!("remote_get_channels_runtime_snapshot", {
+ remote_channels_runtime_snapshot_impl(&pool, &host_id).await
+ })
}
#[tauri::command]
pub fn get_cron_config_snapshot() -> Result {
- let jobs = list_cron_jobs()?;
- let jobs = jobs.as_array().cloned().unwrap_or_default();
- Ok(CronConfigSnapshot { jobs })
+ timed_sync!("get_cron_config_snapshot", {
+ let jobs = list_cron_jobs()?;
+ let jobs = jobs.as_array().cloned().unwrap_or_default();
+ Ok(CronConfigSnapshot { jobs })
+ })
}
#[tauri::command]
@@ -389,17 +407,21 @@ pub async fn remote_get_cron_config_snapshot(
pool: State<'_, SshConnectionPool>,
host_id: String,
) -> Result {
- let jobs = remote_list_cron_jobs(pool, host_id).await?;
- let jobs = jobs.as_array().cloned().unwrap_or_default();
- Ok(CronConfigSnapshot { jobs })
+ timed_async!("remote_get_cron_config_snapshot", {
+ let jobs = remote_list_cron_jobs(pool, host_id).await?;
+ let jobs = jobs.as_array().cloned().unwrap_or_default();
+ Ok(CronConfigSnapshot { jobs })
+ })
}
#[tauri::command]
pub async fn get_cron_runtime_snapshot() -> Result {
- let jobs = list_cron_jobs()?;
- let watchdog = get_watchdog_status().await?;
- let jobs = jobs.as_array().cloned().unwrap_or_default();
- Ok(CronRuntimeSnapshot { jobs, watchdog })
+ timed_async!("get_cron_runtime_snapshot", {
+ let jobs = list_cron_jobs()?;
+ let watchdog = get_watchdog_status().await?;
+ let jobs = jobs.as_array().cloned().unwrap_or_default();
+ Ok(CronRuntimeSnapshot { jobs, watchdog })
+ })
}
#[tauri::command]
@@ -407,12 +429,14 @@ pub async fn remote_get_cron_runtime_snapshot(
pool: State<'_, SshConnectionPool>,
host_id: String,
) -> Result<CronRuntimeSnapshot, String> {
- let jobs = remote_list_cron_jobs(pool.clone(), host_id.clone()).await?;
- let watchdog = remote_get_watchdog_status(pool, host_id).await?;
- let jobs = jobs.as_array().cloned().unwrap_or_default();
- Ok(CronRuntimeSnapshot {
- jobs,
- watchdog: parse_remote_watchdog_value(watchdog),
+ timed_async!("remote_get_cron_runtime_snapshot", {
+ let jobs = remote_list_cron_jobs(pool.clone(), host_id.clone()).await?;
+ let watchdog = remote_get_watchdog_status(pool, host_id).await?;
+ let jobs = jobs.as_array().cloned().unwrap_or_default();
+ Ok(CronRuntimeSnapshot {
+ jobs,
+ watchdog: parse_remote_watchdog_value(watchdog),
+ })
})
}
diff --git a/src-tauri/src/commands/perf.rs b/src-tauri/src/commands/perf.rs
new file mode 100644
index 00000000..9d57ed7f
--- /dev/null
+++ b/src-tauri/src/commands/perf.rs
@@ -0,0 +1,280 @@
+use super::*;
+
+/// Metrics about the current process, exposed to the frontend and E2E tests.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ProcessMetrics {
+ /// Process ID
+ pub pid: u32,
+ /// Resident Set Size in bytes (physical memory used)
+ pub rss_bytes: u64,
+ /// Virtual memory size in bytes
+ pub vms_bytes: u64,
+ /// Process uptime in seconds
+ pub uptime_secs: f64,
+ /// Platform identifier
+ pub platform: String,
+}
+
+/// Tracks elapsed time of a named operation and logs it.
+/// Returns `(result, elapsed_ms)`.
+pub fn trace_command<F, T>(name: &str, f: F) -> (T, u64)
+where
+ F: FnOnce() -> T,
+{
+ let start = Instant::now();
+ let result = f();
+ let elapsed_ms = start.elapsed().as_millis() as u64;
+
+ let threshold_ms = if name.starts_with("remote_") || name.starts_with("ssh_") {
+ 2000
+ } else {
+ 100
+ };
+
+ if elapsed_ms > threshold_ms {
+ crate::logging::log_info(&format!(
+ "[perf] SLOW {} completed in {}ms (threshold: {}ms)",
+ name, elapsed_ms, threshold_ms
+ ));
+ } else {
+ crate::logging::log_info(&format!("[perf] {} completed in {}ms", name, elapsed_ms));
+ }
+
+ (result, elapsed_ms)
+}
+
+/// Single perf sample emitted to the frontend via events or returned directly.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct PerfSample {
+ /// The command or operation name
+ pub name: String,
+ /// Elapsed time in milliseconds
+ pub elapsed_ms: u64,
+ /// Timestamp (Unix millis) when the sample was taken
+ pub timestamp: u64,
+ /// Whether the command exceeded its latency threshold
+ pub exceeded_threshold: bool,
+}
+
+static APP_START: LazyLock<Instant> = LazyLock::new(Instant::now);
+
+/// Initialize the start time — call this once during app setup.
+pub fn init_perf_clock() {
+ // Force lazy evaluation so the clock starts ticking from app init, not first command.
+ let _ = *APP_START;
+}
+
+/// Get the time since app start in milliseconds.
+pub fn uptime_ms() -> u64 {
+ APP_START.elapsed().as_millis() as u64
+}
+
+#[tauri::command]
+pub fn get_process_metrics() -> Result<ProcessMetrics, String> {
+ let pid = std::process::id();
+
+ let (rss_bytes, vms_bytes) = read_process_memory(pid)?;
+
+ let uptime_secs = APP_START.elapsed().as_secs_f64();
+
+ Ok(ProcessMetrics {
+ pid,
+ rss_bytes,
+ vms_bytes,
+ uptime_secs,
+ platform: std::env::consts::OS.to_string(),
+ })
+}
+
+/// Read memory info for a given PID from the OS.
+#[cfg(target_os = "linux")]
+fn read_process_memory(pid: u32) -> Result<(u64, u64), String> {
+ let status_path = format!("/proc/{}/status", pid);
+ let content = fs::read_to_string(&status_path)
+ .map_err(|e| format!("Failed to read {}: {}", status_path, e))?;
+
+ let mut rss: u64 = 0;
+ let mut vms: u64 = 0;
+
+ for line in content.lines() {
+ if line.starts_with("VmRSS:") {
+ if let Some(val) = parse_proc_kb(line) {
+ rss = val * 1024; // Convert KB to bytes
+ }
+ } else if line.starts_with("VmSize:") {
+ if let Some(val) = parse_proc_kb(line) {
+ vms = val * 1024;
+ }
+ }
+ }
+
+ Ok((rss, vms))
+}
+
+#[cfg(target_os = "linux")]
+fn parse_proc_kb(line: &str) -> Option<u64> {
+ line.split_whitespace().nth(1)?.parse::<u64>().ok()
+}
+
+#[cfg(target_os = "macos")]
+fn read_process_memory(pid: u32) -> Result<(u64, u64), String> {
+ // Use `ps` as a portable fallback — mach_task_info requires unsafe FFI
+ let output = Command::new("ps")
+ .args(["-o", "rss=,vsz=", "-p", &pid.to_string()])
+ .output()
+ .map_err(|e| format!("Failed to run ps: {}", e))?;
+
+ let text = String::from_utf8_lossy(&output.stdout);
+ let parts: Vec<&str> = text.trim().split_whitespace().collect();
+ if parts.len() >= 2 {
+ let rss_kb: u64 = parts[0].parse().unwrap_or(0);
+ let vms_kb: u64 = parts[1].parse().unwrap_or(0);
+ Ok((rss_kb * 1024, vms_kb * 1024))
+ } else {
+ Err("Failed to parse ps output".to_string())
+ }
+}
+
+#[cfg(target_os = "windows")]
+fn read_process_memory(_pid: u32) -> Result<(u64, u64), String> {
+ // Windows: use tasklist /FI to get memory info
+ let output = Command::new("tasklist")
+ .args(["/FI", &format!("PID eq {}", _pid), "/FO", "CSV", "/NH"])
+ .output()
+ .map_err(|e| format!("Failed to run tasklist: {}", e))?;
+
+ let text = String::from_utf8_lossy(&output.stdout);
+ // CSV format: "name","pid","session","session#","mem usage"
+ // mem usage is like "12,345 K"
+ for line in text.lines() {
+ let fields: Vec<&str> = line.split(',').collect();
+ if fields.len() >= 5 {
+ let mem_str = fields[4].trim().trim_matches('"');
+ let mem_kb: u64 = mem_str
+ .replace(" K", "")
+ .replace(',', "")
+ .trim()
+ .parse()
+ .unwrap_or(0);
+ return Ok((mem_kb * 1024, 0)); // VMS not easily available
+ }
+ }
+
+ Ok((0, 0))
+}
+
+#[cfg(not(any(target_os = "linux", target_os = "macos", target_os = "windows")))]
+fn read_process_memory(_pid: u32) -> Result<(u64, u64), String> {
+ Ok((0, 0))
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_trace_command_returns_result_and_timing() {
+ let (result, elapsed) = trace_command("test_noop", || 42);
+ assert_eq!(result, 42);
+ // Should complete in well under 100ms
+ assert!(elapsed < 100, "noop took {}ms", elapsed);
+ }
+
+ #[test]
+ fn test_get_process_metrics_returns_valid_data() {
+ init_perf_clock();
+ let metrics = get_process_metrics().expect("should succeed");
+ assert!(metrics.pid > 0);
+ assert!(metrics.rss_bytes > 0, "RSS should be non-zero");
+ assert!(!metrics.platform.is_empty());
+ }
+
+ #[test]
+ fn test_uptime_increases() {
+ init_perf_clock();
+ let t1 = uptime_ms();
+ std::thread::sleep(std::time::Duration::from_millis(10));
+ let t2 = uptime_ms();
+ assert!(t2 > t1, "uptime should increase: {} vs {}", t1, t2);
+ }
+}
+
+// ── Global performance registry ──
+
+use std::sync::Arc;
+
+/// Thread-safe registry of command timing samples.
+static PERF_REGISTRY: LazyLock<Arc<Mutex<Vec<PerfSample>>>> =
+ LazyLock::new(|| Arc::new(Mutex::new(Vec::with_capacity(1024))));
+
+/// Record a timing sample into the global registry.
+pub fn record_timing(name: &str, elapsed_ms: u64) {
+ let ts = SystemTime::now()
+ .duration_since(UNIX_EPOCH)
+ .unwrap_or_default()
+ .as_millis() as u64;
+ let threshold = if name.starts_with("remote_") {
+ 2000
+ } else {
+ 100
+ };
+ let sample = PerfSample {
+ name: name.to_string(),
+ elapsed_ms,
+ timestamp: ts,
+ exceeded_threshold: elapsed_ms > threshold,
+ };
+ if let Ok(mut reg) = PERF_REGISTRY.lock() {
+ reg.push(sample);
+ }
+}
+
+/// Get all recorded timing samples and clear the registry.
+#[tauri::command]
+pub fn get_perf_timings() -> Result<Vec<PerfSample>, String> {
+ let mut reg = PERF_REGISTRY.lock().map_err(|e| e.to_string())?;
+ let samples = reg.drain(..).collect();
+ Ok(samples)
+}
+
+/// Get a summary report of all recorded timings grouped by command name.
+#[tauri::command]
+pub fn get_perf_report() -> Result<Value, String> {
+ let reg = PERF_REGISTRY.lock().map_err(|e| e.to_string())?;
+
+ let mut by_name: HashMap<String, Vec<u64>> = HashMap::new();
+ for s in reg.iter() {
+ by_name
+ .entry(s.name.clone())
+ .or_default()
+ .push(s.elapsed_ms);
+ }
+
+ let mut report = serde_json::Map::new();
+ for (name, mut times) in by_name {
+ times.sort();
+ let count = times.len();
+ let sum: u64 = times.iter().sum();
+ let p50 = times.get(count / 2).copied().unwrap_or(0);
+ let p95 = times
+ .get((count as f64 * 0.95) as usize)
+ .copied()
+ .unwrap_or(0);
+ let max = times.last().copied().unwrap_or(0);
+
+ report.insert(
+ name,
+ json!({
+ "count": count,
+ "p50_ms": p50,
+ "p95_ms": p95,
+ "max_ms": max,
+ "avg_ms": if count > 0 { sum / count as u64 } else { 0 },
+ }),
+ );
+ }
+
+ Ok(Value::Object(report))
+}
diff --git a/src-tauri/src/commands/precheck.rs b/src-tauri/src/commands/precheck.rs
index f5cbafa4..471cce89 100644
--- a/src-tauri/src/commands/precheck.rs
+++ b/src-tauri/src/commands/precheck.rs
@@ -5,17 +5,22 @@ use crate::ssh::SshConnectionPool;
#[tauri::command]
pub async fn precheck_registry() -> Result<Vec<PrecheckIssue>, String> {
- let registry_path = clawpal_core::instance::registry_path();
- Ok(precheck::precheck_registry(®istry_path))
+ timed_async!("precheck_registry", {
+ let registry_path = clawpal_core::instance::registry_path();
+ Ok(precheck::precheck_registry(®istry_path))
+ })
}
#[tauri::command]
pub async fn precheck_instance(instance_id: String) -> Result<Vec<PrecheckIssue>, String> {
- let registry = clawpal_core::instance::InstanceRegistry::load().map_err(|e| e.to_string())?;
- let instance = registry
- .get(&instance_id)
- .ok_or_else(|| format!("Instance not found: {instance_id}"))?;
- Ok(precheck::precheck_instance_state(instance))
+ timed_async!("precheck_instance", {
+ let registry =
+ clawpal_core::instance::InstanceRegistry::load().map_err(|e| e.to_string())?;
+ let instance = registry
+ .get(&instance_id)
+ .ok_or_else(|| format!("Instance not found: {instance_id}"))?;
+ Ok(precheck::precheck_instance_state(instance))
+ })
}
#[tauri::command]
@@ -23,55 +28,61 @@ pub async fn precheck_transport(
pool: State<'_, SshConnectionPool>,
instance_id: String,
) -> Result<Vec<PrecheckIssue>, String> {
- let registry = clawpal_core::instance::InstanceRegistry::load().map_err(|e| e.to_string())?;
- let instance = registry
- .get(&instance_id)
- .ok_or_else(|| format!("Instance not found: {instance_id}"))?;
+ timed_async!("precheck_transport", {
+ let registry =
+ clawpal_core::instance::InstanceRegistry::load().map_err(|e| e.to_string())?;
+ let instance = registry
+ .get(&instance_id)
+ .ok_or_else(|| format!("Instance not found: {instance_id}"))?;
- let mut issues = Vec::new();
+ let mut issues = Vec::new();
- match &instance.instance_type {
- clawpal_core::instance::InstanceType::RemoteSsh => {
- if !pool.is_connected(&instance_id).await {
- issues.push(PrecheckIssue {
- code: "TRANSPORT_STALE".into(),
- severity: "warn".into(),
- message: format!(
- "SSH connection for instance '{}' is not active",
- instance.label
- ),
- auto_fixable: false,
- });
+ match &instance.instance_type {
+ clawpal_core::instance::InstanceType::RemoteSsh => {
+ if !pool.is_connected(&instance_id).await {
+ issues.push(PrecheckIssue {
+ code: "TRANSPORT_STALE".into(),
+ severity: "warn".into(),
+ message: format!(
+ "SSH connection for instance '{}' is not active",
+ instance.label
+ ),
+ auto_fixable: false,
+ });
+ }
}
- }
- clawpal_core::instance::InstanceType::Docker => {
- let docker_ok = tokio::process::Command::new("docker")
- .args(["info", "--format", "{{.ServerVersion}}"])
- .stdout(std::process::Stdio::null())
- .stderr(std::process::Stdio::null())
- .status()
- .await
- .map(|s| s.success())
- .unwrap_or(false);
- if !docker_ok {
- issues.push(PrecheckIssue {
- code: "TRANSPORT_STALE".into(),
- severity: "error".into(),
- message: "Docker daemon is not running or unreachable".into(),
- auto_fixable: false,
- });
+ clawpal_core::instance::InstanceType::Docker => {
+ let docker_ok = tokio::process::Command::new("docker")
+ .args(["info", "--format", "{{.ServerVersion}}"])
+ .stdout(std::process::Stdio::null())
+ .stderr(std::process::Stdio::null())
+ .status()
+ .await
+ .map(|s| s.success())
+ .unwrap_or(false);
+ if !docker_ok {
+ issues.push(PrecheckIssue {
+ code: "TRANSPORT_STALE".into(),
+ severity: "error".into(),
+ message: "Docker daemon is not running or unreachable".into(),
+ auto_fixable: false,
+ });
+ }
}
+ _ => {}
}
- _ => {}
- }
- Ok(issues)
+ Ok(issues)
+ })
}
#[tauri::command]
pub async fn precheck_auth(instance_id: String) -> Result<Vec<PrecheckIssue>, String> {
- let openclaw = clawpal_core::openclaw::OpenclawCli::new();
- let profiles = clawpal_core::profile::list_profiles(&openclaw).map_err(|e| e.to_string())?;
- let _ = instance_id; // reserved for future per-instance profile filtering
- Ok(precheck::precheck_auth(&profiles))
+ timed_async!("precheck_auth", {
+ let openclaw = clawpal_core::openclaw::OpenclawCli::new();
+ let profiles =
+ clawpal_core::profile::list_profiles(&openclaw).map_err(|e| e.to_string())?;
+ let _ = instance_id; // reserved for future per-instance profile filtering
+ Ok(precheck::precheck_auth(&profiles))
+ })
}
diff --git a/src-tauri/src/commands/preferences.rs b/src-tauri/src/commands/preferences.rs
index 150fb15d..b77295d8 100644
--- a/src-tauri/src/commands/preferences.rs
+++ b/src-tauri/src/commands/preferences.rs
@@ -87,29 +87,37 @@ pub fn save_bug_report_settings_from_paths(
#[tauri::command]
pub fn get_app_preferences() -> Result<AppPreferences, String> {
- let paths = resolve_paths();
- Ok(load_app_preferences_from_paths(&paths))
+ timed_sync!("get_app_preferences", {
+ let paths = resolve_paths();
+ Ok(load_app_preferences_from_paths(&paths))
+ })
}
#[tauri::command]
pub fn get_bug_report_settings() -> Result<BugReportSettings, String> {
- let paths = resolve_paths();
- Ok(load_bug_report_settings_from_paths(&paths))
+ timed_sync!("get_bug_report_settings", {
+ let paths = resolve_paths();
+ Ok(load_bug_report_settings_from_paths(&paths))
+ })
}
#[tauri::command]
pub fn set_bug_report_settings(settings: BugReportSettings) -> Result {
- let paths = resolve_paths();
- save_bug_report_settings_from_paths(&paths, settings)
+ timed_sync!("set_bug_report_settings", {
+ let paths = resolve_paths();
+ save_bug_report_settings_from_paths(&paths, settings)
+ })
}
#[tauri::command]
pub fn set_ssh_transfer_speed_ui_preference(show_ui: bool) -> Result<AppPreferences, String> {
- let paths = resolve_paths();
- let mut prefs = load_app_preferences_from_paths(&paths);
- prefs.show_ssh_transfer_speed_ui = show_ui;
- save_app_preferences_from_paths(&paths, &prefs)?;
- Ok(prefs)
+ timed_sync!("set_ssh_transfer_speed_ui_preference", {
+ let paths = resolve_paths();
+ let mut prefs = load_app_preferences_from_paths(&paths);
+ prefs.show_ssh_transfer_speed_ui = show_ui;
+ save_app_preferences_from_paths(&paths, &prefs)?;
+ Ok(prefs)
+ })
}
// ---------------------------------------------------------------------------
@@ -132,30 +140,36 @@ pub fn lookup_session_model_override(session_id: &str) -> Option<String> {
#[tauri::command]
pub fn set_session_model_override(session_id: String, model: String) -> Result<(), String> {
- let trimmed = model.trim().to_string();
- if trimmed.is_empty() {
- return Err("model must not be empty".into());
- }
- if let Ok(mut map) = session_model_overrides().lock() {
- map.insert(session_id, trimmed);
- }
- Ok(())
+ timed_sync!("set_session_model_override", {
+ let trimmed = model.trim().to_string();
+ if trimmed.is_empty() {
+ return Err("model must not be empty".into());
+ }
+ if let Ok(mut map) = session_model_overrides().lock() {
+ map.insert(session_id, trimmed);
+ }
+ Ok(())
+ })
}
#[tauri::command]
pub fn get_session_model_override(session_id: String) -> Result