diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 62fd05ca..ec67c83f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -73,3 +73,7 @@ jobs: - name: Run tests run: cargo test -p clawpal-core working-directory: src-tauri + + - name: Run perf metrics tests + run: cargo test -p clawpal --test perf_metrics -- --nocapture + working-directory: src-tauri diff --git a/.github/workflows/home-perf-e2e.yml b/.github/workflows/home-perf-e2e.yml index 75b57c1b..b0673732 100644 --- a/.github/workflows/home-perf-e2e.yml +++ b/.github/workflows/home-perf-e2e.yml @@ -70,14 +70,6 @@ jobs: echo '⚠️ E2E run failed before probe collection. Check workflow logs.' >> tests/e2e/perf/report.md fi - - name: Post / update PR performance report - if: always() && github.event_name == 'pull_request' - uses: marocchino/sticky-pull-request-comment@v2 - with: - header: home-perf-e2e - path: tests/e2e/perf/report.md - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - name: Cleanup if: always() run: docker rm -f oc-perf 2>/dev/null || true diff --git a/.github/workflows/metrics.yml b/.github/workflows/metrics.yml new file mode 100644 index 00000000..94c9e8ca --- /dev/null +++ b/.github/workflows/metrics.yml @@ -0,0 +1,519 @@ +name: Metrics Gate + +on: + pull_request: + branches: [develop, main] + +permissions: + contents: read + pull-requests: write + +concurrency: + group: metrics-${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + metrics: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Setup Bun + uses: oven-sh/setup-bun@v2 + + - name: Install frontend dependencies + run: bun install --frozen-lockfile + + # ── Gate 1: Commit size ≤ 500 lines ── + - name: Check commit sizes + id: commit_size + run: | + MAX_LINES=500 + BASE="${{ github.event.pull_request.base.sha }}" + HEAD="${{ github.sha }}" + FAIL=0 + FAIL_COUNT=0 + MAX_SEEN=0 + DETAILS="" + + for COMMIT in $(git 
rev-list $BASE..$HEAD); do + # Skip merge commits (GitHub auto-generated) + PARENTS=$(git rev-list --parents -1 $COMMIT | wc -w) + if [ "$PARENTS" -gt 2 ]; then + continue + fi + # Skip style-only commits (rustfmt, prettier, etc.) + SUBJECT=$(git log --format=%s -1 $COMMIT) + if echo "$SUBJECT" | grep -qiE '^style(\(|:)'; then + continue + fi + SHORT=$(git rev-parse --short $COMMIT) + SUBJECT=$(git log --format=%s -1 $COMMIT) + STAT=$(git diff --shortstat ${COMMIT}^..${COMMIT} 2>/dev/null || echo "0") + ADDS=$(echo "$STAT" | grep -oP '\d+ insertion' | grep -oP '\d+' || echo 0) + DELS=$(echo "$STAT" | grep -oP '\d+ deletion' | grep -oP '\d+' || echo 0) + TOTAL=$(( ${ADDS:-0} + ${DELS:-0} )) + if [ "$TOTAL" -gt "$MAX_SEEN" ]; then MAX_SEEN=$TOTAL; fi + + if [ "$TOTAL" -gt "$MAX_LINES" ]; then + DETAILS="${DETAILS}| \`${SHORT}\` | ${TOTAL} | ≤ ${MAX_LINES} | ❌ | ${SUBJECT} |\n" + FAIL=1 + FAIL_COUNT=$(( FAIL_COUNT + 1 )) + else + DETAILS="${DETAILS}| \`${SHORT}\` | ${TOTAL} | ≤ ${MAX_LINES} | ✅ | ${SUBJECT} |\n" + fi + done + + TOTAL_COMMITS=$(git rev-list --no-merges $BASE..$HEAD | wc -l) + PASSED_COMMITS=$(( TOTAL_COMMITS - FAIL_COUNT )) + + echo "fail=${FAIL}" >> "$GITHUB_OUTPUT" + echo "total=${TOTAL_COMMITS}" >> "$GITHUB_OUTPUT" + echo "passed=${PASSED_COMMITS}" >> "$GITHUB_OUTPUT" + echo "max_seen=${MAX_SEEN}" >> "$GITHUB_OUTPUT" + printf "%b" "$DETAILS" > /tmp/commit_details.txt + echo "max_lines=${MAX_LINES}" >> "$GITHUB_OUTPUT" + + # ── Gate 2: Frontend bundle size ≤ 512 KB (gzip) ── + - name: Check bundle size + id: bundle_size + run: | + bun run build + BUNDLE_BYTES=$(find dist/assets -name '*.js' -exec cat {} + | wc -c) + BUNDLE_KB=$(( BUNDLE_BYTES / 1024 )) + + GZIP_BYTES=0 + for f in dist/assets/*.js; do + GZ=$(gzip -c "$f" | wc -c) + GZIP_BYTES=$(( GZIP_BYTES + GZ )) + done + GZIP_KB=$(( GZIP_BYTES / 1024 )) + + LIMIT_KB=512 + if [ "$GZIP_KB" -gt "$LIMIT_KB" ]; then + PASS="false" + else + PASS="true" + fi + + # Measure initial-load chunks (exclude lazy 
page/component chunks) + INIT_GZIP=0 + for f in dist/assets/*.js; do + BN=$(basename "$f") + case "$BN" in + index-*|vendor-react-*|vendor-ui-*|vendor-i18n-*|vendor-icons-*) + GZ_INIT=$(gzip -c "$f" | wc -c) + INIT_GZIP=$((INIT_GZIP + GZ_INIT)) + ;; + esac + done + INIT_KB=$((INIT_GZIP / 1024)) + + echo "raw_kb=${BUNDLE_KB}" >> "$GITHUB_OUTPUT" + echo "gzip_kb=${GZIP_KB}" >> "$GITHUB_OUTPUT" + echo "init_gzip_kb=${INIT_KB}" >> "$GITHUB_OUTPUT" + echo "limit_kb=${LIMIT_KB}" >> "$GITHUB_OUTPUT" + echo "pass=${PASS}" >> "$GITHUB_OUTPUT" + + # ── Gate 3: Perf metrics E2E ── + - name: Install system dependencies + run: | + sudo apt-get update + sudo apt-get install -y \ + libwebkit2gtk-4.1-dev \ + libappindicator3-dev \ + librsvg2-dev \ + patchelf \ + libssl-dev \ + libgtk-3-dev \ + libsoup-3.0-dev \ + libjavascriptcoregtk-4.1-dev + + - name: Setup Rust + uses: dtolnay/rust-toolchain@stable + + - name: Cache Rust dependencies + uses: Swatinem/rust-cache@v2 + with: + workspaces: src-tauri + + - name: Run perf metrics tests + id: perf_tests + working-directory: src-tauri + run: | + set +e + OUTPUT=$(cargo test -p clawpal --test perf_metrics -- --nocapture 2>&1) + EXIT_CODE=$? 
+ echo "$OUTPUT" + + # Parse test results + PASSED=$(echo "$OUTPUT" | grep -oP '\d+ passed' | grep -oP '\d+' || echo 0) + FAILED=$(echo "$OUTPUT" | grep -oP '\d+ failed' | grep -oP '\d+' || echo 0) + + # Extract structured metrics from METRIC: lines + RSS_MB=$(echo "$OUTPUT" | grep -oP 'METRIC:rss_mb=\K[0-9.]+' || echo "N/A") + VMS_MB=$(echo "$OUTPUT" | grep -oP 'METRIC:vms_mb=\K[0-9.]+' || echo "N/A") + CMD_P50=$(echo "$OUTPUT" | grep -oP 'METRIC:cmd_p50_ms=\K[0-9]+' || echo "N/A") + CMD_P95=$(echo "$OUTPUT" | grep -oP 'METRIC:cmd_p95_ms=\K[0-9]+' || echo "N/A") + CMD_MAX=$(echo "$OUTPUT" | grep -oP 'METRIC:cmd_max_ms=\K[0-9]+' || echo "N/A") + UPTIME=$(echo "$OUTPUT" | grep -oP 'METRIC:uptime_secs=\K[0-9.]+' || echo "N/A") + + echo "passed=${PASSED}" >> "$GITHUB_OUTPUT" + echo "failed=${FAILED}" >> "$GITHUB_OUTPUT" + echo "exit_code=${EXIT_CODE}" >> "$GITHUB_OUTPUT" + echo "rss_mb=${RSS_MB}" >> "$GITHUB_OUTPUT" + echo "vms_mb=${VMS_MB}" >> "$GITHUB_OUTPUT" + echo "cmd_p50=${CMD_P50}" >> "$GITHUB_OUTPUT" + echo "cmd_p95=${CMD_P95}" >> "$GITHUB_OUTPUT" + echo "cmd_max=${CMD_MAX}" >> "$GITHUB_OUTPUT" + echo "uptime=${UPTIME}" >> "$GITHUB_OUTPUT" + + if [ "$EXIT_CODE" -ne 0 ]; then + echo "pass=false" >> "$GITHUB_OUTPUT" + else + echo "pass=true" >> "$GITHUB_OUTPUT" + fi + + # ── Gate 4: Large file check (informational) ── + - name: Check large files + id: large_files + run: | + MOD_LINES=$(wc -l < src-tauri/src/commands/mod.rs 2>/dev/null || echo 0) + APP_LINES=$(wc -l < src/App.tsx 2>/dev/null || echo 0) + + DETAILS="| \`commands/mod.rs\` | ${MOD_LINES} | ≤ 2000 |" + if [ "$MOD_LINES" -gt 2000 ]; then + DETAILS="${DETAILS} ⚠️ |" + else + DETAILS="${DETAILS} ✅ |" + fi + + DETAILS="${DETAILS}\n| \`App.tsx\` | ${APP_LINES} | ≤ 500 |" + if [ "$APP_LINES" -gt 500 ]; then + DETAILS="${DETAILS} ⚠️ |" + else + DETAILS="${DETAILS} ✅ |" + fi + + LARGE_COUNT=$(find src/ src-tauri/src/ \( -name '*.ts' -o -name '*.tsx' -o -name '*.rs' \) -exec wc -l {} + 2>/dev/null | \ + grep 
-v total | awk '$1 > 500 {count++} END {print count+0}') + + printf "%b" "$DETAILS" > /tmp/large_file_details.txt + echo "mod_lines=${MOD_LINES}" >> "$GITHUB_OUTPUT" + echo "app_lines=${APP_LINES}" >> "$GITHUB_OUTPUT" + echo "large_count=${LARGE_COUNT}" >> "$GITHUB_OUTPUT" + + # ── Gate 4b: Command perf E2E (local) ── + - name: Run command perf E2E + id: cmd_perf + working-directory: src-tauri + run: | + set +e + OUTPUT=$(cargo test -p clawpal --test command_perf_e2e -- --nocapture 2>&1) + EXIT_CODE=$? + echo "$OUTPUT" + + PASSED=$(echo "$OUTPUT" | grep -oP '\d+ passed' | grep -oP '\d+' || echo 0) + FAILED=$(echo "$OUTPUT" | grep -oP '\d+ failed' | grep -oP '\d+' || echo 0) + + # Extract LOCAL_CMD lines + echo "$OUTPUT" | grep '^LOCAL_CMD:' > /tmp/local_cmd_perf.txt || true + CMD_COUNT=$(wc -l < /tmp/local_cmd_perf.txt) + + # Extract process metrics + PROC_RSS=$(echo "$OUTPUT" | grep -oP 'PROCESS:rss_mb=\K[0-9.]+' || echo "N/A") + + echo "passed=${PASSED}" >> "$GITHUB_OUTPUT" + echo "failed=${FAILED}" >> "$GITHUB_OUTPUT" + echo "cmd_count=${CMD_COUNT}" >> "$GITHUB_OUTPUT" + echo "proc_rss=${PROC_RSS}" >> "$GITHUB_OUTPUT" + + if [ "$EXIT_CODE" -ne 0 ]; then + echo "pass=false" >> "$GITHUB_OUTPUT" + else + echo "pass=true" >> "$GITHUB_OUTPUT" + fi + + # ── Gate 4c: Command perf E2E (remote via SSH Docker) ── + - name: Install sshpass (for SSH perf tests) + run: sudo apt-get install -y sshpass + + - name: Build Docker OpenClaw container (for remote perf) + run: docker build -t clawpal-perf-e2e -f tests/e2e/perf/Dockerfile . 
+ + - name: Start SSH container + run: | + docker run -d --name oc-remote-perf -p 2299:22 clawpal-perf-e2e + for i in $(seq 1 15); do + sshpass -p clawpal-perf-e2e ssh -o StrictHostKeyChecking=no -p 2299 root@localhost echo ok 2>/dev/null && break + sleep 1 + done + + - name: Run remote command timing via SSH + id: remote_perf + run: | + set +e + SSH="sshpass -p clawpal-perf-e2e ssh -o StrictHostKeyChecking=no -p 2299 root@localhost" + + # Exercise remote OpenClaw commands and measure timing + CMDS=( + "openclaw status --json" + "cat /root/.openclaw/openclaw.json" + "openclaw gateway status --json" + "openclaw cron list --json" + "openclaw agent list --json" + ) + + echo "REMOTE_PERF_START" > /tmp/remote_perf.txt + for CMD in "${CMDS[@]}"; do + SHORT=$(echo "$CMD" | awk '{print $1"_"$2}' | tr '/' '_') + for i in $(seq 1 3); do + START=$(date +%s%N) + $SSH "$CMD" > /dev/null 2>&1 + END=$(date +%s%N) + MS=$(( (END - START) / 1000000 )) + echo "REMOTE_CMD:${SHORT}:run${i}:${MS}ms" | tee -a /tmp/remote_perf.txt + done + done + echo "REMOTE_PERF_END" >> /tmp/remote_perf.txt + + # Parse medians + DETAILS="" + for CMD in "${CMDS[@]}"; do + SHORT=$(echo "$CMD" | awk '{print $1"_"$2}' | tr '/' '_') + TIMES=$(grep "REMOTE_CMD:${SHORT}:" /tmp/remote_perf.txt | grep -oP '\d+(?=ms)' | sort -n) + MEDIAN=$(echo "$TIMES" | sed -n '2p') + MAX=$(echo "$TIMES" | tail -1) + DETAILS="${DETAILS}${SHORT}:median=${MEDIAN:-0}:max=${MAX:-0}\n" + done + printf "%b" "$DETAILS" > /tmp/remote_perf_summary.txt + + # Also measure a batch command (single SSH hop) + BATCH_CMD="openclaw status --json && openclaw gateway status --json && openclaw cron list --json" + for i in $(seq 1 3); do + START=$(date +%s%N) + $SSH "$BATCH_CMD" > /dev/null 2>&1 + END=$(date +%s%N) + MS=$(( (END - START) / 1000000 )) + echo "REMOTE_CMD:batch_all:run${i}:${MS}ms" | tee -a /tmp/remote_perf.txt + done + + echo "pass=true" >> "$GITHUB_OUTPUT" + + - name: Cleanup remote container + if: always() + run: docker rm -f 
oc-remote-perf 2>/dev/null || true + + # ── Gate 5: Home page render probes ── + - name: Cache Playwright browsers + id: playwright-cache + uses: actions/cache@v4 + with: + path: ~/.cache/ms-playwright + key: playwright-${{ runner.os }}-${{ hashFiles('package.json') }} + + - name: Install Playwright + run: | + bun add -d @playwright/test + npx playwright install chromium --with-deps + timeout-minutes: 5 + + - name: Install sshpass + run: sudo apt-get install -y sshpass + + - name: Start container (reuses image from remote perf step) + run: | + docker run -d --name oc-perf -p 2299:22 clawpal-perf-e2e + for i in $(seq 1 15); do + sshpass -p clawpal-perf-e2e ssh -o StrictHostKeyChecking=no -p 2299 root@localhost echo ok 2>/dev/null && break + sleep 1 + done + + - name: Extract fixtures from container + run: node tests/e2e/perf/extract-fixtures.mjs + env: + CLAWPAL_PERF_SSH_PORT: "2299" + + - name: Start Vite dev server + run: | + bun run dev & + for i in $(seq 1 20); do + curl -s http://localhost:1420 > /dev/null 2>&1 && break + sleep 1 + done + + - name: Run render probe E2E + id: home_perf + run: | + set +e + npx playwright test --config tests/e2e/perf/playwright.config.mjs 2>&1 + EXIT_CODE=$? 
+ + # Parse report.md for probe values + if [ -f tests/e2e/perf/report.md ]; then + STATUS_MS=$(grep -oP '\| status \| \K[0-9]+' tests/e2e/perf/report.md || echo "N/A") + VERSION_MS=$(grep -oP '\| version \| \K[0-9]+' tests/e2e/perf/report.md || echo "N/A") + AGENTS_MS=$(grep -oP '\| agents \| \K[0-9]+' tests/e2e/perf/report.md || echo "N/A") + MODELS_MS=$(grep -oP '\| models \| \K[0-9]+' tests/e2e/perf/report.md || echo "N/A") + SETTLED_MS=$(grep -oP '\| settled \| \K[0-9]+' tests/e2e/perf/report.md || echo "N/A") + else + STATUS_MS="N/A"; VERSION_MS="N/A"; AGENTS_MS="N/A"; MODELS_MS="N/A"; SETTLED_MS="N/A" + fi + + echo "status_ms=${STATUS_MS}" >> "$GITHUB_OUTPUT" + echo "version_ms=${VERSION_MS}" >> "$GITHUB_OUTPUT" + echo "agents_ms=${AGENTS_MS}" >> "$GITHUB_OUTPUT" + echo "models_ms=${MODELS_MS}" >> "$GITHUB_OUTPUT" + echo "settled_ms=${SETTLED_MS}" >> "$GITHUB_OUTPUT" + + if [ "$EXIT_CODE" -ne 0 ]; then + echo "pass=false" >> "$GITHUB_OUTPUT" + else + echo "pass=true" >> "$GITHUB_OUTPUT" + fi + env: + PERF_MOCK_LATENCY_MS: "50" + PERF_SETTLED_GATE_MS: "5000" + + - name: Cleanup container + if: always() + run: docker rm -f oc-perf 2>/dev/null || true + + # ── Post / update PR comment ── + - name: Generate metrics comment + id: metrics_body + run: | + LARGE_FILE_DETAILS=$(cat /tmp/large_file_details.txt) + + GATE_FAIL=0 + OVERALL="✅ All gates passed" + + # Commit size is a soft gate (reported but not blocking) + # if [ "${{ steps.commit_size.outputs.fail }}" = "1" ]; then + # OVERALL="❌ Some gates failed"; GATE_FAIL=1 + # fi + if [ "${{ steps.bundle_size.outputs.pass }}" = "false" ]; then + OVERALL="❌ Some gates failed"; GATE_FAIL=1 + fi + if [ "${{ steps.perf_tests.outputs.pass }}" = "false" ]; then + OVERALL="❌ Some gates failed"; GATE_FAIL=1 + fi + if [ "${{ steps.cmd_perf.outputs.pass }}" = "false" ]; then + OVERALL="❌ Some gates failed"; GATE_FAIL=1 + fi + if [ "${{ steps.home_perf.outputs.pass }}" = "false" ]; then + OVERALL="❌ Some gates failed"; 
GATE_FAIL=1 + fi + + BUNDLE_ICON=$( [ "${{ steps.bundle_size.outputs.pass }}" = "true" ] && echo "✅" || echo "❌" ) + COMMIT_ICON=$( [ "${{ steps.commit_size.outputs.fail }}" = "0" ] && echo "✅" || echo "❌" ) + + cat > /tmp/metrics_comment.md << COMMENTEOF + + ## 📏 Metrics Gate Report + + **Status**: ${OVERALL} + + ### Commit Size ${COMMIT_ICON} + + | Metric | Value | Limit | Status | + |--------|-------|-------|--------| + | Commits checked | ${{ steps.commit_size.outputs.total }} | — | — | + | All within limit | ${{ steps.commit_size.outputs.passed }}/${{ steps.commit_size.outputs.total }} | ≤ ${{ steps.commit_size.outputs.max_lines }} lines | ${COMMIT_ICON} | + | Largest commit | ${{ steps.commit_size.outputs.max_seen }} lines | ≤ ${{ steps.commit_size.outputs.max_lines }} | $( [ "${{ steps.commit_size.outputs.max_seen }}" -le "${{ steps.commit_size.outputs.max_lines }}" ] && echo "✅" || echo "❌" ) | + + ### Bundle Size ${BUNDLE_ICON} + + | Metric | Value | Limit | Status | + |--------|-------|-------|--------| + | JS bundle (raw) | ${{ steps.bundle_size.outputs.raw_kb }} KB | — | — | + | JS bundle (gzip) | ${{ steps.bundle_size.outputs.gzip_kb }} KB | ≤ ${{ steps.bundle_size.outputs.limit_kb }} KB | ${BUNDLE_ICON} | + | JS initial load (gzip) | ${{ steps.bundle_size.outputs.init_gzip_kb }} KB | — | ℹ️ | + + ### Perf Metrics E2E $( [ "${{ steps.perf_tests.outputs.pass }}" = "true" ] && echo "✅" || echo "❌" ) + + | Metric | Value | Limit | Status | + |--------|-------|-------|--------| + | Tests | ${{ steps.perf_tests.outputs.passed }} passed, ${{ steps.perf_tests.outputs.failed }} failed | 0 failures | $( [ "${{ steps.perf_tests.outputs.failed }}" = "0" ] && echo "✅" || echo "❌" ) | + | RSS (test process) | ${{ steps.perf_tests.outputs.rss_mb }} MB | ≤ 80 MB | $( echo "${{ steps.perf_tests.outputs.rss_mb }}" | awk '{print ($1 <= 80) ? 
"✅" : "❌"}' ) | + | VMS (test process) | ${{ steps.perf_tests.outputs.vms_mb }} MB | — | ℹ️ | + | Command P50 latency | ${{ steps.perf_tests.outputs.cmd_p50 }} ms | — | ℹ️ | + | Command P95 latency | ${{ steps.perf_tests.outputs.cmd_p95 }} ms | ≤ 100 ms | $( echo "${{ steps.perf_tests.outputs.cmd_p95 }}" | awk '{print ($1 <= 100) ? "✅" : "❌"}' ) | + | Command max latency | ${{ steps.perf_tests.outputs.cmd_max }} ms | — | ℹ️ | + + ### Command Perf (local) $( [ "${{ steps.cmd_perf.outputs.pass }}" = "true" ] && echo "✅" || echo "❌" ) + + | Metric | Value | Status | + |--------|-------|--------| + | Tests | ${{ steps.cmd_perf.outputs.passed }} passed, ${{ steps.cmd_perf.outputs.failed }} failed | $( [ "${{ steps.cmd_perf.outputs.failed }}" = "0" ] && echo "✅" || echo "❌" ) | + | Commands measured | ${{ steps.cmd_perf.outputs.cmd_count }} | ℹ️ | + | RSS (test process) | ${{ steps.cmd_perf.outputs.proc_rss }} MB | ℹ️ | + +
<details><summary>Local command timings</summary> + + | Command | P50 | P95 | Max | + |---------|-----|-----|-----| + $(cat /tmp/local_cmd_perf.txt 2>/dev/null | awk -F: '{printf "| %s | %s | %s | %s |\n", $2, $4, $5, $6}' | sed 's/p50=//;s/p95=//;s/max=//;s/avg=[0-9]*//;s/count=[0-9]*://' || echo "| N/A | N/A | N/A | N/A |") + +
</details> + + ### Command Perf (remote SSH) ✅ +
<details><summary>Remote command timings (via Docker SSH)</summary> + + | Command | Median | Max | + |---------|--------|-----| + $(cat /tmp/remote_perf_summary.txt 2>/dev/null | awk -F: '{printf "| %s | %s ms | %s ms |\n", $1, $2, $3}' | sed 's/median=//;s/max=//' || echo "| N/A | N/A | N/A |") + +
</details> + + ### Home Page Render Probes $( [ "${{ steps.home_perf.outputs.pass }}" = "true" ] && echo "✅" || echo "❌" ) + + | Probe | Value | Limit | Status | + |-------|-------|-------|--------| + | status | ${{ steps.home_perf.outputs.status_ms }} ms | — | ℹ️ | + | version | ${{ steps.home_perf.outputs.version_ms }} ms | — | ℹ️ | + | agents | ${{ steps.home_perf.outputs.agents_ms }} ms | — | ℹ️ | + | models | ${{ steps.home_perf.outputs.models_ms }} ms | — | ℹ️ | + | settled | ${{ steps.home_perf.outputs.settled_ms }} ms | < 5000 ms | $( echo "${{ steps.home_perf.outputs.settled_ms }}" | awk '{print ($1 != "N/A" && $1 < 5000) ? "✅" : "❌"}' ) | + + ### Code Readability (informational) + + | File | Lines | Target | Status | + |------|-------|--------|--------| + ${LARGE_FILE_DETAILS} + | Files > 500 lines | ${{ steps.large_files.outputs.large_count }} | trend ↓ | ℹ️ | + + --- + > 📊 Metrics defined in [\`docs/architecture/metrics.md\`](../blob/${{ github.head_ref }}/docs/architecture/metrics.md) + COMMENTEOF + + # Remove leading whitespace from heredoc + sed -i 's/^ //' /tmp/metrics_comment.md + + echo "gate_fail=${GATE_FAIL}" >> "$GITHUB_OUTPUT" + + - name: Find existing metrics comment + uses: peter-evans/find-comment@v3 + id: fc + with: + issue-number: ${{ github.event.pull_request.number }} + comment-author: 'github-actions[bot]' + body-includes: '📏 Metrics Gate Report' + + - name: Create or update metrics comment + uses: peter-evans/create-or-update-comment@v4 + with: + comment-id: ${{ steps.fc.outputs.comment-id }} + issue-number: ${{ github.event.pull_request.number }} + body-path: /tmp/metrics_comment.md + edit-mode: replace + + - name: Fail if gates not met + if: steps.metrics_body.outputs.gate_fail == '1' + run: | + echo "::error::Metrics gate failed — check the PR comment for details." 
+ exit 1 diff --git a/docs/architecture/metrics.md b/docs/architecture/metrics.md new file mode 100644 index 00000000..738c8c95 --- /dev/null +++ b/docs/architecture/metrics.md @@ -0,0 +1,265 @@ +# ClawPal 量化指标体系 + +本文档定义 ClawPal 项目的量化指标、当前基线、目标值和量化方式。 + +指标分为三类: +1. **工程健康度** — PR、CI、测试、文档(来自 Harness Engineering 基线文档) +2. **运行时性能** — 启动、内存、command 耗时、包体积 +3. **Tauri 专项** — command 漂移、打包验证、全平台构建 + +## 1. 工程健康度 + +### 1.1 Commit / PR 质量 + +| 指标 | 基线值 (2026-03-17) | 目标 | 量化方式 | CI Gate | +|------|---------------------|------|----------|---------| +| 单 commit 变更行数 | 未追踪 | ≤ 500 行 | `git diff --stat` | ✅ | +| PR 中位生命周期 | 1.0h | ≤ 4h | GitHub API | — | + +### 1.2 CI 稳定性 + +| 指标 | 基线值 | 目标 | 量化方式 | CI Gate | +|------|--------|------|----------|---------| +| CI 成功率 | 75% | ≥ 90% | workflow run 统计 | — | +| CI 失败中环境问题占比 | 未追踪 | 趋势下降 | 手动分类 | — | + +### 1.3 测试覆盖率 + +| 指标 | 基线值 | 目标 | 量化方式 | CI Gate | +|------|--------|------|----------|---------| +| 行覆盖率 (core + cli) | 74.4% | ≥ 80% | `cargo llvm-cov` | ✅ 不得下降 | +| 函数覆盖率 | 68.9% | ≥ 75% | `cargo llvm-cov` | ✅ 不得下降 | + +### 1.4 代码可读性 + +| 指标 | 基线值 | 目标 | 量化方式 | CI Gate | +|------|--------|------|----------|---------| +| commands/mod.rs 行数 | 8,842 | ≤ 2,000 | `wc -l` | — | +| App.tsx 行数 | 1,787 | ≤ 500 | `wc -l` | — | +| 单文件 > 500 行数量 | 未统计 | 趋势下降 | 脚本统计 | — | + +## 2. 
运行时性能 + +### 2.1 启动与加载 + +| 指标 | 基线值 | 目标 | 量化方式 | CI Gate | +|------|--------|------|----------|---------| +| 冷启动到首屏渲染 | 待埋点 | ≤ 2s | `performance.now()` 差值 | ✅ | +| 首个 command 响应时间 | 待埋点 | ≤ 500ms | 首次 invoke 到返回的耗时 | ✅ | +| 页面路由切换时间 | 待埋点 | ≤ 200ms | React Suspense fallback 持续时间 | — | + +**埋点方案**: + +前端(`src/App.tsx`): +```typescript +// 在模块顶部记录启动时间 +const APP_START = performance.now(); + +// 在 App() 首次渲染完成的 useEffect 中 +useEffect(() => { + const ttfr = performance.now() - APP_START; + console.log(`[perf] time-to-first-render: ${ttfr.toFixed(0)}ms`); + invoke("log_app_event", { + event: "perf_ttfr", + data: JSON.stringify({ ttfr_ms: Math.round(ttfr) }) + }); +}, []); +``` + +### 2.2 内存 + +| 指标 | 基线值 | 目标 | 量化方式 | CI Gate | +|------|--------|------|----------|---------| +| 空闲内存占用(Rust 进程) | 待埋点 | ≤ 80MB | `sysinfo` crate 或 OS API | ✅ | +| 空闲内存占用(WebView) | 待埋点 | ≤ 120MB | `performance.memory` (Chromium) | — | +| SSH 长连接内存增长 | 待埋点 | ≤ 5MB/h | 连接后定期采样 | — | + +**埋点方案**: + +Rust 侧(`src-tauri/src/commands/overview.rs` 或新建 `perf.rs`): +```rust +#[tauri::command] +pub fn get_process_metrics() -> Result { + let pid = std::process::id(); + // 读取 /proc/{pid}/status (Linux) 或 mach_task_info (macOS) + // 返回 RSS, VmSize 等 +} +``` + +### 2.3 构建产物 + +| 指标 | 基线值 | 目标 | 量化方式 | CI Gate | +|------|--------|------|----------|---------| +| macOS ARM64 包体积 | 12.6 MB | ≤ 15 MB | CI build artifact | ✅ | +| macOS x64 包体积 | 13.3 MB | ≤ 15 MB | CI build artifact | ✅ | +| Windows x64 包体积 | 16.3 MB | ≤ 20 MB | CI build artifact | ✅ | +| Linux x64 包体积 | 103.8 MB | ≤ 110 MB | CI build artifact | ✅ | +| 前端 JS bundle 大小 (gzip) | 待统计 | ≤ 500 KB | `vite build` + `gzip -k` | ✅ | + +**CI Gate 方案**: + +在 `ci.yml` 的 frontend job 中添加: +```yaml +- name: Check bundle size + run: | + bun run build + BUNDLE_SIZE=$(du -sb dist/assets/*.js | awk '{sum+=$1} END {print sum}') + BUNDLE_KB=$((BUNDLE_SIZE / 1024)) + echo "Bundle size: ${BUNDLE_KB}KB" + if [ "$BUNDLE_KB" -gt 512 ]; then + echo "::error::Bundle 
size ${BUNDLE_KB}KB exceeds 512KB limit" + exit 1 + fi +``` + +在 `pr-build.yml` 中添加包体积检查: +```yaml +- name: Check artifact size + run: | + # 平台对应的限制值 (bytes) + case "${{ matrix.platform }}" in + macos-latest) LIMIT=$((15 * 1024 * 1024)) ;; + windows-latest) LIMIT=$((20 * 1024 * 1024)) ;; + ubuntu-latest) LIMIT=$((110 * 1024 * 1024)) ;; + esac + ARTIFACT_SIZE=$(du -sb target/release/bundle/ | awk '{print $1}') + if [ "$ARTIFACT_SIZE" -gt "$LIMIT" ]; then + echo "::error::Artifact size exceeds limit" + exit 1 + fi +``` + +### 2.4 Command 性能 + +| 指标 | 基线值 | 目标 | 量化方式 | CI Gate | +|------|--------|------|----------|---------| +| 本地 command P95 耗时 | 待埋点 | ≤ 100ms | Rust `Instant::now()` | ✅ | +| SSH command P95 耗时 | 待埋点 | ≤ 2s | 含网络 RTT | — | +| Doctor 全量诊断耗时 | 待埋点 | ≤ 5s | 端到端计时 | — | +| 配置文件读写耗时 | 待埋点 | ≤ 50ms | `Instant::now()` | — | + +**埋点方案**: + +在 command 层添加统一计时 wrapper(`src-tauri/src/commands/mod.rs`): +```rust +use std::time::Instant; +use tracing::{info, warn}; + +/// 记录 command 执行耗时,超过阈值发出 warning +pub fn trace_command(name: &str, threshold_ms: u64, f: F) -> T +where + F: FnOnce() -> T, +{ + let start = Instant::now(); + let result = f(); + let elapsed = start.elapsed(); + let ms = elapsed.as_millis() as u64; + if ms > threshold_ms { + warn!(command = name, elapsed_ms = ms, "command exceeded threshold"); + } else { + info!(command = name, elapsed_ms = ms, "command completed"); + } + result +} +``` + +## 3. Tauri 专项 + +| 指标 | 基线值 | 目标 | 量化方式 | CI Gate | +|------|--------|------|----------|---------| +| Command 前后端漂移次数 | 未追踪 | 0 | contract test | ✅ (Phase 3 延后项) | +| Packaged app smoke 通过率 | 无 smoke test | 100% | packaged smoke CI | ✅ (Phase 3 延后项) | +| 全平台构建通过率 | 100% | ≥ 95% | PR build matrix | ✅ | + +## 4. CI Gate 实施计划 + +### 阶段 1: 立即可加(本 PR 后续 commit) + +1. **单 commit 变更行数 gate** — PR 中每个 commit 不超过 500 行(additions + deletions) +2. **前端 bundle 大小 gate** — `ci.yml` frontend job 增加 `du` 检查 +3. 
**覆盖率不得下降 gate** — 已有 `coverage.yml`,确认 delta ≥ 0 时 fail + +**Commit 大小检查脚本**(加入 `ci.yml`): +```yaml +- name: Check commit sizes + run: | + MAX_LINES=500 + BASE="${{ github.event.pull_request.base.sha }}" + HEAD="${{ github.sha }}" + FAIL=0 + for COMMIT in $(git rev-list $BASE..$HEAD); do + SHORT=$(git rev-parse --short $COMMIT) + SUBJECT=$(git log --format=%s -1 $COMMIT) + STAT=$(git diff --shortstat ${COMMIT}^..${COMMIT} 2>/dev/null || echo "0") + ADDS=$(echo "$STAT" | grep -oP '\d+ insertion' | grep -oP '\d+' || echo 0) + DELS=$(echo "$STAT" | grep -oP '\d+ deletion' | grep -oP '\d+' || echo 0) + TOTAL=$((${ADDS:-0} + ${DELS:-0})) + echo "$SHORT ($TOTAL lines): $SUBJECT" + if [ "$TOTAL" -gt "$MAX_LINES" ]; then + echo "::error::Commit $SHORT exceeds $MAX_LINES line limit ($TOTAL lines): $SUBJECT" + FAIL=1 + fi + done + if [ "$FAIL" -eq 1 ]; then + echo "::error::One or more commits exceed the $MAX_LINES line limit. Split into smaller commits." + exit 1 + fi +``` + +### 阶段 2: 埋点后可加 + +3. **冷启动时间 gate** — 前端埋点 + E2E 测试中采集 +4. **command 耗时 gate** — Rust wrapper + 单元测试中断言 +5. **内存占用 gate** — `get_process_metrics` command + E2E 测试中采集 + +### 阶段 3: 基础设施完善后 + +6. **包体积 gate** — `pr-build.yml` 中按平台检查 +7. **Packaged app smoke gate** — 需要 headless 桌面环境或 Xvfb + +## 5. 
指标记录与趋势 + +每周熵治理时记录到 `docs/runbooks/entropy-governance.md` 的指标表中。 + +建议每月输出一次指标趋势报告,重点关注: +- 覆盖率是否稳步上升 +- PR 粒度是否持续减小 +- CI 成功率是否稳定在 90% 以上 +- 包体积是否异常增长 +- 新增 command 是否有对应的 contract test + +## Optimization Log + +### JS Bundle Size + +**Baseline**: 910 KB raw / 285 KB gzip (2026-03-17) + +**Optimization 1: Vendor chunk splitting** (vite.config.ts) +- Split large vendor dependencies into separate chunks: + - `vendor-react`: react, react-dom (~140KB raw) + - `vendor-i18n`: i18next ecosystem (~80KB raw) + - `vendor-ui`: radix-ui, cmdk, CVA, clsx, tailwind-merge (~200KB raw) + - `vendor-icons`: lucide-react (~150KB raw) + - `vendor-diff`: react-diff-viewer-continued (lazy, ~100KB raw) +- Expected impact: Better tree-shaking, smaller initial load, parallel chunk loading +- Note: Total gzip may increase slightly due to less cross-chunk compression, + but initial load waterfall improves significantly + +### Remote SSH Command Latency + +**Baseline**: `openclaw status` 1981ms, `openclaw cron list` 1935ms (2026-03-17) + +The ~2s latency is dominated by OpenClaw CLI cold start (Node.js process spawn + module load). +This is inherent to the CLI architecture and cannot be optimized in ClawPal. + +Potential future optimization: persistent SSH connection + daemon mode. + +### Home Page Models Probe + +**Baseline**: 106ms with 50ms mock latency (2026-03-17) + +The models probe measures time from mount to `modelProfiles` state population. +With localStorage cache seeding (readPersistedReadCache), real-app first render is near-instant. +The 106ms in E2E is the 50ms mock latency + React re-render cycle. + +Optimization: Not actionable — the real bottleneck (CLI call) is already cached client-side. 
diff --git a/src-tauri/src/commands/agent.rs b/src-tauri/src/commands/agent.rs index be9722b6..c8a4e53d 100644 --- a/src-tauri/src/commands/agent.rs +++ b/src-tauri/src/commands/agent.rs @@ -8,47 +8,49 @@ pub async fn remote_setup_agent_identity( name: String, emoji: Option, ) -> Result { - let agent_id = agent_id.trim().to_string(); - let name = name.trim().to_string(); - if agent_id.is_empty() { - return Err("Agent ID is required".into()); - } - if name.is_empty() { - return Err("Name is required".into()); - } + timed_async!("remote_setup_agent_identity", { + let agent_id = agent_id.trim().to_string(); + let name = name.trim().to_string(); + if agent_id.is_empty() { + return Err("Agent ID is required".into()); + } + if name.is_empty() { + return Err("Name is required".into()); + } - // Read remote config to find agent workspace - let (_config_path, _raw, cfg) = remote_read_openclaw_config_text_and_json(&pool, &host_id) - .await - .map_err(|e| format!("Failed to parse config: {e}"))?; + // Read remote config to find agent workspace + let (_config_path, _raw, cfg) = remote_read_openclaw_config_text_and_json(&pool, &host_id) + .await + .map_err(|e| format!("Failed to parse config: {e}"))?; - let workspace = clawpal_core::doctor::resolve_agent_workspace_from_config( - &cfg, - &agent_id, - Some("~/.openclaw/agents"), - )?; + let workspace = clawpal_core::doctor::resolve_agent_workspace_from_config( + &cfg, + &agent_id, + Some("~/.openclaw/agents"), + )?; - // Build IDENTITY.md content - let mut content = format!("- Name: {}\n", name); - if let Some(ref e) = emoji { - let e = e.trim(); - if !e.is_empty() { - content.push_str(&format!("- Emoji: {}\n", e)); + // Build IDENTITY.md content + let mut content = format!("- Name: {}\n", name); + if let Some(ref e) = emoji { + let e = e.trim(); + if !e.is_empty() { + content.push_str(&format!("- Emoji: {}\n", e)); + } } - } - // Write via SSH - let ws = if workspace.starts_with("~/") { - workspace.to_string() - } else { - 
format!("~/{workspace}") - }; - pool.exec(&host_id, &format!("mkdir -p {}", shell_escape(&ws))) - .await?; - let identity_path = format!("{}/IDENTITY.md", ws); - pool.sftp_write(&host_id, &identity_path, &content).await?; + // Write via SSH + let ws = if workspace.starts_with("~/") { + workspace.to_string() + } else { + format!("~/{workspace}") + }; + pool.exec(&host_id, &format!("mkdir -p {}", shell_escape(&ws))) + .await?; + let identity_path = format!("{}/IDENTITY.md", ws); + pool.sftp_write(&host_id, &identity_path, &content).await?; - Ok(true) + Ok(true) + }) } #[tauri::command] @@ -59,34 +61,36 @@ pub async fn remote_chat_via_openclaw( message: String, session_id: Option, ) -> Result { - let escaped_msg = message.replace('\'', "'\\''"); - let escaped_agent = agent_id.replace('\'', "'\\''"); - let mut cmd = format!( - "openclaw agent --local --agent '{}' --message '{}' --json --no-color", - escaped_agent, escaped_msg - ); - if let Some(sid) = session_id { - let escaped_sid = sid.replace('\'', "'\\''"); - cmd.push_str(&format!(" --session-id '{}'", escaped_sid)); - } - let result = pool.exec_login(&host_id, &cmd).await?; - // Try to extract JSON from stdout first — even on non-zero exit the - // command may have produced valid output (e.g. bash job-control warnings - // in stderr cause exit 1 but the actual command succeeded). 
- if let Some(json_str) = clawpal_core::doctor::extract_json_from_output(&result.stdout) { - return serde_json::from_str(json_str) - .map_err(|e| format!("Failed to parse remote chat response: {e}")); - } - if result.exit_code != 0 { - return Err(format!( - "Remote chat failed (exit {}): {}", - result.exit_code, result.stderr - )); - } - Err(format!( - "No JSON in remote openclaw output: {}", - result.stdout - )) + timed_async!("remote_chat_via_openclaw", { + let escaped_msg = message.replace('\'', "'\\''"); + let escaped_agent = agent_id.replace('\'', "'\\''"); + let mut cmd = format!( + "openclaw agent --local --agent '{}' --message '{}' --json --no-color", + escaped_agent, escaped_msg + ); + if let Some(sid) = session_id { + let escaped_sid = sid.replace('\'', "'\\''"); + cmd.push_str(&format!(" --session-id '{}'", escaped_sid)); + } + let result = pool.exec_login(&host_id, &cmd).await?; + // Try to extract JSON from stdout first — even on non-zero exit the + // command may have produced valid output (e.g. bash job-control warnings + // in stderr cause exit 1 but the actual command succeeded). 
+ if let Some(json_str) = clawpal_core::doctor::extract_json_from_output(&result.stdout) { + return serde_json::from_str(json_str) + .map_err(|e| format!("Failed to parse remote chat response: {e}")); + } + if result.exit_code != 0 { + return Err(format!( + "Remote chat failed (exit {}): {}", + result.exit_code, result.stderr + )); + } + Err(format!( + "No JSON in remote openclaw output: {}", + result.stdout + )) + }) } #[tauri::command] @@ -95,123 +99,129 @@ pub fn create_agent( model_value: Option, independent: Option, ) -> Result { - let agent_id = agent_id.trim().to_string(); - if agent_id.is_empty() { - return Err("Agent ID is required".into()); - } - if !agent_id - .chars() - .all(|c| c.is_alphanumeric() || c == '-' || c == '_') - { - return Err("Agent ID may only contain letters, numbers, hyphens, and underscores".into()); - } + timed_sync!("create_agent", { + let agent_id = agent_id.trim().to_string(); + if agent_id.is_empty() { + return Err("Agent ID is required".into()); + } + if !agent_id + .chars() + .all(|c| c.is_alphanumeric() || c == '-' || c == '_') + { + return Err( + "Agent ID may only contain letters, numbers, hyphens, and underscores".into(), + ); + } - let paths = resolve_paths(); - let mut cfg = read_openclaw_config(&paths)?; - let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?; + let paths = resolve_paths(); + let mut cfg = read_openclaw_config(&paths)?; + let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?; - let existing_ids = collect_agent_ids(&cfg); - if existing_ids - .iter() - .any(|id| id.eq_ignore_ascii_case(&agent_id)) - { - return Err(format!("Agent '{}' already exists", agent_id)); - } + let existing_ids = collect_agent_ids(&cfg); + if existing_ids + .iter() + .any(|id| id.eq_ignore_ascii_case(&agent_id)) + { + return Err(format!("Agent '{}' already exists", agent_id)); + } - let model_display = model_value - .map(|v| v.trim().to_string()) - .filter(|v| !v.is_empty()); + let 
model_display = model_value + .map(|v| v.trim().to_string()) + .filter(|v| !v.is_empty()); - // If independent, create a dedicated workspace directory; - // otherwise inherit the default workspace so the gateway doesn't auto-create one. - let workspace = if independent.unwrap_or(false) { - let ws_dir = paths.base_dir.join("workspaces").join(&agent_id); - fs::create_dir_all(&ws_dir).map_err(|e| e.to_string())?; - let ws_path = ws_dir.to_string_lossy().to_string(); - Some(ws_path) - } else { - cfg.pointer("/agents/defaults/workspace") - .or_else(|| cfg.pointer("/agents/default/workspace")) - .and_then(Value::as_str) - .map(|s| s.to_string()) - }; + // If independent, create a dedicated workspace directory; + // otherwise inherit the default workspace so the gateway doesn't auto-create one. + let workspace = if independent.unwrap_or(false) { + let ws_dir = paths.base_dir.join("workspaces").join(&agent_id); + fs::create_dir_all(&ws_dir).map_err(|e| e.to_string())?; + let ws_path = ws_dir.to_string_lossy().to_string(); + Some(ws_path) + } else { + cfg.pointer("/agents/defaults/workspace") + .or_else(|| cfg.pointer("/agents/default/workspace")) + .and_then(Value::as_str) + .map(|s| s.to_string()) + }; - // Build agent entry - let mut agent_obj = serde_json::Map::new(); - agent_obj.insert("id".into(), Value::String(agent_id.clone())); - if let Some(ref model_str) = model_display { - agent_obj.insert("model".into(), Value::String(model_str.clone())); - } - if let Some(ref ws) = workspace { - agent_obj.insert("workspace".into(), Value::String(ws.clone())); - } + // Build agent entry + let mut agent_obj = serde_json::Map::new(); + agent_obj.insert("id".into(), Value::String(agent_id.clone())); + if let Some(ref model_str) = model_display { + agent_obj.insert("model".into(), Value::String(model_str.clone())); + } + if let Some(ref ws) = workspace { + agent_obj.insert("workspace".into(), Value::String(ws.clone())); + } - let agents = cfg - .as_object_mut() - .ok_or("config is 
not an object")? - .entry("agents") - .or_insert_with(|| Value::Object(serde_json::Map::new())) - .as_object_mut() - .ok_or("agents is not an object")?; - let list = agents - .entry("list") - .or_insert_with(|| Value::Array(Vec::new())) - .as_array_mut() - .ok_or("agents.list is not an array")?; - list.push(Value::Object(agent_obj)); + let agents = cfg + .as_object_mut() + .ok_or("config is not an object")? + .entry("agents") + .or_insert_with(|| Value::Object(serde_json::Map::new())) + .as_object_mut() + .ok_or("agents is not an object")?; + let list = agents + .entry("list") + .or_insert_with(|| Value::Array(Vec::new())) + .as_array_mut() + .ok_or("agents.list is not an array")?; + list.push(Value::Object(agent_obj)); - write_config_with_snapshot(&paths, ¤t, &cfg, "create-agent")?; - Ok(AgentOverview { - id: agent_id, - name: None, - emoji: None, - model: model_display, - channels: vec![], - online: false, - workspace, + write_config_with_snapshot(&paths, ¤t, &cfg, "create-agent")?; + Ok(AgentOverview { + id: agent_id, + name: None, + emoji: None, + model: model_display, + channels: vec![], + online: false, + workspace, + }) }) } #[tauri::command] pub fn delete_agent(agent_id: String) -> Result { - let agent_id = agent_id.trim().to_string(); - if agent_id.is_empty() { - return Err("Agent ID is required".into()); - } - if agent_id == "main" { - return Err("Cannot delete the main agent".into()); - } + timed_sync!("delete_agent", { + let agent_id = agent_id.trim().to_string(); + if agent_id.is_empty() { + return Err("Agent ID is required".into()); + } + if agent_id == "main" { + return Err("Cannot delete the main agent".into()); + } - let paths = resolve_paths(); - let mut cfg = read_openclaw_config(&paths)?; - let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?; + let paths = resolve_paths(); + let mut cfg = read_openclaw_config(&paths)?; + let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?; - let list = cfg - 
.pointer_mut("/agents/list") - .and_then(Value::as_array_mut) - .ok_or("agents.list not found")?; + let list = cfg + .pointer_mut("/agents/list") + .and_then(Value::as_array_mut) + .ok_or("agents.list not found")?; - let before = list.len(); - list.retain(|agent| agent.get("id").and_then(Value::as_str) != Some(&agent_id)); + let before = list.len(); + list.retain(|agent| agent.get("id").and_then(Value::as_str) != Some(&agent_id)); - if list.len() == before { - return Err(format!("Agent '{}' not found", agent_id)); - } + if list.len() == before { + return Err(format!("Agent '{}' not found", agent_id)); + } - // Reset any bindings that reference this agent back to "main" (default) - // so the channel doesn't lose its binding entry entirely. - if let Some(bindings) = cfg.pointer_mut("/bindings").and_then(Value::as_array_mut) { - for b in bindings.iter_mut() { - if b.get("agentId").and_then(Value::as_str) == Some(&agent_id) { - if let Some(obj) = b.as_object_mut() { - obj.insert("agentId".into(), Value::String("main".into())); + // Reset any bindings that reference this agent back to "main" (default) + // so the channel doesn't lose its binding entry entirely. 
+ if let Some(bindings) = cfg.pointer_mut("/bindings").and_then(Value::as_array_mut) { + for b in bindings.iter_mut() { + if b.get("agentId").and_then(Value::as_str) == Some(&agent_id) { + if let Some(obj) = b.as_object_mut() { + obj.insert("agentId".into(), Value::String("main".into())); + } } } } - } - write_config_with_snapshot(&paths, ¤t, &cfg, "delete-agent")?; - Ok(true) + write_config_with_snapshot(&paths, ¤t, &cfg, "delete-agent")?; + Ok(true) + }) } #[tauri::command] @@ -220,38 +230,41 @@ pub fn setup_agent_identity( name: String, emoji: Option, ) -> Result { - let agent_id = agent_id.trim().to_string(); - let name = name.trim().to_string(); - if agent_id.is_empty() { - return Err("Agent ID is required".into()); - } - if name.is_empty() { - return Err("Name is required".into()); - } + timed_sync!("setup_agent_identity", { + let agent_id = agent_id.trim().to_string(); + let name = name.trim().to_string(); + if agent_id.is_empty() { + return Err("Agent ID is required".into()); + } + if name.is_empty() { + return Err("Name is required".into()); + } - let paths = resolve_paths(); - let cfg = read_openclaw_config(&paths)?; + let paths = resolve_paths(); + let cfg = read_openclaw_config(&paths)?; - let workspace = - clawpal_core::doctor::resolve_agent_workspace_from_config(&cfg, &agent_id, None) - .map(|s| expand_tilde(&s))?; + let workspace = + clawpal_core::doctor::resolve_agent_workspace_from_config(&cfg, &agent_id, None) + .map(|s| expand_tilde(&s))?; - // Build IDENTITY.md content - let mut content = format!("- Name: {}\n", name); - if let Some(ref e) = emoji { - let e = e.trim(); - if !e.is_empty() { - content.push_str(&format!("- Emoji: {}\n", e)); + // Build IDENTITY.md content + let mut content = format!("- Name: {}\n", name); + if let Some(ref e) = emoji { + let e = e.trim(); + if !e.is_empty() { + content.push_str(&format!("- Emoji: {}\n", e)); + } } - } - let ws_path = std::path::Path::new(&workspace); - fs::create_dir_all(ws_path).map_err(|e| 
format!("Failed to create workspace dir: {}", e))?; - let identity_path = ws_path.join("IDENTITY.md"); - fs::write(&identity_path, &content) - .map_err(|e| format!("Failed to write IDENTITY.md: {}", e))?; + let ws_path = std::path::Path::new(&workspace); + fs::create_dir_all(ws_path) + .map_err(|e| format!("Failed to create workspace dir: {}", e))?; + let identity_path = ws_path.join("IDENTITY.md"); + fs::write(&identity_path, &content) + .map_err(|e| format!("Failed to write IDENTITY.md: {}", e))?; - Ok(true) + Ok(true) + }) } #[tauri::command] @@ -260,32 +273,35 @@ pub async fn chat_via_openclaw( message: String, session_id: Option, ) -> Result { - tauri::async_runtime::spawn_blocking(move || { - let paths = resolve_paths(); - if let Err(err) = sync_main_auth_for_active_config(&paths) { - eprintln!("Warning: pre-chat main auth sync failed: {err}"); - } - let mut args = vec![ - "agent".to_string(), - "--local".to_string(), - "--agent".to_string(), - agent_id, - "--message".to_string(), - message, - "--json".to_string(), - "--no-color".to_string(), - ]; - if let Some(sid) = session_id { - args.push("--session-id".to_string()); - args.push(sid); - } + timed_async!("chat_via_openclaw", { + tauri::async_runtime::spawn_blocking(move || { + let paths = resolve_paths(); + if let Err(err) = sync_main_auth_for_active_config(&paths) { + eprintln!("Warning: pre-chat main auth sync failed: {err}"); + } + let mut args = vec![ + "agent".to_string(), + "--local".to_string(), + "--agent".to_string(), + agent_id, + "--message".to_string(), + message, + "--json".to_string(), + "--no-color".to_string(), + ]; + if let Some(sid) = session_id { + args.push("--session-id".to_string()); + args.push(sid); + } - let arg_refs: Vec<&str> = args.iter().map(|s| s.as_str()).collect(); - let output = run_openclaw_raw(&arg_refs)?; - let json_str = clawpal_core::doctor::extract_json_from_output(&output.stdout) - .ok_or_else(|| format!("No JSON in openclaw output: {}", output.stdout))?; - 
serde_json::from_str(json_str).map_err(|e| format!("Parse openclaw response failed: {}", e)) + let arg_refs: Vec<&str> = args.iter().map(|s| s.as_str()).collect(); + let output = run_openclaw_raw(&arg_refs)?; + let json_str = clawpal_core::doctor::extract_json_from_output(&output.stdout) + .ok_or_else(|| format!("No JSON in openclaw output: {}", output.stdout))?; + serde_json::from_str(json_str) + .map_err(|e| format!("Parse openclaw response failed: {}", e)) + }) + .await + .map_err(|e| format!("Task join failed: {}", e))? }) - .await - .map_err(|e| format!("Task join failed: {}", e))? } diff --git a/src-tauri/src/commands/app_logs.rs b/src-tauri/src/commands/app_logs.rs index 1311f0af..e65797f2 100644 --- a/src-tauri/src/commands/app_logs.rs +++ b/src-tauri/src/commands/app_logs.rs @@ -9,44 +9,56 @@ fn clamp_log_lines(lines: Option) -> usize { #[tauri::command] pub fn read_app_log(lines: Option) -> Result { - crate::logging::read_log_tail("app.log", clamp_log_lines(lines)) + timed_sync!("read_app_log", { + crate::logging::read_log_tail("app.log", clamp_log_lines(lines)) + }) } #[tauri::command] pub fn read_error_log(lines: Option) -> Result { - crate::logging::read_log_tail("error.log", clamp_log_lines(lines)) + timed_sync!("read_error_log", { + crate::logging::read_log_tail("error.log", clamp_log_lines(lines)) + }) } #[tauri::command] pub fn read_helper_log(lines: Option) -> Result { - crate::logging::read_log_tail("helper.log", clamp_log_lines(lines)) + timed_sync!("read_helper_log", { + crate::logging::read_log_tail("helper.log", clamp_log_lines(lines)) + }) } #[tauri::command] pub fn log_app_event(message: String) -> Result { - let trimmed = message.trim(); - if !trimmed.is_empty() { - crate::logging::log_info(trimmed); - } - Ok(true) + timed_sync!("log_app_event", { + let trimmed = message.trim(); + if !trimmed.is_empty() { + crate::logging::log_info(trimmed); + } + Ok(true) + }) } #[tauri::command] pub fn read_gateway_log(lines: Option) -> Result { - let 
paths = crate::models::resolve_paths(); - let path = paths.openclaw_dir.join("logs/gateway.log"); - if !path.exists() { - return Ok(String::new()); - } - crate::logging::read_path_tail(&path, clamp_log_lines(lines)) + timed_sync!("read_gateway_log", { + let paths = crate::models::resolve_paths(); + let path = paths.openclaw_dir.join("logs/gateway.log"); + if !path.exists() { + return Ok(String::new()); + } + crate::logging::read_path_tail(&path, clamp_log_lines(lines)) + }) } #[tauri::command] pub fn read_gateway_error_log(lines: Option) -> Result { - let paths = crate::models::resolve_paths(); - let path = paths.openclaw_dir.join("logs/gateway.err.log"); - if !path.exists() { - return Ok(String::new()); - } - crate::logging::read_path_tail(&path, clamp_log_lines(lines)) + timed_sync!("read_gateway_error_log", { + let paths = crate::models::resolve_paths(); + let path = paths.openclaw_dir.join("logs/gateway.err.log"); + if !path.exists() { + return Ok(String::new()); + } + crate::logging::read_path_tail(&path, clamp_log_lines(lines)) + }) } diff --git a/src-tauri/src/commands/backup.rs b/src-tauri/src/commands/backup.rs index 283d7acf..70d74461 100644 --- a/src-tauri/src/commands/backup.rs +++ b/src-tauri/src/commands/backup.rs @@ -5,41 +5,43 @@ pub async fn remote_backup_before_upgrade( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - let now_secs = unix_timestamp_secs(); - let now_dt = chrono::DateTime::::from_timestamp(now_secs as i64, 0); - let name = now_dt - .map(|dt| dt.format("%Y-%m-%d_%H%M%S").to_string()) - .unwrap_or_else(|| format!("{now_secs}")); - - let escaped_name = shell_escape(&name); - let cmd = format!( - concat!( - "set -e; ", - "BDIR=\"$HOME/.clawpal/backups/\"{name}; ", - "mkdir -p \"$BDIR\"; ", - "cp \"$HOME/.openclaw/openclaw.json\" \"$BDIR/\" 2>/dev/null || true; ", - "cp -r \"$HOME/.openclaw/agents\" \"$BDIR/\" 2>/dev/null || true; ", - "cp -r \"$HOME/.openclaw/memory\" \"$BDIR/\" 2>/dev/null || true; ", - "du -sk 
\"$BDIR\" 2>/dev/null | awk '{{print $1 * 1024}}' || echo 0" - ), - name = escaped_name - ); - - let result = pool.exec_login(&host_id, &cmd).await?; - if result.exit_code != 0 { - return Err(format!( - "Remote backup failed (exit {}): {}", - result.exit_code, result.stderr - )); - } - - let size_bytes = clawpal_core::backup::parse_backup_result(&result.stdout).size_bytes; - - Ok(BackupInfo { - name, - path: String::new(), - created_at: format_timestamp_from_unix(now_secs), - size_bytes, + timed_async!("remote_backup_before_upgrade", { + let now_secs = unix_timestamp_secs(); + let now_dt = chrono::DateTime::::from_timestamp(now_secs as i64, 0); + let name = now_dt + .map(|dt| dt.format("%Y-%m-%d_%H%M%S").to_string()) + .unwrap_or_else(|| format!("{now_secs}")); + + let escaped_name = shell_escape(&name); + let cmd = format!( + concat!( + "set -e; ", + "BDIR=\"$HOME/.clawpal/backups/\"{name}; ", + "mkdir -p \"$BDIR\"; ", + "cp \"$HOME/.openclaw/openclaw.json\" \"$BDIR/\" 2>/dev/null || true; ", + "cp -r \"$HOME/.openclaw/agents\" \"$BDIR/\" 2>/dev/null || true; ", + "cp -r \"$HOME/.openclaw/memory\" \"$BDIR/\" 2>/dev/null || true; ", + "du -sk \"$BDIR\" 2>/dev/null | awk '{{print $1 * 1024}}' || echo 0" + ), + name = escaped_name + ); + + let result = pool.exec_login(&host_id, &cmd).await?; + if result.exit_code != 0 { + return Err(format!( + "Remote backup failed (exit {}): {}", + result.exit_code, result.stderr + )); + } + + let size_bytes = clawpal_core::backup::parse_backup_result(&result.stdout).size_bytes; + + Ok(BackupInfo { + name, + path: String::new(), + created_at: format_timestamp_from_unix(now_secs), + size_bytes, + }) }) } @@ -48,69 +50,71 @@ pub async fn remote_list_backups( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result, String> { - // Migrate remote data from legacy path ~/.openclaw/.clawpal → ~/.clawpal - let _ = pool - .exec_login( - &host_id, - concat!( - "if [ -d \"$HOME/.openclaw/.clawpal\" ]; then ", - "mkdir -p 
\"$HOME/.clawpal\"; ", - "cp -a \"$HOME/.openclaw/.clawpal/.\" \"$HOME/.clawpal/\" 2>/dev/null; ", - "rm -rf \"$HOME/.openclaw/.clawpal\"; ", - "fi" - ), - ) - .await; - - // List backup directory names - let list_result = pool - .exec_login( - &host_id, - "ls -1d \"$HOME/.clawpal/backups\"/*/ 2>/dev/null || true", - ) - .await?; - - let dirs: Vec = list_result - .stdout - .lines() - .filter(|l| !l.trim().is_empty()) - .map(|l| l.trim().trim_end_matches('/').to_string()) - .collect(); - - if dirs.is_empty() { - return Ok(Vec::new()); - } - - // Build a single command to get sizes for all backup dirs (du -sk is POSIX portable) - let du_parts: Vec = dirs - .iter() - .map(|d| format!("du -sk '{}' 2>/dev/null || echo '0\t{}'", d, d)) - .collect(); - let du_cmd = du_parts.join("; "); - let du_result = pool.exec_login(&host_id, &du_cmd).await?; - - let size_entries = clawpal_core::backup::parse_backup_list(&du_result.stdout); - let size_map: std::collections::HashMap = size_entries - .into_iter() - .map(|e| (e.path, e.size_bytes)) - .collect(); - - let mut backups: Vec = dirs - .iter() - .map(|d| { - let name = d.rsplit('/').next().unwrap_or(d).to_string(); - let size_bytes = size_map.get(d.trim_end_matches('/')).copied().unwrap_or(0); - BackupInfo { - name: name.clone(), - path: d.clone(), - created_at: name.clone(), // Name is the timestamp - size_bytes, - } - }) - .collect(); + timed_async!("remote_list_backups", { + // Migrate remote data from legacy path ~/.openclaw/.clawpal → ~/.clawpal + let _ = pool + .exec_login( + &host_id, + concat!( + "if [ -d \"$HOME/.openclaw/.clawpal\" ]; then ", + "mkdir -p \"$HOME/.clawpal\"; ", + "cp -a \"$HOME/.openclaw/.clawpal/.\" \"$HOME/.clawpal/\" 2>/dev/null; ", + "rm -rf \"$HOME/.openclaw/.clawpal\"; ", + "fi" + ), + ) + .await; + + // List backup directory names + let list_result = pool + .exec_login( + &host_id, + "ls -1d \"$HOME/.clawpal/backups\"/*/ 2>/dev/null || true", + ) + .await?; + + let dirs: Vec = list_result + 
.stdout + .lines() + .filter(|l| !l.trim().is_empty()) + .map(|l| l.trim().trim_end_matches('/').to_string()) + .collect(); + + if dirs.is_empty() { + return Ok(Vec::new()); + } - backups.sort_by(|a, b| b.name.cmp(&a.name)); - Ok(backups) + // Build a single command to get sizes for all backup dirs (du -sk is POSIX portable) + let du_parts: Vec = dirs + .iter() + .map(|d| format!("du -sk '{}' 2>/dev/null || echo '0\t{}'", d, d)) + .collect(); + let du_cmd = du_parts.join("; "); + let du_result = pool.exec_login(&host_id, &du_cmd).await?; + + let size_entries = clawpal_core::backup::parse_backup_list(&du_result.stdout); + let size_map: std::collections::HashMap = size_entries + .into_iter() + .map(|e| (e.path, e.size_bytes)) + .collect(); + + let mut backups: Vec = dirs + .iter() + .map(|d| { + let name = d.rsplit('/').next().unwrap_or(d).to_string(); + let size_bytes = size_map.get(d.trim_end_matches('/')).copied().unwrap_or(0); + BackupInfo { + name: name.clone(), + path: d.clone(), + created_at: name.clone(), // Name is the timestamp + size_bytes, + } + }) + .collect(); + + backups.sort_by(|a, b| b.name.cmp(&a.name)); + Ok(backups) + }) } #[tauri::command] @@ -119,26 +123,28 @@ pub async fn remote_restore_from_backup( host_id: String, backup_name: String, ) -> Result { - let escaped_name = shell_escape(&backup_name); - let cmd = format!( - concat!( - "set -e; ", - "BDIR=\"$HOME/.clawpal/backups/\"{name}; ", - "[ -d \"$BDIR\" ] || {{ echo 'Backup not found'; exit 1; }}; ", - "cp \"$BDIR/openclaw.json\" \"$HOME/.openclaw/openclaw.json\" 2>/dev/null || true; ", - "[ -d \"$BDIR/agents\" ] && cp -r \"$BDIR/agents\" \"$HOME/.openclaw/\" 2>/dev/null || true; ", - "[ -d \"$BDIR/memory\" ] && cp -r \"$BDIR/memory\" \"$HOME/.openclaw/\" 2>/dev/null || true; ", - "echo 'Restored from backup '{name}" - ), - name = escaped_name - ); - - let result = pool.exec_login(&host_id, &cmd).await?; - if result.exit_code != 0 { - return Err(format!("Remote restore failed: {}", 
result.stderr)); - } - - Ok(format!("Restored from backup '{}'", backup_name)) + timed_async!("remote_restore_from_backup", { + let escaped_name = shell_escape(&backup_name); + let cmd = format!( + concat!( + "set -e; ", + "BDIR=\"$HOME/.clawpal/backups/\"{name}; ", + "[ -d \"$BDIR\" ] || {{ echo 'Backup not found'; exit 1; }}; ", + "cp \"$BDIR/openclaw.json\" \"$HOME/.openclaw/openclaw.json\" 2>/dev/null || true; ", + "[ -d \"$BDIR/agents\" ] && cp -r \"$BDIR/agents\" \"$HOME/.openclaw/\" 2>/dev/null || true; ", + "[ -d \"$BDIR/memory\" ] && cp -r \"$BDIR/memory\" \"$HOME/.openclaw/\" 2>/dev/null || true; ", + "echo 'Restored from backup '{name}" + ), + name = escaped_name + ); + + let result = pool.exec_login(&host_id, &cmd).await?; + if result.exit_code != 0 { + return Err(format!("Remote restore failed: {}", result.stderr)); + } + + Ok(format!("Restored from backup '{}'", backup_name)) + }) } #[tauri::command] @@ -146,44 +152,49 @@ pub async fn remote_run_openclaw_upgrade( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - // Use the official install script with --no-prompt for non-interactive SSH. - // The script handles npm prefix/permissions, bin links, and PATH fixups - // that plain `npm install -g` misses (e.g. stale /usr/bin/openclaw symlinks). 
- let version_before = pool - .exec_login(&host_id, "openclaw --version 2>/dev/null || true") - .await - .map(|r| r.stdout.trim().to_string()) - .unwrap_or_default(); - - let install_cmd = "curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install.sh | bash -s -- --no-prompt --no-onboard 2>&1"; - let result = pool.exec_login(&host_id, install_cmd).await?; - let combined = if result.stderr.is_empty() { - result.stdout.clone() - } else { - format!("{}\n{}", result.stdout, result.stderr) - }; - - if result.exit_code != 0 { - return Err(combined); - } - - // Restart gateway after successful upgrade (best-effort) - let _ = pool - .exec_login(&host_id, "openclaw gateway restart 2>/dev/null || true") - .await; - - // Verify version actually changed - let version_after = pool - .exec_login(&host_id, "openclaw --version 2>/dev/null || true") - .await - .map(|r| r.stdout.trim().to_string()) - .unwrap_or_default(); - let _upgrade_info = clawpal_core::backup::parse_upgrade_result(&combined); - if !version_before.is_empty() && !version_after.is_empty() && version_before == version_after { - return Err(format!("{combined}\n\nWarning: version unchanged after upgrade ({version_before}). Check PATH or npm prefix.")); - } - - Ok(combined) + timed_async!("remote_run_openclaw_upgrade", { + // Use the official install script with --no-prompt for non-interactive SSH. + // The script handles npm prefix/permissions, bin links, and PATH fixups + // that plain `npm install -g` misses (e.g. stale /usr/bin/openclaw symlinks). 
+ let version_before = pool + .exec_login(&host_id, "openclaw --version 2>/dev/null || true") + .await + .map(|r| r.stdout.trim().to_string()) + .unwrap_or_default(); + + let install_cmd = "curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install.sh | bash -s -- --no-prompt --no-onboard 2>&1"; + let result = pool.exec_login(&host_id, install_cmd).await?; + let combined = if result.stderr.is_empty() { + result.stdout.clone() + } else { + format!("{}\n{}", result.stdout, result.stderr) + }; + + if result.exit_code != 0 { + return Err(combined); + } + + // Restart gateway after successful upgrade (best-effort) + let _ = pool + .exec_login(&host_id, "openclaw gateway restart 2>/dev/null || true") + .await; + + // Verify version actually changed + let version_after = pool + .exec_login(&host_id, "openclaw --version 2>/dev/null || true") + .await + .map(|r| r.stdout.trim().to_string()) + .unwrap_or_default(); + let _upgrade_info = clawpal_core::backup::parse_upgrade_result(&combined); + if !version_before.is_empty() + && !version_after.is_empty() + && version_before == version_after + { + return Err(format!("{combined}\n\nWarning: version unchanged after upgrade ({version_before}). 
Check PATH or npm prefix.")); + } + + Ok(combined) + }) } #[tauri::command] @@ -191,137 +202,149 @@ pub async fn remote_check_openclaw_update( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - // Get installed version and extract clean semver — don't fail if binary not found - let installed_version = match pool.exec_login(&host_id, "openclaw --version").await { - Ok(r) => extract_version_from_text(r.stdout.trim()) - .unwrap_or_else(|| r.stdout.trim().to_string()), - Err(_) => String::new(), - }; - - let paths = resolve_paths(); - let cache = tokio::task::spawn_blocking(move || { - resolve_openclaw_latest_release_cached(&paths, false).ok() + timed_async!("remote_check_openclaw_update", { + // Get installed version and extract clean semver — don't fail if binary not found + let installed_version = match pool.exec_login(&host_id, "openclaw --version").await { + Ok(r) => extract_version_from_text(r.stdout.trim()) + .unwrap_or_else(|| r.stdout.trim().to_string()), + Err(_) => String::new(), + }; + + let paths = resolve_paths(); + let cache = tokio::task::spawn_blocking(move || { + resolve_openclaw_latest_release_cached(&paths, false).ok() + }) + .await + .unwrap_or(None); + let latest_version = cache.and_then(|entry| entry.latest_version); + let upgrade = latest_version + .as_ref() + .is_some_and(|latest| compare_semver(&installed_version, Some(latest.as_str()))); + Ok(serde_json::json!({ + "upgradeAvailable": upgrade, + "latestVersion": latest_version, + "installedVersion": installed_version, + })) }) - .await - .unwrap_or(None); - let latest_version = cache.and_then(|entry| entry.latest_version); - let upgrade = latest_version - .as_ref() - .is_some_and(|latest| compare_semver(&installed_version, Some(latest.as_str()))); - Ok(serde_json::json!({ - "upgradeAvailable": upgrade, - "latestVersion": latest_version, - "installedVersion": installed_version, - })) } #[tauri::command] pub fn backup_before_upgrade() -> Result { - let paths = resolve_paths(); 
- let backups_dir = paths.clawpal_dir.join("backups"); - fs::create_dir_all(&backups_dir).map_err(|e| format!("Failed to create backups dir: {e}"))?; - - let now_secs = unix_timestamp_secs(); - let now_dt = chrono::DateTime::::from_timestamp(now_secs as i64, 0); - let name = now_dt - .map(|dt| dt.format("%Y-%m-%d_%H%M%S").to_string()) - .unwrap_or_else(|| format!("{now_secs}")); - let backup_dir = backups_dir.join(&name); - fs::create_dir_all(&backup_dir).map_err(|e| format!("Failed to create backup dir: {e}"))?; - - let mut total_bytes = 0u64; - - // Copy config file - if paths.config_path.exists() { - let dest = backup_dir.join("openclaw.json"); - fs::copy(&paths.config_path, &dest).map_err(|e| format!("Failed to copy config: {e}"))?; - total_bytes += fs::metadata(&dest).map(|m| m.len()).unwrap_or(0); - } - - // Copy directories, excluding sessions and archive - let skip_dirs: HashSet<&str> = ["sessions", "archive", ".clawpal"] - .iter() - .copied() - .collect(); - copy_dir_recursive(&paths.base_dir, &backup_dir, &skip_dirs, &mut total_bytes)?; - - Ok(BackupInfo { - name: name.clone(), - path: backup_dir.to_string_lossy().to_string(), - created_at: format_timestamp_from_unix(now_secs), - size_bytes: total_bytes, + timed_sync!("backup_before_upgrade", { + let paths = resolve_paths(); + let backups_dir = paths.clawpal_dir.join("backups"); + fs::create_dir_all(&backups_dir) + .map_err(|e| format!("Failed to create backups dir: {e}"))?; + + let now_secs = unix_timestamp_secs(); + let now_dt = chrono::DateTime::::from_timestamp(now_secs as i64, 0); + let name = now_dt + .map(|dt| dt.format("%Y-%m-%d_%H%M%S").to_string()) + .unwrap_or_else(|| format!("{now_secs}")); + let backup_dir = backups_dir.join(&name); + fs::create_dir_all(&backup_dir).map_err(|e| format!("Failed to create backup dir: {e}"))?; + + let mut total_bytes = 0u64; + + // Copy config file + if paths.config_path.exists() { + let dest = backup_dir.join("openclaw.json"); + fs::copy(&paths.config_path, 
&dest) + .map_err(|e| format!("Failed to copy config: {e}"))?; + total_bytes += fs::metadata(&dest).map(|m| m.len()).unwrap_or(0); + } + + // Copy directories, excluding sessions and archive + let skip_dirs: HashSet<&str> = ["sessions", "archive", ".clawpal"] + .iter() + .copied() + .collect(); + copy_dir_recursive(&paths.base_dir, &backup_dir, &skip_dirs, &mut total_bytes)?; + + Ok(BackupInfo { + name: name.clone(), + path: backup_dir.to_string_lossy().to_string(), + created_at: format_timestamp_from_unix(now_secs), + size_bytes: total_bytes, + }) }) } #[tauri::command] pub fn list_backups() -> Result, String> { - let paths = resolve_paths(); - let backups_dir = paths.clawpal_dir.join("backups"); - if !backups_dir.exists() { - return Ok(Vec::new()); - } - let mut backups = Vec::new(); - let entries = fs::read_dir(&backups_dir).map_err(|e| e.to_string())?; - for entry in entries { - let entry = entry.map_err(|e| e.to_string())?; - if !entry.file_type().map(|t| t.is_dir()).unwrap_or(false) { - continue; + timed_sync!("list_backups", { + let paths = resolve_paths(); + let backups_dir = paths.clawpal_dir.join("backups"); + if !backups_dir.exists() { + return Ok(Vec::new()); } - let name = entry.file_name().to_string_lossy().to_string(); - let path = entry.path(); - let size = dir_size(&path); - let created_at = fs::metadata(&path) - .and_then(|m| m.created()) - .map(|t| { - let secs = t.duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(); - format_timestamp_from_unix(secs) - }) - .unwrap_or_else(|_| name.clone()); - backups.push(BackupInfo { - name, - path: path.to_string_lossy().to_string(), - created_at, - size_bytes: size, - }); - } - backups.sort_by(|a, b| b.name.cmp(&a.name)); - Ok(backups) + let mut backups = Vec::new(); + let entries = fs::read_dir(&backups_dir).map_err(|e| e.to_string())?; + for entry in entries { + let entry = entry.map_err(|e| e.to_string())?; + if !entry.file_type().map(|t| t.is_dir()).unwrap_or(false) { + continue; + } + let name = 
entry.file_name().to_string_lossy().to_string(); + let path = entry.path(); + let size = dir_size(&path); + let created_at = fs::metadata(&path) + .and_then(|m| m.created()) + .map(|t| { + let secs = t.duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(); + format_timestamp_from_unix(secs) + }) + .unwrap_or_else(|_| name.clone()); + backups.push(BackupInfo { + name, + path: path.to_string_lossy().to_string(), + created_at, + size_bytes: size, + }); + } + backups.sort_by(|a, b| b.name.cmp(&a.name)); + Ok(backups) + }) } #[tauri::command] pub fn restore_from_backup(backup_name: String) -> Result { - let paths = resolve_paths(); - let backup_dir = paths.clawpal_dir.join("backups").join(&backup_name); - if !backup_dir.exists() { - return Err(format!("Backup '{}' not found", backup_name)); - } - - // Restore config file - let backup_config = backup_dir.join("openclaw.json"); - if backup_config.exists() { - fs::copy(&backup_config, &paths.config_path) - .map_err(|e| format!("Failed to restore config: {e}"))?; - } - - // Restore other directories (agents except sessions/archive, memory, etc.) - let skip_dirs: HashSet<&str> = ["sessions", "archive", ".clawpal"] - .iter() - .copied() - .collect(); - restore_dir_recursive(&backup_dir, &paths.base_dir, &skip_dirs)?; - - Ok(format!("Restored from backup '{}'", backup_name)) + timed_sync!("restore_from_backup", { + let paths = resolve_paths(); + let backup_dir = paths.clawpal_dir.join("backups").join(&backup_name); + if !backup_dir.exists() { + return Err(format!("Backup '{}' not found", backup_name)); + } + + // Restore config file + let backup_config = backup_dir.join("openclaw.json"); + if backup_config.exists() { + fs::copy(&backup_config, &paths.config_path) + .map_err(|e| format!("Failed to restore config: {e}"))?; + } + + // Restore other directories (agents except sessions/archive, memory, etc.) 
+ let skip_dirs: HashSet<&str> = ["sessions", "archive", ".clawpal"] + .iter() + .copied() + .collect(); + restore_dir_recursive(&backup_dir, &paths.base_dir, &skip_dirs)?; + + Ok(format!("Restored from backup '{}'", backup_name)) + }) } #[tauri::command] pub fn delete_backup(backup_name: String) -> Result { - let paths = resolve_paths(); - let backup_dir = paths.clawpal_dir.join("backups").join(&backup_name); - if !backup_dir.exists() { - return Ok(false); - } - fs::remove_dir_all(&backup_dir).map_err(|e| format!("Failed to delete backup: {e}"))?; - Ok(true) + timed_sync!("delete_backup", { + let paths = resolve_paths(); + let backup_dir = paths.clawpal_dir.join("backups").join(&backup_name); + if !backup_dir.exists() { + return Ok(false); + } + fs::remove_dir_all(&backup_dir).map_err(|e| format!("Failed to delete backup: {e}"))?; + Ok(true) + }) } #[tauri::command] @@ -330,18 +353,22 @@ pub async fn remote_delete_backup( host_id: String, backup_name: String, ) -> Result { - let escaped_name = shell_escape(&backup_name); - let cmd = format!( - "BDIR=\"$HOME/.clawpal/backups/\"{name}; [ -d \"$BDIR\" ] && rm -rf \"$BDIR\" && echo 'deleted' || echo 'not_found'", - name = escaped_name - ); - - let result = pool.exec_login(&host_id, &cmd).await?; - Ok(result.stdout.trim() == "deleted") + timed_async!("remote_delete_backup", { + let escaped_name = shell_escape(&backup_name); + let cmd = format!( + "BDIR=\"$HOME/.clawpal/backups/\"{name}; [ -d \"$BDIR\" ] && rm -rf \"$BDIR\" && echo 'deleted' || echo 'not_found'", + name = escaped_name + ); + + let result = pool.exec_login(&host_id, &cmd).await?; + Ok(result.stdout.trim() == "deleted") + }) } #[tauri::command] pub fn check_openclaw_update() -> Result { - let paths = resolve_paths(); - check_openclaw_update_cached(&paths, true) + timed_sync!("check_openclaw_update", { + let paths = resolve_paths(); + check_openclaw_update_cached(&paths, true) + }) } diff --git a/src-tauri/src/commands/config.rs 
b/src-tauri/src/commands/config.rs index 9182d872..1074846d 100644 --- a/src-tauri/src/commands/config.rs +++ b/src-tauri/src/commands/config.rs @@ -5,10 +5,12 @@ pub async fn remote_read_raw_config( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - // openclaw config get requires a path — there's no way to dump the full config via CLI. - // Use sftp_read directly since this function's purpose is returning the entire raw config. - let config_path = remote_resolve_openclaw_config_path(&pool, &host_id).await?; - pool.sftp_read(&host_id, &config_path).await + timed_async!("remote_read_raw_config", { + // openclaw config get requires a path — there's no way to dump the full config via CLI. + // Use sftp_read directly since this function's purpose is returning the entire raw config. + let config_path = remote_resolve_openclaw_config_path(&pool, &host_id).await?; + pool.sftp_read(&host_id, &config_path).await + }) } #[tauri::command] @@ -17,18 +19,27 @@ pub async fn remote_write_raw_config( host_id: String, content: String, ) -> Result { - // Validate it's valid config JSON using core module - let next = clawpal_core::config::validate_config_json(&content) - .map_err(|e| format!("Invalid JSON: {e}"))?; - // Read current for snapshot - let config_path = remote_resolve_openclaw_config_path(&pool, &host_id).await?; - let current = pool - .sftp_read(&host_id, &config_path) - .await - .unwrap_or_default(); - remote_write_config_with_snapshot(&pool, &host_id, &config_path, ¤t, &next, "raw-edit") + timed_async!("remote_write_raw_config", { + // Validate it's valid config JSON using core module + let next = clawpal_core::config::validate_config_json(&content) + .map_err(|e| format!("Invalid JSON: {e}"))?; + // Read current for snapshot + let config_path = remote_resolve_openclaw_config_path(&pool, &host_id).await?; + let current = pool + .sftp_read(&host_id, &config_path) + .await + .unwrap_or_default(); + remote_write_config_with_snapshot( + &pool, + 
&host_id, + &config_path, + ¤t, + &next, + "raw-edit", + ) .await?; - Ok(true) + Ok(true) + }) } #[tauri::command] @@ -38,29 +49,31 @@ pub async fn remote_apply_config_patch( patch_template: String, params: Map, ) -> Result { - let (config_path, current_text, current) = - remote_read_openclaw_config_text_and_json(&pool, &host_id).await?; + timed_async!("remote_apply_config_patch", { + let (config_path, current_text, current) = + remote_read_openclaw_config_text_and_json(&pool, &host_id).await?; - // Use core function to build candidate config - let (candidate, _changes) = - clawpal_core::config::build_candidate_config(¤t, &patch_template, ¶ms)?; + // Use core function to build candidate config + let (candidate, _changes) = + clawpal_core::config::build_candidate_config(¤t, &patch_template, ¶ms)?; - remote_write_config_with_snapshot( - &pool, - &host_id, - &config_path, - ¤t_text, - &candidate, - "config-patch", - ) - .await?; - Ok(ApplyResult { - ok: true, - snapshot_id: None, - config_path, - backup_path: None, - warnings: Vec::new(), - errors: Vec::new(), + remote_write_config_with_snapshot( + &pool, + &host_id, + &config_path, + ¤t_text, + &candidate, + "config-patch", + ) + .await?; + Ok(ApplyResult { + ok: true, + snapshot_id: None, + config_path, + backup_path: None, + warnings: Vec::new(), + errors: Vec::new(), + }) }) } @@ -69,41 +82,43 @@ pub async fn remote_list_history( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - // Ensure dir exists - pool.exec(&host_id, "mkdir -p ~/.clawpal/snapshots").await?; - let entries = pool.sftp_list(&host_id, "~/.clawpal/snapshots").await?; - let mut items: Vec = Vec::new(); - for entry in entries { - if entry.name.starts_with('.') || entry.is_dir { - continue; + timed_async!("remote_list_history", { + // Ensure dir exists + pool.exec(&host_id, "mkdir -p ~/.clawpal/snapshots").await?; + let entries = pool.sftp_list(&host_id, "~/.clawpal/snapshots").await?; + let mut items: Vec = Vec::new(); + for entry 
in entries { + if entry.name.starts_with('.') || entry.is_dir { + continue; + } + // Parse filename: {unix_ts}-{source}-{summary}.json + let stem = entry.name.trim_end_matches(".json"); + let parts: Vec<&str> = stem.splitn(3, '-').collect(); + let ts_str = parts.first().unwrap_or(&"0"); + let source = parts.get(1).unwrap_or(&"unknown"); + let recipe_id = parts.get(2).map(|s| s.to_string()); + let created_at = ts_str.parse::().unwrap_or(0); + // Convert Unix timestamp to ISO 8601 format for frontend compatibility + let created_at_iso = chrono::DateTime::from_timestamp(created_at, 0) + .map(|dt| dt.format("%Y-%m-%dT%H:%M:%SZ").to_string()) + .unwrap_or_else(|| created_at.to_string()); + let is_rollback = *source == "rollback"; + items.push(serde_json::json!({ + "id": entry.name, + "recipeId": recipe_id, + "createdAt": created_at_iso, + "source": source, + "canRollback": !is_rollback, + })); } - // Parse filename: {unix_ts}-{source}-{summary}.json - let stem = entry.name.trim_end_matches(".json"); - let parts: Vec<&str> = stem.splitn(3, '-').collect(); - let ts_str = parts.first().unwrap_or(&"0"); - let source = parts.get(1).unwrap_or(&"unknown"); - let recipe_id = parts.get(2).map(|s| s.to_string()); - let created_at = ts_str.parse::().unwrap_or(0); - // Convert Unix timestamp to ISO 8601 format for frontend compatibility - let created_at_iso = chrono::DateTime::from_timestamp(created_at, 0) - .map(|dt| dt.format("%Y-%m-%dT%H:%M:%SZ").to_string()) - .unwrap_or_else(|| created_at.to_string()); - let is_rollback = *source == "rollback"; - items.push(serde_json::json!({ - "id": entry.name, - "recipeId": recipe_id, - "createdAt": created_at_iso, - "source": source, - "canRollback": !is_rollback, - })); - } - // Sort newest first - items.sort_by(|a, b| { - let ta = a["createdAt"].as_str().unwrap_or(""); - let tb = b["createdAt"].as_str().unwrap_or(""); - tb.cmp(ta) - }); - Ok(serde_json::json!({ "items": items })) + // Sort newest first + items.sort_by(|a, b| { + let ta = 
a["createdAt"].as_str().unwrap_or(""); + let tb = b["createdAt"].as_str().unwrap_or(""); + tb.cmp(ta) + }); + Ok(serde_json::json!({ "items": items })) + }) } #[tauri::command] @@ -112,28 +127,30 @@ pub async fn remote_preview_rollback( host_id: String, snapshot_id: String, ) -> Result { - let snapshot_path = format!("~/.clawpal/snapshots/{snapshot_id}"); - let snapshot_text = pool.sftp_read(&host_id, &snapshot_path).await?; - let target = clawpal_core::config::validate_config_json(&snapshot_text) - .map_err(|e| format!("Failed to parse snapshot: {e}"))?; + timed_async!("remote_preview_rollback", { + let snapshot_path = format!("~/.clawpal/snapshots/{snapshot_id}"); + let snapshot_text = pool.sftp_read(&host_id, &snapshot_path).await?; + let target = clawpal_core::config::validate_config_json(&snapshot_text) + .map_err(|e| format!("Failed to parse snapshot: {e}"))?; - let (_config_path, _current_text, current) = - remote_read_openclaw_config_text_and_json(&pool, &host_id).await?; + let (_config_path, _current_text, current) = + remote_read_openclaw_config_text_and_json(&pool, &host_id).await?; - let before = clawpal_core::config::format_config_diff(¤t, ¤t); - let after = clawpal_core::config::format_config_diff(&target, &target); - let diff = clawpal_core::config::format_config_diff(¤t, &target); + let before = clawpal_core::config::format_config_diff(¤t, ¤t); + let after = clawpal_core::config::format_config_diff(&target, &target); + let diff = clawpal_core::config::format_config_diff(¤t, &target); - Ok(PreviewResult { - recipe_id: "rollback".into(), - diff, - config_before: before, - config_after: after, - changes: Vec::new(), // Core module doesn't expose change paths directly - overwrites_existing: true, - can_rollback: true, - impact_level: "medium".into(), - warnings: vec!["Rollback will replace current configuration".into()], + Ok(PreviewResult { + recipe_id: "rollback".into(), + diff, + config_before: before, + config_after: after, + changes: Vec::new(), // 
Core module doesn't expose change paths directly + overwrites_existing: true, + can_rollback: true, + impact_level: "medium".into(), + warnings: vec!["Rollback will replace current configuration".into()], + }) }) } @@ -143,38 +160,42 @@ pub async fn remote_rollback( host_id: String, snapshot_id: String, ) -> Result { - let snapshot_path = format!("~/.clawpal/snapshots/{snapshot_id}"); - let target_text = pool.sftp_read(&host_id, &snapshot_path).await?; - let target = clawpal_core::config::validate_config_json(&target_text) - .map_err(|e| format!("Failed to parse snapshot: {e}"))?; + timed_async!("remote_rollback", { + let snapshot_path = format!("~/.clawpal/snapshots/{snapshot_id}"); + let target_text = pool.sftp_read(&host_id, &snapshot_path).await?; + let target = clawpal_core::config::validate_config_json(&target_text) + .map_err(|e| format!("Failed to parse snapshot: {e}"))?; - let (config_path, current_text, _current) = - remote_read_openclaw_config_text_and_json(&pool, &host_id).await?; - remote_write_config_with_snapshot( - &pool, - &host_id, - &config_path, - ¤t_text, - &target, - "rollback", - ) - .await?; + let (config_path, current_text, _current) = + remote_read_openclaw_config_text_and_json(&pool, &host_id).await?; + remote_write_config_with_snapshot( + &pool, + &host_id, + &config_path, + ¤t_text, + &target, + "rollback", + ) + .await?; - Ok(ApplyResult { - ok: true, - snapshot_id: Some(snapshot_id), - config_path, - backup_path: None, - warnings: vec!["rolled back".into()], - errors: Vec::new(), + Ok(ApplyResult { + ok: true, + snapshot_id: Some(snapshot_id), + config_path, + backup_path: None, + warnings: vec!["rolled back".into()], + errors: Vec::new(), + }) }) } #[tauri::command] pub fn read_raw_config() -> Result { - let paths = resolve_paths(); - let cfg = read_openclaw_config(&paths)?; - serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string()) + timed_sync!("read_raw_config", { + let paths = resolve_paths(); + let cfg = 
read_openclaw_config(&paths)?; + serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string()) + }) } #[tauri::command] @@ -182,120 +203,128 @@ pub fn apply_config_patch( patch_template: String, params: Map, ) -> Result { - let paths = resolve_paths(); - ensure_dirs(&paths)?; - let current = read_openclaw_config(&paths)?; - let current_text = serde_json::to_string_pretty(¤t).map_err(|e| e.to_string())?; - let snapshot = add_snapshot( - &paths.history_dir, - &paths.metadata_path, - Some("config-patch".into()), - "apply", - true, - ¤t_text, - None, - )?; - let (candidate, _changes) = - build_candidate_config_from_template(¤t, &patch_template, ¶ms)?; - write_json(&paths.config_path, &candidate)?; - let mut warnings = Vec::new(); - if let Err(err) = sync_main_auth_for_config(&paths, &candidate) { - warnings.push(format!("main auth sync skipped: {err}")); - } - Ok(ApplyResult { - ok: true, - snapshot_id: Some(snapshot.id), - config_path: paths.config_path.to_string_lossy().to_string(), - backup_path: Some(snapshot.config_path), - warnings, - errors: Vec::new(), + timed_sync!("apply_config_patch", { + let paths = resolve_paths(); + ensure_dirs(&paths)?; + let current = read_openclaw_config(&paths)?; + let current_text = serde_json::to_string_pretty(¤t).map_err(|e| e.to_string())?; + let snapshot = add_snapshot( + &paths.history_dir, + &paths.metadata_path, + Some("config-patch".into()), + "apply", + true, + ¤t_text, + None, + )?; + let (candidate, _changes) = + build_candidate_config_from_template(¤t, &patch_template, ¶ms)?; + write_json(&paths.config_path, &candidate)?; + let mut warnings = Vec::new(); + if let Err(err) = sync_main_auth_for_config(&paths, &candidate) { + warnings.push(format!("main auth sync skipped: {err}")); + } + Ok(ApplyResult { + ok: true, + snapshot_id: Some(snapshot.id), + config_path: paths.config_path.to_string_lossy().to_string(), + backup_path: Some(snapshot.config_path), + warnings, + errors: Vec::new(), + }) }) } #[tauri::command] pub fn 
list_history(limit: usize, offset: usize) -> Result { - let paths = resolve_paths(); - let index = list_snapshots(&paths.metadata_path)?; - let items = index - .items - .into_iter() - .skip(offset) - .take(limit) - .map(|item| HistoryItem { - id: item.id, - recipe_id: item.recipe_id, - created_at: item.created_at, - source: item.source, - can_rollback: item.can_rollback, - rollback_of: item.rollback_of, - }) - .collect(); - Ok(HistoryPage { items }) + timed_sync!("list_history", { + let paths = resolve_paths(); + let index = list_snapshots(&paths.metadata_path)?; + let items = index + .items + .into_iter() + .skip(offset) + .take(limit) + .map(|item| HistoryItem { + id: item.id, + recipe_id: item.recipe_id, + created_at: item.created_at, + source: item.source, + can_rollback: item.can_rollback, + rollback_of: item.rollback_of, + }) + .collect(); + Ok(HistoryPage { items }) + }) } #[tauri::command] pub fn preview_rollback(snapshot_id: String) -> Result { - let paths = resolve_paths(); - let index = list_snapshots(&paths.metadata_path)?; - let target = index - .items - .into_iter() - .find(|s| s.id == snapshot_id) - .ok_or_else(|| "snapshot not found".to_string())?; - if !target.can_rollback { - return Err("snapshot is not rollbackable".to_string()); - } + timed_sync!("preview_rollback", { + let paths = resolve_paths(); + let index = list_snapshots(&paths.metadata_path)?; + let target = index + .items + .into_iter() + .find(|s| s.id == snapshot_id) + .ok_or_else(|| "snapshot not found".to_string())?; + if !target.can_rollback { + return Err("snapshot is not rollbackable".to_string()); + } - let current = read_openclaw_config(&paths)?; - let target_text = read_snapshot(&target.config_path)?; - let target_json = clawpal_core::doctor::parse_json5_document_or_default(&target_text); - let before_text = serde_json::to_string_pretty(¤t).unwrap_or_else(|_| "{}".into()); - let after_text = serde_json::to_string_pretty(&target_json).unwrap_or_else(|_| "{}".into()); - 
Ok(PreviewResult { - recipe_id: "rollback".into(), - diff: format_diff(¤t, &target_json), - config_before: before_text, - config_after: after_text, - changes: collect_change_paths(¤t, &target_json), - overwrites_existing: true, - can_rollback: true, - impact_level: "medium".into(), - warnings: vec!["Rollback will replace current configuration".into()], + let current = read_openclaw_config(&paths)?; + let target_text = read_snapshot(&target.config_path)?; + let target_json = clawpal_core::doctor::parse_json5_document_or_default(&target_text); + let before_text = serde_json::to_string_pretty(¤t).unwrap_or_else(|_| "{}".into()); + let after_text = serde_json::to_string_pretty(&target_json).unwrap_or_else(|_| "{}".into()); + Ok(PreviewResult { + recipe_id: "rollback".into(), + diff: format_diff(¤t, &target_json), + config_before: before_text, + config_after: after_text, + changes: collect_change_paths(¤t, &target_json), + overwrites_existing: true, + can_rollback: true, + impact_level: "medium".into(), + warnings: vec!["Rollback will replace current configuration".into()], + }) }) } #[tauri::command] pub fn rollback(snapshot_id: String) -> Result { - let paths = resolve_paths(); - ensure_dirs(&paths)?; - let index = list_snapshots(&paths.metadata_path)?; - let target = index - .items - .into_iter() - .find(|s| s.id == snapshot_id) - .ok_or_else(|| "snapshot not found".to_string())?; - if !target.can_rollback { - return Err("snapshot is not rollbackable".to_string()); - } - let target_text = read_snapshot(&target.config_path)?; - let backup = read_openclaw_config(&paths)?; - let backup_text = serde_json::to_string_pretty(&backup).map_err(|e| e.to_string())?; - let _ = add_snapshot( - &paths.history_dir, - &paths.metadata_path, - target.recipe_id.clone(), - "rollback", - true, - &backup_text, - Some(target.id.clone()), - )?; - write_text(&paths.config_path, &target_text)?; - Ok(ApplyResult { - ok: true, - snapshot_id: Some(target.id), - config_path: 
paths.config_path.to_string_lossy().to_string(), - backup_path: None, - warnings: vec!["rolled back".into()], - errors: Vec::new(), + timed_sync!("rollback", { + let paths = resolve_paths(); + ensure_dirs(&paths)?; + let index = list_snapshots(&paths.metadata_path)?; + let target = index + .items + .into_iter() + .find(|s| s.id == snapshot_id) + .ok_or_else(|| "snapshot not found".to_string())?; + if !target.can_rollback { + return Err("snapshot is not rollbackable".to_string()); + } + let target_text = read_snapshot(&target.config_path)?; + let backup = read_openclaw_config(&paths)?; + let backup_text = serde_json::to_string_pretty(&backup).map_err(|e| e.to_string())?; + let _ = add_snapshot( + &paths.history_dir, + &paths.metadata_path, + target.recipe_id.clone(), + "rollback", + true, + &backup_text, + Some(target.id.clone()), + )?; + write_text(&paths.config_path, &target_text)?; + Ok(ApplyResult { + ok: true, + snapshot_id: Some(target.id), + config_path: paths.config_path.to_string_lossy().to_string(), + backup_path: None, + warnings: vec!["rolled back".into()], + errors: Vec::new(), + }) }) } diff --git a/src-tauri/src/commands/cron.rs b/src-tauri/src/commands/cron.rs index 0a7b0978..51ebfe35 100644 --- a/src-tauri/src/commands/cron.rs +++ b/src-tauri/src/commands/cron.rs @@ -5,11 +5,13 @@ pub async fn remote_list_cron_jobs( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - let raw = pool.sftp_read(&host_id, "~/.openclaw/cron/jobs.json").await; - match raw { - Ok(text) => Ok(parse_cron_jobs(&text)), - Err(_) => Ok(Value::Array(vec![])), - } + timed_async!("remote_list_cron_jobs", { + let raw = pool.sftp_read(&host_id, "~/.openclaw/cron/jobs.json").await; + match raw { + Ok(text) => Ok(parse_cron_jobs(&text)), + Err(_) => Ok(Value::Array(vec![])), + } + }) } #[tauri::command] @@ -19,17 +21,19 @@ pub async fn remote_get_cron_runs( job_id: String, limit: Option, ) -> Result, String> { - let path = format!("~/.openclaw/cron/runs/{}.jsonl", 
job_id); - let raw = pool.sftp_read(&host_id, &path).await; - match raw { - Ok(text) => { - let mut runs = clawpal_core::cron::parse_cron_runs(&text)?; - let limit = limit.unwrap_or(10); - runs.truncate(limit); - Ok(runs) + timed_async!("remote_get_cron_runs", { + let path = format!("~/.openclaw/cron/runs/{}.jsonl", job_id); + let raw = pool.sftp_read(&host_id, &path).await; + match raw { + Ok(text) => { + let mut runs = clawpal_core::cron::parse_cron_runs(&text)?; + let limit = limit.unwrap_or(10); + runs.truncate(limit); + Ok(runs) + } + Err(_) => Ok(vec![]), } - Err(_) => Ok(vec![]), - } + }) } #[tauri::command] @@ -38,17 +42,19 @@ pub async fn remote_trigger_cron_job( host_id: String, job_id: String, ) -> Result { - let result = pool - .exec_login( - &host_id, - &format!("openclaw cron run {}", shell_escape(&job_id)), - ) - .await?; - if result.exit_code == 0 { - Ok(result.stdout) - } else { - Err(format!("{}\n{}", result.stdout, result.stderr)) - } + timed_async!("remote_trigger_cron_job", { + let result = pool + .exec_login( + &host_id, + &format!("openclaw cron run {}", shell_escape(&job_id)), + ) + .await?; + if result.exit_code == 0 { + Ok(result.stdout) + } else { + Err(format!("{}\n{}", result.stdout, result.stderr)) + } + }) } #[tauri::command] @@ -57,53 +63,88 @@ pub async fn remote_delete_cron_job( host_id: String, job_id: String, ) -> Result { - let result = pool - .exec_login( - &host_id, - &format!("openclaw cron remove {}", shell_escape(&job_id)), - ) - .await?; - if result.exit_code == 0 { - Ok(result.stdout) - } else { - Err(format!("{}\n{}", result.stdout, result.stderr)) - } + timed_async!("remote_delete_cron_job", { + let result = pool + .exec_login( + &host_id, + &format!("openclaw cron remove {}", shell_escape(&job_id)), + ) + .await?; + if result.exit_code == 0 { + Ok(result.stdout) + } else { + Err(format!("{}\n{}", result.stdout, result.stderr)) + } + }) } #[tauri::command] pub fn list_cron_jobs() -> Result { - let paths = 
resolve_paths(); - let jobs_path = paths.base_dir.join("cron").join("jobs.json"); - if !jobs_path.exists() { - return Ok(Value::Array(vec![])); - } - let text = std::fs::read_to_string(&jobs_path).map_err(|e| e.to_string())?; - Ok(parse_cron_jobs(&text)) + timed_sync!("list_cron_jobs", { + let paths = resolve_paths(); + let jobs_path = paths.base_dir.join("cron").join("jobs.json"); + if !jobs_path.exists() { + return Ok(Value::Array(vec![])); + } + let text = std::fs::read_to_string(&jobs_path).map_err(|e| e.to_string())?; + Ok(parse_cron_jobs(&text)) + }) } #[tauri::command] pub fn get_cron_runs(job_id: String, limit: Option) -> Result, String> { - let paths = resolve_paths(); - let runs_path = paths - .base_dir - .join("cron") - .join("runs") - .join(format!("{}.jsonl", job_id)); - if !runs_path.exists() { - return Ok(vec![]); - } - let text = std::fs::read_to_string(&runs_path).map_err(|e| e.to_string())?; - let mut runs = clawpal_core::cron::parse_cron_runs(&text)?; - let limit = limit.unwrap_or(10); - runs.truncate(limit); - Ok(runs) + timed_sync!("get_cron_runs", { + let paths = resolve_paths(); + let runs_path = paths + .base_dir + .join("cron") + .join("runs") + .join(format!("{}.jsonl", job_id)); + if !runs_path.exists() { + return Ok(vec![]); + } + let text = std::fs::read_to_string(&runs_path).map_err(|e| e.to_string())?; + let mut runs = clawpal_core::cron::parse_cron_runs(&text)?; + let limit = limit.unwrap_or(10); + runs.truncate(limit); + Ok(runs) + }) } #[tauri::command] pub async fn trigger_cron_job(job_id: String) -> Result { - tauri::async_runtime::spawn_blocking(move || { + timed_async!("trigger_cron_job", { + tauri::async_runtime::spawn_blocking(move || { + let mut cmd = + std::process::Command::new(clawpal_core::openclaw::resolve_openclaw_bin()); + cmd.args(["cron", "run", &job_id]); + if let Some(path) = crate::cli_runner::get_active_openclaw_home_override() { + cmd.env("OPENCLAW_HOME", path); + } + let output = cmd + .output() + .map_err(|e| 
format!("Failed to run openclaw: {e}"))?; + let stdout = String::from_utf8_lossy(&output.stdout).to_string(); + let stderr = String::from_utf8_lossy(&output.stderr).to_string(); + if output.status.success() { + Ok(stdout) + } else { + // Extract meaningful error lines, skip Doctor warning banners + let error_msg = + clawpal_core::doctor::strip_doctor_banner(&format!("{stdout}\n{stderr}")); + Err(error_msg) + } + }) + .await + .map_err(|e| format!("Task failed: {e}"))? + }) +} + +#[tauri::command] +pub fn delete_cron_job(job_id: String) -> Result { + timed_sync!("delete_cron_job", { let mut cmd = std::process::Command::new(clawpal_core::openclaw::resolve_openclaw_bin()); - cmd.args(["cron", "run", &job_id]); + cmd.args(["cron", "remove", &job_id]); if let Some(path) = crate::cli_runner::get_active_openclaw_home_override() { cmd.env("OPENCLAW_HOME", path); } @@ -115,31 +156,7 @@ pub async fn trigger_cron_job(job_id: String) -> Result { if output.status.success() { Ok(stdout) } else { - // Extract meaningful error lines, skip Doctor warning banners - let error_msg = - clawpal_core::doctor::strip_doctor_banner(&format!("{stdout}\n{stderr}")); - Err(error_msg) + Err(format!("{stdout}\n{stderr}")) } }) - .await - .map_err(|e| format!("Task failed: {e}"))? 
-} - -#[tauri::command] -pub fn delete_cron_job(job_id: String) -> Result { - let mut cmd = std::process::Command::new(clawpal_core::openclaw::resolve_openclaw_bin()); - cmd.args(["cron", "remove", &job_id]); - if let Some(path) = crate::cli_runner::get_active_openclaw_home_override() { - cmd.env("OPENCLAW_HOME", path); - } - let output = cmd - .output() - .map_err(|e| format!("Failed to run openclaw: {e}"))?; - let stdout = String::from_utf8_lossy(&output.stdout).to_string(); - let stderr = String::from_utf8_lossy(&output.stderr).to_string(); - if output.status.success() { - Ok(stdout) - } else { - Err(format!("{stdout}\n{stderr}")) - } } diff --git a/src-tauri/src/commands/discover_local.rs b/src-tauri/src/commands/discover_local.rs index 3df602b6..7d7f70dd 100644 --- a/src-tauri/src/commands/discover_local.rs +++ b/src-tauri/src/commands/discover_local.rs @@ -45,9 +45,11 @@ fn slug_from_name(name: &str) -> String { /// or exist as data directories under `~/.clawpal/`. #[tauri::command] pub async fn discover_local_instances() -> Result, String> { - tauri::async_runtime::spawn_blocking(|| discover_blocking()) - .await - .map_err(|e| e.to_string())? + timed_async!("discover_local_instances", { + tauri::async_runtime::spawn_blocking(|| discover_blocking()) + .await + .map_err(|e| e.to_string())? 
+ }) } fn discover_blocking() -> Result, String> { diff --git a/src-tauri/src/commands/discovery.rs b/src-tauri/src/commands/discovery.rs index 5ba0ebbd..dc3fd7f0 100644 --- a/src-tauri/src/commands/discovery.rs +++ b/src-tauri/src/commands/discovery.rs @@ -5,282 +5,284 @@ pub async fn remote_list_discord_guild_channels( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result, String> { - let output = crate::cli_runner::run_openclaw_remote( - &pool, - &host_id, - &["config", "get", "channels.discord", "--json"], - ) - .await?; - let discord_section = if output.exit_code == 0 { - crate::cli_runner::parse_json_output(&output).unwrap_or(Value::Null) - } else { - Value::Null - }; - let bindings_output = crate::cli_runner::run_openclaw_remote( - &pool, - &host_id, - &["config", "get", "bindings", "--json"], - ) - .await?; - let bindings_section = if bindings_output.exit_code == 0 { - crate::cli_runner::parse_json_output(&bindings_output) - .unwrap_or_else(|_| Value::Array(Vec::new())) - } else { - Value::Array(Vec::new()) - }; - // Wrap to match existing code expectations (rest of function uses cfg.get("channels").and_then(|c| c.get("discord"))) - let cfg = serde_json::json!({ - "channels": { "discord": discord_section }, - "bindings": bindings_section - }); - - let discord_cfg = cfg.get("channels").and_then(|c| c.get("discord")); - let configured_single_guild_id = discord_cfg - .and_then(|d| d.get("guilds")) - .and_then(Value::as_object) - .and_then(|guilds| { - if guilds.len() == 1 { - guilds.keys().next().cloned() - } else { - None - } + timed_async!("remote_list_discord_guild_channels", { + let output = crate::cli_runner::run_openclaw_remote( + &pool, + &host_id, + &["config", "get", "channels.discord", "--json"], + ) + .await?; + let discord_section = if output.exit_code == 0 { + crate::cli_runner::parse_json_output(&output).unwrap_or(Value::Null) + } else { + Value::Null + }; + let bindings_output = crate::cli_runner::run_openclaw_remote( + &pool, + 
&host_id, + &["config", "get", "bindings", "--json"], + ) + .await?; + let bindings_section = if bindings_output.exit_code == 0 { + crate::cli_runner::parse_json_output(&bindings_output) + .unwrap_or_else(|_| Value::Array(Vec::new())) + } else { + Value::Array(Vec::new()) + }; + // Wrap to match existing code expectations (rest of function uses cfg.get("channels").and_then(|c| c.get("discord"))) + let cfg = serde_json::json!({ + "channels": { "discord": discord_section }, + "bindings": bindings_section }); - // Extract bot token: top-level first, then fall back to first account token - let bot_token = discord_cfg - .and_then(|d| d.get("botToken").or_else(|| d.get("token"))) - .and_then(Value::as_str) - .map(|s| s.to_string()) - .or_else(|| { - discord_cfg - .and_then(|d| d.get("accounts")) - .and_then(Value::as_object) - .and_then(|accounts| { - accounts.values().find_map(|acct| { - acct.get("token") - .and_then(Value::as_str) - .filter(|s| !s.is_empty()) - .map(|s| s.to_string()) + let discord_cfg = cfg.get("channels").and_then(|c| c.get("discord")); + let configured_single_guild_id = discord_cfg + .and_then(|d| d.get("guilds")) + .and_then(Value::as_object) + .and_then(|guilds| { + if guilds.len() == 1 { + guilds.keys().next().cloned() + } else { + None + } + }); + + // Extract bot token: top-level first, then fall back to first account token + let bot_token = discord_cfg + .and_then(|d| d.get("botToken").or_else(|| d.get("token"))) + .and_then(Value::as_str) + .map(|s| s.to_string()) + .or_else(|| { + discord_cfg + .and_then(|d| d.get("accounts")) + .and_then(Value::as_object) + .and_then(|accounts| { + accounts.values().find_map(|acct| { + acct.get("token") + .and_then(Value::as_str) + .filter(|s| !s.is_empty()) + .map(|s| s.to_string()) + }) }) - }) - }); - let mut guild_name_fallback_map = pool - .sftp_read(&host_id, "~/.clawpal/discord-guild-channels.json") - .await - .ok() - .map(|text| parse_discord_cache_guild_name_fallbacks(&text)) - 
.unwrap_or_default(); - guild_name_fallback_map.extend(collect_discord_config_guild_name_fallbacks(discord_cfg)); - - let core_channels = clawpal_core::discovery::parse_guild_channels(&cfg.to_string())?; - let mut entries: Vec = core_channels - .iter() - .map(|c| DiscordGuildChannel { - guild_id: c.guild_id.clone(), - guild_name: c.guild_name.clone(), - channel_id: c.channel_id.clone(), - channel_name: c.channel_name.clone(), - default_agent_id: None, - }) - .collect(); - let mut channel_ids: Vec = entries.iter().map(|e| e.channel_id.clone()).collect(); - let mut unresolved_guild_ids: Vec = entries - .iter() - .filter(|e| e.guild_name == e.guild_id) - .map(|e| e.guild_id.clone()) - .collect(); - unresolved_guild_ids.sort(); - unresolved_guild_ids.dedup(); - - // Fallback A: if we have token + guild ids, fetch channels from Discord REST directly. - // This avoids hard-failing when CLI rejects config due non-critical schema drift. - if channel_ids.is_empty() { - let configured_guild_ids = collect_discord_config_guild_ids(discord_cfg); - if let Some(token) = bot_token.clone() { - let rest_entries = tokio::task::spawn_blocking(move || { - let mut out: Vec = Vec::new(); - for guild_id in configured_guild_ids { - if let Ok(channels) = fetch_discord_guild_channels(&token, &guild_id) { - for (channel_id, channel_name) in channels { - if out - .iter() - .any(|e| e.guild_id == guild_id && e.channel_id == channel_id) - { - continue; + }); + let mut guild_name_fallback_map = pool + .sftp_read(&host_id, "~/.clawpal/discord-guild-channels.json") + .await + .ok() + .map(|text| parse_discord_cache_guild_name_fallbacks(&text)) + .unwrap_or_default(); + guild_name_fallback_map.extend(collect_discord_config_guild_name_fallbacks(discord_cfg)); + + let core_channels = clawpal_core::discovery::parse_guild_channels(&cfg.to_string())?; + let mut entries: Vec = core_channels + .iter() + .map(|c| DiscordGuildChannel { + guild_id: c.guild_id.clone(), + guild_name: c.guild_name.clone(), + 
channel_id: c.channel_id.clone(), + channel_name: c.channel_name.clone(), + default_agent_id: None, + }) + .collect(); + let mut channel_ids: Vec = entries.iter().map(|e| e.channel_id.clone()).collect(); + let mut unresolved_guild_ids: Vec = entries + .iter() + .filter(|e| e.guild_name == e.guild_id) + .map(|e| e.guild_id.clone()) + .collect(); + unresolved_guild_ids.sort(); + unresolved_guild_ids.dedup(); + + // Fallback A: if we have token + guild ids, fetch channels from Discord REST directly. + // This avoids hard-failing when CLI rejects config due non-critical schema drift. + if channel_ids.is_empty() { + let configured_guild_ids = collect_discord_config_guild_ids(discord_cfg); + if let Some(token) = bot_token.clone() { + let rest_entries = tokio::task::spawn_blocking(move || { + let mut out: Vec = Vec::new(); + for guild_id in configured_guild_ids { + if let Ok(channels) = fetch_discord_guild_channels(&token, &guild_id) { + for (channel_id, channel_name) in channels { + if out + .iter() + .any(|e| e.guild_id == guild_id && e.channel_id == channel_id) + { + continue; + } + out.push(DiscordGuildChannel { + guild_id: guild_id.clone(), + guild_name: guild_id.clone(), + channel_id, + channel_name, + default_agent_id: None, + }); } - out.push(DiscordGuildChannel { - guild_id: guild_id.clone(), - guild_name: guild_id.clone(), - channel_id, - channel_name, - default_agent_id: None, - }); } } - } - out - }) - .await - .unwrap_or_default(); - for entry in rest_entries { - if entries - .iter() - .any(|e| e.guild_id == entry.guild_id && e.channel_id == entry.channel_id) - { - continue; - } - channel_ids.push(entry.channel_id.clone()); - entries.push(entry); - } - } - } - - // Fallback B: query channel ids from directory and keep compatibility - // with existing cache shape when config has no explicit channel map. 
- if channel_ids.is_empty() { - let cmd = "openclaw directory groups list --channel discord --json"; - if let Ok(r) = pool.exec_login(&host_id, cmd).await { - if r.exit_code == 0 && !r.stdout.trim().is_empty() { - for channel_id in parse_directory_group_channel_ids(&r.stdout) { - if entries.iter().any(|e| e.channel_id == channel_id) { + out + }) + .await + .unwrap_or_default(); + for entry in rest_entries { + if entries + .iter() + .any(|e| e.guild_id == entry.guild_id && e.channel_id == entry.channel_id) + { continue; } - let (guild_id, guild_name) = - if let Some(gid) = configured_single_guild_id.clone() { - (gid.clone(), gid) - } else { - ("discord".to_string(), "Discord".to_string()) - }; - channel_ids.push(channel_id.clone()); - entries.push(DiscordGuildChannel { - guild_id, - guild_name, - channel_id: channel_id.clone(), - channel_name: channel_id, - default_agent_id: None, - }); + channel_ids.push(entry.channel_id.clone()); + entries.push(entry); } } } - } - - // Resolve channel names via openclaw CLI on remote - if !channel_ids.is_empty() { - let ids_arg = channel_ids.join(" "); - let cmd = format!( - "openclaw channels resolve --json --channel discord --kind auto {}", - ids_arg - ); - if let Ok(r) = pool.exec_login(&host_id, &cmd).await { - if r.exit_code == 0 && !r.stdout.trim().is_empty() { - if let Some(name_map) = parse_resolve_name_map(&r.stdout) { - for entry in &mut entries { - if let Some(name) = name_map.get(&entry.channel_id) { - entry.channel_name = name.clone(); + + // Fallback B: query channel ids from directory and keep compatibility + // with existing cache shape when config has no explicit channel map. 
+ if channel_ids.is_empty() { + let cmd = "openclaw directory groups list --channel discord --json"; + if let Ok(r) = pool.exec_login(&host_id, cmd).await { + if r.exit_code == 0 && !r.stdout.trim().is_empty() { + for channel_id in parse_directory_group_channel_ids(&r.stdout) { + if entries.iter().any(|e| e.channel_id == channel_id) { + continue; } + let (guild_id, guild_name) = + if let Some(gid) = configured_single_guild_id.clone() { + (gid.clone(), gid) + } else { + ("discord".to_string(), "Discord".to_string()) + }; + channel_ids.push(channel_id.clone()); + entries.push(DiscordGuildChannel { + guild_id, + guild_name, + channel_id: channel_id.clone(), + channel_name: channel_id, + default_agent_id: None, + }); } } } } - } - - // Resolve guild names via Discord REST API (guild names can't be resolved by openclaw CLI) - // Must use spawn_blocking because reqwest::blocking panics in async context - if let Some(token) = bot_token { - if !unresolved_guild_ids.is_empty() { - let guild_name_map = tokio::task::spawn_blocking(move || { - let mut map = std::collections::HashMap::new(); - for gid in &unresolved_guild_ids { - if let Ok(name) = fetch_discord_guild_name(&token, gid) { - map.insert(gid.clone(), name); + + // Resolve channel names via openclaw CLI on remote + if !channel_ids.is_empty() { + let ids_arg = channel_ids.join(" "); + let cmd = format!( + "openclaw channels resolve --json --channel discord --kind auto {}", + ids_arg + ); + if let Ok(r) = pool.exec_login(&host_id, &cmd).await { + if r.exit_code == 0 && !r.stdout.trim().is_empty() { + if let Some(name_map) = parse_resolve_name_map(&r.stdout) { + for entry in &mut entries { + if let Some(name) = name_map.get(&entry.channel_id) { + entry.channel_name = name.clone(); + } + } } } - map - }) - .await - .unwrap_or_default(); - for entry in &mut entries { - if let Some(name) = guild_name_map.get(&entry.guild_id) { - entry.guild_name = name.clone(); - } } } - } - for entry in &mut entries { - if 
entry.guild_name == entry.guild_id { - if let Some(name) = guild_name_fallback_map.get(&entry.guild_id) { - entry.guild_name = name.clone(); + + // Resolve guild names via Discord REST API (guild names can't be resolved by openclaw CLI) + // Must use spawn_blocking because reqwest::blocking panics in async context + if let Some(token) = bot_token { + if !unresolved_guild_ids.is_empty() { + let guild_name_map = tokio::task::spawn_blocking(move || { + let mut map = std::collections::HashMap::new(); + for gid in &unresolved_guild_ids { + if let Ok(name) = fetch_discord_guild_name(&token, gid) { + map.insert(gid.clone(), name); + } + } + map + }) + .await + .unwrap_or_default(); + for entry in &mut entries { + if let Some(name) = guild_name_map.get(&entry.guild_id) { + entry.guild_name = name.clone(); + } + } } } - } - - // Resolve default agent per guild from account config + bindings (remote) - { - // Build account_id -> default agent_id from bindings (account-level, no peer) - let mut account_agent_map: std::collections::HashMap = - std::collections::HashMap::new(); - if let Some(bindings) = cfg.get("bindings").and_then(Value::as_array) { - for b in bindings { - let m = match b.get("match") { - Some(m) => m, - None => continue, - }; - if m.get("channel").and_then(Value::as_str) != Some("discord") { - continue; - } - let account_id = match m.get("accountId").and_then(Value::as_str) { - Some(s) => s, - None => continue, - }; - if m.get("peer").and_then(|p| p.get("id")).is_some() { - continue; - } // skip channel-specific - if let Some(agent_id) = b.get("agentId").and_then(Value::as_str) { - account_agent_map - .entry(account_id.to_string()) - .or_insert_with(|| agent_id.to_string()); + for entry in &mut entries { + if entry.guild_name == entry.guild_id { + if let Some(name) = guild_name_fallback_map.get(&entry.guild_id) { + entry.guild_name = name.clone(); } } } - // Build guild_id -> default agent from account->guild mapping - let mut guild_default_agent: 
std::collections::HashMap = - std::collections::HashMap::new(); - if let Some(accounts) = discord_cfg - .and_then(|d| d.get("accounts")) - .and_then(Value::as_object) + + // Resolve default agent per guild from account config + bindings (remote) { - for (account_id, account_val) in accounts { - let agent = account_agent_map - .get(account_id) - .cloned() - .unwrap_or_else(|| account_id.clone()); - if let Some(guilds) = account_val.get("guilds").and_then(Value::as_object) { - for guild_id in guilds.keys() { - guild_default_agent - .entry(guild_id.clone()) - .or_insert(agent.clone()); + // Build account_id -> default agent_id from bindings (account-level, no peer) + let mut account_agent_map: std::collections::HashMap = + std::collections::HashMap::new(); + if let Some(bindings) = cfg.get("bindings").and_then(Value::as_array) { + for b in bindings { + let m = match b.get("match") { + Some(m) => m, + None => continue, + }; + if m.get("channel").and_then(Value::as_str) != Some("discord") { + continue; + } + let account_id = match m.get("accountId").and_then(Value::as_str) { + Some(s) => s, + None => continue, + }; + if m.get("peer").and_then(|p| p.get("id")).is_some() { + continue; + } // skip channel-specific + if let Some(agent_id) = b.get("agentId").and_then(Value::as_str) { + account_agent_map + .entry(account_id.to_string()) + .or_insert_with(|| agent_id.to_string()); } } } - } - for entry in &mut entries { - if entry.default_agent_id.is_none() { - if let Some(agent_id) = guild_default_agent.get(&entry.guild_id) { - entry.default_agent_id = Some(agent_id.clone()); + // Build guild_id -> default agent from account->guild mapping + let mut guild_default_agent: std::collections::HashMap = + std::collections::HashMap::new(); + if let Some(accounts) = discord_cfg + .and_then(|d| d.get("accounts")) + .and_then(Value::as_object) + { + for (account_id, account_val) in accounts { + let agent = account_agent_map + .get(account_id) + .cloned() + .unwrap_or_else(|| 
account_id.clone()); + if let Some(guilds) = account_val.get("guilds").and_then(Value::as_object) { + for guild_id in guilds.keys() { + guild_default_agent + .entry(guild_id.clone()) + .or_insert(agent.clone()); + } + } + } + } + for entry in &mut entries { + if entry.default_agent_id.is_none() { + if let Some(agent_id) = guild_default_agent.get(&entry.guild_id) { + entry.default_agent_id = Some(agent_id.clone()); + } } } } - } - // Persist to remote cache - if !entries.is_empty() { - let json = serde_json::to_string_pretty(&entries).map_err(|e| e.to_string())?; - let _ = pool - .sftp_write(&host_id, "~/.clawpal/discord-guild-channels.json", &json) - .await; - } + // Persist to remote cache + if !entries.is_empty() { + let json = serde_json::to_string_pretty(&entries).map_err(|e| e.to_string())?; + let _ = pool + .sftp_write(&host_id, "~/.clawpal/discord-guild-channels.json", &json) + .await; + } - Ok(entries) + Ok(entries) + }) } #[tauri::command] @@ -288,21 +290,23 @@ pub async fn remote_list_bindings( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result, String> { - let output = crate::cli_runner::run_openclaw_remote( - &pool, - &host_id, - &["config", "get", "bindings", "--json"], - ) - .await?; - // "bindings" may not exist yet — treat non-zero exit with "not found" as empty - if output.exit_code != 0 { - let msg = format!("{} {}", output.stderr, output.stdout).to_lowercase(); - if msg.contains("not found") { - return Ok(Vec::new()); + timed_async!("remote_list_bindings", { + let output = crate::cli_runner::run_openclaw_remote( + &pool, + &host_id, + &["config", "get", "bindings", "--json"], + ) + .await?; + // "bindings" may not exist yet — treat non-zero exit with "not found" as empty + if output.exit_code != 0 { + let msg = format!("{} {}", output.stderr, output.stdout).to_lowercase(); + if msg.contains("not found") { + return Ok(Vec::new()); + } } - } - let json = crate::cli_runner::parse_json_output(&output)?; - 
clawpal_core::discovery::parse_bindings(&json.to_string()) + let json = crate::cli_runner::parse_json_output(&output)?; + clawpal_core::discovery::parse_bindings(&json.to_string()) + }) } #[tauri::command] @@ -310,27 +314,29 @@ pub async fn remote_list_channels_minimal( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result, String> { - let output = crate::cli_runner::run_openclaw_remote( - &pool, - &host_id, - &["config", "get", "channels", "--json"], - ) - .await?; - // channels key might not exist yet - if output.exit_code != 0 { - let msg = format!("{} {}", output.stderr, output.stdout).to_lowercase(); - if msg.contains("not found") { - return Ok(Vec::new()); + timed_async!("remote_list_channels_minimal", { + let output = crate::cli_runner::run_openclaw_remote( + &pool, + &host_id, + &["config", "get", "channels", "--json"], + ) + .await?; + // channels key might not exist yet + if output.exit_code != 0 { + let msg = format!("{} {}", output.stderr, output.stdout).to_lowercase(); + if msg.contains("not found") { + return Ok(Vec::new()); + } + return Err(format!( + "openclaw config get channels failed: {}", + output.stderr + )); } - return Err(format!( - "openclaw config get channels failed: {}", - output.stderr - )); - } - let channels_val = crate::cli_runner::parse_json_output(&output).unwrap_or(Value::Null); - // Wrap in top-level object with "channels" key so collect_channel_nodes works - let cfg = serde_json::json!({ "channels": channels_val }); - Ok(collect_channel_nodes(&cfg)) + let channels_val = crate::cli_runner::parse_json_output(&output).unwrap_or(Value::Null); + // Wrap in top-level object with "channels" key so collect_channel_nodes works + let cfg = serde_json::json!({ "channels": channels_val }); + Ok(collect_channel_nodes(&cfg)) + }) } #[tauri::command] @@ -338,518 +344,535 @@ pub async fn remote_list_agents_overview( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result, String> { - let output = - 
run_openclaw_remote_with_autofix(&pool, &host_id, &["agents", "list", "--json"]).await?; - if output.exit_code != 0 { - let details = format!("{}\n{}", output.stderr.trim(), output.stdout.trim()); - return Err(format!( - "openclaw agents list failed ({}): {}", - output.exit_code, - details.trim() - )); - } - let json = crate::cli_runner::parse_json_output(&output)?; - // Check which agents have sessions remotely (single command, batch check) - // Lists agents whose sessions.json is larger than 2 bytes (not just "{}") - let online_set = match pool.exec_login( - &host_id, - "for d in ~/.openclaw/agents/*/sessions/sessions.json; do [ -f \"$d\" ] && [ $(wc -c < \"$d\") -gt 2 ] && basename $(dirname $(dirname \"$d\")); done", - ).await { - Ok(result) => { - result.stdout.lines() - .map(|l| l.trim().to_string()) - .filter(|l| !l.is_empty()) - .collect::>() + timed_async!("remote_list_agents_overview", { + let output = + run_openclaw_remote_with_autofix(&pool, &host_id, &["agents", "list", "--json"]) + .await?; + if output.exit_code != 0 { + let details = format!("{}\n{}", output.stderr.trim(), output.stdout.trim()); + return Err(format!( + "openclaw agents list failed ({}): {}", + output.exit_code, + details.trim() + )); } - Err(_) => std::collections::HashSet::new(), // fallback: all offline - }; - parse_agents_cli_output(&json, Some(&online_set)) + let json = crate::cli_runner::parse_json_output(&output)?; + // Check which agents have sessions remotely (single command, batch check) + // Lists agents whose sessions.json is larger than 2 bytes (not just "{}") + let online_set = match pool.exec_login( + &host_id, + "for d in ~/.openclaw/agents/*/sessions/sessions.json; do [ -f \"$d\" ] && [ $(wc -c < \"$d\") -gt 2 ] && basename $(dirname $(dirname \"$d\")); done", + ).await { + Ok(result) => { + result.stdout.lines() + .map(|l| l.trim().to_string()) + .filter(|l| !l.is_empty()) + .collect::>() + } + Err(_) => std::collections::HashSet::new(), // fallback: all offline + }; 
+ parse_agents_cli_output(&json, Some(&online_set)) + }) } #[tauri::command] pub async fn list_channels() -> Result, String> { - tauri::async_runtime::spawn_blocking(|| { - let paths = resolve_paths(); - let cfg = read_openclaw_config(&paths)?; - let mut nodes = collect_channel_nodes(&cfg); - enrich_channel_display_names(&paths, &cfg, &mut nodes)?; - Ok(nodes) + timed_async!("list_channels", { + tauri::async_runtime::spawn_blocking(|| { + let paths = resolve_paths(); + let cfg = read_openclaw_config(&paths)?; + let mut nodes = collect_channel_nodes(&cfg); + enrich_channel_display_names(&paths, &cfg, &mut nodes)?; + Ok(nodes) + }) + .await + .map_err(|e| e.to_string())? }) - .await - .map_err(|e| e.to_string())? } #[tauri::command] pub async fn list_channels_minimal( cache: tauri::State<'_, crate::cli_runner::CliCache>, ) -> Result, String> { - let cache_key = local_cli_cache_key("channels-minimal"); - let ttl = Some(std::time::Duration::from_secs(30)); - if let Some(cached) = cache.get(&cache_key, ttl) { - return serde_json::from_str(&cached).map_err(|e| e.to_string()); - } - let cache = cache.inner().clone(); - let cache_key_cloned = cache_key.clone(); - tauri::async_runtime::spawn_blocking(move || { - let output = crate::cli_runner::run_openclaw(&["config", "get", "channels", "--json"]) - .map_err(|e| format!("Failed to run openclaw: {e}"))?; - if output.exit_code != 0 { - let msg = format!("{} {}", output.stderr, output.stdout).to_lowercase(); - if msg.contains("not found") { - return Ok(Vec::new()); + timed_async!("list_channels_minimal", { + let cache_key = local_cli_cache_key("channels-minimal"); + let ttl = Some(std::time::Duration::from_secs(30)); + if let Some(cached) = cache.get(&cache_key, ttl) { + return serde_json::from_str(&cached).map_err(|e| e.to_string()); + } + let cache = cache.inner().clone(); + let cache_key_cloned = cache_key.clone(); + tauri::async_runtime::spawn_blocking(move || { + let output = crate::cli_runner::run_openclaw(&["config", 
"get", "channels", "--json"]) + .map_err(|e| format!("Failed to run openclaw: {e}"))?; + if output.exit_code != 0 { + let msg = format!("{} {}", output.stderr, output.stdout).to_lowercase(); + if msg.contains("not found") { + return Ok(Vec::new()); + } + // Fallback: direct read + let paths = resolve_paths(); + let cfg = read_openclaw_config(&paths)?; + let result = collect_channel_nodes(&cfg); + if let Ok(serialized) = serde_json::to_string(&result) { + cache.set(cache_key_cloned, serialized); + } + return Ok(result); } - // Fallback: direct read - let paths = resolve_paths(); - let cfg = read_openclaw_config(&paths)?; + let channels_val = crate::cli_runner::parse_json_output(&output).unwrap_or(Value::Null); + let cfg = serde_json::json!({ "channels": channels_val }); let result = collect_channel_nodes(&cfg); if let Ok(serialized) = serde_json::to_string(&result) { cache.set(cache_key_cloned, serialized); } - return Ok(result); - } - let channels_val = crate::cli_runner::parse_json_output(&output).unwrap_or(Value::Null); - let cfg = serde_json::json!({ "channels": channels_val }); - let result = collect_channel_nodes(&cfg); - if let Ok(serialized) = serde_json::to_string(&result) { - cache.set(cache_key_cloned, serialized); - } - Ok(result) + Ok(result) + }) + .await + .map_err(|e| e.to_string())? }) - .await - .map_err(|e| e.to_string())? 
} #[tauri::command] pub fn list_discord_guild_channels() -> Result, String> { - let paths = resolve_paths(); - let cache_file = paths.clawpal_dir.join("discord-guild-channels.json"); - if cache_file.exists() { - let text = fs::read_to_string(&cache_file).map_err(|e| e.to_string())?; - let entries: Vec = serde_json::from_str(&text).unwrap_or_default(); - return Ok(entries); - } - Ok(Vec::new()) + timed_sync!("list_discord_guild_channels", { + let paths = resolve_paths(); + let cache_file = paths.clawpal_dir.join("discord-guild-channels.json"); + if cache_file.exists() { + let text = fs::read_to_string(&cache_file).map_err(|e| e.to_string())?; + let entries: Vec = serde_json::from_str(&text).unwrap_or_default(); + return Ok(entries); + } + Ok(Vec::new()) + }) } #[tauri::command] pub async fn refresh_discord_guild_channels() -> Result, String> { - tauri::async_runtime::spawn_blocking(move || { - let paths = resolve_paths(); - ensure_dirs(&paths)?; - let cfg = read_openclaw_config(&paths)?; + timed_async!("refresh_discord_guild_channels", { + tauri::async_runtime::spawn_blocking(move || { + let paths = resolve_paths(); + ensure_dirs(&paths)?; + let cfg = read_openclaw_config(&paths)?; - let discord_cfg = cfg.get("channels").and_then(|c| c.get("discord")); - let configured_single_guild_id = discord_cfg - .and_then(|d| d.get("guilds")) - .and_then(Value::as_object) - .and_then(|guilds| { - if guilds.len() == 1 { - guilds.keys().next().cloned() - } else { - None - } - }); + let discord_cfg = cfg.get("channels").and_then(|c| c.get("discord")); + let configured_single_guild_id = discord_cfg + .and_then(|d| d.get("guilds")) + .and_then(Value::as_object) + .and_then(|guilds| { + if guilds.len() == 1 { + guilds.keys().next().cloned() + } else { + None + } + }); - // Extract bot token: top-level first, then fall back to first account token - let bot_token = discord_cfg - .and_then(|d| d.get("botToken").or_else(|| d.get("token"))) - .and_then(Value::as_str) - .map(|s| 
s.to_string()) - .or_else(|| { - discord_cfg - .and_then(|d| d.get("accounts")) - .and_then(Value::as_object) - .and_then(|accounts| { - accounts.values().find_map(|acct| { - acct.get("token") - .and_then(Value::as_str) - .filter(|s| !s.is_empty()) - .map(|s| s.to_string()) + // Extract bot token: top-level first, then fall back to first account token + let bot_token = discord_cfg + .and_then(|d| d.get("botToken").or_else(|| d.get("token"))) + .and_then(Value::as_str) + .map(|s| s.to_string()) + .or_else(|| { + discord_cfg + .and_then(|d| d.get("accounts")) + .and_then(Value::as_object) + .and_then(|accounts| { + accounts.values().find_map(|acct| { + acct.get("token") + .and_then(Value::as_str) + .filter(|s| !s.is_empty()) + .map(|s| s.to_string()) + }) }) - }) - }); - let cache_file = paths.clawpal_dir.join("discord-guild-channels.json"); - let mut guild_name_fallback_map = fs::read_to_string(&cache_file) - .ok() - .map(|text| parse_discord_cache_guild_name_fallbacks(&text)) - .unwrap_or_default(); - guild_name_fallback_map.extend(collect_discord_config_guild_name_fallbacks(discord_cfg)); + }); + let cache_file = paths.clawpal_dir.join("discord-guild-channels.json"); + let mut guild_name_fallback_map = fs::read_to_string(&cache_file) + .ok() + .map(|text| parse_discord_cache_guild_name_fallbacks(&text)) + .unwrap_or_default(); + guild_name_fallback_map + .extend(collect_discord_config_guild_name_fallbacks(discord_cfg)); + + let mut entries: Vec = Vec::new(); + let mut channel_ids: Vec = Vec::new(); + let mut unresolved_guild_ids: Vec = Vec::new(); + + // Helper: collect guilds from a guilds object + let mut collect_guilds = |guilds: &serde_json::Map| { + for (guild_id, guild_val) in guilds { + let guild_name = guild_val + .get("slug") + .or_else(|| guild_val.get("name")) + .and_then(Value::as_str) + .map(|s| s.trim().to_string()) + .filter(|s| !s.is_empty()) + .unwrap_or_else(|| guild_id.clone()); - let mut entries: Vec = Vec::new(); - let mut channel_ids: Vec = 
Vec::new(); - let mut unresolved_guild_ids: Vec = Vec::new(); - - // Helper: collect guilds from a guilds object - let mut collect_guilds = |guilds: &serde_json::Map| { - for (guild_id, guild_val) in guilds { - let guild_name = guild_val - .get("slug") - .or_else(|| guild_val.get("name")) - .and_then(Value::as_str) - .map(|s| s.trim().to_string()) - .filter(|s| !s.is_empty()) - .unwrap_or_else(|| guild_id.clone()); - - if guild_name == *guild_id && !unresolved_guild_ids.contains(guild_id) { - unresolved_guild_ids.push(guild_id.clone()); - } + if guild_name == *guild_id && !unresolved_guild_ids.contains(guild_id) { + unresolved_guild_ids.push(guild_id.clone()); + } - if let Some(channels) = guild_val.get("channels").and_then(Value::as_object) { - for (channel_id, _channel_val) in channels { - // Skip glob/wildcard patterns (e.g. "*") — not real channel IDs - if channel_id.contains('*') || channel_id.contains('?') { - continue; - } - if entries - .iter() - .any(|e| e.guild_id == *guild_id && e.channel_id == *channel_id) - { - continue; + if let Some(channels) = guild_val.get("channels").and_then(Value::as_object) { + for (channel_id, _channel_val) in channels { + // Skip glob/wildcard patterns (e.g. 
"*") — not real channel IDs + if channel_id.contains('*') || channel_id.contains('?') { + continue; + } + if entries + .iter() + .any(|e| e.guild_id == *guild_id && e.channel_id == *channel_id) + { + continue; + } + channel_ids.push(channel_id.clone()); + entries.push(DiscordGuildChannel { + guild_id: guild_id.clone(), + guild_name: guild_name.clone(), + channel_id: channel_id.clone(), + channel_name: channel_id.clone(), + default_agent_id: None, + }); } - channel_ids.push(channel_id.clone()); - entries.push(DiscordGuildChannel { - guild_id: guild_id.clone(), - guild_name: guild_name.clone(), - channel_id: channel_id.clone(), - channel_name: channel_id.clone(), - default_agent_id: None, - }); } } - } - }; + }; - // Collect from channels.discord.guilds (top-level structured config) - if let Some(guilds) = discord_cfg - .and_then(|d| d.get("guilds")) - .and_then(Value::as_object) - { - collect_guilds(guilds); - } + // Collect from channels.discord.guilds (top-level structured config) + if let Some(guilds) = discord_cfg + .and_then(|d| d.get("guilds")) + .and_then(Value::as_object) + { + collect_guilds(guilds); + } - // Collect from channels.discord.accounts..guilds (multi-account config) - if let Some(accounts) = discord_cfg - .and_then(|d| d.get("accounts")) - .and_then(Value::as_object) - { - for (_account_id, account_val) in accounts { - if let Some(guilds) = account_val.get("guilds").and_then(Value::as_object) { - collect_guilds(guilds); + // Collect from channels.discord.accounts..guilds (multi-account config) + if let Some(accounts) = discord_cfg + .and_then(|d| d.get("accounts")) + .and_then(Value::as_object) + { + for (_account_id, account_val) in accounts { + if let Some(guilds) = account_val.get("guilds").and_then(Value::as_object) { + collect_guilds(guilds); + } } } - } - drop(collect_guilds); // Release mutable borrows before bindings section - - // Also collect from bindings array (users may only have bindings, no guilds map) - if let Some(bindings) = 
cfg.get("bindings").and_then(Value::as_array) { - for b in bindings { - let m = match b.get("match") { - Some(m) => m, - None => continue, - }; - if m.get("channel").and_then(Value::as_str) != Some("discord") { - continue; - } - let guild_id = match m.get("guildId") { - Some(Value::String(s)) => s.clone(), - Some(Value::Number(n)) => n.to_string(), - _ => continue, - }; - let channel_id = match m.pointer("/peer/id") { - Some(Value::String(s)) => s.clone(), - Some(Value::Number(n)) => n.to_string(), - _ => continue, - }; - // Skip if already collected from guilds map - if entries - .iter() - .any(|e| e.guild_id == guild_id && e.channel_id == channel_id) - { - continue; - } - if !unresolved_guild_ids.contains(&guild_id) { - unresolved_guild_ids.push(guild_id.clone()); + drop(collect_guilds); // Release mutable borrows before bindings section + + // Also collect from bindings array (users may only have bindings, no guilds map) + if let Some(bindings) = cfg.get("bindings").and_then(Value::as_array) { + for b in bindings { + let m = match b.get("match") { + Some(m) => m, + None => continue, + }; + if m.get("channel").and_then(Value::as_str) != Some("discord") { + continue; + } + let guild_id = match m.get("guildId") { + Some(Value::String(s)) => s.clone(), + Some(Value::Number(n)) => n.to_string(), + _ => continue, + }; + let channel_id = match m.pointer("/peer/id") { + Some(Value::String(s)) => s.clone(), + Some(Value::Number(n)) => n.to_string(), + _ => continue, + }; + // Skip if already collected from guilds map + if entries + .iter() + .any(|e| e.guild_id == guild_id && e.channel_id == channel_id) + { + continue; + } + if !unresolved_guild_ids.contains(&guild_id) { + unresolved_guild_ids.push(guild_id.clone()); + } + channel_ids.push(channel_id.clone()); + entries.push(DiscordGuildChannel { + guild_id: guild_id.clone(), + guild_name: guild_id.clone(), + channel_id: channel_id.clone(), + channel_name: channel_id.clone(), + default_agent_id: None, + }); } - 
channel_ids.push(channel_id.clone()); - entries.push(DiscordGuildChannel { - guild_id: guild_id.clone(), - guild_name: guild_id.clone(), - channel_id: channel_id.clone(), - channel_name: channel_id.clone(), - default_agent_id: None, - }); } - } - // Fallback A: fetch channels from Discord REST for guilds that have no entries yet. - // Build a guild_id -> token mapping so each guild uses the correct bot token. - { - let mut guild_token_map: std::collections::HashMap = - std::collections::HashMap::new(); - - // Map guilds from accounts to their respective tokens - if let Some(accounts) = discord_cfg - .and_then(|d| d.get("accounts")) - .and_then(Value::as_object) + // Fallback A: fetch channels from Discord REST for guilds that have no entries yet. + // Build a guild_id -> token mapping so each guild uses the correct bot token. { - for (_acct_id, acct_val) in accounts { - let acct_token = acct_val - .get("token") - .and_then(Value::as_str) - .filter(|s| !s.is_empty()) - .map(|s| s.to_string()); - if let Some(token) = acct_token { - if let Some(guilds) = acct_val.get("guilds").and_then(Value::as_object) { - for guild_id in guilds.keys() { - guild_token_map - .entry(guild_id.clone()) - .or_insert_with(|| token.clone()); + let mut guild_token_map: std::collections::HashMap = + std::collections::HashMap::new(); + + // Map guilds from accounts to their respective tokens + if let Some(accounts) = discord_cfg + .and_then(|d| d.get("accounts")) + .and_then(Value::as_object) + { + for (_acct_id, acct_val) in accounts { + let acct_token = acct_val + .get("token") + .and_then(Value::as_str) + .filter(|s| !s.is_empty()) + .map(|s| s.to_string()); + if let Some(token) = acct_token { + if let Some(guilds) = acct_val.get("guilds").and_then(Value::as_object) + { + for guild_id in guilds.keys() { + guild_token_map + .entry(guild_id.clone()) + .or_insert_with(|| token.clone()); + } } } } } - } - // Also map top-level guilds to the top-level bot token - if let Some(token) = &bot_token 
{ - let configured_guild_ids = collect_discord_config_guild_ids(discord_cfg); - for guild_id in &configured_guild_ids { - guild_token_map - .entry(guild_id.clone()) - .or_insert_with(|| token.clone()); + // Also map top-level guilds to the top-level bot token + if let Some(token) = &bot_token { + let configured_guild_ids = collect_discord_config_guild_ids(discord_cfg); + for guild_id in &configured_guild_ids { + guild_token_map + .entry(guild_id.clone()) + .or_insert_with(|| token.clone()); + } } - } - for (guild_id, token) in &guild_token_map { - // Skip guilds that already have entries from config/bindings - if entries.iter().any(|e| e.guild_id == *guild_id) { - continue; + for (guild_id, token) in &guild_token_map { + // Skip guilds that already have entries from config/bindings + if entries.iter().any(|e| e.guild_id == *guild_id) { + continue; + } + if let Ok(channels) = fetch_discord_guild_channels(token, guild_id) { + for (channel_id, channel_name) in channels { + if entries + .iter() + .any(|e| e.guild_id == *guild_id && e.channel_id == channel_id) + { + continue; + } + channel_ids.push(channel_id.clone()); + entries.push(DiscordGuildChannel { + guild_id: guild_id.clone(), + guild_name: guild_id.clone(), + channel_id, + channel_name, + default_agent_id: None, + }); + } + } } - if let Ok(channels) = fetch_discord_guild_channels(token, guild_id) { - for (channel_id, channel_name) in channels { - if entries - .iter() - .any(|e| e.guild_id == *guild_id && e.channel_id == channel_id) - { + } + + // Fallback B: query channel ids from directory and keep compatibility + // with existing cache shape when config has no explicit channel map. 
+ if channel_ids.is_empty() { + if let Ok(output) = run_openclaw_raw(&[ + "directory", + "groups", + "list", + "--channel", + "discord", + "--json", + ]) { + for channel_id in parse_directory_group_channel_ids(&output.stdout) { + if entries.iter().any(|e| e.channel_id == channel_id) { continue; } + let (guild_id, guild_name) = + if let Some(gid) = configured_single_guild_id.clone() { + (gid.clone(), gid) + } else { + ("discord".to_string(), "Discord".to_string()) + }; channel_ids.push(channel_id.clone()); entries.push(DiscordGuildChannel { - guild_id: guild_id.clone(), - guild_name: guild_id.clone(), - channel_id, - channel_name, + guild_id, + guild_name, + channel_id: channel_id.clone(), + channel_name: channel_id, default_agent_id: None, }); } } } - } - // Fallback B: query channel ids from directory and keep compatibility - // with existing cache shape when config has no explicit channel map. - if channel_ids.is_empty() { - if let Ok(output) = run_openclaw_raw(&[ - "directory", - "groups", - "list", - "--channel", - "discord", - "--json", - ]) { - for channel_id in parse_directory_group_channel_ids(&output.stdout) { - if entries.iter().any(|e| e.channel_id == channel_id) { - continue; - } - let (guild_id, guild_name) = - if let Some(gid) = configured_single_guild_id.clone() { - (gid.clone(), gid) - } else { - ("discord".to_string(), "Discord".to_string()) - }; - channel_ids.push(channel_id.clone()); - entries.push(DiscordGuildChannel { - guild_id, - guild_name, - channel_id: channel_id.clone(), - channel_name: channel_id, - default_agent_id: None, - }); - } + if entries.is_empty() { + return Ok(Vec::new()); } - } - if entries.is_empty() { - return Ok(Vec::new()); - } - - // Resolve channel names via openclaw CLI - if !channel_ids.is_empty() { - let mut args = vec![ - "channels", - "resolve", - "--json", - "--channel", - "discord", - "--kind", - "auto", - ]; - let id_refs: Vec<&str> = channel_ids.iter().map(String::as_str).collect(); - 
args.extend_from_slice(&id_refs); - - if let Ok(output) = run_openclaw_raw(&args) { - if let Some(name_map) = parse_resolve_name_map(&output.stdout) { - for entry in &mut entries { - if let Some(name) = name_map.get(&entry.channel_id) { - entry.channel_name = name.clone(); + // Resolve channel names via openclaw CLI + if !channel_ids.is_empty() { + let mut args = vec![ + "channels", + "resolve", + "--json", + "--channel", + "discord", + "--kind", + "auto", + ]; + let id_refs: Vec<&str> = channel_ids.iter().map(String::as_str).collect(); + args.extend_from_slice(&id_refs); + + if let Ok(output) = run_openclaw_raw(&args) { + if let Some(name_map) = parse_resolve_name_map(&output.stdout) { + for entry in &mut entries { + if let Some(name) = name_map.get(&entry.channel_id) { + entry.channel_name = name.clone(); + } } } } } - } - // Resolve guild names via Discord REST API - if let Some(token) = &bot_token { - if !unresolved_guild_ids.is_empty() { - let mut guild_name_map: std::collections::HashMap = - std::collections::HashMap::new(); - for gid in &unresolved_guild_ids { - if let Ok(name) = fetch_discord_guild_name(token, gid) { - guild_name_map.insert(gid.clone(), name); + // Resolve guild names via Discord REST API + if let Some(token) = &bot_token { + if !unresolved_guild_ids.is_empty() { + let mut guild_name_map: std::collections::HashMap = + std::collections::HashMap::new(); + for gid in &unresolved_guild_ids { + if let Ok(name) = fetch_discord_guild_name(token, gid) { + guild_name_map.insert(gid.clone(), name); + } } - } - for entry in &mut entries { - if let Some(name) = guild_name_map.get(&entry.guild_id) { - entry.guild_name = name.clone(); + for entry in &mut entries { + if let Some(name) = guild_name_map.get(&entry.guild_id) { + entry.guild_name = name.clone(); + } } } } - } - for entry in &mut entries { - if entry.guild_name == entry.guild_id { - if let Some(name) = guild_name_fallback_map.get(&entry.guild_id) { - entry.guild_name = name.clone(); + for 
entry in &mut entries { + if entry.guild_name == entry.guild_id { + if let Some(name) = guild_name_fallback_map.get(&entry.guild_id) { + entry.guild_name = name.clone(); + } } } - } - // Resolve default agent per guild from account config + bindings - { - // Build account_id -> default agent_id from bindings (account-level, no peer) - let mut account_agent_map: std::collections::HashMap = - std::collections::HashMap::new(); - if let Some(bindings) = cfg.get("bindings").and_then(Value::as_array) { - for b in bindings { - let m = match b.get("match") { - Some(m) => m, - None => continue, - }; - if m.get("channel").and_then(Value::as_str) != Some("discord") { - continue; - } - let account_id = match m.get("accountId").and_then(Value::as_str) { - Some(s) => s, - None => continue, - }; - if m.get("peer").and_then(|p| p.get("id")).is_some() { - continue; - } - if let Some(agent_id) = b.get("agentId").and_then(Value::as_str) { - account_agent_map - .entry(account_id.to_string()) - .or_insert_with(|| agent_id.to_string()); + // Resolve default agent per guild from account config + bindings + { + // Build account_id -> default agent_id from bindings (account-level, no peer) + let mut account_agent_map: std::collections::HashMap = + std::collections::HashMap::new(); + if let Some(bindings) = cfg.get("bindings").and_then(Value::as_array) { + for b in bindings { + let m = match b.get("match") { + Some(m) => m, + None => continue, + }; + if m.get("channel").and_then(Value::as_str) != Some("discord") { + continue; + } + let account_id = match m.get("accountId").and_then(Value::as_str) { + Some(s) => s, + None => continue, + }; + if m.get("peer").and_then(|p| p.get("id")).is_some() { + continue; + } + if let Some(agent_id) = b.get("agentId").and_then(Value::as_str) { + account_agent_map + .entry(account_id.to_string()) + .or_insert_with(|| agent_id.to_string()); + } } } - } - let mut guild_default_agent: std::collections::HashMap = - std::collections::HashMap::new(); - if let 
Some(accounts) = discord_cfg - .and_then(|d| d.get("accounts")) - .and_then(Value::as_object) - { - for (account_id, account_val) in accounts { - let agent = account_agent_map - .get(account_id) - .cloned() - .unwrap_or_else(|| account_id.clone()); - if let Some(guilds) = account_val.get("guilds").and_then(Value::as_object) { - for guild_id in guilds.keys() { - guild_default_agent - .entry(guild_id.clone()) - .or_insert(agent.clone()); + let mut guild_default_agent: std::collections::HashMap = + std::collections::HashMap::new(); + if let Some(accounts) = discord_cfg + .and_then(|d| d.get("accounts")) + .and_then(Value::as_object) + { + for (account_id, account_val) in accounts { + let agent = account_agent_map + .get(account_id) + .cloned() + .unwrap_or_else(|| account_id.clone()); + if let Some(guilds) = account_val.get("guilds").and_then(Value::as_object) { + for guild_id in guilds.keys() { + guild_default_agent + .entry(guild_id.clone()) + .or_insert(agent.clone()); + } } } } - } - for entry in &mut entries { - if entry.default_agent_id.is_none() { - if let Some(agent_id) = guild_default_agent.get(&entry.guild_id) { - entry.default_agent_id = Some(agent_id.clone()); + for entry in &mut entries { + if entry.default_agent_id.is_none() { + if let Some(agent_id) = guild_default_agent.get(&entry.guild_id) { + entry.default_agent_id = Some(agent_id.clone()); + } } } } - } - // Persist to cache - let json = serde_json::to_string_pretty(&entries).map_err(|e| e.to_string())?; - write_text(&cache_file, &json)?; + // Persist to cache + let json = serde_json::to_string_pretty(&entries).map_err(|e| e.to_string())?; + write_text(&cache_file, &json)?; - Ok(entries) + Ok(entries) + }) + .await + .map_err(|e| e.to_string())? }) - .await - .map_err(|e| e.to_string())? 
} #[tauri::command] pub async fn list_bindings( cache: tauri::State<'_, crate::cli_runner::CliCache>, ) -> Result, String> { - let cache_key = local_cli_cache_key("bindings"); - if let Some(cached) = cache.get(&cache_key, None) { - return serde_json::from_str(&cached).map_err(|e| e.to_string()); - } - let cache = cache.inner().clone(); - let cache_key_cloned = cache_key.clone(); - tauri::async_runtime::spawn_blocking(move || { - let output = crate::cli_runner::run_openclaw(&["config", "get", "bindings", "--json"])?; - // "bindings" may not exist yet — treat "not found" as empty - if output.exit_code != 0 { - let msg = format!("{} {}", output.stderr, output.stdout).to_lowercase(); - if msg.contains("not found") { - return Ok(Vec::new()); - } + timed_async!("list_bindings", { + let cache_key = local_cli_cache_key("bindings"); + if let Some(cached) = cache.get(&cache_key, None) { + return serde_json::from_str(&cached).map_err(|e| e.to_string()); } - let json = crate::cli_runner::parse_json_output(&output)?; - let result = json.as_array().cloned().unwrap_or_default(); - if let Ok(serialized) = serde_json::to_string(&result) { - cache.set(cache_key_cloned, serialized); - } - Ok(result) + let cache = cache.inner().clone(); + let cache_key_cloned = cache_key.clone(); + tauri::async_runtime::spawn_blocking(move || { + let output = crate::cli_runner::run_openclaw(&["config", "get", "bindings", "--json"])?; + // "bindings" may not exist yet — treat "not found" as empty + if output.exit_code != 0 { + let msg = format!("{} {}", output.stderr, output.stdout).to_lowercase(); + if msg.contains("not found") { + return Ok(Vec::new()); + } + } + let json = crate::cli_runner::parse_json_output(&output)?; + let result = json.as_array().cloned().unwrap_or_default(); + if let Ok(serialized) = serde_json::to_string(&result) { + cache.set(cache_key_cloned, serialized); + } + Ok(result) + }) + .await + .map_err(|e| e.to_string())? }) - .await - .map_err(|e| e.to_string())? 
} #[tauri::command] pub async fn list_agents_overview( cache: tauri::State<'_, crate::cli_runner::CliCache>, ) -> Result, String> { - let cache_key = local_cli_cache_key("agents-list"); - if let Some(cached) = cache.get(&cache_key, None) { - return serde_json::from_str(&cached).map_err(|e| e.to_string()); - } - let cache = cache.inner().clone(); - let cache_key_cloned = cache_key.clone(); - tauri::async_runtime::spawn_blocking(move || { - let output = crate::cli_runner::run_openclaw(&["agents", "list", "--json"])?; - let json = crate::cli_runner::parse_json_output(&output)?; - let result = parse_agents_cli_output(&json, None)?; - if let Ok(serialized) = serde_json::to_string(&result) { - cache.set(cache_key_cloned, serialized); + timed_async!("list_agents_overview", { + let cache_key = local_cli_cache_key("agents-list"); + if let Some(cached) = cache.get(&cache_key, None) { + return serde_json::from_str(&cached).map_err(|e| e.to_string()); } - Ok(result) + let cache = cache.inner().clone(); + let cache_key_cloned = cache_key.clone(); + tauri::async_runtime::spawn_blocking(move || { + let output = crate::cli_runner::run_openclaw(&["agents", "list", "--json"])?; + let json = crate::cli_runner::parse_json_output(&output)?; + let result = parse_agents_cli_output(&json, None)?; + if let Ok(serialized) = serde_json::to_string(&result) { + cache.set(cache_key_cloned, serialized); + } + Ok(result) + }) + .await + .map_err(|e| e.to_string())? }) - .await - .map_err(|e| e.to_string())? 
} diff --git a/src-tauri/src/commands/doctor.rs b/src-tauri/src/commands/doctor.rs index c837dd28..ad65b1b3 100644 --- a/src-tauri/src/commands/doctor.rs +++ b/src-tauri/src/commands/doctor.rs @@ -762,23 +762,25 @@ pub async fn remote_run_doctor( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - let result = pool - .exec_login( - &host_id, - "openclaw doctor --json 2>/dev/null || openclaw doctor 2>&1", - ) - .await?; - // Try to parse as JSON first - if let Ok(json) = serde_json::from_str::(&result.stdout) { - return Ok(json); - } - // Fallback: return raw output as a simple report - Ok(serde_json::json!({ - "ok": result.exit_code == 0, - "score": if result.exit_code == 0 { 100 } else { 0 }, - "issues": [], - "rawOutput": result.stdout, - })) + timed_async!("remote_run_doctor", { + let result = pool + .exec_login( + &host_id, + "openclaw doctor --json 2>/dev/null || openclaw doctor 2>&1", + ) + .await?; + // Try to parse as JSON first + if let Ok(json) = serde_json::from_str::(&result.stdout) { + return Ok(json); + } + // Fallback: return raw output as a simple report + Ok(serde_json::json!({ + "ok": result.exit_code == 0, + "score": if result.exit_code == 0 { 100 } else { 0 }, + "issues": [], + "rawOutput": result.stdout, + })) + }) } #[tauri::command] @@ -787,21 +789,30 @@ pub async fn remote_fix_issues( host_id: String, ids: Vec, ) -> Result { - let (config_path, raw, _cfg) = - remote_read_openclaw_config_text_and_json(&pool, &host_id).await?; - let mut cfg = clawpal_core::doctor::parse_json5_document_or_default(&raw); - let applied = clawpal_core::doctor::apply_issue_fixes(&mut cfg, &ids)?; - - if !applied.is_empty() { - remote_write_config_with_snapshot(&pool, &host_id, &config_path, &raw, &cfg, "doctor-fix") + timed_async!("remote_fix_issues", { + let (config_path, raw, _cfg) = + remote_read_openclaw_config_text_and_json(&pool, &host_id).await?; + let mut cfg = clawpal_core::doctor::parse_json5_document_or_default(&raw); + let applied = 
clawpal_core::doctor::apply_issue_fixes(&mut cfg, &ids)?; + + if !applied.is_empty() { + remote_write_config_with_snapshot( + &pool, + &host_id, + &config_path, + &raw, + &cfg, + "doctor-fix", + ) .await?; - } + } - let remaining: Vec = ids.into_iter().filter(|id| !applied.contains(id)).collect(); - Ok(FixResult { - ok: true, - applied, - remaining_issues: remaining, + let remaining: Vec = ids.into_iter().filter(|id| !applied.contains(id)).collect(); + Ok(FixResult { + ok: true, + applied, + remaining_issues: remaining, + }) }) } @@ -810,81 +821,88 @@ pub async fn remote_get_system_status( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - // Tier 1: fast, essential — health check + config + real agent list. - let (config_res, agents_res, pgrep_res) = tokio::join!( - run_openclaw_remote_with_autofix(&pool, &host_id, &["config", "get", "agents", "--json"]), - run_openclaw_remote_with_autofix(&pool, &host_id, &["agents", "list", "--json"]), - pool.exec(&host_id, "pgrep -f '[o]penclaw-gateway' >/dev/null 2>&1"), - ); + timed_async!("remote_get_system_status", { + // Tier 1: fast, essential — health check + config + real agent list. 
+ let (config_res, agents_res, pgrep_res) = tokio::join!( + run_openclaw_remote_with_autofix( + &pool, + &host_id, + &["config", "get", "agents", "--json"] + ), + run_openclaw_remote_with_autofix(&pool, &host_id, &["agents", "list", "--json"]), + pool.exec(&host_id, "pgrep -f '[o]penclaw-gateway' >/dev/null 2>&1"), + ); - let config_ok = matches!(&config_res, Ok(output) if output.exit_code == 0); - let ssh_diagnostic = match (&config_res, &agents_res, &pgrep_res) { - (Err(error), _, _) => Some(from_any_error( - SshStage::RemoteExec, - SshIntent::HealthCheck, - error.clone(), - )), - (_, Err(error), _) => Some(from_any_error( - SshStage::RemoteExec, - SshIntent::HealthCheck, - error.clone(), - )), - (_, _, Err(error)) => Some(from_any_error( - SshStage::RemoteExec, - SshIntent::HealthCheck, - error.clone(), - )), - _ => None, - }; + let config_ok = matches!(&config_res, Ok(output) if output.exit_code == 0); + let ssh_diagnostic = match (&config_res, &agents_res, &pgrep_res) { + (Err(error), _, _) => Some(from_any_error( + SshStage::RemoteExec, + SshIntent::HealthCheck, + error.clone(), + )), + (_, Err(error), _) => Some(from_any_error( + SshStage::RemoteExec, + SshIntent::HealthCheck, + error.clone(), + )), + (_, _, Err(error)) => Some(from_any_error( + SshStage::RemoteExec, + SshIntent::HealthCheck, + error.clone(), + )), + _ => None, + }; - let active_agents = match &agents_res { - Ok(output) if output.exit_code == 0 => { - let json = crate::cli_runner::parse_json_output(output).unwrap_or(Value::Null); - count_agent_entries_from_cli_json(&json).unwrap_or(0) - } - _ => 0, - }; + let active_agents = match &agents_res { + Ok(output) if output.exit_code == 0 => { + let json = crate::cli_runner::parse_json_output(output).unwrap_or(Value::Null); + count_agent_entries_from_cli_json(&json).unwrap_or(0) + } + _ => 0, + }; - let (global_default_model, fallback_models) = match config_res { - Ok(ref output) if output.exit_code == 0 => { - let cfg: Value = 
crate::cli_runner::parse_json_output(output).unwrap_or(Value::Null); - let model = cfg - .pointer("/defaults/model") - .and_then(|v| read_model_value(v)) - .or_else(|| { - cfg.pointer("/default/model") - .and_then(|v| read_model_value(v)) - }); - let fallbacks = cfg - .pointer("/defaults/model/fallbacks") - .and_then(Value::as_array) - .map(|arr| { - arr.iter() - .filter_map(Value::as_str) - .map(String::from) - .collect() - }) - .unwrap_or_default(); - (model, fallbacks) - } - _ => (None, Vec::new()), - }; + let (global_default_model, fallback_models) = match config_res { + Ok(ref output) if output.exit_code == 0 => { + let cfg: Value = + crate::cli_runner::parse_json_output(output).unwrap_or(Value::Null); + let model = cfg + .pointer("/defaults/model") + .and_then(|v| read_model_value(v)) + .or_else(|| { + cfg.pointer("/default/model") + .and_then(|v| read_model_value(v)) + }); + let fallbacks = cfg + .pointer("/defaults/model/fallbacks") + .and_then(Value::as_array) + .map(|arr| { + arr.iter() + .filter_map(Value::as_str) + .map(String::from) + .collect() + }) + .unwrap_or_default(); + (model, fallbacks) + } + _ => (None, Vec::new()), + }; - // Avoid false negatives from transient SSH exec failures: - // if health probe fails but config fetch in the same cycle succeeded, - // keep health as true instead of flipping to unhealthy. - let healthy = match pgrep_res { - Ok(r) => r.exit_code == 0, - Err(_) if config_ok => true, - Err(_) => false, - }; + // Avoid false negatives from transient SSH exec failures: + // if health probe fails but config fetch in the same cycle succeeded, + // keep health as true instead of flipping to unhealthy. 
+ let healthy = match pgrep_res { + Ok(r) => r.exit_code == 0, + Err(_) if config_ok => true, + Err(_) => false, + }; - Ok(StatusLight { - healthy, - active_agents, - global_default_model, - fallback_models, - ssh_diagnostic, + Ok(StatusLight { + healthy, + active_agents, + global_default_model, + fallback_models, + ssh_diagnostic, + }) }) } @@ -895,27 +913,29 @@ pub async fn probe_ssh_connection_profile( request_id: String, app: AppHandle, ) -> Result { - let emitter = ProbeEmitter { - app, - host_id: host_id.clone(), - request_id, - current_stage: Arc::new(Mutex::new("connect".to_string())), - }; + timed_async!("probe_ssh_connection_profile", { + let emitter = ProbeEmitter { + app, + host_id: host_id.clone(), + request_id, + current_stage: Arc::new(Mutex::new("connect".to_string())), + }; - match timeout( - Duration::from_secs(SSH_PROBE_TOTAL_TIMEOUT_SECS), - probe_ssh_connection_profile_impl(&pool, &host_id, Some(emitter.clone())), - ) - .await - { - Ok(result) => result, - Err(_) => { - let current_stage = emitter.current_stage(); - let message = format!("ssh probe timed out during {current_stage}"); - emitter.emit(¤t_stage, "failed", None, Some(message.clone())); - Err(message) + match timeout( + Duration::from_secs(SSH_PROBE_TOTAL_TIMEOUT_SECS), + probe_ssh_connection_profile_impl(&pool, &host_id, Some(emitter.clone())), + ) + .await + { + Ok(result) => result, + Err(_) => { + let current_stage = emitter.current_stage(); + let message = format!("ssh probe timed out during {current_stage}"); + emitter.emit(¤t_stage, "failed", None, Some(message.clone())); + Err(message) + } } - } + }) } #[tauri::command] @@ -923,12 +943,14 @@ pub async fn remote_get_ssh_connection_profile( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - timeout( - Duration::from_secs(SSH_PROBE_TOTAL_TIMEOUT_SECS), - probe_ssh_connection_profile_impl(&pool, &host_id, None), - ) - .await - .map_err(|_| "ssh probe timed out".to_string())? 
+ timed_async!("remote_get_ssh_connection_profile", { + timeout( + Duration::from_secs(SSH_PROBE_TOTAL_TIMEOUT_SECS), + probe_ssh_connection_profile_impl(&pool, &host_id, None), + ) + .await + .map_err(|_| "ssh probe timed out".to_string())? + }) } #[tauri::command] @@ -936,199 +958,211 @@ pub async fn remote_get_status_extra( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - let detect_duplicates_script = concat!( - "seen=''; for p in $(which -a openclaw 2>/dev/null) ", - "\"$HOME/.npm-global/bin/openclaw\" \"/usr/local/bin/openclaw\" \"/opt/homebrew/bin/openclaw\"; do ", - "[ -x \"$p\" ] || continue; ", - "rp=$(readlink -f \"$p\" 2>/dev/null || echo \"$p\"); ", - "echo \"$seen\" | grep -qF \"$rp\" && continue; ", - "seen=\"$seen $rp\"; ", - "v=$($p --version 2>/dev/null || echo 'unknown'); ", - "echo \"$p: $v\"; ", - "done" - ); + timed_async!("remote_get_status_extra", { + let detect_duplicates_script = concat!( + "seen=''; for p in $(which -a openclaw 2>/dev/null) ", + "\"$HOME/.npm-global/bin/openclaw\" \"/usr/local/bin/openclaw\" \"/opt/homebrew/bin/openclaw\"; do ", + "[ -x \"$p\" ] || continue; ", + "rp=$(readlink -f \"$p\" 2>/dev/null || echo \"$p\"); ", + "echo \"$seen\" | grep -qF \"$rp\" && continue; ", + "seen=\"$seen $rp\"; ", + "v=$($p --version 2>/dev/null || echo 'unknown'); ", + "echo \"$p: $v\"; ", + "done" + ); - let (version_res, dup_res) = tokio::join!( - pool.exec_login(&host_id, "openclaw --version"), - pool.exec_login(&host_id, detect_duplicates_script), - ); + let (version_res, dup_res) = tokio::join!( + pool.exec_login(&host_id, "openclaw --version"), + pool.exec_login(&host_id, detect_duplicates_script), + ); - let openclaw_version = match version_res { - Ok(r) if r.exit_code == 0 => Some(r.stdout.trim().to_string()), - Ok(r) => { - let trimmed = r.stdout.trim().to_string(); - if trimmed.is_empty() { - None - } else { - Some(trimmed) + let openclaw_version = match version_res { + Ok(r) if r.exit_code == 0 => 
Some(r.stdout.trim().to_string()), + Ok(r) => { + let trimmed = r.stdout.trim().to_string(); + if trimmed.is_empty() { + None + } else { + Some(trimmed) + } } - } - Err(_) => None, - }; + Err(_) => None, + }; - let duplicate_installs = match dup_res { - Ok(r) => { - let entries: Vec = r - .stdout - .lines() - .map(|l| l.trim().to_string()) - .filter(|l| !l.is_empty()) - .collect(); - if entries.len() > 1 { - entries - } else { - Vec::new() + let duplicate_installs = match dup_res { + Ok(r) => { + let entries: Vec = r + .stdout + .lines() + .map(|l| l.trim().to_string()) + .filter(|l| !l.is_empty()) + .collect(); + if entries.len() > 1 { + entries + } else { + Vec::new() + } } - } - Err(_) => Vec::new(), - }; + Err(_) => Vec::new(), + }; - Ok(StatusExtra { - openclaw_version, - duplicate_installs, + Ok(StatusExtra { + openclaw_version, + duplicate_installs, + }) }) } #[tauri::command] pub async fn get_status_light() -> Result { - tauri::async_runtime::spawn_blocking(|| { - let paths = resolve_paths(); - let cfg = read_openclaw_config(&paths)?; - let local_health = clawpal_core::health::check_instance(&local_health_instance()) - .map_err(|e| e.to_string())?; - let active_agents = crate::cli_runner::run_openclaw(&["agents", "list", "--json"]) - .ok() - .and_then(|output| crate::cli_runner::parse_json_output(&output).ok()) - .and_then(|json| count_agent_entries_from_cli_json(&json).ok()) - .unwrap_or(0); - let global_default_model = cfg - .pointer("/agents/defaults/model") - .and_then(read_model_value) - .or_else(|| { - cfg.pointer("/agents/default/model") - .and_then(read_model_value) - }); + timed_async!("get_status_light", { + tauri::async_runtime::spawn_blocking(|| { + let paths = resolve_paths(); + let cfg = read_openclaw_config(&paths)?; + let local_health = clawpal_core::health::check_instance(&local_health_instance()) + .map_err(|e| e.to_string())?; + let active_agents = crate::cli_runner::run_openclaw(&["agents", "list", "--json"]) + .ok() + .and_then(|output| 
crate::cli_runner::parse_json_output(&output).ok()) + .and_then(|json| count_agent_entries_from_cli_json(&json).ok()) + .unwrap_or(0); + let global_default_model = cfg + .pointer("/agents/defaults/model") + .and_then(read_model_value) + .or_else(|| { + cfg.pointer("/agents/default/model") + .and_then(read_model_value) + }); - let fallback_models = cfg - .pointer("/agents/defaults/model/fallbacks") - .and_then(Value::as_array) - .map(|arr| { - arr.iter() - .filter_map(Value::as_str) - .map(String::from) - .collect() - }) - .unwrap_or_default(); + let fallback_models = cfg + .pointer("/agents/defaults/model/fallbacks") + .and_then(Value::as_array) + .map(|arr| { + arr.iter() + .filter_map(Value::as_str) + .map(String::from) + .collect() + }) + .unwrap_or_default(); - Ok(StatusLight { - healthy: local_health.healthy, - active_agents, - global_default_model, - fallback_models, - ssh_diagnostic: None, + Ok(StatusLight { + healthy: local_health.healthy, + active_agents, + global_default_model, + fallback_models, + ssh_diagnostic: None, + }) }) + .await + .map_err(|e| e.to_string())? }) - .await - .map_err(|e| e.to_string())? 
} #[tauri::command] pub async fn get_status_extra() -> Result { - tauri::async_runtime::spawn_blocking(|| { - let openclaw_version = { - let mut cache = OPENCLAW_VERSION_CACHE.lock().unwrap(); - if cache.is_none() { - let version = clawpal_core::health::check_instance(&local_health_instance()) - .ok() - .and_then(|status| status.version); - *cache = Some(version); - } - cache.as_ref().unwrap().clone() - }; - Ok(StatusExtra { - openclaw_version, - duplicate_installs: Vec::new(), + timed_async!("get_status_extra", { + tauri::async_runtime::spawn_blocking(|| { + let openclaw_version = { + let mut cache = OPENCLAW_VERSION_CACHE.lock().unwrap(); + if cache.is_none() { + let version = clawpal_core::health::check_instance(&local_health_instance()) + .ok() + .and_then(|status| status.version); + *cache = Some(version); + } + cache.as_ref().unwrap().clone() + }; + Ok(StatusExtra { + openclaw_version, + duplicate_installs: Vec::new(), + }) }) + .await + .map_err(|e| e.to_string())? }) - .await - .map_err(|e| e.to_string())? 
} #[tauri::command] pub fn get_system_status() -> Result { - let paths = resolve_paths(); - ensure_dirs(&paths)?; - let cfg = read_openclaw_config(&paths)?; - let active_agents = cfg - .get("agents") - .and_then(|a| a.get("list")) - .and_then(|a| a.as_array()) - .map(|a| a.len() as u32) - .unwrap_or(0); - let snapshots = list_snapshots(&paths.metadata_path) - .unwrap_or_default() - .items - .len(); - let model_summary = collect_model_summary(&cfg); - let channel_summary = collect_channel_summary(&cfg); - let memory = collect_memory_overview(&paths.base_dir); - let sessions = collect_session_overview(&paths.base_dir); - let openclaw_version = resolve_openclaw_version(); - let openclaw_update = - check_openclaw_update_cached(&paths, false).unwrap_or_else(|_| OpenclawUpdateCheck { - installed_version: openclaw_version.clone(), - latest_version: None, - upgrade_available: false, - channel: None, - details: Some("update status unavailable".into()), - source: "unknown".into(), - checked_at: format_timestamp_from_unix(unix_timestamp_secs()), - }); - Ok(SystemStatus { - healthy: true, - config_path: paths.config_path.to_string_lossy().to_string(), - openclaw_dir: paths.openclaw_dir.to_string_lossy().to_string(), - clawpal_dir: paths.clawpal_dir.to_string_lossy().to_string(), - openclaw_version, - active_agents, - snapshots, - channels: channel_summary, - models: model_summary, - memory, - sessions, - openclaw_update, + timed_sync!("get_system_status", { + let paths = resolve_paths(); + ensure_dirs(&paths)?; + let cfg = read_openclaw_config(&paths)?; + let active_agents = cfg + .get("agents") + .and_then(|a| a.get("list")) + .and_then(|a| a.as_array()) + .map(|a| a.len() as u32) + .unwrap_or(0); + let snapshots = list_snapshots(&paths.metadata_path) + .unwrap_or_default() + .items + .len(); + let model_summary = collect_model_summary(&cfg); + let channel_summary = collect_channel_summary(&cfg); + let memory = collect_memory_overview(&paths.base_dir); + let sessions = 
collect_session_overview(&paths.base_dir); + let openclaw_version = resolve_openclaw_version(); + let openclaw_update = + check_openclaw_update_cached(&paths, false).unwrap_or_else(|_| OpenclawUpdateCheck { + installed_version: openclaw_version.clone(), + latest_version: None, + upgrade_available: false, + channel: None, + details: Some("update status unavailable".into()), + source: "unknown".into(), + checked_at: format_timestamp_from_unix(unix_timestamp_secs()), + }); + Ok(SystemStatus { + healthy: true, + config_path: paths.config_path.to_string_lossy().to_string(), + openclaw_dir: paths.openclaw_dir.to_string_lossy().to_string(), + clawpal_dir: paths.clawpal_dir.to_string_lossy().to_string(), + openclaw_version, + active_agents, + snapshots, + channels: channel_summary, + models: model_summary, + memory, + sessions, + openclaw_update, + }) }) } #[tauri::command] pub fn run_doctor_command() -> Result { - let paths = resolve_paths(); - Ok(run_doctor(&paths)) + timed_sync!("run_doctor_command", { + let paths = resolve_paths(); + Ok(run_doctor(&paths)) + }) } #[tauri::command] pub fn fix_issues(ids: Vec) -> Result { - let paths = resolve_paths(); - let issues = run_doctor(&paths); - let mut fixable = Vec::new(); - for issue in issues.issues { - if ids.contains(&issue.id) && issue.auto_fixable { - fixable.push(issue.id); + timed_sync!("fix_issues", { + let paths = resolve_paths(); + let issues = run_doctor(&paths); + let mut fixable = Vec::new(); + for issue in issues.issues { + if ids.contains(&issue.id) && issue.auto_fixable { + fixable.push(issue.id); + } } - } - let auto_applied = apply_auto_fixes(&paths, &fixable); - let mut remaining = Vec::new(); - let mut applied = Vec::new(); - for id in ids { - if fixable.contains(&id) && auto_applied.iter().any(|x| x == &id) { - applied.push(id); - } else { - remaining.push(id); + let auto_applied = apply_auto_fixes(&paths, &fixable); + let mut remaining = Vec::new(); + let mut applied = Vec::new(); + for id in ids { + if 
fixable.contains(&id) && auto_applied.iter().any(|x| x == &id) { + applied.push(id); + } else { + remaining.push(id); + } } - } - Ok(FixResult { - ok: true, - applied, - remaining_issues: remaining, + Ok(FixResult { + ok: true, + applied, + remaining_issues: remaining, + }) }) } diff --git a/src-tauri/src/commands/doctor_assistant.rs b/src-tauri/src/commands/doctor_assistant.rs index bac699e0..2e4bc2b7 100644 --- a/src-tauri/src/commands/doctor_assistant.rs +++ b/src-tauri/src/commands/doctor_assistant.rs @@ -4292,12 +4292,14 @@ fn build_temp_gateway_record( pub async fn diagnose_doctor_assistant( app: AppHandle, ) -> Result { - let run_id = Uuid::new_v4().to_string(); - tauri::async_runtime::spawn_blocking(move || { - diagnose_doctor_assistant_local_impl(&app, &run_id, DOCTOR_ASSISTANT_TARGET_PROFILE) + timed_async!("diagnose_doctor_assistant", { + let run_id = Uuid::new_v4().to_string(); + tauri::async_runtime::spawn_blocking(move || { + diagnose_doctor_assistant_local_impl(&app, &run_id, DOCTOR_ASSISTANT_TARGET_PROFILE) + }) + .await + .map_err(|error| error.to_string())? }) - .await - .map_err(|error| error.to_string())? 
} #[tauri::command] @@ -4306,15 +4308,17 @@ pub async fn remote_diagnose_doctor_assistant( host_id: String, app: AppHandle, ) -> Result { - let run_id = Uuid::new_v4().to_string(); - diagnose_doctor_assistant_remote_impl( - &pool, - &host_id, - &app, - &run_id, - DOCTOR_ASSISTANT_TARGET_PROFILE, - ) - .await + timed_async!("remote_diagnose_doctor_assistant", { + let run_id = Uuid::new_v4().to_string(); + diagnose_doctor_assistant_remote_impl( + &pool, + &host_id, + &app, + &run_id, + DOCTOR_ASSISTANT_TARGET_PROFILE, + ) + .await + }) } #[tauri::command] @@ -4323,16 +4327,373 @@ pub async fn repair_doctor_assistant( temp_provider_profile_id: Option, app: AppHandle, ) -> Result { - let run_id = Uuid::new_v4().to_string(); - tauri::async_runtime::spawn_blocking(move || -> Result { + timed_async!("repair_doctor_assistant", { + let run_id = Uuid::new_v4().to_string(); + tauri::async_runtime::spawn_blocking( + move || -> Result { + let paths = resolve_paths(); + let before = match current_diagnosis { + Some(diagnosis) => diagnosis, + None => diagnose_doctor_assistant_local_impl( + &app, + &run_id, + DOCTOR_ASSISTANT_TARGET_PROFILE, + )?, + }; + let attempted_at = format_timestamp_from_unix(unix_timestamp_secs()); + let (selected_issue_ids, skipped_issue_ids) = collect_repairable_primary_issue_ids( + &before, + &before.summary.selected_fix_issue_ids, + ); + let mut applied_issue_ids = Vec::new(); + let mut failed_issue_ids = Vec::new(); + let mut steps = Vec::new(); + let mut current = before.clone(); + + if diagnose_doctor_assistant_status(&before) { + append_step( + &mut steps, + "repair.noop", + "No automatic repairs needed", + true, + "The primary gateway is already healthy", + None, + ); + return Ok(doctor_assistant_completed_result( + attempted_at, + "temporary".into(), + selected_issue_ids, + applied_issue_ids, + skipped_issue_ids, + failed_issue_ids, + steps, + before.clone(), + before, + )); + } + + if !diagnose_doctor_assistant_status(¤t) { + let temp_profile = 
choose_temp_gateway_profile_name(); + let temp_port = + choose_temp_gateway_port(resolve_main_port_from_diagnosis(¤t)); + emit_doctor_assistant_progress( + &app, + &run_id, + "bootstrap_temp_gateway", + "Bootstrapping temporary gateway", + 0.56, + 0, + None, + None, + ); + upsert_doctor_temp_gateway_record( + &paths, + build_temp_gateway_record( + DOCTOR_ASSISTANT_TEMP_SCOPE_LOCAL, + &temp_profile, + temp_port, + "bootstrapping", + resolve_main_port_from_diagnosis(¤t), + Some("bootstrap".into()), + ), + )?; + + let temp_flow = (|| -> Result<(), String> { + run_local_temp_gateway_action( + RescueBotAction::Set, + &temp_profile, + temp_port, + true, + &mut steps, + "temp.setup", + )?; + write_local_temp_gateway_marker( + &paths.openclaw_dir, + DOCTOR_ASSISTANT_TEMP_SCOPE_LOCAL, + &temp_profile, + )?; + emit_doctor_assistant_progress( + &app, + &run_id, + "bootstrap_temp_gateway", + "Syncing provider configuration into temporary gateway", + 0.58, + 0, + None, + None, + ); + let (provider, model) = sync_local_temp_gateway_provider_context( + &temp_profile, + temp_provider_profile_id.as_deref(), + &mut steps, + )?; + emit_doctor_assistant_progress( + &app, + &run_id, + "bootstrap_temp_gateway", + format!("Temporary gateway ready: {provider}/{model}"), + 0.64, + 0, + None, + None, + ); + upsert_doctor_temp_gateway_record( + &paths, + build_temp_gateway_record( + DOCTOR_ASSISTANT_TEMP_SCOPE_LOCAL, + &temp_profile, + temp_port, + "repairing", + resolve_main_port_from_diagnosis(¤t), + Some("repair".into()), + ), + )?; + + for round in 1..=DOCTOR_ASSISTANT_TEMP_REPAIR_ROUNDS { + run_local_temp_gateway_agent_repair_round( + &app, + &run_id, + &temp_profile, + ¤t, + round, + &mut steps, + )?; + let next = diagnose_doctor_assistant_local_impl( + &app, + &run_id, + DOCTOR_ASSISTANT_TARGET_PROFILE, + )?; + for (issue_id, label) in collect_resolved_issues(¤t, &next) { + merge_issue_lists( + &mut applied_issue_ids, + std::iter::once(issue_id.clone()), + ); + 
emit_doctor_assistant_progress( + &app, + &run_id, + "agent_repair", + format!("{label} fixed"), + 0.6 + (round as f32 * 0.03), + round, + Some(issue_id), + Some(label), + ); + } + current = next; + if diagnose_doctor_assistant_status(¤t) { + break; + } + } + Ok(()) + })(); + let temp_flow_error = temp_flow.as_ref().err().cloned(); + let pending_reason = temp_flow_error.as_ref().and_then(|error| { + doctor_assistant_extract_temp_provider_setup_reason(error) + }); + + emit_doctor_assistant_progress( + &app, + &run_id, + "cleanup", + "Cleaning up temporary gateway", + 0.94, + 0, + None, + None, + ); + let cleanup_result = run_local_temp_gateway_action( + RescueBotAction::Unset, + &temp_profile, + temp_port, + false, + &mut steps, + "temp.cleanup", + ); + let _ = remove_doctor_temp_gateway_record( + &paths, + DOCTOR_ASSISTANT_TEMP_SCOPE_LOCAL, + &temp_profile, + ); + match cleanup_result { + Ok(()) => match prune_local_temp_gateway_profile_roots(&paths.openclaw_dir) + { + Ok(removed) => append_step( + &mut steps, + "temp.cleanup.roots", + "Delete temporary gateway profiles", + true, + if removed.is_empty() { + "No temporary gateway profiles remained on disk".into() + } else { + format!( + "Removed {} temporary gateway profile directorie(s)", + removed.len() + ) + }, + None, + ), + Err(error) => append_step( + &mut steps, + "temp.cleanup.roots", + "Delete temporary gateway profiles", + false, + error, + None, + ), + }, + Err(error) => append_step( + &mut steps, + "temp.cleanup.error", + "Cleanup temporary gateway", + false, + error, + None, + ), + } + if temp_flow_error.is_some() || !diagnose_doctor_assistant_status(¤t) { + let fallback_reason = pending_reason + .clone() + .or(temp_flow_error.clone()) + .unwrap_or_else(|| { + "Temporary gateway repair finished with remaining issues".into() + }); + match fallback_restore_local_primary_config( + &app, + &run_id, + &mut steps, + &fallback_reason, + ) { + Ok(Some(next)) => { + for (issue_id, label) in 
collect_resolved_issues(¤t, &next) { + merge_issue_lists( + &mut applied_issue_ids, + std::iter::once(issue_id.clone()), + ); + emit_doctor_assistant_progress( + &app, + &run_id, + "cleanup", + format!("{label} fixed"), + 0.94, + 0, + Some(issue_id), + Some(label), + ); + } + current = next + } + Ok(None) => {} + Err(error) => append_step( + &mut steps, + "repair.fallback.error", + "Fallback restore primary config", + false, + error, + None, + ), + } + } + if let Some(reason) = pending_reason { + if !diagnose_doctor_assistant_status(¤t) { + emit_doctor_assistant_progress( + &app, &run_id, "cleanup", &reason, 0.96, 0, None, None, + ); + return Ok(doctor_assistant_pending_temp_provider_result( + attempted_at, + temp_profile, + selected_issue_ids.clone(), + applied_issue_ids.clone(), + skipped_issue_ids.clone(), + selected_issue_ids + .iter() + .filter(|id| !applied_issue_ids.contains(id)) + .cloned() + .collect(), + steps, + before, + current, + temp_provider_profile_id, + reason, + )); + } + } + } + + let after = diagnose_doctor_assistant_local_impl( + &app, + &run_id, + DOCTOR_ASSISTANT_TARGET_PROFILE, + )?; + for (issue_id, _label) in collect_resolved_issues(¤t, &after) { + merge_issue_lists(&mut applied_issue_ids, std::iter::once(issue_id)); + } + let remaining = after + .issues + .iter() + .map(|issue| issue.id.clone()) + .collect::>(); + failed_issue_ids = selected_issue_ids + .iter() + .filter(|id| remaining.contains(id)) + .cloned() + .collect(); + + emit_doctor_assistant_progress( + &app, + &run_id, + "cleanup", + if diagnose_doctor_assistant_status(&after) { + "Repair complete" + } else { + "Repair finished with remaining issues" + }, + 1.0, + 0, + None, + None, + ); + + Ok(doctor_assistant_completed_result( + attempted_at, + current.rescue_profile.clone(), + selected_issue_ids, + applied_issue_ids, + skipped_issue_ids, + failed_issue_ids, + steps, + before, + after, + )) + }, + ) + .await + .map_err(|error| error.to_string())? 
+ }) +} + +#[tauri::command] +pub async fn remote_repair_doctor_assistant( + pool: State<'_, SshConnectionPool>, + host_id: String, + current_diagnosis: Option, + temp_provider_profile_id: Option, + app: AppHandle, +) -> Result { + timed_async!("remote_repair_doctor_assistant", { + let run_id = Uuid::new_v4().to_string(); let paths = resolve_paths(); let before = match current_diagnosis { Some(diagnosis) => diagnosis, - None => diagnose_doctor_assistant_local_impl( - &app, - &run_id, - DOCTOR_ASSISTANT_TARGET_PROFILE, - )?, + None => { + diagnose_doctor_assistant_remote_impl( + &pool, + &host_id, + &app, + &run_id, + DOCTOR_ASSISTANT_TARGET_PROFILE, + ) + .await? + } }; let attempted_at = format_timestamp_from_unix(unix_timestamp_secs()); let (selected_issue_ids, skipped_issue_ids) = @@ -4380,7 +4741,7 @@ pub async fn repair_doctor_assistant( upsert_doctor_temp_gateway_record( &paths, build_temp_gateway_record( - DOCTOR_ASSISTANT_TEMP_SCOPE_LOCAL, + &host_id, &temp_profile, temp_port, "bootstrapping", @@ -4389,20 +4750,37 @@ pub async fn repair_doctor_assistant( ), )?; - let temp_flow = (|| -> Result<(), String> { - run_local_temp_gateway_action( + let mut temp_flow = async { + run_remote_temp_gateway_action( + &pool, + &host_id, RescueBotAction::Set, &temp_profile, temp_port, true, &mut steps, "temp.setup", - )?; - write_local_temp_gateway_marker( - &paths.openclaw_dir, - DOCTOR_ASSISTANT_TEMP_SCOPE_LOCAL, + ) + .await?; + let main_root = resolve_remote_main_root(&pool, &host_id).await; + if let Err(error) = write_remote_temp_gateway_marker( + &pool, + &host_id, + &main_root, + &host_id, &temp_profile, - )?; + ) + .await + { + append_step( + &mut steps, + "temp.marker", + "Mark temporary gateway ownership", + false, + error, + None, + ); + } emit_doctor_assistant_progress( &app, &run_id, @@ -4413,25 +4791,84 @@ pub async fn repair_doctor_assistant( None, None, ); - let (provider, model) = sync_local_temp_gateway_provider_context( + let (main_root, temp_root, 
donor_cfg) = sync_remote_temp_gateway_provider_context( + &pool, + &host_id, &temp_profile, temp_provider_profile_id.as_deref(), &mut steps, - )?; - emit_doctor_assistant_progress( - &app, - &run_id, - "bootstrap_temp_gateway", - format!("Temporary gateway ready: {provider}/{model}"), - 0.64, - 0, - None, - None, - ); + ) + .await?; + let mut provider_identity = None; + if let Err(error) = probe_remote_temp_gateway_agent_smoke( + &pool, + &host_id, + &temp_profile, + &mut steps, + ) + .await + { + let should_retry_from_remote_auth_store = temp_provider_profile_id.is_none() + && doctor_assistant_extract_temp_provider_setup_reason(&error).is_some(); + if !should_retry_from_remote_auth_store { + return Err(error); + } + emit_doctor_assistant_progress( + &app, + &run_id, + "bootstrap_temp_gateway", + "Rebuilding temporary gateway provider from remote auth store", + 0.62, + 0, + None, + None, + ); + rebuild_remote_temp_gateway_provider_context_from_auth_store( + &pool, + &host_id, + &main_root, + &temp_root, + &donor_cfg, + &mut steps, + ) + .await?; + probe_remote_temp_gateway_agent_smoke( + &pool, + &host_id, + &temp_profile, + &mut steps, + ) + .await + .map(|identity| provider_identity = Some(identity))?; + } else { + provider_identity = steps + .iter() + .rev() + .find(|step| step.id == "temp.probe.agent.identity") + .and_then(|step| { + let detail = step.detail.trim(); + detail + .strip_prefix("Temporary gateway replied using ") + .and_then(|value| value.split_once('/')) + .map(|(provider, model)| (provider.to_string(), model.to_string())) + }); + } + if let Some((provider, model)) = provider_identity.as_ref() { + emit_doctor_assistant_progress( + &app, + &run_id, + "bootstrap_temp_gateway", + format!("Temporary gateway ready: {provider}/{model}"), + 0.64, + 0, + None, + None, + ); + } upsert_doctor_temp_gateway_record( &paths, build_temp_gateway_record( - DOCTOR_ASSISTANT_TEMP_SCOPE_LOCAL, + &host_id, &temp_profile, temp_port, "repairing", @@ -4440,43 +4877,74 @@ 
pub async fn repair_doctor_assistant( ), )?; - for round in 1..=DOCTOR_ASSISTANT_TEMP_REPAIR_ROUNDS { - run_local_temp_gateway_agent_repair_round( - &app, - &run_id, - &temp_profile, - ¤t, - round, + if DOCTOR_ASSISTANT_REMOTE_SKIP_AGENT_REPAIR { + append_step( &mut steps, - )?; - let next = diagnose_doctor_assistant_local_impl( - &app, - &run_id, - DOCTOR_ASSISTANT_TARGET_PROFILE, - )?; - for (issue_id, label) in collect_resolved_issues(¤t, &next) { - merge_issue_lists( - &mut applied_issue_ids, - std::iter::once(issue_id.clone()), - ); - emit_doctor_assistant_progress( + "temp.debug.skip_agent_repair", + "Skip temporary gateway repair loop", + true, + "Remote Doctor debug mode leaves the primary gateway unchanged after temp bootstrap so the temporary gateway configuration can be inspected in isolation.", + None, + ); + } else { + for round in 1..=DOCTOR_ASSISTANT_TEMP_REPAIR_ROUNDS { + run_remote_temp_gateway_agent_repair_round( + &pool, + &host_id, &app, &run_id, - "agent_repair", - format!("{label} fixed"), - 0.6 + (round as f32 * 0.03), + &temp_profile, + ¤t, round, - Some(issue_id), - Some(label), - ); + &mut steps, + ) + .await?; + let next = diagnose_doctor_assistant_remote_impl( + &pool, + &host_id, + &app, + &run_id, + DOCTOR_ASSISTANT_TARGET_PROFILE, + ) + .await?; + for (issue_id, label) in collect_resolved_issues(¤t, &next) { + merge_issue_lists(&mut applied_issue_ids, std::iter::once(issue_id.clone())); + emit_doctor_assistant_progress( + &app, + &run_id, + "agent_repair", + format!("{label} fixed"), + 0.6 + (round as f32 * 0.03), + round, + Some(issue_id), + Some(label), + ); + } + current = next; + if diagnose_doctor_assistant_status(¤t) { + break; + } } - current = next; - if diagnose_doctor_assistant_status(¤t) { - break; + } + Ok::<(), String>(()) + } + .await; + if let Err(error) = temp_flow.as_ref() { + if doctor_assistant_is_remote_exec_timeout(error) { + let recovered = remote_wait_for_primary_gateway_recovery_after_timeout( + &pool, 
&host_id, &app, &run_id, &mut steps, + ) + .await?; + if recovered { + temp_flow = Ok(()); + } else { + temp_flow = Err( + "Temporary gateway repair timed out before health could be confirmed. Open Gateway Logs and inspect the latest repair output." + .into(), + ); } } - Ok(()) - })(); + } let temp_flow_error = temp_flow.as_ref().err().cloned(); let pending_reason = temp_flow_error .as_ref() @@ -4492,67 +4960,71 @@ pub async fn repair_doctor_assistant( None, None, ); - let cleanup_result = run_local_temp_gateway_action( + let cleanup_result = run_remote_temp_gateway_action( + &pool, + &host_id, RescueBotAction::Unset, &temp_profile, temp_port, false, &mut steps, "temp.cleanup", - ); - let _ = remove_doctor_temp_gateway_record( - &paths, - DOCTOR_ASSISTANT_TEMP_SCOPE_LOCAL, - &temp_profile, - ); - match cleanup_result { - Ok(()) => match prune_local_temp_gateway_profile_roots(&paths.openclaw_dir) { - Ok(removed) => append_step( - &mut steps, - "temp.cleanup.roots", - "Delete temporary gateway profiles", - true, - if removed.is_empty() { - "No temporary gateway profiles remained on disk".into() - } else { - format!( - "Removed {} temporary gateway profile directorie(s)", - removed.len() - ) - }, - None, - ), - Err(error) => append_step( - &mut steps, - "temp.cleanup.roots", - "Delete temporary gateway profiles", - false, - error, - None, - ), - }, - Err(error) => append_step( + ) + .await; + let _ = remove_doctor_temp_gateway_record(&paths, &host_id, &temp_profile); + if let Err(error) = cleanup_result { + append_step( &mut steps, "temp.cleanup.error", "Cleanup temporary gateway", false, error, None, - ), + ); } - if temp_flow_error.is_some() || !diagnose_doctor_assistant_status(¤t) { - let fallback_reason = pending_reason - .clone() - .or(temp_flow_error.clone()) - .unwrap_or_else(|| { - "Temporary gateway repair finished with remaining issues".into() - }); - match fallback_restore_local_primary_config( - &app, + let main_root = resolve_remote_main_root(&pool, 
&host_id).await; + match prune_remote_temp_gateway_profile_roots(&pool, &host_id, &main_root).await { + Ok(removed) => append_step( + &mut steps, + "temp.cleanup.roots", + "Delete temporary gateway profiles", + true, + if removed.is_empty() { + "No temporary gateway profiles remained on disk".into() + } else { + format!( + "Removed {} temporary gateway profile directorie(s)", + removed.len() + ) + }, + None, + ), + Err(error) => append_step( + &mut steps, + "temp.cleanup.roots", + "Delete temporary gateway profiles", + false, + error, + None, + ), + } + if temp_flow_error.is_some() || !diagnose_doctor_assistant_status(¤t) { + let fallback_reason = pending_reason + .clone() + .or(temp_flow_error.clone()) + .unwrap_or_else(|| { + "Temporary gateway repair finished with remaining issues".into() + }); + match fallback_restore_remote_primary_config( + &pool, + &host_id, + &app, &run_id, &mut steps, &fallback_reason, - ) { + ) + .await + { Ok(Some(next)) => { for (issue_id, label) in collect_resolved_issues(¤t, &next) { merge_issue_lists( @@ -4609,8 +5081,14 @@ pub async fn repair_doctor_assistant( } } - let after = - diagnose_doctor_assistant_local_impl(&app, &run_id, DOCTOR_ASSISTANT_TARGET_PROFILE)?; + let after = diagnose_doctor_assistant_remote_impl( + &pool, + &host_id, + &app, + &run_id, + DOCTOR_ASSISTANT_TARGET_PROFILE, + ) + .await?; for (issue_id, _label) in collect_resolved_issues(¤t, &after) { merge_issue_lists(&mut applied_issue_ids, std::iter::once(issue_id)); } @@ -4652,467 +5130,6 @@ pub async fn repair_doctor_assistant( after, )) }) - .await - .map_err(|error| error.to_string())? 
-} - -#[tauri::command] -pub async fn remote_repair_doctor_assistant( - pool: State<'_, SshConnectionPool>, - host_id: String, - current_diagnosis: Option, - temp_provider_profile_id: Option, - app: AppHandle, -) -> Result { - let run_id = Uuid::new_v4().to_string(); - let paths = resolve_paths(); - let before = match current_diagnosis { - Some(diagnosis) => diagnosis, - None => { - diagnose_doctor_assistant_remote_impl( - &pool, - &host_id, - &app, - &run_id, - DOCTOR_ASSISTANT_TARGET_PROFILE, - ) - .await? - } - }; - let attempted_at = format_timestamp_from_unix(unix_timestamp_secs()); - let (selected_issue_ids, skipped_issue_ids) = - collect_repairable_primary_issue_ids(&before, &before.summary.selected_fix_issue_ids); - let mut applied_issue_ids = Vec::new(); - let mut failed_issue_ids = Vec::new(); - let mut steps = Vec::new(); - let mut current = before.clone(); - - if diagnose_doctor_assistant_status(&before) { - append_step( - &mut steps, - "repair.noop", - "No automatic repairs needed", - true, - "The primary gateway is already healthy", - None, - ); - return Ok(doctor_assistant_completed_result( - attempted_at, - "temporary".into(), - selected_issue_ids, - applied_issue_ids, - skipped_issue_ids, - failed_issue_ids, - steps, - before.clone(), - before, - )); - } - - if !diagnose_doctor_assistant_status(¤t) { - let temp_profile = choose_temp_gateway_profile_name(); - let temp_port = choose_temp_gateway_port(resolve_main_port_from_diagnosis(¤t)); - emit_doctor_assistant_progress( - &app, - &run_id, - "bootstrap_temp_gateway", - "Bootstrapping temporary gateway", - 0.56, - 0, - None, - None, - ); - upsert_doctor_temp_gateway_record( - &paths, - build_temp_gateway_record( - &host_id, - &temp_profile, - temp_port, - "bootstrapping", - resolve_main_port_from_diagnosis(¤t), - Some("bootstrap".into()), - ), - )?; - - let mut temp_flow = async { - run_remote_temp_gateway_action( - &pool, - &host_id, - RescueBotAction::Set, - &temp_profile, - temp_port, - true, - 
&mut steps, - "temp.setup", - ) - .await?; - let main_root = resolve_remote_main_root(&pool, &host_id).await; - if let Err(error) = write_remote_temp_gateway_marker( - &pool, - &host_id, - &main_root, - &host_id, - &temp_profile, - ) - .await - { - append_step( - &mut steps, - "temp.marker", - "Mark temporary gateway ownership", - false, - error, - None, - ); - } - emit_doctor_assistant_progress( - &app, - &run_id, - "bootstrap_temp_gateway", - "Syncing provider configuration into temporary gateway", - 0.58, - 0, - None, - None, - ); - let (main_root, temp_root, donor_cfg) = sync_remote_temp_gateway_provider_context( - &pool, - &host_id, - &temp_profile, - temp_provider_profile_id.as_deref(), - &mut steps, - ) - .await?; - let mut provider_identity = None; - if let Err(error) = probe_remote_temp_gateway_agent_smoke( - &pool, - &host_id, - &temp_profile, - &mut steps, - ) - .await - { - let should_retry_from_remote_auth_store = temp_provider_profile_id.is_none() - && doctor_assistant_extract_temp_provider_setup_reason(&error).is_some(); - if !should_retry_from_remote_auth_store { - return Err(error); - } - emit_doctor_assistant_progress( - &app, - &run_id, - "bootstrap_temp_gateway", - "Rebuilding temporary gateway provider from remote auth store", - 0.62, - 0, - None, - None, - ); - rebuild_remote_temp_gateway_provider_context_from_auth_store( - &pool, - &host_id, - &main_root, - &temp_root, - &donor_cfg, - &mut steps, - ) - .await?; - probe_remote_temp_gateway_agent_smoke( - &pool, - &host_id, - &temp_profile, - &mut steps, - ) - .await - .map(|identity| provider_identity = Some(identity))?; - } else { - provider_identity = steps - .iter() - .rev() - .find(|step| step.id == "temp.probe.agent.identity") - .and_then(|step| { - let detail = step.detail.trim(); - detail - .strip_prefix("Temporary gateway replied using ") - .and_then(|value| value.split_once('/')) - .map(|(provider, model)| (provider.to_string(), model.to_string())) - }); - } - if let Some((provider, 
model)) = provider_identity.as_ref() { - emit_doctor_assistant_progress( - &app, - &run_id, - "bootstrap_temp_gateway", - format!("Temporary gateway ready: {provider}/{model}"), - 0.64, - 0, - None, - None, - ); - } - upsert_doctor_temp_gateway_record( - &paths, - build_temp_gateway_record( - &host_id, - &temp_profile, - temp_port, - "repairing", - resolve_main_port_from_diagnosis(¤t), - Some("repair".into()), - ), - )?; - - if DOCTOR_ASSISTANT_REMOTE_SKIP_AGENT_REPAIR { - append_step( - &mut steps, - "temp.debug.skip_agent_repair", - "Skip temporary gateway repair loop", - true, - "Remote Doctor debug mode leaves the primary gateway unchanged after temp bootstrap so the temporary gateway configuration can be inspected in isolation.", - None, - ); - } else { - for round in 1..=DOCTOR_ASSISTANT_TEMP_REPAIR_ROUNDS { - run_remote_temp_gateway_agent_repair_round( - &pool, - &host_id, - &app, - &run_id, - &temp_profile, - ¤t, - round, - &mut steps, - ) - .await?; - let next = diagnose_doctor_assistant_remote_impl( - &pool, - &host_id, - &app, - &run_id, - DOCTOR_ASSISTANT_TARGET_PROFILE, - ) - .await?; - for (issue_id, label) in collect_resolved_issues(¤t, &next) { - merge_issue_lists(&mut applied_issue_ids, std::iter::once(issue_id.clone())); - emit_doctor_assistant_progress( - &app, - &run_id, - "agent_repair", - format!("{label} fixed"), - 0.6 + (round as f32 * 0.03), - round, - Some(issue_id), - Some(label), - ); - } - current = next; - if diagnose_doctor_assistant_status(¤t) { - break; - } - } - } - Ok::<(), String>(()) - } - .await; - if let Err(error) = temp_flow.as_ref() { - if doctor_assistant_is_remote_exec_timeout(error) { - let recovered = remote_wait_for_primary_gateway_recovery_after_timeout( - &pool, &host_id, &app, &run_id, &mut steps, - ) - .await?; - if recovered { - temp_flow = Ok(()); - } else { - temp_flow = Err( - "Temporary gateway repair timed out before health could be confirmed. Open Gateway Logs and inspect the latest repair output." 
- .into(), - ); - } - } - } - let temp_flow_error = temp_flow.as_ref().err().cloned(); - let pending_reason = temp_flow_error - .as_ref() - .and_then(|error| doctor_assistant_extract_temp_provider_setup_reason(error)); - - emit_doctor_assistant_progress( - &app, - &run_id, - "cleanup", - "Cleaning up temporary gateway", - 0.94, - 0, - None, - None, - ); - let cleanup_result = run_remote_temp_gateway_action( - &pool, - &host_id, - RescueBotAction::Unset, - &temp_profile, - temp_port, - false, - &mut steps, - "temp.cleanup", - ) - .await; - let _ = remove_doctor_temp_gateway_record(&paths, &host_id, &temp_profile); - if let Err(error) = cleanup_result { - append_step( - &mut steps, - "temp.cleanup.error", - "Cleanup temporary gateway", - false, - error, - None, - ); - } - let main_root = resolve_remote_main_root(&pool, &host_id).await; - match prune_remote_temp_gateway_profile_roots(&pool, &host_id, &main_root).await { - Ok(removed) => append_step( - &mut steps, - "temp.cleanup.roots", - "Delete temporary gateway profiles", - true, - if removed.is_empty() { - "No temporary gateway profiles remained on disk".into() - } else { - format!( - "Removed {} temporary gateway profile directorie(s)", - removed.len() - ) - }, - None, - ), - Err(error) => append_step( - &mut steps, - "temp.cleanup.roots", - "Delete temporary gateway profiles", - false, - error, - None, - ), - } - if temp_flow_error.is_some() || !diagnose_doctor_assistant_status(¤t) { - let fallback_reason = pending_reason - .clone() - .or(temp_flow_error.clone()) - .unwrap_or_else(|| { - "Temporary gateway repair finished with remaining issues".into() - }); - match fallback_restore_remote_primary_config( - &pool, - &host_id, - &app, - &run_id, - &mut steps, - &fallback_reason, - ) - .await - { - Ok(Some(next)) => { - for (issue_id, label) in collect_resolved_issues(¤t, &next) { - merge_issue_lists( - &mut applied_issue_ids, - std::iter::once(issue_id.clone()), - ); - emit_doctor_assistant_progress( - &app, - 
&run_id, - "cleanup", - format!("{label} fixed"), - 0.94, - 0, - Some(issue_id), - Some(label), - ); - } - current = next - } - Ok(None) => {} - Err(error) => append_step( - &mut steps, - "repair.fallback.error", - "Fallback restore primary config", - false, - error, - None, - ), - } - } - if let Some(reason) = pending_reason { - if !diagnose_doctor_assistant_status(¤t) { - emit_doctor_assistant_progress( - &app, &run_id, "cleanup", &reason, 0.96, 0, None, None, - ); - return Ok(doctor_assistant_pending_temp_provider_result( - attempted_at, - temp_profile, - selected_issue_ids.clone(), - applied_issue_ids.clone(), - skipped_issue_ids.clone(), - selected_issue_ids - .iter() - .filter(|id| !applied_issue_ids.contains(id)) - .cloned() - .collect(), - steps, - before, - current, - temp_provider_profile_id, - reason, - )); - } - } - } - - let after = diagnose_doctor_assistant_remote_impl( - &pool, - &host_id, - &app, - &run_id, - DOCTOR_ASSISTANT_TARGET_PROFILE, - ) - .await?; - for (issue_id, _label) in collect_resolved_issues(¤t, &after) { - merge_issue_lists(&mut applied_issue_ids, std::iter::once(issue_id)); - } - let remaining = after - .issues - .iter() - .map(|issue| issue.id.clone()) - .collect::>(); - failed_issue_ids = selected_issue_ids - .iter() - .filter(|id| remaining.contains(id)) - .cloned() - .collect(); - - emit_doctor_assistant_progress( - &app, - &run_id, - "cleanup", - if diagnose_doctor_assistant_status(&after) { - "Repair complete" - } else { - "Repair finished with remaining issues" - }, - 1.0, - 0, - None, - None, - ); - - Ok(doctor_assistant_completed_result( - attempted_at, - current.rescue_profile.clone(), - selected_issue_ids, - applied_issue_ids, - skipped_issue_ids, - failed_issue_ids, - steps, - before, - after, - )) } fn resolve_main_port_from_diagnosis(diagnosis: &RescuePrimaryDiagnosisResult) -> u16 { diff --git a/src-tauri/src/commands/gateway.rs b/src-tauri/src/commands/gateway.rs index ce38ceeb..e75dd4fe 100644 --- 
a/src-tauri/src/commands/gateway.rs +++ b/src-tauri/src/commands/gateway.rs @@ -5,17 +5,21 @@ pub async fn remote_restart_gateway( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - pool.exec_login(&host_id, "openclaw gateway restart") - .await?; - Ok(true) + timed_async!("remote_restart_gateway", { + pool.exec_login(&host_id, "openclaw gateway restart") + .await?; + Ok(true) + }) } #[tauri::command] pub async fn restart_gateway() -> Result { - tauri::async_runtime::spawn_blocking(move || { - run_openclaw_raw(&["gateway", "restart"])?; - Ok(true) + timed_async!("restart_gateway", { + tauri::async_runtime::spawn_blocking(move || { + run_openclaw_raw(&["gateway", "restart"])?; + Ok(true) + }) + .await + .map_err(|e| e.to_string())? }) - .await - .map_err(|e| e.to_string())? } diff --git a/src-tauri/src/commands/instance.rs b/src-tauri/src/commands/instance.rs index 421c903e..080dd83e 100644 --- a/src-tauri/src/commands/instance.rs +++ b/src-tauri/src/commands/instance.rs @@ -2,70 +2,80 @@ use super::*; #[tauri::command] pub fn set_active_openclaw_home(path: Option) -> Result { - crate::cli_runner::set_active_openclaw_home_override(path)?; - Ok(true) + timed_sync!("set_active_openclaw_home", { + crate::cli_runner::set_active_openclaw_home_override(path)?; + Ok(true) + }) } #[tauri::command] pub fn set_active_clawpal_data_dir(path: Option) -> Result { - crate::cli_runner::set_active_clawpal_data_override(path)?; - Ok(true) + timed_sync!("set_active_clawpal_data_dir", { + crate::cli_runner::set_active_clawpal_data_override(path)?; + Ok(true) + }) } #[tauri::command] pub fn local_openclaw_config_exists(openclaw_home: String) -> Result { - let home = openclaw_home.trim(); - if home.is_empty() { - return Ok(false); - } - let expanded = shellexpand::tilde(home).to_string(); - let config_path = PathBuf::from(expanded) - .join(".openclaw") - .join("openclaw.json"); - Ok(config_path.exists()) + timed_sync!("local_openclaw_config_exists", { + let home = 
openclaw_home.trim(); + if home.is_empty() { + return Ok(false); + } + let expanded = shellexpand::tilde(home).to_string(); + let config_path = PathBuf::from(expanded) + .join(".openclaw") + .join("openclaw.json"); + Ok(config_path.exists()) + }) } #[tauri::command] pub fn local_openclaw_cli_available() -> Result { - Ok(run_openclaw_raw(&["--version"]).is_ok()) + timed_sync!("local_openclaw_cli_available", { + Ok(run_openclaw_raw(&["--version"]).is_ok()) + }) } #[tauri::command] pub fn delete_local_instance_home(openclaw_home: String) -> Result { - let home = openclaw_home.trim(); - if home.is_empty() { - return Err("openclaw_home is required".to_string()); - } - let expanded = shellexpand::tilde(home).to_string(); - let target = PathBuf::from(expanded); - if !target.exists() { - return Ok(true); - } + timed_sync!("delete_local_instance_home", { + let home = openclaw_home.trim(); + if home.is_empty() { + return Err("openclaw_home is required".to_string()); + } + let expanded = shellexpand::tilde(home).to_string(); + let target = PathBuf::from(expanded); + if !target.exists() { + return Ok(true); + } - let canonical_target = target - .canonicalize() - .map_err(|e| format!("failed to resolve target path: {e}"))?; - let user_home = - dirs::home_dir().ok_or_else(|| "failed to resolve HOME directory".to_string())?; - let allowed_root = user_home.join(".clawpal"); - let canonical_allowed_root = allowed_root - .canonicalize() - .map_err(|e| format!("failed to resolve ~/.clawpal path: {e}"))?; - - if !canonical_target.starts_with(&canonical_allowed_root) { - return Err("refuse to delete path outside ~/.clawpal".to_string()); - } - if canonical_target == canonical_allowed_root { - return Err("refuse to delete ~/.clawpal root".to_string()); - } + let canonical_target = target + .canonicalize() + .map_err(|e| format!("failed to resolve target path: {e}"))?; + let user_home = + dirs::home_dir().ok_or_else(|| "failed to resolve HOME directory".to_string())?; + let allowed_root 
= user_home.join(".clawpal"); + let canonical_allowed_root = allowed_root + .canonicalize() + .map_err(|e| format!("failed to resolve ~/.clawpal path: {e}"))?; + + if !canonical_target.starts_with(&canonical_allowed_root) { + return Err("refuse to delete path outside ~/.clawpal".to_string()); + } + if canonical_target == canonical_allowed_root { + return Err("refuse to delete ~/.clawpal root".to_string()); + } - fs::remove_dir_all(&canonical_target).map_err(|e| { - format!( - "failed to delete '{}': {e}", - canonical_target.to_string_lossy() - ) - })?; - Ok(true) + fs::remove_dir_all(&canonical_target).map_err(|e| { + format!( + "failed to delete '{}': {e}", + canonical_target.to_string_lossy() + ) + })?; + Ok(true) + }) } #[derive(Debug, Serialize, Deserialize)] @@ -137,7 +147,9 @@ pub async fn ensure_access_profile( instance_id: String, transport: String, ) -> Result { - ensure_access_profile_impl(instance_id, transport).await + timed_async!("ensure_access_profile", { + ensure_access_profile_impl(instance_id, transport).await + }) } pub async fn ensure_access_profile_for_test( @@ -165,64 +177,71 @@ pub async fn record_install_experience( goal: String, store: State<'_, InstallSessionStore>, ) -> Result { - let id = session_id.trim(); - if id.is_empty() { - return Err("session_id is required".to_string()); - } - let session = store - .get(id)? - .ok_or_else(|| format!("install session not found: {id}"))?; - if !matches!(session.state, InstallState::Ready) { - return Err(format!( - "install session is not ready: {}", - session.state.as_str() - )); - } + timed_async!("record_install_experience", { + let id = session_id.trim(); + if id.is_empty() { + return Err("session_id is required".to_string()); + } + let session = store + .get(id)? 
+ .ok_or_else(|| format!("install session not found: {id}"))?; + if !matches!(session.state, InstallState::Ready) { + return Err(format!( + "install session is not ready: {}", + session.state.as_str() + )); + } - let transport = session.method.as_str().to_string(); - let paths = resolve_paths(); - let discovery_store = AccessDiscoveryStore::new(paths.clawpal_dir.join("access-discovery")); - let profile = discovery_store.load_profile(&instance_id)?; - let successful_chain = profile.map(|p| p.working_chain).unwrap_or_default(); - let commands = value_array_as_strings(session.artifacts.get("executed_commands")); - - let experience = ExecutionExperience { - instance_id: instance_id.clone(), - goal, - transport, - method: session.method.as_str().to_string(), - commands, - successful_chain, - recorded_at: unix_timestamp_secs(), - }; - let total_count = discovery_store.save_experience(experience)?; - Ok(RecordInstallExperienceResult { - saved: true, - total_count, + let transport = session.method.as_str().to_string(); + let paths = resolve_paths(); + let discovery_store = AccessDiscoveryStore::new(paths.clawpal_dir.join("access-discovery")); + let profile = discovery_store.load_profile(&instance_id)?; + let successful_chain = profile.map(|p| p.working_chain).unwrap_or_default(); + let commands = value_array_as_strings(session.artifacts.get("executed_commands")); + + let experience = ExecutionExperience { + instance_id: instance_id.clone(), + goal, + transport, + method: session.method.as_str().to_string(), + commands, + successful_chain, + recorded_at: unix_timestamp_secs(), + }; + let total_count = discovery_store.save_experience(experience)?; + Ok(RecordInstallExperienceResult { + saved: true, + total_count, + }) }) } #[tauri::command] pub fn list_registered_instances() -> Result, String> { - let registry = clawpal_core::instance::InstanceRegistry::load().map_err(|e| e.to_string())?; - // Best-effort self-heal: persist normalized instance ids (e.g., legacy empty SSH 
ids). - let _ = registry.save(); - Ok(registry.list()) + timed_sync!("list_registered_instances", { + let registry = + clawpal_core::instance::InstanceRegistry::load().map_err(|e| e.to_string())?; + // Best-effort self-heal: persist normalized instance ids (e.g., legacy empty SSH ids). + let _ = registry.save(); + Ok(registry.list()) + }) } #[tauri::command] pub fn delete_registered_instance(instance_id: String) -> Result { - let id = instance_id.trim(); - if id.is_empty() || id == "local" { - return Ok(false); - } - let mut registry = - clawpal_core::instance::InstanceRegistry::load().map_err(|e| e.to_string())?; - let removed = registry.remove(id).is_some(); - if removed { - registry.save().map_err(|e| e.to_string())?; - } - Ok(removed) + timed_sync!("delete_registered_instance", { + let id = instance_id.trim(); + if id.is_empty() || id == "local" { + return Ok(false); + } + let mut registry = + clawpal_core::instance::InstanceRegistry::load().map_err(|e| e.to_string())?; + let removed = registry.remove(id).is_some(); + if removed { + registry.save().map_err(|e| e.to_string())?; + } + Ok(removed) + }) } #[tauri::command] @@ -231,9 +250,11 @@ pub async fn connect_docker_instance( label: Option, instance_id: Option, ) -> Result { - clawpal_core::connect::connect_docker(&home, label.as_deref(), instance_id.as_deref()) - .await - .map_err(|e| e.to_string()) + timed_async!("connect_docker_instance", { + clawpal_core::connect::connect_docker(&home, label.as_deref(), instance_id.as_deref()) + .await + .map_err(|e| e.to_string()) + }) } #[tauri::command] @@ -242,36 +263,40 @@ pub async fn connect_local_instance( label: Option, instance_id: Option, ) -> Result { - clawpal_core::connect::connect_local(&home, label.as_deref(), instance_id.as_deref()) - .await - .map_err(|e| e.to_string()) + timed_async!("connect_local_instance", { + clawpal_core::connect::connect_local(&home, label.as_deref(), instance_id.as_deref()) + .await + .map_err(|e| e.to_string()) + }) } 
#[tauri::command] pub async fn connect_ssh_instance( host_id: String, ) -> Result { - let hosts = read_hosts_from_registry()?; - let host = hosts - .into_iter() - .find(|h| h.id == host_id) - .ok_or_else(|| format!("No SSH host config with id: {host_id}"))?; - // Register the SSH host as an instance in the instance registry - // (skip the actual SSH connectivity probe — the caller already connected) - let instance = clawpal_core::instance::Instance { - id: host.id.clone(), - instance_type: clawpal_core::instance::InstanceType::RemoteSsh, - label: host.label.clone(), - openclaw_home: None, - clawpal_data_dir: None, - ssh_host_config: Some(host), - }; - let mut registry = - clawpal_core::instance::InstanceRegistry::load().map_err(|e| e.to_string())?; - let _ = registry.remove(&instance.id); - registry.add(instance.clone()).map_err(|e| e.to_string())?; - registry.save().map_err(|e| e.to_string())?; - Ok(instance) + timed_async!("connect_ssh_instance", { + let hosts = read_hosts_from_registry()?; + let host = hosts + .into_iter() + .find(|h| h.id == host_id) + .ok_or_else(|| format!("No SSH host config with id: {host_id}"))?; + // Register the SSH host as an instance in the instance registry + // (skip the actual SSH connectivity probe — the caller already connected) + let instance = clawpal_core::instance::Instance { + id: host.id.clone(), + instance_type: clawpal_core::instance::InstanceType::RemoteSsh, + label: host.label.clone(), + openclaw_home: None, + clawpal_data_dir: None, + ssh_host_config: Some(host), + }; + let mut registry = + clawpal_core::instance::InstanceRegistry::load().map_err(|e| e.to_string())?; + let _ = registry.remove(&instance.id); + registry.add(instance.clone()).map_err(|e| e.to_string())?; + registry.save().map_err(|e| e.to_string())?; + Ok(instance) + }) } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -363,112 +388,114 @@ pub fn migrate_legacy_instances( legacy_docker_instances: Vec, legacy_open_tab_ids: Vec, ) -> Result { - let paths 
= resolve_paths(); - let mut registry = - clawpal_core::instance::InstanceRegistry::load().map_err(|e| e.to_string())?; - - // Ensure local instance exists for old users. - if registry.get("local").is_none() { - upsert_registry_instance( - &mut registry, - clawpal_core::instance::Instance { - id: "local".to_string(), - instance_type: clawpal_core::instance::InstanceType::Local, - label: "Local".to_string(), - openclaw_home: None, - clawpal_data_dir: None, - ssh_host_config: None, - }, - )?; - } - - let imported_ssh_hosts = migrate_legacy_ssh_file(&paths, &mut registry)?; - - let mut imported_docker_instances = 0usize; - for docker in legacy_docker_instances { - let id = docker.id.trim(); - if id.is_empty() { - continue; - } - let label = if docker.label.trim().is_empty() { - fallback_label_from_instance_id(id) - } else { - docker.label.clone() - }; - upsert_registry_instance( - &mut registry, - clawpal_core::instance::Instance { - id: id.to_string(), - instance_type: clawpal_core::instance::InstanceType::Docker, - label, - openclaw_home: docker.openclaw_home.clone(), - clawpal_data_dir: docker.clawpal_data_dir.clone(), - ssh_host_config: None, - }, - )?; - imported_docker_instances += 1; - } + timed_sync!("migrate_legacy_instances", { + let paths = resolve_paths(); + let mut registry = + clawpal_core::instance::InstanceRegistry::load().map_err(|e| e.to_string())?; - let mut imported_open_tab_instances = 0usize; - for tab_id in legacy_open_tab_ids { - let id = tab_id.trim(); - if id.is_empty() { - continue; - } - if registry.get(id).is_some() { - continue; - } - if id == "local" { - continue; - } - if id.starts_with("docker:") { + // Ensure local instance exists for old users. 
+ if registry.get("local").is_none() { upsert_registry_instance( &mut registry, clawpal_core::instance::Instance { - id: id.to_string(), - instance_type: clawpal_core::instance::InstanceType::Docker, - label: fallback_label_from_instance_id(id), + id: "local".to_string(), + instance_type: clawpal_core::instance::InstanceType::Local, + label: "Local".to_string(), openclaw_home: None, clawpal_data_dir: None, ssh_host_config: None, }, )?; - imported_open_tab_instances += 1; - continue; } - if id.starts_with("ssh:") { - let host_alias = id.strip_prefix("ssh:").unwrap_or("").to_string(); + + let imported_ssh_hosts = migrate_legacy_ssh_file(&paths, &mut registry)?; + + let mut imported_docker_instances = 0usize; + for docker in legacy_docker_instances { + let id = docker.id.trim(); + if id.is_empty() { + continue; + } + let label = if docker.label.trim().is_empty() { + fallback_label_from_instance_id(id) + } else { + docker.label.clone() + }; upsert_registry_instance( &mut registry, clawpal_core::instance::Instance { id: id.to_string(), - instance_type: clawpal_core::instance::InstanceType::RemoteSsh, - label: fallback_label_from_instance_id(id), - openclaw_home: None, - clawpal_data_dir: None, - ssh_host_config: Some(clawpal_core::instance::SshHostConfig { - id: id.to_string(), - label: fallback_label_from_instance_id(id), - host: host_alias, - port: 22, - username: String::new(), - auth_method: "ssh_config".to_string(), - key_path: None, - password: None, - passphrase: None, - }), + instance_type: clawpal_core::instance::InstanceType::Docker, + label, + openclaw_home: docker.openclaw_home.clone(), + clawpal_data_dir: docker.clawpal_data_dir.clone(), + ssh_host_config: None, }, )?; - imported_open_tab_instances += 1; + imported_docker_instances += 1; + } + + let mut imported_open_tab_instances = 0usize; + for tab_id in legacy_open_tab_ids { + let id = tab_id.trim(); + if id.is_empty() { + continue; + } + if registry.get(id).is_some() { + continue; + } + if id == "local" 
{ + continue; + } + if id.starts_with("docker:") { + upsert_registry_instance( + &mut registry, + clawpal_core::instance::Instance { + id: id.to_string(), + instance_type: clawpal_core::instance::InstanceType::Docker, + label: fallback_label_from_instance_id(id), + openclaw_home: None, + clawpal_data_dir: None, + ssh_host_config: None, + }, + )?; + imported_open_tab_instances += 1; + continue; + } + if id.starts_with("ssh:") { + let host_alias = id.strip_prefix("ssh:").unwrap_or("").to_string(); + upsert_registry_instance( + &mut registry, + clawpal_core::instance::Instance { + id: id.to_string(), + instance_type: clawpal_core::instance::InstanceType::RemoteSsh, + label: fallback_label_from_instance_id(id), + openclaw_home: None, + clawpal_data_dir: None, + ssh_host_config: Some(clawpal_core::instance::SshHostConfig { + id: id.to_string(), + label: fallback_label_from_instance_id(id), + host: host_alias, + port: 22, + username: String::new(), + auth_method: "ssh_config".to_string(), + key_path: None, + password: None, + passphrase: None, + }), + }, + )?; + imported_open_tab_instances += 1; + } } - } - registry.save().map_err(|e| e.to_string())?; - let total_instances = registry.list().len(); - Ok(LegacyMigrationResult { - imported_ssh_hosts, - imported_docker_instances, - imported_open_tab_instances, - total_instances, + registry.save().map_err(|e| e.to_string())?; + let total_instances = registry.list().len(); + Ok(LegacyMigrationResult { + imported_ssh_hosts, + imported_docker_instances, + imported_open_tab_instances, + total_instances, + }) }) } diff --git a/src-tauri/src/commands/logs.rs b/src-tauri/src/commands/logs.rs index 4b5b5ee5..cf88facf 100644 --- a/src-tauri/src/commands/logs.rs +++ b/src-tauri/src/commands/logs.rs @@ -70,18 +70,20 @@ pub async fn remote_read_app_log( host_id: String, lines: Option, ) -> Result { - let n = clamp_lines(lines); - let cmd = clawpal_core::doctor::remote_clawpal_log_tail_script(n, "app"); - log_debug(&format!( - 
"remote_read_app_log start host_id={host_id} lines={n} cmd={cmd}" - )); - let result = pool.exec(&host_id, &cmd).await.map_err(|error| { + timed_async!("remote_read_app_log", { + let n = clamp_lines(lines); + let cmd = clawpal_core::doctor::remote_clawpal_log_tail_script(n, "app"); log_debug(&format!( - "remote_read_app_log failed host_id={host_id} error={error}" + "remote_read_app_log start host_id={host_id} lines={n} cmd={cmd}" )); - error - })?; - Ok(result.stdout) + let result = pool.exec(&host_id, &cmd).await.map_err(|error| { + log_debug(&format!( + "remote_read_app_log failed host_id={host_id} error={error}" + )); + error + })?; + Ok(result.stdout) + }) } #[tauri::command] @@ -90,18 +92,20 @@ pub async fn remote_read_error_log( host_id: String, lines: Option, ) -> Result { - let n = clamp_lines(lines); - let cmd = clawpal_core::doctor::remote_clawpal_log_tail_script(n, "error"); - log_debug(&format!( - "remote_read_error_log start host_id={host_id} lines={n} cmd={cmd}" - )); - let result = pool.exec(&host_id, &cmd).await.map_err(|error| { + timed_async!("remote_read_error_log", { + let n = clamp_lines(lines); + let cmd = clawpal_core::doctor::remote_clawpal_log_tail_script(n, "error"); log_debug(&format!( - "remote_read_error_log failed host_id={host_id} error={error}" + "remote_read_error_log start host_id={host_id} lines={n} cmd={cmd}" )); - error - })?; - Ok(result.stdout) + let result = pool.exec(&host_id, &cmd).await.map_err(|error| { + log_debug(&format!( + "remote_read_error_log failed host_id={host_id} error={error}" + )); + error + })?; + Ok(result.stdout) + }) } #[tauri::command] @@ -110,18 +114,20 @@ pub async fn remote_read_helper_log( host_id: String, lines: Option, ) -> Result { - let n = clamp_lines(lines); - let cmd = clawpal_core::doctor::remote_clawpal_log_tail_script(n, "helper"); - log_debug(&format!( - "remote_read_helper_log start host_id={host_id} lines={n} cmd={cmd}" - )); - let result = pool.exec(&host_id, &cmd).await.map_err(|error| 
{ + timed_async!("remote_read_helper_log", { + let n = clamp_lines(lines); + let cmd = clawpal_core::doctor::remote_clawpal_log_tail_script(n, "helper"); log_debug(&format!( - "remote_read_helper_log failed host_id={host_id} error={error}" + "remote_read_helper_log start host_id={host_id} lines={n} cmd={cmd}" )); - error - })?; - Ok(result.stdout) + let result = pool.exec(&host_id, &cmd).await.map_err(|error| { + log_debug(&format!( + "remote_read_helper_log failed host_id={host_id} error={error}" + )); + error + })?; + Ok(result.stdout) + }) } #[tauri::command] @@ -130,18 +136,20 @@ pub async fn remote_read_gateway_log( host_id: String, lines: Option, ) -> Result { - let n = clamp_lines(lines); - let cmd = remote_gateway_log_command(n); - log_debug(&format!( - "remote_read_gateway_log start host_id={host_id} lines={n} cmd={cmd}" - )); - let result = pool.exec(&host_id, &cmd).await.map_err(|error| { + timed_async!("remote_read_gateway_log", { + let n = clamp_lines(lines); + let cmd = remote_gateway_log_command(n); log_debug(&format!( - "remote_read_gateway_log failed host_id={host_id} error={error}" + "remote_read_gateway_log start host_id={host_id} lines={n} cmd={cmd}" )); - error - })?; - Ok(result.stdout) + let result = pool.exec(&host_id, &cmd).await.map_err(|error| { + log_debug(&format!( + "remote_read_gateway_log failed host_id={host_id} error={error}" + )); + error + })?; + Ok(result.stdout) + }) } #[tauri::command] @@ -150,16 +158,18 @@ pub async fn remote_read_gateway_error_log( host_id: String, lines: Option, ) -> Result { - let n = clamp_lines(lines); - let cmd = clawpal_core::doctor::remote_gateway_error_log_tail_script(n); - log_debug(&format!( - "remote_read_gateway_error_log start host_id={host_id} lines={n} cmd={cmd}" - )); - let result = pool.exec(&host_id, &cmd).await.map_err(|error| { + timed_async!("remote_read_gateway_error_log", { + let n = clamp_lines(lines); + let cmd = clawpal_core::doctor::remote_gateway_error_log_tail_script(n); 
log_debug(&format!( - "remote_read_gateway_error_log failed host_id={host_id} error={error}" + "remote_read_gateway_error_log start host_id={host_id} lines={n} cmd={cmd}" )); - error - })?; - Ok(result.stdout) + let result = pool.exec(&host_id, &cmd).await.map_err(|error| { + log_debug(&format!( + "remote_read_gateway_error_log failed host_id={host_id} error={error}" + )); + error + })?; + Ok(result.stdout) + }) } diff --git a/src-tauri/src/commands/mod.rs b/src-tauri/src/commands/mod.rs index 6a35c54a..eb0b4849 100644 --- a/src-tauri/src/commands/mod.rs +++ b/src-tauri/src/commands/mod.rs @@ -1,3 +1,25 @@ +/// Macro for wrapping synchronous command bodies with timing. +macro_rules! timed_sync { + ($name:expr, $body:block) => {{ + let __start = std::time::Instant::now(); + let __result = $body; + let __elapsed_ms = __start.elapsed().as_millis() as u64; + crate::commands::perf::record_timing($name, __elapsed_ms); + __result + }}; +} + +/// Macro for wrapping async command bodies with timing. +macro_rules! 
timed_async { + ($name:expr, $body:block) => {{ + let __start = std::time::Instant::now(); + let __result = $body; + let __elapsed_ms = __start.elapsed().as_millis() as u64; + crate::commands::perf::record_timing($name, __elapsed_ms); + __result + }}; +} + use serde::{Deserialize, Serialize}; use serde_json::{json, Map, Value}; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet, VecDeque}; @@ -44,6 +66,7 @@ pub mod instance; pub mod logs; pub mod model; pub mod overview; +pub mod perf; pub mod precheck; pub mod preferences; pub mod profiles; @@ -85,6 +108,8 @@ pub use model::*; #[allow(unused_imports)] pub use overview::*; #[allow(unused_imports)] +pub use perf::*; +#[allow(unused_imports)] pub use precheck::*; #[allow(unused_imports)] pub use preferences::*; diff --git a/src-tauri/src/commands/model.rs b/src-tauri/src/commands/model.rs index 70a4ab38..26c8b3a6 100644 --- a/src-tauri/src/commands/model.rs +++ b/src-tauri/src/commands/model.rs @@ -9,119 +9,131 @@ pub fn update_channel_config( allowlist: Vec, model: Option, ) -> Result { - if path.trim().is_empty() { - return Err("channel path is required".into()); - } - let paths = resolve_paths(); - let mut cfg = read_openclaw_config(&paths)?; - let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?; - set_nested_value( - &mut cfg, - &format!("{path}.type"), - channel_type.map(Value::String), - )?; - set_nested_value(&mut cfg, &format!("{path}.mode"), mode.map(Value::String))?; - let allowlist_values = allowlist.into_iter().map(Value::String).collect::>(); - set_nested_value( - &mut cfg, - &format!("{path}.allowlist"), - Some(Value::Array(allowlist_values)), - )?; - set_nested_value(&mut cfg, &format!("{path}.model"), model.map(Value::String))?; - write_config_with_snapshot(&paths, ¤t, &cfg, "update-channel")?; - Ok(true) + timed_sync!("update_channel_config", { + if path.trim().is_empty() { + return Err("channel path is required".into()); + } + let paths = resolve_paths(); + let mut 
cfg = read_openclaw_config(&paths)?; + let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?; + set_nested_value( + &mut cfg, + &format!("{path}.type"), + channel_type.map(Value::String), + )?; + set_nested_value(&mut cfg, &format!("{path}.mode"), mode.map(Value::String))?; + let allowlist_values = allowlist.into_iter().map(Value::String).collect::>(); + set_nested_value( + &mut cfg, + &format!("{path}.allowlist"), + Some(Value::Array(allowlist_values)), + )?; + set_nested_value(&mut cfg, &format!("{path}.model"), model.map(Value::String))?; + write_config_with_snapshot(&paths, ¤t, &cfg, "update-channel")?; + Ok(true) + }) } /// List current channel→agent bindings from config. #[tauri::command] pub fn delete_channel_node(path: String) -> Result { - if path.trim().is_empty() { - return Err("channel path is required".into()); - } - let paths = resolve_paths(); - let mut cfg = read_openclaw_config(&paths)?; - let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?; - let before = cfg.to_string(); - set_nested_value(&mut cfg, &path, None)?; - if cfg.to_string() == before { - return Ok(false); - } - write_config_with_snapshot(&paths, ¤t, &cfg, "delete-channel")?; - Ok(true) + timed_sync!("delete_channel_node", { + if path.trim().is_empty() { + return Err("channel path is required".into()); + } + let paths = resolve_paths(); + let mut cfg = read_openclaw_config(&paths)?; + let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?; + let before = cfg.to_string(); + set_nested_value(&mut cfg, &path, None)?; + if cfg.to_string() == before { + return Ok(false); + } + write_config_with_snapshot(&paths, ¤t, &cfg, "delete-channel")?; + Ok(true) + }) } #[tauri::command] pub fn set_global_model(model_value: Option) -> Result { - let paths = resolve_paths(); - let mut cfg = read_openclaw_config(&paths)?; - let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?; - let model = model_value - .map(|v| 
v.trim().to_string()) - .filter(|v| !v.is_empty()); - // If existing model is an object (has fallbacks etc.), only update "primary" inside it - if let Some(existing) = cfg.pointer_mut("/agents/defaults/model") { - if let Some(model_obj) = existing.as_object_mut() { - let sync_model_value = match model.clone() { - Some(v) => { - model_obj.insert("primary".into(), Value::String(v.clone())); - Some(v) - } - None => { - model_obj.remove("primary"); - None - } - }; - write_config_with_snapshot(&paths, ¤t, &cfg, "set-global-model")?; - maybe_sync_main_auth_for_model_value(&paths, sync_model_value)?; - return Ok(true); + timed_sync!("set_global_model", { + let paths = resolve_paths(); + let mut cfg = read_openclaw_config(&paths)?; + let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?; + let model = model_value + .map(|v| v.trim().to_string()) + .filter(|v| !v.is_empty()); + // If existing model is an object (has fallbacks etc.), only update "primary" inside it + if let Some(existing) = cfg.pointer_mut("/agents/defaults/model") { + if let Some(model_obj) = existing.as_object_mut() { + let sync_model_value = match model.clone() { + Some(v) => { + model_obj.insert("primary".into(), Value::String(v.clone())); + Some(v) + } + None => { + model_obj.remove("primary"); + None + } + }; + write_config_with_snapshot(&paths, ¤t, &cfg, "set-global-model")?; + maybe_sync_main_auth_for_model_value(&paths, sync_model_value)?; + return Ok(true); + } } - } - // Fallback: plain string or missing — set the whole value - set_nested_value(&mut cfg, "agents.defaults.model", model.map(Value::String))?; - write_config_with_snapshot(&paths, ¤t, &cfg, "set-global-model")?; - let model_to_sync = cfg - .pointer("/agents/defaults/model") - .and_then(read_model_value); - maybe_sync_main_auth_for_model_value(&paths, model_to_sync)?; - Ok(true) + // Fallback: plain string or missing — set the whole value + set_nested_value(&mut cfg, "agents.defaults.model", 
model.map(Value::String))?; + write_config_with_snapshot(&paths, ¤t, &cfg, "set-global-model")?; + let model_to_sync = cfg + .pointer("/agents/defaults/model") + .and_then(read_model_value); + maybe_sync_main_auth_for_model_value(&paths, model_to_sync)?; + Ok(true) + }) } #[tauri::command] pub fn set_agent_model(agent_id: String, model_value: Option) -> Result { - if agent_id.trim().is_empty() { - return Err("agent id is required".into()); - } - let paths = resolve_paths(); - let mut cfg = read_openclaw_config(&paths)?; - let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?; - let value = model_value - .map(|v| v.trim().to_string()) - .filter(|v| !v.is_empty()); - set_agent_model_value(&mut cfg, &agent_id, value)?; - write_config_with_snapshot(&paths, ¤t, &cfg, "set-agent-model")?; - Ok(true) + timed_sync!("set_agent_model", { + if agent_id.trim().is_empty() { + return Err("agent id is required".into()); + } + let paths = resolve_paths(); + let mut cfg = read_openclaw_config(&paths)?; + let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?; + let value = model_value + .map(|v| v.trim().to_string()) + .filter(|v| !v.is_empty()); + set_agent_model_value(&mut cfg, &agent_id, value)?; + write_config_with_snapshot(&paths, ¤t, &cfg, "set-agent-model")?; + Ok(true) + }) } #[tauri::command] pub fn set_channel_model(path: String, model_value: Option) -> Result { - if path.trim().is_empty() { - return Err("channel path is required".into()); - } - let paths = resolve_paths(); - let mut cfg = read_openclaw_config(&paths)?; - let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?; - let value = model_value - .map(|v| v.trim().to_string()) - .filter(|v| !v.is_empty()); - set_nested_value(&mut cfg, &format!("{path}.model"), value.map(Value::String))?; - write_config_with_snapshot(&paths, ¤t, &cfg, "set-channel-model")?; - Ok(true) + timed_sync!("set_channel_model", { + if path.trim().is_empty() { + return 
Err("channel path is required".into()); + } + let paths = resolve_paths(); + let mut cfg = read_openclaw_config(&paths)?; + let current = serde_json::to_string_pretty(&cfg).map_err(|e| e.to_string())?; + let value = model_value + .map(|v| v.trim().to_string()) + .filter(|v| !v.is_empty()); + set_nested_value(&mut cfg, &format!("{path}.model"), value.map(Value::String))?; + write_config_with_snapshot(&paths, ¤t, &cfg, "set-channel-model")?; + Ok(true) + }) } #[tauri::command] pub fn list_model_bindings() -> Result, String> { - let paths = resolve_paths(); - let cfg = read_openclaw_config(&paths)?; - let profiles = load_model_profiles(&paths); - Ok(collect_model_bindings(&cfg, &profiles)) + timed_sync!("list_model_bindings", { + let paths = resolve_paths(); + let cfg = read_openclaw_config(&paths)?; + let profiles = load_model_profiles(&paths); + Ok(collect_model_bindings(&cfg, &profiles)) + }) } diff --git a/src-tauri/src/commands/overview.rs b/src-tauri/src/commands/overview.rs index e5a3e93c..c8f8c16b 100644 --- a/src-tauri/src/commands/overview.rs +++ b/src-tauri/src/commands/overview.rs @@ -292,12 +292,14 @@ async fn remote_channels_runtime_snapshot_impl( #[tauri::command] pub async fn get_instance_config_snapshot() -> Result { - tauri::async_runtime::spawn_blocking(|| { - let cfg = read_openclaw_config(&resolve_paths())?; - Ok(extract_instance_config_snapshot(&cfg)) + timed_async!("get_instance_config_snapshot", { + tauri::async_runtime::spawn_blocking(|| { + let cfg = read_openclaw_config(&resolve_paths())?; + Ok(extract_instance_config_snapshot(&cfg)) + }) + .await + .map_err(|error| error.to_string())? }) - .await - .map_err(|error| error.to_string())? 
} #[tauri::command] @@ -305,21 +307,25 @@ pub async fn remote_get_instance_config_snapshot( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - let (_, _, cfg) = remote_read_openclaw_config_text_and_json(&pool, &host_id).await?; - Ok(extract_instance_config_snapshot(&cfg)) + timed_async!("remote_get_instance_config_snapshot", { + let (_, _, cfg) = remote_read_openclaw_config_text_and_json(&pool, &host_id).await?; + Ok(extract_instance_config_snapshot(&cfg)) + }) } #[tauri::command] pub async fn get_instance_runtime_snapshot( cache: tauri::State<'_, crate::cli_runner::CliCache>, ) -> Result { - let status = get_status_light().await?; - let agents = list_agents_overview(cache).await?; - Ok(InstanceRuntimeSnapshot { - global_default_model: status.global_default_model.clone(), - fallback_models: status.fallback_models.clone(), - status, - agents, + timed_async!("get_instance_runtime_snapshot", { + let status = get_status_light().await?; + let agents = list_agents_overview(cache).await?; + Ok(InstanceRuntimeSnapshot { + global_default_model: status.global_default_model.clone(), + fallback_models: status.fallback_models.clone(), + status, + agents, + }) }) } @@ -328,17 +334,21 @@ pub async fn remote_get_instance_runtime_snapshot( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - remote_instance_runtime_snapshot_impl(&pool, &host_id).await + timed_async!("remote_get_instance_runtime_snapshot", { + remote_instance_runtime_snapshot_impl(&pool, &host_id).await + }) } #[tauri::command] pub async fn get_channels_config_snapshot() -> Result { - tauri::async_runtime::spawn_blocking(|| { - let cfg = read_openclaw_config(&resolve_paths())?; - extract_channels_config_snapshot(&cfg) + timed_async!("get_channels_config_snapshot", { + tauri::async_runtime::spawn_blocking(|| { + let cfg = read_openclaw_config(&resolve_paths())?; + extract_channels_config_snapshot(&cfg) + }) + .await + .map_err(|error| error.to_string())? 
}) - .await - .map_err(|error| error.to_string())? } #[tauri::command] @@ -346,26 +356,30 @@ pub async fn remote_get_channels_config_snapshot( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - let (_, _, cfg) = remote_read_openclaw_config_text_and_json(&pool, &host_id).await?; - extract_channels_config_snapshot(&cfg) + timed_async!("remote_get_channels_config_snapshot", { + let (_, _, cfg) = remote_read_openclaw_config_text_and_json(&pool, &host_id).await?; + extract_channels_config_snapshot(&cfg) + }) } #[tauri::command] pub async fn get_channels_runtime_snapshot( cache: tauri::State<'_, crate::cli_runner::CliCache>, ) -> Result { - let channels = list_channels_minimal(cache.clone()).await?; - let bindings = list_bindings(cache.clone()).await?; - let agents = list_agents_overview(cache).await?; - let bindings = serde_json::to_value(bindings) - .map_err(|error| error.to_string())? - .as_array() - .cloned() - .unwrap_or_default(); - Ok(ChannelsRuntimeSnapshot { - channels, - bindings, - agents, + timed_async!("get_channels_runtime_snapshot", { + let channels = list_channels_minimal(cache.clone()).await?; + let bindings = list_bindings(cache.clone()).await?; + let agents = list_agents_overview(cache).await?; + let bindings = serde_json::to_value(bindings) + .map_err(|error| error.to_string())? 
+ .as_array() + .cloned() + .unwrap_or_default(); + Ok(ChannelsRuntimeSnapshot { + channels, + bindings, + agents, + }) }) } @@ -374,14 +388,18 @@ pub async fn remote_get_channels_runtime_snapshot( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - remote_channels_runtime_snapshot_impl(&pool, &host_id).await + timed_async!("remote_get_channels_runtime_snapshot", { + remote_channels_runtime_snapshot_impl(&pool, &host_id).await + }) } #[tauri::command] pub fn get_cron_config_snapshot() -> Result { - let jobs = list_cron_jobs()?; - let jobs = jobs.as_array().cloned().unwrap_or_default(); - Ok(CronConfigSnapshot { jobs }) + timed_sync!("get_cron_config_snapshot", { + let jobs = list_cron_jobs()?; + let jobs = jobs.as_array().cloned().unwrap_or_default(); + Ok(CronConfigSnapshot { jobs }) + }) } #[tauri::command] @@ -389,17 +407,21 @@ pub async fn remote_get_cron_config_snapshot( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - let jobs = remote_list_cron_jobs(pool, host_id).await?; - let jobs = jobs.as_array().cloned().unwrap_or_default(); - Ok(CronConfigSnapshot { jobs }) + timed_async!("remote_get_cron_config_snapshot", { + let jobs = remote_list_cron_jobs(pool, host_id).await?; + let jobs = jobs.as_array().cloned().unwrap_or_default(); + Ok(CronConfigSnapshot { jobs }) + }) } #[tauri::command] pub async fn get_cron_runtime_snapshot() -> Result { - let jobs = list_cron_jobs()?; - let watchdog = get_watchdog_status().await?; - let jobs = jobs.as_array().cloned().unwrap_or_default(); - Ok(CronRuntimeSnapshot { jobs, watchdog }) + timed_async!("get_cron_runtime_snapshot", { + let jobs = list_cron_jobs()?; + let watchdog = get_watchdog_status().await?; + let jobs = jobs.as_array().cloned().unwrap_or_default(); + Ok(CronRuntimeSnapshot { jobs, watchdog }) + }) } #[tauri::command] @@ -407,12 +429,14 @@ pub async fn remote_get_cron_runtime_snapshot( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - let jobs = 
remote_list_cron_jobs(pool.clone(), host_id.clone()).await?; - let watchdog = remote_get_watchdog_status(pool, host_id).await?; - let jobs = jobs.as_array().cloned().unwrap_or_default(); - Ok(CronRuntimeSnapshot { - jobs, - watchdog: parse_remote_watchdog_value(watchdog), + timed_async!("remote_get_cron_runtime_snapshot", { + let jobs = remote_list_cron_jobs(pool.clone(), host_id.clone()).await?; + let watchdog = remote_get_watchdog_status(pool, host_id).await?; + let jobs = jobs.as_array().cloned().unwrap_or_default(); + Ok(CronRuntimeSnapshot { + jobs, + watchdog: parse_remote_watchdog_value(watchdog), + }) }) } diff --git a/src-tauri/src/commands/perf.rs b/src-tauri/src/commands/perf.rs new file mode 100644 index 00000000..9d57ed7f --- /dev/null +++ b/src-tauri/src/commands/perf.rs @@ -0,0 +1,280 @@ +use super::*; + +/// Metrics about the current process, exposed to the frontend and E2E tests. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ProcessMetrics { + /// Process ID + pub pid: u32, + /// Resident Set Size in bytes (physical memory used) + pub rss_bytes: u64, + /// Virtual memory size in bytes + pub vms_bytes: u64, + /// Process uptime in seconds + pub uptime_secs: f64, + /// Platform identifier + pub platform: String, +} + +/// Tracks elapsed time of a named operation and logs it. +/// Returns `(result, elapsed_ms)`. 
+pub fn trace_command(name: &str, f: F) -> (T, u64) +where + F: FnOnce() -> T, +{ + let start = Instant::now(); + let result = f(); + let elapsed_ms = start.elapsed().as_millis() as u64; + + let threshold_ms = if name.starts_with("remote_") || name.starts_with("ssh_") { + 2000 + } else { + 100 + }; + + if elapsed_ms > threshold_ms { + crate::logging::log_info(&format!( + "[perf] SLOW {} completed in {}ms (threshold: {}ms)", + name, elapsed_ms, threshold_ms + )); + } else { + crate::logging::log_info(&format!("[perf] {} completed in {}ms", name, elapsed_ms)); + } + + (result, elapsed_ms) +} + +/// Single perf sample emitted to the frontend via events or returned directly. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct PerfSample { + /// The command or operation name + pub name: String, + /// Elapsed time in milliseconds + pub elapsed_ms: u64, + /// Timestamp (Unix millis) when the sample was taken + pub timestamp: u64, + /// Whether the command exceeded its latency threshold + pub exceeded_threshold: bool, +} + +static APP_START: LazyLock = LazyLock::new(Instant::now); + +/// Initialize the start time — call this once during app setup. +pub fn init_perf_clock() { + // Force lazy evaluation so the clock starts ticking from app init, not first command. + let _ = *APP_START; +} + +/// Get the time since app start in milliseconds. +pub fn uptime_ms() -> u64 { + APP_START.elapsed().as_millis() as u64 +} + +#[tauri::command] +pub fn get_process_metrics() -> Result { + let pid = std::process::id(); + + let (rss_bytes, vms_bytes) = read_process_memory(pid)?; + + let uptime_secs = APP_START.elapsed().as_secs_f64(); + + Ok(ProcessMetrics { + pid, + rss_bytes, + vms_bytes, + uptime_secs, + platform: std::env::consts::OS.to_string(), + }) +} + +/// Read memory info for a given PID from the OS. 
+#[cfg(target_os = "linux")] +fn read_process_memory(pid: u32) -> Result<(u64, u64), String> { + let status_path = format!("/proc/{}/status", pid); + let content = fs::read_to_string(&status_path) + .map_err(|e| format!("Failed to read {}: {}", status_path, e))?; + + let mut rss: u64 = 0; + let mut vms: u64 = 0; + + for line in content.lines() { + if line.starts_with("VmRSS:") { + if let Some(val) = parse_proc_kb(line) { + rss = val * 1024; // Convert KB to bytes + } + } else if line.starts_with("VmSize:") { + if let Some(val) = parse_proc_kb(line) { + vms = val * 1024; + } + } + } + + Ok((rss, vms)) +} + +#[cfg(target_os = "linux")] +fn parse_proc_kb(line: &str) -> Option { + line.split_whitespace().nth(1)?.parse::().ok() +} + +#[cfg(target_os = "macos")] +fn read_process_memory(pid: u32) -> Result<(u64, u64), String> { + // Use `ps` as a portable fallback — mach_task_info requires unsafe FFI + let output = Command::new("ps") + .args(["-o", "rss=,vsz=", "-p", &pid.to_string()]) + .output() + .map_err(|e| format!("Failed to run ps: {}", e))?; + + let text = String::from_utf8_lossy(&output.stdout); + let parts: Vec<&str> = text.trim().split_whitespace().collect(); + if parts.len() >= 2 { + let rss_kb: u64 = parts[0].parse().unwrap_or(0); + let vms_kb: u64 = parts[1].parse().unwrap_or(0); + Ok((rss_kb * 1024, vms_kb * 1024)) + } else { + Err("Failed to parse ps output".to_string()) + } +} + +#[cfg(target_os = "windows")] +fn read_process_memory(_pid: u32) -> Result<(u64, u64), String> { + // Windows: use tasklist /FI to get memory info + let output = Command::new("tasklist") + .args(["/FI", &format!("PID eq {}", _pid), "/FO", "CSV", "/NH"]) + .output() + .map_err(|e| format!("Failed to run tasklist: {}", e))?; + + let text = String::from_utf8_lossy(&output.stdout); + // CSV format: "name","pid","session","session#","mem usage" + // mem usage is like "12,345 K" + for line in text.lines() { + let fields: Vec<&str> = line.split(',').collect(); + if fields.len() >= 5 { + 
+            let mem_str = fields[4].trim().trim_matches('"');
+            let mem_kb: u64 = mem_str
+                .replace(" K", "")
+                .replace(',', "")
+                .trim()
+                .parse()
+                .unwrap_or(0);
+            return Ok((mem_kb * 1024, 0)); // VMS not easily available
+        }
+    }
+
+    Ok((0, 0))
+}
+
+#[cfg(not(any(target_os = "linux", target_os = "macos", target_os = "windows")))]
+fn read_process_memory(_pid: u32) -> Result<(u64, u64), String> {
+    Ok((0, 0))
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_trace_command_returns_result_and_timing() {
+        let (result, elapsed) = trace_command("test_noop", || 42);
+        assert_eq!(result, 42);
+        // Should complete in well under 100ms
+        assert!(elapsed < 100, "noop took {}ms", elapsed);
+    }
+
+    #[test]
+    fn test_get_process_metrics_returns_valid_data() {
+        init_perf_clock();
+        let metrics = get_process_metrics().expect("should succeed");
+        assert!(metrics.pid > 0);
+        assert!(metrics.rss_bytes > 0, "RSS should be non-zero");
+        assert!(!metrics.platform.is_empty());
+    }
+
+    #[test]
+    fn test_uptime_increases() {
+        init_perf_clock();
+        let t1 = uptime_ms();
+        std::thread::sleep(std::time::Duration::from_millis(10));
+        let t2 = uptime_ms();
+        assert!(t2 > t1, "uptime should increase: {} vs {}", t1, t2);
+    }
+}
+
+// ── Global performance registry ──
+
+use std::sync::Arc;
+
+/// Thread-safe registry of command timing samples.
+static PERF_REGISTRY: LazyLock<Arc<Mutex<Vec<PerfSample>>>> =
+    LazyLock::new(|| Arc::new(Mutex::new(Vec::with_capacity(1024))));
+
+/// Record a timing sample into the global registry.
+pub fn record_timing(name: &str, elapsed_ms: u64) {
+    let ts = SystemTime::now()
+        .duration_since(UNIX_EPOCH)
+        .unwrap_or_default()
+        .as_millis() as u64;
+    let threshold = if name.starts_with("remote_") {
+        2000
+    } else {
+        100
+    };
+    let sample = PerfSample {
+        name: name.to_string(),
+        elapsed_ms,
+        timestamp: ts,
+        exceeded_threshold: elapsed_ms > threshold,
+    };
+    if let Ok(mut reg) = PERF_REGISTRY.lock() {
+        reg.push(sample);
+    }
+}
+
+/// Get all recorded timing samples and clear the registry.
+#[tauri::command]
+pub fn get_perf_timings() -> Result<Vec<PerfSample>, String> {
+    let mut reg = PERF_REGISTRY.lock().map_err(|e| e.to_string())?;
+    let samples = reg.drain(..).collect();
+    Ok(samples)
+}
+
+/// Get a summary report of all recorded timings grouped by command name.
+#[tauri::command]
+pub fn get_perf_report() -> Result<Value, String> {
+    let reg = PERF_REGISTRY.lock().map_err(|e| e.to_string())?;
+
+    let mut by_name: HashMap<String, Vec<u64>> = HashMap::new();
+    for s in reg.iter() {
+        by_name
+            .entry(s.name.clone())
+            .or_default()
+            .push(s.elapsed_ms);
+    }
+
+    let mut report = serde_json::Map::new();
+    for (name, mut times) in by_name {
+        times.sort();
+        let count = times.len();
+        let sum: u64 = times.iter().sum();
+        let p50 = times.get(count / 2).copied().unwrap_or(0);
+        let p95 = times
+            .get((count as f64 * 0.95) as usize)
+            .copied()
+            .unwrap_or(0);
+        let max = times.last().copied().unwrap_or(0);
+
+        report.insert(
+            name,
+            json!({
+                "count": count,
+                "p50_ms": p50,
+                "p95_ms": p95,
+                "max_ms": max,
+                "avg_ms": if count > 0 { sum / count as u64 } else { 0 },
+            }),
+        );
+    }
+
+    Ok(Value::Object(report))
+}
diff --git a/src-tauri/src/commands/precheck.rs b/src-tauri/src/commands/precheck.rs
index f5cbafa4..471cce89 100644
--- a/src-tauri/src/commands/precheck.rs
+++ b/src-tauri/src/commands/precheck.rs
@@ -5,17 +5,22 @@ use crate::ssh::SshConnectionPool;
 
 #[tauri::command]
 pub async fn precheck_registry() -> Result<Vec<PrecheckIssue>, String> {
-    let registry_path = clawpal_core::instance::registry_path();
- Ok(precheck::precheck_registry(®istry_path)) + timed_async!("precheck_registry", { + let registry_path = clawpal_core::instance::registry_path(); + Ok(precheck::precheck_registry(®istry_path)) + }) } #[tauri::command] pub async fn precheck_instance(instance_id: String) -> Result, String> { - let registry = clawpal_core::instance::InstanceRegistry::load().map_err(|e| e.to_string())?; - let instance = registry - .get(&instance_id) - .ok_or_else(|| format!("Instance not found: {instance_id}"))?; - Ok(precheck::precheck_instance_state(instance)) + timed_async!("precheck_instance", { + let registry = + clawpal_core::instance::InstanceRegistry::load().map_err(|e| e.to_string())?; + let instance = registry + .get(&instance_id) + .ok_or_else(|| format!("Instance not found: {instance_id}"))?; + Ok(precheck::precheck_instance_state(instance)) + }) } #[tauri::command] @@ -23,55 +28,61 @@ pub async fn precheck_transport( pool: State<'_, SshConnectionPool>, instance_id: String, ) -> Result, String> { - let registry = clawpal_core::instance::InstanceRegistry::load().map_err(|e| e.to_string())?; - let instance = registry - .get(&instance_id) - .ok_or_else(|| format!("Instance not found: {instance_id}"))?; + timed_async!("precheck_transport", { + let registry = + clawpal_core::instance::InstanceRegistry::load().map_err(|e| e.to_string())?; + let instance = registry + .get(&instance_id) + .ok_or_else(|| format!("Instance not found: {instance_id}"))?; - let mut issues = Vec::new(); + let mut issues = Vec::new(); - match &instance.instance_type { - clawpal_core::instance::InstanceType::RemoteSsh => { - if !pool.is_connected(&instance_id).await { - issues.push(PrecheckIssue { - code: "TRANSPORT_STALE".into(), - severity: "warn".into(), - message: format!( - "SSH connection for instance '{}' is not active", - instance.label - ), - auto_fixable: false, - }); + match &instance.instance_type { + clawpal_core::instance::InstanceType::RemoteSsh => { + if 
!pool.is_connected(&instance_id).await { + issues.push(PrecheckIssue { + code: "TRANSPORT_STALE".into(), + severity: "warn".into(), + message: format!( + "SSH connection for instance '{}' is not active", + instance.label + ), + auto_fixable: false, + }); + } } - } - clawpal_core::instance::InstanceType::Docker => { - let docker_ok = tokio::process::Command::new("docker") - .args(["info", "--format", "{{.ServerVersion}}"]) - .stdout(std::process::Stdio::null()) - .stderr(std::process::Stdio::null()) - .status() - .await - .map(|s| s.success()) - .unwrap_or(false); - if !docker_ok { - issues.push(PrecheckIssue { - code: "TRANSPORT_STALE".into(), - severity: "error".into(), - message: "Docker daemon is not running or unreachable".into(), - auto_fixable: false, - }); + clawpal_core::instance::InstanceType::Docker => { + let docker_ok = tokio::process::Command::new("docker") + .args(["info", "--format", "{{.ServerVersion}}"]) + .stdout(std::process::Stdio::null()) + .stderr(std::process::Stdio::null()) + .status() + .await + .map(|s| s.success()) + .unwrap_or(false); + if !docker_ok { + issues.push(PrecheckIssue { + code: "TRANSPORT_STALE".into(), + severity: "error".into(), + message: "Docker daemon is not running or unreachable".into(), + auto_fixable: false, + }); + } } + _ => {} } - _ => {} - } - Ok(issues) + Ok(issues) + }) } #[tauri::command] pub async fn precheck_auth(instance_id: String) -> Result, String> { - let openclaw = clawpal_core::openclaw::OpenclawCli::new(); - let profiles = clawpal_core::profile::list_profiles(&openclaw).map_err(|e| e.to_string())?; - let _ = instance_id; // reserved for future per-instance profile filtering - Ok(precheck::precheck_auth(&profiles)) + timed_async!("precheck_auth", { + let openclaw = clawpal_core::openclaw::OpenclawCli::new(); + let profiles = + clawpal_core::profile::list_profiles(&openclaw).map_err(|e| e.to_string())?; + let _ = instance_id; // reserved for future per-instance profile filtering + 
Ok(precheck::precheck_auth(&profiles)) + }) } diff --git a/src-tauri/src/commands/preferences.rs b/src-tauri/src/commands/preferences.rs index 150fb15d..b77295d8 100644 --- a/src-tauri/src/commands/preferences.rs +++ b/src-tauri/src/commands/preferences.rs @@ -87,29 +87,37 @@ pub fn save_bug_report_settings_from_paths( #[tauri::command] pub fn get_app_preferences() -> Result { - let paths = resolve_paths(); - Ok(load_app_preferences_from_paths(&paths)) + timed_sync!("get_app_preferences", { + let paths = resolve_paths(); + Ok(load_app_preferences_from_paths(&paths)) + }) } #[tauri::command] pub fn get_bug_report_settings() -> Result { - let paths = resolve_paths(); - Ok(load_bug_report_settings_from_paths(&paths)) + timed_sync!("get_bug_report_settings", { + let paths = resolve_paths(); + Ok(load_bug_report_settings_from_paths(&paths)) + }) } #[tauri::command] pub fn set_bug_report_settings(settings: BugReportSettings) -> Result { - let paths = resolve_paths(); - save_bug_report_settings_from_paths(&paths, settings) + timed_sync!("set_bug_report_settings", { + let paths = resolve_paths(); + save_bug_report_settings_from_paths(&paths, settings) + }) } #[tauri::command] pub fn set_ssh_transfer_speed_ui_preference(show_ui: bool) -> Result { - let paths = resolve_paths(); - let mut prefs = load_app_preferences_from_paths(&paths); - prefs.show_ssh_transfer_speed_ui = show_ui; - save_app_preferences_from_paths(&paths, &prefs)?; - Ok(prefs) + timed_sync!("set_ssh_transfer_speed_ui_preference", { + let paths = resolve_paths(); + let mut prefs = load_app_preferences_from_paths(&paths); + prefs.show_ssh_transfer_speed_ui = show_ui; + save_app_preferences_from_paths(&paths, &prefs)?; + Ok(prefs) + }) } // --------------------------------------------------------------------------- @@ -132,30 +140,36 @@ pub fn lookup_session_model_override(session_id: &str) -> Option { #[tauri::command] pub fn set_session_model_override(session_id: String, model: String) -> Result<(), String> { 
- let trimmed = model.trim().to_string(); - if trimmed.is_empty() { - return Err("model must not be empty".into()); - } - if let Ok(mut map) = session_model_overrides().lock() { - map.insert(session_id, trimmed); - } - Ok(()) + timed_sync!("set_session_model_override", { + let trimmed = model.trim().to_string(); + if trimmed.is_empty() { + return Err("model must not be empty".into()); + } + if let Ok(mut map) = session_model_overrides().lock() { + map.insert(session_id, trimmed); + } + Ok(()) + }) } #[tauri::command] pub fn get_session_model_override(session_id: String) -> Result, String> { - let map = session_model_overrides() - .lock() - .map_err(|e| e.to_string())?; - Ok(map.get(&session_id).cloned()) + timed_sync!("get_session_model_override", { + let map = session_model_overrides() + .lock() + .map_err(|e| e.to_string())?; + Ok(map.get(&session_id).cloned()) + }) } #[tauri::command] pub fn clear_session_model_override(session_id: String) -> Result<(), String> { - if let Ok(mut map) = session_model_overrides().lock() { - map.remove(&session_id); - } - Ok(()) + timed_sync!("clear_session_model_override", { + if let Ok(mut map) = session_model_overrides().lock() { + map.remove(&session_id); + } + Ok(()) + }) } #[cfg(test)] diff --git a/src-tauri/src/commands/profiles.rs b/src-tauri/src/commands/profiles.rs index 4d2d5a43..c7149451 100644 --- a/src-tauri/src/commands/profiles.rs +++ b/src-tauri/src/commands/profiles.rs @@ -415,8 +415,10 @@ pub async fn remote_list_model_profiles( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result, String> { - let (profiles, _) = collect_remote_profiles_from_openclaw(&pool, &host_id, true).await?; - Ok(profiles) + timed_async!("remote_list_model_profiles", { + let (profiles, _) = collect_remote_profiles_from_openclaw(&pool, &host_id, true).await?; + Ok(profiles) + }) } #[tauri::command] @@ -425,18 +427,20 @@ pub async fn remote_upsert_model_profile( host_id: String, profile: ModelProfile, ) -> Result { - let content = 
pool - .sftp_read(&host_id, "~/.clawpal/model-profiles.json") - .await - .unwrap_or_else(|_| r#"{"profiles":[]}"#.to_string()); - let (saved, next_json) = - clawpal_core::profile::upsert_profile_in_storage_json(&content, profile) - .map_err(|e| e.to_string())?; + timed_async!("remote_upsert_model_profile", { + let content = pool + .sftp_read(&host_id, "~/.clawpal/model-profiles.json") + .await + .unwrap_or_else(|_| r#"{"profiles":[]}"#.to_string()); + let (saved, next_json) = + clawpal_core::profile::upsert_profile_in_storage_json(&content, profile) + .map_err(|e| e.to_string())?; - let _ = pool.exec(&host_id, "mkdir -p ~/.clawpal").await; - pool.sftp_write(&host_id, "~/.clawpal/model-profiles.json", &next_json) - .await?; - Ok(saved) + let _ = pool.exec(&host_id, "mkdir -p ~/.clawpal").await; + pool.sftp_write(&host_id, "~/.clawpal/model-profiles.json", &next_json) + .await?; + Ok(saved) + }) } #[tauri::command] @@ -445,19 +449,21 @@ pub async fn remote_delete_model_profile( host_id: String, profile_id: String, ) -> Result { - let content = pool - .sftp_read(&host_id, "~/.clawpal/model-profiles.json") - .await - .unwrap_or_else(|_| r#"{"profiles":[]}"#.to_string()); - let (removed, next_json) = - clawpal_core::profile::delete_profile_from_storage_json(&content, &profile_id) - .map_err(|e| e.to_string())?; - if !removed { - return Ok(false); - } - pool.sftp_write(&host_id, "~/.clawpal/model-profiles.json", &next_json) - .await?; - Ok(true) + timed_async!("remote_delete_model_profile", { + let content = pool + .sftp_read(&host_id, "~/.clawpal/model-profiles.json") + .await + .unwrap_or_else(|_| r#"{"profiles":[]}"#.to_string()); + let (removed, next_json) = + clawpal_core::profile::delete_profile_from_storage_json(&content, &profile_id) + .map_err(|e| e.to_string())?; + if !removed { + return Ok(false); + } + pool.sftp_write(&host_id, "~/.clawpal/model-profiles.json", &next_json) + .await?; + Ok(true) + }) } #[tauri::command] @@ -465,38 +471,41 @@ pub async fn 
remote_resolve_api_keys( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result, String> { - let (profiles, _) = collect_remote_profiles_from_openclaw(&pool, &host_id, true).await?; - let auth_cache = RemoteAuthCache::build(&pool, &host_id, &profiles) - .await - .ok(); + timed_async!("remote_resolve_api_keys", { + let (profiles, _) = collect_remote_profiles_from_openclaw(&pool, &host_id, true).await?; + let auth_cache = RemoteAuthCache::build(&pool, &host_id, &profiles) + .await + .ok(); - let mut out = Vec::new(); - for profile in &profiles { - let (resolved_key, source) = if let Some(ref cache) = auth_cache { - if let Some((key, source)) = cache.resolve_for_profile_with_source(profile) { - (key, Some(source)) + let mut out = Vec::new(); + for profile in &profiles { + let (resolved_key, source) = if let Some(ref cache) = auth_cache { + if let Some((key, source)) = cache.resolve_for_profile_with_source(profile) { + (key, Some(source)) + } else { + (String::new(), None) + } } else { - (String::new(), None) - } - } else { - match resolve_remote_profile_api_key(&pool, &host_id, profile).await { - Ok(key) => (key, None), - Err(_) => (String::new(), None), - } - }; - let resolved_override = if resolved_key.trim().is_empty() && oauth_session_ready(profile) { - Some(true) - } else { - None - }; - out.push(build_resolved_api_key( - profile, - &resolved_key, - source, - resolved_override, - )); - } - Ok(out) + match resolve_remote_profile_api_key(&pool, &host_id, profile).await { + Ok(key) => (key, None), + Err(_) => (String::new(), None), + } + }; + let resolved_override = + if resolved_key.trim().is_empty() && oauth_session_ready(profile) { + Some(true) + } else { + None + }; + out.push(build_resolved_api_key( + profile, + &resolved_key, + source, + resolved_override, + )); + } + Ok(out) + }) } #[tauri::command] @@ -505,33 +514,35 @@ pub async fn remote_test_model_profile( host_id: String, profile_id: String, ) -> Result { - let (profiles, _) = 
collect_remote_profiles_from_openclaw(&pool, &host_id, true).await?; - let profile = profiles - .into_iter() - .find(|candidate| candidate.id == profile_id) - .ok_or_else(|| format!("Profile not found: {profile_id}"))?; - - if !profile.enabled { - return Err("Profile is disabled".into()); - } + timed_async!("remote_test_model_profile", { + let (profiles, _) = collect_remote_profiles_from_openclaw(&pool, &host_id, true).await?; + let profile = profiles + .into_iter() + .find(|candidate| candidate.id == profile_id) + .ok_or_else(|| format!("Profile not found: {profile_id}"))?; + + if !profile.enabled { + return Err("Profile is disabled".into()); + } - let api_key = resolve_remote_profile_api_key(&pool, &host_id, &profile).await?; - if api_key.trim().is_empty() && !provider_supports_optional_api_key(&profile.provider) { - let hint = missing_profile_auth_hint(&profile.provider, true); - return Err( - format!("No API key resolved for this remote profile. Set apiKey directly, configure auth_ref in remote auth store (auth-profiles.json/auth.json), or export auth_ref on remote shell.{hint}"), - ); - } + let api_key = resolve_remote_profile_api_key(&pool, &host_id, &profile).await?; + if api_key.trim().is_empty() && !provider_supports_optional_api_key(&profile.provider) { + let hint = missing_profile_auth_hint(&profile.provider, true); + return Err( + format!("No API key resolved for this remote profile. 
Set apiKey directly, configure auth_ref in remote auth store (auth-profiles.json/auth.json), or export auth_ref on remote shell.{hint}"), + ); + } - let resolved_base_url = resolve_remote_profile_base_url(&pool, &host_id, &profile).await?; + let resolved_base_url = resolve_remote_profile_base_url(&pool, &host_id, &profile).await?; - tauri::async_runtime::spawn_blocking(move || { - run_provider_probe(profile.provider, profile.model, resolved_base_url, api_key) - }) - .await - .map_err(|e| format!("Task join failed: {e}"))??; + tauri::async_runtime::spawn_blocking(move || { + run_provider_probe(profile.provider, profile.model, resolved_base_url, api_key) + }) + .await + .map_err(|e| format!("Task join failed: {e}"))??; - Ok(true) + Ok(true) + }) } #[tauri::command] @@ -539,8 +550,10 @@ pub async fn remote_extract_model_profiles_from_config( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - let (_, result) = collect_remote_profiles_from_openclaw(&pool, &host_id, true).await?; - Ok(result) + timed_async!("remote_extract_model_profiles_from_config", { + let (_, result) = collect_remote_profiles_from_openclaw(&pool, &host_id, true).await?; + Ok(result) + }) } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -560,101 +573,104 @@ pub async fn remote_sync_profiles_to_local_auth( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - let (remote_profiles, _) = collect_remote_profiles_from_openclaw(&pool, &host_id, true).await?; - if remote_profiles.is_empty() { - return Ok(RemoteAuthSyncResult { - total_remote_profiles: 0, - synced_profiles: 0, - created_profiles: 0, - updated_profiles: 0, - resolved_keys: 0, - unresolved_keys: 0, - failed_key_resolves: 0, - }); - } + timed_async!("remote_sync_profiles_to_local_auth", { + let (remote_profiles, _) = + collect_remote_profiles_from_openclaw(&pool, &host_id, true).await?; + if remote_profiles.is_empty() { + return Ok(RemoteAuthSyncResult { + total_remote_profiles: 0, + synced_profiles: 0, + 
created_profiles: 0, + updated_profiles: 0, + resolved_keys: 0, + unresolved_keys: 0, + failed_key_resolves: 0, + }); + } - let paths = resolve_paths(); - let mut local_profiles = dedupe_profiles_by_model_key(load_model_profiles(&paths)); + let paths = resolve_paths(); + let mut local_profiles = dedupe_profiles_by_model_key(load_model_profiles(&paths)); - let mut created_profiles = 0usize; - let mut updated_profiles = 0usize; - let mut resolved_keys = 0usize; - let mut unresolved_keys = 0usize; - let mut failed_key_resolves = 0usize; + let mut created_profiles = 0usize; + let mut updated_profiles = 0usize; + let mut resolved_keys = 0usize; + let mut unresolved_keys = 0usize; + let mut failed_key_resolves = 0usize; - // Pre-fetch all needed remote env vars and auth-store files in bulk - // (~3 SSH calls total instead of 5-7 per profile). - let auth_cache = match RemoteAuthCache::build(&pool, &host_id, &remote_profiles).await { - Ok(cache) => Some(cache), - Err(_) => None, - }; + // Pre-fetch all needed remote env vars and auth-store files in bulk + // (~3 SSH calls total instead of 5-7 per profile). + let auth_cache = match RemoteAuthCache::build(&pool, &host_id, &remote_profiles).await { + Ok(cache) => Some(cache), + Err(_) => None, + }; - for remote in &remote_profiles { - let mut resolved_api_key: Option = None; - if !should_skip_session_material_sync(remote) { - if let Some(ref cache) = auth_cache { - let key = cache.resolve_for_profile(remote); - if !key.trim().is_empty() { - resolved_api_key = Some(key); - resolved_keys += 1; - } else { - unresolved_keys += 1; - } - } else { - // Fallback to per-profile resolution if cache build failed. 
- match resolve_remote_profile_api_key(&pool, &host_id, remote).await { - Ok(api_key) if !api_key.trim().is_empty() => { - resolved_api_key = Some(api_key); + for remote in &remote_profiles { + let mut resolved_api_key: Option = None; + if !should_skip_session_material_sync(remote) { + if let Some(ref cache) = auth_cache { + let key = cache.resolve_for_profile(remote); + if !key.trim().is_empty() { + resolved_api_key = Some(key); resolved_keys += 1; - } - Ok(_) => { + } else { unresolved_keys += 1; } - Err(_) => { - failed_key_resolves += 1; + } else { + // Fallback to per-profile resolution if cache build failed. + match resolve_remote_profile_api_key(&pool, &host_id, remote).await { + Ok(api_key) if !api_key.trim().is_empty() => { + resolved_api_key = Some(api_key); + resolved_keys += 1; + } + Ok(_) => { + unresolved_keys += 1; + } + Err(_) => { + failed_key_resolves += 1; + } } } } - } - let resolved_base_url = if remote - .base_url - .as_deref() - .map(str::trim) - .is_some_and(|v| !v.is_empty()) - { - None - } else { - match resolve_remote_profile_base_url(&pool, &host_id, remote).await { - Ok(Some(remote_base)) if !remote_base.trim().is_empty() => { - Some(remote_base.trim().to_string()) + let resolved_base_url = if remote + .base_url + .as_deref() + .map(str::trim) + .is_some_and(|v| !v.is_empty()) + { + None + } else { + match resolve_remote_profile_base_url(&pool, &host_id, remote).await { + Ok(Some(remote_base)) if !remote_base.trim().is_empty() => { + Some(remote_base.trim().to_string()) + } + _ => None, } - _ => None, + }; + + if merge_remote_profile_into_local( + &mut local_profiles, + remote, + resolved_api_key, + resolved_base_url, + ) { + created_profiles += 1; + } else { + updated_profiles += 1; } - }; - - if merge_remote_profile_into_local( - &mut local_profiles, - remote, - resolved_api_key, - resolved_base_url, - ) { - created_profiles += 1; - } else { - updated_profiles += 1; } - } - save_model_profiles(&paths, &local_profiles)?; - - 
Ok(RemoteAuthSyncResult { - total_remote_profiles: remote_profiles.len(), - synced_profiles: created_profiles + updated_profiles, - created_profiles, - updated_profiles, - resolved_keys, - unresolved_keys, - failed_key_resolves, + save_model_profiles(&paths, &local_profiles)?; + + Ok(RemoteAuthSyncResult { + total_remote_profiles: remote_profiles.len(), + synced_profiles: created_profiles + updated_profiles, + created_profiles, + updated_profiles, + resolved_keys, + unresolved_keys, + failed_key_resolves, + }) }) } @@ -973,94 +989,99 @@ pub async fn push_related_secrets_to_remote( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - let (_, _, cfg) = remote_read_openclaw_config_text_and_json(&pool, &host_id).await?; - - let (remote_profiles, _) = collect_remote_profiles_from_openclaw(&pool, &host_id, true).await?; - let related = collect_related_remote_providers(&cfg, &remote_profiles); - - if related.is_empty() { - return Ok(RelatedSecretPushResult { - total_related_providers: 0, - resolved_secrets: 0, - written_secrets: 0, - skipped_providers: 0, - failed_providers: 0, - }); - } + timed_async!("push_related_secrets_to_remote", { + let (_, _, cfg) = remote_read_openclaw_config_text_and_json(&pool, &host_id).await?; + + let (remote_profiles, _) = + collect_remote_profiles_from_openclaw(&pool, &host_id, true).await?; + let related = collect_related_remote_providers(&cfg, &remote_profiles); + + if related.is_empty() { + return Ok(RelatedSecretPushResult { + total_related_providers: 0, + resolved_secrets: 0, + written_secrets: 0, + skipped_providers: 0, + failed_providers: 0, + }); + } - // Secret provider resolution may execute external commands with timeouts. - // Run it on the blocking pool so async command threads stay responsive. 
- let local_credentials = - tauri::async_runtime::spawn_blocking(collect_provider_credentials_for_internal) - .await - .map_err(|e| format!("Failed to resolve local provider credentials: {e}"))?; - let mut providers = related.into_iter().collect::>(); - providers.sort(); - - let mut selected = Vec::<(String, InternalProviderCredential)>::new(); - let mut skipped = 0usize; - for provider in &providers { - if let Some(credential) = local_credentials.get(provider) { - selected.push((provider.clone(), credential.clone())); - } else { - skipped += 1; + // Secret provider resolution may execute external commands with timeouts. + // Run it on the blocking pool so async command threads stay responsive. + let local_credentials = + tauri::async_runtime::spawn_blocking(collect_provider_credentials_for_internal) + .await + .map_err(|e| format!("Failed to resolve local provider credentials: {e}"))?; + let mut providers = related.into_iter().collect::>(); + providers.sort(); + + let mut selected = Vec::<(String, InternalProviderCredential)>::new(); + let mut skipped = 0usize; + for provider in &providers { + if let Some(credential) = local_credentials.get(provider) { + selected.push((provider.clone(), credential.clone())); + } else { + skipped += 1; + } } - } - if selected.is_empty() { - return Ok(RelatedSecretPushResult { - total_related_providers: providers.len(), - resolved_secrets: 0, - written_secrets: 0, - skipped_providers: skipped, - failed_providers: 0, - }); - } + if selected.is_empty() { + return Ok(RelatedSecretPushResult { + total_related_providers: providers.len(), + resolved_secrets: 0, + written_secrets: 0, + skipped_providers: skipped, + failed_providers: 0, + }); + } - let roots = resolve_remote_openclaw_roots(&pool, &host_id).await?; - let root = roots - .first() - .map(String::as_str) - .map(str::trim) - .filter(|value| !value.is_empty()) - .ok_or_else(|| "Failed to resolve remote openclaw root".to_string())?; - let root = root.trim_end_matches('/'); - let 
remote_auth_dir = format!("{root}/agents/main/agent"); - let remote_auth_path = format!("{remote_auth_dir}/auth-profiles.json"); - let remote_auth_raw = match pool.sftp_read(&host_id, &remote_auth_path).await { - Ok(content) => content, - Err(e) if is_remote_missing_path_error(&e) => r#"{"version":1,"profiles":{}}"#.to_string(), - Err(e) => return Err(format!("Failed to read remote auth store: {e}")), - }; - let mut remote_auth_json: Value = serde_json::from_str(&remote_auth_raw) - .map_err(|e| format!("Failed to parse remote auth store at {remote_auth_path}: {e}"))?; - - let mut written = 0usize; - let mut failed = 0usize; - for (provider, credential) in &selected { - let auth_ref = format!("{provider}:default"); - match upsert_auth_store_entry(&mut remote_auth_json, &auth_ref, provider, credential) { - UpsertAuthStoreResult::Written => written += 1, - UpsertAuthStoreResult::Unchanged => {} - UpsertAuthStoreResult::Failed => failed += 1, + let roots = resolve_remote_openclaw_roots(&pool, &host_id).await?; + let root = roots + .first() + .map(String::as_str) + .map(str::trim) + .filter(|value| !value.is_empty()) + .ok_or_else(|| "Failed to resolve remote openclaw root".to_string())?; + let root = root.trim_end_matches('/'); + let remote_auth_dir = format!("{root}/agents/main/agent"); + let remote_auth_path = format!("{remote_auth_dir}/auth-profiles.json"); + let remote_auth_raw = match pool.sftp_read(&host_id, &remote_auth_path).await { + Ok(content) => content, + Err(e) if is_remote_missing_path_error(&e) => { + r#"{"version":1,"profiles":{}}"#.to_string() + } + Err(e) => return Err(format!("Failed to read remote auth store: {e}")), + }; + let mut remote_auth_json: Value = serde_json::from_str(&remote_auth_raw) + .map_err(|e| format!("Failed to parse remote auth store at {remote_auth_path}: {e}"))?; + + let mut written = 0usize; + let mut failed = 0usize; + for (provider, credential) in &selected { + let auth_ref = format!("{provider}:default"); + match 
upsert_auth_store_entry(&mut remote_auth_json, &auth_ref, provider, credential) { + UpsertAuthStoreResult::Written => written += 1, + UpsertAuthStoreResult::Unchanged => {} + UpsertAuthStoreResult::Failed => failed += 1, + } } - } - if written > 0 { - let serialized = serde_json::to_string_pretty(&remote_auth_json) - .map_err(|e| format!("Failed to serialize remote auth store: {e}"))?; - let mkdir_cmd = format!("mkdir -p {}", shell_escape(&remote_auth_dir)); - let _ = pool.exec(&host_id, &mkdir_cmd).await; - pool.sftp_write(&host_id, &remote_auth_path, &serialized) - .await?; - } + if written > 0 { + let serialized = serde_json::to_string_pretty(&remote_auth_json) + .map_err(|e| format!("Failed to serialize remote auth store: {e}"))?; + let mkdir_cmd = format!("mkdir -p {}", shell_escape(&remote_auth_dir)); + let _ = pool.exec(&host_id, &mkdir_cmd).await; + pool.sftp_write(&host_id, &remote_auth_path, &serialized) + .await?; + } - Ok(RelatedSecretPushResult { - total_related_providers: providers.len(), - resolved_secrets: selected.len(), - written_secrets: written, - skipped_providers: skipped, - failed_providers: failed, + Ok(RelatedSecretPushResult { + total_related_providers: providers.len(), + resolved_secrets: selected.len(), + written_secrets: written, + skipped_providers: skipped, + failed_providers: failed, + }) }) } @@ -1068,71 +1089,73 @@ pub async fn push_related_secrets_to_remote( pub fn push_model_profiles_to_local_openclaw( profile_ids: Vec, ) -> Result { - let paths = resolve_paths(); - let (prepared, blocked_profiles) = collect_selected_profile_pushes(&paths, &profile_ids)?; - if prepared.is_empty() { - return Ok(ProfilePushResult { - requested_profiles: profile_ids.len(), - pushed_profiles: 0, - written_model_entries: 0, - written_auth_entries: 0, - blocked_profiles, - }); - } + timed_sync!("push_model_profiles_to_local_openclaw", { + let paths = resolve_paths(); + let (prepared, blocked_profiles) = collect_selected_profile_pushes(&paths, 
&profile_ids)?; + if prepared.is_empty() { + return Ok(ProfilePushResult { + requested_profiles: profile_ids.len(), + pushed_profiles: 0, + written_model_entries: 0, + written_auth_entries: 0, + blocked_profiles, + }); + } - let mut cfg = read_openclaw_config(&paths)?; - let mut written_model_entries = 0usize; - for push in &prepared { - if upsert_model_registration(&mut cfg, push)? { - written_model_entries += 1; + let mut cfg = read_openclaw_config(&paths)?; + let mut written_model_entries = 0usize; + for push in &prepared { + if upsert_model_registration(&mut cfg, push)? { + written_model_entries += 1; + } + } + if written_model_entries > 0 { + write_json(&paths.config_path, &cfg)?; } - } - if written_model_entries > 0 { - write_json(&paths.config_path, &cfg)?; - } - let auth_file = paths - .base_dir - .join("agents") - .join("main") - .join("agent") - .join("auth-profiles.json"); - let auth_raw = std::fs::read_to_string(&auth_file) - .unwrap_or_else(|_| r#"{"version":1,"profiles":{}}"#.to_string()); - let mut auth_json = parse_auth_store_json(&auth_raw)?; - let mut written_auth_entries = 0usize; - for push in &prepared { - let Some(credential) = push.credential.as_ref() else { - continue; - }; - match upsert_auth_store_entry( - &mut auth_json, - &push.target_auth_ref, - &push.provider_key, - credential, - ) { - UpsertAuthStoreResult::Written => written_auth_entries += 1, - UpsertAuthStoreResult::Unchanged => {} - UpsertAuthStoreResult::Failed => { - return Err(format!( - "Failed to write auth entry for {}/{}", - push.provider_key, push.profile.model - )); + let auth_file = paths + .base_dir + .join("agents") + .join("main") + .join("agent") + .join("auth-profiles.json"); + let auth_raw = std::fs::read_to_string(&auth_file) + .unwrap_or_else(|_| r#"{"version":1,"profiles":{}}"#.to_string()); + let mut auth_json = parse_auth_store_json(&auth_raw)?; + let mut written_auth_entries = 0usize; + for push in &prepared { + let Some(credential) = push.credential.as_ref() 
else { + continue; + }; + match upsert_auth_store_entry( + &mut auth_json, + &push.target_auth_ref, + &push.provider_key, + credential, + ) { + UpsertAuthStoreResult::Written => written_auth_entries += 1, + UpsertAuthStoreResult::Unchanged => {} + UpsertAuthStoreResult::Failed => { + return Err(format!( + "Failed to write auth entry for {}/{}", + push.provider_key, push.profile.model + )); + } } } - } - if written_auth_entries > 0 { - let serialized = serde_json::to_string_pretty(&auth_json) - .map_err(|e| format!("Failed to serialize local auth store: {e}"))?; - write_text(&auth_file, &serialized)?; - } + if written_auth_entries > 0 { + let serialized = serde_json::to_string_pretty(&auth_json) + .map_err(|e| format!("Failed to serialize local auth store: {e}"))?; + write_text(&auth_file, &serialized)?; + } - Ok(ProfilePushResult { - requested_profiles: profile_ids.len(), - pushed_profiles: prepared.len(), - written_model_entries, - written_auth_entries, - blocked_profiles, + Ok(ProfilePushResult { + requested_profiles: profile_ids.len(), + pushed_profiles: prepared.len(), + written_model_entries, + written_auth_entries, + blocked_profiles, + }) }) } @@ -1142,90 +1165,94 @@ pub async fn push_model_profiles_to_remote_openclaw( host_id: String, profile_ids: Vec, ) -> Result { - let paths = resolve_paths(); - let (prepared, blocked_profiles) = collect_selected_profile_pushes(&paths, &profile_ids)?; - if prepared.is_empty() { - return Ok(ProfilePushResult { - requested_profiles: profile_ids.len(), - pushed_profiles: 0, - written_model_entries: 0, - written_auth_entries: 0, - blocked_profiles, - }); - } + timed_async!("push_model_profiles_to_remote_openclaw", { + let paths = resolve_paths(); + let (prepared, blocked_profiles) = collect_selected_profile_pushes(&paths, &profile_ids)?; + if prepared.is_empty() { + return Ok(ProfilePushResult { + requested_profiles: profile_ids.len(), + pushed_profiles: 0, + written_model_entries: 0, + written_auth_entries: 0, + 
blocked_profiles, + }); + } - let (config_path, current_text, mut cfg) = - remote_read_openclaw_config_text_and_json(&pool, &host_id).await?; - let mut written_model_entries = 0usize; - for push in &prepared { - if upsert_model_registration(&mut cfg, push)? { - written_model_entries += 1; + let (config_path, current_text, mut cfg) = + remote_read_openclaw_config_text_and_json(&pool, &host_id).await?; + let mut written_model_entries = 0usize; + for push in &prepared { + if upsert_model_registration(&mut cfg, push)? { + written_model_entries += 1; + } + } + if written_model_entries > 0 { + remote_write_config_with_snapshot( + &pool, + &host_id, + &config_path, + ¤t_text, + &cfg, + "push-profiles", + ) + .await?; } - } - if written_model_entries > 0 { - remote_write_config_with_snapshot( - &pool, - &host_id, - &config_path, - ¤t_text, - &cfg, - "push-profiles", - ) - .await?; - } - let roots = resolve_remote_openclaw_roots(&pool, &host_id).await?; - let root = roots - .first() - .map(String::as_str) - .map(str::trim) - .filter(|value| !value.is_empty()) - .ok_or_else(|| "Failed to resolve remote openclaw root".to_string())?; - let root = root.trim_end_matches('/'); - let remote_auth_dir = format!("{root}/agents/main/agent"); - let remote_auth_path = format!("{remote_auth_dir}/auth-profiles.json"); - let remote_auth_raw = match pool.sftp_read(&host_id, &remote_auth_path).await { - Ok(content) => content, - Err(e) if is_remote_missing_path_error(&e) => r#"{"version":1,"profiles":{}}"#.to_string(), - Err(e) => return Err(format!("Failed to read remote auth store: {e}")), - }; - let mut remote_auth_json = parse_auth_store_json(&remote_auth_raw)?; - let mut written_auth_entries = 0usize; - for push in &prepared { - let Some(credential) = push.credential.as_ref() else { - continue; + let roots = resolve_remote_openclaw_roots(&pool, &host_id).await?; + let root = roots + .first() + .map(String::as_str) + .map(str::trim) + .filter(|value| !value.is_empty()) + .ok_or_else(|| 
"Failed to resolve remote openclaw root".to_string())?; + let root = root.trim_end_matches('/'); + let remote_auth_dir = format!("{root}/agents/main/agent"); + let remote_auth_path = format!("{remote_auth_dir}/auth-profiles.json"); + let remote_auth_raw = match pool.sftp_read(&host_id, &remote_auth_path).await { + Ok(content) => content, + Err(e) if is_remote_missing_path_error(&e) => { + r#"{"version":1,"profiles":{}}"#.to_string() + } + Err(e) => return Err(format!("Failed to read remote auth store: {e}")), }; - match upsert_auth_store_entry( - &mut remote_auth_json, - &push.target_auth_ref, - &push.provider_key, - credential, - ) { - UpsertAuthStoreResult::Written => written_auth_entries += 1, - UpsertAuthStoreResult::Unchanged => {} - UpsertAuthStoreResult::Failed => { - return Err(format!( - "Failed to write remote auth entry for {}/{}", - push.provider_key, push.profile.model - )); + let mut remote_auth_json = parse_auth_store_json(&remote_auth_raw)?; + let mut written_auth_entries = 0usize; + for push in &prepared { + let Some(credential) = push.credential.as_ref() else { + continue; + }; + match upsert_auth_store_entry( + &mut remote_auth_json, + &push.target_auth_ref, + &push.provider_key, + credential, + ) { + UpsertAuthStoreResult::Written => written_auth_entries += 1, + UpsertAuthStoreResult::Unchanged => {} + UpsertAuthStoreResult::Failed => { + return Err(format!( + "Failed to write remote auth entry for {}/{}", + push.provider_key, push.profile.model + )); + } } } - } - if written_auth_entries > 0 { - let serialized = serde_json::to_string_pretty(&remote_auth_json) - .map_err(|e| format!("Failed to serialize remote auth store: {e}"))?; - let mkdir_cmd = format!("mkdir -p {}", shell_escape(&remote_auth_dir)); - let _ = pool.exec(&host_id, &mkdir_cmd).await; - pool.sftp_write(&host_id, &remote_auth_path, &serialized) - .await?; - } + if written_auth_entries > 0 { + let serialized = serde_json::to_string_pretty(&remote_auth_json) + .map_err(|e| 
format!("Failed to serialize remote auth store: {e}"))?; + let mkdir_cmd = format!("mkdir -p {}", shell_escape(&remote_auth_dir)); + let _ = pool.exec(&host_id, &mkdir_cmd).await; + pool.sftp_write(&host_id, &remote_auth_path, &serialized) + .await?; + } - Ok(ProfilePushResult { - requested_profiles: profile_ids.len(), - pushed_profiles: prepared.len(), - written_model_entries, - written_auth_entries, - blocked_profiles, + Ok(ProfilePushResult { + requested_profiles: profile_ids.len(), + pushed_profiles: prepared.len(), + written_model_entries, + written_auth_entries, + blocked_profiles, + }) }) } @@ -1581,217 +1608,237 @@ mod tests { #[tauri::command] pub fn get_cached_model_catalog() -> Result, String> { - let paths = resolve_paths(); - let cache_path = model_catalog_cache_path(&paths); - let current_version = resolve_openclaw_version(); - if let Some(catalog) = select_catalog_from_cache( - read_model_catalog_cache(&cache_path).as_ref(), - ¤t_version, - ) { - return Ok(catalog); - } - Ok(Vec::new()) + timed_sync!("get_cached_model_catalog", { + let paths = resolve_paths(); + let cache_path = model_catalog_cache_path(&paths); + let current_version = resolve_openclaw_version(); + if let Some(catalog) = select_catalog_from_cache( + read_model_catalog_cache(&cache_path).as_ref(), + ¤t_version, + ) { + return Ok(catalog); + } + Ok(Vec::new()) + }) } #[tauri::command] pub fn refresh_model_catalog() -> Result, String> { - let paths = resolve_paths(); - load_model_catalog(&paths) + timed_sync!("refresh_model_catalog", { + let paths = resolve_paths(); + load_model_catalog(&paths) + }) } #[tauri::command] pub fn list_model_profiles() -> Result, String> { - let openclaw = clawpal_core::openclaw::OpenclawCli::new(); - clawpal_core::profile::list_profiles(&openclaw).map_err(|e| e.to_string()) + timed_sync!("list_model_profiles", { + let openclaw = clawpal_core::openclaw::OpenclawCli::new(); + clawpal_core::profile::list_profiles(&openclaw).map_err(|e| e.to_string()) + }) } 
#[tauri::command] pub fn extract_model_profiles_from_config() -> Result { - let paths = resolve_paths(); - let cfg = read_openclaw_config(&paths)?; - let profiles = load_model_profiles(&paths); - let (next_profiles, result) = extract_profiles_from_openclaw_config(&cfg, profiles); - - if result.created > 0 { - save_model_profiles(&paths, &next_profiles)?; - } + timed_sync!("extract_model_profiles_from_config", { + let paths = resolve_paths(); + let cfg = read_openclaw_config(&paths)?; + let profiles = load_model_profiles(&paths); + let (next_profiles, result) = extract_profiles_from_openclaw_config(&cfg, profiles); + + if result.created > 0 { + save_model_profiles(&paths, &next_profiles)?; + } - Ok(result) + Ok(result) + }) } #[tauri::command] pub fn upsert_model_profile(profile: ModelProfile) -> Result { - let paths = resolve_paths(); - let path = model_profiles_path(&paths); - let content = std::fs::read_to_string(&path).unwrap_or_else(|_| r#"{"profiles":[]}"#.into()); - let (saved, next_json) = - clawpal_core::profile::upsert_profile_in_storage_json(&content, profile) - .map_err(|e| e.to_string())?; - crate::config_io::write_text(&path, &next_json)?; - #[cfg(unix)] - { - use std::os::unix::fs::PermissionsExt; - let _ = std::fs::set_permissions(&path, std::fs::Permissions::from_mode(0o600)); - } - Ok(saved) + timed_sync!("upsert_model_profile", { + let paths = resolve_paths(); + let path = model_profiles_path(&paths); + let content = + std::fs::read_to_string(&path).unwrap_or_else(|_| r#"{"profiles":[]}"#.into()); + let (saved, next_json) = + clawpal_core::profile::upsert_profile_in_storage_json(&content, profile) + .map_err(|e| e.to_string())?; + crate::config_io::write_text(&path, &next_json)?; + #[cfg(unix)] + { + use std::os::unix::fs::PermissionsExt; + let _ = std::fs::set_permissions(&path, std::fs::Permissions::from_mode(0o600)); + } + Ok(saved) + }) } #[tauri::command] pub fn delete_model_profile(profile_id: String) -> Result { - let openclaw = 
clawpal_core::openclaw::OpenclawCli::new(); - clawpal_core::profile::delete_profile(&openclaw, &profile_id).map_err(|e| e.to_string()) + timed_sync!("delete_model_profile", { + let openclaw = clawpal_core::openclaw::OpenclawCli::new(); + clawpal_core::profile::delete_profile(&openclaw, &profile_id).map_err(|e| e.to_string()) + }) } #[tauri::command] pub fn resolve_provider_auth(provider: String) -> Result { - let provider_trimmed = provider.trim(); - if provider_trimmed.is_empty() { - return Ok(ProviderAuthSuggestion { - auth_ref: None, - has_key: false, - source: String::new(), - }); - } - let paths = resolve_paths(); - let cfg = read_openclaw_config(&paths)?; - let global_base = local_global_openclaw_base_dir(); - - // 1. Check openclaw config auth profiles - if let Some(auth_ref) = resolve_auth_ref_for_provider(&cfg, provider_trimmed) { - let probe_profile = ModelProfile { - id: "provider-auth-probe".into(), - name: "provider-auth-probe".into(), - provider: provider_trimmed.to_string(), - model: "probe".into(), - auth_ref: auth_ref.clone(), - api_key: None, - base_url: None, - description: None, - enabled: true, - }; - let key = resolve_profile_api_key(&probe_profile, &global_base); - if !key.trim().is_empty() { + timed_sync!("resolve_provider_auth", { + let provider_trimmed = provider.trim(); + if provider_trimmed.is_empty() { return Ok(ProviderAuthSuggestion { - auth_ref: Some(auth_ref), - has_key: true, - source: "openclaw auth profile".into(), + auth_ref: None, + has_key: false, + source: String::new(), }); } - } - - // 2. Check env vars - for env_name in provider_env_var_candidates(provider_trimmed) { - if std::env::var(&env_name) - .map(|v| !v.trim().is_empty()) - .unwrap_or(false) - { - return Ok(ProviderAuthSuggestion { - auth_ref: Some(env_name), - has_key: true, - source: "environment variable".into(), - }); + let paths = resolve_paths(); + let cfg = read_openclaw_config(&paths)?; + let global_base = local_global_openclaw_base_dir(); + + // 1. 
Check openclaw config auth profiles + if let Some(auth_ref) = resolve_auth_ref_for_provider(&cfg, provider_trimmed) { + let probe_profile = ModelProfile { + id: "provider-auth-probe".into(), + name: "provider-auth-probe".into(), + provider: provider_trimmed.to_string(), + model: "probe".into(), + auth_ref: auth_ref.clone(), + api_key: None, + base_url: None, + description: None, + enabled: true, + }; + let key = resolve_profile_api_key(&probe_profile, &global_base); + if !key.trim().is_empty() { + return Ok(ProviderAuthSuggestion { + auth_ref: Some(auth_ref), + has_key: true, + source: "openclaw auth profile".into(), + }); + } } - } - // 3. Check existing model profiles for this provider - let profiles = load_model_profiles(&paths); - for p in &profiles { - if p.provider.eq_ignore_ascii_case(provider_trimmed) { - let key = resolve_profile_api_key(p, &global_base); - if !key.is_empty() { - let auth_ref = if !p.auth_ref.trim().is_empty() { - Some(p.auth_ref.clone()) - } else { - None - }; + // 2. Check env vars + for env_name in provider_env_var_candidates(provider_trimmed) { + if std::env::var(&env_name) + .map(|v| !v.trim().is_empty()) + .unwrap_or(false) + { return Ok(ProviderAuthSuggestion { - auth_ref, + auth_ref: Some(env_name), has_key: true, - source: format!("existing profile {}/{}", p.provider, p.model), + source: "environment variable".into(), }); } } - } - Ok(ProviderAuthSuggestion { - auth_ref: None, - has_key: false, - source: String::new(), + // 3. 
Check existing model profiles for this provider + let profiles = load_model_profiles(&paths); + for p in &profiles { + if p.provider.eq_ignore_ascii_case(provider_trimmed) { + let key = resolve_profile_api_key(p, &global_base); + if !key.is_empty() { + let auth_ref = if !p.auth_ref.trim().is_empty() { + Some(p.auth_ref.clone()) + } else { + None + }; + return Ok(ProviderAuthSuggestion { + auth_ref, + has_key: true, + source: format!("existing profile {}/{}", p.provider, p.model), + }); + } + } + } + + Ok(ProviderAuthSuggestion { + auth_ref: None, + has_key: false, + source: String::new(), + }) }) } #[tauri::command] pub fn resolve_api_keys() -> Result, String> { - let paths = resolve_paths(); - let profiles = load_model_profiles(&paths); - let global_base = local_global_openclaw_base_dir(); - let mut out = Vec::new(); - for profile in &profiles { - let (resolved_key, source) = if let Some((credential, _priority, source)) = - resolve_profile_credential_with_priority(profile, &global_base) - { - (credential.secret, Some(source)) - } else { - (String::new(), None) - }; - let resolved_override = if resolved_key.trim().is_empty() && oauth_session_ready(profile) { - Some(true) - } else { - None - }; - out.push(build_resolved_api_key( - profile, - &resolved_key, - source, - resolved_override, - )); - } - Ok(out) + timed_sync!("resolve_api_keys", { + let paths = resolve_paths(); + let profiles = load_model_profiles(&paths); + let global_base = local_global_openclaw_base_dir(); + let mut out = Vec::new(); + for profile in &profiles { + let (resolved_key, source) = if let Some((credential, _priority, source)) = + resolve_profile_credential_with_priority(profile, &global_base) + { + (credential.secret, Some(source)) + } else { + (String::new(), None) + }; + let resolved_override = + if resolved_key.trim().is_empty() && oauth_session_ready(profile) { + Some(true) + } else { + None + }; + out.push(build_resolved_api_key( + profile, + &resolved_key, + source, + 
resolved_override, + )); + } + Ok(out) + }) } #[tauri::command] pub async fn test_model_profile(profile_id: String) -> Result { - let paths = resolve_paths(); - let profiles = load_model_profiles(&paths); - let profile = profiles - .into_iter() - .find(|p| p.id == profile_id) - .ok_or_else(|| format!("Profile not found: {profile_id}"))?; - - if !profile.enabled { - return Err("Profile is disabled".into()); - } + timed_async!("test_model_profile", { + let paths = resolve_paths(); + let profiles = load_model_profiles(&paths); + let profile = profiles + .into_iter() + .find(|p| p.id == profile_id) + .ok_or_else(|| format!("Profile not found: {profile_id}"))?; + + if !profile.enabled { + return Err("Profile is disabled".into()); + } - let global_base = local_global_openclaw_base_dir(); - let api_key = resolve_profile_api_key(&profile, &global_base); - if api_key.trim().is_empty() { - if !provider_supports_optional_api_key(&profile.provider) { - let hint = missing_profile_auth_hint(&profile.provider, false); - return Err( - format!("No API key resolved for this profile. Set apiKey directly, configure auth_ref in auth store (auth-profiles.json/auth.json), or export auth_ref on local shell.{hint}"), - ); + let global_base = local_global_openclaw_base_dir(); + let api_key = resolve_profile_api_key(&profile, &global_base); + if api_key.trim().is_empty() { + if !provider_supports_optional_api_key(&profile.provider) { + let hint = missing_profile_auth_hint(&profile.provider, false); + return Err( + format!("No API key resolved for this profile. 
Set apiKey directly, configure auth_ref in auth store (auth-profiles.json/auth.json), or export auth_ref on local shell.{hint}"), + ); + } } - } - let resolved_base_url = profile - .base_url - .as_deref() - .map(str::trim) - .filter(|v| !v.is_empty()) - .map(|v| v.to_string()) - .or_else(|| { - read_openclaw_config(&paths) - .ok() - .and_then(|cfg| resolve_model_provider_base_url(&cfg, &profile.provider)) - }); + let resolved_base_url = profile + .base_url + .as_deref() + .map(str::trim) + .filter(|v| !v.is_empty()) + .map(|v| v.to_string()) + .or_else(|| { + read_openclaw_config(&paths) + .ok() + .and_then(|cfg| resolve_model_provider_base_url(&cfg, &profile.provider)) + }); - tauri::async_runtime::spawn_blocking(move || { - run_provider_probe(profile.provider, profile.model, resolved_base_url, api_key) - }) - .await - .map_err(|e| format!("Task join failed: {e}"))??; + tauri::async_runtime::spawn_blocking(move || { + run_provider_probe(profile.provider, profile.model, resolved_base_url, api_key) + }) + .await + .map_err(|e| format!("Task join failed: {e}"))??; - Ok(true) + Ok(true) + }) } #[tauri::command] @@ -1799,41 +1846,43 @@ pub async fn remote_refresh_model_catalog( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result, String> { - let paths = resolve_paths(); - let cache_path = remote_model_catalog_cache_path(&paths, &host_id); - let remote_version = match pool.exec_login(&host_id, "openclaw --version").await { - Ok(r) => { - extract_version_from_text(&r.stdout).unwrap_or_else(|| r.stdout.trim().to_string()) + timed_async!("remote_refresh_model_catalog", { + let paths = resolve_paths(); + let cache_path = remote_model_catalog_cache_path(&paths, &host_id); + let remote_version = match pool.exec_login(&host_id, "openclaw --version").await { + Ok(r) => { + extract_version_from_text(&r.stdout).unwrap_or_else(|| r.stdout.trim().to_string()) + } + Err(_) => "unknown".into(), + }; + let cached = read_model_catalog_cache(&cache_path); + if let 
Some(selected) = select_catalog_from_cache(cached.as_ref(), &remote_version) { + return Ok(selected); } - Err(_) => "unknown".into(), - }; - let cached = read_model_catalog_cache(&cache_path); - if let Some(selected) = select_catalog_from_cache(cached.as_ref(), &remote_version) { - return Ok(selected); - } - let result = pool - .exec_login(&host_id, "openclaw models list --all --json --no-color") - .await; - if let Ok(r) = result { - if r.exit_code == 0 && !r.stdout.trim().is_empty() { - if let Some(catalog) = parse_model_catalog_from_cli_output(&r.stdout) { - let cache = ModelCatalogProviderCache { - cli_version: remote_version, - updated_at: unix_timestamp_secs(), - providers: catalog.clone(), - source: "openclaw models list --all --json".into(), - error: None, - }; - let _ = save_model_catalog_cache(&cache_path, &cache); - return Ok(catalog); + let result = pool + .exec_login(&host_id, "openclaw models list --all --json --no-color") + .await; + if let Ok(r) = result { + if r.exit_code == 0 && !r.stdout.trim().is_empty() { + if let Some(catalog) = parse_model_catalog_from_cli_output(&r.stdout) { + let cache = ModelCatalogProviderCache { + cli_version: remote_version, + updated_at: unix_timestamp_secs(), + providers: catalog.clone(), + source: "openclaw models list --all --json".into(), + error: None, + }; + let _ = save_model_catalog_cache(&cache_path, &cache); + return Ok(catalog); + } } } - } - if let Some(previous) = cached { - if !previous.providers.is_empty() && previous.error.is_none() { - return Ok(previous.providers); + if let Some(previous) = cached { + if !previous.providers.is_empty() && previous.error.is_none() { + return Ok(previous.providers); + } } - } - Err("Failed to load remote model catalog from openclaw CLI".into()) + Err("Failed to load remote model catalog from openclaw CLI".into()) + }) } diff --git a/src-tauri/src/commands/recipe_cmds.rs b/src-tauri/src/commands/recipe_cmds.rs index cf1711a7..38780798 100644 --- 
a/src-tauri/src/commands/recipe_cmds.rs +++ b/src-tauri/src/commands/recipe_cmds.rs @@ -5,7 +5,9 @@ use crate::recipe::load_recipes_with_fallback; #[tauri::command] pub fn list_recipes(source: Option) -> Result, String> { - let paths = resolve_paths(); - let default_path = paths.clawpal_dir.join("recipes").join("recipes.json"); - Ok(load_recipes_with_fallback(source, &default_path)) + timed_sync!("list_recipes", { + let paths = resolve_paths(); + let default_path = paths.clawpal_dir.join("recipes").join("recipes.json"); + Ok(load_recipes_with_fallback(source, &default_path)) + }) } diff --git a/src-tauri/src/commands/rescue.rs b/src-tauri/src/commands/rescue.rs index 554c4a99..347d2d50 100644 --- a/src-tauri/src/commands/rescue.rs +++ b/src-tauri/src/commands/rescue.rs @@ -23,294 +23,19 @@ pub async fn remote_manage_rescue_bot( profile: Option, rescue_port: Option, ) -> Result { - let action_label = action.clone(); - let profile_label = profile.clone().unwrap_or_else(|| "rescue".into()); - remote_log_helper_event( - &pool, - &host_id, - &format!( - "[remote:{host_id}] manage_rescue_bot start action={} profile={}", - action_label, profile_label - ), - ) - .await; - - let action = RescueBotAction::parse(&action)?; - let profile = profile - .as_deref() - .map(str::trim) - .filter(|p| !p.is_empty()) - .unwrap_or("rescue") - .to_string(); - - let main_port = match remote_resolve_openclaw_config_path(&pool, &host_id).await { - Ok(path) => match pool.sftp_read(&host_id, &path).await { - Ok(raw) => { - let cfg = clawpal_core::config::parse_config_json5(&raw); - clawpal_core::config::resolve_gateway_port(&cfg) - } - Err(_) => 18789, - }, - Err(_) => 18789, - }; - let (already_configured, existing_port) = - resolve_remote_rescue_profile_state(&pool, &host_id, &profile).await?; - let should_configure = !already_configured - || action == RescueBotAction::Set - || action == RescueBotAction::Activate; - let rescue_port = if should_configure { - rescue_port.unwrap_or_else(|| 
clawpal_core::doctor::suggest_rescue_port(main_port)) - } else { - existing_port - .or(rescue_port) - .unwrap_or_else(|| clawpal_core::doctor::suggest_rescue_port(main_port)) - }; - let min_recommended_port = main_port.saturating_add(20); - - if should_configure && matches!(action, RescueBotAction::Set | RescueBotAction::Activate) { - clawpal_core::doctor::ensure_rescue_port_spacing(main_port, rescue_port)?; - } - - if action == RescueBotAction::Status && !already_configured { - let runtime_state = infer_rescue_bot_runtime_state(false, None, None); - return Ok(RescueBotManageResult { - action: action.as_str().into(), - profile, - main_port, - rescue_port, - min_recommended_port, - configured: false, - active: false, - runtime_state, - was_already_configured: false, - commands: Vec::new(), - }); - } - - let plan = build_rescue_bot_command_plan(action, &profile, rescue_port, should_configure); - let mut commands = Vec::new(); - for command in plan { - let result = run_remote_rescue_bot_command(&pool, &host_id, command).await?; - if result.output.exit_code != 0 { - if action == RescueBotAction::Status { - commands.push(result); - break; - } - if is_rescue_cleanup_noop(action, &result.command, &result.output) { - commands.push(result); - continue; - } - if action == RescueBotAction::Activate - && is_gateway_restart_command(&result.command) - && is_gateway_restart_timeout(&result.output) - { - commands.push(result); - run_remote_gateway_restart_fallback(&pool, &host_id, &profile, &mut commands) - .await?; - continue; - } - return Err(command_failure_message(&result.command, &result.output)); - } - commands.push(result); - } - - let configured = match action { - RescueBotAction::Unset => false, - RescueBotAction::Activate | RescueBotAction::Set | RescueBotAction::Deactivate => true, - RescueBotAction::Status => already_configured, - }; - let mut status_output = commands - .iter() - .rev() - .find(|result| { - result - .command - .windows(2) - .any(|window| window[0] == 
"gateway" && window[1] == "status") - }) - .map(|result| &result.output); - if action == RescueBotAction::Activate { - let active_now = status_output - .map(|output| infer_rescue_bot_runtime_state(true, Some(output), None) == "active") - .unwrap_or(false); - if !active_now { - let probe_status = build_gateway_status_command(&profile, true); - if let Ok(result) = run_remote_rescue_bot_command(&pool, &host_id, probe_status).await { - commands.push(result); - status_output = commands - .iter() - .rev() - .find(|result| { - result - .command - .windows(2) - .any(|window| window[0] == "gateway" && window[1] == "status") - }) - .map(|result| &result.output); - } - } - } - let runtime_state = infer_rescue_bot_runtime_state(configured, status_output, None); - let active = runtime_state == "active"; - - let result = RescueBotManageResult { - action: action.as_str().into(), - profile, - main_port, - rescue_port, - min_recommended_port, - configured, - active, - runtime_state, - was_already_configured: already_configured, - commands, - }; - - remote_log_helper_event( - &pool, - &host_id, - &format!( - "[remote:{host_id}] manage_rescue_bot success action={} profile={} state={} configured={} active={}", - action_label, result.profile, result.runtime_state, result.configured, result.active - ), - ) - .await; - - Ok(result) -} - -#[tauri::command] -pub async fn remote_get_rescue_bot_status( - pool: State<'_, SshConnectionPool>, - host_id: String, - profile: Option, - rescue_port: Option, -) -> Result { - remote_manage_rescue_bot(pool, host_id, "status".to_string(), profile, rescue_port).await -} - -#[tauri::command] -pub async fn remote_diagnose_primary_via_rescue( - pool: State<'_, SshConnectionPool>, - host_id: String, - target_profile: Option, - rescue_profile: Option, -) -> Result { - let target_profile = normalize_profile_name(target_profile.as_deref(), "primary"); - let rescue_profile = normalize_profile_name(rescue_profile.as_deref(), "rescue"); - remote_log_helper_event( 
- &pool, - &host_id, - &format!( - "[remote:{host_id}] diagnose_primary_via_rescue start target={} rescue={}", - target_profile, rescue_profile - ), - ) - .await; - let result = - diagnose_primary_via_rescue_remote(&pool, &host_id, &target_profile, &rescue_profile).await; - match &result { - Ok(summary) => { - remote_log_helper_event( - &pool, - &host_id, - &format!( - "[remote:{host_id}] diagnose_primary_via_rescue success target={} rescue={} status={} issues={}", - summary.target_profile, - summary.rescue_profile, - summary.summary.status, - summary.issues.len() - ), - ) - .await; - } - Err(error) => { - remote_log_helper_event( - &pool, - &host_id, - &format!( - "[remote:{host_id}] diagnose_primary_via_rescue failed target={} rescue={} error={}", - target_profile, rescue_profile, error - ), - ) - .await; - } - } - result -} - -#[tauri::command] -pub async fn remote_repair_primary_via_rescue( - pool: State<'_, SshConnectionPool>, - host_id: String, - target_profile: Option, - rescue_profile: Option, - issue_ids: Option>, -) -> Result { - let target_profile = normalize_profile_name(target_profile.as_deref(), "primary"); - let rescue_profile = normalize_profile_name(rescue_profile.as_deref(), "rescue"); - let requested_issue_count = issue_ids.as_ref().map_or(0, Vec::len); - remote_log_helper_event( - &pool, - &host_id, - &format!( - "[remote:{host_id}] repair_primary_via_rescue start target={} rescue={} requested_issues={}", - target_profile, rescue_profile, requested_issue_count - ), - ) - .await; - let result = repair_primary_via_rescue_remote( - &pool, - &host_id, - &target_profile, - &rescue_profile, - issue_ids.unwrap_or_default(), - ) - .await; - match &result { - Ok(summary) => { - remote_log_helper_event( - &pool, - &host_id, - &format!( - "[remote:{host_id}] repair_primary_via_rescue success target={} rescue={} applied={} failed={} skipped={}", - summary.target_profile, - summary.rescue_profile, - summary.applied_issue_ids.len(), - 
summary.failed_issue_ids.len(), - summary.skipped_issue_ids.len() - ), - ) - .await; - } - Err(error) => { - remote_log_helper_event( - &pool, - &host_id, - &format!( - "[remote:{host_id}] repair_primary_via_rescue failed target={} rescue={} error={}", - target_profile, rescue_profile, error - ), - ) - .await; - } - } - result -} + timed_async!("remote_manage_rescue_bot", { + let action_label = action.clone(); + let profile_label = profile.clone().unwrap_or_else(|| "rescue".into()); + remote_log_helper_event( + &pool, + &host_id, + &format!( + "[remote:{host_id}] manage_rescue_bot start action={} profile={}", + action_label, profile_label + ), + ) + .await; -#[tauri::command] -pub async fn manage_rescue_bot( - action: String, - profile: Option, - rescue_port: Option, -) -> Result { - let action_label = action.clone(); - let profile_label = profile.clone().unwrap_or_else(|| "rescue".into()); - crate::logging::log_helper(&format!( - "[local] manage_rescue_bot start action={} profile={}", - action_label, profile_label - )); - let result = tauri::async_runtime::spawn_blocking(move || { let action = RescueBotAction::parse(&action)?; let profile = profile .as_deref() @@ -319,10 +44,18 @@ pub async fn manage_rescue_bot( .unwrap_or("rescue") .to_string(); - let main_port = read_openclaw_config(&resolve_paths()) - .map(|cfg| clawpal_core::doctor::resolve_gateway_port_from_config(&cfg)) - .unwrap_or(18789); - let (already_configured, existing_port) = resolve_local_rescue_profile_state(&profile)?; + let main_port = match remote_resolve_openclaw_config_path(&pool, &host_id).await { + Ok(path) => match pool.sftp_read(&host_id, &path).await { + Ok(raw) => { + let cfg = clawpal_core::config::parse_config_json5(&raw); + clawpal_core::config::resolve_gateway_port(&cfg) + } + Err(_) => 18789, + }, + Err(_) => 18789, + }; + let (already_configured, existing_port) = + resolve_remote_rescue_profile_state(&pool, &host_id, &profile).await?; let should_configure = !already_configured || 
action == RescueBotAction::Set || action == RescueBotAction::Activate; @@ -357,9 +90,8 @@ pub async fn manage_rescue_bot( let plan = build_rescue_bot_command_plan(action, &profile, rescue_port, should_configure); let mut commands = Vec::new(); - for command in plan { - let result = run_local_rescue_bot_command(command)?; + let result = run_remote_rescue_bot_command(&pool, &host_id, command).await?; if result.output.exit_code != 0 { if action == RescueBotAction::Status { commands.push(result); @@ -374,7 +106,8 @@ pub async fn manage_rescue_bot( && is_gateway_restart_timeout(&result.output) { commands.push(result); - run_local_gateway_restart_fallback(&profile, &mut commands)?; + run_remote_gateway_restart_fallback(&pool, &host_id, &profile, &mut commands) + .await?; continue; } return Err(command_failure_message(&result.command, &result.output)); @@ -403,7 +136,9 @@ pub async fn manage_rescue_bot( .unwrap_or(false); if !active_now { let probe_status = build_gateway_status_command(&profile, true); - if let Ok(result) = run_local_rescue_bot_command(probe_status) { + if let Ok(result) = + run_remote_rescue_bot_command(&pool, &host_id, probe_status).await + { commands.push(result); status_output = commands .iter() @@ -421,7 +156,7 @@ pub async fn manage_rescue_bot( let runtime_state = infer_rescue_bot_runtime_state(configured, status_output, None); let active = runtime_state == "active"; - Ok(RescueBotManageResult { + let result = RescueBotManageResult { action: action.as_str().into(), profile, main_port, @@ -432,12 +167,296 @@ pub async fn manage_rescue_bot( runtime_state, was_already_configured: already_configured, commands, - }) + }; + + remote_log_helper_event( + &pool, + &host_id, + &format!( + "[remote:{host_id}] manage_rescue_bot success action={} profile={} state={} configured={} active={}", + action_label, result.profile, result.runtime_state, result.configured, result.active + ), + ) + .await; + + Ok(result) + }) +} + +#[tauri::command] +pub async fn 
remote_get_rescue_bot_status( + pool: State<'_, SshConnectionPool>, + host_id: String, + profile: Option, + rescue_port: Option, +) -> Result { + timed_async!("remote_get_rescue_bot_status", { + remote_manage_rescue_bot(pool, host_id, "status".to_string(), profile, rescue_port).await + }) +} + +#[tauri::command] +pub async fn remote_diagnose_primary_via_rescue( + pool: State<'_, SshConnectionPool>, + host_id: String, + target_profile: Option, + rescue_profile: Option, +) -> Result { + timed_async!("remote_diagnose_primary_via_rescue", { + let target_profile = normalize_profile_name(target_profile.as_deref(), "primary"); + let rescue_profile = normalize_profile_name(rescue_profile.as_deref(), "rescue"); + remote_log_helper_event( + &pool, + &host_id, + &format!( + "[remote:{host_id}] diagnose_primary_via_rescue start target={} rescue={}", + target_profile, rescue_profile + ), + ) + .await; + let result = + diagnose_primary_via_rescue_remote(&pool, &host_id, &target_profile, &rescue_profile) + .await; + match &result { + Ok(summary) => { + remote_log_helper_event( + &pool, + &host_id, + &format!( + "[remote:{host_id}] diagnose_primary_via_rescue success target={} rescue={} status={} issues={}", + summary.target_profile, + summary.rescue_profile, + summary.summary.status, + summary.issues.len() + ), + ) + .await; + } + Err(error) => { + remote_log_helper_event( + &pool, + &host_id, + &format!( + "[remote:{host_id}] diagnose_primary_via_rescue failed target={} rescue={} error={}", + target_profile, rescue_profile, error + ), + ) + .await; + } + } + result + }) +} + +#[tauri::command] +pub async fn remote_repair_primary_via_rescue( + pool: State<'_, SshConnectionPool>, + host_id: String, + target_profile: Option, + rescue_profile: Option, + issue_ids: Option>, +) -> Result { + timed_async!("remote_repair_primary_via_rescue", { + let target_profile = normalize_profile_name(target_profile.as_deref(), "primary"); + let rescue_profile = 
normalize_profile_name(rescue_profile.as_deref(), "rescue"); + let requested_issue_count = issue_ids.as_ref().map_or(0, Vec::len); + remote_log_helper_event( + &pool, + &host_id, + &format!( + "[remote:{host_id}] repair_primary_via_rescue start target={} rescue={} requested_issues={}", + target_profile, rescue_profile, requested_issue_count + ), + ) + .await; + let result = repair_primary_via_rescue_remote( + &pool, + &host_id, + &target_profile, + &rescue_profile, + issue_ids.unwrap_or_default(), + ) + .await; + match &result { + Ok(summary) => { + remote_log_helper_event( + &pool, + &host_id, + &format!( + "[remote:{host_id}] repair_primary_via_rescue success target={} rescue={} applied={} failed={} skipped={}", + summary.target_profile, + summary.rescue_profile, + summary.applied_issue_ids.len(), + summary.failed_issue_ids.len(), + summary.skipped_issue_ids.len() + ), + ) + .await; + } + Err(error) => { + remote_log_helper_event( + &pool, + &host_id, + &format!( + "[remote:{host_id}] repair_primary_via_rescue failed target={} rescue={} error={}", + target_profile, rescue_profile, error + ), + ) + .await; + } + } + result }) - .await - .map_err(|e| e.to_string())?; +} + +#[tauri::command] +pub async fn manage_rescue_bot( + action: String, + profile: Option, + rescue_port: Option, +) -> Result { + timed_async!("manage_rescue_bot", { + let action_label = action.clone(); + let profile_label = profile.clone().unwrap_or_else(|| "rescue".into()); + crate::logging::log_helper(&format!( + "[local] manage_rescue_bot start action={} profile={}", + action_label, profile_label + )); + let result = tauri::async_runtime::spawn_blocking(move || { + let action = RescueBotAction::parse(&action)?; + let profile = profile + .as_deref() + .map(str::trim) + .filter(|p| !p.is_empty()) + .unwrap_or("rescue") + .to_string(); + + let main_port = read_openclaw_config(&resolve_paths()) + .map(|cfg| clawpal_core::doctor::resolve_gateway_port_from_config(&cfg)) + .unwrap_or(18789); + let 
(already_configured, existing_port) = resolve_local_rescue_profile_state(&profile)?; + let should_configure = !already_configured + || action == RescueBotAction::Set + || action == RescueBotAction::Activate; + let rescue_port = if should_configure { + rescue_port.unwrap_or_else(|| clawpal_core::doctor::suggest_rescue_port(main_port)) + } else { + existing_port + .or(rescue_port) + .unwrap_or_else(|| clawpal_core::doctor::suggest_rescue_port(main_port)) + }; + let min_recommended_port = main_port.saturating_add(20); + + if should_configure + && matches!(action, RescueBotAction::Set | RescueBotAction::Activate) + { + clawpal_core::doctor::ensure_rescue_port_spacing(main_port, rescue_port)?; + } + + if action == RescueBotAction::Status && !already_configured { + let runtime_state = infer_rescue_bot_runtime_state(false, None, None); + return Ok(RescueBotManageResult { + action: action.as_str().into(), + profile, + main_port, + rescue_port, + min_recommended_port, + configured: false, + active: false, + runtime_state, + was_already_configured: false, + commands: Vec::new(), + }); + } + + let plan = + build_rescue_bot_command_plan(action, &profile, rescue_port, should_configure); + let mut commands = Vec::new(); + + for command in plan { + let result = run_local_rescue_bot_command(command)?; + if result.output.exit_code != 0 { + if action == RescueBotAction::Status { + commands.push(result); + break; + } + if is_rescue_cleanup_noop(action, &result.command, &result.output) { + commands.push(result); + continue; + } + if action == RescueBotAction::Activate + && is_gateway_restart_command(&result.command) + && is_gateway_restart_timeout(&result.output) + { + commands.push(result); + run_local_gateway_restart_fallback(&profile, &mut commands)?; + continue; + } + return Err(command_failure_message(&result.command, &result.output)); + } + commands.push(result); + } + + let configured = match action { + RescueBotAction::Unset => false, + RescueBotAction::Activate | 
RescueBotAction::Set | RescueBotAction::Deactivate => { + true + } + RescueBotAction::Status => already_configured, + }; + let mut status_output = commands + .iter() + .rev() + .find(|result| { + result + .command + .windows(2) + .any(|window| window[0] == "gateway" && window[1] == "status") + }) + .map(|result| &result.output); + if action == RescueBotAction::Activate { + let active_now = status_output + .map(|output| { + infer_rescue_bot_runtime_state(true, Some(output), None) == "active" + }) + .unwrap_or(false); + if !active_now { + let probe_status = build_gateway_status_command(&profile, true); + if let Ok(result) = run_local_rescue_bot_command(probe_status) { + commands.push(result); + status_output = commands + .iter() + .rev() + .find(|result| { + result + .command + .windows(2) + .any(|window| window[0] == "gateway" && window[1] == "status") + }) + .map(|result| &result.output); + } + } + } + let runtime_state = infer_rescue_bot_runtime_state(configured, status_output, None); + let active = runtime_state == "active"; + + Ok(RescueBotManageResult { + action: action.as_str().into(), + profile, + main_port, + rescue_port, + min_recommended_port, + configured, + active, + runtime_state, + was_already_configured: already_configured, + commands, + }) + }) + .await + .map_err(|e| e.to_string())?; - match &result { + match &result { Ok(summary) => crate::logging::log_helper(&format!( "[local] manage_rescue_bot success action={} profile={} state={} configured={} active={}", action_label, summary.profile, summary.runtime_state, summary.configured, summary.active @@ -448,7 +467,8 @@ pub async fn manage_rescue_bot( )), } - result + result + }) } #[tauri::command] @@ -456,7 +476,9 @@ pub async fn get_rescue_bot_status( profile: Option, rescue_port: Option, ) -> Result { - manage_rescue_bot("status".to_string(), profile, rescue_port).await + timed_async!("get_rescue_bot_status", { + manage_rescue_bot("status".to_string(), profile, rescue_port).await + }) } 
#[tauri::command] @@ -464,35 +486,37 @@ pub async fn diagnose_primary_via_rescue( target_profile: Option, rescue_profile: Option, ) -> Result { - let target_label = normalize_profile_name(target_profile.as_deref(), "primary"); - let rescue_label = normalize_profile_name(rescue_profile.as_deref(), "rescue"); - crate::logging::log_helper(&format!( - "[local] diagnose_primary_via_rescue start target={} rescue={}", - target_label, rescue_label - )); - let result = tauri::async_runtime::spawn_blocking(move || { - let target_profile = normalize_profile_name(target_profile.as_deref(), "primary"); - let rescue_profile = normalize_profile_name(rescue_profile.as_deref(), "rescue"); - diagnose_primary_via_rescue_local(&target_profile, &rescue_profile) - }) - .await - .map_err(|e| e.to_string())?; + timed_async!("diagnose_primary_via_rescue", { + let target_label = normalize_profile_name(target_profile.as_deref(), "primary"); + let rescue_label = normalize_profile_name(rescue_profile.as_deref(), "rescue"); + crate::logging::log_helper(&format!( + "[local] diagnose_primary_via_rescue start target={} rescue={}", + target_label, rescue_label + )); + let result = tauri::async_runtime::spawn_blocking(move || { + let target_profile = normalize_profile_name(target_profile.as_deref(), "primary"); + let rescue_profile = normalize_profile_name(rescue_profile.as_deref(), "rescue"); + diagnose_primary_via_rescue_local(&target_profile, &rescue_profile) + }) + .await + .map_err(|e| e.to_string())?; - match &result { - Ok(summary) => crate::logging::log_helper(&format!( + match &result { + Ok(summary) => crate::logging::log_helper(&format!( "[local] diagnose_primary_via_rescue success target={} rescue={} status={} issues={}", summary.target_profile, summary.rescue_profile, summary.summary.status, summary.issues.len() )), - Err(error) => crate::logging::log_helper(&format!( - "[local] diagnose_primary_via_rescue failed target={} rescue={} error={}", - target_label, rescue_label, error - )), - 
} + Err(error) => crate::logging::log_helper(&format!( + "[local] diagnose_primary_via_rescue failed target={} rescue={} error={}", + target_label, rescue_label, error + )), + } - result + result + }) } #[tauri::command] @@ -501,26 +525,27 @@ pub async fn repair_primary_via_rescue( rescue_profile: Option, issue_ids: Option>, ) -> Result { - let target_label = normalize_profile_name(target_profile.as_deref(), "primary"); - let rescue_label = normalize_profile_name(rescue_profile.as_deref(), "rescue"); - let requested_issue_count = issue_ids.as_ref().map_or(0, Vec::len); - crate::logging::log_helper(&format!( - "[local] repair_primary_via_rescue start target={} rescue={} requested_issues={}", - target_label, rescue_label, requested_issue_count - )); - let result = tauri::async_runtime::spawn_blocking(move || { - let target_profile = normalize_profile_name(target_profile.as_deref(), "primary"); - let rescue_profile = normalize_profile_name(rescue_profile.as_deref(), "rescue"); - repair_primary_via_rescue_local( - &target_profile, - &rescue_profile, - issue_ids.unwrap_or_default(), - ) - }) - .await - .map_err(|e| e.to_string())?; + timed_async!("repair_primary_via_rescue", { + let target_label = normalize_profile_name(target_profile.as_deref(), "primary"); + let rescue_label = normalize_profile_name(rescue_profile.as_deref(), "rescue"); + let requested_issue_count = issue_ids.as_ref().map_or(0, Vec::len); + crate::logging::log_helper(&format!( + "[local] repair_primary_via_rescue start target={} rescue={} requested_issues={}", + target_label, rescue_label, requested_issue_count + )); + let result = tauri::async_runtime::spawn_blocking(move || { + let target_profile = normalize_profile_name(target_profile.as_deref(), "primary"); + let rescue_profile = normalize_profile_name(rescue_profile.as_deref(), "rescue"); + repair_primary_via_rescue_local( + &target_profile, + &rescue_profile, + issue_ids.unwrap_or_default(), + ) + }) + .await + .map_err(|e| e.to_string())?; - 
match &result { + match &result { Ok(summary) => crate::logging::log_helper(&format!( "[local] repair_primary_via_rescue success target={} rescue={} applied={} failed={} skipped={}", summary.target_profile, @@ -535,5 +560,6 @@ pub async fn repair_primary_via_rescue( )), } - result + result + }) } diff --git a/src-tauri/src/commands/sessions.rs b/src-tauri/src/commands/sessions.rs index 4d4f4308..2f83d051 100644 --- a/src-tauri/src/commands/sessions.rs +++ b/src-tauri/src/commands/sessions.rs @@ -5,81 +5,83 @@ pub async fn remote_analyze_sessions( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result, String> { - // Run a shell script via SSH that scans session files and outputs JSON. - // This is MUCH faster than doing per-file SFTP reads. - let script = r#" -setopt nonomatch 2>/dev/null; shopt -s nullglob 2>/dev/null -cd ~/.openclaw/agents 2>/dev/null || { echo '[]'; exit 0; } -now=$(date +%s) -sep="" -echo "[" -for agent_dir in */; do - [ -d "$agent_dir" ] || continue - agent="${agent_dir%/}" - # Sanitize agent name for JSON (escape backslash then double-quote) - safe_agent=$(printf '%s' "$agent" | sed 's/\\/\\\\/g; s/"/\\"/g') - for kind in sessions sessions_archive; do - dir="$agent_dir$kind" - [ -d "$dir" ] || continue - for f in "$dir"/*.jsonl; do - [ -f "$f" ] || continue - fname=$(basename "$f" .jsonl) - safe_fname=$(printf '%s' "$fname" | sed 's/\\/\\\\/g; s/"/\\"/g') - size=$(wc -c < "$f" 2>/dev/null | tr -d ' ') - msgs=$(grep -c '"type":"message"' "$f" 2>/dev/null || true) - [ -z "$msgs" ] && msgs=0 - user_msgs=$(grep -c '"role":"user"' "$f" 2>/dev/null || true) - [ -z "$user_msgs" ] && user_msgs=0 - asst_msgs=$(grep -c '"role":"assistant"' "$f" 2>/dev/null || true) - [ -z "$asst_msgs" ] && asst_msgs=0 - mtime=$(stat -c %Y "$f" 2>/dev/null || stat -f %m "$f" 2>/dev/null || echo 0) - age_days=$(( (now - mtime) / 86400 )) - printf 
'%s{"agent":"%s","sessionId":"%s","sizeBytes":%s,"messageCount":%s,"userMessageCount":%s,"assistantMessageCount":%s,"ageDays":%s,"kind":"%s"}' \ - "$sep" "$safe_agent" "$safe_fname" "$size" "$msgs" "$user_msgs" "$asst_msgs" "$age_days" "$kind" - sep="," + timed_async!("remote_analyze_sessions", { + // Run a shell script via SSH that scans session files and outputs JSON. + // This is MUCH faster than doing per-file SFTP reads. + let script = r#" + setopt nonomatch 2>/dev/null; shopt -s nullglob 2>/dev/null + cd ~/.openclaw/agents 2>/dev/null || { echo '[]'; exit 0; } + now=$(date +%s) + sep="" + echo "[" + for agent_dir in */; do + [ -d "$agent_dir" ] || continue + agent="${agent_dir%/}" + # Sanitize agent name for JSON (escape backslash then double-quote) + safe_agent=$(printf '%s' "$agent" | sed 's/\\/\\\\/g; s/"/\\"/g') + for kind in sessions sessions_archive; do + dir="$agent_dir$kind" + [ -d "$dir" ] || continue + for f in "$dir"/*.jsonl; do + [ -f "$f" ] || continue + fname=$(basename "$f" .jsonl) + safe_fname=$(printf '%s' "$fname" | sed 's/\\/\\\\/g; s/"/\\"/g') + size=$(wc -c < "$f" 2>/dev/null | tr -d ' ') + msgs=$(grep -c '"type":"message"' "$f" 2>/dev/null || true) + [ -z "$msgs" ] && msgs=0 + user_msgs=$(grep -c '"role":"user"' "$f" 2>/dev/null || true) + [ -z "$user_msgs" ] && user_msgs=0 + asst_msgs=$(grep -c '"role":"assistant"' "$f" 2>/dev/null || true) + [ -z "$asst_msgs" ] && asst_msgs=0 + mtime=$(stat -c %Y "$f" 2>/dev/null || stat -f %m "$f" 2>/dev/null || echo 0) + age_days=$(( (now - mtime) / 86400 )) + printf '%s{"agent":"%s","sessionId":"%s","sizeBytes":%s,"messageCount":%s,"userMessageCount":%s,"assistantMessageCount":%s,"ageDays":%s,"kind":"%s"}' \ + "$sep" "$safe_agent" "$safe_fname" "$size" "$msgs" "$user_msgs" "$asst_msgs" "$age_days" "$kind" + sep="," + done + done done - done -done -echo "]" -"#; + echo "]" + "#; - let result = pool.exec(&host_id, script).await?; - if result.exit_code != 0 && result.stdout.trim().is_empty() { - // No 
agents directory — return empty - return Ok(Vec::new()); - } + let result = pool.exec(&host_id, script).await?; + if result.exit_code != 0 && result.stdout.trim().is_empty() { + // No agents directory — return empty + return Ok(Vec::new()); + } - let core = clawpal_core::sessions::parse_session_analysis(result.stdout.trim())?; - Ok(core - .into_iter() - .map(|agent| AgentSessionAnalysis { - agent: agent.agent, - total_files: agent.total_files, - total_size_bytes: agent.total_size_bytes, - empty_count: agent.empty_count, - low_value_count: agent.low_value_count, - valuable_count: agent.valuable_count, - sessions: agent - .sessions - .into_iter() - .map(|session| SessionAnalysis { - agent: session.agent, - session_id: session.session_id, - file_path: session.file_path, - size_bytes: session.size_bytes, - message_count: session.message_count, - user_message_count: session.user_message_count, - assistant_message_count: session.assistant_message_count, - last_activity: session.last_activity, - age_days: session.age_days, - total_tokens: session.total_tokens, - model: session.model, - category: session.category, - kind: session.kind, - }) - .collect(), - }) - .collect()) + let core = clawpal_core::sessions::parse_session_analysis(result.stdout.trim())?; + Ok(core + .into_iter() + .map(|agent| AgentSessionAnalysis { + agent: agent.agent, + total_files: agent.total_files, + total_size_bytes: agent.total_size_bytes, + empty_count: agent.empty_count, + low_value_count: agent.low_value_count, + valuable_count: agent.valuable_count, + sessions: agent + .sessions + .into_iter() + .map(|session| SessionAnalysis { + agent: session.agent, + session_id: session.session_id, + file_path: session.file_path, + size_bytes: session.size_bytes, + message_count: session.message_count, + user_message_count: session.user_message_count, + assistant_message_count: session.assistant_message_count, + last_activity: session.last_activity, + age_days: session.age_days, + total_tokens: 
session.total_tokens, + model: session.model, + category: session.category, + kind: session.kind, + }) + .collect(), + }) + .collect()) + }) } #[tauri::command] @@ -89,39 +91,41 @@ pub async fn remote_delete_sessions_by_ids( agent_id: String, session_ids: Vec, ) -> Result { - if agent_id.trim().is_empty() || agent_id.contains("..") || agent_id.contains('/') { - return Err("invalid agent id".into()); - } - - let mut deleted = 0usize; - for sid in &session_ids { - if sid.contains("..") || sid.contains('/') || sid.contains('\\') { - continue; + timed_async!("remote_delete_sessions_by_ids", { + if agent_id.trim().is_empty() || agent_id.contains("..") || agent_id.contains('/') { + return Err("invalid agent id".into()); } - // Delete from both sessions and sessions_archive - let cmd = format!( - "rm -f ~/.openclaw/agents/{agent}/sessions/{sid}.jsonl ~/.openclaw/agents/{agent}/sessions/{sid}-topic-*.jsonl ~/.openclaw/agents/{agent}/sessions_archive/{sid}.jsonl ~/.openclaw/agents/{agent}/sessions_archive/{sid}-topic-*.jsonl 2>/dev/null; echo ok", - agent = agent_id, sid = sid - ); - if let Ok(r) = pool.exec(&host_id, &cmd).await { - if r.stdout.trim() == "ok" { - deleted += 1; + + let mut deleted = 0usize; + for sid in &session_ids { + if sid.contains("..") || sid.contains('/') || sid.contains('\\') { + continue; + } + // Delete from both sessions and sessions_archive + let cmd = format!( + "rm -f ~/.openclaw/agents/{agent}/sessions/{sid}.jsonl ~/.openclaw/agents/{agent}/sessions/{sid}-topic-*.jsonl ~/.openclaw/agents/{agent}/sessions_archive/{sid}.jsonl ~/.openclaw/agents/{agent}/sessions_archive/{sid}-topic-*.jsonl 2>/dev/null; echo ok", + agent = agent_id, sid = sid + ); + if let Ok(r) = pool.exec(&host_id, &cmd).await { + if r.stdout.trim() == "ok" { + deleted += 1; + } } } - } - // Clean up sessions.json - let sessions_json_path = format!("~/.openclaw/agents/{}/sessions/sessions.json", agent_id); - if let Ok(content) = pool.sftp_read(&host_id, 
&sessions_json_path).await { - let ids: Vec<&str> = session_ids.iter().map(String::as_str).collect(); - if let Ok(updated) = clawpal_core::sessions::filter_sessions_by_ids(&content, &ids) { - let _ = pool - .sftp_write(&host_id, &sessions_json_path, &updated) - .await; + // Clean up sessions.json + let sessions_json_path = format!("~/.openclaw/agents/{}/sessions/sessions.json", agent_id); + if let Ok(content) = pool.sftp_read(&host_id, &sessions_json_path).await { + let ids: Vec<&str> = session_ids.iter().map(String::as_str).collect(); + if let Ok(updated) = clawpal_core::sessions::filter_sessions_by_ids(&content, &ids) { + let _ = pool + .sftp_write(&host_id, &sessions_json_path, &updated) + .await; + } } - } - Ok(deleted) + Ok(deleted) + }) } #[tauri::command] @@ -129,41 +133,43 @@ pub async fn remote_list_session_files( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result, String> { - let script = r#" -setopt nonomatch 2>/dev/null; shopt -s nullglob 2>/dev/null -cd ~/.openclaw/agents 2>/dev/null || { echo "[]"; exit 0; } -sep="" -echo "[" -for agent_dir in */; do - [ -d "$agent_dir" ] || continue - agent="${agent_dir%/}" - safe_agent=$(printf '%s' "$agent" | sed 's/\\/\\\\/g; s/"/\\"/g') - for kind in sessions sessions_archive; do - dir="$agent_dir$kind" - [ -d "$dir" ] || continue - for f in "$dir"/*.jsonl; do - [ -f "$f" ] || continue - size=$(wc -c < "$f" 2>/dev/null | tr -d ' ') - safe_path=$(printf '%s' "$f" | sed 's/\\/\\\\/g; s/"/\\"/g') - printf '%s{"agent":"%s","kind":"%s","path":"%s","sizeBytes":%s}' "$sep" "$safe_agent" "$kind" "$safe_path" "$size" - sep="," + timed_async!("remote_list_session_files", { + let script = r#" + setopt nonomatch 2>/dev/null; shopt -s nullglob 2>/dev/null + cd ~/.openclaw/agents 2>/dev/null || { echo "[]"; exit 0; } + sep="" + echo "[" + for agent_dir in */; do + [ -d "$agent_dir" ] || continue + agent="${agent_dir%/}" + safe_agent=$(printf '%s' "$agent" | sed 's/\\/\\\\/g; s/"/\\"/g') + for kind in sessions 
sessions_archive; do + dir="$agent_dir$kind" + [ -d "$dir" ] || continue + for f in "$dir"/*.jsonl; do + [ -f "$f" ] || continue + size=$(wc -c < "$f" 2>/dev/null | tr -d ' ') + safe_path=$(printf '%s' "$f" | sed 's/\\/\\\\/g; s/"/\\"/g') + printf '%s{"agent":"%s","kind":"%s","path":"%s","sizeBytes":%s}' "$sep" "$safe_agent" "$kind" "$safe_path" "$size" + sep="," + done + done done - done -done -echo "]" -"#; - let result = pool.exec(&host_id, script).await?; - let core = clawpal_core::sessions::parse_session_file_list(result.stdout.trim())?; - Ok(core - .into_iter() - .map(|entry| SessionFile { - path: entry.path, - relative_path: entry.relative_path, - agent: entry.agent, - kind: entry.kind, - size_bytes: entry.size_bytes, - }) - .collect()) + echo "]" + "#; + let result = pool.exec(&host_id, script).await?; + let core = clawpal_core::sessions::parse_session_file_list(result.stdout.trim())?; + Ok(core + .into_iter() + .map(|entry| SessionFile { + path: entry.path, + relative_path: entry.relative_path, + agent: entry.agent, + kind: entry.kind, + size_bytes: entry.size_bytes, + }) + .collect()) + }) } #[tauri::command] @@ -173,40 +179,42 @@ pub async fn remote_preview_session( agent_id: String, session_id: String, ) -> Result, String> { - if agent_id.contains("..") - || agent_id.contains('/') - || session_id.contains("..") - || session_id.contains('/') - { - return Err("invalid id".into()); - } - let jsonl_name = format!("{}.jsonl", session_id); + timed_async!("remote_preview_session", { + if agent_id.contains("..") + || agent_id.contains('/') + || session_id.contains("..") + || session_id.contains('/') + { + return Err("invalid id".into()); + } + let jsonl_name = format!("{}.jsonl", session_id); - // Try sessions dir first, then archive - let paths = [ - format!("~/.openclaw/agents/{}/sessions/{}", agent_id, jsonl_name), - format!( - "~/.openclaw/agents/{}/sessions_archive/{}", - agent_id, jsonl_name - ), - ]; + // Try sessions dir first, then archive + let paths 
= [ + format!("~/.openclaw/agents/{}/sessions/{}", agent_id, jsonl_name), + format!( + "~/.openclaw/agents/{}/sessions_archive/{}", + agent_id, jsonl_name + ), + ]; - let mut content = String::new(); - for path in &paths { - if let Ok(c) = pool.sftp_read(&host_id, path).await { - content = c; - break; + let mut content = String::new(); + for path in &paths { + if let Ok(c) = pool.sftp_read(&host_id, path).await { + content = c; + break; + } + } + if content.is_empty() { + return Ok(Vec::new()); } - } - if content.is_empty() { - return Ok(Vec::new()); - } - let parsed = clawpal_core::sessions::parse_session_preview(&content)?; - Ok(parsed - .into_iter() - .map(|m| serde_json::json!({ "role": m.role, "content": m.content })) - .collect()) + let parsed = clawpal_core::sessions::parse_session_preview(&content)?; + Ok(parsed + .into_iter() + .map(|m| serde_json::json!({ "role": m.role, "content": m.content })) + .collect()) + }) } #[tauri::command] @@ -214,44 +222,52 @@ pub async fn remote_clear_all_sessions( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - let script = r#" -setopt nonomatch 2>/dev/null; shopt -s nullglob 2>/dev/null -count=0 -cd ~/.openclaw/agents 2>/dev/null || { echo "0"; exit 0; } -for agent_dir in */; do - for kind in sessions sessions_archive; do - dir="$agent_dir$kind" - [ -d "$dir" ] || continue - for f in "$dir"/*; do - [ -f "$f" ] || continue - rm -f "$f" && count=$((count + 1)) + timed_async!("remote_clear_all_sessions", { + let script = r#" + setopt nonomatch 2>/dev/null; shopt -s nullglob 2>/dev/null + count=0 + cd ~/.openclaw/agents 2>/dev/null || { echo "0"; exit 0; } + for agent_dir in */; do + for kind in sessions sessions_archive; do + dir="$agent_dir$kind" + [ -d "$dir" ] || continue + for f in "$dir"/*; do + [ -f "$f" ] || continue + rm -f "$f" && count=$((count + 1)) + done + done done - done -done -echo "$count" -"#; - let result = pool.exec(&host_id, script).await?; - let count: usize = 
result.stdout.trim().parse().unwrap_or(0); - Ok(count) + echo "$count" + "#; + let result = pool.exec(&host_id, script).await?; + let count: usize = result.stdout.trim().parse().unwrap_or(0); + Ok(count) + }) } #[tauri::command] pub fn list_session_files() -> Result, String> { - let paths = resolve_paths(); - list_session_files_detailed(&paths.base_dir) + timed_sync!("list_session_files", { + let paths = resolve_paths(); + list_session_files_detailed(&paths.base_dir) + }) } #[tauri::command] pub fn clear_all_sessions() -> Result { - let paths = resolve_paths(); - clear_agent_and_global_sessions(&paths.base_dir.join("agents"), None) + timed_sync!("clear_all_sessions", { + let paths = resolve_paths(); + clear_agent_and_global_sessions(&paths.base_dir.join("agents"), None) + }) } #[tauri::command] pub async fn analyze_sessions() -> Result, String> { - tauri::async_runtime::spawn_blocking(|| analyze_sessions_sync()) - .await - .map_err(|e| e.to_string())? + timed_async!("analyze_sessions", { + tauri::async_runtime::spawn_blocking(|| analyze_sessions_sync()) + .await + .map_err(|e| e.to_string())? + }) } #[tauri::command] @@ -259,16 +275,20 @@ pub async fn delete_sessions_by_ids( agent_id: String, session_ids: Vec, ) -> Result { - tauri::async_runtime::spawn_blocking(move || { - delete_sessions_by_ids_sync(&agent_id, &session_ids) + timed_async!("delete_sessions_by_ids", { + tauri::async_runtime::spawn_blocking(move || { + delete_sessions_by_ids_sync(&agent_id, &session_ids) + }) + .await + .map_err(|e| e.to_string())? }) - .await - .map_err(|e| e.to_string())? } #[tauri::command] pub async fn preview_session(agent_id: String, session_id: String) -> Result, String> { - tauri::async_runtime::spawn_blocking(move || preview_session_sync(&agent_id, &session_id)) - .await - .map_err(|e| e.to_string())? + timed_async!("preview_session", { + tauri::async_runtime::spawn_blocking(move || preview_session_sync(&agent_id, &session_id)) + .await + .map_err(|e| e.to_string())? 
+ }) } diff --git a/src-tauri/src/commands/ssh.rs b/src-tauri/src/commands/ssh.rs index ca8d8519..1f8152c1 100644 --- a/src-tauri/src/commands/ssh.rs +++ b/src-tauri/src/commands/ssh.rs @@ -12,30 +12,36 @@ pub(crate) fn read_hosts_from_registry() -> Result, String> { #[tauri::command] pub fn list_ssh_hosts() -> Result, String> { - read_hosts_from_registry() + timed_sync!("list_ssh_hosts", { read_hosts_from_registry() }) } #[tauri::command] pub fn list_ssh_config_hosts() -> Result, String> { - let Some(path) = ssh_config_path() else { - return Ok(Vec::new()); - }; - if !path.exists() { - return Ok(Vec::new()); - } - let data = - fs::read_to_string(&path).map_err(|e| format!("Failed to read {}: {e}", path.display()))?; - Ok(clawpal_core::ssh::config::parse_ssh_config_hosts(&data)) + timed_sync!("list_ssh_config_hosts", { + let Some(path) = ssh_config_path() else { + return Ok(Vec::new()); + }; + if !path.exists() { + return Ok(Vec::new()); + } + let data = fs::read_to_string(&path) + .map_err(|e| format!("Failed to read {}: {e}", path.display()))?; + Ok(clawpal_core::ssh::config::parse_ssh_config_hosts(&data)) + }) } #[tauri::command] pub fn upsert_ssh_host(host: SshHostConfig) -> Result { - clawpal_core::ssh::registry::upsert_ssh_host(host) + timed_sync!("upsert_ssh_host", { + clawpal_core::ssh::registry::upsert_ssh_host(host) + }) } #[tauri::command] pub fn delete_ssh_host(host_id: String) -> Result { - clawpal_core::ssh::registry::delete_ssh_host(&host_id) + timed_sync!("delete_ssh_host", { + clawpal_core::ssh::registry::delete_ssh_host(&host_id) + }) } // --------------------------------------------------------------------------- @@ -194,81 +200,83 @@ pub async fn ssh_connect( host_id: String, app: AppHandle, ) -> Result { - crate::commands::logs::log_dev(format!("[dev][ssh_connect] begin host_id={host_id}")); - // If already connected and handle is alive, reuse - if pool.is_connected(&host_id).await { - crate::commands::logs::log_dev(format!( - 
"[dev][ssh_connect] reuse existing connection host_id={host_id}" - )); - let _ = success_ssh_diagnostic( - &app, - SshStage::SessionOpen, - SshIntent::Connect, - "SSH session already connected", - SshDiagnosticSuccessTrigger::ConnectReuse, - ); - return Ok(true); - } - let hosts = read_hosts_from_registry().map_err(|error| { - make_ssh_command_error(&app, SshStage::ResolveHostConfig, SshIntent::Connect, error) - })?; - if hosts.is_empty() { - crate::commands::logs::log_dev("[dev][ssh_connect] host registry is empty"); - } - let host = hosts.into_iter().find(|h| h.id == host_id).ok_or_else(|| { - let mut ids = Vec::new(); - for h in read_hosts_from_registry().unwrap_or_default() { - ids.push(h.id); + timed_async!("ssh_connect", { + crate::commands::logs::log_dev(format!("[dev][ssh_connect] begin host_id={host_id}")); + // If already connected and handle is alive, reuse + if pool.is_connected(&host_id).await { + crate::commands::logs::log_dev(format!( + "[dev][ssh_connect] reuse existing connection host_id={host_id}" + )); + let _ = success_ssh_diagnostic( + &app, + SshStage::SessionOpen, + SshIntent::Connect, + "SSH session already connected", + SshDiagnosticSuccessTrigger::ConnectReuse, + ); + return Ok(true); } - crate::commands::logs::log_dev(format!( - "[dev][ssh_connect] no host found host_id={host_id} known={ids:?}" - )); - make_ssh_command_error( - &app, - SshStage::ResolveHostConfig, - SshIntent::Connect, - format!("No SSH host config with id: {host_id}"), - ) - })?; - // If the host has a stored passphrase, use it directly - let connect_result = if let Some(ref pp) = host.passphrase { - if !pp.is_empty() { + let hosts = read_hosts_from_registry().map_err(|error| { + make_ssh_command_error(&app, SshStage::ResolveHostConfig, SshIntent::Connect, error) + })?; + if hosts.is_empty() { + crate::commands::logs::log_dev("[dev][ssh_connect] host registry is empty"); + } + let host = hosts.into_iter().find(|h| h.id == host_id).ok_or_else(|| { + let mut ids = 
Vec::new(); + for h in read_hosts_from_registry().unwrap_or_default() { + ids.push(h.id); + } crate::commands::logs::log_dev(format!( - "[dev][ssh_connect] using stored passphrase for host_id={host_id}" + "[dev][ssh_connect] no host found host_id={host_id} known={ids:?}" )); - pool.connect_with_passphrase(&host, Some(pp.as_str())).await + make_ssh_command_error( + &app, + SshStage::ResolveHostConfig, + SshIntent::Connect, + format!("No SSH host config with id: {host_id}"), + ) + })?; + // If the host has a stored passphrase, use it directly + let connect_result = if let Some(ref pp) = host.passphrase { + if !pp.is_empty() { + crate::commands::logs::log_dev(format!( + "[dev][ssh_connect] using stored passphrase for host_id={host_id}" + )); + pool.connect_with_passphrase(&host, Some(pp.as_str())).await + } else { + pool.connect(&host).await + } } else { pool.connect(&host).await + }; + if let Err(error) = connect_result { + crate::commands::logs::log_dev(format!( + "[dev][ssh_connect] failed host_id={} host={} user={} port={} auth_method={} error={}", + host_id, host.host, host.username, host.port, host.auth_method, error + )); + let message = format!("ssh connect failed: {error}"); + let mut diagnostic = from_any_error( + SshStage::TcpReachability, + SshIntent::Connect, + message.clone(), + ); + if let Some(code) = diagnostic.error_code { + diagnostic.stage = ssh_stage_for_error_code(code); + } + emit_ssh_diagnostic(&app, &diagnostic); + return Err(message); } - } else { - pool.connect(&host).await - }; - if let Err(error) = connect_result { - crate::commands::logs::log_dev(format!( - "[dev][ssh_connect] failed host_id={} host={} user={} port={} auth_method={} error={}", - host_id, host.host, host.username, host.port, host.auth_method, error - )); - let message = format!("ssh connect failed: {error}"); - let mut diagnostic = from_any_error( - SshStage::TcpReachability, + crate::commands::logs::log_dev(format!("[dev][ssh_connect] success host_id={host_id}")); + let _ 
= success_ssh_diagnostic( + &app, + SshStage::SessionOpen, SshIntent::Connect, - message.clone(), + "SSH connection established", + SshDiagnosticSuccessTrigger::ConnectEstablished, ); - if let Some(code) = diagnostic.error_code { - diagnostic.stage = ssh_stage_for_error_code(code); - } - emit_ssh_diagnostic(&app, &diagnostic); - return Err(message); - } - crate::commands::logs::log_dev(format!("[dev][ssh_connect] success host_id={host_id}")); - let _ = success_ssh_diagnostic( - &app, - SshStage::SessionOpen, - SshIntent::Connect, - "SSH connection established", - SshDiagnosticSuccessTrigger::ConnectEstablished, - ); - Ok(true) + Ok(true) + }) } #[tauri::command] @@ -278,74 +286,78 @@ pub async fn ssh_connect_with_passphrase( passphrase: String, app: AppHandle, ) -> Result { - crate::commands::logs::log_dev(format!( - "[dev][ssh_connect_with_passphrase] begin host_id={host_id}" - )); - if pool.is_connected(&host_id).await { + timed_async!("ssh_connect_with_passphrase", { crate::commands::logs::log_dev(format!( - "[dev][ssh_connect_with_passphrase] reuse existing connection host_id={host_id}" + "[dev][ssh_connect_with_passphrase] begin host_id={host_id}" )); - let _ = success_ssh_diagnostic( - &app, - SshStage::SessionOpen, - SshIntent::Connect, - "SSH session already connected", - SshDiagnosticSuccessTrigger::ConnectReuse, - ); - return Ok(true); - } - let hosts = read_hosts_from_registry().map_err(|error| { - make_ssh_command_error(&app, SshStage::ResolveHostConfig, SshIntent::Connect, error) - })?; - if hosts.is_empty() { - crate::commands::logs::log_dev("[dev][ssh_connect_with_passphrase] host registry is empty"); - } - let host = hosts.into_iter().find(|h| h.id == host_id).ok_or_else(|| { - let mut ids = Vec::new(); - for h in read_hosts_from_registry().unwrap_or_default() { - ids.push(h.id); + if pool.is_connected(&host_id).await { + crate::commands::logs::log_dev(format!( + "[dev][ssh_connect_with_passphrase] reuse existing connection host_id={host_id}" + )); 
+ let _ = success_ssh_diagnostic( + &app, + SshStage::SessionOpen, + SshIntent::Connect, + "SSH session already connected", + SshDiagnosticSuccessTrigger::ConnectReuse, + ); + return Ok(true); + } + let hosts = read_hosts_from_registry().map_err(|error| { + make_ssh_command_error(&app, SshStage::ResolveHostConfig, SshIntent::Connect, error) + })?; + if hosts.is_empty() { + crate::commands::logs::log_dev( + "[dev][ssh_connect_with_passphrase] host registry is empty", + ); + } + let host = hosts.into_iter().find(|h| h.id == host_id).ok_or_else(|| { + let mut ids = Vec::new(); + for h in read_hosts_from_registry().unwrap_or_default() { + ids.push(h.id); + } + crate::commands::logs::log_dev(format!( + "[dev][ssh_connect_with_passphrase] no host found host_id={host_id} known={ids:?}" + )); + make_ssh_command_error( + &app, + SshStage::ResolveHostConfig, + SshIntent::Connect, + format!("No SSH host config with id: {host_id}"), + ) + })?; + if let Err(error) = pool + .connect_with_passphrase(&host, Some(passphrase.as_str())) + .await + { + crate::commands::logs::log_dev(format!( + "[dev][ssh_connect_with_passphrase] failed host_id={} host={} user={} port={} auth_method={} error={}", + host_id, + host.host, + host.username, + host.port, + host.auth_method, + error + )); + return Err(make_ssh_command_error( + &app, + SshStage::AuthNegotiation, + SshIntent::Connect, + format!("ssh connect failed: {error}"), + )); } crate::commands::logs::log_dev(format!( - "[dev][ssh_connect_with_passphrase] no host found host_id={host_id} known={ids:?}" - )); - make_ssh_command_error( - &app, - SshStage::ResolveHostConfig, - SshIntent::Connect, - format!("No SSH host config with id: {host_id}"), - ) - })?; - if let Err(error) = pool - .connect_with_passphrase(&host, Some(passphrase.as_str())) - .await - { - crate::commands::logs::log_dev(format!( - "[dev][ssh_connect_with_passphrase] failed host_id={} host={} user={} port={} auth_method={} error={}", - host_id, - host.host, - host.username, 
- host.port, - host.auth_method, - error + "[dev][ssh_connect_with_passphrase] success host_id={host_id}" )); - return Err(make_ssh_command_error( + let _ = success_ssh_diagnostic( &app, - SshStage::AuthNegotiation, + SshStage::SessionOpen, SshIntent::Connect, - format!("ssh connect failed: {error}"), - )); - } - crate::commands::logs::log_dev(format!( - "[dev][ssh_connect_with_passphrase] success host_id={host_id}" - )); - let _ = success_ssh_diagnostic( - &app, - SshStage::SessionOpen, - SshIntent::Connect, - "SSH connection established", - SshDiagnosticSuccessTrigger::ConnectEstablished, - ); - Ok(true) + "SSH connection established", + SshDiagnosticSuccessTrigger::ConnectEstablished, + ); + Ok(true) + }) } #[tauri::command] @@ -353,8 +365,10 @@ pub async fn ssh_disconnect( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - pool.disconnect(&host_id).await?; - Ok(true) + timed_async!("ssh_disconnect", { + pool.disconnect(&host_id).await?; + Ok(true) + }) } #[tauri::command] @@ -362,11 +376,13 @@ pub async fn ssh_status( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - if pool.is_connected(&host_id).await { - Ok("connected".to_string()) - } else { - Ok("disconnected".to_string()) - } + timed_async!("ssh_status", { + if pool.is_connected(&host_id).await { + Ok("connected".to_string()) + } else { + Ok("disconnected".to_string()) + } + }) } #[tauri::command] @@ -374,7 +390,9 @@ pub async fn get_ssh_transfer_stats( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - Ok(pool.get_transfer_stats(&host_id).await) + timed_async!("get_ssh_transfer_stats", { + Ok(pool.get_transfer_stats(&host_id).await) + }) } // --------------------------------------------------------------------------- @@ -388,19 +406,23 @@ pub async fn ssh_exec( command: String, app: AppHandle, ) -> Result { - pool.exec(&host_id, &command) - .await - .map(|result| { - let _ = success_ssh_diagnostic( - &app, - SshStage::RemoteExec, - SshIntent::Exec, - 
"Remote SSH command executed", - SshDiagnosticSuccessTrigger::RoutineOperation, - ); - result - }) - .map_err(|error| make_ssh_command_error(&app, SshStage::RemoteExec, SshIntent::Exec, error)) + timed_async!("ssh_exec", { + pool.exec(&host_id, &command) + .await + .map(|result| { + let _ = success_ssh_diagnostic( + &app, + SshStage::RemoteExec, + SshIntent::Exec, + "Remote SSH command executed", + SshDiagnosticSuccessTrigger::RoutineOperation, + ); + result + }) + .map_err(|error| { + make_ssh_command_error(&app, SshStage::RemoteExec, SshIntent::Exec, error) + }) + }) } #[tauri::command] @@ -410,21 +432,23 @@ pub async fn sftp_read_file( path: String, app: AppHandle, ) -> Result { - pool.sftp_read(&host_id, &path) - .await - .map(|result| { - let _ = success_ssh_diagnostic( - &app, - SshStage::SftpRead, - SshIntent::SftpRead, - "SFTP read succeeded", - SshDiagnosticSuccessTrigger::RoutineOperation, - ); - result - }) - .map_err(|error| { - make_ssh_command_error(&app, SshStage::SftpRead, SshIntent::SftpRead, error) - }) + timed_async!("sftp_read_file", { + pool.sftp_read(&host_id, &path) + .await + .map(|result| { + let _ = success_ssh_diagnostic( + &app, + SshStage::SftpRead, + SshIntent::SftpRead, + "SFTP read succeeded", + SshDiagnosticSuccessTrigger::RoutineOperation, + ); + result + }) + .map_err(|error| { + make_ssh_command_error(&app, SshStage::SftpRead, SshIntent::SftpRead, error) + }) + }) } #[tauri::command] @@ -435,19 +459,21 @@ pub async fn sftp_write_file( content: String, app: AppHandle, ) -> Result { - pool.sftp_write(&host_id, &path, &content) - .await - .map_err(|error| { - make_ssh_command_error(&app, SshStage::SftpWrite, SshIntent::SftpWrite, error) - })?; - let _ = success_ssh_diagnostic( - &app, - SshStage::SftpWrite, - SshIntent::SftpWrite, - "SFTP write succeeded", - SshDiagnosticSuccessTrigger::RoutineOperation, - ); - Ok(true) + timed_async!("sftp_write_file", { + pool.sftp_write(&host_id, &path, &content) + .await + .map_err(|error| { + 
make_ssh_command_error(&app, SshStage::SftpWrite, SshIntent::SftpWrite, error) + })?; + let _ = success_ssh_diagnostic( + &app, + SshStage::SftpWrite, + SshIntent::SftpWrite, + "SFTP write succeeded", + SshDiagnosticSuccessTrigger::RoutineOperation, + ); + Ok(true) + }) } #[tauri::command] @@ -457,21 +483,23 @@ pub async fn sftp_list_dir( path: String, app: AppHandle, ) -> Result, String> { - pool.sftp_list(&host_id, &path) - .await - .map(|result| { - let _ = success_ssh_diagnostic( - &app, - SshStage::SftpRead, - SshIntent::SftpRead, - "SFTP list succeeded", - SshDiagnosticSuccessTrigger::RoutineOperation, - ); - result - }) - .map_err(|error| { - make_ssh_command_error(&app, SshStage::SftpRead, SshIntent::SftpRead, error) - }) + timed_async!("sftp_list_dir", { + pool.sftp_list(&host_id, &path) + .await + .map(|result| { + let _ = success_ssh_diagnostic( + &app, + SshStage::SftpRead, + SshIntent::SftpRead, + "SFTP list succeeded", + SshDiagnosticSuccessTrigger::RoutineOperation, + ); + result + }) + .map_err(|error| { + make_ssh_command_error(&app, SshStage::SftpRead, SshIntent::SftpRead, error) + }) + }) } #[tauri::command] @@ -481,17 +509,19 @@ pub async fn sftp_remove_file( path: String, app: AppHandle, ) -> Result { - pool.sftp_remove(&host_id, &path).await.map_err(|error| { - make_ssh_command_error(&app, SshStage::SftpRemove, SshIntent::SftpRemove, error) - })?; - let _ = success_ssh_diagnostic( - &app, - SshStage::SftpRemove, - SshIntent::SftpRemove, - "SFTP remove succeeded", - SshDiagnosticSuccessTrigger::RoutineOperation, - ); - Ok(true) + timed_async!("sftp_remove_file", { + pool.sftp_remove(&host_id, &path).await.map_err(|error| { + make_ssh_command_error(&app, SshStage::SftpRemove, SshIntent::SftpRemove, error) + })?; + let _ = success_ssh_diagnostic( + &app, + SshStage::SftpRemove, + SshIntent::SftpRemove, + "SFTP remove succeeded", + SshDiagnosticSuccessTrigger::RoutineOperation, + ); + Ok(true) + }) } #[tauri::command] @@ -501,85 +531,89 @@ pub 
async fn diagnose_ssh( intent: String, app: AppHandle, ) -> Result { - let intent = intent.parse::().map_err(|_| { - make_ssh_command_error( - &app, - SshStage::ResolveHostConfig, - SshIntent::Connect, - format!("Invalid SSH diagnostic intent: {intent}"), - ) - })?; - - let stage = ssh_stage_for_intent(intent); - if matches!(intent, SshIntent::Connect) { - if pool.is_connected(&host_id).await { - return Ok(success_ssh_diagnostic( - &app, - stage, - intent, - "SSH connection is healthy", - SshDiagnosticSuccessTrigger::ExplicitProbe, - )); - } - let hosts = read_hosts_from_registry().map_err(|error| { - make_ssh_command_error(&app, SshStage::ResolveHostConfig, SshIntent::Connect, error) - })?; - let host = hosts.into_iter().find(|h| h.id == host_id).ok_or_else(|| { + timed_async!("diagnose_ssh", { + let intent = intent.parse::().map_err(|_| { make_ssh_command_error( &app, SshStage::ResolveHostConfig, SshIntent::Connect, - format!("No SSH host config with id: {host_id}"), + format!("Invalid SSH diagnostic intent: {intent}"), ) })?; - return Ok(match pool.connect(&host).await { - Ok(_) => success_ssh_diagnostic( - &app, - SshStage::SessionOpen, - SshIntent::Connect, - "SSH connect probe succeeded", - SshDiagnosticSuccessTrigger::ExplicitProbe, - ), - Err(error) => { - let mut report = - from_any_error(SshStage::TcpReachability, SshIntent::Connect, error); - if let Some(code) = report.error_code { - report.stage = ssh_stage_for_error_code(code); - } - emit_ssh_diagnostic(&app, &report); - report + + let stage = ssh_stage_for_intent(intent); + if matches!(intent, SshIntent::Connect) { + if pool.is_connected(&host_id).await { + return Ok(success_ssh_diagnostic( + &app, + stage, + intent, + "SSH connection is healthy", + SshDiagnosticSuccessTrigger::ExplicitProbe, + )); } - }); - } + let hosts = read_hosts_from_registry().map_err(|error| { + make_ssh_command_error(&app, SshStage::ResolveHostConfig, SshIntent::Connect, error) + })?; + let host = hosts.into_iter().find(|h| 
h.id == host_id).ok_or_else(|| { + make_ssh_command_error( + &app, + SshStage::ResolveHostConfig, + SshIntent::Connect, + format!("No SSH host config with id: {host_id}"), + ) + })?; + return Ok(match pool.connect(&host).await { + Ok(_) => success_ssh_diagnostic( + &app, + SshStage::SessionOpen, + SshIntent::Connect, + "SSH connect probe succeeded", + SshDiagnosticSuccessTrigger::ExplicitProbe, + ), + Err(error) => { + let mut report = + from_any_error(SshStage::TcpReachability, SshIntent::Connect, error); + if let Some(code) = report.error_code { + report.stage = ssh_stage_for_error_code(code); + } + emit_ssh_diagnostic(&app, &report); + report + } + }); + } - if !pool.is_connected(&host_id).await { - let report = from_any_error(stage, intent, format!("No connection for id: {host_id}")); - emit_ssh_diagnostic(&app, &report); - return Ok(report); - } + if !pool.is_connected(&host_id).await { + let report = from_any_error(stage, intent, format!("No connection for id: {host_id}")); + emit_ssh_diagnostic(&app, &report); + return Ok(report); + } - let report = match intent { - SshIntent::Exec - | SshIntent::InstallStep - | SshIntent::DoctorRemote - | SshIntent::HealthCheck => { - match pool.exec(&host_id, "echo clawpal_ssh_diagnostic").await { - Ok(_) => SshDiagnosticReport::success(stage, intent, "SSH exec probe succeeded"), + let report = match intent { + SshIntent::Exec + | SshIntent::InstallStep + | SshIntent::DoctorRemote + | SshIntent::HealthCheck => { + match pool.exec(&host_id, "echo clawpal_ssh_diagnostic").await { + Ok(_) => { + SshDiagnosticReport::success(stage, intent, "SSH exec probe succeeded") + } + Err(error) => from_any_error(stage, intent, error), + } + } + SshIntent::SftpRead => match pool.sftp_list(&host_id, "~").await { + Ok(_) => SshDiagnosticReport::success(stage, intent, "SFTP read probe succeeded"), Err(error) => from_any_error(stage, intent, error), + }, + SshIntent::SftpWrite => { + skipped_probe_diagnostic(stage, intent, "SFTP write probe 
skipped (no-op)") } - } - SshIntent::SftpRead => match pool.sftp_list(&host_id, "~").await { - Ok(_) => SshDiagnosticReport::success(stage, intent, "SFTP read probe succeeded"), - Err(error) => from_any_error(stage, intent, error), - }, - SshIntent::SftpWrite => { - skipped_probe_diagnostic(stage, intent, "SFTP write probe skipped (no-op)") - } - SshIntent::SftpRemove => { - skipped_probe_diagnostic(stage, intent, "SFTP remove probe skipped (no-op)") - } - SshIntent::Connect => unreachable!(), - }; - emit_ssh_diagnostic(&app, &report); - Ok(report) + SshIntent::SftpRemove => { + skipped_probe_diagnostic(stage, intent, "SFTP remove probe skipped (no-op)") + } + SshIntent::Connect => unreachable!(), + }; + emit_ssh_diagnostic(&app, &report); + Ok(report) + }) } diff --git a/src-tauri/src/commands/upgrade.rs b/src-tauri/src/commands/upgrade.rs index cec83525..84d144ea 100644 --- a/src-tauri/src/commands/upgrade.rs +++ b/src-tauri/src/commands/upgrade.rs @@ -4,21 +4,23 @@ use std::process::Command; #[tauri::command] pub async fn run_openclaw_upgrade() -> Result { - let output = Command::new("bash") - .args(["-c", "curl -fsSL https://openclaw.ai/install.sh | bash"]) - .output() - .map_err(|e| format!("Failed to run upgrade: {e}"))?; - let stdout = String::from_utf8_lossy(&output.stdout).to_string(); - let stderr = String::from_utf8_lossy(&output.stderr).to_string(); - let combined = if stderr.is_empty() { - stdout - } else { - format!("{stdout}\n{stderr}") - }; - if output.status.success() { - super::clear_openclaw_version_cache(); - Ok(combined) - } else { - Err(combined) - } + timed_async!("run_openclaw_upgrade", { + let output = Command::new("bash") + .args(["-c", "curl -fsSL https://openclaw.ai/install.sh | bash"]) + .output() + .map_err(|e| format!("Failed to run upgrade: {e}"))?; + let stdout = String::from_utf8_lossy(&output.stdout).to_string(); + let stderr = String::from_utf8_lossy(&output.stderr).to_string(); + let combined = if stderr.is_empty() { + stdout + 
} else { + format!("{stdout}\n{stderr}") + }; + if output.status.success() { + super::clear_openclaw_version_cache(); + Ok(combined) + } else { + Err(combined) + } + }) } diff --git a/src-tauri/src/commands/util.rs b/src-tauri/src/commands/util.rs index 63688abd..de3963a3 100644 --- a/src-tauri/src/commands/util.rs +++ b/src-tauri/src/commands/util.rs @@ -4,41 +4,43 @@ use std::process::Command; #[tauri::command] pub fn open_url(url: String) -> Result<(), String> { - let trimmed = url.trim(); - if trimmed.is_empty() { - return Err("URL is required".into()); - } - // Allow http(s) URLs and local paths within user home directory - if !trimmed.starts_with("http://") && !trimmed.starts_with("https://") { - // For local paths, ensure they don't execute apps - let path = std::path::Path::new(trimmed); - if path - .extension() - .map_or(false, |ext| ext == "app" || ext == "exe") + timed_sync!("open_url", { + let trimmed = url.trim(); + if trimmed.is_empty() { + return Err("URL is required".into()); + } + // Allow http(s) URLs and local paths within user home directory + if !trimmed.starts_with("http://") && !trimmed.starts_with("https://") { + // For local paths, ensure they don't execute apps + let path = std::path::Path::new(trimmed); + if path + .extension() + .map_or(false, |ext| ext == "app" || ext == "exe") + { + return Err("Cannot open application files".into()); + } + } + #[cfg(target_os = "macos")] + { + Command::new("open") + .arg(&url) + .spawn() + .map_err(|e| e.to_string())?; + } + #[cfg(target_os = "linux")] + { + Command::new("xdg-open") + .arg(&url) + .spawn() + .map_err(|e| e.to_string())?; + } + #[cfg(target_os = "windows")] { - return Err("Cannot open application files".into()); + Command::new("cmd") + .args(["/c", "start", &url]) + .spawn() + .map_err(|e| e.to_string())?; } - } - #[cfg(target_os = "macos")] - { - Command::new("open") - .arg(&url) - .spawn() - .map_err(|e| e.to_string())?; - } - #[cfg(target_os = "linux")] - { - Command::new("xdg-open") 
- .arg(&url) - .spawn() - .map_err(|e| e.to_string())?; - } - #[cfg(target_os = "windows")] - { - Command::new("cmd") - .args(["/c", "start", &url]) - .spawn() - .map_err(|e| e.to_string())?; - } - Ok(()) + Ok(()) + }) } diff --git a/src-tauri/src/commands/watchdog.rs b/src-tauri/src/commands/watchdog.rs index 15eda2a3..cc3eb9d8 100644 --- a/src-tauri/src/commands/watchdog.rs +++ b/src-tauri/src/commands/watchdog.rs @@ -5,30 +5,32 @@ pub async fn remote_get_watchdog_status( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - let status_raw = pool - .exec( + timed_async!("remote_get_watchdog_status", { + let status_raw = pool + .exec( + &host_id, + "cat ~/.clawpal/watchdog/status.json 2>/dev/null || true", + ) + .await + .map(|result| result.stdout) + .unwrap_or_default(); + let probe = pool.exec( &host_id, - "cat ~/.clawpal/watchdog/status.json 2>/dev/null || true", + "pid=\"\"; [ -f ~/.clawpal/watchdog/watchdog.pid ] && pid=$(cat ~/.clawpal/watchdog/watchdog.pid 2>/dev/null | tr -d '\\r\\n'); alive=dead; [ -n \"$pid\" ] && kill -0 \"$pid\" 2>/dev/null && alive=alive; deployed=0; [ -f ~/.clawpal/watchdog/watchdog.js ] && deployed=1; printf \"%s\\t%s\\t%s\\n\" \"$pid\" \"$alive\" \"$deployed\"", ) .await .map(|result| result.stdout) .unwrap_or_default(); - let probe = pool.exec( - &host_id, - "pid=\"\"; [ -f ~/.clawpal/watchdog/watchdog.pid ] && pid=$(cat ~/.clawpal/watchdog/watchdog.pid 2>/dev/null | tr -d '\\r\\n'); alive=dead; [ -n \"$pid\" ] && kill -0 \"$pid\" 2>/dev/null && alive=alive; deployed=0; [ -f ~/.clawpal/watchdog/watchdog.js ] && deployed=1; printf \"%s\\t%s\\t%s\\n\" \"$pid\" \"$alive\" \"$deployed\"", - ) - .await - .map(|result| result.stdout) - .unwrap_or_default(); - let mut fields = probe.trim().splitn(3, '\t'); - let _pid = fields.next().unwrap_or("").trim(); - let alive_output = fields.next().unwrap_or("dead").to_string(); - let deployed = fields.next().map(|v| v.trim() == "1").unwrap_or(false); + let mut fields = 
probe.trim().splitn(3, '\t'); + let _pid = fields.next().unwrap_or("").trim(); + let alive_output = fields.next().unwrap_or("dead").to_string(); + let deployed = fields.next().map(|v| v.trim() == "1").unwrap_or(false); - let mut status = - clawpal_core::watchdog::parse_watchdog_status(&status_raw, &alive_output).extra; - status.insert("deployed".into(), Value::Bool(deployed)); - Ok(Value::Object(status)) + let mut status = + clawpal_core::watchdog::parse_watchdog_status(&status_raw, &alive_output).extra; + status.insert("deployed".into(), Value::Bool(deployed)); + Ok(Value::Object(status)) + }) } #[tauri::command] @@ -37,20 +39,22 @@ pub async fn remote_deploy_watchdog( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - let resource_path = app_handle - .path() - .resolve( - "resources/watchdog.js", - tauri::path::BaseDirectory::Resource, - ) - .map_err(|e| format!("Failed to resolve watchdog resource: {e}"))?; - let content = std::fs::read_to_string(&resource_path) - .map_err(|e| format!("Failed to read watchdog resource: {e}"))?; + timed_async!("remote_deploy_watchdog", { + let resource_path = app_handle + .path() + .resolve( + "resources/watchdog.js", + tauri::path::BaseDirectory::Resource, + ) + .map_err(|e| format!("Failed to resolve watchdog resource: {e}"))?; + let content = std::fs::read_to_string(&resource_path) + .map_err(|e| format!("Failed to read watchdog resource: {e}"))?; - pool.exec(&host_id, "mkdir -p ~/.clawpal/watchdog").await?; - pool.sftp_write(&host_id, "~/.clawpal/watchdog/watchdog.js", &content) - .await?; - Ok(true) + pool.exec(&host_id, "mkdir -p ~/.clawpal/watchdog").await?; + pool.sftp_write(&host_id, "~/.clawpal/watchdog/watchdog.js", &content) + .await?; + Ok(true) + }) } #[tauri::command] @@ -58,25 +62,27 @@ pub async fn remote_start_watchdog( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - let pid_raw = pool - .sftp_read(&host_id, "~/.clawpal/watchdog/watchdog.pid") - .await; - if let Ok(pid_str) 
= pid_raw { - let cmd = format!( - "kill -0 {} 2>/dev/null && echo alive || echo dead", - pid_str.trim() - ); - if let Ok(r) = pool.exec(&host_id, &cmd).await { - if r.stdout.trim() == "alive" { - return Ok(true); + timed_async!("remote_start_watchdog", { + let pid_raw = pool + .sftp_read(&host_id, "~/.clawpal/watchdog/watchdog.pid") + .await; + if let Ok(pid_str) = pid_raw { + let cmd = format!( + "kill -0 {} 2>/dev/null && echo alive || echo dead", + pid_str.trim() + ); + if let Ok(r) = pool.exec(&host_id, &cmd).await { + if r.stdout.trim() == "alive" { + return Ok(true); + } } } - } - let cmd = "cd ~/.clawpal/watchdog && nohup node watchdog.js >> watchdog.log 2>&1 &"; - pool.exec(&host_id, cmd).await?; - // watchdog.js writes its own PID file to ~/.clawpal/watchdog/ - Ok(true) + let cmd = "cd ~/.clawpal/watchdog && nohup node watchdog.js >> watchdog.log 2>&1 &"; + pool.exec(&host_id, cmd).await?; + // watchdog.js writes its own PID file to ~/.clawpal/watchdog/ + Ok(true) + }) } #[tauri::command] @@ -84,18 +90,20 @@ pub async fn remote_stop_watchdog( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - let pid_raw = pool - .sftp_read(&host_id, "~/.clawpal/watchdog/watchdog.pid") - .await; - if let Ok(pid_str) = pid_raw { + timed_async!("remote_stop_watchdog", { + let pid_raw = pool + .sftp_read(&host_id, "~/.clawpal/watchdog/watchdog.pid") + .await; + if let Ok(pid_str) = pid_raw { + let _ = pool + .exec(&host_id, &format!("kill {} 2>/dev/null", pid_str.trim())) + .await; + } let _ = pool - .exec(&host_id, &format!("kill {} 2>/dev/null", pid_str.trim())) + .exec(&host_id, "rm -f ~/.clawpal/watchdog/watchdog.pid") .await; - } - let _ = pool - .exec(&host_id, "rm -f ~/.clawpal/watchdog/watchdog.pid") - .await; - Ok(true) + Ok(true) + }) } #[tauri::command] @@ -103,16 +111,18 @@ pub async fn remote_uninstall_watchdog( pool: State<'_, SshConnectionPool>, host_id: String, ) -> Result { - // Stop first - let pid_raw = pool - .sftp_read(&host_id, 
"~/.clawpal/watchdog/watchdog.pid") - .await; - if let Ok(pid_str) = pid_raw { - let _ = pool - .exec(&host_id, &format!("kill {} 2>/dev/null", pid_str.trim())) + timed_async!("remote_uninstall_watchdog", { + // Stop first + let pid_raw = pool + .sftp_read(&host_id, "~/.clawpal/watchdog/watchdog.pid") .await; - } - // Remove entire directory - let _ = pool.exec(&host_id, "rm -rf ~/.clawpal/watchdog").await; - Ok(true) + if let Ok(pid_str) = pid_raw { + let _ = pool + .exec(&host_id, &format!("kill {} 2>/dev/null", pid_str.trim())) + .await; + } + // Remove entire directory + let _ = pool.exec(&host_id, "rm -rf ~/.clawpal/watchdog").await; + Ok(true) + }) } diff --git a/src-tauri/src/commands/watchdog_cmds.rs b/src-tauri/src/commands/watchdog_cmds.rs index d401baae..fde3ea9e 100644 --- a/src-tauri/src/commands/watchdog_cmds.rs +++ b/src-tauri/src/commands/watchdog_cmds.rs @@ -7,167 +7,177 @@ use crate::models::resolve_paths; #[tauri::command] pub async fn get_watchdog_status() -> Result { - tauri::async_runtime::spawn_blocking(|| { - let paths = resolve_paths(); - let wd_dir = paths.clawpal_dir.join("watchdog"); - let status_path = wd_dir.join("status.json"); - let pid_path = wd_dir.join("watchdog.pid"); - - let mut status = if status_path.exists() { - let text = std::fs::read_to_string(&status_path).map_err(|e| e.to_string())?; - serde_json::from_str::(&text).unwrap_or(Value::Null) - } else { - Value::Null - }; - - let alive = if pid_path.exists() { - let pid_str = std::fs::read_to_string(&pid_path).unwrap_or_default(); - if let Ok(pid) = pid_str.trim().parse::() { - std::process::Command::new("kill") - .args(["-0", &pid.to_string()]) - .output() - .map(|o| o.status.success()) - .unwrap_or(false) + timed_async!("get_watchdog_status", { + tauri::async_runtime::spawn_blocking(|| { + let paths = resolve_paths(); + let wd_dir = paths.clawpal_dir.join("watchdog"); + let status_path = wd_dir.join("status.json"); + let pid_path = wd_dir.join("watchdog.pid"); + + let mut 
status = if status_path.exists() { + let text = std::fs::read_to_string(&status_path).map_err(|e| e.to_string())?; + serde_json::from_str::(&text).unwrap_or(Value::Null) + } else { + Value::Null + }; + + let alive = if pid_path.exists() { + let pid_str = std::fs::read_to_string(&pid_path).unwrap_or_default(); + if let Ok(pid) = pid_str.trim().parse::() { + std::process::Command::new("kill") + .args(["-0", &pid.to_string()]) + .output() + .map(|o| o.status.success()) + .unwrap_or(false) + } else { + false + } } else { false + }; + + if let Value::Object(ref mut map) = status { + map.insert("alive".into(), Value::Bool(alive)); + map.insert( + "deployed".into(), + Value::Bool(wd_dir.join("watchdog.js").exists()), + ); + } else { + let mut map = serde_json::Map::new(); + map.insert("alive".into(), Value::Bool(alive)); + map.insert( + "deployed".into(), + Value::Bool(wd_dir.join("watchdog.js").exists()), + ); + status = Value::Object(map); } - } else { - false - }; - - if let Value::Object(ref mut map) = status { - map.insert("alive".into(), Value::Bool(alive)); - map.insert( - "deployed".into(), - Value::Bool(wd_dir.join("watchdog.js").exists()), - ); - } else { - let mut map = serde_json::Map::new(); - map.insert("alive".into(), Value::Bool(alive)); - map.insert( - "deployed".into(), - Value::Bool(wd_dir.join("watchdog.js").exists()), - ); - status = Value::Object(map); - } - Ok(status) + Ok(status) + }) + .await + .map_err(|e| e.to_string())? }) - .await - .map_err(|e| e.to_string())? 
} #[tauri::command] pub fn deploy_watchdog(app_handle: tauri::AppHandle) -> Result { - let paths = resolve_paths(); - let wd_dir = paths.clawpal_dir.join("watchdog"); - std::fs::create_dir_all(&wd_dir).map_err(|e| e.to_string())?; - - let resource_path = app_handle - .path() - .resolve( - "resources/watchdog.js", - tauri::path::BaseDirectory::Resource, - ) - .map_err(|e| format!("Failed to resolve watchdog resource: {e}"))?; - - let content = std::fs::read_to_string(&resource_path) - .map_err(|e| format!("Failed to read watchdog resource: {e}"))?; - - std::fs::write(wd_dir.join("watchdog.js"), content).map_err(|e| e.to_string())?; - crate::logging::log_info("Watchdog deployed"); - Ok(true) + timed_sync!("deploy_watchdog", { + let paths = resolve_paths(); + let wd_dir = paths.clawpal_dir.join("watchdog"); + std::fs::create_dir_all(&wd_dir).map_err(|e| e.to_string())?; + + let resource_path = app_handle + .path() + .resolve( + "resources/watchdog.js", + tauri::path::BaseDirectory::Resource, + ) + .map_err(|e| format!("Failed to resolve watchdog resource: {e}"))?; + + let content = std::fs::read_to_string(&resource_path) + .map_err(|e| format!("Failed to read watchdog resource: {e}"))?; + + std::fs::write(wd_dir.join("watchdog.js"), content).map_err(|e| e.to_string())?; + crate::logging::log_info("Watchdog deployed"); + Ok(true) + }) } #[tauri::command] pub fn start_watchdog() -> Result { - let paths = resolve_paths(); - let wd_dir = paths.clawpal_dir.join("watchdog"); - let script = wd_dir.join("watchdog.js"); - let pid_path = wd_dir.join("watchdog.pid"); - let log_path = wd_dir.join("watchdog.log"); + timed_sync!("start_watchdog", { + let paths = resolve_paths(); + let wd_dir = paths.clawpal_dir.join("watchdog"); + let script = wd_dir.join("watchdog.js"); + let pid_path = wd_dir.join("watchdog.pid"); + let log_path = wd_dir.join("watchdog.log"); - if !script.exists() { - return Err("Watchdog not deployed. 
Deploy first.".into()); - } + if !script.exists() { + return Err("Watchdog not deployed. Deploy first.".into()); + } - if pid_path.exists() { - let pid_str = std::fs::read_to_string(&pid_path).unwrap_or_default(); - if let Ok(pid) = pid_str.trim().parse::() { - let alive = std::process::Command::new("kill") - .args(["-0", &pid.to_string()]) - .output() - .map(|o| o.status.success()) - .unwrap_or(false); - if alive { - return Ok(true); + if pid_path.exists() { + let pid_str = std::fs::read_to_string(&pid_path).unwrap_or_default(); + if let Ok(pid) = pid_str.trim().parse::() { + let alive = std::process::Command::new("kill") + .args(["-0", &pid.to_string()]) + .output() + .map(|o| o.status.success()) + .unwrap_or(false); + if alive { + return Ok(true); + } } } - } - - let log_file = std::fs::OpenOptions::new() - .create(true) - .append(true) - .open(&log_path) - .map_err(|e| e.to_string())?; - let log_err = log_file.try_clone().map_err(|e| e.to_string())?; - - let _child = std::process::Command::new("node") - .arg(&script) - .current_dir(&wd_dir) - .env("CLAWPAL_WATCHDOG_DIR", &wd_dir) - .stdout(log_file) - .stderr(log_err) - .stdin(std::process::Stdio::null()) - .spawn() - .map_err(|e| format!("Failed to start watchdog: {e}"))?; - - // PID file is written by watchdog.js itself via acquirePidFile() - crate::logging::log_info("Watchdog started"); - Ok(true) + + let log_file = std::fs::OpenOptions::new() + .create(true) + .append(true) + .open(&log_path) + .map_err(|e| e.to_string())?; + let log_err = log_file.try_clone().map_err(|e| e.to_string())?; + + let _child = std::process::Command::new("node") + .arg(&script) + .current_dir(&wd_dir) + .env("CLAWPAL_WATCHDOG_DIR", &wd_dir) + .stdout(log_file) + .stderr(log_err) + .stdin(std::process::Stdio::null()) + .spawn() + .map_err(|e| format!("Failed to start watchdog: {e}"))?; + + // PID file is written by watchdog.js itself via acquirePidFile() + crate::logging::log_info("Watchdog started"); + Ok(true) + }) } 
#[tauri::command] pub fn stop_watchdog() -> Result { - let paths = resolve_paths(); - let pid_path = paths.clawpal_dir.join("watchdog").join("watchdog.pid"); - - if !pid_path.exists() { - return Ok(true); - } - - let pid_str = std::fs::read_to_string(&pid_path).unwrap_or_default(); - if let Ok(pid) = pid_str.trim().parse::() { - let _ = std::process::Command::new("kill") - .arg(pid.to_string()) - .output(); - } - - let _ = std::fs::remove_file(&pid_path); - crate::logging::log_info("Watchdog stopped"); - Ok(true) -} + timed_sync!("stop_watchdog", { + let paths = resolve_paths(); + let pid_path = paths.clawpal_dir.join("watchdog").join("watchdog.pid"); -#[tauri::command] -pub fn uninstall_watchdog() -> Result { - let paths = resolve_paths(); - let wd_dir = paths.clawpal_dir.join("watchdog"); + if !pid_path.exists() { + return Ok(true); + } - // Stop first if running - let pid_path = wd_dir.join("watchdog.pid"); - if pid_path.exists() { let pid_str = std::fs::read_to_string(&pid_path).unwrap_or_default(); if let Ok(pid) = pid_str.trim().parse::() { let _ = std::process::Command::new("kill") .arg(pid.to_string()) .output(); } - } - - // Remove entire watchdog directory - if wd_dir.exists() { - std::fs::remove_dir_all(&wd_dir).map_err(|e| e.to_string())?; - } - crate::logging::log_info("Watchdog uninstalled"); - Ok(true) + + let _ = std::fs::remove_file(&pid_path); + crate::logging::log_info("Watchdog stopped"); + Ok(true) + }) +} + +#[tauri::command] +pub fn uninstall_watchdog() -> Result { + timed_sync!("uninstall_watchdog", { + let paths = resolve_paths(); + let wd_dir = paths.clawpal_dir.join("watchdog"); + + // Stop first if running + let pid_path = wd_dir.join("watchdog.pid"); + if pid_path.exists() { + let pid_str = std::fs::read_to_string(&pid_path).unwrap_or_default(); + if let Ok(pid) = pid_str.trim().parse::() { + let _ = std::process::Command::new("kill") + .arg(pid.to_string()) + .output(); + } + } + + // Remove entire watchdog directory + if 
wd_dir.exists() { + std::fs::remove_dir_all(&wd_dir).map_err(|e| e.to_string())?; + } + crate::logging::log_info("Watchdog uninstalled"); + Ok(true) + }) } diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs index b0491a7c..7ebe39e2 100644 --- a/src-tauri/src/lib.rs +++ b/src-tauri/src/lib.rs @@ -18,10 +18,11 @@ use crate::commands::{ get_bug_report_settings, get_cached_model_catalog, get_channels_config_snapshot, get_channels_runtime_snapshot, get_cron_config_snapshot, get_cron_runs, get_cron_runtime_snapshot, get_instance_config_snapshot, get_instance_runtime_snapshot, - get_rescue_bot_status, get_session_model_override, get_ssh_transfer_stats, get_status_extra, - get_status_light, get_system_status, get_watchdog_status, list_agents_overview, list_backups, - list_bindings, list_channels_minimal, list_cron_jobs, list_discord_guild_channels, - list_history, list_model_profiles, list_recipes, list_registered_instances, list_session_files, + get_perf_report, get_perf_timings, get_process_metrics, get_rescue_bot_status, + get_session_model_override, get_ssh_transfer_stats, get_status_extra, get_status_light, + get_system_status, get_watchdog_status, list_agents_overview, list_backups, list_bindings, + list_channels_minimal, list_cron_jobs, list_discord_guild_channels, list_history, + list_model_profiles, list_recipes, list_registered_instances, list_session_files, list_ssh_config_hosts, list_ssh_hosts, local_openclaw_cli_available, local_openclaw_config_exists, log_app_event, manage_rescue_bot, migrate_legacy_instances, open_url, precheck_auth, precheck_instance, precheck_registry, precheck_transport, @@ -278,6 +279,9 @@ pub fn run() { read_gateway_log, read_gateway_error_log, log_app_event, + get_process_metrics, + get_perf_timings, + get_perf_report, remote_read_app_log, remote_read_error_log, remote_read_helper_log, @@ -304,6 +308,7 @@ pub fn run() { ]) .setup(|_app| { crate::bug_report::install_panic_hook(); + crate::commands::perf::init_perf_clock(); let 
settings = crate::commands::preferences::load_bug_report_settings_from_paths( &crate::models::resolve_paths(), ); diff --git a/src-tauri/tests/command_perf_e2e.rs b/src-tauri/tests/command_perf_e2e.rs new file mode 100644 index 00000000..7a7bf5e4 --- /dev/null +++ b/src-tauri/tests/command_perf_e2e.rs @@ -0,0 +1,185 @@ +//! E2E performance tests for all instrumented commands. +//! +//! Tests exercise local commands (file/config operations) and verify +//! that timing data is properly collected in the PerfRegistry. + +use clawpal::commands::perf::{ + get_perf_report, get_perf_timings, get_process_metrics, init_perf_clock, record_timing, +}; +use std::sync::Mutex; + +static ENV_LOCK: Mutex<()> = Mutex::new(()); + +fn setup() { + init_perf_clock(); + let _ = get_perf_timings(); +} + +fn temp_data_dir() -> std::path::PathBuf { + let ts = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_nanos(); + let path = std::env::temp_dir().join(format!("clawpal-perf-e2e-{}", ts)); + std::fs::create_dir_all(&path).expect("create temp dir"); + path +} + +#[test] +fn registry_collects_samples() { + let _guard = ENV_LOCK.lock().unwrap_or_else(|e| e.into_inner()); + setup(); + record_timing("test_command_a", 42); + record_timing("test_command_b", 100); + record_timing("test_command_a", 55); + + let samples = get_perf_timings().expect("should return timings"); + assert!( + samples.len() >= 3, + "expected at least 3 samples, got {}", + samples.len() + ); + // Find our test samples (other tests may have added samples concurrently) + let a_samples: Vec<_> = samples + .iter() + .filter(|s| s.name == "test_command_a") + .collect(); + let b_samples: Vec<_> = samples + .iter() + .filter(|s| s.name == "test_command_b") + .collect(); + assert!(a_samples.len() >= 2, "expected 2+ test_command_a samples"); + assert!(b_samples.len() >= 1, "expected 1+ test_command_b samples"); + + // Drain should clear + let empty = get_perf_timings().expect("should return 
empty"); + assert!(empty.is_empty()); +} + +#[test] +fn report_aggregates_correctly() { + let _guard = ENV_LOCK.lock().unwrap_or_else(|e| e.into_inner()); + setup(); + record_timing("cmd_fast", 10); + record_timing("cmd_fast", 20); + record_timing("cmd_fast", 30); + record_timing("cmd_slow", 500); + record_timing("cmd_slow", 600); + + let report = get_perf_report().expect("should return report"); + let fast = &report["cmd_fast"]; + assert_eq!(fast["count"], 3); + assert_eq!(fast["p50_ms"], 20); + let slow = &report["cmd_slow"]; + assert_eq!(slow["count"], 2); +} + +#[test] +fn local_config_commands_record_timing() { + let _guard = ENV_LOCK.lock().unwrap_or_else(|e| e.into_inner()); + let data_dir = temp_data_dir(); + unsafe { + std::env::set_var("CLAWPAL_DATA_DIR", &data_dir); + } + setup(); + + use clawpal::commands::{ + get_app_preferences, list_ssh_hosts, local_openclaw_config_exists, read_app_log, + }; + + let _ = local_openclaw_config_exists("/nonexistent".to_string()); + let _ = list_ssh_hosts(); + let _ = get_app_preferences(); + let _ = read_app_log(Some(10)); + + let samples = get_perf_timings().expect("should have timings"); + let names: Vec<&str> = samples.iter().map(|s| s.name.as_str()).collect(); + assert!(names.contains(&"local_openclaw_config_exists")); + assert!(names.contains(&"list_ssh_hosts")); + + for s in &samples { + assert!( + s.elapsed_ms < 100, + "{} took {}ms — should be < 100ms for local ops", + s.name, + s.elapsed_ms + ); + } +} + +#[test] +fn z_local_perf_report_for_ci() { + let _guard = ENV_LOCK.lock().unwrap_or_else(|e| e.into_inner()); + let data_dir = temp_data_dir(); + unsafe { + std::env::set_var("CLAWPAL_DATA_DIR", &data_dir); + } + setup(); + + use clawpal::commands::{ + get_app_preferences, list_ssh_hosts, local_openclaw_config_exists, read_app_log, + read_error_log, + }; + + let commands: Vec<(&str, Box)> = vec![ + ( + "local_openclaw_config_exists", + Box::new(|| { + let _ = local_openclaw_config_exists("/tmp".to_string()); + 
}), + ), + ( + "list_ssh_hosts", + Box::new(|| { + let _ = list_ssh_hosts(); + }), + ), + ( + "get_app_preferences", + Box::new(|| { + let _ = get_app_preferences(); + }), + ), + ( + "read_app_log", + Box::new(|| { + let _ = read_app_log(Some(10)); + }), + ), + ( + "read_error_log", + Box::new(|| { + let _ = read_error_log(Some(10)); + }), + ), + ]; + + for (_, cmd_fn) in &commands { + for _ in 0..5 { + cmd_fn(); + } + } + + let report = get_perf_report().expect("should return report"); + println!(); + println!("PERF_REPORT_START"); + for (name, _) in &commands { + if let Some(stats) = report.get(*name) { + println!( + "LOCAL_CMD:{}:count={}:p50={}:p95={}:max={}:avg={}", + name, + stats["count"], + stats["p50_ms"], + stats["p95_ms"], + stats["max_ms"], + stats["avg_ms"], + ); + } + } + + let metrics = get_process_metrics().expect("metrics"); + let rss_mb = metrics.rss_bytes as f64 / (1024.0 * 1024.0); + println!("PROCESS:rss_mb={:.1}", rss_mb); + println!("PROCESS:platform={}", metrics.platform); + println!("PERF_REPORT_END"); +} diff --git a/src-tauri/tests/perf_metrics.rs b/src-tauri/tests/perf_metrics.rs new file mode 100644 index 00000000..c47febc4 --- /dev/null +++ b/src-tauri/tests/perf_metrics.rs @@ -0,0 +1,202 @@ +//! E2E tests for performance metrics instrumentation. +//! +//! These tests verify that: +//! 1. `get_process_metrics` returns valid data +//! 2. `trace_command` tracks timing correctly +//! 3. Memory readings are within expected bounds +//! 4. 
The perf clock measures uptime correctly + +use clawpal::commands::perf::{ + get_process_metrics, init_perf_clock, trace_command, uptime_ms, PerfSample, ProcessMetrics, +}; +use std::thread; +use std::time::Duration; + +// ── Gate: get_process_metrics returns sane values ── + +#[test] +fn process_metrics_returns_valid_pid() { + init_perf_clock(); + let metrics = get_process_metrics().expect("should return metrics"); + assert_eq!(metrics.pid, std::process::id()); +} + +#[test] +fn process_metrics_rss_within_bounds() { + init_perf_clock(); + let metrics = get_process_metrics().expect("should return metrics"); + + // Test process should use at least 1 MB and less than 80 MB (the target) + let rss_mb = metrics.rss_bytes as f64 / (1024.0 * 1024.0); + assert!( + rss_mb > 1.0, + "RSS too low: {:.1} MB — likely measurement error", + rss_mb + ); + assert!(rss_mb < 80.0, "RSS exceeds 80 MB target: {:.1} MB", rss_mb); +} + +#[test] +fn process_metrics_platform_is_set() { + init_perf_clock(); + let metrics = get_process_metrics().expect("should return metrics"); + assert!(!metrics.platform.is_empty(), "platform should be set"); + // Should be one of the supported platforms + assert!( + ["linux", "macos", "windows"].contains(&metrics.platform.as_str()), + "unexpected platform: {}", + metrics.platform + ); +} + +#[test] +fn process_metrics_uptime_is_positive() { + init_perf_clock(); + // Small sleep so uptime is measurably > 0 + thread::sleep(Duration::from_millis(5)); + let metrics = get_process_metrics().expect("should return metrics"); + assert!( + metrics.uptime_secs > 0.0, + "uptime should be positive: {}", + metrics.uptime_secs + ); +} + +// ── Gate: trace_command timing ── + +#[test] +fn trace_command_measures_fast_operation() { + init_perf_clock(); + let (result, elapsed_ms) = trace_command("test_fast_op", || { + let x = 2 + 2; + x + }); + assert_eq!(result, 4); + // A trivial operation should complete in well under 100ms (the local threshold) + assert!( + elapsed_ms < 
100, + "fast operation took {}ms — should be < 100ms", + elapsed_ms + ); +} + +#[test] +fn trace_command_measures_slow_operation() { + init_perf_clock(); + let (_, elapsed_ms) = trace_command("test_slow_op", || { + thread::sleep(Duration::from_millis(150)); + }); + // Should measure at least 100ms + assert!( + elapsed_ms >= 100, + "slow operation measured as {}ms — should be >= 100ms", + elapsed_ms + ); + // But shouldn't be wildly over (allow up to 500ms for CI scheduling jitter) + assert!( + elapsed_ms < 500, + "slow operation measured as {}ms — excessive", + elapsed_ms + ); +} + +// ── Gate: uptime clock ── + +#[test] +fn uptime_ms_increases_over_time() { + init_perf_clock(); + let t1 = uptime_ms(); + thread::sleep(Duration::from_millis(20)); + let t2 = uptime_ms(); + assert!(t2 > t1, "uptime should increase: {} vs {}", t1, t2); + let delta = t2 - t1; + assert!( + delta >= 10, // allow some scheduling variance + "uptime delta too small: {}ms (expected ~20ms)", + delta + ); +} + +// ── Gate: memory stability under repeated calls ── + +#[test] +fn memory_stable_across_repeated_metrics_calls() { + init_perf_clock(); + + // Take initial measurement + let initial = get_process_metrics().expect("first call"); + let initial_rss = initial.rss_bytes; + + // Call get_process_metrics 100 times to ensure no memory leak in the measurement itself + for _ in 0..100 { + let _ = get_process_metrics(); + } + + let after = get_process_metrics().expect("last call"); + let growth = after.rss_bytes.saturating_sub(initial_rss); + let growth_mb = growth as f64 / (1024.0 * 1024.0); + + // Memory growth from 100 metric reads should be negligible (< 5 MB) + assert!( + growth_mb < 5.0, + "Memory grew {:.1} MB after 100 metrics calls — potential leak", + growth_mb + ); +} + +// ── Gate: PerfSample struct serialization ── + +#[test] +fn perf_sample_serializes_correctly() { + let sample = PerfSample { + name: "test_command".to_string(), + elapsed_ms: 42, + timestamp: 1710000000000, + 
exceeded_threshold: false, + }; + + let json = serde_json::to_string(&sample).expect("should serialize"); + assert!(json.contains("\"name\":\"test_command\"")); + assert!(json.contains("\"elapsedMs\":42")); // camelCase + assert!(json.contains("\"exceededThreshold\":false")); +} + +// ── Metrics reporter: outputs structured data for CI comment ── + +#[test] +fn z_report_metrics_for_ci() { + init_perf_clock(); + + // Process metrics + let metrics = get_process_metrics().expect("should return metrics"); + let rss_mb = metrics.rss_bytes as f64 / (1024.0 * 1024.0); + let vms_mb = metrics.vms_bytes as f64 / (1024.0 * 1024.0); + + // Command timing: measure a batch of get_process_metrics calls + let iterations = 50; + let mut times: Vec = Vec::with_capacity(iterations); + for _ in 0..iterations { + let (_, elapsed) = trace_command("get_process_metrics", || { + let _ = get_process_metrics(); + }); + times.push(elapsed); + } + times.sort(); + let p50 = times[times.len() / 2]; + let p95 = times[(times.len() as f64 * 0.95) as usize]; + let max = *times.last().unwrap_or(&0); + + // Output structured lines for CI to parse + // Format: METRIC:= + println!(); + println!("METRIC:rss_mb={:.1}", rss_mb); + println!("METRIC:vms_mb={:.1}", vms_mb); + println!("METRIC:pid={}", metrics.pid); + println!("METRIC:platform={}", metrics.platform); + println!("METRIC:uptime_secs={:.2}", metrics.uptime_secs); + println!("METRIC:cmd_p50_ms={}", p50); + println!("METRIC:cmd_p95_ms={}", p95); + println!("METRIC:cmd_max_ms={}", max); + println!("METRIC:rss_limit_mb=80"); + println!("METRIC:cmd_p95_limit_ms=100"); +} diff --git a/src/App.tsx b/src/App.tsx index de55dd39..78e85514 100644 --- a/src/App.tsx +++ b/src/App.tsx @@ -17,7 +17,7 @@ import { } from "lucide-react"; import { StartPage } from "./pages/StartPage"; import logoUrl from "./assets/logo.png"; -import { InstanceTabBar } from "./components/InstanceTabBar"; +const InstanceTabBar = lazy(() => 
import("./components/InstanceTabBar").then((m) => ({ default: m.InstanceTabBar }))); import { InstanceContext } from "./lib/instance-context"; import { api } from "./lib/api"; import { buildCacheKey, invalidateGlobalReadCache, prewarmRemoteInstanceReadCache, subscribeToCacheKey } from "./lib/use-api"; @@ -40,7 +40,7 @@ import { Label } from "@/components/ui/label"; import { cn, formatBytes } from "@/lib/utils"; import { toast, Toaster } from "sonner"; import type { ChannelNode, DiscordGuildChannel, DiscoveredInstance, DockerInstance, InstallSession, PrecheckIssue, RegisteredInstance, SshHost, SshTransferStats } from "./lib/types"; -import { SshFormWidget } from "./components/SshFormWidget"; +const SshFormWidget = lazy(() => import("./components/SshFormWidget").then((m) => ({ default: m.SshFormWidget }))); import { closeWorkspaceTab } from "@/lib/tabWorkspace"; import { SSH_PASSPHRASE_RETRY_HINT, @@ -75,14 +75,21 @@ const preloadRouteModules = () => ]); const PING_URL = "https://api.clawpal.zhixian.io/ping"; -const LEGACY_DOCKER_INSTANCES_KEY = "clawpal_docker_instances"; -const DEFAULT_DOCKER_OPENCLAW_HOME = "~/.clawpal/docker-local"; -const DEFAULT_DOCKER_CLAWPAL_DATA_DIR = "~/.clawpal/docker-local/data"; -const DEFAULT_DOCKER_INSTANCE_ID = "docker:local"; - -type Route = "home" | "recipes" | "cook" | "history" | "channels" | "cron" | "doctor" | "context" | "orchestrator"; -const INSTANCE_ROUTES: Route[] = ["home", "channels", "recipes", "cron", "doctor", "context", "history"]; -const OPEN_TABS_STORAGE_KEY = "clawpal_open_tabs"; +import { + LEGACY_DOCKER_INSTANCES_KEY, + DEFAULT_DOCKER_OPENCLAW_HOME, + DEFAULT_DOCKER_CLAWPAL_DATA_DIR, + DEFAULT_DOCKER_INSTANCE_ID, + sanitizeDockerPathSuffix, + deriveDockerPaths, + deriveDockerLabel, + hashInstanceToken, + normalizeDockerInstance, +} from "./lib/docker-instance-helpers"; +import { logDevException, logDevIgnoredError } from "./lib/dev-logging"; +import { Route, INSTANCE_ROUTES, OPEN_TABS_STORAGE_KEY } from 
"./lib/routes"; + + const APP_PREFERENCES_CACHE_KEY = buildCacheKey("__global__", "getAppPreferences", []); interface ProfileSyncStatus { phase: "idle" | "syncing" | "success" | "error"; @@ -90,68 +97,7 @@ interface ProfileSyncStatus { instanceId: string | null; } -function logDevException(label: string, detail: unknown): void { - if (!import.meta.env.DEV) return; - console.error(`[dev exception] ${label}`, detail); -} - -function logDevIgnoredError(context: string, detail: unknown): void { - if (!import.meta.env.DEV) return; - console.warn(`[dev ignored error] ${context}`, detail); -} - -function sanitizeDockerPathSuffix(raw: string): string { - const lowered = raw.toLowerCase().replace(/[^a-z0-9_-]/g, ""); - const trimmed = lowered.replace(/^[-_]+|[-_]+$/g, ""); - return trimmed || "docker-local"; -} - -function deriveDockerPaths(instanceId: string): { openclawHome: string; clawpalDataDir: string } { - if (instanceId === DEFAULT_DOCKER_INSTANCE_ID) { - return { - openclawHome: DEFAULT_DOCKER_OPENCLAW_HOME, - clawpalDataDir: DEFAULT_DOCKER_CLAWPAL_DATA_DIR, - }; - } - const suffixRaw = instanceId.startsWith("docker:") ? instanceId.slice(7) : instanceId; - const suffix = suffixRaw === "local" - ? "docker-local" - : suffixRaw.startsWith("docker-") - ? sanitizeDockerPathSuffix(suffixRaw) - : `docker-${sanitizeDockerPathSuffix(suffixRaw)}`; - const openclawHome = `~/.clawpal/${suffix}`; - return { - openclawHome, - clawpalDataDir: `${openclawHome}/data`, - }; -} - -function deriveDockerLabel(instanceId: string): string { - if (instanceId === DEFAULT_DOCKER_INSTANCE_ID) return "docker-local"; - const suffix = instanceId.startsWith("docker:") ? instanceId.slice(7) : instanceId; - const match = suffix.match(/^local-(\d+)$/); - if (match) return `docker-local-${match[1]}`; - return suffix.startsWith("docker-") ? 
suffix : `docker-${suffix}`; -} - -function hashInstanceToken(raw: string): number { - let hash = 2166136261; - for (let i = 0; i < raw.length; i += 1) { - hash ^= raw.charCodeAt(i); - hash = Math.imul(hash, 16777619); - } - return hash >>> 0; -} -function normalizeDockerInstance(instance: DockerInstance): DockerInstance { - const fallback = deriveDockerPaths(instance.id); - return { - ...instance, - label: instance.label?.trim() || deriveDockerLabel(instance.id), - openclawHome: instance.openclawHome || fallback.openclawHome, - clawpalDataDir: instance.clawpalDataDir || fallback.clawpalDataDir, - }; -} export function App() { const { t } = useTranslation(); diff --git a/src/assets/doctor.png b/src/assets/doctor.png deleted file mode 100644 index ea3d8b29..00000000 Binary files a/src/assets/doctor.png and /dev/null differ diff --git a/src/assets/doctor.webp b/src/assets/doctor.webp new file mode 100644 index 00000000..84e03890 Binary files /dev/null and b/src/assets/doctor.webp differ diff --git a/src/components/RescueAsciiHeader.tsx b/src/components/RescueAsciiHeader.tsx index e61a3b39..94f79f3d 100644 --- a/src/components/RescueAsciiHeader.tsx +++ b/src/components/RescueAsciiHeader.tsx @@ -1,6 +1,6 @@ import type { RescueBotRuntimeState } from "@/lib/types"; import { cn } from "@/lib/utils"; -import doctorImage from "@/assets/doctor.png"; +import doctorImage from "@/assets/doctor.webp"; interface RescueAsciiHeaderProps { state: RescueBotRuntimeState; diff --git a/src/components/__tests__/RescueAsciiHeader.test.tsx b/src/components/__tests__/RescueAsciiHeader.test.tsx index b8ce34a4..95eda647 100644 --- a/src/components/__tests__/RescueAsciiHeader.test.tsx +++ b/src/components/__tests__/RescueAsciiHeader.test.tsx @@ -26,7 +26,7 @@ describe("RescueAsciiHeader", () => { expect(activeHtml).toContain("role=\"img\""); expect(activeHtml).toContain("aria-label=\"Helper is enabled\""); expect(activeHtml).toContain("alt=\"Helper is enabled\""); - 
expect(activeHtml).toContain("src=\"/Users/ChenYu/Documents/Github/clawpal/src/assets/doctor.png\""); + expect(activeHtml).toContain("src=\"/Users/ChenYu/Documents/Github/clawpal/src/assets/doctor.webp\""); expect(activeHtml).toContain("mx-auto w-[264px] sm:w-[312px]"); expect(activeHtml).toContain("bg-[#78A287]"); expect(activeHtml.match(/animate-pulse/g)?.length ?? 0).toBeGreaterThan(0); diff --git a/src/i18n.ts b/src/i18n.ts index c9d61dea..120b5020 100644 --- a/src/i18n.ts +++ b/src/i18n.ts @@ -2,7 +2,11 @@ import i18n from "i18next"; import { initReactI18next } from "react-i18next"; import LanguageDetector from "i18next-browser-languagedetector"; import en from "./locales/en.json"; -import zh from "./locales/zh.json"; + +// English is bundled (fallback); Chinese is lazy-loaded on demand +const lazyLocales: Record Promise<{ default: Record }>> = { + zh: () => import("./locales/zh.json"), +}; i18n .use(LanguageDetector) @@ -10,7 +14,6 @@ i18n .init({ resources: { en: { translation: en }, - zh: { translation: zh }, }, fallbackLng: "en", interpolation: { escapeValue: false }, @@ -21,4 +24,22 @@ i18n }, }); +// Lazy-load detected language if not English +const detected = i18n.language?.split("-")[0]; +if (detected && detected !== "en" && lazyLocales[detected]) { + lazyLocales[detected]().then((mod) => { + i18n.addResourceBundle(detected, "translation", mod.default, true, true); + }); +} + +// Lazy-load on language change +i18n.on("languageChanged", (lng) => { + const base = lng.split("-")[0]; + if (base !== "en" && lazyLocales[base] && !i18n.hasResourceBundle(base, "translation")) { + lazyLocales[base]().then((mod) => { + i18n.addResourceBundle(base, "translation", mod.default, true, true); + }); + } +}); + export default i18n; diff --git a/src/lib/dev-logging.ts b/src/lib/dev-logging.ts new file mode 100644 index 00000000..69a76962 --- /dev/null +++ b/src/lib/dev-logging.ts @@ -0,0 +1,11 @@ +/** Log an exception detail in development mode only. 
*/ +export function logDevException(label: string, detail: unknown): void { + if (!import.meta.env.DEV) return; + console.error(`[dev exception] ${label}`, detail); +} + +/** Log an ignored error context in development mode only. */ +export function logDevIgnoredError(context: string, detail: unknown): void { + if (!import.meta.env.DEV) return; + console.warn(`[dev ignored error] ${context}`, detail); +} diff --git a/src/lib/docker-instance-helpers.ts b/src/lib/docker-instance-helpers.ts new file mode 100644 index 00000000..fadf912c --- /dev/null +++ b/src/lib/docker-instance-helpers.ts @@ -0,0 +1,59 @@ +import type { DockerInstance } from "./types"; + +export const LEGACY_DOCKER_INSTANCES_KEY = "clawpal_docker_instances"; +export const DEFAULT_DOCKER_OPENCLAW_HOME = "~/.clawpal/docker-local"; +export const DEFAULT_DOCKER_CLAWPAL_DATA_DIR = "~/.clawpal/docker-local/data"; +export const DEFAULT_DOCKER_INSTANCE_ID = "docker:local"; + +export function sanitizeDockerPathSuffix(raw: string): string { + const lowered = raw.toLowerCase().replace(/[^a-z0-9_-]/g, ""); + const trimmed = lowered.replace(/^[-_]+|[-_]+$/g, ""); + return trimmed || "docker-local"; +} + +export function deriveDockerPaths(instanceId: string): { openclawHome: string; clawpalDataDir: string } { + if (instanceId === DEFAULT_DOCKER_INSTANCE_ID) { + return { + openclawHome: DEFAULT_DOCKER_OPENCLAW_HOME, + clawpalDataDir: DEFAULT_DOCKER_CLAWPAL_DATA_DIR, + }; + } + const suffixRaw = instanceId.startsWith("docker:") ? instanceId.slice(7) : instanceId; + const suffix = suffixRaw === "local" + ? "docker-local" + : suffixRaw.startsWith("docker-") + ? 
sanitizeDockerPathSuffix(suffixRaw) + : `docker-${sanitizeDockerPathSuffix(suffixRaw)}`; + const openclawHome = `~/.clawpal/${suffix}`; + return { + openclawHome, + clawpalDataDir: `${openclawHome}/data`, + }; +} + +export function deriveDockerLabel(instanceId: string): string { + if (instanceId === DEFAULT_DOCKER_INSTANCE_ID) return "docker-local"; + const suffix = instanceId.startsWith("docker:") ? instanceId.slice(7) : instanceId; + const match = suffix.match(/^local-(\d+)$/); + if (match) return `docker-local-${match[1]}`; + return suffix.startsWith("docker-") ? suffix : `docker-${suffix}`; +} + +export function hashInstanceToken(raw: string): number { + let hash = 2166136261; + for (let i = 0; i < raw.length; i += 1) { + hash ^= raw.charCodeAt(i); + hash = Math.imul(hash, 16777619); + } + return hash >>> 0; +} + +export function normalizeDockerInstance(instance: DockerInstance): DockerInstance { + const fallback = deriveDockerPaths(instance.id); + return { + ...instance, + label: instance.label?.trim() || deriveDockerLabel(instance.id), + openclawHome: instance.openclawHome || fallback.openclawHome, + clawpalDataDir: instance.clawpalDataDir || fallback.clawpalDataDir, + }; +} diff --git a/src/lib/routes.ts b/src/lib/routes.ts new file mode 100644 index 00000000..d3f54a3f --- /dev/null +++ b/src/lib/routes.ts @@ -0,0 +1,5 @@ +export type Route = "home" | "recipes" | "cook" | "history" | "channels" | "cron" | "doctor" | "context" | "orchestrator"; + +export const INSTANCE_ROUTES: Route[] = ["home", "channels", "recipes", "cron", "doctor", "context", "history"]; + +export const OPEN_TABS_STORAGE_KEY = "clawpal_open_tabs"; diff --git a/src/pages/__tests__/Doctor.test.tsx b/src/pages/__tests__/Doctor.test.tsx index 010ea9ef..fabb2e6c 100644 --- a/src/pages/__tests__/Doctor.test.tsx +++ b/src/pages/__tests__/Doctor.test.tsx @@ -55,7 +55,7 @@ describe("Doctor page rescue header", () => { expect(html).toContain("flex flex-col items-center"); 
expect(html).toContain("role=\"img\""); expect(html).toContain("alt=\"Diagnose\""); - expect(html).toContain("src=\"/Users/ChenYu/Documents/Github/clawpal/src/assets/doctor.png\""); + expect(html).toContain("src=\"/Users/ChenYu/Documents/Github/clawpal/src/assets/doctor.webp\""); expect(html).toContain("aria-label=\"Open logs\""); expect(html).toContain(">Diagnose<"); expect(html).toContain("Run a structured check before attempting repairs on the primary profile."); diff --git a/vite.config.ts b/vite.config.ts index 5af5416d..726925c4 100644 --- a/vite.config.ts +++ b/vite.config.ts @@ -11,4 +11,20 @@ export default defineConfig({ "@": path.resolve(__dirname, "./src"), }, }, + build: { + rollupOptions: { + output: { + manualChunks: { + // Split large vendor deps into separate chunks + "vendor-react": ["react", "react-dom"], + "vendor-i18n": ["i18next", "react-i18next", "i18next-browser-languagedetector"], + "vendor-ui": ["radix-ui", "cmdk", "class-variance-authority", "clsx", "tailwind-merge"], + "vendor-icons": ["lucide-react"], + "vendor-diff": ["react-diff-viewer-continued"], + }, + }, + }, + // Target smaller chunks + chunkSizeWarningLimit: 300, + }, });