diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2a9be86..cc0fb19 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,6 +13,14 @@ jobs: - name: Checkout uses: actions/checkout@v4 + - name: Install system dependencies + run: | + sudo apt-get update + sudo apt-get install -y \ + lua5.4 \ + liblua5.4-dev \ + libslirp-dev + - name: Install Rust toolchain uses: dtolnay/rust-toolchain@stable with: @@ -37,4 +45,4 @@ jobs: - name: Test timeout-minutes: 15 - run: cargo test --workspace --all-targets --all-features --locked + run: RUN_ANVIL_TESTS=1 cargo test --workspace --all-targets --all-features --locked diff --git a/.gitignore b/.gitignore index 7d4ba02..a16d955 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,7 @@ sequencer.db sequencer.db-shm sequencer.db-wal benchmarks/results/ +/benchmarks/.deps/ +/examples/canonical-app/out/ +/out/ +/.DS_Store diff --git a/AGENTS.md b/AGENTS.md index 3a05c17..a6c9dad 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -8,7 +8,6 @@ Build and evolve a **sequencer prototype** for a future DeFi stack. Current scope is intentionally small: a **dummy wallet app** that supports: - `Transfer` -- `Deposit` - `Withdrawal` Primary objective in this phase: make sequencer behavior, safety checks, and persistence reliable before adding "real world" execution logic. @@ -26,36 +25,42 @@ Primary objective in this phase: make sequencer behavior, safety checks, and per ## Glossary - `chunk`: small bounded list of user ops processed/executed and persisted together to amortize SQLite cost and keep low-latency ack behavior. -- `frame`: canonical ordering boundary that contains user ops plus a `drain_n` decision for direct-input execution. +- `frame`: canonical ordering boundary that commits a `safe_block` plus a list of user ops; canonical execution drains all direct inputs safe at that block before executing the frame’s user ops. - `batch`: list of frames that will be posted on-chain as one unit. 
- `inclusion lane`: the hot-path single-lane loop that dequeues user ops, executes app logic, persists ordering, and rotates frame/batch boundaries. ## Architecture Map -- `sequencer/src/main.rs`: process bootstrap, env config, queue wiring, HTTP server. -- `sequencer/src/api/mod.rs`: `POST /tx` and `GET /ws/subscribe` endpoints (tx ingress + replay broadcaster). +- `sequencer/src/main.rs`: thin binary entrypoint. +- `sequencer/src/lib.rs`: public sequencer API (`run`, `RunConfig`). +- `sequencer/src/config.rs`: runtime input parsing and EIP-712 domain construction. +- `sequencer/src/runtime.rs`: bootstrap and runtime wiring. +- `sequencer/src/api/mod.rs`: `POST /tx` and `GET /ws/subscribe` endpoints (tx ingress + replay feed). - `sequencer/src/api/error.rs`: API error model + HTTP mapping. - `sequencer/src/inclusion_lane/mod.rs`: inclusion-lane exports and public surface. - `sequencer/src/inclusion_lane/lane.rs`: batched execution/commit loop (single lane). - `sequencer/src/inclusion_lane/types.rs`: inclusion-lane queue item and pipeline error types. - `sequencer/src/inclusion_lane/error.rs`: inclusion-lane runtime and catch-up error types. -- `sequencer/src/l2_tx_broadcaster/mod.rs`: centralized ordered-L2Tx poller + live fanout to WS subscribers. +- `sequencer/src/input_reader/`: safe-input ingestion from InputBox into SQLite. +- `sequencer/src/l2_tx_feed/mod.rs`: DB-backed ordered-L2Tx feed used by WS subscriptions. - `sequencer/src/storage/mod.rs`: DB open, migrations, frame persistence, and direct-input broker APIs. - `sequencer/src/storage/migrations/`: DB schema/bootstrapping (`0001`). - `sequencer-core/src/`: shared domain types/interfaces (`Application`, `SignedUserOp`, `SequencedL2Tx`, broadcast message model). - `examples/app-core/src/application/mod.rs`: wallet prototype implementing `Application`. -- `examples/canonical-app/src/main.rs`: placeholder canonical scheduler binary entrypoint. 
+- `benchmarks/src/`: benchmark harnesses and self-contained benchmark runtime. ## Domain Truths (Important) - This is a **sequencer prototype**, not a full DeFi stack yet. - API validates signature and enqueues signed `UserOp`; method decoding happens during application execution. +- Deposits are direct-input-only (L1 -> L2) and must not be represented as user ops. - Rejections (`InvalidNonce`, fee cap too low, insufficient gas balance) produce no state mutation and are not persisted. - Included txs are persisted as frame/batch data in `batches`, `frames`, `user_ops`, `direct_inputs`, and `sequenced_l2_txs`. - Frame fee is persisted in `frames.fee` and is fixed for the lifetime of that frame. - The next frame fee is sampled from `recommended_fees` when rotating to a new frame (default bootstrap value is `0`). -- `/ws/subscribe` has soft operational guardrails: subscriber cap (`SEQ_WS_MAX_SUBSCRIBERS`, default `64`) and catch-up cap (`SEQ_WS_MAX_CATCHUP_EVENTS`, default `50000`). +- `/ws/subscribe` currently has internal guardrails: subscriber cap `64`, catch-up cap `50000`. - Wallet state (balances/nonces) is in-memory right now (not persisted). +- EIP-712 domain name/version are fixed in code; chain ID and verifying contract are deployment-specific inputs. ## Hot-Path Invariants @@ -69,7 +74,7 @@ Primary objective in this phase: make sequencer behavior, safety checks, and per - Storage model is append-oriented; avoid mutable status flags for open/closed entities. - Open batch/frame are derived by “latest row” convention. -- `drain_n` is derivable from `sequenced_l2_txs` by counting direct-input rows per frame. +- A frame’s leading direct-input prefix is derivable from `sequenced_l2_txs` plus `frames.safe_block`. - Safe cursor/head values should be derived from persisted facts when possible, not duplicated as mutable fields. - Replay/catch-up must use persisted ordering plus persisted frame fee (`frames.fee`) to mirror inclusion semantics. 
- Included user-op identity is constrained by `UNIQUE(sender, nonce)`. @@ -101,38 +106,22 @@ cargo fmt --all cargo clippy --all-targets --all-features -- -D warnings ``` -Run server (defaults shown): +Run server: ```bash -SEQ_HTTP_ADDR=127.0.0.1:3000 \ -SEQ_DB_PATH=sequencer.db \ +SEQ_ETH_RPC_URL=http://127.0.0.1:8545 \ +SEQ_DOMAIN_CHAIN_ID=31337 \ +SEQ_DOMAIN_VERIFYING_CONTRACT=0x1111111111111111111111111111111111111111 \ cargo run -p sequencer ``` -Key env vars: +Optional env vars: - `SEQ_HTTP_ADDR` - `SEQ_DB_PATH` -- `SEQ_QUEUE_CAP` -- `SEQ_OVERLOAD_MAX_INFLIGHT_SUBMISSIONS` -- `SEQ_MAX_USER_OPS_PER_CHUNK` (preferred) -- `SEQ_MAX_BATCH` (legacy alias) -- `SEQ_SAFE_DIRECT_BUFFER_CAPACITY` -- `SEQ_MAX_BATCH_OPEN_MS` -- `SEQ_MAX_BATCH_USER_OP_BYTES` -- `SEQ_INCLUSION_LANE_IDLE_POLL_INTERVAL_MS` (preferred) -- `SEQ_INCLUSION_LANE_TICK_INTERVAL_MS` (legacy alias) -- `SEQ_COMMIT_LANE_TICK_INTERVAL_MS` (legacy alias) -- `SEQ_BROADCASTER_IDLE_POLL_INTERVAL_MS` -- `SEQ_BROADCASTER_PAGE_SIZE` -- `SEQ_BROADCASTER_SUBSCRIBER_BUFFER_CAPACITY` -- `SEQ_WS_MAX_SUBSCRIBERS` -- `SEQ_WS_MAX_CATCHUP_EVENTS` -- `SEQ_RUNTIME_METRICS_ENABLED` -- `SEQ_RUNTIME_METRICS_LOG_INTERVAL_MS` -- `SEQ_MAX_BODY_BYTES` -- `SEQ_SQLITE_SYNCHRONOUS` -- `SEQ_DOMAIN_NAME` -- `SEQ_DOMAIN_VERSION` +- `SEQ_LONG_BLOCK_RANGE_ERROR_CODES` + +Required env vars: +- `SEQ_ETH_RPC_URL` - `SEQ_DOMAIN_CHAIN_ID` - `SEQ_DOMAIN_VERIFYING_CONTRACT` @@ -178,6 +167,12 @@ Focus tests on: If adding integration tests, prefer black-box tests around `POST /tx` and commit outcomes. 
+Some `sequencer` tests use Anvil and are opt-in locally: + +```bash +RUN_ANVIL_TESTS=1 cargo test -p sequencer --lib +``` + ## Definition of Done for Agent Changes Before finishing, ensure: diff --git a/Cargo.lock b/Cargo.lock index abf57a2..b8f554c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -345,7 +345,7 @@ dependencies = [ "cfg-if", "const-hex", "derive_more", - "foldhash 0.2.0", + "foldhash", "hashbrown 0.16.1", "indexmap 2.13.0", "itoa", @@ -421,7 +421,7 @@ checksum = "ce8849c74c9ca0f5a03da1c865e3eb6f768df816e67dd3721a398a8a7e398011" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -556,7 +556,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -574,7 +574,7 @@ dependencies = [ "proc-macro2", "quote", "sha3", - "syn 2.0.117", + "syn 2.0.114", "syn-solidity", ] @@ -592,7 +592,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.117", + "syn 2.0.114", "syn-solidity", ] @@ -683,7 +683,7 @@ dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -745,21 +745,22 @@ dependencies = [ "windows-sys 0.61.2", ] -[[package]] -name = "anyhow" -version = "1.0.102" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" - [[package]] name = "app-core" version = "0.1.0" dependencies = [ "alloy-primitives", "ethereum_ssz", + "ethereum_ssz_derive", "sequencer-core", ] +[[package]] +name = "ar" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d67af77d68a931ecd5cbd8a3b5987d63a1d1d1278f7f6a60ae33db485cdebb69" + [[package]] name = "ark-ff" version = "0.3.0" @@ -845,7 +846,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" dependencies = [ "quote", - "syn 2.0.117", + "syn 
2.0.114", ] [[package]] @@ -883,7 +884,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -966,7 +967,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -988,7 +989,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -999,7 +1000,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -1016,7 +1017,7 @@ checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -1102,8 +1103,11 @@ checksum = "2af50177e190e07a26ab74f8b1efbfe2ef87da2116221318cb1c2e82baf7de06" name = "benchmarks" version = "0.1.0" dependencies = [ + "alloy", "alloy-primitives", "alloy-sol-types", + "app-core", + "cartesi-rollups-contracts", "clap", "ethereum_ssz", "futures-util", @@ -1117,6 +1121,26 @@ dependencies = [ "tokio-tungstenite", ] +[[package]] +name = "bindgen" +version = "0.72.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" +dependencies = [ + "bitflags", + "cexpr", + "clang-sys", + "itertools 0.13.0", + "log", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.114", +] + [[package]] name = "bit-set" version = "0.8.0" @@ -1150,9 +1174,9 @@ dependencies = [ [[package]] name = "bitflags" -version = "2.11.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af" +checksum = 
"812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" [[package]] name = "bitvec" @@ -1207,14 +1231,14 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] name = "bumpalo" -version = "3.20.2" +version = "3.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb" +checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" [[package]] name = "byte-slice-cast" @@ -1256,7 +1280,49 @@ dependencies = [ name = "canonical-app" version = "0.1.0" dependencies = [ + "alloy-primitives", + "alloy-sol-types", "app-core", + "ethereum_ssz", + "k256", + "sequencer-core", + "trolley", + "types", +] + +[[package]] +name = "canonical-test" +version = "0.1.0" +dependencies = [ + "canonical-app", + "testsi", + "types", +] + +[[package]] +name = "cartesi-machine" +version = "2.0.0" +source = "git+https://github.com/cartesi/dave?branch=feature%2Fbump-emulator-2-stephen#5d0560a04f623318af9ef5b7f193170b618b6ced" +dependencies = [ + "base64", + "cartesi-machine-sys", + "derive_builder", + "hex", + "serde", + "serde_json", + "thiserror 2.0.18", +] + +[[package]] +name = "cartesi-machine-sys" +version = "2.0.0" +source = "git+https://github.com/cartesi/dave?branch=feature%2Fbump-emulator-2-stephen#5d0560a04f623318af9ef5b7f193170b618b6ced" +dependencies = [ + "bindgen", + "cfg-if", + "hex-literal 1.1.0", + "link-cplusplus", + "sha1", ] [[package]] @@ -1271,14 +1337,23 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.56" +version = "1.2.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aebf35691d1bfb0ac386a69bac2fde4dd276fb618cf8bf4f5318fe285e821bb2" +checksum = "47b26a0954ae34af09b50f0de26458fa95369a0d478d8236d3f93082b219bd29" dependencies = [ "find-msvc-tools", "shlex", ] +[[package]] +name = "cexpr" +version = "0.6.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + [[package]] name = "cfg-if" version = "1.0.4" @@ -1303,6 +1378,17 @@ dependencies = [ "windows-link", ] +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + [[package]] name = "clap" version = "4.5.60" @@ -1334,7 +1420,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -1351,9 +1437,9 @@ checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" [[package]] name = "const-hex" -version = "1.18.1" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "531185e432bb31db1ecda541e9e7ab21468d4d844ad7505e0546a49b4945d49b" +checksum = "3bb320cac8a0750d7f25280aa97b09c26edfe161164238ecbbb31092b079e735" dependencies = [ "cfg-if", "cpufeatures", @@ -1460,6 +1546,16 @@ dependencies = [ "typenum", ] +[[package]] +name = "darling" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" +dependencies = [ + "darling_core 0.20.11", + "darling_macro 0.20.11", +] + [[package]] name = "darling" version = "0.21.3" @@ -1480,6 +1576,20 @@ dependencies = [ "darling_macro 0.23.0", ] +[[package]] +name = "darling_core" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn 2.0.114", +] + [[package]] name = "darling_core" version = "0.21.3" @@ -1492,7 +1602,7 @@ dependencies = [ "quote", "serde", "strsim", - 
"syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -1505,7 +1615,18 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.117", + "syn 2.0.114", +] + +[[package]] +name = "darling_macro" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" +dependencies = [ + "darling_core 0.20.11", + "quote", + "syn 2.0.114", ] [[package]] @@ -1516,7 +1637,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -1527,7 +1648,7 @@ checksum = "ac3984ec7bd6cfa798e62b4a642426a5be0e68f9401cfc2a01e3fa9ea2fcdb8d" dependencies = [ "darling_core 0.23.0", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -1581,6 +1702,37 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "derive_builder" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" +dependencies = [ + "darling 0.20.11", + "proc-macro2", + "quote", + "syn 2.0.114", +] + +[[package]] +name = "derive_builder_macro" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" +dependencies = [ + "derive_builder_core", + "syn 2.0.114", +] + [[package]] name = "derive_more" version = "2.1.1" @@ -1600,7 +1752,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.117", + "syn 2.0.114", "unicode-xid", ] @@ -1633,7 +1785,7 @@ checksum = 
"97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -1672,7 +1824,7 @@ dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -1721,7 +1873,7 @@ checksum = "8ca9601fb2d62598ee17836250842873a413586e5d7ed88b356e38ddbb0ec631" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -1777,7 +1929,7 @@ dependencies = [ "darling 0.23.0", "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -1830,6 +1982,17 @@ dependencies = [ "subtle", ] +[[package]] +name = "filetime" +version = "0.2.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f98844151eee8917efc50bd9e8318cb963ae8b297431495d3f758616ea5c57db" +dependencies = [ + "cfg-if", + "libc", + "libredox", +] + [[package]] name = "find-msvc-tools" version = "0.1.9" @@ -1854,12 +2017,6 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" -[[package]] -name = "foldhash" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" - [[package]] name = "foldhash" version = "0.2.0" @@ -1883,9 +2040,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.32" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b147ee9d1f6d097cef9ce628cd2ee62288d963e16fb287bd9286455b241382d" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -1898,9 +2055,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.32" +version = "0.3.31" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "07bbe89c50d7a535e539b8c17bc0b49bdb77747034daa8087407d655f3f7cc1d" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -1908,15 +2065,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.32" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.32" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf29c38818342a3b26b5b923639e7b1f4a61fc5e76102d4b1981c6dc7a7579d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -1931,32 +2088,32 @@ checksum = "cecba35d7ad927e23624b22ad55235f2239cfa44fd10428eecbeba6d6a717718" [[package]] name = "futures-macro" -version = "0.3.32" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] name = "futures-sink" -version = "0.3.32" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.32" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393" +checksum = 
"f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-util" -version = "0.3.32" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -1966,6 +2123,7 @@ dependencies = [ "futures-task", "memchr", "pin-project-lite", + "pin-utils", "slab", ] @@ -2008,24 +2166,11 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "r-efi 5.3.0", + "r-efi", "wasip2", "wasm-bindgen", ] -[[package]] -name = "getrandom" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0de51e6874e94e7bf76d726fc5d13ba782deca734ff60d5bb2fb2607c7406555" -dependencies = [ - "cfg-if", - "libc", - "r-efi 6.0.0", - "wasip2", - "wasip3", -] - [[package]] name = "glob" version = "0.3.3" @@ -2055,15 +2200,6 @@ version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" -[[package]] -name = "hashbrown" -version = "0.15.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" -dependencies = [ - "foldhash 0.1.5", -] - [[package]] name = "hashbrown" version = "0.16.1" @@ -2072,7 +2208,7 @@ checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" dependencies = [ "allocator-api2", "equivalent", - "foldhash 0.2.0", + "foldhash", "serde", "serde_core", ] @@ -2113,6 +2249,18 @@ dependencies = [ "arrayvec", ] +[[package]] +name = "hex-literal" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" + +[[package]] +name = "hex-literal" +version = "1.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e712f64ec3850b98572bffac52e2c6f282b29fe6c5fa6d42334b30be438d95c1" + [[package]] name = "hmac" version = "0.12.1" @@ -2334,12 +2482,6 @@ dependencies = [ "zerovec", ] -[[package]] -name = "id-arena" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" - [[package]] name = "ident_case" version = "1.0.1" @@ -2384,7 +2526,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -2410,11 +2552,20 @@ dependencies = [ "serde_core", ] +[[package]] +name = "inventory" +version = "0.3.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "009ae045c87e7082cb72dab0ccd01ae075dd00141ddc108f43a0ea150a9e7227" +dependencies = [ + "rustversion", +] + [[package]] name = "ipnet" -version = "2.12.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d98f6fed1fde3f8c21bc40a1abb88dd75e67924f9cffc3ef95607bad8017f8e2" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "iri-string" @@ -2467,9 +2618,9 @@ checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" [[package]] name = "js-sys" -version = "0.3.91" +version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b49715b7073f385ba4bc528e5747d02e66cb39c6146efb66b781f131f0fb399c" +checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" dependencies = [ "once_cell", "wasm-bindgen", @@ -2492,9 +2643,9 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.6" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb26cec98cce3a3d96cbb7bced3c4b16e3d13f27ec56dbd62cbc8f39cfb9d653" +checksum = 
"ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" dependencies = [ "cpufeatures", ] @@ -2516,16 +2667,35 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] -name = "leb128fmt" -version = "0.1.0" +name = "libc" +version = "0.2.180" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" +checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" [[package]] -name = "libc" -version = "0.2.182" +name = "libcmt-sys" +version = "0.1.0" +source = "git+https://github.com/GCdePaula/cartesi-tools-rs?rev=27b457994546ea4a8345ed078031782f155d3013#27b457994546ea4a8345ed078031782f155d3013" +dependencies = [ + "ar", + "bindgen", + "bytes", + "hex-literal 0.4.1", + "reqwest", + "sha2", + "tar", + "xz2", +] + +[[package]] +name = "libloading" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6800badb6cb2082ffd7b6a67e6125bb39f18782f793520caee8cb8846be06112" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" +dependencies = [ + "cfg-if", + "windows-link", +] [[package]] name = "libm" @@ -2533,6 +2703,18 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981" +[[package]] +name = "libredox" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1744e39d1d6a9948f4f388969627434e31128196de472883b39f148769bfe30a" +dependencies = [ + "bitflags", + "libc", + "plain", + "redox_syscall 0.7.3", +] + [[package]] name = "libsqlite3-sys" version = "0.36.0" @@ -2544,11 +2726,31 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "libtest-mimic" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6d8de370f98a6cb8a4606618e53e802f93b094ddec0f96988eaec2c27e6e9ce7" +dependencies = [ + "clap", + "termcolor", + "threadpool", +] + +[[package]] +name = "link-cplusplus" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f78c730aaa7d0b9336a299029ea49f9ee53b0ed06e9202e8cb7db9bae7b8c82" +dependencies = [ + "cc", +] + [[package]] name = "linux-raw-sys" -version = "0.12.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" [[package]] name = "litemap" @@ -2586,6 +2788,17 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" +[[package]] +name = "lzma-sys" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fda04ab3764e6cde78b9974eec4f779acaba7c4e84b36eca3cf77c581b85d27" +dependencies = [ + "cc", + "libc", + "pkg-config", +] + [[package]] name = "macro-string" version = "0.1.4" @@ -2594,7 +2807,7 @@ checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -2614,9 +2827,9 @@ checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" [[package]] name = "memchr" -version = "2.8.0" +version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "mime" @@ -2624,6 +2837,12 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" +[[package]] +name = 
"minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] name = "mio" version = "1.1.1" @@ -2635,6 +2854,16 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + [[package]] name = "nu-ansi-term" version = "0.50.3" @@ -2707,7 +2936,7 @@ checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -2761,7 +2990,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -2782,7 +3011,7 @@ checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.5.18", "smallvec", "windows-link", ] @@ -2826,14 +3055,14 @@ checksum = "d9b20ed30f105399776b9c883e68e536ef602a16ae6f596d2c473591d6ad64c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] name = "pin-project-lite" -version = "0.2.17" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a89322df9ebe1c1578d689c92318e070967d1042b512afbe49518723f4e6d5cd" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -2857,6 +3086,12 @@ version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" +[[package]] +name = "plain" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" + [[package]] name = "potential_utf" version = "0.1.4" @@ -2888,7 +3123,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -2904,9 +3139,9 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "3.5.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e67ba7e9b2b56446f1d419b1d807906278ffa1a658a8a5d8a39dcb1f5a78614f" +checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ "toml_edit", ] @@ -2930,7 +3165,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -3024,9 +3259,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.45" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41f2619966050689382d2b44f664f4bc593e129785a36d6ee376ddf37259b924" +checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" dependencies = [ "proc-macro2", ] @@ -3037,12 +3272,6 @@ version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" -[[package]] -name = "r-efi" -version = "6.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8dcc9c7d52a811697d2151c701e0d08956f92b0e24136cf4cf27b57a6a0d9bf" - [[package]] name = "radium" version = "0.7.0" @@ -3122,9 +3351,9 @@ dependencies = [ [[package]] name = "rapidhash" -version = "4.4.1" +version = "4.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e48930979c155e2f33aa36ab3119b5ee81332beb6482199a8ecd6029b80b59" +checksum = "5d8b5b858a440a0bc02625b62dd95131b9201aa9f69f411195dd4a7cfb1de3d7" 
dependencies = [ "rustversion", ] @@ -3138,6 +3367,15 @@ dependencies = [ "bitflags", ] +[[package]] +name = "redox_syscall" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce70a74e890531977d37e532c34d45e9055d2409ed08ddba14529471ed0be16" +dependencies = [ + "bitflags", +] + [[package]] name = "ref-cast" version = "1.0.25" @@ -3155,7 +3393,19 @@ checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", +] + +[[package]] +name = "regex" +version = "1.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", ] [[package]] @@ -3171,9 +3421,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.10" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a" +checksum = "a96887878f22d7bad8a3b6dc5b7440e0ada9a245242924394987b21cf2210a4c" [[package]] name = "reqwest" @@ -3183,7 +3433,9 @@ checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" dependencies = [ "base64", "bytes", + "futures-channel", "futures-core", + "futures-util", "http", "http-body", "http-body-util", @@ -3348,9 +3600,9 @@ dependencies = [ [[package]] name = "rustix" -version = "1.1.4" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190" +checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" dependencies = [ "bitflags", "errno", @@ -3414,9 +3666,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.23" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" +checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984" [[package]] name = "schemars" @@ -3534,11 +3786,9 @@ dependencies = [ "thiserror 1.0.69", "tokio", "tokio-tungstenite", - "tower", "tower-http", "tracing", "tracing-subscriber", - "url", ] [[package]] @@ -3591,7 +3841,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -3658,7 +3908,7 @@ dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -3730,11 +3980,10 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.8" +version = "1.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b" +checksum = "b2a4719bff48cee6b39d12c020eeb490953ad2443b7055bd0b21fca26bd8c28b" dependencies = [ - "errno", "libc", ] @@ -3831,7 +4080,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -3853,9 +4102,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.117" +version = "2.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" +checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" dependencies = [ "proc-macro2", "quote", @@ -3871,7 +4120,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -3891,7 +4140,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -3900,19 +4149,62 @@ version = "1.0.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" +[[package]] +name = "tar" +version = "0.4.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d863878d212c87a19c1a610eb53bb01fe12951c0501cf5a0d65f724914a667a" +dependencies = [ + "filetime", + "libc", + "xattr", +] + [[package]] name = "tempfile" -version = "3.26.0" +version = "3.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82a72c767771b47409d2345987fda8628641887d5466101319899796367354a0" +checksum = "655da9c7eb6305c55742045d5a8d2037996d61d8de95806335c7c86ce0f82e9c" dependencies = [ "fastrand", - "getrandom 0.4.2", + "getrandom 0.3.4", "once_cell", "rustix", "windows-sys 0.61.2", ] +[[package]] +name = "termcolor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "testsi" +version = "0.1.0" +source = "git+https://github.com/GCdePaula/cartesi-tools-rs?rev=27b457994546ea4a8345ed078031782f155d3013#27b457994546ea4a8345ed078031782f155d3013" +dependencies = [ + "cartesi-machine", + "inventory", + "libtest-mimic", + "testsi-macros", + "thiserror 1.0.69", + "types", +] + +[[package]] +name = "testsi-macros" +version = "0.1.0" +source = "git+https://github.com/GCdePaula/cartesi-tools-rs?rev=27b457994546ea4a8345ed078031782f155d3013#27b457994546ea4a8345ed078031782f155d3013" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.114", +] + [[package]] name = "thiserror" version = "1.0.69" @@ -3939,7 +4231,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -3950,7 +4242,7 @@ checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -4029,9 +4321,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.50.0" +version = "1.49.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27ad5e34374e03cfffefc301becb44e9dc3c17584f414349ebe29ed26661822d" +checksum = "72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86" dependencies = [ "bytes", "libc", @@ -4045,13 +4337,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.6.1" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c55a2eff8b69ce66c84f85e1da1c233edc36ceb85a2058d11b0d6a3c7e7569c" +checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -4103,18 +4395,18 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "1.0.0+spec-1.1.0" +version = "0.7.5+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32c2555c699578a4f59f0cc68e5116c8d7cabbd45e1409b989d4be085b53f13e" +checksum = "92e1cfed4a3038bc5a127e35a2d360f145e1f4b971b551a2ba5fd7aedf7e1347" dependencies = [ "serde_core", ] [[package]] name = "toml_edit" -version = "0.25.4+spec-1.1.0" +version = "0.23.10+spec-1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7193cbd0ce53dc966037f54351dbbcf0d5a642c7f0038c382ef9e677ce8c13f2" +checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" dependencies = [ "indexmap 2.13.0", "toml_datetime", @@ -4124,9 +4416,9 @@ dependencies = [ [[package]] name = "toml_parser" -version = "1.0.9+spec-1.1.0" +version = "1.0.6+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "702d4415e08923e7e1ef96cd5727c0dfed80b4d2fa25db9647fe5eb6f7c5a4c4" +checksum = 
"a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44" dependencies = [ "winnow", ] @@ -4142,7 +4434,6 @@ dependencies = [ "pin-project-lite", "sync_wrapper", "tokio", - "tokio-util", "tower-layer", "tower-service", "tracing", @@ -4199,7 +4490,7 @@ checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -4241,6 +4532,15 @@ dependencies = [ "tracing-log", ] +[[package]] +name = "trolley" +version = "0.1.0" +source = "git+https://github.com/GCdePaula/cartesi-tools-rs?rev=27b457994546ea4a8345ed078031782f155d3013#27b457994546ea4a8345ed078031782f155d3013" +dependencies = [ + "libcmt-sys", + "types", +] + [[package]] name = "try-lock" version = "0.2.5" @@ -4270,6 +4570,15 @@ version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" +[[package]] +name = "types" +version = "0.1.0" +source = "git+https://github.com/GCdePaula/cartesi-tools-rs?rev=27b457994546ea4a8345ed078031782f155d3013#27b457994546ea4a8345ed078031782f155d3013" +dependencies = [ + "alloy-primitives", + "alloy-sol-types", +] + [[package]] name = "ucd-trie" version = "0.1.7" @@ -4296,9 +4605,9 @@ checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" [[package]] name = "unicode-ident" -version = "1.0.24" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" [[package]] name = "unicode-segmentation" @@ -4400,20 +4709,11 @@ dependencies = [ "wit-bindgen", ] -[[package]] -name = "wasip3" -version = "0.4.0+wasi-0.3.0-rc-2026-01-06" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" 
-dependencies = [ - "wit-bindgen", -] - [[package]] name = "wasm-bindgen" -version = "0.2.114" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6532f9a5c1ece3798cb1c2cfdba640b9b3ba884f5db45973a6f442510a87d38e" +checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" dependencies = [ "cfg-if", "once_cell", @@ -4424,9 +4724,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.64" +version = "0.4.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9c5522b3a28661442748e09d40924dfb9ca614b21c00d3fd135720e48b67db8" +checksum = "70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" dependencies = [ "cfg-if", "futures-util", @@ -4438,9 +4738,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.114" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18a2d50fcf105fb33bb15f00e7a77b772945a2ee45dcf454961fd843e74c18e6" +checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -4448,60 +4748,26 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.114" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03ce4caeaac547cdf713d280eda22a730824dd11e6b8c3ca9e42247b25c631e3" +checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.114" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75a326b8c223ee17883a4251907455a2431acc2791c98c26279376490c378c16" +checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" dependencies = [ "unicode-ident", ] -[[package]] -name = 
"wasm-encoder" -version = "0.244.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" -dependencies = [ - "leb128fmt", - "wasmparser", -] - -[[package]] -name = "wasm-metadata" -version = "0.244.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" -dependencies = [ - "anyhow", - "indexmap 2.13.0", - "wasm-encoder", - "wasmparser", -] - -[[package]] -name = "wasmparser" -version = "0.244.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" -dependencies = [ - "bitflags", - "hashbrown 0.15.5", - "indexmap 2.13.0", - "semver 1.0.27", -] - [[package]] name = "wasmtimer" version = "0.4.3" @@ -4518,9 +4784,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.91" +version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854ba17bb104abfb26ba36da9729addc7ce7f06f5c0f90f3c391f8461cca21f9" +checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" dependencies = [ "js-sys", "wasm-bindgen", @@ -4545,6 +4811,15 @@ dependencies = [ "rustls-pki-types", ] +[[package]] +name = "winapi-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +dependencies = [ + "windows-sys 0.61.2", +] + [[package]] name = "windows-core" version = "0.62.2" @@ -4566,7 +4841,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -4577,7 +4852,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ 
-4774,102 +5049,39 @@ name = "wit-bindgen" version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" -dependencies = [ - "wit-bindgen-rust-macro", -] - -[[package]] -name = "wit-bindgen-core" -version = "0.51.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" -dependencies = [ - "anyhow", - "heck", - "wit-parser", -] [[package]] -name = "wit-bindgen-rust" -version = "0.51.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" -dependencies = [ - "anyhow", - "heck", - "indexmap 2.13.0", - "prettyplease", - "syn 2.0.117", - "wasm-metadata", - "wit-bindgen-core", - "wit-component", -] - -[[package]] -name = "wit-bindgen-rust-macro" -version = "0.51.0" +name = "writeable" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" -dependencies = [ - "anyhow", - "prettyplease", - "proc-macro2", - "quote", - "syn 2.0.117", - "wit-bindgen-core", - "wit-bindgen-rust", -] +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" [[package]] -name = "wit-component" -version = "0.244.0" +name = "wyz" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" dependencies = [ - "anyhow", - "bitflags", - "indexmap 2.13.0", - "log", - "serde", - "serde_derive", - "serde_json", - "wasm-encoder", - "wasm-metadata", - "wasmparser", - "wit-parser", + "tap", ] [[package]] -name = "wit-parser" -version = "0.244.0" +name = "xattr" +version = "1.6.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +checksum = "32e45ad4206f6d2479085147f02bc2ef834ac85886624a23575ae137c8aa8156" dependencies = [ - "anyhow", - "id-arena", - "indexmap 2.13.0", - "log", - "semver 1.0.27", - "serde", - "serde_derive", - "serde_json", - "unicode-xid", - "wasmparser", + "libc", + "rustix", ] [[package]] -name = "writeable" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" - -[[package]] -name = "wyz" -version = "0.5.1" +name = "xz2" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +checksum = "388c44dc09d76f1536602ead6d325eb532f5c122f17782bd57fb47baeeb767e2" dependencies = [ - "tap", + "lzma-sys", ] [[package]] @@ -4891,28 +5103,28 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", "synstructure", ] [[package]] name = "zerocopy" -version = "0.8.40" +version = "0.8.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a789c6e490b576db9f7e6b6d661bcc9799f7c0ac8352f56ea20193b2681532e5" +checksum = "57cf3aa6855b23711ee9852dfc97dfaa51c45feaba5b645d0c777414d494a961" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.40" +version = "0.8.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f65c489a7071a749c849713807783f70672b28094011623e200cb86dcb835953" +checksum = "8a616990af1a287837c4fe6596ad77ef57948f787e46ce28e166facc0cc1cb75" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -4932,7 +5144,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", "synstructure", ] @@ -4953,7 +5165,7 @@ checksum = "85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] @@ -4986,11 +5198,11 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn 2.0.114", ] [[package]] name = "zmij" -version = "1.0.21" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" +checksum = "3ff05f8caa9038894637571ae6b9e29466c1f4f829d26c9b28f869a29cbe3445" diff --git a/Cargo.toml b/Cargo.toml index 35b93f8..14e5480 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,6 +6,7 @@ members = [ "sdk/rust-client", "examples/app-core", "examples/canonical-app", + "examples/canonical-test", "benchmarks", ] default-members = ["sequencer"] @@ -23,3 +24,10 @@ authors = [ "Pedro Argento ", "Stephen Chen <20940639+stephenctw@users.noreply.github.com>", ] + +[workspace.dependencies] +libcmt-sys = { version = "0.1", git = "https://github.com/GCdePaula/cartesi-tools-rs", rev = "27b457994546ea4a8345ed078031782f155d3013" } +trolley = { version = "0.1", git = "https://github.com/GCdePaula/cartesi-tools-rs", rev = "27b457994546ea4a8345ed078031782f155d3013" } +testsi = { version = "0.1", git = "https://github.com/GCdePaula/cartesi-tools-rs", rev = "27b457994546ea4a8345ed078031782f155d3013" } +types = { version = "0.1", git = "https://github.com/GCdePaula/cartesi-tools-rs", rev = "27b457994546ea4a8345ed078031782f155d3013" } +cartesi-machine = { version = "2", git = "https://github.com/cartesi/dave", rev = "5d0560a04f623318af9ef5b7f193170b618b6ced", features = ["download_uarch"] } diff --git a/README.md b/README.md index 2cb8f9a..fc439b8 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Sequencer Prototype -Prototype 
sequencer, currently backed by a dummy wallet app (`Transfer`, `Deposit`, `Withdrawal`). +Prototype sequencer, currently backed by a dummy wallet app (`Transfer`, `Withdrawal`). Current focus is reliability of sequencing, persistence, and replay semantics. @@ -17,12 +17,13 @@ Current focus is reliability of sequencing, persistence, and replay semantics. - **User ops** arrive through the API, are validated, executed, and persisted by the inclusion lane. - **Direct inputs** are stored in SQLite (`direct_inputs`) and sequenced in append-only replay order (`sequenced_l2_txs`). -- **Ordering** is deterministic and persisted. Replay/catch-up reads `sequenced_l2_txs` (joined with `user_ops` / `direct_inputs`). +- **Deposits** are direct-input-only (L1 -> L2) and are not accepted as user ops. +- **Ordering** is deterministic and persisted. Replay/catch-up reads `sequenced_l2_txs` joined with `user_ops` and `direct_inputs`. - **Frame fee** is fixed per frame (`frames.fee`): - users sign `max_fee` - inclusion validates `max_fee >= current_frame_fee` - - execution charges `current_frame_fee` (not signed max) - - next frame fee is sampled from `recommended_fees` when rotating to a new frame + - execution charges `current_frame_fee` + - the next frame fee is sampled from `recommended_fees` when rotating to a new frame ## Quick Start @@ -35,14 +36,34 @@ cargo fmt --all cargo clippy --all-targets --all-features -- -D warnings ``` -Run server with defaults: +Run the server with a local deployment domain: ```bash -SEQ_HTTP_ADDR=127.0.0.1:3000 \ -SEQ_DB_PATH=sequencer.db \ +SEQ_ETH_RPC_URL=http://127.0.0.1:8545 \ +SEQ_DOMAIN_CHAIN_ID=31337 \ +SEQ_DOMAIN_VERIFYING_CONTRACT=0x1111111111111111111111111111111111111111 \ cargo run -p sequencer ``` +Optional runtime inputs: + +- `SEQ_HTTP_ADDR` defaults to `127.0.0.1:3000` +- `SEQ_DB_PATH` defaults to `sequencer.db` +- `SEQ_LONG_BLOCK_RANGE_ERROR_CODES` defaults to `-32005,-32600,-32602,-32616` + +Required runtime inputs: + +- 
`SEQ_ETH_RPC_URL` +- `SEQ_DOMAIN_CHAIN_ID` +- `SEQ_DOMAIN_VERIFYING_CONTRACT` + +Fixed protocol identity: + +- domain name: `CartesiAppSequencer` +- domain version: `1` + +Most queue sizes, polling intervals, and safety limits are now internal runtime constants instead of public launch-time configuration. + ## API ### `POST /tx` @@ -57,16 +78,16 @@ Request shape: "data": "0x..." }, "signature": "0x...", - "sender": "0x..." + "sender": "0x..." } ``` -POST notes: +Notes: - `signature` must be 65 bytes. -- `sender` is required and must match recovered signer. +- `sender` is required and must match the recovered signer. - `message.data` is SSZ-encoded method payload bytes. -- payload size is bounded at ingress; oversized requests are rejected before they enter hot path. +- payload size is bounded at ingress; oversized requests are rejected before entering the hot path. ### `GET /ws/subscribe?from_offset=` @@ -74,11 +95,10 @@ WebSocket stream of sequenced L2 transactions from persisted order. Notes: -- `from_offset` is optional (defaults to `0`). +- `from_offset` is optional and defaults to `0`. - messages are JSON text frames. - binary fields are hex-encoded (`0x`-prefixed). -- handshake is rejected with `429` when `SEQ_WS_MAX_SUBSCRIBERS` is exceeded (default `64`). -- connections with `live_start_offset - from_offset > SEQ_WS_MAX_CATCHUP_EVENTS` are closed immediately (default `50000`). +- the current runtime enforces a subscriber cap of `64` and a catch-up cap of `50000` events. 
Message shapes: @@ -100,34 +120,7 @@ Success response: } ``` -## Configuration - -Main environment variables: - -- `SEQ_HTTP_ADDR` -- `SEQ_DB_PATH` -- `SEQ_QUEUE_CAP` -- `SEQ_OVERLOAD_MAX_INFLIGHT_SUBMISSIONS` -- `SEQ_MAX_USER_OPS_PER_CHUNK` (`SEQ_MAX_BATCH` is legacy alias) -- `SEQ_SAFE_DIRECT_BUFFER_CAPACITY` -- `SEQ_MAX_BATCH_OPEN_MS` -- `SEQ_MAX_BATCH_USER_OP_BYTES` -- `SEQ_INCLUSION_LANE_IDLE_POLL_INTERVAL_MS` -- `SEQ_INCLUSION_LANE_TICK_INTERVAL_MS` (legacy alias) -- `SEQ_COMMIT_LANE_TICK_INTERVAL_MS` (legacy alias) -- `SEQ_BROADCASTER_IDLE_POLL_INTERVAL_MS` -- `SEQ_BROADCASTER_PAGE_SIZE` -- `SEQ_BROADCASTER_SUBSCRIBER_BUFFER_CAPACITY` -- `SEQ_WS_MAX_SUBSCRIBERS` -- `SEQ_WS_MAX_CATCHUP_EVENTS` -- `SEQ_MAX_BODY_BYTES` -- `SEQ_SQLITE_SYNCHRONOUS` -- `SEQ_DOMAIN_NAME` -- `SEQ_DOMAIN_VERSION` -- `SEQ_DOMAIN_CHAIN_ID` -- `SEQ_DOMAIN_VERIFYING_CONTRACT` - -## Storage Model (high level) +## Storage Model - `batches`: batch metadata - `frames`: frame boundaries within each batch @@ -135,26 +128,48 @@ Main environment variables: - `user_ops`: included user operations - `direct_inputs`: direct-input payload stream - `sequenced_l2_txs`: append-only ordered replay rows (`UserOp` xor `DirectInput`) -- `recommended_fees`: singleton mutable recommendation for next frame fee - -No SQL views are required in the current prototype schema. 
+- `recommended_fees`: singleton mutable recommendation for the next frame fee ## Project Layout -- `sequencer/src/main.rs`: bootstrap, env config, HTTP server + lane lifecycle +- `sequencer/src/main.rs`: thin binary entrypoint +- `sequencer/src/lib.rs`: public crate surface +- `sequencer/src/config.rs`: runtime input parsing and EIP-712 domain construction +- `sequencer/src/runtime.rs`: sequencer bootstrap and component wiring - `sequencer/src/api/`: HTTP API and error mapping - `sequencer/src/inclusion_lane/`: hot-path inclusion loop, chunk/frame/batch rotation, catch-up -- `sequencer/src/l2_tx_broadcaster/`: centralized ordered-L2Tx poller + subscriber fanout -- `sequencer/src/storage/`: schema, migrations, SQLite persistence and replay reads -- `sequencer-core/src/`: shared sequencer domain types and interfaces (`Application`, `SignedUserOp`, `SequencedL2Tx`, broadcaster message types) -- `examples/app-core/src/`: wallet prototype implementing the shared `Application` trait -- `examples/canonical-app/src/main.rs`: placeholder canonical runtime entrypoint +- `sequencer/src/input_reader/`: safe-input ingestion from InputBox into SQLite +- `sequencer/src/l2_tx_feed/`: DB-backed ordered-L2Tx feed for WS subscriptions +- `sequencer/src/storage/`: schema, migrations, SQLite persistence, and replay reads +- `sequencer-core/src/`: shared domain types and interfaces (`Application`, `SignedUserOp`, `SequencedL2Tx`, feed message types) +- `examples/app-core/src/`: wallet prototype implementing `Application` +- `benchmarks/`: benchmark harnesses and benchmark spec ## Prototype Limits -- Wallet state is in-memory (not persisted). -- Direct-input ingestion from chain is not implemented yet (currently append via storage APIs). -- Schema/migrations are still in prototype mode and may change. +- Wallet state is in-memory and not persisted. +- Schema and migrations are still in prototype mode and may change. 
+ +## Local Test Prerequisites + +- Some `sequencer` tests spin up `Anvil`; install Foundry locally if you want the full test suite: +- Self-contained benchmarks also spawn `Anvil` from a preloaded rollups state dump. + +```bash +foundryup +``` + +- Prepare local benchmark + guest build dependencies: + +```bash +just setup +``` + +- Enable the Anvil-backed reader tests explicitly: + +```bash +RUN_ANVIL_TESTS=1 cargo test -p sequencer --lib +``` ## License diff --git a/TODO.md b/TODO.md index 85c45dd..a55cf7f 100644 --- a/TODO.md +++ b/TODO.md @@ -6,40 +6,68 @@ Build a robust sequencer prototype for a future DeFi stack, with deterministic o --- -## MVP Scope (In Scope) +## Done -### 1) Sequencer +### Sequencer Foundation + +- Thin binary entrypoint plus library runtime (`sequencer::run`, `RunConfig`). +- Simplified runtime/config surface with explicit EIP-712 deployment inputs. +- Hardened write path: API -> inclusion lane -> app execution -> persistence -> ack. +- `L2Tx` broadcaster with WebSocket fanout of ordered `L2Tx`s. +- Bounded WebSocket catch-up window plus subscriber guardrails. +- Shared shutdown supervision across API, inclusion lane, and broadcaster. +- Paged replay/catch-up in inclusion lane and broadcaster to avoid unbounded startup memory growth. +- Persisted `safe_block` frontier model for frames, with leading direct inputs materialized when opening a new frame. + +### Benchmarks & Tooling + +- Benchmark harnesses in `benchmarks/` for ack latency, end-to-end latency, sweeps, and unit hot path. +- Baseline reporting for p50 / p95 / p99, throughput, and RSS trends. +- Same-host benchmark workflows and docs aligned with the current runtime/config model. + +--- + +## MVP Scope (Remaining) + +### 1) Sequencer Core -- Keep and harden write path: API -> inclusion lane -> app execution -> persistence -> ack. - Implement direct-input reader from blockchain (ingests into `direct_inputs`). 
- Implement batch submitter (reads closed batches and submits on-chain). -- Implement `L2Tx` broadcaster (WebSocket fanout of ordered `L2Tx`s). - Implement inclusion fee estimator module that updates the suggested fee in DB (`recommended_fees`). -- Add API endpoint to query current suggested inclusion fee. -- Add API endpoint to query user current tx count. -- Keep storage/replay semantics deterministic and catch-up-safe. -- Change `drain_n` design to a "safe block" design. +- Add paginated historical `L2Tx` sync endpoint so lagging readers can backfill over HTTP before switching to `/ws/subscribe` for live updates. +- Keep storage/replay semantics deterministic and catch-up-safe as direct-input ingestion, batch submission, and recovery flows land. ---- +### 2) Recovery / Canonicality + +- Define how canonical progress is derived from persisted facts so replay stays deterministic. +- Detect when scheduler/canonical execution invalidates previously closed batches. +- Define the recovery procedure when persisted batches are invalidated: + - fail fast if the persisted state is inconsistent with canonical inputs + - rebuild or flush invalidated batches before resuming normal service + - notify readers when batches are invalidated + - notify readers when batches become final on-chain -### 2) Canonical App / Scheduler +### 3) Canonical App / Scheduler - Implement scheduler behavior in `examples/canonical-app` using shared `sequencer-core` + `examples/app-core`. - Ensure deterministic ordering model compatible with persisted sequencer order. -- Canonical app is the state-transition artifact used by verification flow (Cartesi Machine / RISC-V path), not by sequencer runtime itself. +- Keep the canonical app as the state-transition artifact used by verification flow (Cartesi Machine / RISC-V path), not by sequencer runtime itself. - Add focused tests for queue/drain/backstop behavior and ordering invariants. 
---- +### 4) Benchmarks & Evaluation + +- Add canonical network-aware benchmark runs (client/server on different hosts or with injected latency/jitter). +- Turn target evaluation into a real pass/fail mode for the canonical network profile, not just same-host comparison. +- Tune queue / broadcaster / buffer sizing from benchmark evidence instead of ad hoc guesses. +- Revisit inclusion-lane adaptive chunk sizing only after the baseline latency/throughput envelopes are stable. -### 3) Benchmarks & Latency +### 5) Client / API Ergonomics -- Build benchmark harnesses in `benchmarks/` (using Rust client code paths). -- Measure ack latency and end-to-end latency. -- Report p50 / p95 / p99. -- Measure idle and under-load behavior. -- Include network-aware runs (client/server on different hosts) like network latency. -- Note: end-to-end depends on `L2Tx` broadcaster being available. -- Possible optimization idea (later): adaptive chunk sizing in inclusion lane based on queue pressure and latency budget. +- Add API endpoint to query current suggested inclusion fee. +- Decide whether wallet-specific convenience endpoints belong in the sequencer or in the application/client layer: + - current nonce / tx count + - EIP-712 domain discovery +- If those helper endpoints stay in the sequencer, implement them with a clear separation between core sequencer state and wallet-app-specific state. --- @@ -53,12 +81,3 @@ Notes: - These are intentionally outside MVP scope. - Still valuable for dogfooding and contributor onboarding. - - - - -Dev endpoint for direct inputs? - -Endpoints for domain, nonce, fee?? - -Implement health check?? Ready check?? diff --git a/benchmarks/BENCHMARK_SPEC.md b/benchmarks/BENCHMARK_SPEC.md index a3b15eb..1bdb77f 100644 --- a/benchmarks/BENCHMARK_SPEC.md +++ b/benchmarks/BENCHMARK_SPEC.md @@ -24,8 +24,10 @@ This spec makes that requirement measurable and repeatable. ### 2.3 Capacity and Overload Metrics 1. 
`max_sustainable_tps_at_0_rejections`: highest accepted TPS observed while `rejection_rate == 0%`. -2. `tps_at_first_non_200`: throughput point where first non-`200` response appears. -3. `tps_at_first_429`: throughput point where first `429 OVERLOADED` response appears. +2. `tps_at_first_any_rejection`: throughput point where the first rejection of any kind appears. +3. `tps_at_first_non_200`: throughput point where the first HTTP non-`200` response appears. +4. `tps_at_first_429`: throughput point where the first `429 OVERLOADED` response appears. +5. `tps_at_first_client_failure`: throughput point where the first timeout/network/client-side submission failure appears. ### 2.4 Memory Metrics @@ -54,7 +56,9 @@ This spec makes that requirement measurable and repeatable. 1. Target evaluation must use valid-only benchmark traffic (no intentionally invalid transactions). 2. Non-`200` responses must be broken down by status code so overload (`429`) is distinguishable from invalid-input failures (`400`/`422`). -3. If no non-`200` appears, `tps_at_first_non_200` and `tps_at_first_429` must be reported as `not reached`. +3. Client/network failures must be reported separately from HTTP non-`200` responses so client saturation is not mistaken for sequencer overload. +4. If no non-`200` appears, `tps_at_first_non_200` and `tps_at_first_429` must be reported as `not reached`. +5. If no client/network failure appears, `tps_at_first_client_failure` must be reported as `not reached`. ### 3.4 Memory Collection Rules @@ -121,9 +125,10 @@ Each benchmark report must include: 4. Rejection reason breakdown (when available). 5. Network profile and shaping method/config. 6. Memory metrics and memory sampling method/interval. -7. Full run configuration (command, concurrency/arrival settings, timeout settings). +7. Full run configuration (command, deployment domain, concurrency/arrival settings, timeout settings). 8. 
Target verdict lines: `ACK_TARGET` and `SOFT_CONFIRM_TARGET` (when target evaluation is performed). 9. Sanity assertion status and failure summary (if any). +10. For sweeps, separate first-knee metrics for any rejection, HTTP non-`200`, `429`, and client-side failure. ## 7. Mapping to Current Harnesses @@ -135,8 +140,5 @@ Each benchmark report must include: ## 8. Current Gaps 1. Add first-class support for canonical injected-latency runs. -2. Add explicit `p99.9` export in sweep CSV/JSON outputs. -3. Add standard `ACK_TARGET` and `SOFT_CONFIRM_TARGET` verdict lines in benchmark outputs. -4. Add rejection-reason breakdown consistently across relevant reports. -5. Ensure reports always include network shaping tool and exact shaping config. -6. Add first-class memory collection/reporting in benchmark tooling. +2. Ensure reports always include network shaping tool and exact shaping config. +3. Strengthen tx identity matching for e2e correlation if workloads ever stop guaranteeing effectively unique sender/payload pairs. diff --git a/benchmarks/Cargo.toml b/benchmarks/Cargo.toml index 970c94a..713bdf5 100644 --- a/benchmarks/Cargo.toml +++ b/benchmarks/Cargo.toml @@ -12,8 +12,11 @@ authors.workspace = true [dependencies] sequencer-rust-client = { path = "../sdk/rust-client" } sequencer-core = { path = "../sequencer-core" } +app-core = { path = "../examples/app-core" } +alloy = { version = "1.0", features = ["contract", "network", "reqwest", "signer-local"] } alloy-primitives = { version = "1.4.1", features = ["k256"] } alloy-sol-types = "1.4.1" +cartesi-rollups-contracts = "=2.2.0" futures-util = "0.3" k256 = "0.13.4" clap = { version = "4", features = ["derive"] } diff --git a/benchmarks/README.md b/benchmarks/README.md index 9b2c3d1..fc859a6 100644 --- a/benchmarks/README.md +++ b/benchmarks/README.md @@ -2,64 +2,75 @@ This crate contains executable benchmark harnesses for the sequencer API. 
-Benchmark goals, UX-facing metrics, and initial SLO targets are defined in +Benchmark goals, measurement definitions, and reporting requirements live in [`BENCHMARK_SPEC.md`](./BENCHMARK_SPEC.md). +## Domain Model + +Networked benchmarks sign EIP-712 payloads, so they need to know which sequencer +instance they are targeting. + +- **External target**: pass `--domain-chain-id` and `--domain-verifying-contract` to match the target sequencer deployment. +- **Self-contained target**: the harness spawns `anvil`, deploys a local `Application` through `ApplicationFactory`, and uses that deployed app address as the verifying contract for both signer and sequencer: + - chain ID: `31337` + - verifying contract: dynamically deployed local `Application` + - domain name: `CartesiAppSequencer` + - domain version: `1` + ## Commands From repository root: ```bash +just setup just --justfile benchmarks/justfile bench-unit -just --justfile benchmarks/justfile bench-ack -just --justfile benchmarks/justfile bench-e2e -just --justfile benchmarks/justfile bench-hammer -just --justfile benchmarks/justfile bench-sweep +just --justfile benchmarks/justfile bench-ack-self +just --justfile benchmarks/justfile bench-e2e-self +just --justfile benchmarks/justfile bench-hammer-self +just --justfile benchmarks/justfile bench-sweep-self just --justfile benchmarks/justfile bench-compare-latest just --justfile benchmarks/justfile all just --justfile benchmarks/justfile all-and-compare ``` -Or from inside `benchmarks/`: +Direct `cargo` examples: ```bash -just bench-unit -just bench-ack -just bench-e2e -just bench-hammer -just bench-sweep -just bench-compare-latest -just all -just all-and-compare +cargo run -p benchmarks --bin unit_hot_path -- --count 10000 --max-fee 0 +cargo run -p benchmarks --bin ack_latency -- --self-contained --count 200 --max-fee 0 --concurrency 1 +cargo run -p benchmarks --bin e2e_latency -- --self-contained --count 100 --max-fee 0 --from-offset 0 --concurrency 1 +cargo run -p 
benchmarks --bin ack_latency -- --self-contained --count 5000 --max-fee 0 --concurrency 32 --evaluate +cargo run -p benchmarks --bin e2e_latency -- --self-contained --count 5000 --max-fee 0 --from-offset 0 --concurrency 16 --evaluate +cargo run -p benchmarks --bin ack_latency -- --endpoint http://127.0.0.1:3000 --domain-chain-id 31337 --domain-verifying-contract 0x1111111111111111111111111111111111111111 --count 200 --max-fee 0 --concurrency 1 +cargo run -p benchmarks --bin e2e_latency -- --endpoint http://127.0.0.1:3000 --domain-chain-id 31337 --domain-verifying-contract 0x1111111111111111111111111111111111111111 --count 100 --max-fee 0 --from-offset 0 --concurrency 1 +cargo run -p benchmarks --bin sweep -- --self-contained --mode e2e --count 1000 --max-fee 0 --from-offset 0 --concurrency-list "1 2 4 8 16 32 64 96 128" +cargo run -p benchmarks --bin compare_latest --release -- --results-dir benchmarks/results --kind all --sweep-mode e2e ``` -Or directly with `cargo`: +## Benchmarks -```bash -cargo run -p benchmarks --bin unit_hot_path -- --count 10000 --max-fee 0 -cargo run -p benchmarks --bin ack_latency -- --http-url http://127.0.0.1:3000 --count 200 --max-fee 0 --concurrency 1 -cargo run -p benchmarks --bin e2e_latency -- --http-url http://127.0.0.1:3000 --count 100 --max-fee 0 --from-offset 0 --concurrency 1 -cargo run -p benchmarks --bin sweep --release -- --mode e2e --count 1000 --url http://127.0.0.1:3000 --max-fee 0 --from-offset 0 --concurrency-list "1 2 4 8 16 32 64 96 128" -cargo run -p benchmarks --bin compare_latest --release -- --results-dir benchmarks/results --kind all -``` +- `unit_hot_path`: measures local signing plus request JSON encoding. +- `ack_latency`: measures `POST /tx` acknowledgement latency for accepted txs. +- `e2e_latency`: measures submit-to-broadcast latency (`POST /tx` to matching `GET /ws/subscribe` event) for accepted txs. 
+- `--evaluate` on `ack_latency` and `e2e_latency`: prints a first-class target verdict block and stores it in JSON output. Today the verdict is expected to be `NOT_EVALUATED` because the harness only supports the same-host baseline, not the canonical network-aware profile from the spec. +- `bench-hammer`: high-concurrency e2e run that verifies each accepted tx is observed on WS. +- `bench-sweep`: runs a concurrency sweep and emits a CSV plus capacity summary. Sweep reports separate: + - first rejection of any kind + - first HTTP non-`200` + - first `429` + - first client-side failure (`io_*`, timeouts, connection failures) +- `bench-compare-latest`: compares the latest two benchmark artifacts and prints deltas. Use `--sweep-mode ack|e2e` to choose which sweep family to compare. +- `bench-soak-low-lat-self` and `bench-soak-high-throughput-self`: write timestamped JSON outputs by default so repeated runs do not overwrite previous soak artifacts. Pass `out=...` to force a specific path. ## Notes -- `unit_hot_path`: measures local signing + request-encoding costs (no network). -- `ack_latency`: measures `POST /tx` acknowledgment latency for accepted txs. -- `e2e_latency`: measures submit-to-broadcast latency (`POST /tx` to `GET /ws/subscribe` message) for accepted txs. -- `bench-hammer`: high-concurrency e2e run that hammers the sequencer and verifies each accepted tx is observed on WS. -- `bench-sweep`: runs a concurrency sweep (default `1..128`, `count=1000`) and emits a CSV plus an estimated knee. -- `bench-compare-latest`: compares the latest two `ack`, `e2e`, and `sweep` artifacts and prints deltas. -- For newly generated benchmark accounts, included transactions usually require sequencer frame fee `0`. +- Self-contained variants launch `anvil --load-state` from the preloaded rollups dump under `benchmarks/.deps/`; run `just setup` first. 
+- Self-contained variants also deploy a local `Application` through `ApplicationFactory`, so they require a canonical machine image at `examples/canonical-app/out/canonical-machine-image`; run `just canonical-build-machine-image` first. +- Self-contained variants therefore require Foundry's `anvil` binary to be installed locally. - Networked benches fail by default if any tx is rejected. Pass `--allow-rejections` to inspect mixed traffic. -- `e2e_latency` drains existing WS backlog before timing to reduce stale-history noise. -- Sweep CSV columns: `concurrency,completed_per_s,p95_ms,rejected`. +- `e2e_latency` drains existing WS backlog before timing so stale history does not pollute the measurement window. - `bench-sweep mode=e2e` carries `from_offset` forward across rounds to avoid re-reading old WS history. -- If sweep hits `Too many open files`, increase shell limit (`ulimit -n 4096`) or lower `conc_list`. -- Self-contained variants automatically spawn a sequencer and persist logs/results. -- For non-self-contained networked benches, run a sequencer instance beforehand, for example: - -```bash -just run -``` +- `--stop-on-first-non-200` now does exactly what it says: it stops on the first HTTP non-`200`, not on client-side transport failures. +- If sweep hits `Too many open files`, increase the shell limit (`ulimit -n 4096`) or use a smaller concurrency list. +- Self-contained variants automatically build a temp DB, spawn `anvil`, start the sequencer, and persist logs/results under `benchmarks/results`. +- For non-self-contained runs, start a sequencer instance first and make sure the benchmark domain matches the sequencer domain. diff --git a/benchmarks/justfile b/benchmarks/justfile index 65251c5..b8f0515 100644 --- a/benchmarks/justfile +++ b/benchmarks/justfile @@ -1,53 +1,70 @@ set shell := ["bash", "-euo", "pipefail", "-c"] set working-directory := ".." 
+anvil_dump_name := "rollups-contracts-2.2.0-anvil-v1.4.3" +anvil_dump_dir := "benchmarks/.deps/" + anvil_dump_name +anvil_dump_tar := "benchmarks/.deps/" + anvil_dump_name + ".tar.gz" +anvil_dump_url := "https://github.com/cartesi/rollups-contracts/releases/download/v2.2.0/rollups-contracts-2.2.0-anvil-v1.4.3.tar.gz" +root_anvil_dump_tar := "rollups-contracts-2.2.0-anvil-v1.4.3.tar.gz" +template_machine_image := "examples/canonical-app/out/canonical-machine-image" + default: @just --justfile benchmarks/justfile --list +setup: + mkdir -p benchmarks/.deps + if [[ ! -f {{anvil_dump_tar}} ]]; then if [[ -f {{root_anvil_dump_tar}} ]]; then cp {{root_anvil_dump_tar}} {{anvil_dump_tar}}; else wget {{anvil_dump_url}} -O {{anvil_dump_tar}}; fi; fi + if [[ ! -f {{anvil_dump_dir}}/state.json ]]; then rm -rf {{anvil_dump_dir}}; mkdir -p {{anvil_dump_dir}}; tar -xzf {{anvil_dump_tar}} -C {{anvil_dump_dir}}; fi + +clean: + rm -rf benchmarks/.deps + +ensure-machine-image: + test -d {{template_machine_image}} || { echo "missing {{template_machine_image}}; run 'just canonical-build-machine-image' first"; exit 1; } + bench-unit count="10000" max_fee="0": cargo run -p benchmarks --bin unit_hot_path --release -- --count {{count}} --max-fee {{max_fee}} -bench-ack count="200" url="http://127.0.0.1:3000" max_fee="0" concurrency="1" extra="": - cargo run -p benchmarks --bin ack_latency --release -- --endpoint {{url}} --count {{count}} --max-fee {{max_fee}} --concurrency {{concurrency}} {{extra}} +bench-ack domain_chain_id verifying_contract count="200" url="http://127.0.0.1:3000" max_fee="0" concurrency="1" extra="": + cargo run -p benchmarks --bin ack_latency --release -- --endpoint {{url}} --domain-chain-id {{domain_chain_id}} --domain-verifying-contract {{verifying_contract}} --count {{count}} --max-fee {{max_fee}} --concurrency {{concurrency}} {{extra}} -bench-ack-self count="200" max_fee="0" concurrency="1" extra="": +bench-ack-self count="200" max_fee="0" concurrency="1" extra="": 
ensure-machine-image cargo build -p sequencer --release cargo run -p benchmarks --bin ack_latency --release -- --self-contained --count {{count}} --max-fee {{max_fee}} --concurrency {{concurrency}} {{extra}} -bench-e2e count="100" url="http://127.0.0.1:3000" max_fee="0" from_offset="0" concurrency="1" extra="": - cargo run -p benchmarks --bin e2e_latency --release -- --endpoint {{url}} --count {{count}} --max-fee {{max_fee}} --from-offset {{from_offset}} --concurrency {{concurrency}} {{extra}} +bench-e2e domain_chain_id verifying_contract count="100" url="http://127.0.0.1:3000" max_fee="0" from_offset="0" concurrency="1" extra="": + cargo run -p benchmarks --bin e2e_latency --release -- --endpoint {{url}} --domain-chain-id {{domain_chain_id}} --domain-verifying-contract {{verifying_contract}} --count {{count}} --max-fee {{max_fee}} --from-offset {{from_offset}} --concurrency {{concurrency}} {{extra}} -bench-e2e-self count="100" max_fee="0" from_offset="0" concurrency="1" extra="": +bench-e2e-self count="100" max_fee="0" from_offset="0" concurrency="1" extra="": ensure-machine-image cargo build -p sequencer --release cargo run -p benchmarks --bin e2e_latency --release -- --self-contained --count {{count}} --max-fee {{max_fee}} --from-offset {{from_offset}} --concurrency {{concurrency}} {{extra}} -bench-hammer count="20000" url="http://127.0.0.1:3000" max_fee="0" concurrency="10" from_offset="0" workload="funded-transfer" extra="": - cargo run -p benchmarks --bin e2e_latency --release -- --endpoint {{url}} --count {{count}} --max-fee {{max_fee}} --from-offset {{from_offset}} --concurrency {{concurrency}} --request-timeout-ms 10000 --max-ws-wait-ms 20000 --workload {{workload}} {{extra}} +bench-hammer domain_chain_id verifying_contract count="20000" url="http://127.0.0.1:3000" max_fee="0" concurrency="10" from_offset="0" workload="funded-transfer" extra="": + cargo run -p benchmarks --bin e2e_latency --release -- --endpoint {{url}} --domain-chain-id 
{{domain_chain_id}} --domain-verifying-contract {{verifying_contract}} --count {{count}} --max-fee {{max_fee}} --from-offset {{from_offset}} --concurrency {{concurrency}} --request-timeout-ms 10000 --max-ws-wait-ms 20000 --workload {{workload}} {{extra}} -bench-hammer-self count="20000" max_fee="0" concurrency="10" from_offset="0" workload="funded-transfer" out="benchmarks/results/hammer-self-latest.json" extra="": +bench-hammer-self count="20000" max_fee="0" concurrency="10" from_offset="0" workload="funded-transfer" out="benchmarks/results/hammer-self-latest.json" extra="": ensure-machine-image cargo build -p sequencer --release cargo run -p benchmarks --bin e2e_latency --release -- --self-contained --count {{count}} --max-fee {{max_fee}} --from-offset {{from_offset}} --concurrency {{concurrency}} --request-timeout-ms 10000 --max-ws-wait-ms 20000 --workload {{workload}} --json-out {{out}} {{extra}} -bench-sweep mode="e2e" count="1000" url="http://127.0.0.1:3000" max_fee="0" from_offset="0" conc_list="1 2 4 8 16 32 64 96 128" extra="": - cargo run -p benchmarks --bin sweep --release -- --mode {{mode}} --count {{count}} --endpoint {{url}} --max-fee {{max_fee}} --from-offset {{from_offset}} --concurrency-list "{{conc_list}}" {{extra}} +bench-sweep domain_chain_id verifying_contract mode="e2e" count="1000" url="http://127.0.0.1:3000" max_fee="0" from_offset="0" conc_list="1 2 4 8 16 32 64 96 128" extra="": + cargo run -p benchmarks --bin sweep --release -- --mode {{mode}} --endpoint {{url}} --domain-chain-id {{domain_chain_id}} --domain-verifying-contract {{verifying_contract}} --count {{count}} --max-fee {{max_fee}} --from-offset {{from_offset}} --concurrency-list "{{conc_list}}" {{extra}} -bench-sweep-self mode="e2e" count="1000" max_fee="0" from_offset="0" conc_list="1 2 4 8 16 32 64 96 128" extra="": +bench-sweep-self mode="e2e" count="1000" max_fee="0" from_offset="0" conc_list="1 2 4 8 16 32 64 96 128" extra="": ensure-machine-image cargo build -p sequencer 
--release cargo run -p benchmarks --bin sweep --release -- --self-contained --mode {{mode}} --count {{count}} --max-fee {{max_fee}} --from-offset {{from_offset}} --concurrency-list "{{conc_list}}" {{extra}} -bench-soak-low-lat-self count="20000" max_fee="0" out="benchmarks/results/soak-low-lat-self.json" extra="": - just --justfile benchmarks/justfile bench-hammer-self {{count}} {{max_fee}} 10 0 funded-transfer {{out}} '{{extra}}' +bench-soak-low-lat-self count="20000" max_fee="0" out="" extra="": ensure-machine-image + out_path="{{out}}"; if [[ -z "${out_path}" ]]; then out_path="benchmarks/results/soak-low-lat-self-$(date +%s).json"; fi; just --justfile benchmarks/justfile bench-hammer-self {{count}} {{max_fee}} 10 0 funded-transfer "${out_path}" '{{extra}}' -bench-soak-high-throughput-self count="20000" max_fee="0" out="benchmarks/results/soak-high-throughput-self.json" extra="": - cargo build -p sequencer --release - cargo run -p benchmarks --bin e2e_latency --release -- --self-contained --count {{count}} --max-fee {{max_fee}} --from-offset 0 --concurrency 96 --request-timeout-ms 10000 --max-ws-wait-ms 20000 --workload synthetic --json-out {{out}} {{extra}} +bench-soak-high-throughput-self count="20000" max_fee="0" out="" extra="": ensure-machine-image + out_path="{{out}}"; if [[ -z "${out_path}" ]]; then out_path="benchmarks/results/soak-high-throughput-self-$(date +%s).json"; fi; cargo build -p sequencer --release; cargo run -p benchmarks --bin e2e_latency --release -- --self-contained --count {{count}} --max-fee {{max_fee}} --from-offset 0 --concurrency 96 --request-timeout-ms 10000 --max-ws-wait-ms 20000 --workload synthetic --json-out "${out_path}" {{extra}} -bench-capacity-sweep-self count="1000" max_fee="0" from_offset="0" conc_range="32:512:32" out="benchmarks/results/capacity-sweep-self.json" extra="": +bench-capacity-sweep-self count="1000" max_fee="0" from_offset="0" conc_range="32:512:32" out="benchmarks/results/capacity-sweep-self.json" extra="": 
ensure-machine-image cargo build -p sequencer --release cargo run -p benchmarks --bin sweep --release -- --self-contained --mode e2e --count {{count}} --max-fee {{max_fee}} --from-offset {{from_offset}} --workload synthetic --concurrency-range "{{conc_range}}" --stop-on-first-non-200 --json-out {{out}} {{extra}} -bench-compare-latest kind="all" results_dir="benchmarks/results": - cargo run -p benchmarks --bin compare_latest --release -- --results-dir {{results_dir}} --kind {{kind}} +bench-compare-latest kind="all" sweep_mode="e2e" results_dir="benchmarks/results": + cargo run -p benchmarks --bin compare_latest --release -- --results-dir {{results_dir}} --kind {{kind}} --sweep-mode {{sweep_mode}} all: bench-unit bench-ack-self bench-e2e-self bench-soak-low-lat-self bench-soak-high-throughput-self bench-capacity-sweep-self diff --git a/benchmarks/src/ack.rs b/benchmarks/src/ack.rs new file mode 100644 index 0000000..1058908 --- /dev/null +++ b/benchmarks/src/ack.rs @@ -0,0 +1,162 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use futures_util::future::join_all; +use serde::{Deserialize, Serialize}; +use std::collections::BTreeMap; +use std::time::Duration; + +use sequencer_rust_client::SequencerClient; + +use crate::{ + BenchResult, + domain::BenchmarkDomain, + rejection::classify_rejection, + runtime, + stats::{Stats, rejection_rate, summarize}, + support::{DEFAULT_PROGRESS_EVERY, now}, + workload::{WorkloadConfig, WorkloadState}, +}; + +#[derive(Debug, Clone)] +pub struct AckRunConfig { + pub endpoint: String, + pub domain: BenchmarkDomain, + pub count: u64, + pub concurrency: usize, + pub seed_offset: u64, + pub max_fee: u32, + pub request_timeout_ms: u64, + pub fail_on_rejection: bool, + pub workload: WorkloadConfig, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct AckRunReport { + pub count: u64, + pub endpoint: String, + pub concurrency: usize, + pub accepted: u64, + pub 
rejected: u64, + pub rejection_rate: f64, + pub rejection_breakdown: BTreeMap, + pub first_rejection: Option, + pub total_wall: Duration, + pub ack_latency_accepted: Stats, + pub ack_latency_rejected: Option, + pub memory: Option, + pub sequencer_log_path: Option, +} + +pub async fn run_ack_benchmark(config: AckRunConfig) -> BenchResult { + let domain = config.domain.eip712_domain(); + let timeout = Duration::from_millis(config.request_timeout_ms); + let client = SequencerClient::new_with_timeout(config.endpoint.clone(), timeout) + .map_err(|e| crate::support::err(format!("invalid endpoint '{}': {e}", config.endpoint)))?; + let mut workload = WorkloadState::initialize(&config.workload, config.seed_offset)?; + let effective_concurrency = if let Some(cap) = workload.concurrency_cap() { + let capped = config.concurrency.min(cap); + if capped < config.concurrency { + println!( + "workload concurrency capped: requested={}, effective={}, funded_accounts={}", + config.concurrency, capped, cap + ); + } + capped + } else { + config.concurrency + }; + let mut accepted_ack_samples = Vec::with_capacity(config.count as usize); + let mut rejected_ack_samples = Vec::new(); + let mut accepted = 0_u64; + let mut rejected = 0_u64; + let mut first_rejection: Option = None; + let mut rejection_breakdown = BTreeMap::::new(); + let started = now(); + + while accepted.saturating_add(rejected) < config.count { + let remaining = config + .count + .saturating_sub(accepted.saturating_add(rejected)); + let batch_size = remaining.min(effective_concurrency as u64) as usize; + + let mut inflight = Vec::with_capacity(batch_size); + for _ in 0..batch_size { + let fixture = workload.next_fixture(config.max_fee, &domain)?; + let client = client.clone(); + let sent_at = now(); + inflight.push(async move { + let outcome = client.submit_tx_with_status(&fixture.request).await; + (sent_at.elapsed(), outcome) + }); + } + + for (ack_latency, outcome) in join_all(inflight).await { + match 
classify_rejection(outcome) { + None => { + accepted = accepted.saturating_add(1); + accepted_ack_samples.push(ack_latency); + } + Some(rejection) => { + rejected = rejected.saturating_add(1); + rejected_ack_samples.push(ack_latency); + *rejection_breakdown + .entry(rejection.key.clone()) + .or_insert(0) += 1; + if first_rejection.is_none() { + first_rejection = Some(rejection.detail); + } + } + } + } + + let processed = accepted.saturating_add(rejected); + if DEFAULT_PROGRESS_EVERY > 0 + && processed > 0 + && processed.is_multiple_of(DEFAULT_PROGRESS_EVERY) + { + println!( + "progress: processed={processed}/{}, accepted={accepted}, rejected={rejected}", + config.count + ); + } + } + + if config.fail_on_rejection && rejected > 0 { + let reason = first_rejection + .clone() + .unwrap_or_else(|| "unknown rejection".to_string()); + return Err(std::io::Error::other(format!( + "ack benchmark saw {rejected} rejection(s): {reason}" + )) + .into()); + } + + if accepted_ack_samples.is_empty() { + return Err(std::io::Error::other("ack benchmark had no accepted txs").into()); + } + + let total_wall = started.elapsed(); + let ack_stats = summarize(accepted_ack_samples.as_slice())?; + let rejected_stats = if rejected_ack_samples.is_empty() { + None + } else { + Some(summarize(rejected_ack_samples.as_slice())?) 
+ }; + + Ok(AckRunReport { + count: config.count, + endpoint: config.endpoint, + concurrency: config.concurrency, + accepted, + rejected, + rejection_rate: rejection_rate(accepted, rejected), + rejection_breakdown, + first_rejection, + total_wall, + ack_latency_accepted: ack_stats, + ack_latency_rejected: rejected_stats, + memory: None, + sequencer_log_path: None, + }) +} diff --git a/benchmarks/src/bin/ack_latency.rs b/benchmarks/src/bin/ack_latency.rs index 6de274a..b8214ea 100644 --- a/benchmarks/src/bin/ack_latency.rs +++ b/benchmarks/src/bin/ack_latency.rs @@ -1,22 +1,21 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) +use alloy_primitives::Address; use benchmarks::{ - AckRunConfig, BenchResult, DEFAULT_ENDPOINT, DEFAULT_WORKLOAD_INITIAL_BALANCE, - DEFAULT_WORKLOAD_TRANSFER_AMOUNT, WorkloadConfig, WorkloadKind, default_seed_offset, - print_ack_report, run_ack_benchmark, + AckRunConfig, BenchResult, DEFAULT_ENDPOINT, DEFAULT_WORKLOAD_TRANSFER_AMOUNT, DOMAIN_NAME, + DOMAIN_VERSION, NetworkProfile, WorkloadConfig, WorkloadKind, default_json_output_path, + default_seed_offset, evaluate_ack_target, parse_address, print_ack_report, + print_target_evaluation, resolve_external_benchmark_domain, run_ack_benchmark, runtime::{ - DEFAULT_MEMORY_SAMPLE_INTERVAL_MS, DEFAULT_RUNTIME_METRICS_LOG_INTERVAL_MS, - DEFAULT_SEQUENCER_BIN, DEFAULT_SEQUENCER_SHUTDOWN_TIMEOUT_MS, - DEFAULT_SEQUENCER_START_TIMEOUT_MS, ManagedSequencer, ManagedSequencerConfig, - MemorySampler, default_sequencer_log_path, parse_inclusion_lane_profile_from_log, + DEFAULT_MEMORY_SAMPLE_INTERVAL_MS, DEFAULT_SEQUENCER_BIN, ManagedSequencer, + ManagedSequencerConfig, MemorySampler, }, + write_json_output, }; -use clap::{Parser, ValueEnum}; -use serde::Serialize; -use std::fs; +use clap::Parser; +use serde_json::json; use std::path::Path; -use std::path::PathBuf; use std::time::Duration; #[derive(Debug, Parser)] @@ -24,37 +23,23 @@ use 
std::time::Duration; name = "ack_latency", about = "ack latency benchmark", version, - after_help = "Examples:\n cargo run -p benchmarks --bin ack_latency -- --endpoint http://127.0.0.1:3000 --count 1000 --concurrency 32 --max-fee 0\n cargo run -p benchmarks --bin ack_latency --release -- --count 5000 --allow-rejections" + after_help = "Examples:\n cargo run -p benchmarks --bin ack_latency -- --self-contained --count 1000 --concurrency 32 --max-fee 0\n cargo run -p benchmarks --bin ack_latency -- --endpoint http://127.0.0.1:3000 --domain-chain-id 31337 --domain-verifying-contract 0x1111111111111111111111111111111111111111 --count 1000 --concurrency 32 --max-fee 0\n cargo run -p benchmarks --bin ack_latency -- --self-contained --count 5000 --concurrency 32 --evaluate" )] struct Args { - #[arg(long, visible_alias = "http-url", default_value = DEFAULT_ENDPOINT)] + #[arg(long, default_value = DEFAULT_ENDPOINT)] endpoint: String, #[arg(long, default_value_t = false)] self_contained: bool, #[arg(long, default_value = DEFAULT_SEQUENCER_BIN)] sequencer_bin: String, - #[arg(long, default_value_t = DEFAULT_SEQUENCER_START_TIMEOUT_MS)] - sequencer_start_timeout_ms: u64, - #[arg(long, default_value_t = DEFAULT_SEQUENCER_SHUTDOWN_TIMEOUT_MS)] - sequencer_shutdown_timeout_ms: u64, - #[arg(long, default_value_t = true)] - temp_db: bool, #[arg(long)] - sequencer_log_path: Option, - #[arg(long, default_value_t = true)] - sequencer_runtime_metrics_enabled: bool, - #[arg(long, default_value_t = DEFAULT_RUNTIME_METRICS_LOG_INTERVAL_MS)] - sequencer_runtime_metrics_log_interval_ms: u64, - #[arg(long, default_value = "info")] - sequencer_rust_log: String, - #[arg(long, default_value_t = DEFAULT_MEMORY_SAMPLE_INTERVAL_MS)] - memory_sample_interval_ms: u64, - #[arg(long, value_enum, default_value_t = CliWorkload::Synthetic)] - workload: CliWorkload, + domain_chain_id: Option, + #[arg(long, value_parser = parse_address)] + domain_verifying_contract: Option
, + #[arg(long, value_enum, default_value_t = WorkloadKind::Synthetic)] + workload: WorkloadKind, #[arg(long)] accounts_file: Option, - #[arg(long, default_value_t = DEFAULT_WORKLOAD_INITIAL_BALANCE)] - initial_balance: u64, #[arg(long, default_value_t = DEFAULT_WORKLOAD_TRANSFER_AMOUNT)] transfer_amount: u64, #[arg(long, default_value_t = 200_u64)] @@ -67,65 +52,19 @@ struct Args { max_fee: u32, #[arg(long, default_value_t = 3_000_u64)] request_timeout_ms: u64, - #[arg(long, default_value_t = 0_u64)] - progress_every: u64, #[arg(long, default_value_t = false)] allow_rejections: bool, + #[arg(long, default_value_t = false)] + evaluate: bool, #[arg(long)] json_out: Option, } -#[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum)] -enum CliWorkload { - #[value(name = "synthetic")] - Synthetic, - #[value(name = "funded-transfer")] - FundedTransfer, -} - -impl From for WorkloadKind { - fn from(value: CliWorkload) -> Self { - match value { - CliWorkload::Synthetic => Self::Synthetic, - CliWorkload::FundedTransfer => Self::FundedTransfer, - } - } -} - -impl CliWorkload { - fn as_str(self) -> &'static str { - match self { - Self::Synthetic => "synthetic", - Self::FundedTransfer => "funded-transfer", - } - } -} - -#[derive(Debug, Serialize)] -struct AckJsonConfig { - endpoint: String, - self_contained: bool, - count: u64, - concurrency: usize, - max_fee: u32, - request_timeout_ms: u64, - allow_rejections: bool, - workload: String, - accounts_file: Option, - initial_balance: u64, - transfer_amount: u64, -} - -#[derive(Debug, Serialize)] -struct AckJsonOutput { - benchmark: &'static str, - config: AckJsonConfig, - report: benchmarks::AckRunReport, -} - #[tokio::main] async fn main() -> BenchResult<()> { let args = Args::parse(); + let effective_concurrency = args.concurrency.max(1); + let network_profile = NetworkProfile::same_host_baseline(); let json_out = args.json_out.clone().or_else(|| { args.self_contained .then(|| default_json_output_path("ack-latency")) @@ -134,58 
+73,63 @@ async fn main() -> BenchResult<()> { Some( ManagedSequencer::spawn(ManagedSequencerConfig { sequencer_bin: args.sequencer_bin.clone(), - start_timeout: Duration::from_millis(args.sequencer_start_timeout_ms), - shutdown_timeout: Duration::from_millis(args.sequencer_shutdown_timeout_ms), - temp_db: args.temp_db, - log_path: args - .sequencer_log_path - .clone() - .or_else(|| Some(default_sequencer_log_path("ack-latency-self-contained"))), - runtime_metrics_enabled: args.sequencer_runtime_metrics_enabled, - runtime_metrics_log_interval: Duration::from_millis( - args.sequencer_runtime_metrics_log_interval_ms, - ), - rust_log: args.sequencer_rust_log.clone(), + log_prefix: "ack-latency-self-contained", }) .await?, ) } else { None }; + let domain = if let Some(value) = managed.as_ref() { + if args.domain_chain_id.is_some() || args.domain_verifying_contract.is_some() { + return Err(std::io::Error::other( + "self-contained benchmarks use the deployed local Application; remove explicit --domain-* args", + ) + .into()); + } + value.domain() + } else { + resolve_external_benchmark_domain(args.domain_chain_id, args.domain_verifying_contract)? 
+ }; let endpoint = managed .as_ref() .map(|value| value.endpoint.clone()) .unwrap_or_else(|| args.endpoint.clone()); println!( - "ack config: endpoint={}, self_contained={}, count={}, concurrency={}, max_fee={}, request_timeout_ms={}, allow_rejections={}, workload={:?}", + "ack config: endpoint={}, self_contained={}, domain_chain_id={}, domain_verifying_contract={}, count={}, concurrency={}, max_fee={}, request_timeout_ms={}, allow_rejections={}, evaluate={}, workload={}", endpoint, args.self_contained, + domain.chain_id, + domain.verifying_contract, args.count, - args.concurrency.max(1), + effective_concurrency, args.max_fee, args.request_timeout_ms, args.allow_rejections, - args.workload + args.evaluate, + args.workload.as_str(), ); let memory_sampler = managed.as_ref().and_then(|value| value.pid()).map(|pid| { - MemorySampler::start(pid, Duration::from_millis(args.memory_sample_interval_ms)) + MemorySampler::start( + pid, + Duration::from_millis(DEFAULT_MEMORY_SAMPLE_INTERVAL_MS), + ) }); let config = AckRunConfig { endpoint, + domain, count: args.count, - concurrency: args.concurrency.max(1), + concurrency: effective_concurrency, seed_offset: args.seed_offset.unwrap_or_else(default_seed_offset), max_fee: args.max_fee, request_timeout_ms: args.request_timeout_ms, - progress_every: args.progress_every, fail_on_rejection: !args.allow_rejections, workload: WorkloadConfig { - kind: args.workload.into(), + kind: args.workload, accounts_file: args.accounts_file.clone(), - initial_balance: args.initial_balance, transfer_amount: args.transfer_amount, }, }; @@ -215,49 +159,43 @@ async fn main() -> BenchResult<()> { } } - if let Some(path) = report_result - .as_ref() - .ok() - .and_then(|report| report.sequencer_log_path.clone()) - && let Some(profile) = parse_inclusion_lane_profile_from_log(PathBuf::from(path).as_path())? 
- && let Ok(report) = report_result.as_mut() - { - report.inclusion_lane_profile = Some(profile); - } - let report = report_result?; + let evaluation = args + .evaluate + .then(|| evaluate_ack_target(&report, network_profile.clone())); + print_ack_report(&report); + if let Some(value) = evaluation.as_ref() { + print_target_evaluation(value); + } + if let Some(path) = json_out.as_ref() { - if let Some(parent) = Path::new(path).parent() { - fs::create_dir_all(parent)?; - } - let payload = AckJsonOutput { - benchmark: "ack_latency", - config: AckJsonConfig { - endpoint: report.endpoint.clone(), - self_contained: args.self_contained, - count: args.count, - concurrency: args.concurrency.max(1), - max_fee: args.max_fee, - request_timeout_ms: args.request_timeout_ms, - allow_rejections: args.allow_rejections, - workload: args.workload.as_str().to_string(), - accounts_file: args.accounts_file.clone(), - initial_balance: args.initial_balance, - transfer_amount: args.transfer_amount, - }, - report, - }; - fs::write(path, serde_json::to_vec_pretty(&payload)?)?; + let config_json = json!({ + "endpoint": report.endpoint, + "self_contained": args.self_contained, + "domain_name": DOMAIN_NAME, + "domain_version": DOMAIN_VERSION, + "domain_chain_id": domain.chain_id, + "domain_verifying_contract": domain.verifying_contract.to_string(), + "count": args.count, + "concurrency": effective_concurrency, + "max_fee": args.max_fee, + "request_timeout_ms": args.request_timeout_ms, + "allow_rejections": args.allow_rejections, + "evaluation_requested": args.evaluate, + "network_profile": network_profile, + "workload": args.workload.as_str(), + "accounts_file": args.accounts_file, + "transfer_amount": args.transfer_amount, + }); + write_json_output( + Path::new(path), + "ack_latency", + &config_json, + &report, + evaluation.as_ref(), + )?; println!("ack json: {path}"); } Ok(()) } - -fn default_json_output_path(prefix: &str) -> String { - let ts = std::time::SystemTime::now() - 
.duration_since(std::time::UNIX_EPOCH) - .map(|value| value.as_secs()) - .unwrap_or(0); - format!("benchmarks/results/{prefix}-{ts}.json") -} diff --git a/benchmarks/src/bin/compare_latest.rs b/benchmarks/src/bin/compare_latest.rs index e5ce067..d85a236 100644 --- a/benchmarks/src/bin/compare_latest.rs +++ b/benchmarks/src/bin/compare_latest.rs @@ -1,12 +1,15 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) -use benchmarks::BenchResult; +use benchmarks::{ + AckRunReport, BenchResult, BenchmarkJsonOutput, E2eRunReport, runtime::MemoryReport, +}; use clap::{Parser, ValueEnum}; -use serde::Deserialize; +use serde_json::Value; use std::collections::BTreeMap; use std::fs; use std::path::{Path, PathBuf}; +use std::time::Duration; #[derive(Debug, Clone, Copy, ValueEnum)] enum CompareKind { @@ -16,6 +19,28 @@ enum CompareKind { All, } +#[derive(Debug, Clone, Copy, ValueEnum)] +enum SweepMode { + Ack, + E2e, +} + +impl SweepMode { + fn file_prefix(self) -> &'static str { + match self { + Self::Ack => "ack-sweep-", + Self::E2e => "e2e-sweep-", + } + } + + fn as_str(self) -> &'static str { + match self { + Self::Ack => "ack", + Self::E2e => "e2e", + } + } +} + #[derive(Debug, Parser)] #[command(name = "compare_latest")] #[command(about = "Compare the latest two benchmark result files")] @@ -24,67 +49,8 @@ struct Cli { results_dir: PathBuf, #[arg(long, value_enum, default_value_t = CompareKind::All)] kind: CompareKind, -} - -#[derive(Debug, Deserialize)] -struct DurationJson { - secs: u64, - nanos: u32, -} - -impl DurationJson { - fn as_secs_f64(&self) -> f64 { - self.secs as f64 + f64::from(self.nanos) / 1_000_000_000.0 - } - - fn as_ms_f64(&self) -> f64 { - self.as_secs_f64() * 1_000.0 - } -} - -#[derive(Debug, Deserialize)] -struct StatsJson { - p50: DurationJson, - p95: DurationJson, - p99: DurationJson, - p999: DurationJson, - max: DurationJson, -} - -#[derive(Debug, Deserialize)] -struct MemoryJson { - 
rss_start_mb: f64, - rss_peak_mb: f64, - rss_growth_mb: f64, -} - -#[derive(Debug, Deserialize)] -struct AckFileJson { - report: AckReportJson, -} - -#[derive(Debug, Deserialize)] -struct AckReportJson { - accepted: u64, - rejected: u64, - total_wall: DurationJson, - ack_latency_accepted: StatsJson, - memory: Option, -} - -#[derive(Debug, Deserialize)] -struct E2eFileJson { - report: E2eReportJson, -} - -#[derive(Debug, Deserialize)] -struct E2eReportJson { - accepted: u64, - rejected: u64, - total_wall: DurationJson, - ack_latency_accepted: StatsJson, - e2e_latency_accepted: StatsJson, - memory: Option, + #[arg(long, value_enum, default_value_t = SweepMode::E2e)] + sweep_mode: SweepMode, } #[derive(Debug, Clone)] @@ -92,9 +58,21 @@ struct SweepCsvRow { concurrency: u64, accepted_tps: f64, rejected_count: u64, + http_rejected_count: u64, p95_ms: f64, p99_ms: f64, p999_ms: f64, + client_failure_count: u64, + http_429_count: u64, +} + +#[derive(Debug, Clone)] +struct SweepSummaryView { + tps_at_first_any_rejection: Option, + tps_at_first_non_200: Option, + tps_at_first_429: Option, + tps_at_first_client_failure: Option, + max_sustainable_tps_at_0_rejections: Option, } fn main() -> BenchResult<()> { @@ -102,13 +80,13 @@ fn main() -> BenchResult<()> { match cli.kind { CompareKind::Ack => compare_ack(&cli.results_dir)?, CompareKind::E2e => compare_e2e(&cli.results_dir)?, - CompareKind::Sweep => compare_sweep(&cli.results_dir)?, + CompareKind::Sweep => compare_sweep(&cli.results_dir, cli.sweep_mode)?, CompareKind::All => { compare_ack(&cli.results_dir)?; println!(); compare_e2e(&cli.results_dir)?; println!(); - compare_sweep(&cli.results_dir)?; + compare_sweep(&cli.results_dir, cli.sweep_mode)?; } } Ok(()) @@ -116,8 +94,8 @@ fn main() -> BenchResult<()> { fn compare_ack(results_dir: &Path) -> BenchResult<()> { let (old_path, new_path) = latest_two_files(results_dir, "ack-latency-", ".json")?; - let old = read_json::(&old_path)?; - let new = read_json::(&new_path)?; + let 
old = read_json::>(&old_path)?; + let new = read_json::>(&new_path)?; println!( "ACK latest two:\n old: {}\n new: {}", @@ -127,15 +105,15 @@ fn compare_ack(results_dir: &Path) -> BenchResult<()> { print_common( old.report.accepted, old.report.rejected, - &old.report.total_wall, + old.report.total_wall, ); print_common_delta( old.report.accepted, old.report.rejected, - &old.report.total_wall, + old.report.total_wall, new.report.accepted, new.report.rejected, - &new.report.total_wall, + new.report.total_wall, ); print_stats_delta( "ack latency", @@ -148,8 +126,8 @@ fn compare_ack(results_dir: &Path) -> BenchResult<()> { fn compare_e2e(results_dir: &Path) -> BenchResult<()> { let (old_path, new_path) = latest_two_files(results_dir, "e2e-latency-", ".json")?; - let old = read_json::(&old_path)?; - let new = read_json::(&new_path)?; + let old = read_json::>(&old_path)?; + let new = read_json::>(&new_path)?; println!( "E2E latest two:\n old: {}\n new: {}", @@ -159,15 +137,15 @@ fn compare_e2e(results_dir: &Path) -> BenchResult<()> { print_common( old.report.accepted, old.report.rejected, - &old.report.total_wall, + old.report.total_wall, ); print_common_delta( old.report.accepted, old.report.rejected, - &old.report.total_wall, + old.report.total_wall, new.report.accepted, new.report.rejected, - &new.report.total_wall, + new.report.total_wall, ); print_stats_delta( "ack latency", @@ -183,13 +161,14 @@ fn compare_e2e(results_dir: &Path) -> BenchResult<()> { Ok(()) } -fn compare_sweep(results_dir: &Path) -> BenchResult<()> { - let (old_path, new_path) = latest_two_files(results_dir, "e2e-sweep-", ".csv")?; +fn compare_sweep(results_dir: &Path, sweep_mode: SweepMode) -> BenchResult<()> { + let (old_path, new_path) = latest_two_files(results_dir, sweep_mode.file_prefix(), ".csv")?; let old_rows = read_sweep_rows(&old_path)?; let new_rows = read_sweep_rows(&new_path)?; println!( - "SWEEP latest two:\n old: {}\n new: {}", + "{} SWEEP latest two:\n old: {}\n new: {}", + 
sweep_mode.as_str().to_uppercase(), old_path.display(), new_path.display() ); @@ -198,7 +177,9 @@ fn compare_sweep(results_dir: &Path) -> BenchResult<()> { let new_by_concurrency = map_sweep_rows(&new_rows); println!(" deltas by concurrency:"); - println!(" c,accepted_tps_delta,p95_ms_delta,p99_ms_delta,p999_ms_delta,rejected_delta"); + println!( + " c,accepted_tps_delta,p95_ms_delta,p99_ms_delta,p999_ms_delta,rejected_delta,client_failure_delta,http_429_delta" + ); let mut all_concurrency: Vec = old_by_concurrency .keys() @@ -213,35 +194,59 @@ fn compare_sweep(results_dir: &Path) -> BenchResult<()> { let new = new_by_concurrency.get(&concurrency); if let (Some(old), Some(new)) = (old, new) { println!( - " {},{:+.3},{:+.3},{:+.3},{:+.3},{:+}", + " {},{:+.3},{:+.3},{:+.3},{:+.3},{:+},{:+},{:+}", concurrency, new.accepted_tps - old.accepted_tps, new.p95_ms - old.p95_ms, new.p99_ms - old.p99_ms, new.p999_ms - old.p999_ms, i128::from(new.rejected_count) - i128::from(old.rejected_count), + i128::from(new.client_failure_count) - i128::from(old.client_failure_count), + i128::from(new.http_429_count) - i128::from(old.http_429_count), ); } else { - println!(" {concurrency},n/a,n/a,n/a,n/a,n/a"); + println!(" {concurrency},n/a,n/a,n/a,n/a,n/a,n/a,n/a"); } } - let old_max_zero_rejection_tps = max_zero_rejection_tps(&old_rows); - let new_max_zero_rejection_tps = max_zero_rejection_tps(&new_rows); - let old_first_rejection_tps = first_rejection_tps(&old_rows); - let new_first_rejection_tps = first_rejection_tps(&new_rows); + let old_summary = compute_sweep_summary(old_rows.as_slice()); + let new_summary = compute_sweep_summary(new_rows.as_slice()); println!(" summary:"); println!( " max_sustainable_tps_at_0_rejections: old={:.3}, new={:.3}, delta={:+.3}", - old_max_zero_rejection_tps, - new_max_zero_rejection_tps, - new_max_zero_rejection_tps - old_max_zero_rejection_tps + old_summary + .max_sustainable_tps_at_0_rejections + .unwrap_or(0.0), + new_summary + 
.max_sustainable_tps_at_0_rejections + .unwrap_or(0.0), + new_summary + .max_sustainable_tps_at_0_rejections + .unwrap_or(0.0) + - old_summary + .max_sustainable_tps_at_0_rejections + .unwrap_or(0.0) + ); + println!( + " tps_at_first_any_rejection: old={}, new={}", + fmt_opt(old_summary.tps_at_first_any_rejection), + fmt_opt(new_summary.tps_at_first_any_rejection), ); println!( " tps_at_first_non_200: old={}, new={}", - fmt_opt(old_first_rejection_tps), - fmt_opt(new_first_rejection_tps), + fmt_opt(old_summary.tps_at_first_non_200), + fmt_opt(new_summary.tps_at_first_non_200), + ); + println!( + " tps_at_first_429: old={}, new={}", + fmt_opt(old_summary.tps_at_first_429), + fmt_opt(new_summary.tps_at_first_429), + ); + println!( + " tps_at_first_client_failure: old={}, new={}", + fmt_opt(old_summary.tps_at_first_client_failure), + fmt_opt(new_summary.tps_at_first_client_failure), ); Ok(()) } @@ -297,7 +302,7 @@ fn trailing_number(file_name: &str) -> Option { digits.parse::().ok() } -fn read_json Deserialize<'de>>(path: &Path) -> BenchResult { +fn read_json serde::Deserialize<'de>>(path: &Path) -> BenchResult { let raw = fs::read_to_string(path)?; let parsed = serde_json::from_str::(&raw)?; Ok(parsed) @@ -324,16 +329,22 @@ fn read_sweep_rows(path: &Path) -> BenchResult> { let concurrency = parse_csv_u64(&cols, &header_idx, "concurrency")?; let accepted_tps = parse_csv_f64(&cols, &header_idx, "accepted_tps")?; let rejected_count = parse_csv_u64(&cols, &header_idx, "rejected_count")?; + let http_rejected_count = parse_csv_u64(&cols, &header_idx, "http_rejected_count")?; let p95_ms = parse_csv_f64(&cols, &header_idx, "p95_ms")?; let p99_ms = parse_csv_f64(&cols, &header_idx, "p99_ms")?; let p999_ms = parse_csv_f64(&cols, &header_idx, "p999_ms")?; + let client_failure_count = parse_csv_u64(&cols, &header_idx, "client_failure_count")?; + let http_429_count = parse_csv_u64(&cols, &header_idx, "http_429_count")?; rows.push(SweepCsvRow { concurrency, accepted_tps, 
rejected_count, + http_rejected_count, p95_ms, p99_ms, p999_ms, + client_failure_count, + http_429_count, }); } Ok(rows) @@ -373,20 +384,33 @@ fn map_sweep_rows(rows: &[SweepCsvRow]) -> BTreeMap { out } -fn max_zero_rejection_tps(rows: &[SweepCsvRow]) -> f64 { - rows.iter() - .filter(|row| row.rejected_count == 0) - .map(|row| row.accepted_tps) - .fold(0.0, f64::max) -} - -fn first_rejection_tps(rows: &[SweepCsvRow]) -> Option { - rows.iter() - .find(|row| row.rejected_count > 0) - .map(|row| row.accepted_tps) +fn compute_sweep_summary(rows: &[SweepCsvRow]) -> SweepSummaryView { + SweepSummaryView { + tps_at_first_any_rejection: rows + .iter() + .find(|row| row.rejected_count > 0) + .map(|row| row.accepted_tps), + tps_at_first_non_200: rows + .iter() + .find(|row| row.http_rejected_count > 0) + .map(|row| row.accepted_tps), + tps_at_first_429: rows + .iter() + .find(|row| row.http_429_count > 0) + .map(|row| row.accepted_tps), + tps_at_first_client_failure: rows + .iter() + .find(|row| row.client_failure_count > 0) + .map(|row| row.accepted_tps), + max_sustainable_tps_at_0_rejections: rows + .iter() + .filter(|row| row.rejected_count == 0) + .map(|row| row.accepted_tps) + .max_by(|a, b| a.total_cmp(b)), + } } -fn print_common(accepted: u64, rejected: u64, total_wall: &DurationJson) { +fn print_common(accepted: u64, rejected: u64, total_wall: Duration) { let throughput = throughput(accepted, total_wall); println!(" old summary:"); println!(" accepted: {accepted}"); @@ -397,10 +421,10 @@ fn print_common(accepted: u64, rejected: u64, total_wall: &DurationJson) { fn print_common_delta( old_accepted: u64, old_rejected: u64, - old_total_wall: &DurationJson, + old_total_wall: Duration, new_accepted: u64, new_rejected: u64, - new_total_wall: &DurationJson, + new_total_wall: Duration, ) { let old_tps = throughput(old_accepted, old_total_wall); let new_tps = throughput(new_accepted, new_total_wall); @@ -421,23 +445,23 @@ fn print_common_delta( ); } -fn 
print_stats_delta(name: &str, old: &StatsJson, new: &StatsJson) { +fn print_stats_delta(name: &str, old: &benchmarks::Stats, new: &benchmarks::Stats) { println!(" {name} delta (new - old):"); - print_metric_delta("p50", old.p50.as_ms_f64(), new.p50.as_ms_f64()); - print_metric_delta("p95", old.p95.as_ms_f64(), new.p95.as_ms_f64()); - print_metric_delta("p99", old.p99.as_ms_f64(), new.p99.as_ms_f64()); - print_metric_delta("p99.9", old.p999.as_ms_f64(), new.p999.as_ms_f64()); - print_metric_delta("max", old.max.as_ms_f64(), new.max.as_ms_f64()); + print_metric_delta("p50", duration_ms(old.p50), duration_ms(new.p50)); + print_metric_delta("p95", duration_ms(old.p95), duration_ms(new.p95)); + print_metric_delta("p99", duration_ms(old.p99), duration_ms(new.p99)); + print_metric_delta("p99.9", duration_ms(old.p999), duration_ms(new.p999)); + print_metric_delta("max", duration_ms(old.max), duration_ms(new.max)); } -fn print_memory_delta(old: Option<&MemoryJson>, new: Option<&MemoryJson>) { +fn print_memory_delta(old: Option<&MemoryReport>, new: Option<&MemoryReport>) { let (Some(old), Some(new)) = (old, new) else { return; }; println!(" memory delta (new - old):"); - print_metric_delta("rss_start_mb", old.rss_start_mb, new.rss_start_mb); - print_metric_delta("rss_peak_mb", old.rss_peak_mb, new.rss_peak_mb); - print_metric_delta("rss_growth_mb", old.rss_growth_mb, new.rss_growth_mb); + print_optional_metric_delta("rss_start_mb", old.rss_start_mb, new.rss_start_mb); + print_optional_metric_delta("rss_peak_mb", old.rss_peak_mb, new.rss_peak_mb); + print_optional_metric_delta("rss_growth_mb", old.rss_growth_mb, new.rss_growth_mb); } fn print_metric_delta(label: &str, old_value: f64, new_value: f64) { @@ -450,7 +474,15 @@ fn print_metric_delta(label: &str, old_value: f64, new_value: f64) { ); } -fn throughput(accepted: u64, total_wall: &DurationJson) -> f64 { +fn print_optional_metric_delta(label: &str, old_value: Option, new_value: Option) { + let (Some(old_value), 
Some(new_value)) = (old_value, new_value) else { + println!(" {label}: n/a"); + return; + }; + print_metric_delta(label, old_value, new_value); +} + +fn throughput(accepted: u64, total_wall: Duration) -> f64 { let secs = total_wall.as_secs_f64(); if secs == 0.0 { return 0.0; @@ -458,6 +490,10 @@ fn throughput(accepted: u64, total_wall: &DurationJson) -> f64 { accepted as f64 / secs } +fn duration_ms(value: Duration) -> f64 { + value.as_secs_f64() * 1000.0 +} + fn pct(new_value: f64, old_value: f64) -> f64 { if old_value == 0.0 { 0.0 diff --git a/benchmarks/src/bin/e2e_latency.rs b/benchmarks/src/bin/e2e_latency.rs index 23a47cc..1cbd404 100644 --- a/benchmarks/src/bin/e2e_latency.rs +++ b/benchmarks/src/bin/e2e_latency.rs @@ -1,22 +1,21 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) +use alloy_primitives::Address; use benchmarks::{ - BenchResult, DEFAULT_ENDPOINT, DEFAULT_WORKLOAD_INITIAL_BALANCE, - DEFAULT_WORKLOAD_TRANSFER_AMOUNT, E2eRunConfig, WorkloadConfig, WorkloadKind, - default_seed_offset, print_e2e_report, run_e2e_benchmark, + BenchResult, DEFAULT_ENDPOINT, DEFAULT_WORKLOAD_TRANSFER_AMOUNT, DOMAIN_NAME, DOMAIN_VERSION, + E2eRunConfig, NetworkProfile, WorkloadConfig, WorkloadKind, default_json_output_path, + default_seed_offset, evaluate_soft_confirm_target, parse_address, print_e2e_report, + print_target_evaluation, resolve_external_benchmark_domain, run_e2e_benchmark, runtime::{ - DEFAULT_MEMORY_SAMPLE_INTERVAL_MS, DEFAULT_RUNTIME_METRICS_LOG_INTERVAL_MS, - DEFAULT_SEQUENCER_BIN, DEFAULT_SEQUENCER_SHUTDOWN_TIMEOUT_MS, - DEFAULT_SEQUENCER_START_TIMEOUT_MS, ManagedSequencer, ManagedSequencerConfig, - MemorySampler, default_sequencer_log_path, parse_inclusion_lane_profile_from_log, + DEFAULT_MEMORY_SAMPLE_INTERVAL_MS, DEFAULT_SEQUENCER_BIN, ManagedSequencer, + ManagedSequencerConfig, MemorySampler, }, + write_json_output, }; -use clap::{Parser, ValueEnum}; -use serde::Serialize; -use std::fs; +use 
clap::Parser; +use serde_json::json; use std::path::Path; -use std::path::PathBuf; use std::time::Duration; #[derive(Debug, Parser)] @@ -24,41 +23,25 @@ use std::time::Duration; name = "e2e_latency", about = "end-to-end latency benchmark", version, - after_help = "Examples:\n cargo run -p benchmarks --bin e2e_latency -- --endpoint http://127.0.0.1:3000 --count 1000 --concurrency 16 --max-fee 0 --from-offset 0\n cargo run -p benchmarks --bin e2e_latency --release -- --count 2000 --concurrency 64 --allow-rejections" + after_help = "Examples:\n cargo run -p benchmarks --bin e2e_latency -- --self-contained --count 1000 --concurrency 16 --max-fee 0 --from-offset 0\n cargo run -p benchmarks --bin e2e_latency -- --endpoint http://127.0.0.1:3000 --domain-chain-id 31337 --domain-verifying-contract 0x1111111111111111111111111111111111111111 --count 1000 --concurrency 16 --max-fee 0 --from-offset 0\n cargo run -p benchmarks --bin e2e_latency -- --self-contained --count 5000 --concurrency 16 --evaluate" )] struct Args { - #[arg(long, visible_alias = "http-url", default_value = DEFAULT_ENDPOINT)] + #[arg(long, default_value = DEFAULT_ENDPOINT)] endpoint: String, #[arg(long, default_value_t = false)] self_contained: bool, #[arg(long, default_value = DEFAULT_SEQUENCER_BIN)] sequencer_bin: String, - #[arg(long, default_value_t = DEFAULT_SEQUENCER_START_TIMEOUT_MS)] - sequencer_start_timeout_ms: u64, - #[arg(long, default_value_t = DEFAULT_SEQUENCER_SHUTDOWN_TIMEOUT_MS)] - sequencer_shutdown_timeout_ms: u64, - #[arg(long, default_value_t = true)] - temp_db: bool, #[arg(long)] - sequencer_log_path: Option, - #[arg(long, default_value_t = true)] - sequencer_runtime_metrics_enabled: bool, - #[arg(long, default_value_t = DEFAULT_RUNTIME_METRICS_LOG_INTERVAL_MS)] - sequencer_runtime_metrics_log_interval_ms: u64, - #[arg(long, default_value = "info")] - sequencer_rust_log: String, - #[arg(long, default_value_t = DEFAULT_MEMORY_SAMPLE_INTERVAL_MS)] - memory_sample_interval_ms: u64, - 
#[arg(long, value_enum, default_value_t = CliWorkload::Synthetic)] - workload: CliWorkload, + domain_chain_id: Option, + #[arg(long, value_parser = parse_address)] + domain_verifying_contract: Option
, + #[arg(long, value_enum, default_value_t = WorkloadKind::Synthetic)] + workload: WorkloadKind, #[arg(long)] accounts_file: Option, - #[arg(long, default_value_t = DEFAULT_WORKLOAD_INITIAL_BALANCE)] - initial_balance: u64, #[arg(long, default_value_t = DEFAULT_WORKLOAD_TRANSFER_AMOUNT)] transfer_amount: u64, - #[arg(long, visible_alias = "ws-url")] - ws_subscribe_url: Option, #[arg(long, default_value_t = 0_u64)] from_offset: u64, #[arg(long, default_value_t = 100_u64)] @@ -73,74 +56,19 @@ struct Args { request_timeout_ms: u64, #[arg(long, default_value_t = 5_000_u64)] max_ws_wait_ms: u64, - #[arg(long, default_value_t = 0_u64)] - progress_every: u64, - #[arg(long, default_value_t = false)] - skip_backlog_drain: bool, - #[arg(long, default_value_t = 25_u64)] - backlog_drain_idle_ms: u64, - #[arg(long, default_value_t = 2_000_u64)] - backlog_drain_max_ms: u64, #[arg(long, default_value_t = false)] allow_rejections: bool, + #[arg(long, default_value_t = false)] + evaluate: bool, #[arg(long)] json_out: Option, } -#[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum)] -enum CliWorkload { - #[value(name = "synthetic")] - Synthetic, - #[value(name = "funded-transfer")] - FundedTransfer, -} - -impl From for WorkloadKind { - fn from(value: CliWorkload) -> Self { - match value { - CliWorkload::Synthetic => Self::Synthetic, - CliWorkload::FundedTransfer => Self::FundedTransfer, - } - } -} - -impl CliWorkload { - fn as_str(self) -> &'static str { - match self { - Self::Synthetic => "synthetic", - Self::FundedTransfer => "funded-transfer", - } - } -} - -#[derive(Debug, Serialize)] -struct E2eJsonConfig { - endpoint: String, - ws_subscribe_url: Option, - self_contained: bool, - from_offset: u64, - count: u64, - concurrency: usize, - max_fee: u32, - request_timeout_ms: u64, - max_ws_wait_ms: u64, - allow_rejections: bool, - workload: String, - accounts_file: Option, - initial_balance: u64, - transfer_amount: u64, -} - -#[derive(Debug, Serialize)] -struct E2eJsonOutput { - 
benchmark: &'static str, - config: E2eJsonConfig, - report: benchmarks::E2eRunReport, -} - #[tokio::main] async fn main() -> BenchResult<()> { let args = Args::parse(); + let effective_concurrency = args.concurrency.max(1); + let network_profile = NetworkProfile::same_host_baseline(); let json_out = args.json_out.clone().or_else(|| { args.self_contained .then(|| default_json_output_path("e2e-latency")) @@ -149,72 +77,67 @@ async fn main() -> BenchResult<()> { Some( ManagedSequencer::spawn(ManagedSequencerConfig { sequencer_bin: args.sequencer_bin.clone(), - start_timeout: Duration::from_millis(args.sequencer_start_timeout_ms), - shutdown_timeout: Duration::from_millis(args.sequencer_shutdown_timeout_ms), - temp_db: args.temp_db, - log_path: args - .sequencer_log_path - .clone() - .or_else(|| Some(default_sequencer_log_path("e2e-latency-self-contained"))), - runtime_metrics_enabled: args.sequencer_runtime_metrics_enabled, - runtime_metrics_log_interval: Duration::from_millis( - args.sequencer_runtime_metrics_log_interval_ms, - ), - rust_log: args.sequencer_rust_log.clone(), + log_prefix: "e2e-latency-self-contained", }) .await?, ) } else { None }; + let domain = if let Some(value) = managed.as_ref() { + if args.domain_chain_id.is_some() || args.domain_verifying_contract.is_some() { + return Err(std::io::Error::other( + "self-contained benchmarks use the deployed local Application; remove explicit --domain-* args", + ) + .into()); + } + value.domain() + } else { + resolve_external_benchmark_domain(args.domain_chain_id, args.domain_verifying_contract)? 
+ }; let endpoint = managed .as_ref() .map(|value| value.endpoint.clone()) .unwrap_or_else(|| args.endpoint.clone()); - let ws_subscribe_url = if args.self_contained { - managed.as_ref().map(|value| value.ws_subscribe_url.clone()) - } else { - args.ws_subscribe_url.clone() - }; println!( - "e2e config: endpoint={}, self_contained={}, ws_subscribe_url={:?}, from_offset={}, count={}, concurrency={}, max_fee={}, request_timeout_ms={}, max_ws_wait_ms={}, allow_rejections={}, workload={:?}", + "e2e config: endpoint={}, self_contained={}, domain_chain_id={}, domain_verifying_contract={}, from_offset={}, count={}, concurrency={}, max_fee={}, request_timeout_ms={}, max_ws_wait_ms={}, allow_rejections={}, evaluate={}, workload={}", endpoint, args.self_contained, - ws_subscribe_url, + domain.chain_id, + domain.verifying_contract, args.from_offset, args.count, - args.concurrency.max(1), + effective_concurrency, args.max_fee, args.request_timeout_ms, args.max_ws_wait_ms, args.allow_rejections, - args.workload + args.evaluate, + args.workload.as_str(), ); let memory_sampler = managed.as_ref().and_then(|value| value.pid()).map(|pid| { - MemorySampler::start(pid, Duration::from_millis(args.memory_sample_interval_ms)) + MemorySampler::start( + pid, + Duration::from_millis(DEFAULT_MEMORY_SAMPLE_INTERVAL_MS), + ) }); let config = E2eRunConfig { endpoint, - ws_subscribe_url, + domain, from_offset: args.from_offset, count: args.count, - concurrency: args.concurrency.max(1), + concurrency: effective_concurrency, seed_offset: args.seed_offset.unwrap_or_else(default_seed_offset), max_fee: args.max_fee, request_timeout_ms: args.request_timeout_ms, max_ws_wait_ms: args.max_ws_wait_ms, - progress_every: args.progress_every, - drain_backlog_before_bench: !args.skip_backlog_drain, - backlog_drain_idle_ms: args.backlog_drain_idle_ms, - backlog_drain_max_ms: args.backlog_drain_max_ms, fail_on_rejection: !args.allow_rejections, workload: WorkloadConfig { - kind: args.workload.into(), + kind: 
args.workload, accounts_file: args.accounts_file.clone(), - initial_balance: args.initial_balance, transfer_amount: args.transfer_amount, }, }; @@ -244,56 +167,45 @@ async fn main() -> BenchResult<()> { } } - if let Some(path) = report_result - .as_ref() - .ok() - .and_then(|report| report.sequencer_log_path.clone()) - && let Some(profile) = parse_inclusion_lane_profile_from_log(PathBuf::from(path).as_path())? - && let Ok(report) = report_result.as_mut() - { - report.inclusion_lane_profile = Some(profile); - } - let report = report_result?; + let evaluation = args + .evaluate + .then(|| evaluate_soft_confirm_target(&report, network_profile.clone())); + print_e2e_report(&report); + if let Some(value) = evaluation.as_ref() { + print_target_evaluation(value); + } + if let Some(path) = json_out.as_ref() { - if let Some(parent) = Path::new(path).parent() { - fs::create_dir_all(parent)?; - } - let payload = E2eJsonOutput { - benchmark: "e2e_latency", - config: E2eJsonConfig { - endpoint: report.endpoint.clone(), - ws_subscribe_url: if args.self_contained { - Some(report.ws_subscribe_url.clone()) - } else { - args.ws_subscribe_url.clone() - }, - self_contained: args.self_contained, - from_offset: args.from_offset, - count: args.count, - concurrency: args.concurrency.max(1), - max_fee: args.max_fee, - request_timeout_ms: args.request_timeout_ms, - max_ws_wait_ms: args.max_ws_wait_ms, - allow_rejections: args.allow_rejections, - workload: args.workload.as_str().to_string(), - accounts_file: args.accounts_file.clone(), - initial_balance: args.initial_balance, - transfer_amount: args.transfer_amount, - }, - report, - }; - fs::write(path, serde_json::to_vec_pretty(&payload)?)?; + let config_json = json!({ + "endpoint": report.endpoint, + "self_contained": args.self_contained, + "domain_name": DOMAIN_NAME, + "domain_version": DOMAIN_VERSION, + "domain_chain_id": domain.chain_id, + "domain_verifying_contract": domain.verifying_contract.to_string(), + "from_offset": 
args.from_offset, + "count": args.count, + "concurrency": effective_concurrency, + "max_fee": args.max_fee, + "request_timeout_ms": args.request_timeout_ms, + "max_ws_wait_ms": args.max_ws_wait_ms, + "allow_rejections": args.allow_rejections, + "evaluation_requested": args.evaluate, + "network_profile": network_profile, + "workload": args.workload.as_str(), + "accounts_file": args.accounts_file, + "transfer_amount": args.transfer_amount, + }); + write_json_output( + Path::new(path), + "e2e_latency", + &config_json, + &report, + evaluation.as_ref(), + )?; println!("e2e json: {path}"); } Ok(()) } - -fn default_json_output_path(prefix: &str) -> String { - let ts = std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .map(|value| value.as_secs()) - .unwrap_or(0); - format!("benchmarks/results/{prefix}-{ts}.json") -} diff --git a/benchmarks/src/bin/sweep.rs b/benchmarks/src/bin/sweep.rs index 510e886..9ddbd1a 100644 --- a/benchmarks/src/bin/sweep.rs +++ b/benchmarks/src/bin/sweep.rs @@ -1,22 +1,22 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) +use alloy_primitives::Address; use benchmarks::{ - AckRunConfig, BenchResult, DEFAULT_ENDPOINT, DEFAULT_WORKLOAD_INITIAL_BALANCE, - DEFAULT_WORKLOAD_TRANSFER_AMOUNT, E2eRunConfig, WorkloadConfig, WorkloadKind, - default_seed_offset, print_ack_report, print_e2e_report, run_ack_benchmark, run_e2e_benchmark, + AckRunConfig, BenchResult, DEFAULT_ENDPOINT, DEFAULT_WORKLOAD_TRANSFER_AMOUNT, DOMAIN_NAME, + DOMAIN_VERSION, E2eRunConfig, NetworkProfile, SweepRow, SweepRunReport, WorkloadConfig, + WorkloadKind, compute_capacity_summary, default_json_output_path, default_seed_offset, + parse_address, print_ack_report, print_e2e_report, print_sweep_report, + resolve_external_benchmark_domain, run_ack_benchmark, run_e2e_benchmark, runtime::{ - DEFAULT_MEMORY_SAMPLE_INTERVAL_MS, DEFAULT_RUNTIME_METRICS_LOG_INTERVAL_MS, - DEFAULT_SEQUENCER_BIN, 
DEFAULT_SEQUENCER_SHUTDOWN_TIMEOUT_MS, - DEFAULT_SEQUENCER_START_TIMEOUT_MS, ManagedSequencer, ManagedSequencerConfig, - MemorySampler, default_sequencer_log_path, parse_inclusion_lane_profile_from_log, + DEFAULT_MEMORY_SAMPLE_INTERVAL_MS, DEFAULT_SEQUENCER_BIN, ManagedSequencer, + ManagedSequencerConfig, MemorySampler, }, + write_json_output, write_sweep_csv, }; use clap::{Parser, ValueEnum}; -use serde::Serialize; -use std::collections::BTreeMap; -use std::fs; -use std::path::PathBuf; +use serde_json::json; +use std::path::{Path, PathBuf}; use std::process::Command; use std::time::Duration; use std::time::{SystemTime, UNIX_EPOCH}; @@ -38,63 +38,32 @@ impl SweepMode { } } -#[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum)] -enum CliWorkload { - #[value(name = "synthetic")] - Synthetic, - #[value(name = "funded-transfer")] - FundedTransfer, -} - -impl From for WorkloadKind { - fn from(value: CliWorkload) -> Self { - match value { - CliWorkload::Synthetic => Self::Synthetic, - CliWorkload::FundedTransfer => Self::FundedTransfer, - } - } -} - #[derive(Debug, Clone, Parser)] #[command( name = "sweep", about = "benchmark sweep runner", version, - after_help = "Examples:\n cargo run -p benchmarks --bin sweep -- --mode e2e --endpoint http://127.0.0.1:3000 --count 1000 --concurrency-list \"1 2 4 8 16 32 64\"\n cargo run -p benchmarks --bin sweep -- --mode e2e --concurrency-range 1:128:8 --json-out benchmarks/results/e2e-latest.json\n cargo run -p benchmarks --bin sweep --release -- --mode ack --count 5000 --concurrency-list \"1 2 4 8 16 32 64 96 128\"" + after_help = "Examples:\n cargo run -p benchmarks --bin sweep -- --self-contained --mode e2e --count 1000 --concurrency-list \"1 2 4 8 16 32 64\"\n cargo run -p benchmarks --bin sweep -- --endpoint http://127.0.0.1:3000 --domain-chain-id 31337 --domain-verifying-contract 0x1111111111111111111111111111111111111111 --mode e2e --count 1000 --concurrency-range 1:128:8 --json-out benchmarks/results/e2e-sweep-latest.json" 
)] struct Args { #[arg(long, value_enum, default_value_t = SweepMode::E2e)] mode: SweepMode, #[arg(long, default_value_t = 1_000_u64)] count: u64, - #[arg(long, visible_alias = "url", default_value = DEFAULT_ENDPOINT)] + #[arg(long, default_value = DEFAULT_ENDPOINT)] endpoint: String, #[arg(long, default_value_t = false)] self_contained: bool, #[arg(long, default_value = DEFAULT_SEQUENCER_BIN)] sequencer_bin: String, - #[arg(long, default_value_t = DEFAULT_SEQUENCER_START_TIMEOUT_MS)] - sequencer_start_timeout_ms: u64, - #[arg(long, default_value_t = DEFAULT_SEQUENCER_SHUTDOWN_TIMEOUT_MS)] - sequencer_shutdown_timeout_ms: u64, - #[arg(long, default_value_t = true)] - temp_db: bool, #[arg(long)] - sequencer_log_path: Option, - #[arg(long, default_value_t = true)] - sequencer_runtime_metrics_enabled: bool, - #[arg(long, default_value_t = DEFAULT_RUNTIME_METRICS_LOG_INTERVAL_MS)] - sequencer_runtime_metrics_log_interval_ms: u64, - #[arg(long, default_value = "info")] - sequencer_rust_log: String, - #[arg(long, default_value_t = DEFAULT_MEMORY_SAMPLE_INTERVAL_MS)] - memory_sample_interval_ms: u64, - #[arg(long, value_enum, default_value_t = CliWorkload::Synthetic)] - workload: CliWorkload, + domain_chain_id: Option, + #[arg(long, value_parser = parse_address)] + domain_verifying_contract: Option
, + #[arg(long, value_enum, default_value_t = WorkloadKind::Synthetic)] + workload: WorkloadKind, #[arg(long)] accounts_file: Option, - #[arg(long, default_value_t = DEFAULT_WORKLOAD_INITIAL_BALANCE)] - initial_balance: u64, #[arg(long, default_value_t = DEFAULT_WORKLOAD_TRANSFER_AMOUNT)] transfer_amount: u64, #[arg(long, default_value_t = 0_u32)] @@ -126,104 +95,76 @@ struct Args { stop_on_first_non_200: bool, } -#[derive(Debug, Clone, Serialize)] -struct SweepRow { - concurrency: usize, - accepted_tps: f64, - accepted_count: u64, - rejected_count: u64, - rejection_rate: f64, - p95_ms: f64, - p99_ms: f64, - p999_ms: f64, - rejection_breakdown: BTreeMap, -} - -#[derive(Debug, Clone, Serialize)] -struct SweepSummary { - tps_at_first_non_200: Option, - tps_at_first_429: Option, - max_sustainable_tps_at_0_rejections: Option, -} - -#[derive(Debug, Clone, Serialize)] -struct SweepJson { - mode: String, - endpoint: String, - count: u64, - max_fee: u32, - from_offset: u64, - rows: Vec, - summary: SweepSummary, - memory: Option, - sequencer_log_path: Option, - inclusion_lane_profile: Option, -} - #[tokio::main] async fn main() -> BenchResult<()> { let args = Args::parse(); + let network_profile = NetworkProfile::same_host_baseline(); + let json_prefix = format!("{}-sweep", args.mode.as_str()); let json_out = args.json_out.clone().or_else(|| { args.self_contained - .then(|| default_json_output_path("sweep")) + .then(|| default_json_output_path(json_prefix.as_str())) }); let concurrencies = resolve_concurrency_list(&args)?; if concurrencies.is_empty() { return Err(std::io::Error::other("concurrency list cannot be empty").into()); } - fs::create_dir_all(args.results_dir.as_str())?; + std::fs::create_dir_all(args.results_dir.as_str())?; let timestamp = timestamp_string(); - let csv_path = format!( + let csv_path = PathBuf::from(format!( "{}/{}-sweep-{}.csv", args.results_dir, args.mode.as_str(), timestamp - ); + )); let mut managed = if args.self_contained { Some( 
ManagedSequencer::spawn(ManagedSequencerConfig { sequencer_bin: args.sequencer_bin.clone(), - start_timeout: Duration::from_millis(args.sequencer_start_timeout_ms), - shutdown_timeout: Duration::from_millis(args.sequencer_shutdown_timeout_ms), - temp_db: args.temp_db, - log_path: args - .sequencer_log_path - .clone() - .or_else(|| Some(default_sequencer_log_path("sweep-self-contained"))), - runtime_metrics_enabled: args.sequencer_runtime_metrics_enabled, - runtime_metrics_log_interval: Duration::from_millis( - args.sequencer_runtime_metrics_log_interval_ms, - ), - rust_log: args.sequencer_rust_log.clone(), + log_prefix: "sweep-self-contained", }) .await?, ) } else { None }; + let domain = if let Some(value) = managed.as_ref() { + if args.domain_chain_id.is_some() || args.domain_verifying_contract.is_some() { + return Err(std::io::Error::other( + "self-contained benchmarks use the deployed local Application; remove explicit --domain-* args", + ) + .into()); + } + value.domain() + } else { + resolve_external_benchmark_domain(args.domain_chain_id, args.domain_verifying_contract)? 
+ }; let endpoint = managed .as_ref() .map(|value| value.endpoint.clone()) .unwrap_or_else(|| args.endpoint.clone()); - let ws_subscribe_url = managed.as_ref().map(|value| value.ws_subscribe_url.clone()); let sequencer_log_path = managed .as_ref() .map(|value| value.log_path().to_string_lossy().to_string()); let memory_sampler = managed.as_ref().and_then(|value| value.pid()).map(|pid| { - MemorySampler::start(pid, Duration::from_millis(args.memory_sample_interval_ms)) + MemorySampler::start( + pid, + Duration::from_millis(DEFAULT_MEMORY_SAMPLE_INTERVAL_MS), + ) }); println!( - "starting {} sweep: endpoint={} self_contained={} count={} max_fee={} workload={:?} stop_on_first_non_200={} concs={:?}", + "starting {} sweep: endpoint={} self_contained={} domain_chain_id={} domain_verifying_contract={} count={} max_fee={} workload={} stop_on_first_non_200={} concs={:?}", args.mode.as_str(), endpoint, args.self_contained, + domain.chain_id, + domain.verifying_contract, args.count, args.max_fee, - args.workload, + args.workload.as_str(), args.stop_on_first_non_200, concurrencies ); @@ -235,9 +176,8 @@ async fn main() -> BenchResult<()> { let mut seed_offset = default_seed_offset(); let workload = WorkloadConfig { - kind: args.workload.into(), + kind: args.workload, accounts_file: args.accounts_file.clone(), - initial_balance: args.initial_balance, transfer_amount: args.transfer_amount, }; @@ -255,35 +195,35 @@ async fn main() -> BenchResult<()> { SweepMode::Ack => { let config = AckRunConfig { endpoint: endpoint.clone(), + domain, count: args.count, concurrency, seed_offset, max_fee: args.max_fee, request_timeout_ms: 3_000, - progress_every: 500, fail_on_rejection: false, workload: workload.clone(), }; run_ack_benchmark(config).await.map(|report| { seed_offset = seed_offset.saturating_add(args.count); print_ack_report(&report); - SweepRow { + SweepRow::new( concurrency, - accepted_tps: tx_per_second(report.accepted as usize, report.total_wall), - accepted_count: 
report.accepted, - rejected_count: report.rejected, - rejection_rate: report.rejection_rate, - p95_ms: report.ack_latency_accepted.p95.as_secs_f64() * 1000.0, - p99_ms: report.ack_latency_accepted.p99.as_secs_f64() * 1000.0, - p999_ms: report.ack_latency_accepted.p999.as_secs_f64() * 1000.0, - rejection_breakdown: report.rejection_breakdown, - } + tx_per_second(report.accepted as usize, report.total_wall), + report.accepted, + report.rejected, + report.rejection_rate, + report.ack_latency_accepted.p95.as_secs_f64() * 1000.0, + report.ack_latency_accepted.p99.as_secs_f64() * 1000.0, + report.ack_latency_accepted.p999.as_secs_f64() * 1000.0, + report.rejection_breakdown, + ) }) } SweepMode::E2e => { let config = E2eRunConfig { endpoint: endpoint.clone(), - ws_subscribe_url: ws_subscribe_url.clone(), + domain, from_offset: current_from_offset, count: args.count, concurrency, @@ -291,10 +231,6 @@ async fn main() -> BenchResult<()> { max_fee: args.max_fee, request_timeout_ms: args.e2e_request_timeout_ms, max_ws_wait_ms: args.e2e_max_ws_wait_ms, - progress_every: 500, - drain_backlog_before_bench: true, - backlog_drain_idle_ms: 25, - backlog_drain_max_ms: 2_000, fail_on_rejection: false, workload: workload.clone(), }; @@ -303,17 +239,17 @@ async fn main() -> BenchResult<()> { current_from_offset = current_from_offset.saturating_add(report.consumed_ws_events_total); print_e2e_report(&report); - SweepRow { + SweepRow::new( concurrency, - accepted_tps: tx_per_second(report.accepted as usize, report.total_wall), - accepted_count: report.accepted, - rejected_count: report.rejected, - rejection_rate: report.rejection_rate, - p95_ms: report.e2e_latency_accepted.p95.as_secs_f64() * 1000.0, - p99_ms: report.e2e_latency_accepted.p99.as_secs_f64() * 1000.0, - p999_ms: report.e2e_latency_accepted.p999.as_secs_f64() * 1000.0, - rejection_breakdown: report.rejection_breakdown, - } + tx_per_second(report.accepted as usize, report.total_wall), + report.accepted, + report.rejected, + 
report.rejection_rate, + report.e2e_latency_accepted.p95.as_secs_f64() * 1000.0, + report.e2e_latency_accepted.p99.as_secs_f64() * 1000.0, + report.e2e_latency_accepted.p999.as_secs_f64() * 1000.0, + report.rejection_breakdown, + ) }) } }; @@ -321,7 +257,7 @@ async fn main() -> BenchResult<()> { match result { Ok(row) => { total_accepted = total_accepted.saturating_add(row.accepted_count); - let should_stop = args.stop_on_first_non_200 && row.rejected_count > 0; + let should_stop = args.stop_on_first_non_200 && row.has_http_rejection(); rows.push(row); if should_stop { println!("stopping sweep at first non-200 response"); @@ -357,78 +293,53 @@ async fn main() -> BenchResult<()> { return shutdown_result; } } - let inclusion_lane_profile = if let Some(path) = sequencer_log_path.as_ref() { - parse_inclusion_lane_profile_from_log(PathBuf::from(path).as_path())? - } else { - None - }; - if let Some(err) = run_error { return Err(err); } - write_csv(csv_path.as_str(), rows.as_slice())?; + write_sweep_csv(csv_path.as_path(), rows.as_slice())?; let summary = compute_capacity_summary(rows.as_slice()); + let report = SweepRunReport { + rows, + summary, + memory: memory_report, + sequencer_log_path, + }; println!(); - println!("sweep csv: {csv_path}"); - println!( - "tps_at_first_non_200: {}", - format_optional(summary.tps_at_first_non_200) - ); - println!( - "tps_at_first_429: {}", - format_optional(summary.tps_at_first_429) - ); - println!( - "max_sustainable_tps_at_0_rejections: {}", - format_optional(summary.max_sustainable_tps_at_0_rejections) - ); - if let Some(memory) = memory_report.as_ref() { - benchmarks::print_memory_report(memory); - } - if let Some(path) = sequencer_log_path.as_ref() { - println!("sequencer_log_path: {path}"); - } - if let Some(profile) = inclusion_lane_profile.as_ref() { - println!("inclusion_lane_profile:"); - println!(" samples: {}", profile.samples); - println!( - " latest_user_op_app_share_pct_of_app_plus_persist: {}", - 
format_optional(profile.latest_user_op_app_share_pct_of_app_plus_persist) - ); - println!( - " latest_user_op_persist_share_pct_of_app_plus_persist: {}", - format_optional(profile.latest_user_op_persist_share_pct_of_app_plus_persist) - ); - println!( - " avg_user_op_app_share_pct_of_app_plus_persist: {}", - format_optional(profile.avg_user_op_app_share_pct_of_app_plus_persist) - ); - println!( - " avg_user_op_persist_share_pct_of_app_plus_persist: {}", - format_optional(profile.avg_user_op_persist_share_pct_of_app_plus_persist) - ); - } + println!("sweep csv: {}", csv_path.display()); + print_sweep_report(&report); if let Some(path) = json_out.as_ref() { - let json_path = PathBuf::from(path); - if let Some(parent) = json_path.parent() { - fs::create_dir_all(parent)?; - } - let payload = SweepJson { - mode: args.mode.as_str().to_string(), - endpoint, - count: args.count, - max_fee: args.max_fee, - from_offset: args.from_offset, - rows, - summary, - memory: memory_report, - sequencer_log_path, - inclusion_lane_profile, - }; - fs::write(path, serde_json::to_vec_pretty(&payload)?)?; + let config_json = json!({ + "mode": args.mode.as_str(), + "endpoint": endpoint, + "self_contained": args.self_contained, + "domain_name": DOMAIN_NAME, + "domain_version": DOMAIN_VERSION, + "domain_chain_id": domain.chain_id, + "domain_verifying_contract": domain.verifying_contract.to_string(), + "count": args.count, + "max_fee": args.max_fee, + "from_offset": args.from_offset, + "results_dir": args.results_dir, + "stop_on_first_non_200": args.stop_on_first_non_200, + "network_profile": network_profile, + "workload": args.workload.as_str(), + "accounts_file": args.accounts_file, + "transfer_amount": args.transfer_amount, + "concurrency_list": concurrencies, + "e2e_request_timeout_ms": args.e2e_request_timeout_ms, + "e2e_max_ws_wait_ms": args.e2e_max_ws_wait_ms, + "csv_path": csv_path, + }); + write_json_output( + Path::new(path), + "sweep", + &config_json, + &report, + 
Option::<&serde_json::Value>::None, + )?; println!("sweep json: {path}"); } Ok(()) @@ -488,60 +399,6 @@ fn parse_concurrency_range(value: &str) -> BenchResult> { Ok(out) } -fn write_csv(path: &str, rows: &[SweepRow]) -> BenchResult<()> { - let mut out = String::from( - "concurrency,accepted_tps,accepted_count,rejected_count,rejection_rate,p95_ms,p99_ms,p999_ms\n", - ); - for row in rows { - out.push_str( - format!( - "{},{:.6},{},{},{:.6},{:.6},{:.6},{:.6}\n", - row.concurrency, - row.accepted_tps, - row.accepted_count, - row.rejected_count, - row.rejection_rate, - row.p95_ms, - row.p99_ms, - row.p999_ms, - ) - .as_str(), - ); - } - fs::write(path, out)?; - Ok(()) -} - -fn compute_capacity_summary(rows: &[SweepRow]) -> SweepSummary { - let tps_at_first_non_200 = rows - .iter() - .find(|row| row.rejected_count > 0) - .map(|row| row.accepted_tps); - - let tps_at_first_429 = rows - .iter() - .find(|row| { - row.rejection_breakdown - .get("http_429") - .copied() - .unwrap_or(0) - > 0 - }) - .map(|row| row.accepted_tps); - - let max_sustainable_tps_at_0_rejections = rows - .iter() - .filter(|row| row.rejected_count == 0) - .map(|row| row.accepted_tps) - .max_by(|a, b| a.total_cmp(b)); - - SweepSummary { - tps_at_first_non_200, - tps_at_first_429, - max_sustainable_tps_at_0_rejections, - } -} - fn tx_per_second(count: usize, total_wall: std::time::Duration) -> f64 { if total_wall.is_zero() { 0.0 @@ -558,21 +415,6 @@ fn timestamp_string() -> String { secs.to_string() } -fn default_json_output_path(prefix: &str) -> String { - let ts = SystemTime::now() - .duration_since(UNIX_EPOCH) - .map(|value| value.as_secs()) - .unwrap_or(0); - format!("benchmarks/results/{prefix}-{ts}.json") -} - -fn format_optional(value: Option) -> String { - match value { - Some(v) => format!("{v:.2}"), - None => "not reached".to_string(), - } -} - fn fd_soft_limit_string() -> String { #[cfg(unix)] { @@ -590,86 +432,3 @@ fn fd_soft_limit_string() -> String { "n/a".to_string() } } - -#[cfg(test)] 
-mod tests { - use super::{SweepRow, compute_capacity_summary}; - use std::collections::BTreeMap; - - #[test] - fn capacity_summary_equal_case() { - let rows = vec![ - SweepRow { - concurrency: 1, - accepted_tps: 10.0, - accepted_count: 100, - rejected_count: 0, - rejection_rate: 0.0, - p95_ms: 1.0, - p99_ms: 1.0, - p999_ms: 1.0, - rejection_breakdown: BTreeMap::new(), - }, - SweepRow { - concurrency: 2, - accepted_tps: 20.0, - accepted_count: 100, - rejected_count: 1, - rejection_rate: 1.0, - p95_ms: 2.0, - p99_ms: 2.0, - p999_ms: 2.0, - rejection_breakdown: BTreeMap::from([("http_429".to_string(), 1_u64)]), - }, - ]; - - let summary = compute_capacity_summary(rows.as_slice()); - assert_eq!(summary.tps_at_first_non_200, Some(20.0)); - assert_eq!(summary.tps_at_first_429, Some(20.0)); - assert_eq!(summary.max_sustainable_tps_at_0_rejections, Some(10.0)); - } - - #[test] - fn capacity_summary_diverging_case() { - let rows = vec![ - SweepRow { - concurrency: 1, - accepted_tps: 10.0, - accepted_count: 100, - rejected_count: 0, - rejection_rate: 0.0, - p95_ms: 1.0, - p99_ms: 1.0, - p999_ms: 1.0, - rejection_breakdown: BTreeMap::new(), - }, - SweepRow { - concurrency: 2, - accepted_tps: 18.0, - accepted_count: 100, - rejected_count: 1, - rejection_rate: 1.0, - p95_ms: 1.5, - p99_ms: 1.5, - p999_ms: 1.5, - rejection_breakdown: BTreeMap::from([("http_422".to_string(), 1_u64)]), - }, - SweepRow { - concurrency: 4, - accepted_tps: 25.0, - accepted_count: 100, - rejected_count: 1, - rejection_rate: 1.0, - p95_ms: 2.0, - p99_ms: 2.0, - p999_ms: 2.0, - rejection_breakdown: BTreeMap::from([("http_429".to_string(), 1_u64)]), - }, - ]; - - let summary = compute_capacity_summary(rows.as_slice()); - assert_eq!(summary.tps_at_first_non_200, Some(18.0)); - assert_eq!(summary.tps_at_first_429, Some(25.0)); - assert_eq!(summary.max_sustainable_tps_at_0_rejections, Some(10.0)); - } -} diff --git a/benchmarks/src/bin/unit_hot_path.rs b/benchmarks/src/bin/unit_hot_path.rs index 
ebecc68..accacc3 100644 --- a/benchmarks/src/bin/unit_hot_path.rs +++ b/benchmarks/src/bin/unit_hot_path.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 (see LICENSE) use benchmarks::{ - BenchResult, default_domain, make_signed_fixture, now, print_stats, summarize, + BenchResult, make_signed_fixture, now, print_stats, self_contained_domain, summarize, throughput_tx_per_s, }; use clap::Parser; @@ -27,7 +27,7 @@ fn main() -> BenchResult<()> { "unit config: count={}, max_fee={}", args.count, args.max_fee ); - let domain = default_domain(); + let domain = self_contained_domain().eip712_domain(); let mut fixture_build_samples = Vec::with_capacity(args.count as usize); let mut json_encode_samples = Vec::with_capacity(args.count as usize); diff --git a/benchmarks/src/domain.rs b/benchmarks/src/domain.rs new file mode 100644 index 0000000..c78c62a --- /dev/null +++ b/benchmarks/src/domain.rs @@ -0,0 +1,115 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use alloy_primitives::{Address, U256}; +use alloy_sol_types::Eip712Domain; +use serde::{Deserialize, Serialize}; + +use crate::{BenchResult, support::err}; + +pub const DEFAULT_ENDPOINT: &str = "http://127.0.0.1:3000"; +pub const DOMAIN_NAME: &str = "CartesiAppSequencer"; +pub const DOMAIN_VERSION: &str = "1"; +pub const SELF_CONTAINED_DOMAIN_CHAIN_ID: u64 = 31_337; +pub const SELF_CONTAINED_DOMAIN_VERIFYING_CONTRACT: &str = + "0x1111111111111111111111111111111111111111"; + +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +pub struct BenchmarkDomain { + pub chain_id: u64, + pub verifying_contract: Address, +} + +impl BenchmarkDomain { + pub fn eip712_domain(self) -> Eip712Domain { + Eip712Domain { + name: Some(DOMAIN_NAME.to_string().into()), + version: Some(DOMAIN_VERSION.to_string().into()), + chain_id: Some(U256::from(self.chain_id)), + verifying_contract: Some(self.verifying_contract), + salt: None, + } + } +} + +pub fn parse_address(raw: 
&str) -> Result { + if !raw.starts_with("0x") { + return Err("verifying contract must be 0x-prefixed".to_string()); + } + + let bytes = + alloy_primitives::hex::decode(raw).map_err(|err| format!("invalid address hex: {err}"))?; + if bytes.len() != 20 { + return Err("verifying contract must be 20 bytes".to_string()); + } + Ok(Address::from_slice(&bytes)) +} + +pub fn resolve_external_benchmark_domain( + domain_chain_id: Option, + domain_verifying_contract: Option
, +) -> BenchResult { + let chain_id = domain_chain_id.ok_or_else(|| { + err("external benchmarks require --domain-chain-id to match the target sequencer") + })?; + let verifying_contract = domain_verifying_contract.ok_or_else(|| { + err("external benchmarks require --domain-verifying-contract to match the target sequencer") + })?; + + Ok(BenchmarkDomain { + chain_id, + verifying_contract, + }) +} + +pub fn self_contained_domain() -> BenchmarkDomain { + BenchmarkDomain { + chain_id: SELF_CONTAINED_DOMAIN_CHAIN_ID, + // Used only by offline/unit benchmark paths that do not spawn a local chain. + verifying_contract: parse_address(SELF_CONTAINED_DOMAIN_VERIFYING_CONTRACT) + .expect("self-contained verifying contract is valid"), + } +} + +#[cfg(test)] +mod tests { + use super::{ + SELF_CONTAINED_DOMAIN_CHAIN_ID, SELF_CONTAINED_DOMAIN_VERIFYING_CONTRACT, + resolve_external_benchmark_domain, self_contained_domain, + }; + use crate::parse_address; + + #[test] + fn external_domain_requires_explicit_inputs() { + let error = + resolve_external_benchmark_domain(None, None).expect_err("missing external domain"); + assert!(error.to_string().contains("--domain-chain-id")); + } + + #[test] + fn external_domain_uses_explicit_inputs() { + let domain = resolve_external_benchmark_domain( + Some(SELF_CONTAINED_DOMAIN_CHAIN_ID), + Some( + parse_address("0x1111111111111111111111111111111111111111") + .expect("valid verifying contract"), + ), + ) + .expect("external domain"); + assert_eq!(domain.chain_id, SELF_CONTAINED_DOMAIN_CHAIN_ID); + assert_eq!( + domain.verifying_contract.to_string(), + "0x1111111111111111111111111111111111111111" + ); + } + + #[test] + fn self_contained_domain_is_stable_for_offline_paths() { + let domain = self_contained_domain(); + assert_eq!(domain.chain_id, SELF_CONTAINED_DOMAIN_CHAIN_ID); + assert_eq!( + domain.verifying_contract.to_string(), + SELF_CONTAINED_DOMAIN_VERIFYING_CONTRACT + ); + } +} diff --git a/benchmarks/src/e2e.rs b/benchmarks/src/e2e.rs new 
file mode 100644 index 0000000..35a1463 --- /dev/null +++ b/benchmarks/src/e2e.rs @@ -0,0 +1,329 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use futures_util::{StreamExt, future::join_all}; +use serde::{Deserialize, Serialize}; +use std::collections::{BTreeMap, HashMap}; +use std::time::{Duration, Instant}; +use tokio_tungstenite::tungstenite::Message; +use tokio_tungstenite::{MaybeTlsStream, WebSocketStream, connect_async}; + +use sequencer_core::api::WsTxMessage; +use sequencer_rust_client::SequencerClient; + +use crate::{ + BenchResult, + domain::BenchmarkDomain, + rejection::classify_rejection, + runtime, + stats::{Stats, rejection_rate, summarize}, + support::{DEFAULT_PROGRESS_EVERY, io_err, now}, + workload::{WorkloadConfig, WorkloadState}, +}; + +const DEFAULT_BACKLOG_DRAIN_IDLE_MS: u64 = 25; +const DEFAULT_BACKLOG_DRAIN_MAX_MS: u64 = 2_000; + +#[derive(Debug, Clone)] +pub struct E2eRunConfig { + pub endpoint: String, + pub domain: BenchmarkDomain, + pub from_offset: u64, + pub count: u64, + pub concurrency: usize, + pub seed_offset: u64, + pub max_fee: u32, + pub request_timeout_ms: u64, + pub max_ws_wait_ms: u64, + pub fail_on_rejection: bool, + pub workload: WorkloadConfig, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct E2eRunReport { + pub count: u64, + pub endpoint: String, + pub ws_subscribe_url: String, + pub concurrency: usize, + pub accepted: u64, + pub rejected: u64, + pub rejection_rate: f64, + pub rejection_breakdown: BTreeMap, + pub first_rejection: Option, + pub drained_ws_backlog_events: u64, + pub consumed_ws_events_total: u64, + pub total_wall: Duration, + pub ack_latency_accepted: Stats, + pub ack_latency_rejected: Option, + pub e2e_latency_accepted: Stats, + pub memory: Option, + pub sequencer_log_path: Option, +} + +pub async fn run_e2e_benchmark(config: E2eRunConfig) -> BenchResult { + let timeout = Duration::from_millis(config.request_timeout_ms); + 
let client = SequencerClient::new_with_timeout(config.endpoint.clone(), timeout) + .map_err(|e| crate::support::err(format!("invalid endpoint '{}': {e}", config.endpoint)))?; + let ws_subscribe_url = client.ws_subscribe_url(config.from_offset); + let domain = config.domain.eip712_domain(); + let mut workload = WorkloadState::initialize(&config.workload, config.seed_offset)?; + let effective_concurrency = if let Some(cap) = workload.concurrency_cap() { + let capped = config.concurrency.min(cap); + if capped < config.concurrency { + println!( + "workload concurrency capped: requested={}, effective={}, funded_accounts={}", + config.concurrency, capped, cap + ); + } + capped + } else { + config.concurrency + }; + + let mut ws = connect_async(ws_subscribe_url.as_str()) + .await + .map(|(stream, _)| stream) + .map_err(|e| { + io_err(format!( + "ws connect failed: url={ws_subscribe_url}, error={e}" + )) + })?; + let mut consumed_ws_events_total = 0_u64; + + let drained_ws_backlog_events = drain_existing_ws_backlog( + &mut ws, + Duration::from_millis(DEFAULT_BACKLOG_DRAIN_IDLE_MS), + Duration::from_millis(DEFAULT_BACKLOG_DRAIN_MAX_MS), + ) + .await?; + consumed_ws_events_total = consumed_ws_events_total.saturating_add(drained_ws_backlog_events); + println!("drained_ws_backlog_events: {drained_ws_backlog_events}"); + + let mut accepted_ack_samples = Vec::with_capacity(config.count as usize); + let mut rejected_ack_samples = Vec::new(); + let mut e2e_samples = Vec::with_capacity(config.count as usize); + let mut accepted = 0_u64; + let mut rejected = 0_u64; + let mut first_rejection: Option = None; + let mut rejection_breakdown = BTreeMap::::new(); + let started = now(); + + let mut processed = 0_u64; + while processed < config.count { + let remaining = config.count.saturating_sub(processed); + let batch_size = remaining.min(effective_concurrency as u64) as usize; + + let mut inflight = Vec::with_capacity(batch_size); + for _ in 0..batch_size { + let fixture = 
workload.next_fixture(config.max_fee, &domain)?; + let match_key = fixture_match_key( + fixture.expected_sender.as_str(), + fixture.expected_data_hex.as_str(), + ); + let client = client.clone(); + let submit_started = now(); + inflight.push(async move { + let outcome = client.submit_tx_with_status(&fixture.request).await; + (match_key, submit_started, submit_started.elapsed(), outcome) + }); + } + + let mut expected_submit_starts = HashMap::>::with_capacity(batch_size); + for (match_key, submit_started, ack_latency, outcome) in join_all(inflight).await { + match classify_rejection(outcome) { + None => { + accepted = accepted.saturating_add(1); + accepted_ack_samples.push(ack_latency); + expected_submit_starts + .entry(match_key) + .or_default() + .push(submit_started); + } + Some(rejection) => { + rejected = rejected.saturating_add(1); + rejected_ack_samples.push(ack_latency); + *rejection_breakdown + .entry(rejection.key.clone()) + .or_insert(0) += 1; + if first_rejection.is_none() { + first_rejection = Some(rejection.detail); + } + } + } + } + + if !expected_submit_starts.is_empty() { + let mut matched = wait_for_matching_user_ops( + &mut ws, + &mut expected_submit_starts, + Duration::from_millis(config.max_ws_wait_ms), + ) + .await?; + consumed_ws_events_total = + consumed_ws_events_total.saturating_add(matched.consumed_events); + e2e_samples.append(&mut matched.e2e_samples); + } + + processed = processed.saturating_add(batch_size as u64); + if DEFAULT_PROGRESS_EVERY > 0 + && processed > 0 + && processed.is_multiple_of(DEFAULT_PROGRESS_EVERY) + { + println!( + "progress: processed={processed}/{}, accepted={accepted}, rejected={rejected}", + config.count + ); + } + } + + if config.fail_on_rejection && rejected > 0 { + let reason = first_rejection + .clone() + .unwrap_or_else(|| "unknown rejection".to_string()); + return Err(std::io::Error::other(format!( + "e2e benchmark saw {rejected} rejection(s): {reason}" + )) + .into()); + } + + if 
accepted_ack_samples.is_empty() { + return Err(std::io::Error::other("e2e benchmark had no accepted txs").into()); + } + if e2e_samples.len() != accepted as usize { + return Err(std::io::Error::other(format!( + "e2e sample mismatch: accepted={accepted}, matched_ws_events={}", + e2e_samples.len() + )) + .into()); + } + + let total_wall = started.elapsed(); + let ack_stats = summarize(accepted_ack_samples.as_slice())?; + let e2e_stats = summarize(e2e_samples.as_slice())?; + let rejected_stats = if rejected_ack_samples.is_empty() { + None + } else { + Some(summarize(rejected_ack_samples.as_slice())?) + }; + + Ok(E2eRunReport { + count: config.count, + endpoint: config.endpoint, + ws_subscribe_url, + concurrency: config.concurrency, + accepted, + rejected, + rejection_rate: rejection_rate(accepted, rejected), + rejection_breakdown, + first_rejection, + drained_ws_backlog_events, + consumed_ws_events_total, + total_wall, + ack_latency_accepted: ack_stats, + ack_latency_rejected: rejected_stats, + e2e_latency_accepted: e2e_stats, + memory: None, + sequencer_log_path: None, + }) +} + +struct MatchResult { + e2e_samples: Vec, + consumed_events: u64, +} + +async fn wait_for_matching_user_ops( + ws: &mut WebSocketStream>, + expected_submit_starts: &mut HashMap>, + max_wait: Duration, +) -> BenchResult { + let deadline = tokio::time::Instant::now() + max_wait; + let expected_total: usize = expected_submit_starts.values().map(Vec::len).sum(); + let mut e2e_samples = Vec::with_capacity(expected_total); + let mut consumed_events = 0_u64; + + while expected_submit_starts + .values() + .any(|entries| !entries.is_empty()) + { + let now = tokio::time::Instant::now(); + if now >= deadline { + let pending: usize = expected_submit_starts.values().map(Vec::len).sum(); + return Err(io_err(format!( + "timed out waiting for {pending} ws event(s)" + ))); + } + let remaining = deadline - now; + let maybe_frame = tokio::time::timeout(remaining, ws.next()) + .await + .map_err(|_| io_err("ws 
timeout"))?; + let frame = maybe_frame + .ok_or_else(|| io_err("ws stream closed"))? + .map_err(|err| io_err(format!("ws frame read failed: {err}")))?; + + let Message::Text(text) = frame else { + continue; + }; + let event: WsTxMessage = serde_json::from_str(text.as_str())?; + consumed_events = consumed_events.saturating_add(1); + + if let WsTxMessage::UserOp { sender, data, .. } = event { + let key = event_match_key(sender.as_str(), data.as_str()); + if let Some(entries) = expected_submit_starts.get_mut(key.as_str()) + && let Some(submit_started) = entries.pop() + { + e2e_samples.push(submit_started.elapsed()); + } + } + } + + Ok(MatchResult { + e2e_samples, + consumed_events, + }) +} + +async fn drain_existing_ws_backlog( + ws: &mut WebSocketStream>, + idle_quiet_window: Duration, + max_total: Duration, +) -> BenchResult { + let mut drained = 0_u64; + let hard_deadline = tokio::time::Instant::now() + max_total; + + loop { + let now = tokio::time::Instant::now(); + if now >= hard_deadline { + break; + } + let remaining_until_deadline = hard_deadline - now; + let poll_timeout = remaining_until_deadline.min(idle_quiet_window); + + match tokio::time::timeout(poll_timeout, ws.next()).await { + Err(_) => break, + Ok(None) => return Err(io_err("ws stream closed while draining backlog")), + Ok(Some(Err(err))) => { + return Err(io_err(format!( + "ws frame read failed while draining backlog: {err}" + ))); + } + Ok(Some(Ok(_))) => { + drained = drained.saturating_add(1); + } + } + } + + Ok(drained) +} + +fn fixture_match_key(sender: &str, data_hex: &str) -> String { + format!( + "{}|{}", + sender.to_ascii_lowercase(), + data_hex.to_ascii_lowercase() + ) +} + +fn event_match_key(sender: &str, data_hex: &str) -> String { + fixture_match_key(sender, data_hex) +} diff --git a/benchmarks/src/evaluation.rs b/benchmarks/src/evaluation.rs new file mode 100644 index 0000000..d684bc7 --- /dev/null +++ b/benchmarks/src/evaluation.rs @@ -0,0 +1,214 @@ +// (c) Cartesi and individual 
authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use serde::{Deserialize, Serialize}; + +use crate::{AckRunReport, E2eRunReport}; + +pub const TARGET_EVALUATION_MIN_ACCEPTED_COUNT: u64 = 5_000; +pub const DIAGNOSTIC_P999_MIN_ACCEPTED_COUNT: u64 = 10_000; +pub const ACK_P99_TARGET_MS: f64 = 500.0; +pub const SOFT_CONFIRM_P99_TARGET_MS: f64 = 1_000.0; + +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum NetworkProfileKind { + SameHostBaseline, + CanonicalNetworkAware, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct NetworkProfile { + pub kind: NetworkProfileKind, + pub shaping_method: String, + pub shaping_config: String, +} + +impl NetworkProfile { + pub fn same_host_baseline() -> Self { + Self { + kind: NetworkProfileKind::SameHostBaseline, + shaping_method: "none".to_string(), + shaping_config: + "no injected latency; benchmark client connects directly to the target sequencer" + .to_string(), + } + } + + fn is_canonical_target_scenario(&self) -> bool { + matches!(self.kind, NetworkProfileKind::CanonicalNetworkAware) + } +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum TargetVerdict { + Pass, + Fail, + NotEvaluated, +} + +impl TargetVerdict { + fn as_line(self) -> &'static str { + match self { + Self::Pass => "PASS", + Self::Fail => "FAIL", + Self::NotEvaluated => "NOT_EVALUATED", + } + } +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum P999Confidence { + DiagnosticLowConfidence, + Sufficient, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TargetEvaluation { + pub label: String, + pub verdict: TargetVerdict, + pub threshold_p99_ms: f64, + pub observed_p99_ms: f64, + pub observed_p999_ms: f64, + pub meets_latency_threshold: bool, + pub zero_rejection: bool, + pub sample_size_valid: bool, + pub accepted_count: u64, + pub rejected_count: 
u64, + pub p999_confidence: P999Confidence, + pub network_profile: NetworkProfile, + pub reason: Option, +} + +pub fn evaluate_ack_target( + report: &AckRunReport, + network_profile: NetworkProfile, +) -> TargetEvaluation { + build_target_evaluation( + "ACK_TARGET", + report.accepted, + report.rejected, + report.ack_latency_accepted.p99.as_secs_f64() * 1000.0, + report.ack_latency_accepted.p999.as_secs_f64() * 1000.0, + ACK_P99_TARGET_MS, + network_profile, + ) +} + +pub fn evaluate_soft_confirm_target( + report: &E2eRunReport, + network_profile: NetworkProfile, +) -> TargetEvaluation { + build_target_evaluation( + "SOFT_CONFIRM_TARGET", + report.accepted, + report.rejected, + report.e2e_latency_accepted.p99.as_secs_f64() * 1000.0, + report.e2e_latency_accepted.p999.as_secs_f64() * 1000.0, + SOFT_CONFIRM_P99_TARGET_MS, + network_profile, + ) +} + +pub fn print_target_evaluation(evaluation: &TargetEvaluation) { + if let Some(reason) = evaluation.reason.as_ref() { + println!( + "{}: {} ({reason})", + evaluation.label, + evaluation.verdict.as_line() + ); + } else { + println!("{}: {}", evaluation.label, evaluation.verdict.as_line()); + } + println!( + " observed_p99_ms: {:.3} (threshold {:.3})", + evaluation.observed_p99_ms, evaluation.threshold_p99_ms + ); + println!(" observed_p99.9_ms: {:.3}", evaluation.observed_p999_ms); + println!( + " latency_threshold_met: {}", + evaluation.meets_latency_threshold + ); + println!(" zero_rejection: {}", evaluation.zero_rejection); + println!(" sample_size_valid: {}", evaluation.sample_size_valid); + println!(" accepted_count: {}", evaluation.accepted_count); + println!(" rejected_count: {}", evaluation.rejected_count); + println!( + " p99.9_confidence: {}", + match evaluation.p999_confidence { + P999Confidence::DiagnosticLowConfidence => "diagnostic-low-confidence", + P999Confidence::Sufficient => "sufficient", + } + ); + println!( + " network_profile: {:?} ({})", + evaluation.network_profile.kind, 
evaluation.network_profile.shaping_method + ); +} + +fn build_target_evaluation( + label: &str, + accepted_count: u64, + rejected_count: u64, + observed_p99_ms: f64, + observed_p999_ms: f64, + threshold_p99_ms: f64, + network_profile: NetworkProfile, +) -> TargetEvaluation { + let zero_rejection = rejected_count == 0; + let sample_size_valid = accepted_count >= TARGET_EVALUATION_MIN_ACCEPTED_COUNT; + let meets_latency_threshold = observed_p99_ms <= threshold_p99_ms; + let p999_confidence = if accepted_count >= DIAGNOSTIC_P999_MIN_ACCEPTED_COUNT { + P999Confidence::Sufficient + } else { + P999Confidence::DiagnosticLowConfidence + }; + + let (verdict, reason) = if !network_profile.is_canonical_target_scenario() { + ( + TargetVerdict::NotEvaluated, + Some( + "canonical network-aware scenario is not configured in this harness yet" + .to_string(), + ), + ) + } else if !sample_size_valid { + ( + TargetVerdict::NotEvaluated, + Some(format!( + "accepted_count={} is below the target-evaluation minimum of {}", + accepted_count, TARGET_EVALUATION_MIN_ACCEPTED_COUNT + )), + ) + } else if !zero_rejection { + ( + TargetVerdict::NotEvaluated, + Some(format!( + "rejected_count={} but target evaluation requires zero rejections", + rejected_count + )), + ) + } else if meets_latency_threshold { + (TargetVerdict::Pass, None) + } else { + (TargetVerdict::Fail, None) + }; + + TargetEvaluation { + label: label.to_string(), + verdict, + threshold_p99_ms, + observed_p99_ms, + observed_p999_ms, + meets_latency_threshold, + zero_rejection, + sample_size_valid, + accepted_count, + rejected_count, + p999_confidence, + network_profile, + reason, + } +} diff --git a/benchmarks/src/lib.rs b/benchmarks/src/lib.rs index fd5f3d8..151e5ba 100644 --- a/benchmarks/src/lib.rs +++ b/benchmarks/src/lib.rs @@ -1,1223 +1,44 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) +mod ack; +mod domain; +mod e2e; +mod evaluation; +mod rejection; +mod report; 
pub mod runtime; - -use alloy_primitives::{Address, Signature, U256}; -use alloy_sol_types::{Eip712Domain, SolStruct}; -use futures_util::StreamExt; -use futures_util::future::join_all; -use k256::ecdsa::SigningKey; -use k256::ecdsa::signature::hazmat::PrehashSigner; -use sequencer_core::api::{TxRequest, TxResponse, WsTxMessage}; -use sequencer_core::application::{Deposit, Method, Transfer, Withdrawal}; -use sequencer_core::user_op::{SignedUserOp, UserOp}; -use sequencer_rust_client::{SequencerClient, SubmitRejected, SubmitTxError}; -use serde::{Deserialize, Serialize}; -use std::collections::{BTreeMap, HashMap}; -use std::error::Error; -use std::fs; -use std::time::{Duration, Instant}; -use std::time::{SystemTime, UNIX_EPOCH}; -use tokio_tungstenite::tungstenite::Message; -use tokio_tungstenite::{MaybeTlsStream, WebSocketStream, connect_async}; - -pub type BenchResult = Result>; -pub const DEFAULT_ENDPOINT: &str = "http://127.0.0.1:3000"; -pub const DEFAULT_WORKLOAD_INITIAL_BALANCE: u64 = 1_000_000; -pub const DEFAULT_WORKLOAD_TRANSFER_AMOUNT: u64 = 1; - -const ANVIL_DEFAULT_PRIVATE_KEYS: [&str; 10] = [ - "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80", - "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d", - "0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a", - "0x7c852118294e51e653712a81e05800f419141751be58f605c371e15141b007a6", - "0x47e179ec197488593b187f80a00eb0da91f1b9d0b13f8733639f19c30a34926a", - "0x8b3a350cf5c34c9194ca85829a2df0ec3153be0318b5e2d3348e872092edffba", - "0x92db14e403b83dfe3df233f83dfa3a0d7096f21ca9b0d6d6b8d88b2b4ec1564e", - "0x4bbbf85ce3377467afe5d46f804f221813b2bb87f24d81f60f1fcdbf7cbf4356", - "0xdbda1821b80551c9d65939329250298aa3472ba22feea921c0cf5d620ea67b97", - "0x2a871d0798f97d79848a013d4936a73bf4cc922c825d33c1cf7073dff6d409c6", -]; - -#[derive(Debug, Clone)] -pub struct SignedTxFixture { - pub request: TxRequest, - pub expected_sender: String, - pub expected_data_hex: String, -} - 
-#[derive(Debug, Clone, Serialize)] -pub struct Stats { - pub count: usize, - pub min: Duration, - pub max: Duration, - pub mean: Duration, - pub p50: Duration, - pub p95: Duration, - pub p99: Duration, - pub p999: Duration, -} - -#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "snake_case")] -pub enum WorkloadKind { - Synthetic, - FundedTransfer, -} - -#[derive(Debug, Clone, Serialize)] -pub struct WorkloadConfig { - pub kind: WorkloadKind, - pub accounts_file: Option, - pub initial_balance: u64, - pub transfer_amount: u64, -} - -impl Default for WorkloadConfig { - fn default() -> Self { - Self { - kind: WorkloadKind::Synthetic, - accounts_file: None, - initial_balance: DEFAULT_WORKLOAD_INITIAL_BALANCE, - transfer_amount: DEFAULT_WORKLOAD_TRANSFER_AMOUNT, - } - } -} - -#[derive(Debug, Clone)] -pub struct AckRunConfig { - pub endpoint: String, - pub count: u64, - pub concurrency: usize, - pub seed_offset: u64, - pub max_fee: u32, - pub request_timeout_ms: u64, - pub progress_every: u64, - pub fail_on_rejection: bool, - pub workload: WorkloadConfig, -} - -#[derive(Debug, Clone, Serialize)] -pub struct AckRunReport { - pub count: u64, - pub endpoint: String, - pub concurrency: usize, - pub accepted: u64, - pub rejected: u64, - pub rejection_rate: f64, - pub rejection_breakdown: BTreeMap, - pub first_rejection: Option, - pub total_wall: Duration, - pub ack_latency_accepted: Stats, - pub ack_latency_rejected: Option, - pub memory: Option, - pub sequencer_log_path: Option, - pub inclusion_lane_profile: Option, -} - -#[derive(Debug, Clone)] -pub struct E2eRunConfig { - pub endpoint: String, - pub ws_subscribe_url: Option, - pub from_offset: u64, - pub count: u64, - pub concurrency: usize, - pub seed_offset: u64, - pub max_fee: u32, - pub request_timeout_ms: u64, - pub max_ws_wait_ms: u64, - pub progress_every: u64, - pub drain_backlog_before_bench: bool, - pub backlog_drain_idle_ms: u64, - pub backlog_drain_max_ms: u64, - pub 
fail_on_rejection: bool, - pub workload: WorkloadConfig, -} - -#[derive(Debug, Clone, Serialize)] -pub struct E2eRunReport { - pub count: u64, - pub endpoint: String, - pub ws_subscribe_url: String, - pub concurrency: usize, - pub accepted: u64, - pub rejected: u64, - pub rejection_rate: f64, - pub rejection_breakdown: BTreeMap, - pub first_rejection: Option, - pub drained_ws_backlog_events: u64, - pub consumed_ws_events_total: u64, - pub total_wall: Duration, - pub ack_latency_accepted: Stats, - pub ack_latency_rejected: Option, - pub e2e_latency_accepted: Stats, - pub memory: Option, - pub sequencer_log_path: Option, - pub inclusion_lane_profile: Option, -} - -struct RejectionOutcome { - key: String, - detail: String, -} - -struct WorkloadState { - inner: WorkloadStateInner, -} - -enum WorkloadStateInner { - Synthetic { - next_seed: u64, - }, - FundedTransfer { - accounts: Vec, - round_robin_index: usize, - transfer_amount: u64, - }, -} - -#[derive(Clone)] -struct FundedAccount { - signing_key: SigningKey, - sender: Address, - next_nonce: u32, -} - -impl WorkloadState { - async fn initialize( - config: &WorkloadConfig, - seed_offset: u64, - client: &SequencerClient, - max_fee: u32, - domain: &Eip712Domain, - ) -> BenchResult { - match config.kind { - WorkloadKind::Synthetic => Ok(Self { - inner: WorkloadStateInner::Synthetic { - next_seed: seed_offset, - }, - }), - WorkloadKind::FundedTransfer => { - let mut accounts = load_funded_accounts(config.accounts_file.as_deref())?; - setup_funded_accounts( - client, - max_fee, - U256::from(config.initial_balance), - domain, - accounts.as_mut_slice(), - ) - .await?; - Ok(Self { - inner: WorkloadStateInner::FundedTransfer { - accounts, - round_robin_index: 0, - transfer_amount: config.transfer_amount, - }, - }) - } - } - } - - fn next_fixture( - &mut self, - max_fee: u32, - domain: &Eip712Domain, - ) -> BenchResult { - match &mut self.inner { - WorkloadStateInner::Synthetic { next_seed } => { - let fixture = 
make_signed_fixture(*next_seed, max_fee, domain)?; - *next_seed = next_seed.wrapping_add(1); - Ok(fixture) - } - WorkloadStateInner::FundedTransfer { - accounts, - round_robin_index, - transfer_amount, - } => { - if accounts.is_empty() { - return Err(err("funded workload has zero accounts")); - } - let sender_index = *round_robin_index % accounts.len(); - let recipient_index = (sender_index + 1) % accounts.len(); - let recipient = accounts[recipient_index].sender; - let sender = &mut accounts[sender_index]; - - let amount = - U256::from((*transfer_amount).saturating_add(u64::from(sender.next_nonce))); - let method = Method::Transfer(Transfer { - amount, - to: recipient, - }); - let data = ssz::Encode::as_ssz_bytes(&method); - if data.len() > SignedUserOp::MAX_METHOD_PAYLOAD_BYTES { - return Err(err(format!( - "funded transfer payload too large: {} > {}", - data.len(), - SignedUserOp::MAX_METHOD_PAYLOAD_BYTES - ))); - } - - let user_op = UserOp { - nonce: sender.next_nonce, - max_fee, - data: data.into(), - }; - let fixture = - make_signed_fixture_from_signing_key(&sender.signing_key, user_op, domain)?; - sender.next_nonce = sender.next_nonce.wrapping_add(1); - *round_robin_index = (*round_robin_index + 1) % accounts.len(); - Ok(fixture) - } - } - } - - fn concurrency_cap(&self) -> Option { - match &self.inner { - WorkloadStateInner::Synthetic { .. } => None, - WorkloadStateInner::FundedTransfer { accounts, .. 
} => Some(accounts.len().max(1)), - } - } -} - -pub async fn run_ack_benchmark(config: AckRunConfig) -> BenchResult { - let domain = default_domain(); - let timeout = Duration::from_millis(config.request_timeout_ms); - let client = SequencerClient::new_with_timeout(config.endpoint.clone(), timeout) - .map_err(|e| err(format!("invalid endpoint '{}': {e}", config.endpoint)))?; - let mut workload = WorkloadState::initialize( - &config.workload, - config.seed_offset, - &client, - config.max_fee, - &domain, - ) - .await?; - let effective_concurrency = if let Some(cap) = workload.concurrency_cap() { - let capped = config.concurrency.min(cap); - if capped < config.concurrency { - println!( - "workload concurrency capped: requested={}, effective={}, funded_accounts={}", - config.concurrency, capped, cap - ); - } - capped - } else { - config.concurrency - }; - let mut accepted_ack_samples = Vec::with_capacity(config.count as usize); - let mut rejected_ack_samples = Vec::new(); - let mut accepted = 0_u64; - let mut rejected = 0_u64; - let mut first_rejection: Option = None; - let mut rejection_breakdown = BTreeMap::::new(); - let started = now(); - - while accepted.saturating_add(rejected) < config.count { - let remaining = config - .count - .saturating_sub(accepted.saturating_add(rejected)); - let batch_size = remaining.min(effective_concurrency as u64) as usize; - - let mut inflight = Vec::with_capacity(batch_size); - for _ in 0..batch_size { - let fixture = workload.next_fixture(config.max_fee, &domain)?; - let client = client.clone(); - let sent_at = now(); - inflight.push(async move { - let outcome = client.submit_tx_with_status(&fixture.request).await; - (sent_at.elapsed(), outcome) - }); - } - - for (ack_latency, outcome) in join_all(inflight).await { - match classify_rejection(outcome) { - None => { - accepted = accepted.saturating_add(1); - accepted_ack_samples.push(ack_latency); - } - Some(rejection) => { - rejected = rejected.saturating_add(1); - 
rejected_ack_samples.push(ack_latency); - *rejection_breakdown - .entry(rejection.key.clone()) - .or_insert(0) += 1; - if first_rejection.is_none() { - first_rejection = Some(rejection.detail); - } - } - } - } - - let processed = accepted.saturating_add(rejected); - if config.progress_every > 0 - && processed > 0 - && processed.is_multiple_of(config.progress_every) - { - println!( - "progress: processed={processed}/{}, accepted={accepted}, rejected={rejected}", - config.count - ); - } - } - - if config.fail_on_rejection && rejected > 0 { - let reason = first_rejection - .clone() - .unwrap_or_else(|| "unknown rejection".to_string()); - return Err(std::io::Error::other(format!( - "ack benchmark saw {rejected} rejection(s): {reason}" - )) - .into()); - } - - if accepted_ack_samples.is_empty() { - return Err(std::io::Error::other("ack benchmark had no accepted txs").into()); - } - - let total_wall = started.elapsed(); - let ack_stats = summarize(accepted_ack_samples.as_slice())?; - let rejected_stats = if rejected_ack_samples.is_empty() { - None - } else { - Some(summarize(rejected_ack_samples.as_slice())?) 
- }; - - Ok(AckRunReport { - count: config.count, - endpoint: config.endpoint, - concurrency: config.concurrency, - accepted, - rejected, - rejection_rate: rejection_rate(accepted, rejected), - rejection_breakdown, - first_rejection, - total_wall, - ack_latency_accepted: ack_stats, - ack_latency_rejected: rejected_stats, - memory: None, - sequencer_log_path: None, - inclusion_lane_profile: None, - }) -} - -pub fn print_ack_report(report: &AckRunReport) { - println!( - "ack benchmark completed: count={}, endpoint={}, concurrency={}", - report.count, report.endpoint, report.concurrency - ); - println!(" accepted: {}", report.accepted); - println!(" rejected: {}", report.rejected); - println!(" rejection_rate: {:.4}%", report.rejection_rate); - println!( - "accepted_completed_per_s: {:.2} tx/s", - throughput_tx_per_s(report.ack_latency_accepted.count, report.total_wall) - ); - if let Some(reason) = report.first_rejection.as_ref() { - println!(" first_rejection: {reason}"); - } - if !report.rejection_breakdown.is_empty() { - println!(" rejection_breakdown:"); - for (key, count) in &report.rejection_breakdown { - println!(" {key}: {count}"); - } - } - print_stats("ack_latency_accepted", &report.ack_latency_accepted); - if let Some(stats) = report.ack_latency_rejected.as_ref() { - print_stats("ack_latency_rejected", stats); - } - if let Some(memory) = report.memory.as_ref() { - print_memory_report(memory); - } - if let Some(path) = report.sequencer_log_path.as_ref() { - println!("sequencer_log_path: {path}"); - } - if let Some(profile) = report.inclusion_lane_profile.as_ref() { - println!("inclusion_lane_profile:"); - println!(" samples: {}", profile.samples); - println!( - " latest_user_op_app_share_pct_of_app_plus_persist: {}", - format_optional_f64(profile.latest_user_op_app_share_pct_of_app_plus_persist) - ); - println!( - " latest_user_op_persist_share_pct_of_app_plus_persist: {}", - format_optional_f64(profile.latest_user_op_persist_share_pct_of_app_plus_persist) - 
); - println!( - " avg_user_op_app_share_pct_of_app_plus_persist: {}", - format_optional_f64(profile.avg_user_op_app_share_pct_of_app_plus_persist) - ); - println!( - " avg_user_op_persist_share_pct_of_app_plus_persist: {}", - format_optional_f64(profile.avg_user_op_persist_share_pct_of_app_plus_persist) - ); - } -} - -pub async fn run_e2e_benchmark(config: E2eRunConfig) -> BenchResult { - let timeout = Duration::from_millis(config.request_timeout_ms); - let client = SequencerClient::new_with_timeout(config.endpoint.clone(), timeout) - .map_err(|e| err(format!("invalid endpoint '{}': {e}", config.endpoint)))?; - let ws_subscribe_url = config - .ws_subscribe_url - .clone() - .map(|base| append_from_offset(base.as_str(), config.from_offset)) - .unwrap_or_else(|| client.ws_subscribe_url(config.from_offset)); - let domain = default_domain(); - let mut workload = WorkloadState::initialize( - &config.workload, - config.seed_offset, - &client, - config.max_fee, - &domain, - ) - .await?; - let effective_concurrency = if let Some(cap) = workload.concurrency_cap() { - let capped = config.concurrency.min(cap); - if capped < config.concurrency { - println!( - "workload concurrency capped: requested={}, effective={}, funded_accounts={}", - config.concurrency, capped, cap - ); - } - capped - } else { - config.concurrency - }; - - let mut ws = if config.ws_subscribe_url.is_some() { - connect_async(ws_subscribe_url.as_str()) - .await - .map(|(stream, _)| stream) - .map_err(|e| { - io_err(format!( - "ws connect failed: url={ws_subscribe_url}, error={e}" - )) - })? - } else { - client.subscribe(config.from_offset).await.map_err(|e| { - io_err(format!( - "ws connect failed: url={ws_subscribe_url}, error={e}" - )) - })? 
- }; - let mut consumed_ws_events_total = 0_u64; - let mut drained_ws_backlog_events = 0_u64; - - if config.drain_backlog_before_bench { - let drained = drain_existing_ws_backlog( - &mut ws, - Duration::from_millis(config.backlog_drain_idle_ms), - Duration::from_millis(config.backlog_drain_max_ms), - ) - .await?; - consumed_ws_events_total = consumed_ws_events_total.saturating_add(drained); - drained_ws_backlog_events = drained; - println!("drained_ws_backlog_events: {drained}"); - } - - let mut accepted_ack_samples = Vec::with_capacity(config.count as usize); - let mut rejected_ack_samples = Vec::new(); - let mut e2e_samples = Vec::with_capacity(config.count as usize); - let mut accepted = 0_u64; - let mut rejected = 0_u64; - let mut first_rejection: Option = None; - let mut rejection_breakdown = BTreeMap::::new(); - let started = now(); - - let mut processed = 0_u64; - while processed < config.count { - let remaining = config.count.saturating_sub(processed); - let batch_size = remaining.min(effective_concurrency as u64) as usize; - - let mut inflight = Vec::with_capacity(batch_size); - for _ in 0..batch_size { - let fixture = workload.next_fixture(config.max_fee, &domain)?; - let match_key = fixture_match_key( - fixture.expected_sender.as_str(), - fixture.expected_data_hex.as_str(), - ); - let client = client.clone(); - let submit_started = now(); - inflight.push(async move { - let outcome = client.submit_tx_with_status(&fixture.request).await; - (match_key, submit_started, submit_started.elapsed(), outcome) - }); - } - - let mut expected_submit_starts = - HashMap::>::with_capacity(batch_size); - for (match_key, submit_started, ack_latency, outcome) in join_all(inflight).await { - match classify_rejection(outcome) { - None => { - accepted = accepted.saturating_add(1); - accepted_ack_samples.push(ack_latency); - expected_submit_starts - .entry(match_key) - .or_default() - .push(submit_started); - } - Some(rejection) => { - rejected = rejected.saturating_add(1); - 
rejected_ack_samples.push(ack_latency); - *rejection_breakdown - .entry(rejection.key.clone()) - .or_insert(0) += 1; - if first_rejection.is_none() { - first_rejection = Some(rejection.detail); - } - } - } - } - - if !expected_submit_starts.is_empty() { - let mut matched = wait_for_matching_user_ops( - &mut ws, - &mut expected_submit_starts, - Duration::from_millis(config.max_ws_wait_ms), - ) - .await?; - consumed_ws_events_total = - consumed_ws_events_total.saturating_add(matched.consumed_events); - e2e_samples.append(&mut matched.e2e_samples); - } - - processed = processed.saturating_add(batch_size as u64); - if config.progress_every > 0 - && processed > 0 - && processed.is_multiple_of(config.progress_every) - { - println!( - "progress: processed={processed}/{}, accepted={accepted}, rejected={rejected}", - config.count - ); - } - } - - if config.fail_on_rejection && rejected > 0 { - let reason = first_rejection - .clone() - .unwrap_or_else(|| "unknown rejection".to_string()); - return Err(std::io::Error::other(format!( - "e2e benchmark saw {rejected} rejection(s): {reason}" - )) - .into()); - } - - if accepted_ack_samples.is_empty() { - return Err(std::io::Error::other("e2e benchmark had no accepted txs").into()); - } - if e2e_samples.len() != accepted as usize { - return Err(std::io::Error::other(format!( - "e2e sample mismatch: accepted={accepted}, matched_ws_events={}", - e2e_samples.len() - )) - .into()); - } - - let total_wall = started.elapsed(); - let ack_stats = summarize(accepted_ack_samples.as_slice())?; - let e2e_stats = summarize(e2e_samples.as_slice())?; - let rejected_stats = if rejected_ack_samples.is_empty() { - None - } else { - Some(summarize(rejected_ack_samples.as_slice())?) 
- }; - - Ok(E2eRunReport { - count: config.count, - endpoint: config.endpoint, - ws_subscribe_url, - concurrency: config.concurrency, - accepted, - rejected, - rejection_rate: rejection_rate(accepted, rejected), - rejection_breakdown, - first_rejection, - drained_ws_backlog_events, - consumed_ws_events_total, - total_wall, - ack_latency_accepted: ack_stats, - ack_latency_rejected: rejected_stats, - e2e_latency_accepted: e2e_stats, - memory: None, - sequencer_log_path: None, - inclusion_lane_profile: None, - }) -} - -pub fn print_e2e_report(report: &E2eRunReport) { - println!( - "e2e benchmark completed: count={}, endpoint={}, ws={}, concurrency={}", - report.count, report.endpoint, report.ws_subscribe_url, report.concurrency - ); - println!(" accepted: {}", report.accepted); - println!(" rejected: {}", report.rejected); - println!(" rejection_rate: {:.4}%", report.rejection_rate); - println!( - "accepted_completed_per_s: {:.2} tx/s", - throughput_tx_per_s(report.e2e_latency_accepted.count, report.total_wall) - ); - println!( - "consumed_ws_events_total: {}", - report.consumed_ws_events_total - ); - if let Some(reason) = report.first_rejection.as_ref() { - println!(" first_rejection: {reason}"); - } - if !report.rejection_breakdown.is_empty() { - println!(" rejection_breakdown:"); - for (key, count) in &report.rejection_breakdown { - println!(" {key}: {count}"); - } - } - print_stats("ack_latency_accepted", &report.ack_latency_accepted); - if let Some(stats) = report.ack_latency_rejected.as_ref() { - print_stats("ack_latency_rejected", stats); - } - print_stats("e2e_latency_accepted", &report.e2e_latency_accepted); - if let Some(memory) = report.memory.as_ref() { - print_memory_report(memory); - } - if let Some(path) = report.sequencer_log_path.as_ref() { - println!("sequencer_log_path: {path}"); - } - if let Some(profile) = report.inclusion_lane_profile.as_ref() { - println!("inclusion_lane_profile:"); - println!(" samples: {}", profile.samples); - println!( - " 
latest_user_op_app_share_pct_of_app_plus_persist: {}", - format_optional_f64(profile.latest_user_op_app_share_pct_of_app_plus_persist) - ); - println!( - " latest_user_op_persist_share_pct_of_app_plus_persist: {}", - format_optional_f64(profile.latest_user_op_persist_share_pct_of_app_plus_persist) - ); - println!( - " avg_user_op_app_share_pct_of_app_plus_persist: {}", - format_optional_f64(profile.avg_user_op_app_share_pct_of_app_plus_persist) - ); - println!( - " avg_user_op_persist_share_pct_of_app_plus_persist: {}", - format_optional_f64(profile.avg_user_op_persist_share_pct_of_app_plus_persist) - ); - } -} - -pub fn print_memory_report(memory: &runtime::MemoryReport) { - println!("memory:"); - println!(" method: {}", memory.method); - println!(" sample_interval_ms: {}", memory.sample_interval_ms); - println!( - " rss_start_mb: {}", - format_optional_f64(memory.rss_start_mb) - ); - println!(" rss_peak_mb: {}", format_optional_f64(memory.rss_peak_mb)); - println!(" rss_end_mb: {}", format_optional_f64(memory.rss_end_mb)); - println!( - " rss_growth_mb: {}", - format_optional_f64(memory.rss_growth_mb) - ); - println!( - " rss_growth_per_1k_accepted_tx_mb: {}", - format_optional_f64(memory.rss_growth_per_1k_accepted_tx_mb) - ); -} - -pub fn default_domain() -> Eip712Domain { - Eip712Domain { - name: Some("CartesiAppSequencer".to_string().into()), - version: Some("1".to_string().into()), - chain_id: Some(U256::from(1_u64)), - verifying_contract: Some(Address::from_slice(&[0_u8; 20])), - salt: None, - } -} - -pub fn make_signed_fixture( - seed: u64, - max_fee: u32, - domain: &Eip712Domain, -) -> BenchResult { - let signing_key = signing_key_for_seed(seed)?; - let sender = address_from_signing_key(&signing_key); - let method = Method::Withdrawal(Withdrawal { - amount: U256::from(seed.saturating_add(1)), - }); - let data = ssz::Encode::as_ssz_bytes(&method); - if data.len() > SignedUserOp::MAX_METHOD_PAYLOAD_BYTES { - return Err(err(format!( - "benchmark payload too 
large: {} > {}", - data.len(), - SignedUserOp::MAX_METHOD_PAYLOAD_BYTES - ))); - } - let message = UserOp { - nonce: 0, - max_fee, - data: data.clone().into(), - }; - - let fixture = make_signed_fixture_from_signing_key(&signing_key, message, domain)?; - if fixture.expected_sender != sender.to_string() { - return Err(err("unexpected synthetic sender mismatch")); - } - Ok(fixture) -} - -pub async fn submit_tx( - endpoint: &str, - req: &TxRequest, - timeout: Duration, -) -> BenchResult { - let client = SequencerClient::new_with_timeout(endpoint.to_string(), timeout) - .map_err(|e| err(format!("invalid endpoint '{endpoint}': {e}")))?; - match client.submit_tx(req).await { - Ok(response) => Ok(response), - Err(SubmitRejected::Transport(transport_err)) => { - Err(err(format!("tx submit failed: {transport_err}"))) - } - Err(SubmitRejected::Http { status, body }) => Err(err(format!( - "/tx rejected with status {status}: {body}. Hint: sequencer frame fee and payload-size bounds must allow these txs." - ))), - Err(SubmitRejected::Decode(reason)) => { - Err(err(format!("tx submit decode failed: {reason}"))) - } - } -} - -async fn setup_funded_accounts( - client: &SequencerClient, - max_fee: u32, - initial_balance: U256, - domain: &Eip712Domain, - accounts: &mut [FundedAccount], -) -> BenchResult<()> { - for account in accounts { - let method = Method::Deposit(Deposit { - amount: initial_balance, - to: account.sender, - }); - let data = ssz::Encode::as_ssz_bytes(&method); - let user_op = UserOp { - nonce: 0, - max_fee, - data: data.into(), - }; - let fixture = make_signed_fixture_from_signing_key(&account.signing_key, user_op, domain)?; - let (status, body) = client - .submit_tx_with_status(&fixture.request) - .await - .map_err(|e| { - err(format!( - "funded setup tx failed for {}: {e}", - account.sender - )) - })?; - if status != 200 { - return Err(err(format!( - "funded setup tx rejected for {}: status={status}, body={body}. 
Hint: self-contained mode (fresh db) avoids nonce conflicts for funded workload.", - account.sender - ))); - } - account.next_nonce = 1; - } - Ok(()) -} - -fn load_funded_accounts(accounts_file: Option<&str>) -> BenchResult> { - let keys = match accounts_file { - Some(path) => load_private_keys_from_file(path)?, - None => ANVIL_DEFAULT_PRIVATE_KEYS - .iter() - .map(|s| s.to_string()) - .collect(), - }; - if keys.is_empty() { - return Err(err("no private keys available for funded workload")); - } - - let mut accounts = Vec::with_capacity(keys.len()); - for key_hex in keys { - let signing_key = signing_key_from_hex(key_hex.as_str())?; - let sender = address_from_signing_key(&signing_key); - accounts.push(FundedAccount { - signing_key, - sender, - next_nonce: 0, - }); - } - Ok(accounts) -} - -fn load_private_keys_from_file(path: &str) -> BenchResult> { - let contents = fs::read_to_string(path) - .map_err(|e| err(format!("failed reading accounts file '{path}': {e}")))?; - let mut keys = Vec::new(); - for line in contents.lines() { - let mut candidate = line.trim(); - if let Some((_, rhs)) = candidate.split_once(')') { - candidate = rhs.trim(); - } - if candidate.starts_with("0x") && candidate.len() == 66 { - let is_hex = candidate - .as_bytes() - .iter() - .skip(2) - .all(|b| b.is_ascii_hexdigit()); - if is_hex { - keys.push(candidate.to_string()); - } - } - } - if keys.is_empty() { - return Err(err(format!( - "accounts file '{path}' did not contain any 32-byte hex private keys" - ))); - } - Ok(keys) -} - -fn signing_key_from_hex(hex: &str) -> BenchResult { - let bytes = alloy_primitives::hex::decode(hex) - .map_err(|e| err(format!("invalid private key hex '{hex}': {e}")))?; - if bytes.len() != 32 { - return Err(err(format!( - "invalid private key length: expected 32 bytes, got {}", - bytes.len() - ))); - } - let mut key_bytes = [0_u8; 32]; - key_bytes.copy_from_slice(&bytes); - SigningKey::from_bytes((&key_bytes).into()) - .map_err(|e| err(format!("invalid private key 
material: {e}"))) -} - -fn make_signed_fixture_from_signing_key( - signing_key: &SigningKey, - user_op: UserOp, - domain: &Eip712Domain, -) -> BenchResult { - let sender = address_from_signing_key(signing_key); - let signature = sign_user_op(domain, &user_op, signing_key)?; - let data = user_op.data.to_vec(); - Ok(SignedTxFixture { - request: TxRequest { - message: user_op, - signature, - sender: sender.to_string(), - }, - expected_sender: sender.to_string(), - expected_data_hex: alloy_primitives::hex::encode_prefixed(data), - }) -} - -fn fixture_match_key(sender: &str, data_hex: &str) -> String { - format!( - "{}|{}", - sender.to_ascii_lowercase(), - data_hex.to_ascii_lowercase() - ) -} - -fn event_match_key(sender: &str, data_hex: &str) -> String { - fixture_match_key(sender, data_hex) -} - -fn classify_rejection(outcome: Result<(u16, String), SubmitTxError>) -> Option { - match outcome { - Ok((200, _body)) => None, - Ok((status, body)) => Some(RejectionOutcome { - key: format!("http_{status}"), - detail: format!("status={status}, body={body}"), - }), - Err(err) => Some(RejectionOutcome { - key: err.breakdown_key().to_string(), - detail: err.to_string(), - }), - } -} - -pub fn summarize(samples: &[Duration]) -> BenchResult { - if samples.is_empty() { - return Err(err("cannot summarize empty sample set")); - } - - let mut nanos: Vec = samples.iter().map(Duration::as_nanos).collect(); - nanos.sort_unstable(); - let sum: u128 = nanos.iter().copied().sum(); - let count = nanos.len(); - - Ok(Stats { - count, - min: duration_from_nanos(nanos[0]), - max: duration_from_nanos(nanos[count - 1]), - mean: duration_from_nanos(sum / count as u128), - p50: duration_from_nanos(percentile(&nanos, 0.50)), - p95: duration_from_nanos(percentile(&nanos, 0.95)), - p99: duration_from_nanos(percentile(&nanos, 0.99)), - p999: duration_from_nanos(percentile(&nanos, 0.999)), - }) -} - -pub fn print_stats(name: &str, stats: &Stats) { - println!("{name}:"); - println!(" count: {}", 
stats.count); - println!(" min: {}", format_ms(stats.min)); - println!(" p50: {}", format_ms(stats.p50)); - println!(" p95: {}", format_ms(stats.p95)); - println!(" p99: {}", format_ms(stats.p99)); - println!(" p99.9: {}", format_ms(stats.p999)); - println!(" max: {}", format_ms(stats.max)); - println!(" mean: {}", format_ms(stats.mean)); -} - -pub fn throughput_tx_per_s(accepted_count: usize, total_wall: Duration) -> f64 { - if total_wall.is_zero() { - 0.0 - } else { - accepted_count as f64 / total_wall.as_secs_f64() - } -} - -pub fn rejection_rate(accepted: u64, rejected: u64) -> f64 { - let total = accepted.saturating_add(rejected); - if total == 0 { - 0.0 - } else { - (rejected as f64 / total as f64) * 100.0 - } -} - -pub fn now() -> Instant { - Instant::now() -} - -pub fn default_seed_offset() -> u64 { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap_or_default() - .as_nanos() as u64 -} - -fn signing_key_for_seed(seed: u64) -> BenchResult { - let mut bytes = [0_u8; 32]; - bytes[24..32].copy_from_slice(&seed.saturating_add(1).to_be_bytes()); - SigningKey::from_bytes((&bytes).into()) - .map_err(|e| err(format!("build signing key failed: {e}"))) -} - -fn sign_user_op( - domain: &Eip712Domain, - user_op: &UserOp, - signing_key: &SigningKey, -) -> BenchResult { - let hash = user_op.eip712_signing_hash(domain); - let k256_sig = signing_key - .sign_prehash(hash.as_slice()) - .map_err(|e| err(format!("sign user op prehash failed: {e}")))?; - - let expected_sender = address_from_signing_key(signing_key); - let signature = [false, true] - .into_iter() - .map(|parity| Signature::from_signature_and_parity(k256_sig, parity)) - .find(|candidate| { - candidate - .recover_address_from_prehash(&hash) - .ok() - .map(|sender| sender == expected_sender) - .unwrap_or(false) - }) - .ok_or_else(|| err("could not recover parity for signature"))?; - - Ok(alloy_primitives::hex::encode_prefixed(signature.as_bytes())) -} - -fn address_from_signing_key(signing_key: &SigningKey) 
-> Address { - let verifying = signing_key.verifying_key().to_encoded_point(false); - Address::from_raw_public_key(&verifying.as_bytes()[1..]) -} - -fn percentile(sorted_nanos: &[u128], p: f64) -> u128 { - let last = sorted_nanos.len() - 1; - let rank = (p * last as f64).ceil() as usize; - sorted_nanos[rank.min(last)] -} - -fn duration_from_nanos(value: u128) -> Duration { - let nanos = u64::try_from(value).unwrap_or(u64::MAX); - Duration::from_nanos(nanos) -} - -fn format_ms(value: Duration) -> String { - format!("{:.3} ms", value.as_secs_f64() * 1000.0) -} - -fn format_optional_f64(value: Option) -> String { - match value { - Some(v) => format!("{v:.3}"), - None => "n/a".to_string(), - } -} - -fn append_from_offset(base_ws_subscribe_url: &str, from_offset: u64) -> String { - let separator = if base_ws_subscribe_url.contains('?') { - '&' - } else { - '?' - }; - format!("{base_ws_subscribe_url}{separator}from_offset={from_offset}") -} - -struct MatchResult { - e2e_samples: Vec, - consumed_events: u64, -} - -async fn wait_for_matching_user_ops( - ws: &mut WebSocketStream>, - expected_submit_starts: &mut HashMap>, - max_wait: Duration, -) -> BenchResult { - let deadline = tokio::time::Instant::now() + max_wait; - let expected_total: usize = expected_submit_starts.values().map(Vec::len).sum(); - let mut e2e_samples = Vec::with_capacity(expected_total); - let mut consumed_events = 0_u64; - - while expected_submit_starts - .values() - .any(|entries| !entries.is_empty()) - { - let now = tokio::time::Instant::now(); - if now >= deadline { - let pending: usize = expected_submit_starts.values().map(Vec::len).sum(); - return Err(io_err(format!( - "timed out waiting for {pending} ws event(s)" - ))); - } - let remaining = deadline - now; - let maybe_frame = tokio::time::timeout(remaining, ws.next()) - .await - .map_err(|_| io_err("ws timeout"))?; - let frame = maybe_frame - .ok_or_else(|| io_err("ws stream closed"))? 
- .map_err(|err| io_err(format!("ws frame read failed: {err}")))?; - - let Message::Text(text) = frame else { - continue; - }; - let event: WsTxMessage = serde_json::from_str(text.as_str())?; - consumed_events = consumed_events.saturating_add(1); - - if let WsTxMessage::UserOp { sender, data, .. } = event { - let key = event_match_key(sender.as_str(), data.as_str()); - if let Some(entries) = expected_submit_starts.get_mut(key.as_str()) - && let Some(submit_started) = entries.pop() - { - e2e_samples.push(submit_started.elapsed()); - } - } - } - - Ok(MatchResult { - e2e_samples, - consumed_events, - }) -} - -async fn drain_existing_ws_backlog( - ws: &mut WebSocketStream>, - idle_quiet_window: Duration, - max_total: Duration, -) -> BenchResult { - let mut drained = 0_u64; - let hard_deadline = tokio::time::Instant::now() + max_total; - - loop { - let now = tokio::time::Instant::now(); - if now >= hard_deadline { - break; - } - let remaining_until_deadline = hard_deadline - now; - let poll_timeout = remaining_until_deadline.min(idle_quiet_window); - - match tokio::time::timeout(poll_timeout, ws.next()).await { - Err(_) => break, - Ok(None) => return Err(io_err("ws stream closed while draining backlog")), - Ok(Some(Err(err))) => { - return Err(io_err(format!( - "ws frame read failed while draining backlog: {err}" - ))); - } - Ok(Some(Ok(_))) => { - drained = drained.saturating_add(1); - } - } - } - - Ok(drained) -} - -fn io_err(message: impl Into) -> Box { - Box::new(std::io::Error::other(message.into())) -} - -fn err(message: impl Into) -> Box { - Box::new(std::io::Error::other(message.into())) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn summarize_includes_p999() { - let samples: Vec = (1_u64..=10_000).map(Duration::from_micros).collect(); - let stats = summarize(samples.as_slice()).expect("summarize"); - assert_eq!(stats.count, 10_000); - assert!(stats.p999 >= stats.p99); - assert!(stats.p999 <= stats.max); - } - - #[test] - fn 
classify_rejection_maps_http_and_transport() { - let http = classify_rejection(Ok((429, "overloaded".to_string()))).expect("http rejection"); - assert_eq!(http.key, "http_429"); - - let transport = - classify_rejection(Err(SubmitTxError::TimeoutRead)).expect("transport rejection"); - assert_eq!(transport.key, "timeout_read"); - } - - #[test] - fn funded_transfer_round_robin_nonce_progression() { - let mut accounts = Vec::new(); - for key in ANVIL_DEFAULT_PRIVATE_KEYS.iter().take(2) { - let signing_key = signing_key_from_hex(key).expect("signing key"); - accounts.push(FundedAccount { - sender: address_from_signing_key(&signing_key), - signing_key, - next_nonce: 1, - }); - } - - let domain = default_domain(); - let mut state = WorkloadState { - inner: WorkloadStateInner::FundedTransfer { - accounts, - round_robin_index: 0, - transfer_amount: 1, - }, - }; - - let one = state.next_fixture(0, &domain).expect("fixture 1"); - let two = state.next_fixture(0, &domain).expect("fixture 2"); - let three = state.next_fixture(0, &domain).expect("fixture 3"); - - assert_ne!(one.expected_sender, two.expected_sender); - assert_eq!(one.expected_sender, three.expected_sender); - assert_eq!(one.request.message.nonce, 1); - assert_eq!(two.request.message.nonce, 1); - assert_eq!(three.request.message.nonce, 2); - } -} +mod stats; +mod support; +mod sweep; +mod workload; + +pub use ack::{AckRunConfig, AckRunReport, run_ack_benchmark}; +pub use domain::{ + BenchmarkDomain, DEFAULT_ENDPOINT, DOMAIN_NAME, DOMAIN_VERSION, SELF_CONTAINED_DOMAIN_CHAIN_ID, + SELF_CONTAINED_DOMAIN_VERIFYING_CONTRACT, parse_address, resolve_external_benchmark_domain, + self_contained_domain, +}; +pub use e2e::{E2eRunConfig, E2eRunReport, run_e2e_benchmark}; +pub use evaluation::{ + ACK_P99_TARGET_MS, DIAGNOSTIC_P999_MIN_ACCEPTED_COUNT, NetworkProfile, NetworkProfileKind, + P999Confidence, SOFT_CONFIRM_P99_TARGET_MS, TARGET_EVALUATION_MIN_ACCEPTED_COUNT, + TargetEvaluation, TargetVerdict, evaluate_ack_target, 
evaluate_soft_confirm_target, + print_target_evaluation, +}; +pub use report::{ + BenchmarkJsonOutput, default_json_output_path, print_ack_report, print_e2e_report, + print_memory_report, write_json_output, +}; +pub use stats::{Stats, print_stats, rejection_rate, summarize, throughput_tx_per_s}; +pub use support::{default_seed_offset, now}; +pub use sweep::{ + SweepRow, SweepRunReport, SweepSummary, compute_capacity_summary, print_sweep_report, + write_csv as write_sweep_csv, +}; +pub use workload::{ + DEFAULT_WORKLOAD_TRANSFER_AMOUNT, SignedTxFixture, WorkloadConfig, WorkloadKind, + make_signed_fixture, +}; + +pub type BenchResult = Result>; diff --git a/benchmarks/src/rejection.rs b/benchmarks/src/rejection.rs new file mode 100644 index 0000000..726b1b6 --- /dev/null +++ b/benchmarks/src/rejection.rs @@ -0,0 +1,85 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use sequencer_rust_client::SubmitTxError; +use std::collections::BTreeMap; + +#[derive(Debug, Clone)] +pub struct RejectionOutcome { + pub key: String, + pub detail: String, +} + +pub fn classify_rejection( + outcome: Result<(u16, String), SubmitTxError>, +) -> Option { + match outcome { + Ok((200, _body)) => None, + Ok((status, body)) => Some(RejectionOutcome { + key: format!("http_{status}"), + detail: format!("status={status}, body={body}"), + }), + Err(err) => Some(RejectionOutcome { + key: err.breakdown_key().to_string(), + detail: err.to_string(), + }), + } +} + +pub fn http_rejection_count(breakdown: &BTreeMap) -> u64 { + breakdown + .iter() + .filter(|(key, _)| is_http_breakdown_key(key)) + .map(|(_, count)| *count) + .sum() +} + +pub fn http_429_count(breakdown: &BTreeMap) -> u64 { + breakdown.get("http_429").copied().unwrap_or(0) +} + +pub fn client_failure_count(rejected_count: u64, breakdown: &BTreeMap) -> u64 { + rejected_count.saturating_sub(http_rejection_count(breakdown)) +} + +pub fn has_http_rejection(breakdown: &BTreeMap) 
-> bool { + http_rejection_count(breakdown) > 0 +} + +pub fn has_http_429(breakdown: &BTreeMap) -> bool { + http_429_count(breakdown) > 0 +} + +fn is_http_breakdown_key(key: &str) -> bool { + key.starts_with("http_") +} + +#[cfg(test)] +mod tests { + use super::{classify_rejection, client_failure_count, http_429_count, http_rejection_count}; + use sequencer_rust_client::SubmitTxError; + use std::collections::BTreeMap; + + #[test] + fn classify_rejection_maps_http_and_transport() { + let http = classify_rejection(Ok((429, "overloaded".to_string()))).expect("http rejection"); + assert_eq!(http.key, "http_429"); + + let transport = + classify_rejection(Err(SubmitTxError::TimeoutRead)).expect("transport rejection"); + assert_eq!(transport.key, "timeout_read"); + } + + #[test] + fn counts_http_and_client_failures_separately() { + let breakdown = BTreeMap::from([ + ("http_429".to_string(), 2_u64), + ("http_422".to_string(), 3_u64), + ("io_connect".to_string(), 4_u64), + ]); + + assert_eq!(http_rejection_count(&breakdown), 5); + assert_eq!(http_429_count(&breakdown), 2); + assert_eq!(client_failure_count(9, &breakdown), 4); + } +} diff --git a/benchmarks/src/report.rs b/benchmarks/src/report.rs new file mode 100644 index 0000000..2b5c869 --- /dev/null +++ b/benchmarks/src/report.rs @@ -0,0 +1,146 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use serde::{Deserialize, Serialize}; +use serde_json::Value; +use std::fs; +use std::path::Path; +use std::time::{SystemTime, UNIX_EPOCH}; + +use crate::{ + AckRunReport, BenchResult, E2eRunReport, TargetEvaluation, runtime, + stats::{format_optional_f64, print_stats, throughput_tx_per_s}, +}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BenchmarkJsonOutput { + pub benchmark: String, + pub config: Value, + pub report: R, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub evaluation: Option, +} + +pub fn write_json_output( + path: 
&Path, + benchmark: &str, + config: &C, + report: &R, + evaluation: Option<&E>, +) -> BenchResult<()> +where + C: Serialize, + R: Serialize, + E: Serialize, +{ + if let Some(parent) = path.parent() { + fs::create_dir_all(parent)?; + } + let payload = BenchmarkJsonOutput { + benchmark: benchmark.to_string(), + config: serde_json::to_value(config)?, + report, + evaluation, + }; + fs::write(path, serde_json::to_vec_pretty(&payload)?)?; + Ok(()) +} + +pub fn default_json_output_path(prefix: &str) -> String { + let ts = SystemTime::now() + .duration_since(UNIX_EPOCH) + .map(|value| value.as_secs()) + .unwrap_or(0); + format!("benchmarks/results/{prefix}-{ts}.json") +} + +pub fn print_ack_report(report: &AckRunReport) { + println!( + "ack benchmark completed: count={}, endpoint={}, concurrency={}", + report.count, report.endpoint, report.concurrency + ); + println!(" accepted: {}", report.accepted); + println!(" rejected: {}", report.rejected); + println!(" rejection_rate: {:.4}%", report.rejection_rate); + println!( + "accepted_completed_per_s: {:.2} tx/s", + throughput_tx_per_s(report.ack_latency_accepted.count, report.total_wall) + ); + if let Some(reason) = report.first_rejection.as_ref() { + println!(" first_rejection: {reason}"); + } + if !report.rejection_breakdown.is_empty() { + println!(" rejection_breakdown:"); + for (key, count) in &report.rejection_breakdown { + println!(" {key}: {count}"); + } + } + print_stats("ack_latency_accepted", &report.ack_latency_accepted); + if let Some(stats) = report.ack_latency_rejected.as_ref() { + print_stats("ack_latency_rejected", stats); + } + if let Some(memory) = report.memory.as_ref() { + print_memory_report(memory); + } + if let Some(path) = report.sequencer_log_path.as_ref() { + println!("sequencer_log_path: {path}"); + } +} + +pub fn print_e2e_report(report: &E2eRunReport) { + println!( + "e2e benchmark completed: count={}, endpoint={}, ws={}, concurrency={}", + report.count, report.endpoint, report.ws_subscribe_url, 
report.concurrency + ); + println!(" accepted: {}", report.accepted); + println!(" rejected: {}", report.rejected); + println!(" rejection_rate: {:.4}%", report.rejection_rate); + println!( + "accepted_completed_per_s: {:.2} tx/s", + throughput_tx_per_s(report.e2e_latency_accepted.count, report.total_wall) + ); + println!( + "consumed_ws_events_total: {}", + report.consumed_ws_events_total + ); + if let Some(reason) = report.first_rejection.as_ref() { + println!(" first_rejection: {reason}"); + } + if !report.rejection_breakdown.is_empty() { + println!(" rejection_breakdown:"); + for (key, count) in &report.rejection_breakdown { + println!(" {key}: {count}"); + } + } + print_stats("ack_latency_accepted", &report.ack_latency_accepted); + if let Some(stats) = report.ack_latency_rejected.as_ref() { + print_stats("ack_latency_rejected", stats); + } + print_stats("e2e_latency_accepted", &report.e2e_latency_accepted); + if let Some(memory) = report.memory.as_ref() { + print_memory_report(memory); + } + if let Some(path) = report.sequencer_log_path.as_ref() { + println!("sequencer_log_path: {path}"); + } +} + +pub fn print_memory_report(memory: &runtime::MemoryReport) { + println!("memory:"); + println!(" method: {}", memory.method); + println!(" sample_interval_ms: {}", memory.sample_interval_ms); + println!( + " rss_start_mb: {}", + format_optional_f64(memory.rss_start_mb) + ); + println!(" rss_peak_mb: {}", format_optional_f64(memory.rss_peak_mb)); + println!(" rss_end_mb: {}", format_optional_f64(memory.rss_end_mb)); + println!( + " rss_growth_mb: {}", + format_optional_f64(memory.rss_growth_mb) + ); + println!( + " rss_growth_per_1k_accepted_tx_mb: {}", + format_optional_f64(memory.rss_growth_per_1k_accepted_tx_mb) + ); +} diff --git a/benchmarks/src/runtime.rs b/benchmarks/src/runtime.rs index fe7a7df..84f06ec 100644 --- a/benchmarks/src/runtime.rs +++ b/benchmarks/src/runtime.rs @@ -1,9 +1,14 @@ // (c) Cartesi and individual authors (see AUTHORS) // 
SPDX-License-Identifier: Apache-2.0 (see LICENSE) -use serde::Serialize; +use alloy::network::EthereumWallet; +use alloy::providers::ProviderBuilder; +use alloy::signers::local::PrivateKeySigner; +use alloy_primitives::{Address, B256, Bytes}; +use app_core::application::default_private_keys; +use cartesi_rollups_contracts::application_factory::ApplicationFactory; +use serde::{Deserialize, Serialize}; use std::fs::{self, OpenOptions}; -use std::io::{BufRead, BufReader}; use std::path::{Path, PathBuf}; use std::process::Stdio; use std::time::Duration; @@ -14,53 +19,47 @@ use tokio::process::{Child, Command}; use tokio::sync::oneshot; use tokio::task::JoinHandle; -use crate::BenchResult; +use crate::{BenchResult, BenchmarkDomain, SELF_CONTAINED_DOMAIN_CHAIN_ID}; pub const DEFAULT_SEQUENCER_BIN: &str = "target/release/sequencer"; -pub const DEFAULT_SEQUENCER_START_TIMEOUT_MS: u64 = 10_000; -pub const DEFAULT_SEQUENCER_SHUTDOWN_TIMEOUT_MS: u64 = 3_000; pub const DEFAULT_MEMORY_SAMPLE_INTERVAL_MS: u64 = 500; -pub const DEFAULT_RUNTIME_METRICS_LOG_INTERVAL_MS: u64 = 5_000; pub const DEFAULT_SEQUENCER_LOGS_DIR: &str = "benchmarks/results"; - +pub const DEFAULT_ANVIL_STATE_DIR: &str = "benchmarks/.deps/rollups-contracts-2.2.0-anvil-v1.4.3"; +pub const DEFAULT_TEMPLATE_MACHINE_IMAGE_PATH: &str = + "examples/canonical-app/out/canonical-machine-image"; + +const DEFAULT_SEQUENCER_START_TIMEOUT: Duration = Duration::from_secs(10); +const DEFAULT_SEQUENCER_SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(3); +const DEFAULT_SEQUENCER_RUST_LOG: &str = "info"; +const DEFAULT_ANVIL_START_TIMEOUT: Duration = Duration::from_secs(10); +const DEFAULT_ANVIL_SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(3); #[derive(Debug, Clone)] pub struct ManagedSequencerConfig { pub sequencer_bin: String, - pub start_timeout: Duration, - pub shutdown_timeout: Duration, - pub temp_db: bool, - pub log_path: Option, - pub runtime_metrics_enabled: bool, - pub runtime_metrics_log_interval: Duration, - 
pub rust_log: String, + pub log_prefix: &'static str, } pub struct ManagedSequencer { + anvil: ManagedAnvil, child: Child, shutdown_timeout: Duration, temp_dir: Option, pub endpoint: String, - pub ws_subscribe_url: String, - pub db_path: PathBuf, + domain: BenchmarkDomain, log_path: PathBuf, } impl ManagedSequencer { pub async fn spawn(config: ManagedSequencerConfig) -> BenchResult { let (endpoint, http_addr) = build_local_endpoint()?; - let ws_subscribe_url = format!("{}/ws/subscribe", endpoint.replacen("http://", "ws://", 1)); + let anvil = ManagedAnvil::spawn(config.log_prefix).await?; + let domain = anvil.domain(); - let (temp_dir, db_path) = if config.temp_db { - let dir = tempfile::tempdir()?; - let path = dir_path_join(dir.path(), "sequencer.db"); - (Some(dir), path) - } else { - (None, PathBuf::from("sequencer.bench.db")) - }; + let dir = tempfile::tempdir()?; + let db_path = dir_path_join(dir.path(), "sequencer.db"); + let temp_dir = Some(dir); - let log_path = config - .log_path - .unwrap_or_else(|| default_sequencer_log_path("sequencer-self-contained")); + let log_path = default_sequencer_log_path(config.log_prefix); if let Some(parent) = log_path.parent() { fs::create_dir_all(parent)?; } @@ -76,19 +75,13 @@ impl ManagedSequencer { .arg(http_addr) .arg("--db-path") .arg(path_as_str(db_path.as_path())?) 
- .env( - "SEQ_RUNTIME_METRICS_ENABLED", - if config.runtime_metrics_enabled { - "true" - } else { - "false" - }, - ) - .env( - "SEQ_RUNTIME_METRICS_LOG_INTERVAL_MS", - config.runtime_metrics_log_interval.as_millis().to_string(), - ) - .env("RUST_LOG", config.rust_log.as_str()) + .arg("--eth-rpc-url") + .arg(anvil.endpoint.as_str()) + .arg("--domain-chain-id") + .arg(domain.chain_id.to_string()) + .arg("--domain-verifying-contract") + .arg(domain.verifying_contract.to_string()) + .env("RUST_LOG", DEFAULT_SEQUENCER_RUST_LOG) .stdout(Stdio::from(stdout_log)) .stderr(Stdio::from(stderr_log)) .spawn() @@ -99,15 +92,20 @@ impl ManagedSequencer { )) })?; - wait_for_readiness(endpoint.as_str(), &mut child, config.start_timeout).await?; + wait_for_readiness( + endpoint.as_str(), + &mut child, + DEFAULT_SEQUENCER_START_TIMEOUT, + ) + .await?; Ok(Self { + anvil, child, - shutdown_timeout: config.shutdown_timeout, + shutdown_timeout: DEFAULT_SEQUENCER_SHUTDOWN_TIMEOUT, temp_dir, endpoint, - ws_subscribe_url, - db_path, + domain, log_path, }) } @@ -120,8 +118,115 @@ impl ManagedSequencer { self.log_path.as_path() } + pub fn domain(&self) -> BenchmarkDomain { + self.domain + } + pub async fn shutdown(mut self) -> BenchResult<()> { let _ = self.temp_dir.take(); + send_graceful_terminate(&mut self.child).await; + let sequencer_result: BenchResult<()> = + match tokio::time::timeout(self.shutdown_timeout, self.child.wait()).await { + Ok(wait_result) => { + let _ = wait_result?; + Ok(()) + } + Err(_) => { + self.child.start_kill()?; + let _ = self.child.wait().await; + Ok(()) + } + }; + let anvil_result = self.anvil.shutdown().await; + sequencer_result?; + anvil_result + } +} + +struct ManagedAnvil { + child: Child, + shutdown_timeout: Duration, + endpoint: String, + domain: BenchmarkDomain, +} + +impl ManagedAnvil { + async fn spawn(log_prefix: &str) -> BenchResult { + let state_dir = PathBuf::from(DEFAULT_ANVIL_STATE_DIR); + let state_path = dir_path_join(state_dir.as_path(), 
"state.json"); + let deployment_path = dir_path_join(state_dir.as_path(), "deployments/31337/InputBox.json"); + let application_factory_path = dir_path_join( + state_dir.as_path(), + "deployments/31337/ApplicationFactory.json", + ); + + ensure_exists( + state_path.as_path(), + format!("missing {}; run `just setup` first", state_path.display()), + )?; + ensure_exists( + deployment_path.as_path(), + format!( + "missing {}; run `just setup` first", + deployment_path.display() + ), + )?; + ensure_exists( + application_factory_path.as_path(), + format!( + "missing {}; run `just setup` first", + application_factory_path.display() + ), + )?; + + let input_box_address = read_input_box_address(deployment_path.as_path())?; + let application_factory_address = + read_deployment_address(application_factory_path.as_path(), "ApplicationFactory")?; + let (endpoint, http_addr) = build_local_endpoint()?; + let log_path = default_anvil_log_path(log_prefix); + if let Some(parent) = log_path.parent() { + fs::create_dir_all(parent)?; + } + let stdout_log = OpenOptions::new() + .create(true) + .truncate(true) + .write(true) + .open(log_path.as_path())?; + let stderr_log = stdout_log.try_clone()?; + + let mut child = Command::new("anvil") + .arg("--host") + .arg("127.0.0.1") + .arg("--port") + .arg(http_addr.rsplit(':').next().expect("port")) + .arg("--load-state") + .arg(path_as_str(state_path.as_path())?) 
+ .stdout(Stdio::from(stdout_log)) + .stderr(Stdio::from(stderr_log)) + .spawn() + .map_err(|err| io_other(format!("failed to spawn anvil: {err}")))?; + + wait_for_rpc_readiness(endpoint.as_str(), &mut child, DEFAULT_ANVIL_START_TIMEOUT).await?; + let domain = deploy_benchmark_application( + endpoint.as_str(), + application_factory_address, + input_box_address, + ) + .await?; + + Ok(Self { + child, + shutdown_timeout: DEFAULT_ANVIL_SHUTDOWN_TIMEOUT, + endpoint, + domain, + }) + } + + fn domain(&self) -> BenchmarkDomain { + self.domain + } + + async fn shutdown(mut self) -> BenchResult<()> { send_graceful_terminate(&mut self.child).await; match tokio::time::timeout(self.shutdown_timeout, self.child.wait()).await { Ok(wait_result) => { @@ -137,7 +242,7 @@ impl ManagedSequencer { } } -#[derive(Debug, Clone, Serialize, Default)] +#[derive(Debug, Clone, Serialize, Deserialize, Default)] pub struct MemoryReport { pub method: String, pub sample_interval_ms: u64, @@ -153,18 +258,6 @@ pub struct MemorySampler { join: JoinHandle, } -#[derive(Debug, Clone, Serialize, Default)] -pub struct InclusionLaneProfileReport { - pub samples: u64, - pub latest_window_ms: Option, - pub latest_user_op_app_execute_phase_ms: Option, - pub latest_user_op_persist_phase_ms: Option, - pub latest_user_op_app_share_pct_of_app_plus_persist: Option, - pub latest_user_op_persist_share_pct_of_app_plus_persist: Option, - pub avg_user_op_app_share_pct_of_app_plus_persist: Option, - pub avg_user_op_persist_share_pct_of_app_plus_persist: Option, -} - impl MemorySampler { pub fn start(pid: u32, sample_interval: Duration) -> Self { let (stop_tx, stop_rx) = oneshot::channel::(); @@ -260,66 +353,6 @@ fn sample_rss_mb(pid: u32) -> Option { Some(rss_kib / 1024.0) } -pub fn parse_inclusion_lane_profile_from_log( - log_path: &Path, -) -> BenchResult> { - if !log_path.exists() { - return Ok(None); - } - let file = std::fs::File::open(log_path)?; - let reader = BufReader::new(file); - - let mut report = 
InclusionLaneProfileReport::default(); - let mut app_share_sum = 0.0_f64; - let mut persist_share_sum = 0.0_f64; - let mut app_share_samples = 0_u64; - let mut persist_share_samples = 0_u64; - - for line_result in reader.lines() { - let line = line_result?; - let line = strip_ansi_escapes(line.as_str()); - if !line.contains("inclusion lane metrics") { - continue; - } - - report.samples = report.samples.saturating_add(1); - report.latest_window_ms = parse_u64_field(line.as_str(), "window_ms"); - report.latest_user_op_app_execute_phase_ms = - parse_u64_field(line.as_str(), "user_op_app_execute_phase_ms"); - report.latest_user_op_persist_phase_ms = - parse_u64_field(line.as_str(), "user_op_persist_phase_ms"); - report.latest_user_op_app_share_pct_of_app_plus_persist = - parse_f64_field(line.as_str(), "user_op_app_share_pct_of_app_plus_persist"); - report.latest_user_op_persist_share_pct_of_app_plus_persist = parse_f64_field( - line.as_str(), - "user_op_persist_share_pct_of_app_plus_persist", - ); - - if let Some(value) = report.latest_user_op_app_share_pct_of_app_plus_persist { - app_share_sum += value; - app_share_samples = app_share_samples.saturating_add(1); - } - if let Some(value) = report.latest_user_op_persist_share_pct_of_app_plus_persist { - persist_share_sum += value; - persist_share_samples = persist_share_samples.saturating_add(1); - } - } - - if report.samples == 0 { - return Ok(None); - } - if app_share_samples > 0 { - report.avg_user_op_app_share_pct_of_app_plus_persist = - Some(app_share_sum / app_share_samples as f64); - } - if persist_share_samples > 0 { - report.avg_user_op_persist_share_pct_of_app_plus_persist = - Some(persist_share_sum / persist_share_samples as f64); - } - - Ok(Some(report)) -} - pub fn default_sequencer_log_path(prefix: &str) -> PathBuf { let ts = std::time::SystemTime::now() .duration_since(std::time::UNIX_EPOCH) @@ -328,58 +361,14 @@ pub fn default_sequencer_log_path(prefix: &str) -> PathBuf { 
PathBuf::from(format!("{DEFAULT_SEQUENCER_LOGS_DIR}/{prefix}-{ts}.log")) } -fn parse_u64_field(line: &str, key: &str) -> Option { - let needle = format!("{key}="); - line.split_whitespace() - .find_map(|token| token.strip_prefix(needle.as_str())) - .and_then(clean_token_value) - .and_then(|value| value.parse::().ok()) -} - -fn parse_f64_field(line: &str, key: &str) -> Option { - let needle = format!("{key}="); - line.split_whitespace() - .find_map(|token| token.strip_prefix(needle.as_str())) - .and_then(clean_token_value) - .and_then(|value| value.parse::().ok()) -} - -fn clean_token_value(raw: &str) -> Option { - let value = raw - .trim() - .trim_matches(',') - .trim_matches('"') - .trim_matches('\''); - (!value.is_empty()).then(|| value.to_string()) -} - -fn strip_ansi_escapes(input: &str) -> String { - let mut out = String::with_capacity(input.len()); - let bytes = input.as_bytes(); - let mut i = 0_usize; - - while i < bytes.len() { - if bytes[i] == 0x1b { - i += 1; - if i < bytes.len() && bytes[i] == b'[' { - i += 1; - while i < bytes.len() { - let b = bytes[i]; - i += 1; - if b.is_ascii_alphabetic() { - break; - } - } - continue; - } - continue; - } - - out.push(bytes[i] as char); - i += 1; - } - - out +fn default_anvil_log_path(prefix: &str) -> PathBuf { + let ts = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .map(|value| value.as_millis()) + .unwrap_or(0); + PathBuf::from(format!( + "{DEFAULT_SEQUENCER_LOGS_DIR}/{prefix}-anvil-{ts}.log" + )) } async fn wait_for_readiness( @@ -408,6 +397,29 @@ async fn wait_for_readiness( } } +async fn wait_for_rpc_readiness( + endpoint: &str, + child: &mut Child, + timeout: Duration, +) -> BenchResult<()> { + let deadline = tokio::time::Instant::now() + timeout; + loop { + if let Some(status) = child.try_wait()? 
{ + return Err(io_other(format!("anvil exited before readiness: status={status}")).into()); + } + if rpc_endpoint_is_ready(endpoint).await { + return Ok(()); + } + if tokio::time::Instant::now() >= deadline { + return Err(io_other(format!( + "timed out waiting for anvil readiness at {endpoint}" + )) + .into()); + } + tokio::time::sleep(Duration::from_millis(100)).await; + } +} + async fn http_endpoint_is_ready(endpoint: &str) -> bool { let Some(host_port) = endpoint.strip_prefix("http://") else { return false; @@ -433,6 +445,35 @@ async fn http_endpoint_is_ready(endpoint: &str) -> bool { } } +async fn rpc_endpoint_is_ready(endpoint: &str) -> bool { + let Some(host_port) = endpoint.strip_prefix("http://") else { + return false; + }; + let mut stream = + match tokio::time::timeout(Duration::from_millis(300), TcpStream::connect(host_port)).await + { + Ok(Ok(value)) => value, + _ => return false, + }; + + let body = r#"{"jsonrpc":"2.0","method":"eth_chainId","params":[],"id":1}"#; + let request = format!( + "POST / HTTP/1.1\r\nHost: {host_port}\r\nContent-Type: application/json\r\nContent-Length: {}\r\nConnection: close\r\n\r\n{body}", + body.len() + ); + if stream.write_all(request.as_bytes()).await.is_err() { + return false; + } + let mut head = [0_u8; 128]; + match tokio::time::timeout(Duration::from_millis(300), stream.read(&mut head)).await { + Ok(Ok(read)) if read > 0 => std::str::from_utf8(&head[..read]) + .ok() + .map(|text| text.contains("200 OK")) + .unwrap_or(false), + _ => false, + } +} + async fn send_graceful_terminate(child: &mut Child) { let Some(pid) = child.id() else { return; @@ -467,46 +508,181 @@ fn dir_path_join(base: &Path, file: &str) -> PathBuf { path } +fn ensure_exists(path: &Path, missing_message: String) -> BenchResult<()> { + if path.exists() { + Ok(()) + } else { + Err(io_other(missing_message).into()) + } +} + fn path_as_str(path: &Path) -> BenchResult<&str> { path.to_str() .ok_or_else(|| io_other(format!("path is not valid UTF-8: {}", 
path.display())).into()) } +fn read_input_box_address(path: &Path) -> BenchResult<Address>
{ + read_deployment_address(path, "InputBox") +} + +fn read_deployment_address(path: &Path, contract_name: &str) -> BenchResult<Address>
{ + #[derive(Deserialize)] + struct DeploymentInfo { + address: String, + #[serde(rename = "contractName")] + contract_name: String, + } + + let deployment: DeploymentInfo = serde_json::from_str(&fs::read_to_string(path)?) + .map_err(|err| io_other(format!("failed to parse {}: {err}", path.display())))?; + if deployment.contract_name != contract_name { + return Err(io_other(format!( + "expected {} deployment in {}, found {}", + contract_name, + path.display(), + deployment.contract_name + )) + .into()); + } + deployment.address.parse().map_err(|err| { + io_other(format!( + "invalid {} address in {}: {err}", + contract_name, + path.display() + )) + .into() + }) +} + +async fn deploy_benchmark_application( + endpoint: &str, + application_factory_address: Address, + input_box_address: Address, +) -> BenchResult { + let private_key = default_private_keys() + .first() + .ok_or_else(|| io_other("missing default Anvil private key"))?; + let signer: PrivateKeySigner = private_key + .parse() + .map_err(|err| io_other(format!("invalid default Anvil private key: {err}")))?; + let app_owner = signer.address(); + let wallet = EthereumWallet::from(signer); + let provider = ProviderBuilder::new() + .wallet(wallet) + .connect(endpoint) + .await + .map_err(|err| { + io_other(format!( + "failed to connect wallet provider to {endpoint}: {err}" + )) + })?; + + let factory = ApplicationFactory::new(application_factory_address, &provider); + let template_hash = load_template_hash(Path::new(DEFAULT_TEMPLATE_MACHINE_IMAGE_PATH))?; + let data_availability: Bytes = input_box_address.as_slice().to_vec().into(); + let create_application = + factory.newApplication_1(Address::ZERO, app_owner, template_hash, data_availability); + let application_address = create_application + .clone() + .call() + .await + .map_err(|err| io_other(format!("failed to simulate application deployment: {err}")))?; + let receipt = create_application + .send() + .await + .map_err(|err| { + io_other(format!( + "failed to 
send application deployment transaction: {err}" + )) + })? + .get_receipt() + .await + .map_err(|err| { + io_other(format!( + "failed to confirm application deployment transaction: {err}" + )) + })?; + if !receipt.status() { + return Err(io_other("application deployment transaction reverted").into()); + } + + Ok(BenchmarkDomain { + chain_id: SELF_CONTAINED_DOMAIN_CHAIN_ID, + verifying_contract: application_address, + }) +} + +fn load_template_hash(machine_image_path: &Path) -> BenchResult { + ensure_exists( + machine_image_path, + format!( + "missing {}; run `just canonical-build-machine-image` first", + machine_image_path.display() + ), + )?; + + let output = std::process::Command::new("cartesi-machine") + .arg(format!("--load={}", path_as_str(machine_image_path)?)) + .arg("--initial-hash") + .output() + .map_err(|err| { + io_other(format!( + "failed to run cartesi-machine for {}: {err}", + machine_image_path.display() + )) + })?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + return Err(io_other(format!( + "cartesi-machine --initial-hash failed for {}: {}", + machine_image_path.display(), + stderr.trim() + )) + .into()); + } + + let stdout = String::from_utf8(output.stdout) + .map_err(|err| io_other(format!("cartesi-machine stdout was not valid UTF-8: {err}")))?; + let stderr = String::from_utf8(output.stderr) + .map_err(|err| io_other(format!("cartesi-machine stderr was not valid UTF-8: {err}")))?; + let combined_output = format!("{stdout}\n{stderr}"); + let hash = extract_template_hash(&combined_output).ok_or_else(|| { + io_other(format!( + "could not find template hash in cartesi-machine output for {}", + machine_image_path.display() + )) + })?; + + format!("0x{hash}").parse().map_err(|err| { + io_other(format!("invalid template hash from cartesi-machine: {err}")).into() + }) +} + +fn extract_template_hash(output: &str) -> Option<&str> { + const HASH_LEN: usize = 64; + + output.split_whitespace().find_map(|token| { 
+ let trimmed = token.trim_end_matches(':'); + if trimmed.len() >= HASH_LEN { + trimmed.as_bytes().windows(HASH_LEN).find_map(|window| { + std::str::from_utf8(window) + .ok() + .filter(|candidate| candidate.chars().all(|ch| ch.is_ascii_hexdigit())) + }) + } else { + None + } + }) +} + fn io_other(message: impl Into) -> std::io::Error { std::io::Error::other(message.into()) } #[cfg(test)] mod tests { - use super::{default_sequencer_log_path, parse_inclusion_lane_profile_from_log}; - use std::fs; - - #[test] - fn parses_inclusion_lane_profile_summary_from_logs() { - let temp = tempfile::tempdir().expect("tempdir"); - let log_path = temp.path().join("sequencer.log"); - let content = r#" -2026-03-01T00:00:00Z INFO x: inclusion lane metrics window_ms=5000 user_op_app_execute_phase_ms=120 user_op_persist_phase_ms=80 user_op_app_share_pct_of_app_plus_persist=60.0 user_op_persist_share_pct_of_app_plus_persist=40.0 -2026-03-01T00:00:05Z INFO x: inclusion lane metrics window_ms=5000 user_op_app_execute_phase_ms=140 user_op_persist_phase_ms=60 user_op_app_share_pct_of_app_plus_persist=70.0 user_op_persist_share_pct_of_app_plus_persist=30.0 -"#; - fs::write(log_path.as_path(), content).expect("write log"); - - let report = parse_inclusion_lane_profile_from_log(log_path.as_path()) - .expect("parse result") - .expect("profile present"); - - assert_eq!(report.samples, 2); - assert_eq!(report.latest_user_op_app_execute_phase_ms, Some(140)); - assert_eq!(report.latest_user_op_persist_phase_ms, Some(60)); - assert_eq!( - report.avg_user_op_app_share_pct_of_app_plus_persist, - Some(65.0) - ); - assert_eq!( - report.avg_user_op_persist_share_pct_of_app_plus_persist, - Some(35.0) - ); - } + use super::{default_anvil_log_path, default_sequencer_log_path, extract_template_hash}; #[test] fn default_log_path_uses_results_dir() { @@ -516,20 +692,18 @@ mod tests { } #[test] - fn parses_ansi_colored_profile_lines() { - let temp = tempfile::tempdir().expect("tempdir"); - let log_path = 
temp.path().join("sequencer.log"); - let content = "\u{1b}[2m2026-03-01T00:00:00Z\u{1b}[0m \u{1b}[32mINFO\u{1b}[0m x: inclusion lane metrics \u{1b}[3mwindow_ms\u{1b}[0m\u{1b}[2m=\u{1b}[0m5000 \u{1b}[3muser_op_app_share_pct_of_app_plus_persist\u{1b}[0m\u{1b}[2m=\u{1b}[0m60.5 \u{1b}[3muser_op_persist_share_pct_of_app_plus_persist\u{1b}[0m\u{1b}[2m=\u{1b}[0m39.5\n"; - fs::write(log_path.as_path(), content).expect("write log"); - - let report = parse_inclusion_lane_profile_from_log(log_path.as_path()) - .expect("parse result") - .expect("profile present"); - assert_eq!(report.samples, 1); - assert_eq!(report.latest_window_ms, Some(5000)); + fn default_anvil_log_path_uses_results_dir() { + let value = default_anvil_log_path("ack-latency"); + assert!(value.to_string_lossy().contains("benchmarks/results/")); + assert!(value.to_string_lossy().contains("ack-latency-anvil")); + } + + #[test] + fn extract_template_hash_parses_cartesi_machine_output() { + let output = "Loading machine: please wait\n53799514: 6b7e5d3e45079545ffa46edee3a7a8e68dbff4ccadf0520b772a357807d42de8\n"; assert_eq!( - report.latest_user_op_app_share_pct_of_app_plus_persist, - Some(60.5) + extract_template_hash(output), + Some("6b7e5d3e45079545ffa46edee3a7a8e68dbff4ccadf0520b772a357807d42de8") ); } } diff --git a/benchmarks/src/stats.rs b/benchmarks/src/stats.rs new file mode 100644 index 0000000..e08ecc4 --- /dev/null +++ b/benchmarks/src/stats.rs @@ -0,0 +1,107 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use serde::{Deserialize, Serialize}; +use std::time::Duration; + +use crate::{BenchResult, support::err}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Stats { + pub count: usize, + pub min: Duration, + pub max: Duration, + pub mean: Duration, + pub p50: Duration, + pub p95: Duration, + pub p99: Duration, + pub p999: Duration, +} + +pub fn summarize(samples: &[Duration]) -> BenchResult { + if samples.is_empty() { + 
return Err(err("cannot summarize empty sample set")); + } + + let mut nanos: Vec = samples.iter().map(Duration::as_nanos).collect(); + nanos.sort_unstable(); + let sum: u128 = nanos.iter().copied().sum(); + let count = nanos.len(); + + Ok(Stats { + count, + min: duration_from_nanos(nanos[0]), + max: duration_from_nanos(nanos[count - 1]), + mean: duration_from_nanos(sum / count as u128), + p50: duration_from_nanos(percentile(&nanos, 0.50)), + p95: duration_from_nanos(percentile(&nanos, 0.95)), + p99: duration_from_nanos(percentile(&nanos, 0.99)), + p999: duration_from_nanos(percentile(&nanos, 0.999)), + }) +} + +pub fn print_stats(name: &str, stats: &Stats) { + println!("{name}:"); + println!(" count: {}", stats.count); + println!(" min: {}", format_ms(stats.min)); + println!(" p50: {}", format_ms(stats.p50)); + println!(" p95: {}", format_ms(stats.p95)); + println!(" p99: {}", format_ms(stats.p99)); + println!(" p99.9: {}", format_ms(stats.p999)); + println!(" max: {}", format_ms(stats.max)); + println!(" mean: {}", format_ms(stats.mean)); +} + +pub fn throughput_tx_per_s(accepted_count: usize, total_wall: Duration) -> f64 { + if total_wall.is_zero() { + 0.0 + } else { + accepted_count as f64 / total_wall.as_secs_f64() + } +} + +pub fn rejection_rate(accepted: u64, rejected: u64) -> f64 { + let total = accepted.saturating_add(rejected); + if total == 0 { + 0.0 + } else { + (rejected as f64 / total as f64) * 100.0 + } +} + +pub(crate) fn format_optional_f64(value: Option) -> String { + match value { + Some(v) => format!("{v:.3}"), + None => "n/a".to_string(), + } +} + +fn percentile(sorted_nanos: &[u128], p: f64) -> u128 { + let last = sorted_nanos.len() - 1; + let rank = (p * last as f64).ceil() as usize; + sorted_nanos[rank.min(last)] +} + +fn duration_from_nanos(value: u128) -> Duration { + let nanos = u64::try_from(value).unwrap_or(u64::MAX); + Duration::from_nanos(nanos) +} + +fn format_ms(value: Duration) -> String { + format!("{:.3} ms", value.as_secs_f64() * 
1000.0) +} + +#[cfg(test)] +mod tests { + use super::summarize; + use std::time::Duration; + + #[test] + fn summarize_includes_p999() { + let samples: Vec = (1_u64..=10_000).map(Duration::from_micros).collect(); + let stats = summarize(samples.as_slice()).expect("summarize"); + assert_eq!(stats.count, 10_000); + assert!(stats.p999 >= stats.p99); + assert!(stats.p999 <= stats.max); + } +} diff --git a/benchmarks/src/support.rs b/benchmarks/src/support.rs new file mode 100644 index 0000000..7d614a5 --- /dev/null +++ b/benchmarks/src/support.rs @@ -0,0 +1,26 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use std::error::Error; +use std::time::{Instant, SystemTime, UNIX_EPOCH}; + +pub const DEFAULT_PROGRESS_EVERY: u64 = 500; + +pub fn now() -> Instant { + Instant::now() +} + +pub fn default_seed_offset() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_nanos() as u64 +} + +pub fn io_err(message: impl Into) -> Box { + Box::new(std::io::Error::other(message.into())) +} + +pub fn err(message: impl Into) -> Box { + Box::new(std::io::Error::other(message.into())) +} diff --git a/benchmarks/src/sweep.rs b/benchmarks/src/sweep.rs new file mode 100644 index 0000000..de7fd2e --- /dev/null +++ b/benchmarks/src/sweep.rs @@ -0,0 +1,235 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use serde::{Deserialize, Serialize}; +use std::collections::BTreeMap; +use std::fs; +use std::path::Path; + +use crate::{ + BenchResult, + rejection::{ + client_failure_count, has_http_429, has_http_rejection, http_429_count, + http_rejection_count, + }, + report::print_memory_report, + runtime, +}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SweepRow { + pub concurrency: usize, + pub accepted_tps: f64, + pub accepted_count: u64, + pub rejected_count: u64, + pub http_rejected_count: u64, + pub http_429_count: u64, + 
pub client_failure_count: u64, + pub rejection_rate: f64, + pub p95_ms: f64, + pub p99_ms: f64, + pub p999_ms: f64, + pub rejection_breakdown: BTreeMap, +} + +impl SweepRow { + pub fn new( + concurrency: usize, + accepted_tps: f64, + accepted_count: u64, + rejected_count: u64, + rejection_rate: f64, + p95_ms: f64, + p99_ms: f64, + p999_ms: f64, + rejection_breakdown: BTreeMap, + ) -> Self { + let http_rejected_count = http_rejection_count(&rejection_breakdown); + let http_429_count = http_429_count(&rejection_breakdown); + let client_failure_count = client_failure_count(rejected_count, &rejection_breakdown); + Self { + concurrency, + accepted_tps, + accepted_count, + rejected_count, + http_rejected_count, + http_429_count, + client_failure_count, + rejection_rate, + p95_ms, + p99_ms, + p999_ms, + rejection_breakdown, + } + } + + pub fn has_http_rejection(&self) -> bool { + has_http_rejection(&self.rejection_breakdown) + } + + pub fn has_http_429(&self) -> bool { + has_http_429(&self.rejection_breakdown) + } + + pub fn has_client_failure(&self) -> bool { + self.client_failure_count > 0 + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SweepSummary { + pub tps_at_first_any_rejection: Option, + pub tps_at_first_non_200: Option, + pub tps_at_first_429: Option, + pub tps_at_first_client_failure: Option, + pub max_sustainable_tps_at_0_rejections: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SweepRunReport { + pub rows: Vec, + pub summary: SweepSummary, + pub memory: Option, + pub sequencer_log_path: Option, +} + +pub fn compute_capacity_summary(rows: &[SweepRow]) -> SweepSummary { + let tps_at_first_any_rejection = rows + .iter() + .find(|row| row.rejected_count > 0) + .map(|row| row.accepted_tps); + + let tps_at_first_non_200 = rows + .iter() + .find(|row| row.has_http_rejection()) + .map(|row| row.accepted_tps); + + let tps_at_first_429 = rows + .iter() + .find(|row| row.has_http_429()) + .map(|row| row.accepted_tps); + 
+ let tps_at_first_client_failure = rows + .iter() + .find(|row| row.has_client_failure()) + .map(|row| row.accepted_tps); + + let max_sustainable_tps_at_0_rejections = rows + .iter() + .filter(|row| row.rejected_count == 0) + .map(|row| row.accepted_tps) + .max_by(|a, b| a.total_cmp(b)); + + SweepSummary { + tps_at_first_any_rejection, + tps_at_first_non_200, + tps_at_first_429, + tps_at_first_client_failure, + max_sustainable_tps_at_0_rejections, + } +} + +pub fn write_csv(path: &Path, rows: &[SweepRow]) -> BenchResult<()> { + let mut out = String::from( + "concurrency,accepted_tps,accepted_count,rejected_count,http_rejected_count,http_429_count,client_failure_count,rejection_rate,p95_ms,p99_ms,p999_ms\n", + ); + for row in rows { + out.push_str( + format!( + "{},{:.6},{},{},{},{},{},{:.6},{:.6},{:.6},{:.6}\n", + row.concurrency, + row.accepted_tps, + row.accepted_count, + row.rejected_count, + row.http_rejected_count, + row.http_429_count, + row.client_failure_count, + row.rejection_rate, + row.p95_ms, + row.p99_ms, + row.p999_ms, + ) + .as_str(), + ); + } + fs::write(path, out)?; + Ok(()) +} + +pub fn print_sweep_report(report: &SweepRunReport) { + println!( + "tps_at_first_any_rejection: {}", + format_optional(report.summary.tps_at_first_any_rejection) + ); + println!( + "tps_at_first_non_200: {}", + format_optional(report.summary.tps_at_first_non_200) + ); + println!( + "tps_at_first_429: {}", + format_optional(report.summary.tps_at_first_429) + ); + println!( + "tps_at_first_client_failure: {}", + format_optional(report.summary.tps_at_first_client_failure) + ); + println!( + "max_sustainable_tps_at_0_rejections: {}", + format_optional(report.summary.max_sustainable_tps_at_0_rejections) + ); + if let Some(memory) = report.memory.as_ref() { + print_memory_report(memory); + } + if let Some(path) = report.sequencer_log_path.as_ref() { + println!("sequencer_log_path: {path}"); + } +} + +fn format_optional(value: Option) -> String { + match value { + Some(v) => 
format!("{v:.2}"), + None => "not reached".to_string(), + } +} + +#[cfg(test)] +mod tests { + use super::{SweepRow, compute_capacity_summary}; + use std::collections::BTreeMap; + + #[test] + fn capacity_summary_tracks_http_and_client_failures_separately() { + let rows = vec![ + SweepRow::new(1, 10.0, 100, 0, 0.0, 1.0, 1.0, 1.0, BTreeMap::new()), + SweepRow::new( + 2, + 20.0, + 100, + 1, + 1.0, + 2.0, + 2.0, + 2.0, + BTreeMap::from([("io_connect".to_string(), 1_u64)]), + ), + SweepRow::new( + 3, + 30.0, + 100, + 1, + 1.0, + 3.0, + 3.0, + 3.0, + BTreeMap::from([("http_429".to_string(), 1_u64)]), + ), + ]; + + let summary = compute_capacity_summary(rows.as_slice()); + assert_eq!(summary.max_sustainable_tps_at_0_rejections, Some(10.0)); + assert_eq!(summary.tps_at_first_any_rejection, Some(20.0)); + assert_eq!(summary.tps_at_first_client_failure, Some(20.0)); + assert_eq!(summary.tps_at_first_non_200, Some(30.0)); + assert_eq!(summary.tps_at_first_429, Some(30.0)); + } +} diff --git a/benchmarks/src/workload.rs b/benchmarks/src/workload.rs new file mode 100644 index 0000000..3b995be --- /dev/null +++ b/benchmarks/src/workload.rs @@ -0,0 +1,401 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use alloy_primitives::{Address, Signature, U256}; +use alloy_sol_types::{Eip712Domain, SolStruct}; +use app_core::application::{ + MAX_METHOD_PAYLOAD_BYTES, Method, Transfer, Withdrawal, default_private_keys, +}; +use clap::ValueEnum; +use k256::ecdsa::SigningKey; +use k256::ecdsa::signature::hazmat::PrehashSigner; +use sequencer_core::api::TxRequest; +use sequencer_core::user_op::UserOp; +use serde::{Deserialize, Serialize}; +use std::fs; + +use crate::{BenchResult, support::err}; + +pub const DEFAULT_WORKLOAD_TRANSFER_AMOUNT: u64 = 1; + +#[derive(Debug, Clone)] +pub struct SignedTxFixture { + pub request: TxRequest, + pub expected_sender: String, + pub expected_data_hex: String, +} + +#[derive(Debug, Clone, Copy, 
PartialEq, Eq, Serialize, Deserialize, ValueEnum)] +#[serde(rename_all = "snake_case")] +pub enum WorkloadKind { + Synthetic, + FundedTransfer, +} + +impl WorkloadKind { + pub fn as_str(self) -> &'static str { + match self { + Self::Synthetic => "synthetic", + Self::FundedTransfer => "funded-transfer", + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct WorkloadConfig { + pub kind: WorkloadKind, + pub accounts_file: Option, + pub transfer_amount: u64, +} + +impl Default for WorkloadConfig { + fn default() -> Self { + Self { + kind: WorkloadKind::Synthetic, + accounts_file: None, + transfer_amount: DEFAULT_WORKLOAD_TRANSFER_AMOUNT, + } + } +} + +pub(crate) struct WorkloadState { + inner: WorkloadStateInner, +} + +enum WorkloadStateInner { + Synthetic { + next_seed: u64, + }, + FundedTransfer { + accounts: Vec, + round_robin_index: usize, + transfer_amount: u64, + }, +} + +#[derive(Clone)] +struct FundedAccount { + signing_key: SigningKey, + sender: Address, + next_nonce: u32, +} + +impl WorkloadState { + pub(crate) fn initialize(config: &WorkloadConfig, seed_offset: u64) -> BenchResult { + match config.kind { + WorkloadKind::Synthetic => Ok(Self { + inner: WorkloadStateInner::Synthetic { + next_seed: seed_offset, + }, + }), + WorkloadKind::FundedTransfer => { + let accounts = load_funded_accounts(config.accounts_file.as_deref())?; + Ok(Self { + inner: WorkloadStateInner::FundedTransfer { + accounts, + round_robin_index: 0, + transfer_amount: config.transfer_amount, + }, + }) + } + } + } + + pub(crate) fn next_fixture( + &mut self, + max_fee: u32, + domain: &Eip712Domain, + ) -> BenchResult { + match &mut self.inner { + WorkloadStateInner::Synthetic { next_seed } => { + let fixture = make_signed_fixture(*next_seed, max_fee, domain)?; + *next_seed = next_seed.wrapping_add(1); + Ok(fixture) + } + WorkloadStateInner::FundedTransfer { + accounts, + round_robin_index, + transfer_amount, + } => { + if accounts.is_empty() { + return Err(err("funded 
workload has zero accounts")); + } + let sender_index = *round_robin_index % accounts.len(); + let recipient_index = (sender_index + 1) % accounts.len(); + let recipient = accounts[recipient_index].sender; + let sender = &mut accounts[sender_index]; + + let amount = + U256::from((*transfer_amount).saturating_add(u64::from(sender.next_nonce))); + let method = Method::Transfer(Transfer { + amount, + to: recipient, + }); + let data = ssz::Encode::as_ssz_bytes(&method); + if data.len() > MAX_METHOD_PAYLOAD_BYTES { + return Err(err(format!( + "funded transfer payload too large: {} > {}", + data.len(), + MAX_METHOD_PAYLOAD_BYTES + ))); + } + + let user_op = UserOp { + nonce: sender.next_nonce, + max_fee, + data: data.into(), + }; + let fixture = + make_signed_fixture_from_signing_key(&sender.signing_key, user_op, domain)?; + sender.next_nonce = sender.next_nonce.wrapping_add(1); + *round_robin_index = (*round_robin_index + 1) % accounts.len(); + Ok(fixture) + } + } + } + + pub(crate) fn concurrency_cap(&self) -> Option { + match &self.inner { + WorkloadStateInner::Synthetic { .. } => None, + WorkloadStateInner::FundedTransfer { accounts, .. 
} => Some(accounts.len().max(1)), + } + } +} + +pub fn make_signed_fixture( + seed: u64, + max_fee: u32, + domain: &Eip712Domain, +) -> BenchResult { + let signing_key = signing_key_for_seed(seed)?; + let sender = address_from_signing_key(&signing_key); + let method = Method::Withdrawal(Withdrawal { + amount: U256::from(seed.saturating_add(1)), + }); + let data = ssz::Encode::as_ssz_bytes(&method); + if data.len() > MAX_METHOD_PAYLOAD_BYTES { + return Err(err(format!( + "benchmark payload too large: {} > {}", + data.len(), + MAX_METHOD_PAYLOAD_BYTES + ))); + } + let message = UserOp { + nonce: 0, + max_fee, + data: data.clone().into(), + }; + + let fixture = make_signed_fixture_from_signing_key(&signing_key, message, domain)?; + if fixture.expected_sender != sender.to_string() { + return Err(err("unexpected synthetic sender mismatch")); + } + Ok(fixture) +} + +fn load_funded_accounts(accounts_file: Option<&str>) -> BenchResult> { + let keys = match accounts_file { + Some(path) => load_private_keys_from_file(path)?, + None => default_private_keys().to_vec(), + }; + if keys.is_empty() { + return Err(err("no private keys available for funded workload")); + } + + let mut accounts = Vec::with_capacity(keys.len()); + for key_hex in keys { + let signing_key = signing_key_from_hex(key_hex.as_str())?; + let sender = address_from_signing_key(&signing_key); + accounts.push(FundedAccount { + signing_key, + sender, + next_nonce: 0, + }); + } + Ok(accounts) +} + +fn load_private_keys_from_file(path: &str) -> BenchResult> { + let contents = fs::read_to_string(path) + .map_err(|e| err(format!("failed reading accounts file '{path}': {e}")))?; + let mut keys = Vec::new(); + for (line_no, line) in contents.lines().enumerate() { + let trimmed = line.trim(); + if trimmed.is_empty() { + continue; + } + + let mut parts = trimmed.split_whitespace(); + let address = parts.next().ok_or_else(|| { + err(format!( + "accounts file '{path}' line {}: missing address", + line_no + 1 + )) + })?; + 
let private_key = parts.next().ok_or_else(|| { + err(format!( + "accounts file '{path}' line {}: missing private key", + line_no + 1 + )) + })?; + if parts.next().is_some() { + return Err(err(format!( + "accounts file '{path}' line {}: expected exactly two fields:
", + line_no + 1 + ))); + } + + validate_hex_token(path, line_no + 1, "address", address, 42)?; + validate_hex_token(path, line_no + 1, "private_key", private_key, 66)?; + keys.push(private_key.to_string()); + } + if keys.is_empty() { + return Err(err(format!( + "accounts file '{path}' did not contain any account records" + ))); + } + Ok(keys) +} + +fn validate_hex_token( + path: &str, + line_no: usize, + field: &str, + value: &str, + expected_len: usize, +) -> BenchResult<()> { + if value.len() != expected_len { + return Err(err(format!( + "accounts file '{path}' line {}: invalid {field} length: expected {}, got {}", + line_no, + expected_len, + value.len() + ))); + } + if !value.starts_with("0x") { + return Err(err(format!( + "accounts file '{path}' line {}: {field} must start with 0x", + line_no + ))); + } + if !value.as_bytes().iter().skip(2).all(u8::is_ascii_hexdigit) { + return Err(err(format!( + "accounts file '{path}' line {}: {field} must be hex", + line_no + ))); + } + Ok(()) +} + +fn signing_key_from_hex(hex: &str) -> BenchResult { + let bytes = alloy_primitives::hex::decode(hex) + .map_err(|e| err(format!("invalid private key hex '{hex}': {e}")))?; + if bytes.len() != 32 { + return Err(err(format!( + "invalid private key length: expected 32 bytes, got {}", + bytes.len() + ))); + } + let mut key_bytes = [0_u8; 32]; + key_bytes.copy_from_slice(&bytes); + SigningKey::from_bytes((&key_bytes).into()) + .map_err(|e| err(format!("invalid private key material: {e}"))) +} + +fn make_signed_fixture_from_signing_key( + signing_key: &SigningKey, + user_op: UserOp, + domain: &Eip712Domain, +) -> BenchResult { + let sender = address_from_signing_key(signing_key); + let signature = sign_user_op(domain, &user_op, signing_key)?; + let data = user_op.data.to_vec(); + Ok(SignedTxFixture { + request: TxRequest { + message: user_op, + signature, + sender: sender.to_string(), + }, + expected_sender: sender.to_string(), + expected_data_hex: 
alloy_primitives::hex::encode_prefixed(data), + }) +} + +fn signing_key_for_seed(seed: u64) -> BenchResult { + let mut bytes = [0_u8; 32]; + bytes[24..32].copy_from_slice(&seed.saturating_add(1).to_be_bytes()); + SigningKey::from_bytes((&bytes).into()) + .map_err(|e| err(format!("build signing key failed: {e}"))) +} + +fn sign_user_op( + domain: &Eip712Domain, + user_op: &UserOp, + signing_key: &SigningKey, +) -> BenchResult { + let hash = user_op.eip712_signing_hash(domain); + let k256_sig = signing_key + .sign_prehash(hash.as_slice()) + .map_err(|e| err(format!("sign user op prehash failed: {e}")))?; + + let expected_sender = address_from_signing_key(signing_key); + let signature = [false, true] + .into_iter() + .map(|parity| Signature::from_signature_and_parity(k256_sig, parity)) + .find(|candidate| { + candidate + .recover_address_from_prehash(&hash) + .ok() + .map(|sender| sender == expected_sender) + .unwrap_or(false) + }) + .ok_or_else(|| err("could not recover parity for signature"))?; + + Ok(alloy_primitives::hex::encode_prefixed(signature.as_bytes())) +} + +fn address_from_signing_key(signing_key: &SigningKey) -> Address { + let verifying = signing_key.verifying_key().to_encoded_point(false); + Address::from_raw_public_key(&verifying.as_bytes()[1..]) +} + +#[cfg(test)] +mod tests { + use super::{ + FundedAccount, WorkloadState, WorkloadStateInner, address_from_signing_key, + default_private_keys, signing_key_from_hex, + }; + use crate::self_contained_domain; + + #[test] + fn funded_transfer_round_robin_nonce_progression() { + let mut accounts = Vec::new(); + for key in default_private_keys().iter().take(2) { + let signing_key = signing_key_from_hex(key.as_str()).expect("signing key"); + accounts.push(FundedAccount { + sender: address_from_signing_key(&signing_key), + signing_key, + next_nonce: 1, + }); + } + + let domain = self_contained_domain().eip712_domain(); + let mut state = WorkloadState { + inner: WorkloadStateInner::FundedTransfer { + accounts, + 
round_robin_index: 0, + transfer_amount: 1, + }, + }; + + let one = state.next_fixture(0, &domain).expect("fixture 1"); + let two = state.next_fixture(0, &domain).expect("fixture 2"); + let three = state.next_fixture(0, &domain).expect("fixture 3"); + + assert_ne!(one.expected_sender, two.expected_sender); + assert_eq!(one.expected_sender, three.expected_sender); + assert_eq!(one.request.message.nonce, 1); + assert_eq!(two.request.message.nonce, 1); + assert_eq!(three.request.message.nonce, 2); + } +} diff --git a/examples/app-core/Cargo.toml b/examples/app-core/Cargo.toml index e59e74f..74121f4 100644 --- a/examples/app-core/Cargo.toml +++ b/examples/app-core/Cargo.toml @@ -13,3 +13,4 @@ authors.workspace = true sequencer-core = { path = "../../sequencer-core" } alloy-primitives = { version = "1.4.1", features = ["serde", "k256"] } ssz = { package = "ethereum_ssz", version = "0.10" } +ssz_derive = { package = "ethereum_ssz_derive", version = "0.10" } diff --git a/examples/app-core/anvil_accounts.txt b/examples/app-core/anvil_accounts.txt new file mode 100644 index 0000000..8ce2fbe --- /dev/null +++ b/examples/app-core/anvil_accounts.txt @@ -0,0 +1,100 @@ +0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 +0x70997970C51812dc3A010C7d01b50e0d17dc79C8 0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d +0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC 0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a +0x90F79bf6EB2c4f870365E785982E1f101E93b906 0x7c852118294e51e653712a81e05800f419141751be58f605c371e15141b007a6 +0x15d34AAf54267DB7D7c367839AAf71A00a2C6A65 0x47e179ec197488593b187f80a00eb0da91f1b9d0b13f8733639f19c30a34926a +0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc 0x8b3a350cf5c34c9194ca85829a2df0ec3153be0318b5e2d3348e872092edffba +0x976EA74026E726554dB657fA54763abd0C3a0aa9 0x92db14e403b83dfe3df233f83dfa3a0d7096f21ca9b0d6d6b8d88b2b4ec1564e +0x14dC79964da2C08b23698B3D3cc7Ca32193d9955 
0x4bbbf85ce3377467afe5d46f804f221813b2bb87f24d81f60f1fcdbf7cbf4356 +0x23618e81E3f5cdF7f54C3d65f7FBc0aBf5B21E8f 0xdbda1821b80551c9d65939329250298aa3472ba22feea921c0cf5d620ea67b97 +0xa0Ee7A142d267C1f36714E4a8F75612F20a79720 0x2a871d0798f97d79848a013d4936a73bf4cc922c825d33c1cf7073dff6d409c6 +0xBcd4042DE499D14e55001CcbB24a551F3b954096 0xf214f2b2cd398c806f84e317254e0f0b801d0643303237d97a22a48e01628897 +0x71bE63f3384f5fb98995898A86B02Fb2426c5788 0x701b615bbdfb9de65240bc28bd21bbc0d996645a3dd57e7b12bc2bdf6f192c82 +0xFABB0ac9d68B0B445fB7357272Ff202C5651694a 0xa267530f49f8280200edf313ee7af6b827f2a8bce2897751d06a843f644967b1 +0x1CBd3b2770909D4e10f157cABC84C7264073C9Ec 0x47c99abed3324a2707c28affff1267e45918ec8c3f20b8aa892e8b065d2942dd +0xdF3e18d64BC6A983f673Ab319CCaE4f1a57C7097 0xc526ee95bf44d8fc405a158bb884d9d1238d99f0612e9f33d006bb0789009aaa +0xcd3B766CCDd6AE721141F452C550Ca635964ce71 0x8166f546bab6da521a8369cab06c5d2b9e46670292d85c875ee9ec20e84ffb61 +0x2546BcD3c84621e976D8185a91A922aE77ECEc30 0xea6c44ac03bff858b476bba40716402b03e41b8e97e276d1baec7c37d42484a0 +0xbDA5747bFD65F08deb54cb465eB87D40e51B197E 0x689af8efa8c651a91ad287602527f3af2fe9f6501a7ac4b061667b5a93e037fd +0xdD2FD4581271e230360230F9337D5c0430Bf44C0 0xde9be858da4a475276426320d5e9262ecfc3ba460bfac56360bfa6c4c28b4ee0 +0x8626f6940E2eb28930eFb4CeF49B2d1F2C9C1199 0xdf57089febbacf7ba0bc227dafbffa9fc08a93fdc68e1e42411a14efcf23656e +0x09DB0a93B389bEF724429898f539AEB7ac2Dd55f 0xeaa861a9a01391ed3d587d8a5a84ca56ee277629a8b02c22093a419bf240e65d +0x02484cb50AAC86Eae85610D6f4Bf026f30f6627D 0xc511b2aa70776d4ff1d376e8537903dae36896132c90b91d52c1dfbae267cd8b +0x08135Da0A343E492FA2d4282F2AE34c6c5CC1BbE 0x224b7eb7449992aac96d631d9677f7bf5888245eef6d6eeda31e62d2f29a83e4 +0x5E661B79FE2D3F6cE70F5AAC07d8Cd9abb2743F1 0x4624e0802698b9769f5bdb260a3777fbd4941ad2901f5966b854f953497eec1b +0x61097BA76cD906d2ba4FD106E757f7Eb455fc295 0x375ad145df13ed97f8ca8e27bb21ebf2a3819e9e0a06509a812db377e533def7 +0xDf37F81dAAD2b0327A0A50003740e1C935C70913 
0x18743e59419b01d1d846d97ea070b5a3368a3e7f6f0242cf497e1baac6972427 +0x553BC17A05702530097c3677091C5BB47a3a7931 0xe383b226df7c8282489889170b0f68f66af6459261f4833a781acd0804fafe7a +0x87BdCE72c06C21cd96219BD8521bDF1F42C78b5e 0xf3a6b71b94f5cd909fb2dbb287da47badaa6d8bcdc45d595e2884835d8749001 +0x40Fc963A729c542424cD800349a7E4Ecc4896624 0x4e249d317253b9641e477aba8dd5d8f1f7cf5250a5acadd1229693e262720a19 +0x9DCCe783B6464611f38631e6C851bf441907c710 0x233c86e887ac435d7f7dc64979d7758d69320906a0d340d2b6518b0fd20aa998 +0x1BcB8e569EedAb4668e55145Cfeaf190902d3CF2 0x85a74ca11529e215137ccffd9c95b2c72c5fb0295c973eb21032e823329b3d2d +0x8263Fce86B1b78F95Ab4dae11907d8AF88f841e7 0xac8698a440d33b866b6ffe8775621ce1a4e6ebd04ab7980deb97b3d997fc64fb +0xcF2d5b3cBb4D7bF04e3F7bFa8e27081B52191f91 0xf076539fbce50f0513c488f32bf81524d30ca7a29f400d68378cc5b1b17bc8f2 +0x86c53Eb85D0B7548fea5C4B4F82b4205C8f6Ac18 0x5544b8b2010dbdbef382d254802d856629156aba578f453a76af01b81a80104e +0x1aac82773CB722166D7dA0d5b0FA35B0307dD99D 0x47003709a0a9a4431899d4e014c1fd01c5aad19e873172538a02370a119bae11 +0x2f4f06d218E426344CFE1A83D53dAd806994D325 0x9644b39377553a920edc79a275f45fa5399cbcf030972f771d0bca8097f9aad3 +0x1003ff39d25F2Ab16dBCc18EcE05a9B6154f65F4 0xcaa7b4a2d30d1d565716199f068f69ba5df586cf32ce396744858924fdf827f0 +0x9eAF5590f2c84912A08de97FA28d0529361Deb9E 0xfc5a028670e1b6381ea876dd444d3faaee96cffae6db8d93ca6141130259247c +0x11e8F3eA3C6FcF12EcfF2722d75CEFC539c51a1C 0x5b92c5fe82d4fabee0bc6d95b4b8a3f9680a0ed7801f631035528f32c9eb2ad5 +0x7D86687F980A56b832e9378952B738b614A99dc6 0xb68ac4aa2137dd31fd0732436d8e59e959bb62b4db2e6107b15f594caf0f405f +0x9eF6c02FB2ECc446146E05F1fF687a788a8BF76d 0xc95eaed402c8bd203ba04d81b35509f17d0719e3f71f40061a2ec2889bc4caa7 +0x08A2DE6F3528319123b25935C92888B16db8913E 0x55afe0ab59c1f7bbd00d5531ddb834c3c0d289a4ff8f318e498cb3f004db0b53 +0xe141C82D99D85098e03E1a1cC1CdE676556fDdE0 0xc3f9b30f83d660231203f8395762fa4257fa7db32039f739630f87b8836552cc +0x4b23D303D9e3719D6CDf8d172Ea030F80509ea15 
0x3db34a7bcc6424e7eadb8e290ce6b3e1423c6e3ef482dd890a812cd3c12bbede +0xC004e69C5C04A223463Ff32042dd36DabF63A25a 0xae2daaa1ce8a70e510243a77187d2bc8da63f0186074e4a4e3a7bfae7fa0d639 +0x5eb15C0992734B5e77c888D713b4FC67b3D679A2 0x5ea5c783b615eb12be1afd2bdd9d96fae56dda0efe894da77286501fd56bac64 +0x7Ebb637fd68c523613bE51aad27C35C4DB199B9c 0xf702e0ff916a5a76aaf953de7583d128c013e7f13ecee5d701b49917361c5e90 +0x3c3E2E178C69D4baD964568415a0f0c84fd6320A 0x7ec49efc632757533404c2139a55b4d60d565105ca930a58709a1c52d86cf5d3 +0x35304262b9E87C00c430149f28dD154995d01207 0x755e273950f5ae64f02096ae99fe7d4f478a28afd39ef2422068ee7304c636c0 +0xD4A1E660C916855229e1712090CcfD8a424A2E33 0xaf6ecabcdbbfb2aefa8248b19d811234cd95caa51b8e59b6ffd3d4bbc2a6be4c +0xEe7f6A930B29d7350498Af97f0F9672EaecbeeFf 0x70c2bd1b41084c2e2238551eace483321f8c1a413a471c3b49c8a5d1d6b3d0c4 +0x145e2dc5C8238d1bE628F87076A37d4a26a78544 0xcb8e373c93609268cdcec93450f3578b92bb20c3ac2e77968d106025005f97b5 +0xD6A098EbCc5f8Bd4e174D915C54486B077a34A51 0x6f29f6e0b750bcdd31c3403f48f11d72215990375b6d23380b39c9bbf854a7d3 +0x042a63149117602129B6922ecFe3111168C2C323 0xff249f7eba6d8d3a65794995d724400a23d3b0bd1714265c965870ef47808be8 +0xa0EC9eE47802CeB56eb58ce80F3E41630B771b04 0x5599a7be5589682da3e0094806840e8510dae6493665a701b06c59cbe9d97968 +0xe8B1ff302A740fD2C6e76B620d45508dAEc2DDFf 0x93de2205919f5b472723722fedb992e962c34d29c4caaedd82cd33e16f1fd3cf +0xAb707cb80e7de7C75d815B1A653433F3EEc44c74 0xd20ecf81c6c3ad87a4e8dbeb7ceef41dd0eebc7a1657efb9d34e47217694b5cb +0x0d803cdeEe5990f22C2a8DF10A695D2312dA26CC 0xe4058704ed240d68a94b6fb226824734ddabd4b1fe37bc85ce22f5b17f98830e +0x1c87Bb9234aeC6aDc580EaE6C8B59558A4502220 0x4ae4408221b5042c0ee36f6e9e6b586a00d0452aa89df2e7f4f5aec42152ec43 +0x4779d18931B35540F84b0cd0e9633855B84df7b8 0x0e7c38ba429fa5081692121c4fcf6304ca5895c6c86d31ed155b0493c516107f +0xC0543b0b980D8c834CBdF023b2d2A75b5f9D1909 0xd5df67c2e4da3ff9c8c6045d9b7c41581efeb2a3660921ad4ba863cc4b8c211c +0x73B3074ac649A8dc31c2C90a124469456301a30F 
0x92456ac1fa1ef65a04fb4689580ad5e4cda7369f3620ef3a02fa4015725f460a +0x265188114EB5d5536BC8654d8e9710FE72C28c4d 0x65b10e7d7315bb8b7f7c6eefcbd87b36ad4007c4ade9c032354f016e84ad9c5e +0x924Ba5Ce9f91ddED37b4ebf8c0dc82A40202fc0A 0x365820b3376c77dab008476d49f7cd7af87fc7bbd57dc490378106c3353b2b33 +0x64492E25C30031EDAD55E57cEA599CDB1F06dad1 0xb07579b9864bb8e69e8b6e716284ab5b5f39fe5bb57ae4c83af795a242390202 +0x262595fa2a3A86adACDe208589614d483e3eF1C0 0xbf071d2b017426fcbf763cce3b3efe3ffc9663a42c77a431df521ef6c79cacad +0xDFd99099Fa13541a64AEe9AAd61c0dbf3D32D492 0x8bbffff1588b3c4eb8d415382546f6f6d5f0f61087c3be7c7c4d9e0d41d97258 +0x63c3686EF31C03a641e2Ea8993A91Ea351e5891a 0xb658f0575a14a7ac05075cb0f8727f0aae168a091dfb32d92514d1a7c11cf498 +0x9394cb5f737Bd3aCea7dcE90CA48DBd42801EE5d 0x228330af91fa515d7514cf5ac6594ab90b296cbd8ff7bc4567306aa66cacd79f +0x344dca30F5c5f74F2f13Dc1d48Ad3A9069d13Ad9 0xe6f80f9618311c0cd58f6a3fc6621cdbf6da4a72cc42e2974c98829343e7927b +0xF23E054D8b4D0BECFa22DeEF5632F27f781f8bf5 0x36d0435aa9a2c24d72a0aa69673b3acc2649969c38a581103df491aac6c33dd4 +0x6d69F301d1Da5C7818B5e61EECc745b30179C68b 0xf3ed98f9148171cfed177aef647e8ac0e2579075f640d05d37df28e6e0551083 +0xF0cE7BaB13C99bA0565f426508a7CD8f4C247E5a 0x8fc20c439fd7cf4f36217471a5db7594188540cf9997a314520a018de27544dd +0x011bD5423C5F77b5a0789E27f922535fd76B688F 0x549078aab3adafeff862b2d40b6b27756c5c4669475c3367edfb8dcf63ea1ae5 +0xD9065f27e9b706E5F7628e067cC00B288dddbF19 0xacf192decb2e4ddd8ad61693ab8edd67e3620b2ed79880ff4e1e04482c52c916 +0x54ccCeB38251C29b628ef8B00b3cAB97e7cAc7D5 0x47dc59330fb8c356ef61c55c11f9bb49ee463df50cbfe59f389de7637037b029 +0xA1196426b41627ae75Ea7f7409E074BE97367da2 0xf0050439b33fd77f7183f44375bc43a869a9880dca82a187fab9be91e020d029 +0xE74cEf90b6CF1a77FEfAd731713e6f53e575C183 0xe995cc7ea38e5c2927b97607765c2a20f4a6052d6810a3a1102e84d77c0df13b +0x7Df8Efa6d6F1CB5C4f36315e0AcB82b02Ae8BA40 0x8232e778c8e32eddb268e12aee5e82c7bb540cc176e150d64f35ee4ae2faf2b2 +0x9E126C57330FA71556628e0aabd6B6B6783d99fA 
0xba8c9ff38e4179748925335a9891b969214b37dc3723a1754b8b849d3eea9ac0 +0x586BA39027A74e8D40E6626f89Ae97bA7f616644 0xe66935494406a2b7cddd5b90f0a416cd499353f9f5b16d3f53e9db79b5af315c +0x9A50ed082Cf2fc003152580dcDB320B834fA379E 0xdf1d05a0dc7ff9b702517352bbcc48cd78c4f1c8e7e0be4a7e8c9d8a01318dca +0xbc8183bac3E969042736f7af07f76223D11D2148 0xaf905e7d181f83cf2b32316c035db8cc6dc37b8ee658a39c648a40f7f5aea732 +0x586aF62EAe7F447D14D25f53918814e04d3A5BA4 0x2e07199788560fbb67ad75c647ab4c1288c035e370cd8efd8cc98c117a9e1dbc +0xCcDd262f272Ee6C226266eEa13eE48D4d932Ce66 0xbeab65f35a77de7af63a97748e6a3bb90372f9225ebc6e8d0f1dc14098ac683a +0xF0eeDDC5e015d4c459590E01Dcc2f2FD1d2baac7 0x0ae04d323697ac9f6590e30ac497b8bb84ba66a3f7db8648792e92a5773c9dc7 +0x4edFEDFf17ab9642F8464D6143900903dD21421a 0x7cda9d93162b809fb8474f22c724da7e9590ac3bfba54ec15bdd54953ab3e5ff +0x492C973C16E8aeC46f4d71716E91b05B245377C9 0xf6702b85537d0a844debc36e28e409af35c683a0d968ff1a01eab8bc17542397 +0xE5D3ab6883b7e8c35c04675F28BB992Ca1129ee4 0x4034badb4e3cdf45d4032c7671a82d4967a0ce4c1bf3ddb72bf8fba38c151f6f +0x71F280DEA6FC5a03790941Ad72956f545FeB7a52 0x967483ff906486d78577d1749000ddcee7c65f480f154fb5d9d45170f0489d33 +0xE77478D9E136D3643cFc6fef578Abf63F9Ab91B1 0x9c9186fb8f85adc661f3da56dd64e3b9a3f95b17c05ed9c6561f9ee9225da327 +0x6C8EA11559DFE79Ae3dBDD6A67b47F61b929398f 0xef463dfdd318a103afeb0e4e75b3c3c0b13a681c2dc48b22bc05a949d5fa28d5 +0x48fA7b63049A6F4E7316EB2D9c5BDdA8933BCA2f 0x165b52d20a0ebc82b1e87bd02e221f3a2bec6ff6f61326eea3e6180cc23ccf43 +0x16aDfbeFdEfD488C992086D472A4CA577a0e5e54 0x945ff88d4066b8f6d61eb1dbc7c31dc1ad0078b8a781e0ea7b4c4f097e62dfd3 +0x225356FF5d64889D7364Be2c990f93a66298Ee8D 0x1ecfea2bcec4e5e3af2430ae90d554bc272cd7743efb66138c90840c729ebffe +0xcBDc0F9a4C38f1e010bD3B6e43598A55D1868c23 0xa6d83a50114f5bbd5557832caf948c4f202e31e7f8dd3bffdb579bf78dc4c166 +0xBc5BdceE96b1BC47822C74e6f64186fbA7d686be 0xf6b39438613b3f5dae4e84a73e90ea8a4efa0ab7b69cc532fdfe3932d20d52bb +0x0536896a5e38BbD59F3F369FF3682677965aBD19 
0x41f789906acc91db1f402d803b8537830856da0211f4ccf22f526d918b26c881 +0xFE0f143FcAD5B561b1eD2AC960278A2F23559Ef9 0xc1b5e6b1cd081956fa11c35329eeb84d31bceaf7253e84e0f90323d55065aa1f +0x98D08079928FcCB30598c6C6382ABfd7dbFaA1cD 0xa3f5fbad1692c5b72802300aefb5b760364018018ddb5fe7589a2203d0d10e60 diff --git a/examples/app-core/src/application/anvil_accounts.rs b/examples/app-core/src/application/anvil_accounts.rs new file mode 100644 index 0000000..1ab631f --- /dev/null +++ b/examples/app-core/src/application/anvil_accounts.rs @@ -0,0 +1,149 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use std::sync::OnceLock; + +use alloy_primitives::{Address, hex}; + +const ANVIL_ACCOUNTS_RAW: &str = + include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/anvil_accounts.txt")); + +#[derive(Debug)] +struct ParsedAnvilAccounts { + addresses: Vec
, + private_keys: Vec, +} + +static PARSED_ANVIL_ACCOUNTS: OnceLock = OnceLock::new(); + +pub fn prefunded_addresses() -> &'static [Address] { + parsed_anvil_accounts().addresses.as_slice() +} + +pub fn default_private_keys() -> &'static [String] { + parsed_anvil_accounts().private_keys.as_slice() +} + +fn parsed_anvil_accounts() -> &'static ParsedAnvilAccounts { + PARSED_ANVIL_ACCOUNTS.get_or_init(|| { + parse_accounts(ANVIL_ACCOUNTS_RAW) + .unwrap_or_else(|reason| panic!("invalid anvil_accounts.txt: {reason}")) + }) +} + +fn parse_accounts(raw: &str) -> Result { + let mut addresses = Vec::new(); + let mut private_keys = Vec::new(); + + for (line_no, line) in raw.lines().enumerate() { + let trimmed = line.trim(); + if trimmed.is_empty() { + continue; + } + + let mut parts = trimmed.split_whitespace(); + let address = parts + .next() + .ok_or_else(|| format!("line {}: missing address", line_no + 1))?; + let private_key = parts + .next() + .ok_or_else(|| format!("line {}: missing private key", line_no + 1))?; + if parts.next().is_some() { + return Err(format!( + "line {}: expected exactly two tokens:
", + line_no + 1 + )); + } + + addresses.push(parse_hex_address(address, line_no + 1)?); + validate_hex_token(private_key, 66, "private key", line_no + 1)?; + private_keys.push(private_key.to_string()); + } + + if addresses.is_empty() { + return Err("file does not contain any account records".to_string()); + } + if addresses.len() != private_keys.len() { + return Err("internal parser mismatch between addresses and keys".to_string()); + } + + Ok(ParsedAnvilAccounts { + addresses, + private_keys, + }) +} + +fn parse_hex_address(value: &str, line_no: usize) -> Result { + validate_hex_token(value, 42, "address", line_no)?; + let decoded = hex::decode(value) + .map_err(|err| format!("line {}: invalid address hex: {err}", line_no))?; + if decoded.len() != 20 { + return Err(format!( + "line {}: invalid address byte length: expected 20, got {}", + line_no, + decoded.len() + )); + } + Ok(Address::from_slice(decoded.as_slice())) +} + +fn validate_hex_token( + token: &str, + expected_len: usize, + field_name: &str, + line_no: usize, +) -> Result<(), String> { + if token.len() != expected_len { + return Err(format!( + "line {}: invalid {} length: expected {}, got {}", + line_no, + field_name, + expected_len, + token.len() + )); + } + if !token.starts_with("0x") { + return Err(format!( + "line {}: {} must start with 0x", + line_no, field_name + )); + } + if !token.as_bytes().iter().skip(2).all(u8::is_ascii_hexdigit) { + return Err(format!("line {}: {} must be hex", line_no, field_name)); + } + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::parse_accounts; + + #[test] + fn parse_accounts_accepts_valid_lines() { + let raw = "\ +0x1111111111111111111111111111111111111111 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +0x2222222222222222222222222222222222222222 0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb +"; + let parsed = parse_accounts(raw).expect("valid accounts"); + assert_eq!(parsed.addresses.len(), 2); + 
assert_eq!(parsed.private_keys.len(), 2); + } + + #[test] + fn parse_accounts_rejects_wrong_token_count() { + let raw = "0x1111111111111111111111111111111111111111"; + let err = parse_accounts(raw).expect_err("missing key must fail"); + assert!(err.contains("expected exactly two tokens") || err.contains("missing private key")); + } + + #[test] + fn parse_accounts_rejects_wrong_lengths_and_non_hex() { + let wrong_len = "0x111111111111111111111111111111111111111 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"; + let err = parse_accounts(wrong_len).expect_err("wrong address len must fail"); + assert!(err.contains("invalid address length")); + + let non_hex = "0x1111111111111111111111111111111111111111 0xzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"; + let err = parse_accounts(non_hex).expect_err("non-hex key must fail"); + assert!(err.contains("private key must be hex")); + } +} diff --git a/sequencer-core/src/application/method.rs b/examples/app-core/src/application/method.rs similarity index 80% rename from sequencer-core/src/application/method.rs rename to examples/app-core/src/application/method.rs index bda626a..05b1126 100644 --- a/sequencer-core/src/application/method.rs +++ b/examples/app-core/src/application/method.rs @@ -4,12 +4,13 @@ use alloy_primitives::{Address, U256}; use ssz_derive::{Decode, Encode}; +pub const MAX_METHOD_PAYLOAD_BYTES: usize = 1 + 32 + 20; + #[derive(PartialEq, Debug, Encode, Decode, Clone)] #[ssz(enum_behaviour = "union")] pub enum Method { Withdrawal(Withdrawal), Transfer(Transfer), - Deposit(Deposit), } #[derive(PartialEq, Debug, Encode, Decode, Clone)] @@ -22,9 +23,3 @@ pub struct Transfer { pub amount: U256, pub to: Address, } - -#[derive(PartialEq, Debug, Encode, Decode, Clone)] -pub struct Deposit { - pub amount: U256, - pub to: Address, -} diff --git a/examples/app-core/src/application/mod.rs b/examples/app-core/src/application/mod.rs index 2780534..89b0e85 100644 --- 
a/examples/app-core/src/application/mod.rs +++ b/examples/app-core/src/application/mod.rs @@ -1,6 +1,10 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) +mod anvil_accounts; +mod method; mod wallet; +pub use anvil_accounts::{default_private_keys, prefunded_addresses}; +pub use method::{MAX_METHOD_PAYLOAD_BYTES, Method, Transfer, Withdrawal}; pub use wallet::{WalletApp, WalletConfig}; diff --git a/examples/app-core/src/application/wallet.rs b/examples/app-core/src/application/wallet.rs index bf30dcb..e051a60 100644 --- a/examples/app-core/src/application/wallet.rs +++ b/examples/app-core/src/application/wallet.rs @@ -6,7 +6,10 @@ use std::collections::HashMap; use alloy_primitives::{Address, U256}; use ssz::Decode; -use sequencer_core::application::{AppError, Application, InvalidReason, Method}; +use super::{ + MAX_METHOD_PAYLOAD_BYTES as WALLET_MAX_METHOD_PAYLOAD_BYTES, Method, prefunded_addresses, +}; +use sequencer_core::application::{AppError, Application, InvalidReason}; use sequencer_core::l2_tx::ValidUserOp; use sequencer_core::user_op::UserOp; @@ -26,10 +29,17 @@ pub struct WalletApp { executed_input_count: u64, } +pub const PREFUNDED_BALANCE: u64 = 1_000_000; + impl WalletApp { pub fn new(_config: WalletConfig) -> Self { + let mut balances = HashMap::with_capacity(prefunded_addresses().len()); + for address in prefunded_addresses() { + balances.insert(*address, U256::from(PREFUNDED_BALANCE)); + } + Self { - balances: HashMap::new(), + balances, nonces: HashMap::new(), executed_input_count: 0, } @@ -70,6 +80,8 @@ impl Default for WalletApp { } impl Application for WalletApp { + const MAX_METHOD_PAYLOAD_BYTES: usize = WALLET_MAX_METHOD_PAYLOAD_BYTES; + fn current_user_nonce(&self, sender: Address) -> u32 { self.expected_nonce(&sender) } @@ -133,9 +145,6 @@ impl Application for WalletApp { self.credit(transfer.to, transfer.amount); } } - Some(Method::Deposit(deposit)) => { - self.credit(deposit.to, 
deposit.amount); - } Some(Method::Withdrawal(withdrawal)) => { let _ = self.debit_if_possible(sender, withdrawal.amount); } @@ -158,8 +167,10 @@ impl Application for WalletApp { #[cfg(test)] mod tests { - use super::{WalletApp, WalletConfig}; use alloy_primitives::{Address, U256}; + use ssz_derive::{Decode, Encode}; + + use super::{WalletApp, WalletConfig}; use sequencer_core::application::{Application, InvalidReason}; use sequencer_core::l2_tx::ValidUserOp; use sequencer_core::user_op::UserOp; @@ -204,4 +215,70 @@ mod tests { assert_eq!(app.current_user_nonce(sender), 1); assert_eq!(app.current_user_balance(sender), U256::from(7_u64)); } + + #[test] + fn wallet_starts_with_prefunded_anvil_accounts() { + let app = WalletApp::new(WalletConfig); + let addresses = super::prefunded_addresses(); + assert!(addresses.len() >= 2); + + for address in addresses.iter().take(2) { + assert_eq!( + app.current_user_balance(*address), + U256::from(super::PREFUNDED_BALANCE) + ); + } + } + + #[derive(PartialEq, Debug, Encode, Decode, Clone)] + struct LegacyDeposit { + amount: U256, + to: Address, + } + + #[derive(PartialEq, Debug, Encode, Decode, Clone)] + struct LegacyWithdrawal { + amount: U256, + } + + #[derive(PartialEq, Debug, Encode, Decode, Clone)] + struct LegacyTransfer { + amount: U256, + to: Address, + } + + #[derive(PartialEq, Debug, Encode, Decode, Clone)] + #[ssz(enum_behaviour = "union")] + enum LegacyMethod { + Withdrawal(LegacyWithdrawal), + Transfer(LegacyTransfer), + Deposit(LegacyDeposit), + } + + #[test] + fn legacy_deposit_payload_is_included_as_no_op() { + let mut app = WalletApp::new(WalletConfig); + let sender = super::prefunded_addresses()[0]; + let recipient = Address::from_slice(&[0x77; 20]); + let before_sender_nonce = app.current_user_nonce(sender); + let before_sender_balance = app.current_user_balance(sender); + let before_recipient = app.current_user_balance(recipient); + + let legacy = LegacyMethod::Deposit(LegacyDeposit { + amount: 
U256::from(123_u64), + to: recipient, + }); + let valid = ValidUserOp { + sender, + fee: 0, + data: ssz::Encode::as_ssz_bytes(&legacy), + }; + + app.execute_valid_user_op(&valid) + .expect("execute valid user op"); + + assert_eq!(app.current_user_nonce(sender), before_sender_nonce + 1); + assert_eq!(app.current_user_balance(sender), before_sender_balance); + assert_eq!(app.current_user_balance(recipient), before_recipient); + } } diff --git a/examples/canonical-app/Cargo.toml b/examples/canonical-app/Cargo.toml index 1d694f0..b8211ad 100644 --- a/examples/canonical-app/Cargo.toml +++ b/examples/canonical-app/Cargo.toml @@ -11,3 +11,12 @@ authors.workspace = true [dependencies] app-core = { path = "../app-core" } +sequencer-core = { path = "../../sequencer-core" } +trolley = { workspace = true } +alloy-primitives = { version = "1.4.1", features = ["serde", "k256"] } +alloy-sol-types = "1.4.1" +ssz = { package = "ethereum_ssz", version = "0.10" } + +[dev-dependencies] +k256 = "0.13" +types = { workspace = true } diff --git a/examples/canonical-app/Cross.toml b/examples/canonical-app/Cross.toml new file mode 100644 index 0000000..d6f5bf7 --- /dev/null +++ b/examples/canonical-app/Cross.toml @@ -0,0 +1,3 @@ +[target.riscv64gc-unknown-linux-musl] +# Pinning the cross image keeps the guest toolchain deterministic across teammates and CI. 
+image = "ghcr.io/cross-rs/riscv64gc-unknown-linux-musl@sha256:f5a375283c54578efcc6e61c78ea7661392c2e9b2e108afe89938d2f7b8b489d" diff --git a/examples/canonical-app/Dockerfile b/examples/canonical-app/Dockerfile new file mode 100644 index 0000000..d993874 --- /dev/null +++ b/examples/canonical-app/Dockerfile @@ -0,0 +1,31 @@ +# Stage 1: download `rollup-init` +# ----------------------------------------------------------------------------- +FROM riscv64/debian:stable-slim AS extractor + +ARG MACHINE_EMULATOR_TOOLS_VERSION=0.17.2 +ARG TOOLS_SHA512="4af9911a5a76738d526bfc2b5462cf96c9dee98ec8b23f3ca91ac4849d5761765f471b5e2e8779809bc4a26d2799f8e744622864fa549ada5941e21d999ff4be" + +ADD https://github.com/cartesi/machine-guest-tools/releases/download/v${MACHINE_EMULATOR_TOOLS_VERSION}/machine-guest-tools_riscv64.deb /tmp/tools.deb + +RUN echo "${TOOLS_SHA512} /tmp/tools.deb" | sha512sum -c - \ + && dpkg -x /tmp/tools.deb /tmp/out + + +# Stage 2: rootfs +# ----------------------------------------------------------------------------- +# alpine:3.22.2 (riscv64/alpine) +FROM riscv64/alpine@sha256:372839ff152f938e12282226fb5f9ddaef72f9662dcadbf9dd0de5ce287c694e + +# Add libgcc for Rust +RUN apk add --no-cache libgcc + +# Copy `cartesi-init` +COPY --from=extractor --chmod=755 /tmp/out/usr/sbin/cartesi-init /usr/sbin/cartesi-init + +RUN adduser -h /dapp -D dapp +ENV PATH="/dapp:${PATH}" +WORKDIR /dapp +COPY --chown=dapp:dapp --chmod=755 out/dapp . 
+ +# ENTRYPOINT ["dapp"] +# CMD ["dapp"] diff --git a/examples/canonical-app/justfile b/examples/canonical-app/justfile new file mode 100644 index 0000000..b66934a --- /dev/null +++ b/examples/canonical-app/justfile @@ -0,0 +1,47 @@ +set shell := ["bash", "-euo", "pipefail", "-c"] + +out_dir := "out" +linux_kernel := out_dir + "/linux.bin" +rootfs_tar := out_dir + "/canonical-rootfs.tar" +rootfs_ext2 := out_dir + "/canonical-rootfs.ext2" +machine_image := out_dir + "/canonical-machine-image" +dapp_binary := out_dir + "/dapp" + +download-deps: + mkdir -p {{out_dir}} + wget https://github.com/cartesi/image-kernel/releases/download/v0.20.0/linux-6.5.13-ctsi-1-v0.20.0.bin \ + -O {{linux_kernel}} + +build-dapp: + mkdir -p {{out_dir}} + CROSS_CONFIG=Cross.toml DOCKER_DEFAULT_PLATFORM=linux/amd64 cross build --package canonical-app --target riscv64gc-unknown-linux-musl --release + cp ../../target/riscv64gc-unknown-linux-musl/release/canonical-app {{dapp_binary}} + +build-rootfs: build-dapp + mkdir -p {{out_dir}} + docker buildx build \ + --platform linux/riscv64 \ + --output type=tar,dest={{rootfs_tar}} \ + --file Dockerfile \ + ../.. 
+ xgenext2fs -f -z -B 4096 -i 4096 -r +4096 -a {{rootfs_tar}} -L rootfs {{rootfs_ext2}} + +clean-machine-image: + rm -rf {{machine_image}} + +clean: + rm -rf {{out_dir}} + +build-machine-image: clean-machine-image build-rootfs + test -f {{linux_kernel}} || { echo "missing {{linux_kernel}}; run 'just setup' first"; exit 1; } + cartesi-machine \ + --ram-length=128Mi \ + --ram-image={{linux_kernel}} \ + --flash-drive=label:root,data_filename:{{rootfs_ext2}} \ + --append-init=WORKDIR="/dapp" \ + --append-entrypoint="/dapp/dapp" \ + --assert-rolling-template --final-hash \ + --store={{machine_image}} + +test-guest: build-machine-image + cargo run -p canonical-test diff --git a/examples/canonical-app/src/lib.rs b/examples/canonical-app/src/lib.rs new file mode 100644 index 0000000..e7b2c82 --- /dev/null +++ b/examples/canonical-app/src/lib.rs @@ -0,0 +1,6 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +pub mod scheduler; + +pub use scheduler::{SEQUENCER_ADDRESS, SchedulerConfig, run_scheduler_forever}; diff --git a/examples/canonical-app/src/main.rs b/examples/canonical-app/src/main.rs index de37086..d789aa7 100644 --- a/examples/canonical-app/src/main.rs +++ b/examples/canonical-app/src/main.rs @@ -1,7 +1,12 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) +use app_core::application::{WalletApp, WalletConfig}; +use canonical_app::{SchedulerConfig, run_scheduler_forever}; +use trolley::cmt::RollupCmt; + fn main() { - // Placeholder entrypoint for the future Cartesi-machine scheduler runtime. 
- let _ = app_core::application::WalletConfig; + let rollup = RollupCmt::try_new().expect("failed to initialize rollup"); + let app = WalletApp::new(WalletConfig); + run_scheduler_forever(rollup, app, SchedulerConfig::default()); } diff --git a/examples/canonical-app/src/scheduler/core.rs b/examples/canonical-app/src/scheduler/core.rs new file mode 100644 index 0000000..213bac5 --- /dev/null +++ b/examples/canonical-app/src/scheduler/core.rs @@ -0,0 +1,813 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use alloy_primitives::{Address, Signature, U256, address}; +use alloy_sol_types::Eip712Domain; +use alloy_sol_types::SolStruct; +use sequencer_core::application::Application; +use sequencer_core::batch::{Batch, Frame, WireUserOp}; +use std::collections::VecDeque; + +pub const SEQUENCER_ADDRESS: Address = address!("0x1111111111111111111111111111111111111111"); +pub const MAX_WAIT_BLOCKS: u64 = 1200; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct SchedulerConfig { + pub sequencer_address: Address, + pub max_wait_blocks: u64, +} + +impl Default for SchedulerConfig { + fn default() -> Self { + Self { + sequencer_address: SEQUENCER_ADDRESS, + max_wait_blocks: MAX_WAIT_BLOCKS, + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub(super) struct SchedulerInput { + pub sender: Address, + pub inclusion_block: u64, + pub domain: Eip712Domain, + pub payload: Vec, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(super) enum ProcessOutcome { + DirectEnqueued, + BatchExecuted, + BatchInvalid, +} + +#[derive(Debug)] +pub struct Scheduler { + app: A, + config: SchedulerConfig, + direct_q: VecDeque, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +struct QueuedDirectInput { + payload: Vec, + inclusion_block: u64, +} + +impl Scheduler { + pub fn new(app: A, config: SchedulerConfig) -> Self { + Self { + app, + config, + direct_q: VecDeque::new(), + } + } + + #[cfg(test)] + pub fn queued_direct_len(&self) -> 
usize { + self.direct_q.len() + } + + pub(super) fn process_input(&mut self, input: SchedulerInput) -> ProcessOutcome { + // Execute overdue directs before any input to keep backstop semantics explicit. + self.force_execute_overdue(input.inclusion_block); + + if input.sender != self.config.sequencer_address { + self.direct_q.push_back(QueuedDirectInput { + payload: input.payload, + inclusion_block: input.inclusion_block, + }); + ProcessOutcome::DirectEnqueued + } else { + self.process_batch_payload(input.inclusion_block, &input.domain, &input.payload) + } + } + + fn process_batch_payload( + &mut self, + inclusion_block: u64, + domain: &Eip712Domain, + payload: &[u8], + ) -> ProcessOutcome { + let Ok(batch): Result = ssz::Decode::from_ssz_bytes(payload) else { + return ProcessOutcome::BatchInvalid; + }; + + let Some((frame_head, frame_tail)) = batch.frames.split_first() else { + return ProcessOutcome::BatchExecuted; + }; + + if !self.batch_is_valid_for_block(inclusion_block, frame_head, frame_tail) { + return ProcessOutcome::BatchInvalid; + } + + if has_elapsed_since( + frame_head.safe_block, + self.config.max_wait_blocks, + inclusion_block, + ) { + // Invalidate old batches + return ProcessOutcome::BatchInvalid; + } + + for frame in &batch.frames { + self.drain_directs_safe_at(frame.safe_block); + self.execute_frame_user_ops(domain, frame); + } + + ProcessOutcome::BatchExecuted + } + + fn batch_is_valid_for_block(&self, inclusion_block: u64, head: &Frame, tail: &[Frame]) -> bool { + if head.safe_block > inclusion_block { + return false; + } + + let mut previous_safe_block = head.safe_block; + for frame in tail { + if frame.safe_block > inclusion_block || frame.safe_block < previous_safe_block { + return false; + } + + previous_safe_block = frame.safe_block; + } + + true + } + + fn execute_frame_user_ops(&mut self, domain: &Eip712Domain, frame: &Frame) { + for user_op in &frame.user_ops { + if let Some(sender) = self.recover_sender(domain, user_op) { + let plain = 
user_op.to_user_op(); + if u64::from(plain.max_fee) < frame.fee_price { + eprintln!("scheduler skipped frame user-op due to max_fee < fee_price"); + continue; + } + if let Err(err) = + self.app + .validate_and_execute_user_op(sender, &plain, frame.fee_price) + { + eprintln!("scheduler skipped frame user-op due to app error: {err}"); + } + } else { + eprintln!("scheduler skipped frame user-op due to invalid signature"); + } + } + } + + fn recover_sender(&self, domain: &Eip712Domain, wire_user_op: &WireUserOp) -> Option
{ + if wire_user_op.signature.len() != WireUserOp::SIGNATURE_BYTES { + return None; + } + let signature = Signature::from_raw(wire_user_op.signature.as_slice()).ok()?; + let user_op = wire_user_op.to_user_op(); + let signing_hash = user_op.eip712_signing_hash(domain); + signature.recover_address_from_prehash(&signing_hash).ok() + } + + fn drain_directs_safe_at(&mut self, safe_block: u64) { + while let Some(front) = self.direct_q.front() { + if front.inclusion_block > safe_block { + break; + } + let queued = self.direct_q.pop_front().expect("queue front must exist"); + if let Err(err) = self.app.execute_direct_input(queued.payload.as_slice()) { + eprintln!("scheduler failed to execute drained direct input: {err}"); + } + } + } + + fn force_execute_overdue(&mut self, current_block: u64) { + while let Some(front) = self.direct_q.front() { + if has_elapsed_since( + front.inclusion_block, + self.config.max_wait_blocks, + current_block, + ) { + let status = self.app.execute_direct_input(front.payload.as_slice()); + if let Err(err) = status { + eprintln!("scheduler failed to execute overdue direct input: {err}"); + } + + self.direct_q.pop_front().expect("queue front must exist"); + } else { + break; + } + } + } +} + +fn has_elapsed_since(start_block: u64, wait_blocks: u64, current_block: u64) -> bool { + current_block.saturating_sub(start_block) >= wait_blocks +} + +pub(super) fn input_domain(chain_id: u64, verifying_contract: Address) -> Eip712Domain { + Eip712Domain { + name: None, + version: None, + chain_id: Some(U256::from(chain_id)), + verifying_contract: Some(verifying_contract), + salt: None, + } +} + +pub(super) fn block_to_u64(block: U256) -> u64 { + // Solidity ABI exposes block numbers as uint256, but scheduler semantics use u64. + // A value that does not fit u64 is a malformed host input for this prototype. 
+ u64::try_from(block).expect("block number does not fit u64") +} + +pub(super) fn chain_id_to_u64(chain_id: U256) -> u64 { + u64::try_from(chain_id).expect("chain id does not fit u64") +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::address; + use k256::ecdsa::SigningKey; + use k256::ecdsa::signature::hazmat::PrehashSigner; + use sequencer_core::user_op::UserOp; + + #[cfg(test)] + #[derive(Default)] + struct RecordingApp { + executed: Vec, + balances: std::collections::HashMap, + nonces: std::collections::HashMap, + } + + #[cfg(test)] + #[derive(Debug, Clone, PartialEq, Eq)] + enum RecordedTx { + UserOp(u8), + Direct(u8), + } + + #[cfg(test)] + impl RecordingApp { + fn events(&self) -> &[RecordedTx] { + self.executed.as_slice() + } + + fn balance_of(&self, sender: Address) -> U256 { + *self.balances.get(&sender).unwrap_or(&U256::ZERO) + } + + fn nonce_of(&self, sender: Address) -> u32 { + self.nonces.get(&sender).copied().unwrap_or(0) + } + + fn credit(&mut self, sender: Address, amount: u64) { + let current = self.balance_of(sender); + self.balances + .insert(sender, current.saturating_add(U256::from(amount))); + } + } + + #[cfg(test)] + impl Application for RecordingApp { + const MAX_METHOD_PAYLOAD_BYTES: usize = app_core::application::MAX_METHOD_PAYLOAD_BYTES; + + fn current_user_nonce(&self, _sender: Address) -> u32 { + self.nonce_of(_sender) + } + + fn current_user_balance(&self, _sender: Address) -> U256 { + self.balance_of(_sender) + } + + fn validate_user_op( + &self, + sender: Address, + user_op: &sequencer_core::user_op::UserOp, + current_fee: u64, + ) -> Result<(), sequencer_core::application::InvalidReason> { + let expected_nonce = self.nonce_of(sender); + if user_op.nonce != expected_nonce { + return Err(sequencer_core::application::InvalidReason::InvalidNonce { + expected: expected_nonce, + got: user_op.nonce, + }); + } + if u64::from(user_op.max_fee) < current_fee { + return 
Err(sequencer_core::application::InvalidReason::InvalidMaxFee { + max_fee: user_op.max_fee, + base_fee: current_fee, + }); + } + let required = U256::from(current_fee); + let balance = self.balance_of(sender); + if balance < required { + return Err( + sequencer_core::application::InvalidReason::InsufficientGasBalance { + required, + available: balance, + }, + ); + } + Ok(()) + } + + fn execute_valid_user_op( + &mut self, + user_op: &sequencer_core::l2_tx::ValidUserOp, + ) -> Result<(), sequencer_core::application::AppError> { + let sender = user_op.sender; + let fee = U256::from(user_op.fee); + let balance = self.balance_of(sender); + if balance < fee { + return Err(sequencer_core::application::AppError::Internal { + reason: "validated user op cannot pay fee".to_string(), + }); + } + self.balances.insert(sender, balance - fee); + let next_nonce = self.nonce_of(sender).wrapping_add(1); + self.nonces.insert(sender, next_nonce); + + let marker = user_op.data.first().copied().unwrap_or_default(); + self.executed.push(RecordedTx::UserOp(marker)); + Ok(()) + } + + fn execute_direct_input( + &mut self, + payload: &[u8], + ) -> Result<(), sequencer_core::application::AppError> { + let marker = payload.first().copied().unwrap_or(0); + self.executed.push(RecordedTx::Direct(marker)); + Ok(()) + } + } + + const SEQUENCER: Address = address!("0x1111111111111111111111111111111111111111"); + const DIRECT_SENDER: Address = address!("0x2222222222222222222222222222222222222222"); + const TEST_CHAIN_ID: u64 = 1; + const TEST_VERIFYING_CONTRACT: Address = Address::ZERO; + + fn test_domain() -> Eip712Domain { + input_domain(TEST_CHAIN_ID, TEST_VERIFYING_CONTRACT) + } + + fn direct_input(block: u64, marker: u8) -> SchedulerInput { + SchedulerInput { + sender: DIRECT_SENDER, + inclusion_block: block, + domain: test_domain(), + payload: vec![marker], + } + } + + fn batch_input(block: u64, batch: Batch) -> SchedulerInput { + SchedulerInput { + sender: SEQUENCER, + inclusion_block: block, + 
domain: test_domain(), + payload: ssz::Encode::as_ssz_bytes(&batch), + } + } + + fn address_from_signing_key(signing_key: &SigningKey) -> Address { + let verifying = signing_key.verifying_key().to_encoded_point(false); + Address::from_raw_public_key(&verifying.as_bytes()[1..]) + } + + fn sign_wire_user_op( + domain: &Eip712Domain, + signing_key: &SigningKey, + nonce: u32, + max_fee: u32, + data: Vec, + ) -> WireUserOp { + let user_op = UserOp { + nonce, + max_fee, + data: data.clone().into(), + }; + let hash = user_op.eip712_signing_hash(domain); + let k256_sig = signing_key + .sign_prehash(hash.as_slice()) + .expect("sign user op hash"); + + let sender = address_from_signing_key(signing_key); + let signature = [false, true] + .into_iter() + .map(|parity| Signature::from_signature_and_parity(k256_sig, parity)) + .find(|candidate| { + candidate + .recover_address_from_prehash(&hash) + .ok() + .map(|value| value == sender) + .unwrap_or(false) + }) + .expect("recoverable parity for signature"); + + WireUserOp { + nonce, + max_fee, + data, + signature: signature.as_bytes().to_vec(), + } + } + + #[test] + fn batch_drains_safe_directs_before_executing_user_ops() { + let mut scheduler = Scheduler::new( + RecordingApp::default(), + SchedulerConfig { + sequencer_address: SEQUENCER, + max_wait_blocks: 100, + }, + ); + + assert_eq!( + scheduler.process_input(direct_input(10, 1)), + ProcessOutcome::DirectEnqueued + ); + + let signing_key = SigningKey::from_bytes((&[1_u8; 32]).into()).expect("signing key"); + + let batch = Batch { + frames: vec![Frame { + user_ops: vec![sign_wire_user_op( + &test_domain(), + &signing_key, + 0, + 1, + vec![2], + )], + safe_block: 10, + fee_price: 0, + }], + }; + + assert_eq!( + scheduler.process_input(batch_input(20, batch)), + ProcessOutcome::BatchExecuted + ); + assert_eq!( + scheduler.app.events(), + [RecordedTx::Direct(1), RecordedTx::UserOp(2)] + ); + assert_eq!(scheduler.queued_direct_len(), 0); + } + + #[test] + fn 
pre_batch_backstop_executes_overdue_directs_before_user_ops() { + let mut scheduler = Scheduler::new( + RecordingApp::default(), + SchedulerConfig { + sequencer_address: SEQUENCER, + max_wait_blocks: 5, + }, + ); + + scheduler.process_input(direct_input(1, 1)); + let signing_key = SigningKey::from_bytes((&[2_u8; 32]).into()).expect("signing key"); + let batch = Batch { + frames: vec![Frame { + user_ops: vec![sign_wire_user_op( + &test_domain(), + &signing_key, + 0, + 1, + vec![2], + )], + safe_block: 2, + fee_price: 0, + }], + }; + + scheduler.process_input(batch_input(6, batch)); + assert_eq!( + scheduler.app.events(), + [RecordedTx::Direct(1), RecordedTx::UserOp(2)] + ); + } + + #[test] + fn stale_batch_is_invalidated() { + let mut scheduler = Scheduler::new( + RecordingApp::default(), + SchedulerConfig { + sequencer_address: SEQUENCER, + max_wait_blocks: 5, + }, + ); + + scheduler.process_input(direct_input(1, 9)); + let signing_key = SigningKey::from_bytes((&[3_u8; 32]).into()).expect("signing key"); + let stale_batch = Batch { + frames: vec![Frame { + user_ops: vec![sign_wire_user_op( + &test_domain(), + &signing_key, + 0, + 1, + vec![7], + )], + safe_block: 4, + fee_price: 0, + }], + }; + + let outcome = scheduler.process_input(batch_input(10, stale_batch)); + assert_eq!(outcome, ProcessOutcome::BatchInvalid); + assert_eq!(scheduler.app.events(), [RecordedTx::Direct(9)]); + } + + #[test] + fn non_monotonic_safe_blocks_invalidate_batch() { + let mut scheduler = Scheduler::new( + RecordingApp::default(), + SchedulerConfig { + sequencer_address: SEQUENCER, + max_wait_blocks: 100, + }, + ); + + let signing_key_a = SigningKey::from_bytes((&[4_u8; 32]).into()).expect("signing key a"); + let signing_key_b = SigningKey::from_bytes((&[5_u8; 32]).into()).expect("signing key b"); + let invalid = Batch { + frames: vec![ + Frame { + user_ops: vec![sign_wire_user_op( + &test_domain(), + &signing_key_a, + 0, + 1, + vec![1], + )], + safe_block: 8, + fee_price: 0, + }, + 
Frame { + user_ops: vec![sign_wire_user_op( + &test_domain(), + &signing_key_b, + 0, + 1, + vec![2], + )], + safe_block: 7, + fee_price: 0, + }, + ], + }; + + assert_eq!( + scheduler.process_input(batch_input(10, invalid)), + ProcessOutcome::BatchInvalid + ); + assert!(scheduler.app.events().is_empty()); + } + + #[test] + fn frame_safe_block_above_inclusion_block_invalidates_batch() { + let mut scheduler = Scheduler::new( + RecordingApp::default(), + SchedulerConfig { + sequencer_address: SEQUENCER, + max_wait_blocks: 100, + }, + ); + + let signing_key = SigningKey::from_bytes((&[6_u8; 32]).into()).expect("signing key"); + let invalid = Batch { + frames: vec![Frame { + user_ops: vec![sign_wire_user_op( + &test_domain(), + &signing_key, + 0, + 1, + vec![9], + )], + safe_block: 11, + fee_price: 0, + }], + }; + + assert_eq!( + scheduler.process_input(batch_input(10, invalid)), + ProcessOutcome::BatchInvalid + ); + assert!(scheduler.app.events().is_empty()); + } + + #[test] + fn frame_drain_uses_consistent_inclusive_safe_block_rule() { + let mut scheduler = Scheduler::new( + RecordingApp::default(), + SchedulerConfig { + sequencer_address: SEQUENCER, + max_wait_blocks: 100, + }, + ); + + scheduler.process_input(direct_input(10, 1)); + scheduler.process_input(direct_input(11, 2)); + let batch = Batch { + frames: vec![Frame { + user_ops: vec![], + safe_block: 10, + fee_price: 0, + }], + }; + + scheduler.process_input(batch_input(12, batch)); + assert_eq!(scheduler.app.events(), [RecordedTx::Direct(1)]); + assert_eq!(scheduler.queued_direct_len(), 1); + } + + #[test] + fn decode_failure_invalidates_batch_and_keeps_running() { + let mut scheduler = Scheduler::new( + RecordingApp::default(), + SchedulerConfig { + sequencer_address: SEQUENCER, + max_wait_blocks: 100, + }, + ); + + let bad_batch = SchedulerInput { + sender: SEQUENCER, + inclusion_block: 10, + domain: test_domain(), + payload: vec![0xFF, 0xEE, 0xDD], + }; + assert_eq!( + scheduler.process_input(bad_batch), + 
ProcessOutcome::BatchInvalid + ); + + assert_eq!( + scheduler.process_input(direct_input(11, 3)), + ProcessOutcome::DirectEnqueued + ); + assert_eq!(scheduler.queued_direct_len(), 1); + } + + #[test] + fn backstop_drains_all_overdue_directs() { + let mut scheduler = Scheduler::new( + RecordingApp::default(), + SchedulerConfig { + sequencer_address: SEQUENCER, + max_wait_blocks: 5, + }, + ); + + scheduler.process_input(direct_input(1, 1)); + scheduler.process_input(direct_input(2, 2)); + scheduler.process_input(direct_input(8, 3)); + + assert_eq!( + scheduler.app.events(), + [RecordedTx::Direct(1), RecordedTx::Direct(2)] + ); + assert_eq!(scheduler.queued_direct_len(), 1); + } + + #[test] + fn invalid_signature_is_skipped() { + let mut scheduler = Scheduler::new( + RecordingApp::default(), + SchedulerConfig { + sequencer_address: SEQUENCER, + max_wait_blocks: 100, + }, + ); + + let batch = Batch { + frames: vec![Frame { + user_ops: vec![WireUserOp { + nonce: 0, + max_fee: 0, + data: vec![7], + signature: vec![0_u8; WireUserOp::SIGNATURE_BYTES], + }], + safe_block: 1, + fee_price: 0, + }], + }; + + assert_eq!( + scheduler.process_input(batch_input(1, batch)), + ProcessOutcome::BatchExecuted + ); + assert!(scheduler.app.events().is_empty()); + } + + #[test] + fn invalid_nonce_max_fee_or_balance_is_skipped() { + let mut scheduler = Scheduler::new( + RecordingApp::default(), + SchedulerConfig { + sequencer_address: SEQUENCER, + max_wait_blocks: 100, + }, + ); + let signing_key = SigningKey::from_bytes((&[9_u8; 32]).into()).expect("signing key"); + let sender = address_from_signing_key(&signing_key); + scheduler.app.credit(sender, 1); + + let bad_nonce = sign_wire_user_op(&test_domain(), &signing_key, 1, 10, vec![1]); + let bad_max_fee = sign_wire_user_op(&test_domain(), &signing_key, 0, 0, vec![2]); + let insufficient = sign_wire_user_op(&test_domain(), &signing_key, 0, 10, vec![3]); + let valid = sign_wire_user_op(&test_domain(), &signing_key, 0, 10, vec![4]); + + let 
batch = Batch { + frames: vec![ + Frame { + user_ops: vec![bad_nonce], + safe_block: 1, + fee_price: 1, + }, + Frame { + user_ops: vec![bad_max_fee], + safe_block: 1, + fee_price: 5, + }, + Frame { + user_ops: vec![insufficient], + safe_block: 1, + fee_price: 2, + }, + Frame { + user_ops: vec![valid], + safe_block: 1, + fee_price: 1, + }, + ], + }; + + assert_eq!( + scheduler.process_input(batch_input(1, batch)), + ProcessOutcome::BatchExecuted + ); + assert_eq!(scheduler.app.events(), [RecordedTx::UserOp(4)]); + } + + #[test] + fn empty_batches_are_valid_noops() { + let mut scheduler = Scheduler::new( + RecordingApp::default(), + SchedulerConfig { + sequencer_address: SEQUENCER, + max_wait_blocks: 100, + }, + ); + + let batch = Batch { frames: vec![] }; + + assert_eq!( + scheduler.process_input(batch_input(10, batch)), + ProcessOutcome::BatchExecuted + ); + assert!(scheduler.app.events().is_empty()); + } + + #[test] + fn batch_uses_input_domain_for_signature_recovery() { + let mut scheduler = Scheduler::new( + RecordingApp::default(), + SchedulerConfig { + sequencer_address: SEQUENCER, + max_wait_blocks: 100, + }, + ); + let signing_key = SigningKey::from_bytes((&[10_u8; 32]).into()).expect("signing key"); + let sender = address_from_signing_key(&signing_key); + scheduler.app.credit(sender, 1); + let batch_domain = input_domain( + TEST_CHAIN_ID + 7, + address!("0x3333333333333333333333333333333333333333"), + ); + let batch = Batch { + frames: vec![Frame { + user_ops: vec![sign_wire_user_op( + &batch_domain, + &signing_key, + 0, + 1, + vec![9], + )], + safe_block: 1, + fee_price: 0, + }], + }; + + let input = SchedulerInput { + sender: SEQUENCER, + inclusion_block: 1, + domain: batch_domain, + payload: ssz::Encode::as_ssz_bytes(&batch), + }; + + assert_eq!( + scheduler.process_input(input), + ProcessOutcome::BatchExecuted + ); + assert_eq!(scheduler.app.events(), [RecordedTx::UserOp(9)]); + } +} diff --git a/examples/canonical-app/src/scheduler/mod.rs 
b/examples/canonical-app/src/scheduler/mod.rs new file mode 100644 index 0000000..0f4c0c5 --- /dev/null +++ b/examples/canonical-app/src/scheduler/mod.rs @@ -0,0 +1,161 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +mod core; + +pub use core::{SEQUENCER_ADDRESS, SchedulerConfig}; + +use sequencer_core::application::Application; +use trolley::{Rollup, RollupRequest}; + +pub fn run_scheduler_forever( + mut rollup: R, + app: A, + scheduler_config: SchedulerConfig, +) -> ! { + let mut scheduler = core::Scheduler::new(app, scheduler_config); + + loop { + match rollup.next_input() { + Ok(RollupRequest::Advance { metadata, payload }) => { + let inclusion_block = core::block_to_u64(metadata.block_number); + let domain = core::input_domain( + core::chain_id_to_u64(metadata.chain_id), + metadata.app_contract, + ); + + let input = core::SchedulerInput { + sender: metadata.msg_sender, + inclusion_block, + domain, + payload, + }; + + if matches!( + scheduler.process_input(input), + core::ProcessOutcome::BatchInvalid + ) { + let _ = rollup.emit_report(b"scheduler dropped invalid batch"); + } + } + Ok(RollupRequest::Inspect { .. 
}) => { + let _ = rollup.emit_report(b"scheduler inspect endpoint not implemented"); + } + Err(err) => panic!("scheduler failed while reading next input: {err}"), + } + } +} + +#[cfg(test)] +mod tests { + use std::collections::VecDeque; + use std::sync::{Arc, Mutex}; + + use alloy_primitives::{Address, U256}; + use app_core::application::{WalletApp, WalletConfig}; + use trolley::{InputMetadata, RollupError}; + + use super::*; + + struct MockRollup { + inputs: VecDeque>, + reports: Arc>>>, + } + + impl MockRollup { + fn with_inputs( + inputs: Vec>, + ) -> (Self, Arc>>>) { + let reports = Arc::new(Mutex::new(Vec::new())); + ( + Self { + inputs: VecDeque::from(inputs), + reports: Arc::clone(&reports), + }, + reports, + ) + } + } + + impl Rollup for MockRollup { + fn next_input(&mut self) -> trolley::RollupResult { + self.inputs + .pop_front() + .unwrap_or(Err(RollupError::CmtCallFailed { + operation: "next_input", + code: -1, + })) + } + + fn revert(&mut self) -> ! { + panic!("mock rollup revert is not used in this test"); + } + + fn gio(&mut self, _domain: u16, _id: &[u8]) -> trolley::RollupResult<(u16, Vec)> { + unimplemented!("mock rollup gio is not used in this test"); + } + + fn emit_voucher(&mut self, _voucher: &types::Voucher) -> trolley::RollupResult<()> { + unimplemented!("mock rollup emit_voucher is not used in this test"); + } + + fn emit_notice(&mut self, _notice: &types::Notice) -> trolley::RollupResult<()> { + unimplemented!("mock rollup emit_notice is not used in this test"); + } + + fn emit_report(&mut self, report: &[u8]) -> trolley::RollupResult<()> { + self.reports + .lock() + .expect("poisoned reports mutex") + .push(report.to_vec()); + Ok(()) + } + } + + fn metadata(sender: Address, block: u64) -> InputMetadata { + InputMetadata { + chain_id: U256::from(1_u64), + app_contract: Address::ZERO, + msg_sender: sender, + block_number: U256::from(block), + block_timestamp: U256::ZERO, + prev_randao: U256::ZERO, + index: U256::ZERO, + } + } + + #[test] + 
fn run_scheduler_emits_report_for_invalid_batch_before_rollup_error() { + let sequencer = SchedulerConfig::default().sequencer_address; + let invalid_batch_input = RollupRequest::Advance { + metadata: metadata(sequencer, 10), + payload: vec![0xFF, 0xEE, 0xDD], + }; + let terminal_err = Err(RollupError::CmtCallFailed { + operation: "next_input", + code: -22, + }); + let (rollup, reports) = + MockRollup::with_inputs(vec![Ok(invalid_batch_input), terminal_err]); + + let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { + run_scheduler_forever( + rollup, + WalletApp::new(WalletConfig), + SchedulerConfig::default(), + ) + })); + + assert!( + result.is_err(), + "scheduler loop should panic on rollup error" + ); + let reports = reports.lock().expect("poisoned reports mutex"); + assert!( + reports + .iter() + .any(|report| report.as_slice() == b"scheduler dropped invalid batch"), + "missing invalid batch report, got: {reports:?}" + ); + } +} diff --git a/examples/canonical-test/Cargo.toml b/examples/canonical-test/Cargo.toml new file mode 100644 index 0000000..43ca9ad --- /dev/null +++ b/examples/canonical-test/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "canonical-test" +version.workspace = true +edition.workspace = true +license.workspace = true +description.workspace = true +homepage.workspace = true +repository.workspace = true +readme = "../../README.md" +authors.workspace = true + +[[bin]] +name = "canonical-test" +path = "src/main.rs" + +[dependencies] +canonical-app = { path = "../canonical-app" } +testsi = { workspace = true } +types = { workspace = true } diff --git a/examples/canonical-test/src/main.rs b/examples/canonical-test/src/main.rs new file mode 100644 index 0000000..b815794 --- /dev/null +++ b/examples/canonical-test/src/main.rs @@ -0,0 +1,39 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use canonical_app::SEQUENCER_ADDRESS; +use std::path::PathBuf; +use 
types::alloy_primitives::Address; + +testsi::testsi_main!(); + +#[testsi::test_dapp(kind("scheduler"))] +pub fn scheduler_reports_invalid_batch_from_guest() -> testsi::TestResult { + let machine_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("../canonical-app/out/canonical-machine-image"); + let mut machine = testsi::MachineBuilder::load_from(machine_path) + .at_chain(31337) + .deployed_at(Address::ZERO) + .no_console_putchar(false) + .try_build()?; + + let input = testsi::InputBuilder::from_address(SEQUENCER_ADDRESS) + .at_block(10) + .with_payload(&[0xff, 0xee, 0xdd]); + + let (outputs, reports) = machine.advance_state(input)?; + + assert!( + outputs.list().is_empty(), + "invalid batch smoke test should not emit notices/vouchers, got {:?}", + outputs.list() + ); + assert!( + reports + .iter() + .any(|report| report.as_slice() == b"scheduler dropped invalid batch"), + "expected invalid batch report, got {reports:?}" + ); + + Ok(()) +} diff --git a/justfile b/justfile index 4351b10..9333e51 100644 --- a/justfile +++ b/justfile @@ -18,7 +18,23 @@ test-sequencer: cargo test -p sequencer --test ws_broadcaster -- --test-threads=1 bench target="all": - just --justfile benchmarks/justfile {{target}} + just -f benchmarks/justfile {{target}} + +setup: + just -f examples/canonical-app/justfile download-deps + just -f benchmarks/justfile setup + +canonical-build-machine-image: + just -f examples/canonical-app/justfile build-machine-image + +canonical-test-guest: + just -f examples/canonical-app/justfile test-guest + +clean: + cargo clean + rm -f sequencer.db sequencer.db-shm sequencer.db-wal + just -f examples/canonical-app/justfile clean + just -f benchmarks/justfile clean fmt: cargo fmt --all diff --git a/sequencer-core/src/api.rs b/sequencer-core/src/api.rs index 955f382..b7a2342 100644 --- a/sequencer-core/src/api.rs +++ b/sequencer-core/src/api.rs @@ -1,6 +1,10 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see 
LICENSE) +use alloy_primitives::{Address, Signature}; +use alloy_sol_types::{Eip712Domain, SolStruct}; +use thiserror::Error; + use crate::broadcast::BroadcastTxMessage; use crate::user_op::{SignedUserOp, UserOp}; use serde::{Deserialize, Serialize}; @@ -20,6 +24,91 @@ impl TxRequest { // Conservative wire-level cap for TxRequest JSON. It intentionally leaves headroom for field // names, quotes, separators, and decimal nonce/max_fee rendering. pub const MAX_JSON_BYTES_RECOMMENDED: usize = 4 * 1024; + + pub fn into_signed_user_op( + self, + domain: &Eip712Domain, + max_user_op_data_bytes: usize, + ) -> Result { + self.validate_hex_lengths()?; + self.validate_payload_size(max_user_op_data_bytes)?; + + let signature = self.decode_signature()?; + let expected_sender = self.decode_address()?; + let recovered_sender = recover_sender(&self.message, &signature, domain)?; + + if expected_sender != recovered_sender { + return Err(TxRequestError::invalid_signature("sender mismatch")); + } + + Ok(SignedUserOp { + sender: recovered_sender, + signature, + user_op: self.message, + }) + } + + fn validate_hex_lengths(&self) -> Result<(), TxRequestError> { + if self.signature.len() != Self::SIGNATURE_HEX_LEN { + return Err(TxRequestError::bad_request(format!( + "signature must be {} hex chars (0x + 65 bytes)", + Self::SIGNATURE_HEX_LEN + ))); + } + if self.sender.len() != Self::ADDRESS_HEX_LEN { + return Err(TxRequestError::bad_request(format!( + "sender must be {} hex chars (0x + 20 bytes)", + Self::ADDRESS_HEX_LEN + ))); + } + Ok(()) + } + + fn validate_payload_size(&self, max_user_op_data_bytes: usize) -> Result<(), TxRequestError> { + let user_op_data_len = self.message.data.len(); + if user_op_data_len > max_user_op_data_bytes { + return Err(TxRequestError::bad_request(format!( + "user op payload too large: max {} bytes, got {} bytes", + max_user_op_data_bytes, user_op_data_len + ))); + } + Ok(()) + } + + fn decode_signature(&self) -> Result { + let signature_bytes = + 
decode_hex_0x(self.signature.as_str()).map_err(TxRequestError::bad_request)?; + if signature_bytes.len() != SignedUserOp::SIGNATURE_BYTES { + return Err(TxRequestError::bad_request("signature must be 65 bytes")); + } + parse_signature(&signature_bytes) + } + + fn decode_address(&self) -> Result { + let bytes = decode_hex_0x(self.sender.as_str()).map_err(TxRequestError::bad_request)?; + if bytes.len() != Self::ADDRESS_BYTES { + return Err(TxRequestError::bad_request("address must be 20 bytes")); + } + Ok(Address::from_slice(&bytes)) + } +} + +#[derive(Debug, Error, Clone)] +pub enum TxRequestError { + #[error("{0}")] + BadRequest(String), + #[error("{0}")] + InvalidSignature(String), +} + +impl TxRequestError { + pub fn bad_request(message: impl Into) -> Self { + Self::BadRequest(message.into()) + } + + pub fn invalid_signature(message: impl Into) -> Self { + Self::InvalidSignature(message.into()) + } } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -30,3 +119,38 @@ pub struct TxResponse { } pub type WsTxMessage = BroadcastTxMessage; + +fn decode_hex_0x(value: &str) -> Result, String> { + if !value.starts_with("0x") { + return Err("hex string must start with 0x".to_string()); + } + alloy_primitives::hex::decode(value).map_err(|err| format!("invalid hex: {err}")) +} + +fn recover_sender( + user_op: &UserOp, + signature: &Signature, + domain: &Eip712Domain, +) -> Result { + let signing_hash = user_op.eip712_signing_hash(domain); + signature + .recover_address_from_prehash(&signing_hash) + .map_err(|_| TxRequestError::invalid_signature("cannot recover sender")) +} + +fn parse_signature(bytes: &[u8]) -> Result { + Signature::from_raw(bytes).map_err(|err| match err { + alloy_primitives::SignatureError::FromBytes(_) => { + TxRequestError::bad_request("signature must be 65 bytes") + } + alloy_primitives::SignatureError::FromHex(_) => { + TxRequestError::bad_request("invalid signature hex") + } + alloy_primitives::SignatureError::InvalidParity(_) => { + 
TxRequestError::invalid_signature("invalid signature parity") + } + alloy_primitives::SignatureError::K256(_) => { + TxRequestError::invalid_signature("invalid signature") + } + }) +} diff --git a/sequencer-core/src/application/mod.rs b/sequencer-core/src/application/mod.rs index 77a7514..150b624 100644 --- a/sequencer-core/src/application/mod.rs +++ b/sequencer-core/src/application/mod.rs @@ -1,16 +1,12 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) -mod method; - use crate::l2_tx::ValidUserOp; use crate::user_op::UserOp; use alloy_primitives::{Address, U256}; use std::fmt; use thiserror::Error; -pub use method::{Deposit, Method, Transfer, Withdrawal}; - #[derive(Debug, Error)] pub enum AppError { #[error("internal: {reason}")] @@ -62,6 +58,8 @@ impl fmt::Display for InvalidReason { } pub trait Application: Send { + const MAX_METHOD_PAYLOAD_BYTES: usize; + fn current_user_nonce(&self, sender: Address) -> u32; fn current_user_balance(&self, sender: Address) -> U256; diff --git a/sequencer-core/src/batch.rs b/sequencer-core/src/batch.rs new file mode 100644 index 0000000..b126352 --- /dev/null +++ b/sequencer-core/src/batch.rs @@ -0,0 +1,37 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use crate::user_op::UserOp; +use ssz_derive::{Decode, Encode}; + +#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)] +pub struct Batch { + pub frames: Vec, +} + +#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)] +pub struct Frame { + pub user_ops: Vec, + pub safe_block: u64, + pub fee_price: u64, +} + +#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)] +pub struct WireUserOp { + pub nonce: u32, + pub max_fee: u32, + pub data: Vec, + pub signature: Vec, +} + +impl WireUserOp { + pub const SIGNATURE_BYTES: usize = 65; + + pub fn to_user_op(&self) -> UserOp { + UserOp { + nonce: self.nonce, + max_fee: self.max_fee, + data: self.data.clone().into(), 
+ } + } +} diff --git a/sequencer-core/src/lib.rs b/sequencer-core/src/lib.rs index 1f2fa25..d807cd3 100644 --- a/sequencer-core/src/lib.rs +++ b/sequencer-core/src/lib.rs @@ -3,6 +3,7 @@ pub mod api; pub mod application; +pub mod batch; pub mod broadcast; pub mod l2_tx; pub mod user_op; diff --git a/sequencer-core/src/user_op.rs b/sequencer-core/src/user_op.rs index a22ebd9..fde6d56 100644 --- a/sequencer-core/src/user_op.rs +++ b/sequencer-core/src/user_op.rs @@ -25,18 +25,11 @@ impl SignedUserOp { pub const SIGNATURE_BYTES: usize = 65; pub const NONCE_BYTES: usize = 4; pub const MAX_FEE_BYTES: usize = 4; - // Method is SSZ enum-union encoded; Transfer includes a 1-byte union tag + 32-byte amount + 20-byte recipient. - pub const MAX_METHOD_PAYLOAD_BYTES: usize = 1 + 32 + 20; - pub const MAX_BATCH_BYTES_UPPER_BOUND: usize = Self::SIGNATURE_BYTES - + Self::NONCE_BYTES - + Self::MAX_FEE_BYTES - + Self::MAX_METHOD_PAYLOAD_BYTES; - pub const fn max_batch_bytes_upper_bound() -> usize { - Self::MAX_BATCH_BYTES_UPPER_BOUND - } + pub const MAX_BATCH_METADATA_BYTES: usize = + Self::SIGNATURE_BYTES + Self::NONCE_BYTES + Self::MAX_FEE_BYTES; - pub const fn batch_bytes_upper_bound_for_data_len(data_len: usize) -> usize { - Self::SIGNATURE_BYTES + Self::NONCE_BYTES + Self::MAX_FEE_BYTES + data_len + pub const fn max_batch_metadata() -> usize { + Self::MAX_BATCH_METADATA_BYTES } } diff --git a/sequencer/Cargo.toml b/sequencer/Cargo.toml index 1cdc57f..4343aa4 100644 --- a/sequencer/Cargo.toml +++ b/sequencer/Cargo.toml @@ -12,29 +12,26 @@ authors.workspace = true [dependencies] app-core = { path = "../examples/app-core" } sequencer-core = { path = "../sequencer-core" } -alloy = { version = "1.4", default-features = false, features = ["contract", "provider-http", "rpc-types", "eips"] } -alloy-primitives = { version = "1.4.1", features = ["serde", "k256"] } -alloy-sol-types = "1.4.1" -cartesi-rollups-contracts = "2.1.1" -async-recursion = "1.0" axum = { version = "0.8.8", 
features = ["ws"] } -rusqlite = { version = "0.38.0", features = ["bundled"] } -rusqlite_migration = "2.3.0" +tokio = { version = "1.35", features = ["macros", "rt-multi-thread", "sync", "time", "net", "signal"] } serde = { version = "1", features = ["derive"] } serde_json = "1" -ssz = { package = "ethereum_ssz", version = "0.10" } -ssz_derive = { package = "ethereum_ssz_derive", version = "0.10" } -thiserror = "1" -tokio = { version = "1.35", features = ["macros", "rt-multi-thread", "sync", "time", "net"] } -url = "2" tracing = "0.1" tracing-subscriber = { version = "0.3", features = ["env-filter"] } tower-http = { version = "0.6.8", features = ["trace"] } -tower = { version = "0.5", features = ["limit", "load-shed", "util"] } +rusqlite = { version = "0.38.0", features = ["bundled"] } +rusqlite_migration = "2.3.0" +alloy-primitives = { version = "1.4.1", features = ["serde", "k256"] } +alloy-sol-types = "1.4.1" +alloy = { version = "1.0", features = ["contract", "network", "reqwest", "rpc-types", "sol-types", "node-bindings"] } +thiserror = "1" +ssz = { package = "ethereum_ssz", version = "0.10" } +ssz_derive = { package = "ethereum_ssz_derive", version = "0.10" } clap = { version = "4", features = ["derive", "env"] } +async-recursion = "1" +cartesi-rollups-contracts = "=2.2.0" [dev-dependencies] -alloy = { version = "1.4", default-features = false, features = ["contract", "provider-http", "rpc-types", "eips", "node-bindings"] } futures-util = "0.3" tokio-tungstenite = "0.28" k256 = "0.13.4" diff --git a/sequencer/src/api/error.rs b/sequencer/src/api/error.rs index 60696a7..9a75d76 100644 --- a/sequencer/src/api/error.rs +++ b/sequencer/src/api/error.rs @@ -8,6 +8,7 @@ use serde::Serialize; use thiserror::Error; use crate::inclusion_lane::SequencerError; +use sequencer_core::api::TxRequestError; #[derive(Debug, Error, Clone)] pub enum ApiError { @@ -20,6 +21,8 @@ pub enum ApiError { #[error("{0}")] ExecutionRejected(String), #[error("{0}")] + Unavailable(String), 
+ #[error("{0}")] InternalError(String), #[error("{0}")] Overloaded(String), @@ -49,6 +52,10 @@ impl ApiError { Self::InternalError(message.into()) } + pub fn unavailable(message: impl Into) -> Self { + Self::Unavailable(message.into()) + } + pub fn overloaded(message: impl Into) -> Self { Self::Overloaded(message.into()) } @@ -58,6 +65,7 @@ impl ApiError { Self::BadRequest(_) | Self::InvalidSignature(_) => StatusCode::BAD_REQUEST, Self::PayloadTooLarge(_) => StatusCode::PAYLOAD_TOO_LARGE, Self::ExecutionRejected(_) => StatusCode::UNPROCESSABLE_ENTITY, + Self::Unavailable(_) => StatusCode::SERVICE_UNAVAILABLE, Self::InternalError(_) => StatusCode::INTERNAL_SERVER_ERROR, Self::Overloaded(_) => StatusCode::TOO_MANY_REQUESTS, } @@ -69,6 +77,7 @@ impl ApiError { Self::PayloadTooLarge(_) => "PAYLOAD_TOO_LARGE", Self::InvalidSignature(_) => "INVALID_SIGNATURE", Self::ExecutionRejected(_) => "EXECUTION_REJECTED", + Self::Unavailable(_) => "UNAVAILABLE", Self::InternalError(_) => "INTERNAL_ERROR", Self::Overloaded(_) => "OVERLOADED", } @@ -79,11 +88,21 @@ impl From for ApiError { fn from(value: SequencerError) -> Self { match value { SequencerError::Invalid(message) => Self::ExecutionRejected(message), + SequencerError::Unavailable(message) => Self::Unavailable(message), SequencerError::Internal(message) => Self::InternalError(message), } } } +impl From for ApiError { + fn from(value: TxRequestError) -> Self { + match value { + TxRequestError::BadRequest(message) => Self::BadRequest(message), + TxRequestError::InvalidSignature(message) => Self::InvalidSignature(message), + } + } +} + impl IntoResponse for ApiError { fn into_response(self) -> Response { let body = ErrorResponse { diff --git a/sequencer/src/api/mod.rs b/sequencer/src/api/mod.rs index 5850d2b..f395433 100644 --- a/sequencer/src/api/mod.rs +++ b/sequencer/src/api/mod.rs @@ -2,202 +2,111 @@ // SPDX-License-Identifier: Apache-2.0 (see LICENSE) mod error; +mod state; +mod tx; +mod ws; +use std::io; use 
std::sync::Arc; -use std::time::SystemTime; +use alloy_sol_types::Eip712Domain; use axum::Router; -use axum::error_handling::HandleErrorLayer; -use axum::extract::ws::{CloseFrame, Message, WebSocket, WebSocketUpgrade, close_code}; -use axum::extract::{DefaultBodyLimit, Json, Query, State}; +use axum::extract::DefaultBodyLimit; use axum::http::StatusCode; -use axum::response::{IntoResponse, Response}; use axum::routing::{get, post}; -use serde::{Deserialize, Serialize}; use tokio::sync::mpsc; -use tokio::sync::mpsc::error::TrySendError; -use tokio::sync::oneshot; -use tokio::sync::{OwnedSemaphorePermit, Semaphore}; -use tower::limit::ConcurrencyLimitLayer; -use tower::load_shed::LoadShedLayer; -use tower::{BoxError, ServiceBuilder}; use tower_http::trace::TraceLayer; -use tracing::{debug, warn}; -use alloy_primitives::{Address, Signature}; -use alloy_sol_types::{Eip712Domain, SolStruct}; -use sequencer_core::api::{TxRequest, TxResponse}; -use sequencer_core::user_op::SignedUserOp; +pub use error::ApiError; +use state::ApiState; -use crate::inclusion_lane::{InclusionLaneInput, PendingUserOp}; -use crate::l2_tx_broadcaster::{BroadcastTxMessage, L2TxBroadcaster}; -use crate::storage::Storage; +use crate::inclusion_lane::PendingUserOp; +use crate::l2_tx_feed::L2TxFeed; +use crate::shutdown::ShutdownSignal; +use sequencer_core::api::TxRequest; -pub use error::ApiError; +const DEFAULT_WS_MAX_SUBSCRIBERS: usize = 64; +const DEFAULT_WS_MAX_CATCHUP_EVENTS: u64 = 50_000; +const DEFAULT_MAX_BODY_BYTES: usize = TxRequest::MAX_JSON_BYTES_RECOMMENDED; -#[derive(Clone)] -pub struct AppState { - pub tx_sender: mpsc::Sender, - pub domain: Eip712Domain, - pub overload_max_inflight_submissions: usize, - pub ws_subscriber_limit: Arc, +pub type ApiServerTask = tokio::task::JoinHandle>; + +#[derive(Debug, Clone, Copy)] +pub struct ApiConfig { + pub max_body_bytes: usize, + pub ws_max_subscribers: usize, pub ws_max_catchup_events: u64, - pub broadcaster: L2TxBroadcaster, } 
-#[derive(Debug, Deserialize)] -struct SubscribeQuery { - from_offset: Option, +impl Default for ApiConfig { + fn default() -> Self { + Self { + max_body_bytes: DEFAULT_MAX_BODY_BYTES, + ws_max_subscribers: DEFAULT_WS_MAX_SUBSCRIBERS, + ws_max_catchup_events: DEFAULT_WS_MAX_CATCHUP_EVENTS, + } + } } -pub fn router(state: Arc, max_body_bytes: usize) -> Router { - let tx_concurrency_limit = state.overload_max_inflight_submissions; - let tx_route = post(submit_tx).layer( - ServiceBuilder::new() - .layer(HandleErrorLayer::new(handle_tx_route_error)) - .layer(LoadShedLayer::new()) - .layer(ConcurrencyLimitLayer::new(tx_concurrency_limit)), - ); +pub async fn start( + http_addr: impl tokio::net::ToSocketAddrs, + tx_sender: mpsc::Sender, + domain: Eip712Domain, + max_user_op_data_bytes: usize, + shutdown: ShutdownSignal, + tx_feed: L2TxFeed, + config: ApiConfig, +) -> io::Result { + let listener = tokio::net::TcpListener::bind(http_addr).await?; + Ok(start_on_listener( + listener, + tx_sender, + domain, + max_user_op_data_bytes, + shutdown, + tx_feed, + config, + )) +} + +pub fn start_on_listener( + listener: tokio::net::TcpListener, + tx_sender: mpsc::Sender, + domain: Eip712Domain, + max_user_op_data_bytes: usize, + shutdown: ShutdownSignal, + tx_feed: L2TxFeed, + config: ApiConfig, +) -> ApiServerTask { + let state = Arc::new(ApiState::new( + tx_sender, + domain, + max_user_op_data_bytes, + shutdown.clone(), + tx_feed, + config, + )); + let app = router(state, config.max_body_bytes); + + tokio::spawn(async move { + axum::serve(listener, app) + .with_graceful_shutdown(async move { + shutdown.wait_for_shutdown().await; + }) + .await + }) +} +fn router(state: Arc, max_body_bytes: usize) -> Router { Router::new() - .route("/tx", tx_route) - .route("/ws/subscribe", get(subscribe_l2_txs)) - .route("/livez", get(livez)) - .route("/readyz", get(readyz)) - .route("/healthz", get(healthz)) + .route("/tx", post(tx::submit_tx)) + .route("/ws/subscribe", get(ws::subscribe_l2_txs)) 
.with_state(state) // Enforces a raw request-body cap before JSON deserialization, including whitespace. .layer(DefaultBodyLimit::max(max_body_bytes)) .layer(TraceLayer::new_for_http()) } -async fn submit_tx( - State(state): State>, - req: Result, axum::extract::rejection::JsonRejection>, -) -> Result, ApiError> { - let Json(req) = req.map_err(map_json_rejection)?; - - if req.signature.len() != TxRequest::SIGNATURE_HEX_LEN { - return Err(ApiError::bad_request(format!( - "signature must be {} hex chars (0x + 65 bytes)", - TxRequest::SIGNATURE_HEX_LEN - ))); - } - if req.sender.len() != TxRequest::ADDRESS_HEX_LEN { - return Err(ApiError::bad_request(format!( - "sender must be {} hex chars (0x + 20 bytes)", - TxRequest::ADDRESS_HEX_LEN - ))); - } - - let user_op = req.message; - let user_op_data_len = user_op.data.len(); - let user_op_size_upper_bound = - SignedUserOp::batch_bytes_upper_bound_for_data_len(user_op_data_len); - - // Keep over-sized payloads out of the hot path so chunk-level batch checks can stay simple. 
- if user_op_size_upper_bound > SignedUserOp::max_batch_bytes_upper_bound() { - return Err(ApiError::bad_request(format!( - "user op payload too large: max {} bytes, got {} bytes", - SignedUserOp::MAX_METHOD_PAYLOAD_BYTES, - user_op_data_len - ))); - } - - let signature_bytes = decode_hex_0x(&req.signature).map_err(ApiError::bad_request)?; - if signature_bytes.len() != 65 { - return Err(ApiError::bad_request("signature must be 65 bytes")); - } - let signature = parse_signature(&signature_bytes)?; - let nonce = user_op.nonce; - - let signing_hash = user_op.eip712_signing_hash(&state.domain); - let sender = signature - .recover_address_from_prehash(&signing_hash) - .map_err(|_| ApiError::invalid_signature("cannot recover sender"))?; - - let expected = parse_address(req.sender.as_str()).map_err(ApiError::bad_request)?; - if expected != sender { - return Err(ApiError::invalid_signature("sender mismatch")); - } - - let signed = SignedUserOp { - sender, - user_op, - signature, - }; - - let (respond_to, recv) = oneshot::channel(); - let enqueued = PendingUserOp { - signed, - respond_to, - received_at: SystemTime::now(), - }; - - enqueue_tx(&state, enqueued)?; - - let commit_result = recv - .await - .map_err(|_| ApiError::internal_error("inclusion lane dropped response"))?; - commit_result.map_err(ApiError::from)?; - debug!(sender = %sender, nonce, "tx committed"); - - Ok(Json(TxResponse { - ok: true, - sender: sender.to_string(), - nonce, - })) -} - -fn decode_hex_0x(value: &str) -> Result, String> { - if !value.starts_with("0x") { - return Err("hex string must start with 0x".to_string()); - } - alloy_primitives::hex::decode(value).map_err(|err| format!("invalid hex: {err}")) -} - -fn parse_address(value: &str) -> Result { - let bytes = decode_hex_0x(value)?; - if bytes.len() != 20 { - return Err("address must be 20 bytes".to_string()); - } - Ok(Address::from_slice(&bytes)) -} - -fn parse_signature(bytes: &[u8]) -> Result { - Signature::from_raw(bytes).map_err(|err| match 
err { - alloy_primitives::SignatureError::FromBytes(_) => { - ApiError::bad_request("signature must be 65 bytes") - } - alloy_primitives::SignatureError::FromHex(_) => { - ApiError::bad_request("invalid signature hex") - } - alloy_primitives::SignatureError::InvalidParity(_) => { - ApiError::invalid_signature("invalid signature parity") - } - alloy_primitives::SignatureError::K256(_) => { - ApiError::invalid_signature("invalid signature") - } - }) -} - -fn enqueue_tx(state: &AppState, tx: PendingUserOp) -> Result<(), ApiError> { - match state.tx_sender.try_send(InclusionLaneInput::UserOp(tx)) { - Ok(()) => Ok(()), - Err(TrySendError::Full(_)) => Err(ApiError::overloaded("queue full")), - Err(TrySendError::Closed(_)) => Err(ApiError::internal_error("inclusion lane unavailable")), - } -} - -async fn handle_tx_route_error(err: BoxError) -> impl IntoResponse { - if err.is::() { - ApiError::overloaded("tx endpoint overloaded").into_response() - } else { - warn!(error = %err, "tx endpoint middleware error"); - ApiError::internal_error("tx endpoint unavailable").into_response() - } -} - // Keep non-413 JSON extractor failures normalized to 400 for a stable API contract. 
fn map_json_rejection(err: axum::extract::rejection::JsonRejection) -> ApiError { if err.status() == StatusCode::PAYLOAD_TOO_LARGE { @@ -206,265 +115,3 @@ fn map_json_rejection(err: axum::extract::rejection::JsonRejection) -> ApiError ApiError::bad_request(format!("invalid JSON: {err}")) } } - -async fn subscribe_l2_txs( - State(state): State>, - Query(query): Query, - ws: WebSocketUpgrade, -) -> Response { - let from_offset = query.from_offset.unwrap_or(0); - let permit = match Arc::clone(&state.ws_subscriber_limit).try_acquire_owned() { - Ok(permit) => permit, - Err(_) => return ApiError::overloaded("ws subscriber limit reached").into_response(), - }; - let broadcaster = state.broadcaster.clone(); - let ws_max_catchup_events = state.ws_max_catchup_events; - ws.on_upgrade(move |socket| { - run_broadcaster_session( - broadcaster, - socket, - from_offset, - permit, - ws_max_catchup_events, - ) - }) - .into_response() -} - -async fn run_broadcaster_session( - broadcaster: L2TxBroadcaster, - mut socket: WebSocket, - from_offset: u64, - _subscriber_permit: OwnedSemaphorePermit, - ws_max_catchup_events: u64, -) { - let mut subscription = broadcaster.subscribe(); - let mut next_offset = from_offset; - - if next_offset < subscription.live_start_offset { - let catchup_events = subscription.live_start_offset - next_offset; - if catchup_events > ws_max_catchup_events { - warn!( - requested_offset = next_offset, - live_start_offset = subscription.live_start_offset, - max_catchup_events = ws_max_catchup_events, - "ws catch-up window exceeded; closing subscriber" - ); - let _ = socket - .send(Message::Close(Some(CloseFrame { - code: close_code::POLICY, - reason: "catch-up window exceeded".into(), - }))) - .await; - return; - } - if send_catch_up( - &broadcaster, - &mut socket, - next_offset, - subscription.live_start_offset, - ) - .await - .is_err() - { - return; - } - next_offset = subscription.live_start_offset; - } - - loop { - tokio::select! 
{ - maybe_event = subscription.receiver.recv() => { - let Some(event) = maybe_event else { - break; - }; - let offset = event.offset(); - if offset < next_offset { - continue; - } - if offset != next_offset { - warn!( - expected_offset = next_offset, - received_offset = offset, - "broadcaster detected gap in live stream" - ); - break; - } - if send_ws_event(&mut socket, &event).await.is_err() { - break; - } - next_offset = next_offset.saturating_add(1); - } - inbound = socket.recv() => { - match inbound { - Some(Ok(Message::Close(_))) | None => break, - Some(Ok(Message::Ping(payload))) => { - if socket.send(Message::Pong(payload)).await.is_err() { - break; - } - } - Some(Ok(_)) => {} - Some(Err(_)) => break, - } - } - } - } -} - -async fn send_catch_up( - broadcaster: &L2TxBroadcaster, - socket: &mut WebSocket, - from_offset: u64, - to_offset: u64, -) -> Result<(), ()> { - if from_offset >= to_offset { - return Ok(()); - } - - let (events_tx, mut events_rx) = mpsc::channel::(1024); - let db_path = broadcaster.db_path(); - let page_size = broadcaster.page_size(); - let worker = tokio::task::spawn_blocking(move || -> Result<(), String> { - let mut storage = Storage::open_read_only(&db_path) - .map_err(|err| format!("open catch-up storage failed: {err}"))?; - let mut next_offset = from_offset; - - while next_offset < to_offset { - let remaining = (to_offset - next_offset) as usize; - let page_limit = remaining.min(page_size.max(1)); - let txs = storage - .load_ordered_l2_txs_page_from(next_offset, page_limit) - .map_err(|err| format!("read catch-up page from {next_offset} failed: {err}"))?; - if txs.is_empty() { - return Err(format!( - "catch-up reached sparse range [{next_offset}, {to_offset})" - )); - } - - for tx in txs { - let event = BroadcastTxMessage::from_offset_and_tx(next_offset, tx); - next_offset = next_offset.saturating_add(1); - if events_tx.blocking_send(event).is_err() { - debug!("broadcaster catch-up worker stopping early: receiver channel closed"); - 
return Ok(()); - } - } - } - Ok(()) - }); - - let mut ws_send_failed = false; - while let Some(event) = events_rx.recv().await { - if send_ws_event(socket, &event).await.is_err() { - ws_send_failed = true; - break; - } - } - drop(events_rx); - - let worker_result = match worker.await { - Ok(Ok(())) => Ok(()), - Ok(Err(reason)) => { - warn!(reason, "broadcaster catch-up worker exited with error"); - Err(()) - } - Err(err) => { - warn!(error = %err, "broadcaster catch-up worker join failed"); - Err(()) - } - }; - - if ws_send_failed { - return Err(()); - } - worker_result -} - -async fn send_ws_event(socket: &mut WebSocket, event: &BroadcastTxMessage) -> Result<(), ()> { - let payload = match serde_json::to_string(event) { - Ok(value) => value, - Err(err) => { - warn!(error = %err, "broadcaster failed to serialize tx event"); - return Err(()); - } - }; - - if socket.send(Message::Text(payload.into())).await.is_err() { - return Err(()); - } - Ok(()) -} - -// ── Health endpoints ────────────────────────────────────────────────────────── - -/// GET /livez — liveness probe: returns 200 as long as the process can respond. -async fn livez() -> StatusCode { - StatusCode::OK -} - -/// GET /readyz — readiness probe: returns 200 only when the sequencer is ready -/// to accept transactions (inclusion lane running, broadcaster running). -async fn readyz(State(state): State>) -> StatusCode { - if state.tx_sender.is_closed() || !state.broadcaster.is_running() { - return StatusCode::SERVICE_UNAVAILABLE; - } - StatusCode::OK -} - -#[derive(Serialize)] -struct HealthStatus { - status: &'static str, - inclusion_lane: &'static str, - broadcaster: &'static str, -} - -/// GET /healthz — detailed JSON health status. 
-async fn healthz(State(state): State>) -> impl IntoResponse { - let inclusion_lane_ok = !state.tx_sender.is_closed(); - let broadcaster_ok = state.broadcaster.is_running(); - let all_ok = inclusion_lane_ok && broadcaster_ok; - - let body = HealthStatus { - status: if all_ok { "ok" } else { "degraded" }, - inclusion_lane: if inclusion_lane_ok { "ok" } else { "stopped" }, - broadcaster: if broadcaster_ok { "ok" } else { "stopped" }, - }; - - let status = if all_ok { - StatusCode::OK - } else { - StatusCode::SERVICE_UNAVAILABLE - }; - (status, Json(body)) -} - -#[cfg(test)] -mod tests { - use super::*; - use axum::body::to_bytes; - - #[tokio::test] - async fn tx_route_internal_errors_are_sanitized() { - let err: BoxError = std::io::Error::other("sensitive middleware detail").into(); - let response = handle_tx_route_error(err).await.into_response(); - assert_eq!(response.status(), StatusCode::INTERNAL_SERVER_ERROR); - - let body = to_bytes(response.into_body(), usize::MAX) - .await - .expect("read response body"); - let body = String::from_utf8(body.to_vec()).expect("utf8 response body"); - - assert!( - body.contains("INTERNAL_ERROR"), - "expected internal error code in body: {body}" - ); - assert!( - body.contains("tx endpoint unavailable"), - "expected sanitized internal message in body: {body}" - ); - assert!( - !body.contains("sensitive middleware detail"), - "middleware internals leaked in body: {body}" - ); - } -} diff --git a/sequencer/src/api/state.rs b/sequencer/src/api/state.rs new file mode 100644 index 0000000..752e254 --- /dev/null +++ b/sequencer/src/api/state.rs @@ -0,0 +1,61 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use std::sync::Arc; + +use alloy_sol_types::Eip712Domain; +use tokio::sync::{OwnedSemaphorePermit, Semaphore, mpsc}; + +use super::{ApiConfig, ApiError}; +use crate::inclusion_lane::PendingUserOp; +use crate::l2_tx_feed::L2TxFeed; +use crate::shutdown::ShutdownSignal; + 
+#[derive(Clone)] +pub(super) struct ApiState { + pub tx_sender: mpsc::Sender<PendingUserOp>, + pub domain: Eip712Domain, + pub max_user_op_data_bytes: usize, + pub shutdown: ShutdownSignal, + pub ws_subscriber_limit: Arc<Semaphore>, + pub ws_max_catchup_events: u64, + pub tx_feed: L2TxFeed, +} + +impl ApiState { + pub(super) fn new( + tx_sender: mpsc::Sender<PendingUserOp>, + domain: Eip712Domain, + max_user_op_data_bytes: usize, + shutdown: ShutdownSignal, + tx_feed: L2TxFeed, + config: ApiConfig, + ) -> Self { + Self { + tx_sender, + domain, + max_user_op_data_bytes, + shutdown, + ws_subscriber_limit: Arc::new(Semaphore::new(config.ws_max_subscribers)), + ws_max_catchup_events: config.ws_max_catchup_events, + tx_feed, + } + } + + pub(crate) fn reject_if_shutting_down(&self) -> Result<(), ApiError> { + if self.shutdown.is_shutdown_requested() { + Err(ApiError::unavailable("sequencer shutting down")) + } else { + Ok(()) + } + } + + pub(crate) fn try_acquire_ws_subscriber_permit( + &self, + ) -> Result<OwnedSemaphorePermit, ApiError> { + self.ws_subscriber_limit + .clone() + .try_acquire_owned() + .map_err(|_| ApiError::overloaded("ws subscriber limit reached")) + } +} diff --git a/sequencer/src/api/tx.rs b/sequencer/src/api/tx.rs new file mode 100644 index 0000000..8f10dd9 --- /dev/null +++ b/sequencer/src/api/tx.rs @@ -0,0 +1,167 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use std::sync::Arc; +use std::time::SystemTime; + +use axum::extract::{Json, State}; +use tokio::sync::mpsc::error::TrySendError; +use tokio::sync::oneshot; +use tracing::debug; + +use super::{ApiError, ApiState}; +use crate::inclusion_lane::PendingUserOp; +use sequencer_core::api::{TxRequest, TxResponse}; +use sequencer_core::user_op::SignedUserOp; + +pub(super) async fn submit_tx( + State(state): State<Arc<ApiState>>, + req: Result<Json<TxRequest>, axum::extract::rejection::JsonRejection>, +) -> Result<Json<TxResponse>, ApiError> { + let Json(req) = req.map_err(super::map_json_rejection)?; + + let signed = req
.into_signed_user_op(&state.domain, state.max_user_op_data_bytes) + .map_err(ApiError::from)?; + let nonce = signed.user_op.nonce; + let sender = signed.sender; + let ack = enqueue_verified_tx(state.as_ref(), signed)?; + + let commit_result = ack + .await + .map_err(|_| ApiError::internal_error("inclusion lane dropped response"))?; + commit_result.map_err(ApiError::from)?; + debug!(sender = %sender, nonce, "tx committed"); + + Ok(Json(TxResponse { + ok: true, + sender: sender.to_string(), + nonce, + })) +} + +fn enqueue_verified_tx( + state: &ApiState, + signed: SignedUserOp, +) -> Result>, ApiError> { + state.reject_if_shutting_down()?; + + let (respond_to, recv) = oneshot::channel(); + let pending = PendingUserOp { + signed, + respond_to, + received_at: SystemTime::now(), + }; + + match state.tx_sender.try_send(pending) { + Ok(()) => Ok(recv), + Err(TrySendError::Full(_)) => Err(ApiError::overloaded("queue full")), + Err(TrySendError::Closed(_)) => Err(ApiError::internal_error("inclusion lane unavailable")), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use alloy_primitives::{Address, Signature}; + use alloy_sol_types::Eip712Domain; + use alloy_sol_types::SolStruct; + use axum::http::StatusCode; + use k256::ecdsa::SigningKey; + use k256::ecdsa::signature::hazmat::PrehashSigner; + use std::sync::Arc; + use tempfile::TempDir; + use tokio::sync::mpsc; + + use crate::storage::Storage; + use sequencer_core::user_op::UserOp; + + #[tokio::test(flavor = "current_thread")] + async fn submit_tx_rejects_when_shutdown_has_started() { + let db = TempDir::new().expect("create temp dir"); + let db_path = db.path().join("sequencer.db"); + let _storage = Storage::open(&db_path.to_string_lossy(), "NORMAL").expect("create db"); + let shutdown = crate::shutdown::ShutdownSignal::default(); + let tx_feed = crate::l2_tx_feed::L2TxFeed::new( + db_path.to_string_lossy().into_owned(), + shutdown.clone(), + crate::l2_tx_feed::L2TxFeedConfig { + idle_poll_interval: 
std::time::Duration::from_millis(2), + page_size: 64, + }, + ); + + shutdown.request_shutdown(); + + let (tx_sender, _rx) = mpsc::channel::(1); + let state = Arc::new(ApiState::new( + tx_sender, + Eip712Domain { + name: None, + version: None, + chain_id: None, + verifying_contract: None, + salt: None, + }, + 128, + shutdown, + tx_feed.clone(), + crate::api::ApiConfig { + max_body_bytes: 128, + ws_max_subscribers: 1, + ws_max_catchup_events: 1, + }, + )); + + let signing_key = SigningKey::from_bytes((&[7_u8; 32]).into()).expect("create signing key"); + let sender = address_from_signing_key(&signing_key); + let user_op = UserOp { + nonce: 0, + max_fee: 0, + data: Vec::new().into(), + }; + let request = TxRequest { + message: user_op.clone(), + signature: sign_user_op_hex(&state.domain, &user_op, &signing_key), + sender: sender.to_string(), + }; + + let result = submit_tx(State(state), Ok(Json(request))).await; + + let err = result.expect_err("submit should be rejected during shutdown"); + assert_eq!(err.status(), StatusCode::SERVICE_UNAVAILABLE); + assert_eq!(err.code(), "UNAVAILABLE"); + } + + fn sign_user_op_hex( + domain: &Eip712Domain, + user_op: &UserOp, + signing_key: &SigningKey, + ) -> String { + let hash = user_op.eip712_signing_hash(domain); + let k256_sig = signing_key + .sign_prehash(hash.as_slice()) + .expect("sign user op hash"); + + let sender = address_from_signing_key(signing_key); + let signature = [false, true] + .into_iter() + .map(|parity| Signature::from_signature_and_parity(k256_sig, parity)) + .find(|candidate| { + candidate + .recover_address_from_prehash(&hash) + .ok() + .map(|value| value == sender) + .unwrap_or(false) + }) + .expect("recoverable parity for signature"); + + alloy_primitives::hex::encode_prefixed(signature.as_bytes()) + } + + fn address_from_signing_key(signing_key: &SigningKey) -> Address { + let verifying = signing_key.verifying_key().to_encoded_point(false); + Address::from_raw_public_key(&verifying.as_bytes()[1..]) + } 
+} diff --git a/sequencer/src/api/ws.rs b/sequencer/src/api/ws.rs new file mode 100644 index 0000000..d69f16e --- /dev/null +++ b/sequencer/src/api/ws.rs @@ -0,0 +1,143 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use std::sync::Arc; + +use axum::extract::ws::{CloseFrame, Message, WebSocket, WebSocketUpgrade, close_code}; +use axum::extract::{Query, State}; +use axum::response::{IntoResponse, Response}; +use serde::Deserialize; +use tokio::sync::OwnedSemaphorePermit; +use tracing::warn; + +use crate::l2_tx_feed::{BroadcastTxMessage, L2TxFeed, SubscribeError}; + +use super::ApiState; + +const MAX_INBOUND_WS_MESSAGE_SIZE: usize = 8 * 1024; +const MAX_INBOUND_WS_FRAME_SIZE: usize = 8 * 1024; + +#[derive(Debug, Deserialize)] +pub(super) struct SubscribeQuery { + from_offset: Option<u64>, +} + +pub(super) async fn subscribe_l2_txs( + State(state): State<Arc<ApiState>>, + Query(query): Query<SubscribeQuery>, + ws: WebSocketUpgrade, +) -> Response { + if let Err(err) = state.reject_if_shutting_down() { + return err.into_response(); + } + + let from_offset = query.from_offset.unwrap_or(0); + let permit = match state.try_acquire_ws_subscriber_permit() { + Ok(permit) => permit, + Err(err) => return err.into_response(), + }; + let tx_feed = state.tx_feed.clone(); + let ws_max_catchup_events = state.ws_max_catchup_events; + + ws.max_message_size(MAX_INBOUND_WS_MESSAGE_SIZE) + .max_frame_size(MAX_INBOUND_WS_FRAME_SIZE) + .on_upgrade(move |socket| { + run_ws_session(tx_feed, socket, from_offset, permit, ws_max_catchup_events) + }) + .into_response() +} + +async fn run_ws_session( + tx_feed: L2TxFeed, + mut socket: WebSocket, + from_offset: u64, + _subscriber_permit: OwnedSemaphorePermit, + ws_max_catchup_events: u64, +) { + let mut subscription = match tx_feed.subscribe_from(from_offset, ws_max_catchup_events) { + Ok(subscription) => subscription, + Err(SubscribeError::CatchUpWindowExceeded { + requested_offset, + live_start_offset,
max_catchup_events, + }) => { + warn!( + requested_offset, + live_start_offset, + max_catchup_events, + "ws catch-up window exceeded; closing subscriber" + ); + let _ = socket + .send(Message::Close(Some(CloseFrame { + code: close_code::POLICY, + reason: "catch-up window exceeded".into(), + }))) + .await; + return; + } + Err(SubscribeError::OpenStorage { source }) => { + warn!(error = %source, "ws subscription failed to open replay storage"); + let _ = socket + .send(Message::Close(Some(CloseFrame { + code: close_code::ERROR, + reason: "subscription unavailable".into(), + }))) + .await; + return; + } + Err(SubscribeError::LoadHeadOffset { source }) => { + warn!(error = %source, "ws subscription failed to read replay head"); + let _ = socket + .send(Message::Close(Some(CloseFrame { + code: close_code::ERROR, + reason: "subscription unavailable".into(), + }))) + .await; + return; + } + }; + + loop { + tokio::select! { + maybe_event = subscription.recv() => { + let Some(event) = maybe_event else { + break; + }; + if send_ws_event(&mut socket, &event).await.is_err() { + break; + } + } + inbound = socket.recv() => { + match inbound { + Some(Ok(Message::Close(_))) | None => break, + Some(Ok(Message::Ping(payload))) => { + if socket.send(Message::Pong(payload)).await.is_err() { + break; + } + } + Some(Ok(_)) => {} + Some(Err(_)) => break, + } + } + } + } + + if let Err(err) = subscription.finish().await { + warn!(error = %err, "tx feed subscription cleanup failed"); + } +} + +async fn send_ws_event(socket: &mut WebSocket, event: &BroadcastTxMessage) -> Result<(), ()> { + let payload = match serde_json::to_string(event) { + Ok(value) => value, + Err(err) => { + warn!(error = %err, "tx feed failed to serialize tx event"); + return Err(()); + } + }; + + if socket.send(Message::Text(payload.into())).await.is_err() { + return Err(()); + } + Ok(()) +} diff --git a/sequencer/src/config.rs b/sequencer/src/config.rs new file mode 100644 index 0000000..f7b47d6 --- /dev/null +++ 
b/sequencer/src/config.rs @@ -0,0 +1,137 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use alloy_primitives::{Address, U256}; +use alloy_sol_types::Eip712Domain; +use clap::Parser; + +pub const DOMAIN_NAME: &str = "CartesiAppSequencer"; +pub const DOMAIN_VERSION: &str = "1"; + +const DEFAULT_HTTP_ADDR: &str = "127.0.0.1:3000"; +const DEFAULT_DB_PATH: &str = "sequencer.db"; + +/// `-32005` Infura +/// `-32600`, `-32602` Alchemy +/// `-32616` QuickNode +const DEFAULT_LONG_BLOCK_RANGE_ERROR_CODES: &[&str] = &["-32005", "-32600", "-32602", "-32616"]; + +#[derive(Debug, Clone, Parser)] +#[command( + name = "sequencer", + about = "Deterministic sequencer prototype with low-latency soft confirmations", + version, + after_help = "Examples:\n sequencer --eth-rpc-url http://127.0.0.1:8545 --domain-chain-id 31337 --domain-verifying-contract 0x1111111111111111111111111111111111111111\n sequencer --http-addr 0.0.0.0:3000 --db-path ./sequencer.db --eth-rpc-url https://eth.example --domain-chain-id 1 --domain-verifying-contract 0x4444444444444444444444444444444444444444" +)] +pub struct RunConfig { + #[arg(long, env = "SEQ_HTTP_ADDR", default_value = DEFAULT_HTTP_ADDR, value_parser = parse_non_empty_string)] + pub http_addr: String, + #[arg(long, env = "SEQ_DB_PATH", default_value = DEFAULT_DB_PATH, value_parser = parse_non_empty_string)] + pub db_path: String, + #[arg(long, env = "SEQ_ETH_RPC_URL", value_parser = parse_non_empty_string)] + pub eth_rpc_url: String, + /// Error codes that trigger `get_logs` retries with a shorter block range. 
+ #[arg(long, env = "SEQ_LONG_BLOCK_RANGE_ERROR_CODES", value_delimiter = ',', default_values = DEFAULT_LONG_BLOCK_RANGE_ERROR_CODES)] + pub long_block_range_error_codes: Vec<String>, + #[arg(long, env = "SEQ_DOMAIN_CHAIN_ID")] + pub domain_chain_id: u64, + #[arg(long, env = "SEQ_DOMAIN_VERIFYING_CONTRACT", value_parser = parse_address)] + pub domain_verifying_contract: Address, +} + +impl RunConfig { + pub fn build_domain(&self) -> Eip712Domain { + Eip712Domain { + name: Some(DOMAIN_NAME.into()), + version: Some(DOMAIN_VERSION.into()), + chain_id: Some(U256::from(self.domain_chain_id)), + verifying_contract: Some(self.domain_verifying_contract), + salt: None, + } + } +} + +fn parse_non_empty_string(raw: &str) -> Result<String, String> { + let value = raw.trim(); + if value.is_empty() { + return Err("value cannot be empty".to_string()); + } + Ok(value.to_string()) +} + +fn parse_address(raw: &str) -> Result<Address, String> { + if !raw.starts_with("0x") { + return Err("verifying contract must be 0x-prefixed".to_string()); + } + + let bytes = alloy_primitives::hex::decode(raw) + .map_err(|err| format!("invalid verifying contract hex: {err}"))?; + if bytes.len() != 20 { + return Err("verifying contract must be 20 bytes".to_string()); + } + Ok(Address::from_slice(&bytes)) +} + +#[cfg(test)] +mod tests { + use super::{DOMAIN_NAME, DOMAIN_VERSION, RunConfig}; + use alloy_primitives::{Address, U256}; + use clap::Parser; + + #[test] + fn run_config_requires_deployment_domain_inputs() { + let err = RunConfig::try_parse_from(["sequencer"]).expect_err("domain inputs are required"); + + let message = err.to_string(); + assert!(message.contains("--eth-rpc-url")); + assert!(message.contains("--domain-chain-id")); + assert!(message.contains("--domain-verifying-contract")); + } + + #[test] + fn run_config_uses_default_block_range_retry_codes() { + let config = RunConfig::try_parse_from([ + "sequencer", + "--eth-rpc-url", + "http://127.0.0.1:8545", + "--domain-chain-id", + "31337", + "--domain-verifying-contract",
"0x1111111111111111111111111111111111111111", + ]) + .expect("parse run config"); + + assert_eq!( + config.long_block_range_error_codes, + vec![ + "-32005".to_string(), + "-32600".to_string(), + "-32602".to_string(), + "-32616".to_string() + ] + ); + } + + #[test] + fn run_config_builds_domain_with_fixed_name_and_version() { + let config = RunConfig::try_parse_from([ + "sequencer", + "--eth-rpc-url", + "http://127.0.0.1:8545", + "--domain-chain-id", + "31337", + "--domain-verifying-contract", + "0x1111111111111111111111111111111111111111", + ]) + .expect("parse run config"); + + let domain = config.build_domain(); + assert_eq!(domain.name.as_deref(), Some(DOMAIN_NAME)); + assert_eq!(domain.version.as_deref(), Some(DOMAIN_VERSION)); + assert_eq!(domain.chain_id, Some(U256::from(31337_u64))); + assert_eq!( + domain.verifying_contract, + Some(Address::from_slice(&[0x11; 20])) + ); + } +} diff --git a/sequencer/src/inclusion_lane/catch_up.rs b/sequencer/src/inclusion_lane/catch_up.rs new file mode 100644 index 0000000..9f5e48d --- /dev/null +++ b/sequencer/src/inclusion_lane/catch_up.rs @@ -0,0 +1,63 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use crate::storage::Storage; +use sequencer_core::application::Application; +use sequencer_core::l2_tx::SequencedL2Tx; + +use super::error::CatchUpError; + +const DEFAULT_CATCH_UP_PAGE_SIZE: usize = 256; + +pub(super) fn catch_up_application( + app: &mut impl Application, + storage: &mut Storage, +) -> Result<(), CatchUpError> { + catch_up_application_paged(app, storage, DEFAULT_CATCH_UP_PAGE_SIZE) +} + +pub(super) fn catch_up_application_paged( + app: &mut impl Application, + storage: &mut Storage, + page_size: usize, +) -> Result<(), CatchUpError> { + let mut next_offset = app.executed_input_count(); + let page_size = page_size.max(1); + + loop { + let replay = storage + .load_ordered_l2_txs_page_from(next_offset, page_size) + .map_err(|source| 
CatchUpError::LoadReplay { + offset: next_offset, + source, + })?; + + if replay.is_empty() { + return Ok(()); + } + + for item in replay { + replay_sequenced_l2_tx(app, item)?; + next_offset = next_offset.saturating_add(1); + } + } +} + +fn replay_sequenced_l2_tx( + app: &mut impl Application, + item: SequencedL2Tx, +) -> Result<(), CatchUpError> { + match item { + SequencedL2Tx::UserOp(value) => { + app.execute_valid_user_op(&value) + .map_err(|err| CatchUpError::ReplayUserOpInternal { + reason: err.to_string(), + }) + } + SequencedL2Tx::Direct(direct) => app + .execute_direct_input(direct.payload.as_slice()) + .map_err(|err| CatchUpError::ReplayDirectInputInternal { + reason: err.to_string(), + }), + } +} diff --git a/sequencer/src/inclusion_lane/config.rs b/sequencer/src/inclusion_lane/config.rs new file mode 100644 index 0000000..2fa73be --- /dev/null +++ b/sequencer/src/inclusion_lane/config.rs @@ -0,0 +1,44 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use std::time::Duration; + +use sequencer_core::application::Application; +use sequencer_core::user_op::SignedUserOp; + +const DEFAULT_MAX_USER_OPS_PER_CHUNK: usize = 1024; +const DEFAULT_SAFE_DIRECT_BUFFER_CAPACITY: usize = 2048; +const DEFAULT_MAX_BATCH_OPEN: Duration = Duration::from_secs(2 * 60 * 60); +const DEFAULT_MAX_BATCH_USER_OP_BYTES: usize = 1_048_576; // 1 MiB +const DEFAULT_IDLE_POLL_INTERVAL: Duration = Duration::from_millis(2); + +#[derive(Debug, Clone, Copy)] +pub struct InclusionLaneConfig { + pub max_user_ops_per_chunk: usize, + pub safe_direct_buffer_capacity: usize, + pub max_batch_open: Duration, + + // Soft threshold for batch rotation. + // + // We intentionally check this between chunks (not per user-op) to keep the hot path + // simple and low-latency. This means batches can overshoot the threshold by at most + // one processed chunk. 
 API ingress bounds each user-op size, so this overshoot is + // bounded by: + // max_user_ops_per_chunk * (SignedUserOp::max_batch_metadata() + A::MAX_METHOD_PAYLOAD_BYTES) + pub max_batch_user_op_bytes: usize, + + pub idle_poll_interval: Duration, +} + +impl InclusionLaneConfig { + pub fn for_app<A: Application>() -> Self { + Self { + max_user_ops_per_chunk: DEFAULT_MAX_USER_OPS_PER_CHUNK, + safe_direct_buffer_capacity: DEFAULT_SAFE_DIRECT_BUFFER_CAPACITY, + max_batch_open: DEFAULT_MAX_BATCH_OPEN, + max_batch_user_op_bytes: DEFAULT_MAX_BATCH_USER_OP_BYTES + .max(SignedUserOp::max_batch_metadata() + A::MAX_METHOD_PAYLOAD_BYTES), + idle_poll_interval: DEFAULT_IDLE_POLL_INTERVAL, + } + } +} diff --git a/sequencer/src/inclusion_lane/error.rs b/sequencer/src/inclusion_lane/error.rs index 42cf9ec..afb2407 100644 --- a/sequencer/src/inclusion_lane/error.rs +++ b/sequencer/src/inclusion_lane/error.rs @@ -8,8 +8,6 @@ use thiserror::Error; pub enum InclusionLaneError { #[error("inclusion lane input channel closed")] ChannelClosed, - #[error("inclusion lane shutdown requested")] - ShutdownRequested, #[error("application catchup failed")] CatchUp { #[source] @@ -49,8 +47,9 @@ pub enum InclusionLaneError { #[derive(Debug, Error)] pub enum CatchUpError { - #[error("cannot load replay entries")] + #[error("cannot load replay entries from offset {offset}")] LoadReplay { + offset: u64, #[source] source: rusqlite::Error, }, diff --git a/sequencer/src/inclusion_lane/lane.rs b/sequencer/src/inclusion_lane/lane.rs index f5e1908..54523a2 100644 --- a/sequencer/src/inclusion_lane/lane.rs +++ b/sequencer/src/inclusion_lane/lane.rs @@ -1,152 +1,79 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) -use crate::storage::{IndexedDirectInput, Storage, WriteHead}; -use sequencer_core::application::{AppError, Application, ExecutionOutcome}; -use sequencer_core::l2_tx::SequencedL2Tx; -use sequencer_core::user_op::SignedUserOp; -use std::sync::Arc; -use
std::sync::atomic::{AtomicBool, Ordering}; use std::thread; -use std::time::{Duration, Instant, SystemTime}; +use std::time::SystemTime; + use tokio::sync::mpsc; use tokio::task::JoinHandle; -use super::error::CatchUpError; -use super::profiling::InclusionLaneMetrics; -use super::{InclusionLaneError, InclusionLaneInput, PendingUserOp, SequencerError}; - -#[derive(Debug, Clone, Copy)] -pub struct InclusionLaneConfig { - pub max_user_ops_per_chunk: usize, - pub safe_direct_buffer_capacity: usize, - pub max_batch_open: Duration, - - // Soft threshold for batch rotation. - // - // We intentionally check this between chunks (not per user-op) to keep the hot path - // simple and low-latency. This means batches can overshoot the threshold by at most - // one processed chunk. API ingress bounds each user-op size, so this overshoot is - // bounded by: - // max_user_ops_per_chunk * SignedUserOp::max_batch_bytes_upper_bound() - pub max_batch_user_op_bytes: usize, - - pub idle_poll_interval: Duration, - pub metrics_enabled: bool, - pub metrics_log_interval: Duration, -} - -#[derive(Debug, Clone, Default)] -pub struct InclusionLaneStop { - shutdown: Arc, -} - -impl InclusionLaneStop { - pub fn request_shutdown(&self) { - self.shutdown.store(true, Ordering::Relaxed); - } +use crate::shutdown::ShutdownSignal; +use crate::storage::{DirectInputRange, Storage, StoredDirectInput, WriteHead}; +use sequencer_core::application::{AppError, Application, ExecutionOutcome}; +use sequencer_core::user_op::SignedUserOp; - fn is_shutdown_requested(&self) -> bool { - self.shutdown.load(Ordering::Relaxed) - } -} +use super::catch_up::catch_up_application; +use super::config::InclusionLaneConfig; +use super::{InclusionLaneError, PendingUserOp, SequencerError}; pub struct InclusionLane { - rx: mpsc::Receiver, - stop: InclusionLaneStop, + rx: mpsc::Receiver, + shutdown: ShutdownSignal, app: A, storage: Storage, config: InclusionLaneConfig, } impl InclusionLane { - pub fn new( - rx: mpsc::Receiver, + 
pub fn start( + queue_capacity: usize, + shutdown: ShutdownSignal, app: A, storage: Storage, config: InclusionLaneConfig, - ) -> Self { - Self { - rx, - stop: InclusionLaneStop::default(), - app, - storage, - config, - } - } - - pub fn spawn(self) -> (JoinHandle, InclusionLaneStop) { - let stop = self.stop.clone(); + ) -> ( + mpsc::Sender, + JoinHandle>, + ) { + let (tx, rx) = mpsc::channel::(queue_capacity.max(1)); let handle = tokio::task::spawn_blocking(move || { - let mut lane = self; - match lane.run_forever() { - Err(err) => err, - Ok(()) => unreachable!("inclusion lane run loop is expected to be non-terminating"), - } + let mut lane = Self { + rx, + shutdown, + app, + storage, + config, + }; + lane.run_forever() }); - (handle, stop) + (tx, handle) } fn run_forever(&mut self) -> Result<(), InclusionLaneError> { self.run_catch_up()?; - let (mut next_safe_input_index, mut head) = self.load_lane_state()?; - let mut metrics = InclusionLaneMetrics::new( - self.config.metrics_enabled, - self.config.metrics_log_interval, - ); - let mut included = Vec::with_capacity(self.config.max_user_ops_per_chunk.max(1)); let mut safe_directs = Vec::with_capacity(self.config.safe_direct_buffer_capacity.max(1)); + let mut lane_state = self.load_or_initialize_lane_state(&mut safe_directs)?; - while !self.stop.is_shutdown_requested() { - metrics.on_loop_start(self.rx.len()); + loop { + if self.shutdown.is_shutdown_requested() { + self.reject_pending_user_ops_due_to_shutdown(); + return Ok(()); + } + + let advanced_safe_frontier = + self.maybe_advance_safe_frontier(&mut lane_state, &mut safe_directs)?; - // Canonical per-iteration order: include user-ops first, then drain direct inputs. 
- let user_op_started = metrics.phase_started_at(); let included_user_op_count = - self.process_user_op_chunk(&mut head, &mut included, &mut metrics)?; - metrics.on_user_ops_phase_end(user_op_started, included_user_op_count as u64); - - let direct_started = metrics.phase_started_at(); - let drained_safe_direct_count = self.drain_and_execute_safe_direct_inputs( - &mut next_safe_input_index, - &mut safe_directs, - )?; - metrics.on_directs_phase_end(direct_started, drained_safe_direct_count as u64); - let drained_safe_direct_start_index = next_safe_input_index - .checked_sub(drained_safe_direct_count as u64) - .expect("drained direct-input count cannot exceed next safe-input index"); - - if head.should_close_batch(&self.config) { - let close_started = metrics.phase_started_at(); - self.close_frame_and_batch( - &mut head, - drained_safe_direct_start_index, - drained_safe_direct_count, - )?; - metrics.on_close_phase_end(close_started, true); - } else if drained_safe_direct_count > 0 { - let close_started = metrics.phase_started_at(); - self.close_frame_only( - &mut head, - drained_safe_direct_start_index, - drained_safe_direct_count, - )?; - metrics.on_close_phase_end(close_started, false); - } + self.process_user_op_chunk(&mut lane_state.head, &mut included)?; - if included_user_op_count == 0 && drained_safe_direct_count == 0 { - let sleep_started = metrics.phase_started_at(); + if should_close_batch::(&lane_state.head, &self.config) { + let next_safe_block = lane_state.head.safe_block; + self.close_frame_and_batch(&mut lane_state.head, next_safe_block)?; + } else if !advanced_safe_frontier && included_user_op_count == 0 { thread::sleep(self.config.idle_poll_interval); - metrics.on_idle_sleep_end(sleep_started); } - - safe_directs.clear(); - metrics.maybe_log_window(); } - - metrics.log_final(); - Err(InclusionLaneError::ShutdownRequested) } fn run_catch_up(&mut self) -> Result<(), InclusionLaneError> { @@ -154,50 +81,107 @@ impl InclusionLane { .map_err(|source| 
InclusionLaneError::CatchUp { source }) } - fn load_lane_state(&mut self) -> Result<(u64, WriteHead), InclusionLaneError> { + fn load_or_initialize_lane_state( + &mut self, + safe_directs: &mut Vec, + ) -> Result { let next_safe_input_index = self .storage .load_next_undrained_direct_input_index() .map_err(|source| InclusionLaneError::LoadNextUndrainedDirectInputIndex { source })?; - let head = self + let last_drained_direct_range = DirectInputRange::empty_at(next_safe_input_index); + if let Some(head) = self .storage .load_open_state() + .map_err(|source| InclusionLaneError::LoadOpenState { source })? + { + return Ok(LaneState { + last_drained_direct_range, + head, + }); + } + + let frontier = self + .storage + .load_safe_frontier() + .map_err(|source| InclusionLaneError::LoadSafeDirectInputs { source })?; + assert!( + frontier.end_exclusive >= last_drained_direct_range.end_exclusive, + "safe direct-input head regressed during lane initialization: safe_end={}, next={}", + frontier.end_exclusive, + last_drained_direct_range.end_exclusive + ); + + let leading_direct_range = last_drained_direct_range.advance_to(frontier.end_exclusive); + self.execute_safe_direct_inputs_range(leading_direct_range, safe_directs)?; + let head = self + .storage + .initialize_open_state(frontier.safe_block, leading_direct_range) .map_err(|source| InclusionLaneError::LoadOpenState { source })?; - Ok((next_safe_input_index, head)) + Ok(LaneState { + last_drained_direct_range: leading_direct_range, + head, + }) } fn process_user_op_chunk( &mut self, head: &mut WriteHead, included: &mut Vec, - metrics: &mut InclusionLaneMetrics, ) -> Result { - let timing = dequeue_and_execute_user_op_chunk( + included.clear(); + dequeue_and_execute_user_op_chunk( &mut self.rx, &mut self.app, head.frame_fee, self.config.max_user_ops_per_chunk.max(1), included, )?; - metrics.on_user_op_dequeue_end(timing.dequeue); - metrics.on_user_op_app_execute_end(timing.app_execute); let included_count = included.len(); - 
let persist_started = metrics.phase_started_at(); self.persist_included_user_ops(head, included)?; - metrics.on_user_op_persist_end(persist_started); - let ack_started = metrics.phase_started_at(); for item in included.drain(..) { let _ = item.respond_to.send(Ok(())); } - metrics.on_user_op_ack_end(ack_started); Ok(included_count) } + fn maybe_advance_safe_frontier( + &mut self, + lane_state: &mut LaneState, + safe_directs: &mut Vec, + ) -> Result { + let frontier = self + .storage + .load_safe_frontier() + .map_err(|source| InclusionLaneError::LoadSafeDirectInputs { source })?; + assert!( + frontier.end_exclusive >= lane_state.last_drained_direct_range.end_exclusive, + "safe direct-input head regressed: safe_end={}, next={}", + frontier.end_exclusive, + lane_state.last_drained_direct_range.end_exclusive + ); + if frontier.safe_block <= lane_state.head.safe_block { + return Ok(false); + } + + let leading_direct_range = lane_state + .last_drained_direct_range + .advance_to(frontier.end_exclusive); + self.execute_safe_direct_inputs_range(leading_direct_range, safe_directs)?; + self.close_frame_only( + &mut lane_state.head, + frontier.safe_block, + leading_direct_range, + )?; + lane_state.last_drained_direct_range = leading_direct_range; + Ok(true) + } + fn persist_included_user_ops( &mut self, head: &mut WriteHead, @@ -211,65 +195,70 @@ impl InclusionLane { }) } - fn drain_and_execute_safe_direct_inputs( + fn execute_safe_direct_inputs_range( &mut self, - next_safe_input_index: &mut u64, - chunk: &mut Vec, - ) -> Result { - let safe_end = self - .storage - .safe_input_end_exclusive() - .map_err(|source| InclusionLaneError::LoadSafeDirectInputs { source })?; - assert!( - safe_end >= *next_safe_input_index, - "safe direct-input head regressed: safe_end={safe_end}, next={next_safe_input_index}" - ); - - let mut drained_total = 0_usize; + direct_range: DirectInputRange, + chunk: &mut Vec, + ) -> Result { let max_chunk_len = self.config.safe_direct_buffer_capacity.max(1) 
as u64; - while *next_safe_input_index < safe_end { - let chunk_end = safe_end.min((*next_safe_input_index).saturating_add(max_chunk_len)); - chunk.clear(); - - self.storage - .fill_safe_inputs(*next_safe_input_index, chunk_end, chunk) - .map_err(|source| InclusionLaneError::LoadSafeDirectInputs { source })?; - - for input in chunk.iter() { - self.app - .execute_direct_input(input.payload.as_slice()) - .map_err(|source| InclusionLaneError::ExecuteDirectInput { source })?; - } - - drained_total = drained_total.saturating_add(chunk.len()); - *next_safe_input_index = chunk_end; + let mut chunk_start = direct_range.start_inclusive; + while chunk_start < direct_range.end_exclusive { + let chunk_end_exclusive = direct_range + .end_exclusive + .min(chunk_start.saturating_add(max_chunk_len)); + self.load_safe_direct_inputs_chunk(chunk_start, chunk_end_exclusive, chunk)?; + self.execute_safe_direct_inputs_chunk(chunk.as_slice())?; + chunk_start = chunk_end_exclusive; } - Ok(drained_total) + Ok(direct_range) } fn close_frame_and_batch( &mut self, head: &mut WriteHead, - drained_direct_start_index: u64, - drained_direct_count: usize, + next_safe_block: u64, ) -> Result<(), InclusionLaneError> { self.storage - .close_frame_and_batch(head, drained_direct_start_index, drained_direct_count) + .close_frame_and_batch(head, next_safe_block) .map_err(|source| InclusionLaneError::CloseFrameRotate { source }) } fn close_frame_only( &mut self, head: &mut WriteHead, - drained_direct_start_index: u64, - drained_direct_count: usize, + next_safe_block: u64, + leading_direct_range: DirectInputRange, ) -> Result<(), InclusionLaneError> { self.storage - .close_frame_only(head, drained_direct_start_index, drained_direct_count) + .close_frame_only(head, next_safe_block, leading_direct_range) .map_err(|source| InclusionLaneError::CloseFrameRotate { source }) } + fn load_safe_direct_inputs_chunk( + &mut self, + start_inclusive: u64, + end_exclusive: u64, + chunk: &mut Vec, + ) -> Result<(), 
InclusionLaneError> { + chunk.clear(); + self.storage + .fill_safe_inputs(start_inclusive, end_exclusive, chunk) + .map_err(|source| InclusionLaneError::LoadSafeDirectInputs { source }) + } + + fn execute_safe_direct_inputs_chunk( + &mut self, + chunk: &[StoredDirectInput], + ) -> Result<(), InclusionLaneError> { + for input in chunk { + self.app + .execute_direct_input(input.payload.as_slice()) + .map_err(|source| InclusionLaneError::ExecuteDirectInput { source })?; + } + Ok(()) + } + fn respond_internal_to_all(pending: &mut Vec, message: String) { for item in pending.drain(..) { let _ = item @@ -277,27 +266,38 @@ impl InclusionLane { .send(Err(SequencerError::internal(message.clone()))); } } -} -impl WriteHead { - fn should_close_batch_by_time(&self, config: &InclusionLaneConfig) -> bool { - let age = SystemTime::now() - .duration_since(self.batch_created_at) - .unwrap_or_default(); - age >= config.max_batch_open + fn reject_pending_user_ops_due_to_shutdown(&mut self) { + loop { + match self.rx.try_recv() { + Ok(item) => { + let _ = item + .respond_to + .send(Err(SequencerError::unavailable("sequencer shutting down"))); + } + Err(mpsc::error::TryRecvError::Empty) + | Err(mpsc::error::TryRecvError::Disconnected) => return, + } + } } +} - fn should_close_batch_by_size(&self, config: &InclusionLaneConfig) -> bool { - // This is computed from committed user-op count only. Because checks happen at chunk - // boundaries, byte-based closure is a bounded soft policy, not a strict hard cutoff. 
- user_op_count_to_bytes(self.batch_user_op_count) >= config.max_batch_user_op_bytes as u64 - } +fn should_close_batch(head: &WriteHead, config: &InclusionLaneConfig) -> bool { + should_close_batch_by_time(head, config) || should_close_batch_by_size::(head, config) +} - fn should_close_batch(&self, config: &InclusionLaneConfig) -> bool { - let batch_has_activity = self.frame_in_batch > 0 || self.batch_user_op_count > 0; - batch_has_activity - && (self.should_close_batch_by_time(config) || self.should_close_batch_by_size(config)) - } +fn should_close_batch_by_time(head: &WriteHead, config: &InclusionLaneConfig) -> bool { + let age = SystemTime::now() + .duration_since(head.batch_created_at) + .unwrap_or_default(); + age >= config.max_batch_open +} + +fn should_close_batch_by_size( + head: &WriteHead, + config: &InclusionLaneConfig, +) -> bool { + user_op_count_to_bytes::(head.batch_user_op_count) >= config.max_batch_user_op_bytes as u64 } fn execute_user_op( @@ -323,73 +323,27 @@ fn execute_user_op( } } -fn dequeue_and_execute_user_op_chunk( - rx: &mut mpsc::Receiver, +pub(super) fn dequeue_and_execute_user_op_chunk( + rx: &mut mpsc::Receiver, app: &mut impl Application, current_frame_fee: u64, max_chunk: usize, included: &mut Vec, -) -> Result { +) -> Result<(), InclusionLaneError> { let mut executed_user_ops = 0_usize; - let mut timing = UserOpChunkTiming::default(); while executed_user_ops < max_chunk { - let dequeue_started = Instant::now(); match rx.try_recv() { - Ok(InclusionLaneInput::UserOp(item)) => { - timing.dequeue = timing.dequeue.saturating_add(dequeue_started.elapsed()); - let app_exec_started = Instant::now(); + Ok(item) => { execute_user_op(app, item, current_frame_fee, included); - timing.app_execute = timing - .app_execute - .saturating_add(app_exec_started.elapsed()); executed_user_ops = executed_user_ops.saturating_add(1); } - Err(mpsc::error::TryRecvError::Empty) => { - timing.dequeue = timing.dequeue.saturating_add(dequeue_started.elapsed()); 
- return Ok(timing); - } + Err(mpsc::error::TryRecvError::Empty) => return Ok(()), Err(mpsc::error::TryRecvError::Disconnected) => { - timing.dequeue = timing.dequeue.saturating_add(dequeue_started.elapsed()); if executed_user_ops == 0 { return Err(InclusionLaneError::ChannelClosed); } - return Ok(timing); - } - } - } - Ok(timing) -} - -#[derive(Debug, Default, Clone, Copy)] -struct UserOpChunkTiming { - dequeue: Duration, - app_execute: Duration, -} - -fn catch_up_application( - app: &mut impl Application, - storage: &mut Storage, -) -> Result<(), CatchUpError> { - let already_executed = app.executed_input_count(); - let replay = storage - .load_ordered_l2_txs_from(already_executed) - .map_err(|source| CatchUpError::LoadReplay { source })?; - - for item in replay { - match item { - SequencedL2Tx::UserOp(value) => { - app.execute_valid_user_op(&value).map_err(|err| { - CatchUpError::ReplayUserOpInternal { - reason: err.to_string(), - } - })?; - } - SequencedL2Tx::Direct(direct) => { - app.execute_direct_input(direct.payload.as_slice()) - .map_err(|err| CatchUpError::ReplayDirectInputInternal { - reason: err.to_string(), - })?; + return Ok(()); } } } @@ -397,343 +351,12 @@ fn catch_up_application( Ok(()) } -fn user_op_count_to_bytes(user_op_count: u64) -> u64 { - user_op_count.saturating_mul(SignedUserOp::max_batch_bytes_upper_bound() as u64) +fn user_op_count_to_bytes(user_op_count: u64) -> u64 { + let one_user_op_bytes = SignedUserOp::max_batch_metadata() + A::MAX_METHOD_PAYLOAD_BYTES; + user_op_count.saturating_mul(one_user_op_bytes as u64) } -#[cfg(test)] -mod tests { - use super::{ - InclusionLane, InclusionLaneConfig, InclusionLaneError, InclusionLaneInput, - InclusionLaneStop, PendingUserOp, dequeue_and_execute_user_op_chunk, - }; - use crate::storage::Storage; - use alloy_primitives::{Address, Signature, U256}; - use rusqlite::params; - use sequencer_core::application::{AppError, Application, InvalidReason}; - use sequencer_core::l2_tx::ValidUserOp; - use 
sequencer_core::user_op::{SignedUserOp, UserOp}; - use std::collections::HashMap; - use std::time::{Duration, SystemTime}; - use tempfile::TempDir; - use tokio::sync::{mpsc, oneshot}; - - #[derive(Default)] - struct TestApp { - nonces: HashMap, - executed_input_count: u64, - } - - impl Application for TestApp { - fn current_user_nonce(&self, sender: Address) -> u32 { - self.nonces.get(&sender).copied().unwrap_or(0) - } - - fn current_user_balance(&self, _sender: Address) -> U256 { - U256::MAX - } - - fn validate_user_op( - &self, - _sender: Address, - _user_op: &UserOp, - _current_fee: u64, - ) -> Result<(), InvalidReason> { - Ok(()) - } - - fn execute_valid_user_op(&mut self, user_op: &ValidUserOp) -> Result<(), AppError> { - let next_nonce = self.current_user_nonce(user_op.sender).wrapping_add(1); - self.nonces.insert(user_op.sender, next_nonce); - self.executed_input_count = self.executed_input_count.saturating_add(1); - Ok(()) - } - - fn execute_direct_input(&mut self, _payload: &[u8]) -> Result<(), AppError> { - self.executed_input_count = self.executed_input_count.saturating_add(1); - Ok(()) - } - - fn executed_input_count(&self) -> u64 { - self.executed_input_count - } - } - - struct TestDb { - _dir: TempDir, - path: String, - } - - fn temp_db(name: &str) -> TestDb { - let dir = tempfile::Builder::new() - .prefix(format!("sequencer-inclusion-lane-{name}-").as_str()) - .tempdir() - .expect("create temporary test directory"); - let path = dir.path().join("sequencer.sqlite"); - TestDb { - _dir: dir, - path: path.to_string_lossy().into_owned(), - } - } - - fn default_test_config() -> InclusionLaneConfig { - InclusionLaneConfig { - max_user_ops_per_chunk: 16, - safe_direct_buffer_capacity: 16, - max_batch_open: Duration::MAX, - max_batch_user_op_bytes: 1_000_000_000, - idle_poll_interval: Duration::from_millis(2), - metrics_enabled: false, - metrics_log_interval: Duration::from_secs(5), - } - } - - fn start_lane( - db_path: &str, - config: InclusionLaneConfig, - 
) -> ( - mpsc::Sender, - InclusionLaneStop, - tokio::task::JoinHandle, - ) { - let storage = Storage::open(db_path, "NORMAL").expect("open storage"); - let (tx, rx) = mpsc::channel::(128); - let lane = InclusionLane::new(rx, TestApp::default(), storage, config); - let (handle, stop) = lane.spawn(); - (tx, stop, handle) - } - - fn make_pending_user_op( - seed: u8, - ) -> ( - PendingUserOp, - oneshot::Receiver>, - ) { - let sender = Address::from_slice(&[seed; 20]); - let (respond_to, recv) = oneshot::channel(); - let user_op = UserOp { - nonce: 0, - max_fee: 1, - data: vec![seed; 4].into(), - }; - ( - PendingUserOp { - signed: SignedUserOp { - sender, - signature: Signature::test_signature(), - user_op, - }, - respond_to, - received_at: SystemTime::now(), - }, - recv, - ) - } - - fn read_count(db_path: &str, table: &str) -> i64 { - let conn = Storage::open_connection(db_path, "NORMAL").expect("open sqlite reader"); - let sql = format!("SELECT COUNT(*) FROM {table}"); - conn.query_row(sql.as_str(), [], |row| row.get(0)) - .expect("count rows") - } - - fn read_frame_direct_count(db_path: &str, batch_index: i64, frame_in_batch: i64) -> i64 { - let conn = Storage::open_connection(db_path, "NORMAL").expect("open sqlite reader"); - conn.query_row( - "SELECT COUNT(*) FROM sequenced_l2_txs - WHERE batch_index = ?1 - AND frame_in_batch = ?2 - AND direct_input_index IS NOT NULL", - params![batch_index, frame_in_batch], - |row| row.get(0), - ) - .expect("query frame direct count") - } - - async fn wait_until(timeout: Duration, mut predicate: impl FnMut() -> bool) -> bool { - let started = tokio::time::Instant::now(); - while started.elapsed() < timeout { - if predicate() { - return true; - } - tokio::time::sleep(Duration::from_millis(5)).await; - } - predicate() - } - - async fn shutdown_lane( - stop: &InclusionLaneStop, - handle: tokio::task::JoinHandle, - ) { - stop.request_shutdown(); - let joined = tokio::time::timeout(Duration::from_secs(2), handle) - .await - 
.expect("wait for lane shutdown"); - let err = joined.expect("join lane task"); - assert!(matches!(err, InclusionLaneError::ShutdownRequested)); - } - - #[tokio::test(flavor = "multi_thread", worker_threads = 2)] - async fn ack_happens_after_chunk_commit_without_closing_frame() { - let db = temp_db("ack-chunk-commit"); - let (tx, lane_stop, lane_handle) = start_lane(db.path.as_str(), default_test_config()); - let (pending, recv) = make_pending_user_op(0x11); - - tx.send(InclusionLaneInput::UserOp(pending)) - .await - .expect("send user op"); - let ack = tokio::time::timeout(Duration::from_secs(2), recv) - .await - .expect("wait for ack") - .expect("ack channel open"); - let user_ops_count = read_count(db.path.as_str(), "user_ops"); - let frame0_direct_count = read_frame_direct_count(db.path.as_str(), 0, 0); - shutdown_lane(&lane_stop, lane_handle).await; - - assert!(ack.is_ok(), "user op should be included"); - assert_eq!(user_ops_count, 1); - assert_eq!( - frame0_direct_count, 0, - "frame should stay open when no directs and no batch close" - ); - } - - #[tokio::test(flavor = "multi_thread", worker_threads = 2)] - async fn direct_inputs_close_frame_and_persist_drain() { - let db = temp_db("directs-close-frame"); - let (_tx, lane_stop, lane_handle) = start_lane(db.path.as_str(), default_test_config()); - let mut feeder_storage = - Storage::open(db.path.as_str(), "NORMAL").expect("open feeder storage"); - - feeder_storage - .append_safe_direct_inputs(&[crate::storage::IndexedDirectInput { - index: 0, - payload: vec![0xaa], - block_number: 0, - }]) - .expect("append safe direct input"); - - let drained = wait_until(Duration::from_secs(2), || { - read_frame_direct_count(db.path.as_str(), 0, 0) == 1 - }) - .await; - let frames_count = read_count(db.path.as_str(), "frames"); - shutdown_lane(&lane_stop, lane_handle).await; - - assert!(drained, "expected one drained direct input in frame 0"); - assert_eq!(frames_count, 2); - } - - #[tokio::test(flavor = "multi_thread", 
worker_threads = 2)] - async fn direct_inputs_are_paginated_by_buffer_capacity() { - let db = temp_db("directs-pagination"); - let mut config = default_test_config(); - config.safe_direct_buffer_capacity = 2; - let (_tx, lane_stop, lane_handle) = start_lane(db.path.as_str(), config); - let mut feeder_storage = - Storage::open(db.path.as_str(), "NORMAL").expect("open feeder storage"); - - let mut directs = Vec::new(); - for index in 0..5_u64 { - directs.push(crate::storage::IndexedDirectInput { - index, - payload: vec![index as u8], - block_number: 0, - }); - } - feeder_storage - .append_safe_direct_inputs(directs.as_slice()) - .expect("append safe direct inputs"); - - let drained = wait_until(Duration::from_secs(2), || { - read_frame_direct_count(db.path.as_str(), 0, 0) == 5 - }) - .await; - let frames_count = read_count(db.path.as_str(), "frames"); - shutdown_lane(&lane_stop, lane_handle).await; - - assert!(drained, "expected five drained direct inputs in frame 0"); - assert_eq!(frames_count, 2); - } - - #[tokio::test(flavor = "multi_thread", worker_threads = 2)] - async fn batch_closes_when_max_open_time_is_reached() { - let db = temp_db("batch-close-time"); - let mut config = default_test_config(); - config.max_batch_open = Duration::from_millis(20); - let (tx, lane_stop, lane_handle) = start_lane(db.path.as_str(), config); - let (pending, recv) = make_pending_user_op(0x22); - - tx.send(InclusionLaneInput::UserOp(pending)) - .await - .expect("send user op"); - let ack = tokio::time::timeout(Duration::from_secs(2), recv) - .await - .expect("wait for ack") - .expect("ack channel open"); - let rotated = wait_until(Duration::from_secs(2), || { - read_count(db.path.as_str(), "batches") >= 2 - }) - .await; - let drain = read_frame_direct_count(db.path.as_str(), 0, 0); - shutdown_lane(&lane_stop, lane_handle).await; - - assert!(ack.is_ok(), "user op should be included"); - assert!(rotated, "expected batch rotation by time"); - assert_eq!(drain, 0); - } - - 
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] - async fn batch_closes_when_max_user_op_bytes_is_reached() { - let db = temp_db("batch-close-size"); - let mut config = default_test_config(); - config.max_batch_user_op_bytes = SignedUserOp::max_batch_bytes_upper_bound(); - let (tx, lane_stop, lane_handle) = start_lane(db.path.as_str(), config); - let (pending, recv) = make_pending_user_op(0x33); - - tx.send(InclusionLaneInput::UserOp(pending)) - .await - .expect("send user op"); - let ack = tokio::time::timeout(Duration::from_secs(2), recv) - .await - .expect("wait for ack") - .expect("ack channel open"); - let rotated = wait_until(Duration::from_secs(2), || { - read_count(db.path.as_str(), "batches") >= 2 - }) - .await; - let drain = read_frame_direct_count(db.path.as_str(), 0, 0); - shutdown_lane(&lane_stop, lane_handle).await; - - assert!(ack.is_ok(), "user op should be included"); - assert!(rotated, "expected batch rotation by size"); - assert_eq!(drain, 0); - } - - #[test] - fn dequeue_returns_channel_closed_when_disconnected() { - let (tx, mut rx) = mpsc::channel::(1); - drop(tx); - let mut app = TestApp::default(); - let mut included = Vec::new(); - - let err = - dequeue_and_execute_user_op_chunk(&mut rx, &mut app, 1, 1, &mut included).unwrap_err(); - assert!(matches!(err, InclusionLaneError::ChannelClosed)); - } - - #[test] - fn dequeue_flushes_executed_ops_before_observing_disconnect() { - let (tx, mut rx) = mpsc::channel::(2); - let (pending, _recv) = make_pending_user_op(0x44); - tx.blocking_send(InclusionLaneInput::UserOp(pending)) - .expect("enqueue pending user op"); - drop(tx); - - let mut app = TestApp::default(); - let mut included = Vec::new(); - dequeue_and_execute_user_op_chunk(&mut rx, &mut app, 1, 16, &mut included) - .expect("should flush processed user ops before disconnect"); - assert_eq!(included.len(), 1); - } +struct LaneState { + last_drained_direct_range: DirectInputRange, + head: WriteHead, } diff --git 
a/sequencer/src/inclusion_lane/mod.rs b/sequencer/src/inclusion_lane/mod.rs index 10cb443..7e52786 100644 --- a/sequencer/src/inclusion_lane/mod.rs +++ b/sequencer/src/inclusion_lane/mod.rs @@ -1,11 +1,16 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) +mod catch_up; +mod config; mod error; mod lane; -mod profiling; mod types; +pub use config::InclusionLaneConfig; pub use error::InclusionLaneError; -pub use lane::{InclusionLane, InclusionLaneConfig, InclusionLaneStop}; -pub use types::{InclusionLaneInput, PendingUserOp, SequencerError}; +pub use lane::InclusionLane; +pub use types::{PendingUserOp, SequencerError}; + +#[cfg(test)] +mod tests; diff --git a/sequencer/src/inclusion_lane/profiling.rs b/sequencer/src/inclusion_lane/profiling.rs deleted file mode 100644 index 0aace24..0000000 --- a/sequencer/src/inclusion_lane/profiling.rs +++ /dev/null @@ -1,263 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use std::time::{Duration, Instant}; -use tracing::info; - -#[derive(Debug)] -pub(super) struct InclusionLaneMetrics { - enabled: bool, - log_interval: Duration, - window_started_at: Instant, - loops: u64, - included_user_ops: u64, - drained_direct_inputs: u64, - frame_only_closes: u64, - frame_and_batch_closes: u64, - idle_sleeps: u64, - max_queue_depth: usize, - user_op_phase: Duration, - user_op_dequeue_phase: Duration, - user_op_app_execute_phase: Duration, - user_op_persist_phase: Duration, - user_op_ack_phase: Duration, - direct_phase: Duration, - close_phase: Duration, - idle_sleep: Duration, -} - -impl InclusionLaneMetrics { - pub(super) fn new(enabled: bool, log_interval: Duration) -> Self { - Self { - enabled, - log_interval, - window_started_at: Instant::now(), - loops: 0, - included_user_ops: 0, - drained_direct_inputs: 0, - frame_only_closes: 0, - frame_and_batch_closes: 0, - idle_sleeps: 0, - max_queue_depth: 0, - 
user_op_phase: Duration::ZERO, - user_op_dequeue_phase: Duration::ZERO, - user_op_app_execute_phase: Duration::ZERO, - user_op_persist_phase: Duration::ZERO, - user_op_ack_phase: Duration::ZERO, - direct_phase: Duration::ZERO, - close_phase: Duration::ZERO, - idle_sleep: Duration::ZERO, - } - } - - pub(super) fn phase_started_at(&self) -> Option { - self.enabled.then(Instant::now) - } - - pub(super) fn on_loop_start(&mut self, queue_depth: usize) { - if !self.enabled { - return; - } - self.loops = self.loops.saturating_add(1); - self.max_queue_depth = self.max_queue_depth.max(queue_depth); - } - - pub(super) fn on_user_ops_phase_end( - &mut self, - started_at: Option, - included_user_ops: u64, - ) { - if !self.enabled { - return; - } - self.included_user_ops = self.included_user_ops.saturating_add(included_user_ops); - self.user_op_phase = self - .user_op_phase - .saturating_add(elapsed_or_zero(started_at)); - } - - pub(super) fn on_user_op_dequeue_end(&mut self, elapsed: Duration) { - if !self.enabled { - return; - } - self.user_op_dequeue_phase = self.user_op_dequeue_phase.saturating_add(elapsed); - } - - pub(super) fn on_user_op_app_execute_end(&mut self, elapsed: Duration) { - if !self.enabled { - return; - } - self.user_op_app_execute_phase = self.user_op_app_execute_phase.saturating_add(elapsed); - } - - pub(super) fn on_user_op_persist_end(&mut self, started_at: Option) { - if !self.enabled { - return; - } - self.user_op_persist_phase = self - .user_op_persist_phase - .saturating_add(elapsed_or_zero(started_at)); - } - - pub(super) fn on_user_op_ack_end(&mut self, started_at: Option) { - if !self.enabled { - return; - } - self.user_op_ack_phase = self - .user_op_ack_phase - .saturating_add(elapsed_or_zero(started_at)); - } - - pub(super) fn on_directs_phase_end( - &mut self, - started_at: Option, - drained_direct_inputs: u64, - ) { - if !self.enabled { - return; - } - self.drained_direct_inputs = self - .drained_direct_inputs - 
.saturating_add(drained_direct_inputs); - self.direct_phase = self - .direct_phase - .saturating_add(elapsed_or_zero(started_at)); - } - - pub(super) fn on_close_phase_end(&mut self, started_at: Option, closed_batch: bool) { - if !self.enabled { - return; - } - if closed_batch { - self.frame_and_batch_closes = self.frame_and_batch_closes.saturating_add(1); - } else { - self.frame_only_closes = self.frame_only_closes.saturating_add(1); - } - self.close_phase = self.close_phase.saturating_add(elapsed_or_zero(started_at)); - } - - pub(super) fn on_idle_sleep_end(&mut self, started_at: Option) { - if !self.enabled { - return; - } - self.idle_sleeps = self.idle_sleeps.saturating_add(1); - self.idle_sleep = self.idle_sleep.saturating_add(elapsed_or_zero(started_at)); - } - - pub(super) fn maybe_log_window(&mut self) { - if !self.enabled { - return; - } - let elapsed = self.window_started_at.elapsed(); - if elapsed < self.log_interval { - return; - } - self.log_window(elapsed, false); - self.reset_window(); - } - - pub(super) fn log_final(&mut self) { - if !self.enabled { - return; - } - let elapsed = self.window_started_at.elapsed(); - if elapsed.is_zero() && self.loops == 0 { - return; - } - self.log_window(elapsed, true); - } - - fn log_window(&self, elapsed: Duration, final_window: bool) { - let elapsed_secs = elapsed.as_secs_f64(); - let included_tps = if elapsed_secs > 0.0 { - self.included_user_ops as f64 / elapsed_secs - } else { - 0.0 - }; - let user_op_dequeue_share_pct = percentage( - self.user_op_dequeue_phase.as_nanos(), - self.user_op_phase.as_nanos(), - ); - let user_op_app_execute_share_pct = percentage( - self.user_op_app_execute_phase.as_nanos(), - self.user_op_phase.as_nanos(), - ); - let user_op_persist_share_pct = percentage( - self.user_op_persist_phase.as_nanos(), - self.user_op_phase.as_nanos(), - ); - let user_op_ack_share_pct = percentage( - self.user_op_ack_phase.as_nanos(), - self.user_op_phase.as_nanos(), - ); - let app_plus_persist = self - 
.user_op_app_execute_phase - .saturating_add(self.user_op_persist_phase); - let user_op_app_share_pct_of_app_plus_persist = percentage( - self.user_op_app_execute_phase.as_nanos(), - app_plus_persist.as_nanos(), - ); - let user_op_persist_share_pct_of_app_plus_persist = percentage( - self.user_op_persist_phase.as_nanos(), - app_plus_persist.as_nanos(), - ); - info!( - final_window, - window_ms = elapsed.as_millis() as u64, - loops = self.loops, - included_user_ops = self.included_user_ops, - drained_direct_inputs = self.drained_direct_inputs, - included_tps = included_tps, - frame_only_closes = self.frame_only_closes, - frame_and_batch_closes = self.frame_and_batch_closes, - idle_sleeps = self.idle_sleeps, - max_queue_depth = self.max_queue_depth, - user_op_phase_ms = self.user_op_phase.as_millis() as u64, - user_op_dequeue_phase_ms = self.user_op_dequeue_phase.as_millis() as u64, - user_op_app_execute_phase_ms = self.user_op_app_execute_phase.as_millis() as u64, - user_op_persist_phase_ms = self.user_op_persist_phase.as_millis() as u64, - user_op_ack_phase_ms = self.user_op_ack_phase.as_millis() as u64, - user_op_dequeue_share_pct = user_op_dequeue_share_pct, - user_op_app_execute_share_pct = user_op_app_execute_share_pct, - user_op_persist_share_pct = user_op_persist_share_pct, - user_op_ack_share_pct = user_op_ack_share_pct, - user_op_app_share_pct_of_app_plus_persist = user_op_app_share_pct_of_app_plus_persist, - user_op_persist_share_pct_of_app_plus_persist = - user_op_persist_share_pct_of_app_plus_persist, - direct_phase_ms = self.direct_phase.as_millis() as u64, - close_phase_ms = self.close_phase.as_millis() as u64, - idle_sleep_ms = self.idle_sleep.as_millis() as u64, - "inclusion lane metrics" - ); - } - - fn reset_window(&mut self) { - self.window_started_at = Instant::now(); - self.loops = 0; - self.included_user_ops = 0; - self.drained_direct_inputs = 0; - self.frame_only_closes = 0; - self.frame_and_batch_closes = 0; - self.idle_sleeps = 0; - 
self.max_queue_depth = 0; - self.user_op_phase = Duration::ZERO; - self.user_op_dequeue_phase = Duration::ZERO; - self.user_op_app_execute_phase = Duration::ZERO; - self.user_op_persist_phase = Duration::ZERO; - self.user_op_ack_phase = Duration::ZERO; - self.direct_phase = Duration::ZERO; - self.close_phase = Duration::ZERO; - self.idle_sleep = Duration::ZERO; - } -} - -fn elapsed_or_zero(started_at: Option) -> Duration { - started_at.map_or(Duration::ZERO, |value| value.elapsed()) -} - -fn percentage(part: u128, total: u128) -> f64 { - if total == 0 { - return 0.0; - } - (part as f64) * 100.0 / (total as f64) -} diff --git a/sequencer/src/inclusion_lane/tests.rs b/sequencer/src/inclusion_lane/tests.rs new file mode 100644 index 0000000..17e6801 --- /dev/null +++ b/sequencer/src/inclusion_lane/tests.rs @@ -0,0 +1,610 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use std::collections::HashMap; +use std::time::{Duration, SystemTime}; + +use alloy_primitives::{Address, Signature, U256}; +use app_core::application::MAX_METHOD_PAYLOAD_BYTES as WALLET_MAX_METHOD_PAYLOAD_BYTES; +use rusqlite::params; +use tempfile::TempDir; +use tokio::sync::{mpsc, oneshot}; + +use crate::shutdown::ShutdownSignal; +use crate::storage::{DirectInputRange, Storage, StoredDirectInput}; +use sequencer_core::application::{AppError, Application, InvalidReason}; +use sequencer_core::l2_tx::{SequencedL2Tx, ValidUserOp}; +use sequencer_core::user_op::{SignedUserOp, UserOp}; + +use super::catch_up::catch_up_application_paged; +use super::error::CatchUpError; +use super::lane::dequeue_and_execute_user_op_chunk; +use super::{InclusionLane, InclusionLaneConfig, InclusionLaneError, PendingUserOp}; + +#[derive(Default)] +struct TestApp { + nonces: HashMap, + executed_input_count: u64, +} + +impl Application for TestApp { + const MAX_METHOD_PAYLOAD_BYTES: usize = WALLET_MAX_METHOD_PAYLOAD_BYTES; + + fn current_user_nonce(&self, sender: 
Address) -> u32 { + self.nonces.get(&sender).copied().unwrap_or(0) + } + + fn current_user_balance(&self, _sender: Address) -> U256 { + U256::MAX + } + + fn validate_user_op( + &self, + _sender: Address, + _user_op: &UserOp, + _current_fee: u64, + ) -> Result<(), InvalidReason> { + Ok(()) + } + + fn execute_valid_user_op(&mut self, user_op: &ValidUserOp) -> Result<(), AppError> { + let next_nonce = self.current_user_nonce(user_op.sender).wrapping_add(1); + self.nonces.insert(user_op.sender, next_nonce); + self.executed_input_count = self.executed_input_count.saturating_add(1); + Ok(()) + } + + fn execute_direct_input(&mut self, _payload: &[u8]) -> Result<(), AppError> { + self.executed_input_count = self.executed_input_count.saturating_add(1); + Ok(()) + } + + fn executed_input_count(&self) -> u64 { + self.executed_input_count + } +} + +struct TestDb { + _dir: TempDir, + path: String, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +enum ReplayEvent { + UserOp { sender: Address, data: Vec }, + DirectInput(Vec), +} + +struct ReplayRecordingApp { + executed_input_count: u64, + replayed: Vec, +} + +impl ReplayRecordingApp { + fn with_executed_input_count(executed_input_count: u64) -> Self { + Self { + executed_input_count, + replayed: Vec::new(), + } + } +} + +impl Default for ReplayRecordingApp { + fn default() -> Self { + Self::with_executed_input_count(0) + } +} + +impl Application for ReplayRecordingApp { + const MAX_METHOD_PAYLOAD_BYTES: usize = WALLET_MAX_METHOD_PAYLOAD_BYTES; + + fn current_user_nonce(&self, _sender: Address) -> u32 { + 0 + } + + fn current_user_balance(&self, _sender: Address) -> U256 { + U256::MAX + } + + fn validate_user_op( + &self, + _sender: Address, + _user_op: &UserOp, + _current_fee: u64, + ) -> Result<(), InvalidReason> { + Ok(()) + } + + fn execute_valid_user_op(&mut self, user_op: &ValidUserOp) -> Result<(), AppError> { + self.replayed.push(ReplayEvent::UserOp { + sender: user_op.sender, + data: user_op.data.clone(), + }); + 
self.executed_input_count = self.executed_input_count.saturating_add(1); + Ok(()) + } + + fn execute_direct_input(&mut self, payload: &[u8]) -> Result<(), AppError> { + self.replayed + .push(ReplayEvent::DirectInput(payload.to_vec())); + self.executed_input_count = self.executed_input_count.saturating_add(1); + Ok(()) + } + + fn executed_input_count(&self) -> u64 { + self.executed_input_count + } +} + +fn temp_db(name: &str) -> TestDb { + let dir = tempfile::Builder::new() + .prefix(format!("sequencer-inclusion-lane-{name}-").as_str()) + .tempdir() + .expect("create temporary test directory"); + let path = dir.path().join("sequencer.sqlite"); + TestDb { + _dir: dir, + path: path.to_string_lossy().into_owned(), + } +} + +fn default_test_config() -> InclusionLaneConfig { + InclusionLaneConfig { + max_user_ops_per_chunk: 16, + safe_direct_buffer_capacity: 16, + max_batch_open: Duration::MAX, + max_batch_user_op_bytes: 1_000_000_000, + idle_poll_interval: Duration::from_millis(2), + } +} + +async fn start_lane( + db_path: &str, + config: InclusionLaneConfig, +) -> ( + mpsc::Sender, + ShutdownSignal, + tokio::task::JoinHandle>, +) { + let storage = Storage::open(db_path, "NORMAL").expect("open storage"); + let shutdown = ShutdownSignal::default(); + let (tx, handle) = + InclusionLane::start(128, shutdown.clone(), TestApp::default(), storage, config); + let initialized = wait_until(Duration::from_secs(2), || { + let mut storage = Storage::open(db_path, "NORMAL").expect("open storage"); + storage + .load_open_state() + .expect("load open state") + .is_some() + }) + .await; + assert!(initialized, "lane should initialize its first open state"); + (tx, shutdown, handle) +} + +fn make_pending_user_op( + seed: u8, +) -> ( + PendingUserOp, + oneshot::Receiver>, +) { + let sender = Address::from_slice(&[seed; 20]); + let (respond_to, recv) = oneshot::channel(); + let user_op = UserOp { + nonce: 0, + max_fee: 1, + data: vec![seed; 4].into(), + }; + ( + PendingUserOp { + signed: 
SignedUserOp { + sender, + signature: Signature::test_signature(), + user_op, + }, + respond_to, + received_at: SystemTime::now(), + }, + recv, + ) +} + +fn seed_replay_fixture(db_path: &str) -> Vec { + let mut storage = Storage::open(db_path, "NORMAL").expect("open storage"); + let mut head = storage + .initialize_open_state(0, DirectInputRange::empty_at(0)) + .expect("initialize open state"); + + let user_op_a = make_pending_user_op(0x51).0; + let user_op_b = make_pending_user_op(0x52).0; + storage + .append_user_ops_chunk(&mut head, &[user_op_a, user_op_b]) + .expect("append first frame user ops"); + storage + .append_safe_direct_inputs( + 10, + &[StoredDirectInput { + payload: vec![0xaa], + block_number: 10, + }], + ) + .expect("append first direct input"); + storage + .close_frame_only(&mut head, 10, DirectInputRange::new(0, 1)) + .expect("close first frame"); + + let user_op_c = make_pending_user_op(0x53).0; + storage + .append_user_ops_chunk(&mut head, &[user_op_c]) + .expect("append second frame user op"); + storage + .append_safe_direct_inputs( + 20, + &[StoredDirectInput { + payload: vec![0xbb], + block_number: 20, + }], + ) + .expect("append second direct input"); + storage + .close_frame_only(&mut head, 20, DirectInputRange::new(1, 2)) + .expect("close second frame"); + + storage + .append_safe_direct_inputs( + 30, + &[StoredDirectInput { + payload: vec![0xcc], + block_number: 30, + }], + ) + .expect("append third direct input"); + storage + .close_frame_only(&mut head, 30, DirectInputRange::new(2, 3)) + .expect("close third frame"); + + vec![ + ReplayEvent::UserOp { + sender: Address::from_slice(&[0x51; 20]), + data: vec![0x51; 4], + }, + ReplayEvent::UserOp { + sender: Address::from_slice(&[0x52; 20]), + data: vec![0x52; 4], + }, + ReplayEvent::DirectInput(vec![0xaa]), + ReplayEvent::UserOp { + sender: Address::from_slice(&[0x53; 20]), + data: vec![0x53; 4], + }, + ReplayEvent::DirectInput(vec![0xbb]), + ReplayEvent::DirectInput(vec![0xcc]), + ] +} + 
+fn read_count(db_path: &str, table: &str) -> i64 { + let conn = Storage::open_connection(db_path, "NORMAL").expect("open sqlite reader"); + let sql = format!("SELECT COUNT(*) FROM {table}"); + conn.query_row(sql.as_str(), [], |row| row.get(0)) + .expect("count rows") +} + +fn read_frame_direct_count(db_path: &str, batch_index: i64, frame_in_batch: i64) -> i64 { + let conn = Storage::open_connection(db_path, "NORMAL").expect("open sqlite reader"); + conn.query_row( + "SELECT COUNT(*) FROM sequenced_l2_txs + WHERE batch_index = ?1 + AND frame_in_batch = ?2 + AND direct_input_index IS NOT NULL", + params![batch_index, frame_in_batch], + |row| row.get(0), + ) + .expect("query frame direct count") +} + +async fn wait_until(timeout: Duration, mut predicate: impl FnMut() -> bool) -> bool { + let started = tokio::time::Instant::now(); + while started.elapsed() < timeout { + if predicate() { + return true; + } + tokio::time::sleep(Duration::from_millis(5)).await; + } + predicate() +} + +async fn shutdown_lane( + shutdown: &ShutdownSignal, + handle: tokio::task::JoinHandle>, +) { + shutdown.request_shutdown(); + let joined = tokio::time::timeout(Duration::from_secs(2), handle) + .await + .expect("wait for lane shutdown"); + let result = joined.expect("join lane task"); + assert!(result.is_ok(), "lane should shut down cleanly: {result:?}"); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn ack_happens_after_chunk_commit_without_closing_frame() { + let db = temp_db("ack-chunk-commit"); + let (tx, shutdown, lane_handle) = start_lane(db.path.as_str(), default_test_config()).await; + let (pending, recv) = make_pending_user_op(0x11); + + tx.send(pending).await.expect("send user op"); + let ack = tokio::time::timeout(Duration::from_secs(2), recv) + .await + .expect("wait for ack") + .expect("ack channel open"); + let user_ops_count = read_count(db.path.as_str(), "user_ops"); + let frame0_direct_count = read_frame_direct_count(db.path.as_str(), 0, 0); + 
shutdown_lane(&shutdown, lane_handle).await; + + assert!(ack.is_ok(), "user op should be included"); + assert_eq!(user_ops_count, 1); + assert_eq!( + frame0_direct_count, 0, + "frame should stay open when no directs and no batch close" + ); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn direct_inputs_close_frame_and_persist_drain() { + let db = temp_db("directs-close-frame"); + let (_tx, shutdown, lane_handle) = start_lane(db.path.as_str(), default_test_config()).await; + let mut feeder_storage = + Storage::open(db.path.as_str(), "NORMAL").expect("open feeder storage"); + + feeder_storage + .append_safe_direct_inputs( + 10, + &[StoredDirectInput { + payload: vec![0xaa], + block_number: 10, + }], + ) + .expect("append safe direct input"); + + let drained = wait_until(Duration::from_secs(2), || { + read_frame_direct_count(db.path.as_str(), 0, 1) == 1 + }) + .await; + let frames_count = read_count(db.path.as_str(), "frames"); + shutdown_lane(&shutdown, lane_handle).await; + + assert!(drained, "expected one drained direct input in frame 1"); + assert_eq!(frames_count, 2); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn direct_inputs_are_paginated_by_buffer_capacity() { + let db = temp_db("directs-pagination"); + let mut config = default_test_config(); + config.safe_direct_buffer_capacity = 2; + let (_tx, shutdown, lane_handle) = start_lane(db.path.as_str(), config).await; + let mut feeder_storage = + Storage::open(db.path.as_str(), "NORMAL").expect("open feeder storage"); + + let mut directs = Vec::new(); + for index in 0..5_u64 { + directs.push(StoredDirectInput { + payload: vec![index as u8], + block_number: 10, + }); + } + feeder_storage + .append_safe_direct_inputs(10, directs.as_slice()) + .expect("append safe direct inputs"); + + let drained = wait_until(Duration::from_secs(2), || { + read_frame_direct_count(db.path.as_str(), 0, 1) == 5 + }) + .await; + let frames_count = read_count(db.path.as_str(), 
"frames"); + shutdown_lane(&shutdown, lane_handle).await; + + assert!(drained, "expected five drained direct inputs in frame 1"); + assert_eq!(frames_count, 2); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn safe_directs_already_available_are_sequenced_before_later_user_ops() { + let db = temp_db("directs-before-later-userops"); + let (tx, shutdown, lane_handle) = start_lane(db.path.as_str(), default_test_config()).await; + let mut feeder_storage = + Storage::open(db.path.as_str(), "NORMAL").expect("open feeder storage"); + + feeder_storage + .append_safe_direct_inputs( + 10, + &[StoredDirectInput { + payload: vec![0xaa], + block_number: 10, + }], + ) + .expect("append safe direct input"); + + let drained = wait_until(Duration::from_secs(2), || { + read_frame_direct_count(db.path.as_str(), 0, 1) == 1 + }) + .await; + assert!( + drained, + "expected leading direct inputs to land before later user-op sequencing" + ); + + let (pending, recv) = make_pending_user_op(0x31); + tx.send(pending).await.expect("send user op"); + let ack = tokio::time::timeout(Duration::from_secs(2), recv) + .await + .expect("wait for ack") + .expect("ack channel open"); + + let replay = { + let mut storage = Storage::open(db.path.as_str(), "NORMAL").expect("open storage"); + storage + .load_ordered_l2_txs_from(0) + .expect("load ordered replay") + }; + shutdown_lane(&shutdown, lane_handle).await; + + assert!(ack.is_ok(), "user op should be included"); + assert_eq!(replay.len(), 2); + assert!(matches!( + replay.first(), + Some(SequencedL2Tx::Direct(direct)) if direct.payload.as_slice() == [0xaa] + )); + assert!(matches!( + replay.get(1), + Some(SequencedL2Tx::UserOp(user_op)) if user_op.data.as_slice() == [0x31, 0x31, 0x31, 0x31] + )); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn batch_closes_when_max_open_time_is_reached() { + let db = temp_db("batch-close-time"); + let mut config = default_test_config(); + config.max_batch_open = 
Duration::from_millis(20); + let (tx, shutdown, lane_handle) = start_lane(db.path.as_str(), config).await; + let (pending, recv) = make_pending_user_op(0x22); + + tx.send(pending).await.expect("send user op"); + let ack = tokio::time::timeout(Duration::from_secs(2), recv) + .await + .expect("wait for ack") + .expect("ack channel open"); + let rotated = wait_until(Duration::from_secs(2), || { + read_count(db.path.as_str(), "batches") >= 2 + }) + .await; + let drain = read_frame_direct_count(db.path.as_str(), 0, 0); + shutdown_lane(&shutdown, lane_handle).await; + + assert!(ack.is_ok(), "user op should be included"); + assert!(rotated, "expected batch rotation by time"); + assert_eq!(drain, 0); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn empty_batches_close_when_max_open_time_is_reached() { + let db = temp_db("empty-batch-close-time"); + let mut config = default_test_config(); + config.max_batch_open = Duration::from_millis(20); + let (_tx, shutdown, lane_handle) = start_lane(db.path.as_str(), config).await; + + let rotated = wait_until(Duration::from_secs(2), || { + read_count(db.path.as_str(), "batches") >= 2 + }) + .await; + let frames_count = read_count(db.path.as_str(), "frames"); + shutdown_lane(&shutdown, lane_handle).await; + + assert!(rotated, "expected idle batch rotation by time"); + assert!( + frames_count >= 2, + "expected at least one new open frame after rotation" + ); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn batch_closes_when_max_user_op_bytes_is_reached() { + let db = temp_db("batch-close-size"); + let mut config = default_test_config(); + config.max_batch_user_op_bytes = + SignedUserOp::max_batch_metadata() + ::MAX_METHOD_PAYLOAD_BYTES; + let (tx, shutdown, lane_handle) = start_lane(db.path.as_str(), config).await; + let (pending, recv) = make_pending_user_op(0x33); + + tx.send(pending).await.expect("send user op"); + let ack = tokio::time::timeout(Duration::from_secs(2), recv) + 
.await + .expect("wait for ack") + .expect("ack channel open"); + let rotated = wait_until(Duration::from_secs(2), || { + read_count(db.path.as_str(), "batches") >= 2 + }) + .await; + let drain = read_frame_direct_count(db.path.as_str(), 0, 0); + shutdown_lane(&shutdown, lane_handle).await; + + assert!(ack.is_ok(), "user op should be included"); + assert!(rotated, "expected batch rotation by size"); + assert_eq!(drain, 0); +} + +#[test] +fn dequeue_returns_channel_closed_when_disconnected() { + let (tx, mut rx) = mpsc::channel::(1); + drop(tx); + let mut app = TestApp::default(); + let mut included = Vec::new(); + + let err = + dequeue_and_execute_user_op_chunk(&mut rx, &mut app, 1, 1, &mut included).unwrap_err(); + assert!(matches!(err, InclusionLaneError::ChannelClosed)); +} + +#[test] +fn dequeue_flushes_executed_ops_before_observing_disconnect() { + let (tx, mut rx) = mpsc::channel::(2); + let (pending, _recv) = make_pending_user_op(0x44); + tx.blocking_send(pending).expect("enqueue pending user op"); + drop(tx); + + let mut app = TestApp::default(); + let mut included = Vec::new(); + dequeue_and_execute_user_op_chunk(&mut rx, &mut app, 1, 16, &mut included) + .expect("should flush processed user ops before disconnect"); + assert_eq!(included.len(), 1); +} + +#[test] +fn catch_up_replays_multiple_pages() { + let db = temp_db("catch-up-multi-page"); + let expected = seed_replay_fixture(db.path.as_str()); + let mut storage = Storage::open(db.path.as_str(), "NORMAL").expect("open storage"); + let mut app = ReplayRecordingApp::default(); + + catch_up_application_paged(&mut app, &mut storage, 2).expect("catch up in pages"); + + assert_eq!(app.replayed, expected); + assert_eq!(app.executed_input_count(), expected.len() as u64); +} + +#[test] +fn catch_up_starts_from_executed_input_count_offset() { + let db = temp_db("catch-up-offset"); + let expected = seed_replay_fixture(db.path.as_str()); + let mut storage = Storage::open(db.path.as_str(), "NORMAL").expect("open 
storage"); + let mut app = ReplayRecordingApp::with_executed_input_count(3); + + catch_up_application_paged(&mut app, &mut storage, 2).expect("catch up from offset"); + + assert_eq!(app.replayed, expected[3..].to_vec()); + assert_eq!(app.executed_input_count(), expected.len() as u64); +} + +#[test] +fn catch_up_handles_mixed_user_ops_and_direct_inputs_across_page_boundary() { + let db = temp_db("catch-up-mixed-page-boundary"); + let expected = seed_replay_fixture(db.path.as_str()); + let mut storage = Storage::open(db.path.as_str(), "NORMAL").expect("open storage"); + let mut app = ReplayRecordingApp::default(); + + catch_up_application_paged(&mut app, &mut storage, 4).expect("catch up across page boundary"); + + assert_eq!(app.replayed, expected); +} + +#[test] +fn catch_up_load_error_reports_offset() { + let db = temp_db("catch-up-load-error"); + let mut storage = + Storage::open_without_migrations(db.path.as_str(), "NORMAL").expect("open raw storage"); + let mut app = ReplayRecordingApp::default(); + + let err = catch_up_application_paged(&mut app, &mut storage, 2) + .expect_err("catch up should fail without schema"); + + assert!(matches!(err, CatchUpError::LoadReplay { offset: 0, .. 
})); +} diff --git a/sequencer/src/inclusion_lane/types.rs b/sequencer/src/inclusion_lane/types.rs index 204f52b..535dc89 100644 --- a/sequencer/src/inclusion_lane/types.rs +++ b/sequencer/src/inclusion_lane/types.rs @@ -14,16 +14,13 @@ pub struct PendingUserOp { pub received_at: SystemTime, } -#[derive(Debug)] -pub enum InclusionLaneInput { - UserOp(PendingUserOp), -} - #[derive(Debug, Error, Clone)] pub enum SequencerError { #[error("{0}")] Invalid(String), #[error("{0}")] + Unavailable(String), + #[error("{0}")] Internal(String), } @@ -35,4 +32,8 @@ impl SequencerError { pub fn internal(message: impl Into) -> Self { Self::Internal(message.into()) } + + pub fn unavailable(message: impl Into) -> Self { + Self::Unavailable(message.into()) + } } diff --git a/sequencer/src/input_reader/logs.rs b/sequencer/src/input_reader/logs.rs new file mode 100644 index 0000000..d85953e --- /dev/null +++ b/sequencer/src/input_reader/logs.rs @@ -0,0 +1,104 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use alloy::contract::Error as ContractError; +use alloy::contract::Event; +use alloy::providers::Provider; +use alloy::sol_types::SolEvent; +use alloy_primitives::Address; +use async_recursion::async_recursion; +use cartesi_rollups_contracts::input_box::InputBox::InputAdded; + +#[async_recursion] +pub(crate) async fn get_input_added_events( + provider: &impl Provider, + app_address_filter: Address, + input_box_address: &Address, + start_block: u64, + end_block: u64, + long_block_range_error_codes: &[String], +) -> Result, Vec> { + let event = Event::new_sol(provider, input_box_address) + .from_block(start_block) + .to_block(end_block) + .event(InputAdded::SIGNATURE) + .topic1(app_address_filter.into_word()); + + match event.query().await { + Ok(logs) => Ok(logs), + Err(e) => { + if should_retry_with_partition(&e, long_block_range_error_codes) { + if start_block >= end_block { + return Err(vec![e]); + } + let middle = 
start_block + (end_block - start_block) / 2; + let first = get_input_added_events( + provider, + app_address_filter, + input_box_address, + start_block, + middle, + long_block_range_error_codes, + ) + .await; + let second = get_input_added_events( + provider, + app_address_filter, + input_box_address, + middle + 1, + end_block, + long_block_range_error_codes, + ) + .await; + + match (first, second) { + (Ok(mut a), Ok(b)) => { + a.extend(b); + Ok(a) + } + (Err(mut a), Err(b)) => { + a.extend(b); + Err(a) + } + (Err(e), _) | (_, Err(e)) => Err(e), + } + } else { + Err(vec![e]) + } + } + } +} + +fn should_retry_with_partition(err: &ContractError, codes: &[String]) -> bool { + error_message_matches_retry_codes(&format!("{err:?}"), codes) +} + +pub(crate) fn error_message_matches_retry_codes(error_message: &str, codes: &[String]) -> bool { + codes.iter().any(|c| error_message.contains(c)) +} + +#[cfg(test)] +mod tests { + use super::error_message_matches_retry_codes; + + #[test] + fn error_message_matches_retry_codes_returns_true_when_message_contains_code() { + assert!(error_message_matches_retry_codes( + "RPC error: block range too large", + &["block range".to_string(), "timeout".to_string()] + )); + assert!(error_message_matches_retry_codes( + "timeout after 30s", + &["timeout".to_string()] + )); + } + + #[test] + fn error_message_matches_retry_codes_returns_false_when_no_match() { + assert!(!error_message_matches_retry_codes( + "connection refused", + &["block range".to_string(), "timeout".to_string()] + )); + assert!(!error_message_matches_retry_codes("ok", &[])); + } +} diff --git a/sequencer/src/input_reader/mod.rs b/sequencer/src/input_reader/mod.rs index 16c289e..6c05d97 100644 --- a/sequencer/src/input_reader/mod.rs +++ b/sequencer/src/input_reader/mod.rs @@ -4,6 +4,7 @@ //! Reads safe (direct) inputs from a reference source (e.g. InputBox contract) and appends them //! to sequencer storage. Minimal design: no epochs or consensus; flat contiguous indices only. 
+mod logs; mod reader; -pub use reader::{InputReader, InputReaderConfig, InputReaderError, InputReaderStop}; +pub use reader::{InputReader, InputReaderConfig, InputReaderError}; diff --git a/sequencer/src/input_reader/reader.rs b/sequencer/src/input_reader/reader.rs index a4f7084..6dafabd 100644 --- a/sequencer/src/input_reader/reader.rs +++ b/sequencer/src/input_reader/reader.rs @@ -1,38 +1,30 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) -use std::sync::Arc; -use std::sync::atomic::{AtomicBool, Ordering}; use std::time::Duration; -use alloy::contract::Error as ContractError; -use alloy::contract::Event; use alloy::eips::BlockNumberOrTag::Safe; use alloy::providers::Provider; -use alloy::rpc::types::Topic; -use alloy::sol_types::SolEvent; +use alloy::providers::ProviderBuilder; use alloy_primitives::Address; -use async_recursion::async_recursion; -use cartesi_rollups_contracts::input_box::InputBox::InputAdded; -use tokio::runtime::Builder; +use cartesi_rollups_contracts::application::Application; +use cartesi_rollups_contracts::input_box::InputBox; use tokio::task::JoinHandle; -use tracing::{info, trace}; +use tracing::{info, warn}; -use crate::storage::{IndexedDirectInput, Storage}; +use super::logs::get_input_added_events; +use crate::shutdown::ShutdownSignal; +use crate::storage::{Storage, StorageOpenError, StoredDirectInput}; + +const SQLITE_SYNCHRONOUS_PRAGMA: &str = "NORMAL"; #[derive(Debug, Clone)] pub struct InputReaderConfig { - /// RPC URL for the reference node (e.g. L1 or authority node). pub rpc_url: String, - /// Contract address that emits InputAdded (e.g. InputBox). pub input_box_address: Address, - /// Application address to filter inputs (topic1). Only InputAdded events for this app are ingested. pub app_address_filter: Address, - /// First block to scan (e.g. InputBox deployment block). pub genesis_block: u64, - /// Poll interval when no new blocks. 
pub poll_interval: Duration, - /// RPC error substrings that trigger partition retry for large block ranges. pub long_block_range_error_codes: Vec, } @@ -40,215 +32,139 @@ pub struct InputReaderConfig { pub enum InputReaderError { #[error("provider/transport: {0}")] Provider(String), - #[error("storage: {0}")] + #[error(transparent)] + OpenStorage(#[from] StorageOpenError), + #[error(transparent)] Storage(#[from] rusqlite::Error), - #[error("shutdown requested")] - ShutdownRequested, + #[error("input reader join error: {0}")] + Join(String), } -/// Reads InputAdded events in a block range. Retries with half-range partition on configured RPC errors. -#[async_recursion] -async fn get_input_added_events( - provider: &impl Provider, - topic1: Option<&Topic>, - read_from: &Address, - start_block: u64, - end_block: u64, - long_block_range_error_codes: &[String], -) -> Result, Vec> { - let event = { - let mut e = Event::new_sol(provider, read_from) - .from_block(start_block) - .to_block(end_block) - .event(InputAdded::SIGNATURE); - if let Some(t) = topic1 { - e = e.topic1(t.clone()); - } - e - }; - - match event.query().await { - Ok(logs) => Ok(logs), - Err(e) => { - if should_retry_with_partition(&e, long_block_range_error_codes) { - if start_block >= end_block { - return Err(vec![e]); - } - let middle = start_block + (end_block - start_block) / 2; - - let first = get_input_added_events( - provider, - topic1, - read_from, - start_block, - middle, - long_block_range_error_codes, - ) - .await; - let second = get_input_added_events( - provider, - topic1, - read_from, - middle + 1, - end_block, - long_block_range_error_codes, - ) - .await; - - match (first, second) { - (Ok(mut a), Ok(b)) => { - a.extend(b); - Ok(a) - } - (Err(mut a), Err(b)) => { - a.extend(b); - Err(a) - } - (Err(e), _) | (_, Err(e)) => Err(e), - } - } else { - Err(vec![e]) - } - } - } -} - -fn should_retry_with_partition(err: &ContractError, codes: &[String]) -> bool { - 
error_message_matches_retry_codes(&format!("{err:?}"), codes) -} - -/// Pure predicate: true if `error_message` contains any of `codes`. Used for partition retry. -/// Exposed for unit tests. -pub(crate) fn error_message_matches_retry_codes(error_message: &str, codes: &[String]) -> bool { - codes.iter().any(|c| error_message.contains(c)) -} - -/// Builds a contiguous batch of `IndexedDirectInput` from payloads and block numbers. -/// Exposed for unit tests. -pub(crate) fn build_indexed_direct_input_batch( - payloads_with_blocks: impl IntoIterator, u64)>, - next_index: u64, -) -> Vec { - payloads_with_blocks - .into_iter() - .enumerate() - .map(|(i, (payload, block_number))| IndexedDirectInput { - index: next_index + i as u64, - payload, - block_number, - }) - .collect() -} - -/// Returns the current chain head using the standard "safe" block tag (alloy `BlockNumberOrTag::Safe`). -async fn latest_safe_block(provider: &impl Provider) -> Result { - let block = provider - .get_block(Safe.into()) - .await - .map_err(|e| InputReaderError::Provider(e.to_string()))? - .ok_or_else(|| InputReaderError::Provider("get_block returned None".to_string()))?; - let number = block.header.number; - Ok(number) +pub struct InputReader { + config: InputReaderConfig, + db_path: String, + shutdown: ShutdownSignal, } -/// Token used by main to request input reader shutdown and then join the worker. 
-#[derive(Debug, Clone)] -pub struct InputReaderStop { - stop: Arc, -} +impl InputReader { + pub async fn discover_input_box( + rpc_url: &str, + application_address: Address, + ) -> Result { + let provider = ProviderBuilder::new() + .connect(rpc_url) + .await + .map_err(|e| InputReaderError::Provider(e.to_string()))?; + let application = Application::new(application_address, &provider); + let data_availability = application + .getDataAvailability() + .call() + .await + .map_err(|e| InputReaderError::Provider(e.to_string()))?; -impl InputReaderStop { - pub fn request_shutdown(&self) { - self.stop.store(true, Ordering::Relaxed); + decode_input_box_address(&data_availability) } -} -pub struct InputReader { - config: InputReaderConfig, - storage: Storage, - stop: Arc, -} + pub async fn discover_input_box_deployment_block( + rpc_url: &str, + input_box_address: Address, + ) -> Result { + let provider = ProviderBuilder::new() + .connect(rpc_url) + .await + .map_err(|e| InputReaderError::Provider(e.to_string()))?; + let input_box = InputBox::new(input_box_address, &provider); + input_box + .getDeploymentBlockNumber() + .call() + .await + .map_err(|e| InputReaderError::Provider(e.to_string()))? 
+ .try_into() + .map_err(|_| { + InputReaderError::Provider( + "input box deployment block number did not fit into u64".to_string(), + ) + }) + } -impl InputReader { - pub fn new(config: InputReaderConfig, storage: Storage) -> Self { + pub fn new(config: InputReaderConfig, db_path: String, shutdown: ShutdownSignal) -> Self { Self { config, - storage, - stop: Arc::new(AtomicBool::new(false)), + db_path, + shutdown, } } - pub fn request_shutdown(&self) { - self.stop.store(true, Ordering::Relaxed); + pub fn start( + db_path: &str, + config: InputReaderConfig, + shutdown: ShutdownSignal, + ) -> Result>, StorageOpenError> { + let _ = Storage::open(db_path, SQLITE_SYNCHRONOUS_PRAGMA)?; + let reader = Self::new(config, db_path.to_string(), shutdown); + Ok(tokio::spawn(async move { reader.run_forever().await })) } - /// Spawn the input reader loop on a blocking task. Returns a join handle and a stop token - /// so main can request shutdown and await the worker (same pattern as inclusion lane). - pub fn spawn(self) -> (JoinHandle>, InputReaderStop) { - let stop = InputReaderStop { - stop: Arc::clone(&self.stop), - }; - let handle = tokio::task::spawn_blocking(move || { - let rt = Builder::new_current_thread() - .enable_all() - .build() - .expect("input reader runtime"); - let mut reader = self; - while !reader.stop.load(Ordering::Relaxed) { - if let Err(e) = rt.block_on(reader.advance_once()) { - match &e { - InputReaderError::ShutdownRequested => break, - _ => { - tracing::warn!(error = %e, "input reader advance failed, will retry"); - } - } - } - std::thread::sleep(reader.config.poll_interval); - } - Ok(()) - }); - (handle, stop) - } + pub async fn sync_to_current_safe_head( + db_path: &str, + config: InputReaderConfig, + ) -> Result<(), InputReaderError> { + let mut reader = Self::new(config, db_path.to_string(), ShutdownSignal::default()); + reader.bootstrap_safe_head().await?; - /// Returns true if shutdown has been requested. For tests. 
- #[cfg(test)] - pub(crate) fn is_shutdown_requested(&self) -> bool { - self.stop.load(Ordering::Relaxed) + let provider = ProviderBuilder::new() + .connect(reader.config.rpc_url.as_str()) + .await + .map_err(|e| InputReaderError::Provider(e.to_string()))?; + reader.advance_once(&provider).await } - /// Run the input reader loop in the background (spawns then returns; no join handle). - /// Prefer `spawn()` so main can request shutdown and join the worker. - pub fn run_blocking(self) -> Result<(), InputReaderError> { - drop(self.spawn()); - Ok(()) - } + async fn run_forever(mut self) -> Result<(), InputReaderError> { + self.bootstrap_safe_head().await?; - /// One iteration of the input reader loop. Public for tests. - pub(crate) async fn advance_once(&mut self) -> Result<(), InputReaderError> { - let provider = alloy::providers::ProviderBuilder::new() + let provider = ProviderBuilder::new() .connect(self.config.rpc_url.as_str()) .await .map_err(|e| InputReaderError::Provider(e.to_string()))?; - let current = latest_safe_block(&provider).await?; - let mut prev = self.storage.input_reader_last_processed_block()?; - if prev == 0 && self.config.genesis_block > 0 { - prev = self.config.genesis_block.saturating_sub(1); + loop { + if self.shutdown.is_shutdown_requested() { + return Ok(()); + } + + match self.advance_once(&provider).await { + Ok(()) => {} + Err(InputReaderError::Provider(error)) => { + warn!(error, "input reader advance failed, will retry"); + } + Err(err) => return Err(err), + } + + tokio::select! 
{ + _ = self.shutdown.wait_for_shutdown() => return Ok(()), + _ = tokio::time::sleep(self.config.poll_interval) => {} + } } + } + + pub(crate) async fn advance_once( + &mut self, + provider: &impl Provider, + ) -> Result<(), InputReaderError> { + let current_safe_block = latest_safe_block(provider).await?; + let previous_safe_block = self.current_safe_block().await?; - if current <= prev { + if current_safe_block <= previous_safe_block { return Ok(()); } - let start_block = prev + 1; - let topic1 = self.config.app_address_filter.into_word().into(); - + let start_block = previous_safe_block + 1; let events = get_input_added_events( - &provider, - Some(&topic1), + provider, + self.config.app_address_filter, &self.config.input_box_address, start_block, - current, + current_safe_block, &self.config.long_block_range_error_codes, ) .await @@ -262,162 +178,119 @@ impl InputReader { )) })?; - if events.is_empty() { - self.storage - .input_reader_set_last_processed_block(current)?; - return Ok(()); - } - - let next_index = self.storage.safe_input_end_exclusive()?; - let payloads_with_blocks: Vec<(Vec, u64)> = events + let batch: Vec = events .into_iter() - .map(|(ev, log)| { - let block_number = log - .block_number - .and_then(|n| n.try_into().ok()) - .unwrap_or(0u64); - (ev.input.to_vec(), block_number) + .map(|(event, log)| { + let block_number = log.block_number.ok_or_else(|| { + InputReaderError::Provider("InputAdded log missing block_number".to_string()) + })?; + + Ok(StoredDirectInput { + payload: event.input.to_vec(), + block_number, + }) }) - .collect(); - let batch = build_indexed_direct_input_batch(payloads_with_blocks, next_index); - - for item in &batch { - trace!( - index = item.index, - block_number = item.block_number, - payload_len = item.payload.len(), - "safe input" - ); - } + .collect::, InputReaderError>>()?; + info!( - block_range = %format!("{}..={}", start_block, current), + block_range = %format!("{}..={}", start_block, current_safe_block), count = 
batch.len(), "appending safe inputs" ); - self.storage - .append_safe_inputs_and_advance_cursor(&batch, current)?; - - Ok(()) + self.append_safe_direct_inputs(current_safe_block, batch) + .await } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy::node_bindings::Anvil; - use alloy_primitives::Address; - use std::time::Duration; - use tempfile::NamedTempFile; - // ----- Unit tests: retry predicate ----------------------------------------- - - #[test] - fn error_message_matches_retry_codes_returns_true_when_message_contains_code() { - assert!(error_message_matches_retry_codes( - "RPC error: block range too large", - &["block range".to_string(), "timeout".to_string()] - )); - assert!(error_message_matches_retry_codes( - "timeout after 30s", - &["timeout".to_string()] - )); + async fn current_safe_block(&self) -> Result { + let db_path = self.db_path.clone(); + tokio::task::spawn_blocking(move || { + let mut storage = Storage::open(&db_path, SQLITE_SYNCHRONOUS_PRAGMA)?; + storage.current_safe_block().map_err(InputReaderError::from) + }) + .await + .map_err(|err| InputReaderError::Join(err.to_string()))? } - #[test] - fn error_message_matches_retry_codes_returns_false_when_no_match() { - assert!(!error_message_matches_retry_codes( - "connection refused", - &["block range".to_string(), "timeout".to_string()] - )); - assert!(!error_message_matches_retry_codes("ok", &[])); + async fn bootstrap_safe_head(&self) -> Result<(), InputReaderError> { + let db_path = self.db_path.clone(); + let minimum_safe_block = self.config.genesis_block.saturating_sub(1); + tokio::task::spawn_blocking(move || { + let mut storage = Storage::open(&db_path, SQLITE_SYNCHRONOUS_PRAGMA)?; + storage + .ensure_minimum_safe_block(minimum_safe_block) + .map_err(InputReaderError::from) + }) + .await + .map_err(|err| InputReaderError::Join(err.to_string()))? 
} - #[test] - fn error_message_matches_retry_codes_returns_false_when_codes_empty() { - assert!(!error_message_matches_retry_codes( - "any error message", - &[] as &[String] - )); + async fn append_safe_direct_inputs( + &self, + current_safe_block: u64, + batch: Vec, + ) -> Result<(), InputReaderError> { + let db_path = self.db_path.clone(); + tokio::task::spawn_blocking(move || { + let mut storage = Storage::open(&db_path, SQLITE_SYNCHRONOUS_PRAGMA)?; + storage + .append_safe_direct_inputs(current_safe_block, &batch) + .map_err(InputReaderError::from) + }) + .await + .map_err(|err| InputReaderError::Join(err.to_string()))? } +} - // ----- Unit tests: batch builder ------------------------------------------- - - #[test] - fn build_indexed_direct_input_batch_empty() { - let batch = build_indexed_direct_input_batch(Vec::<(Vec, u64)>::new(), 0); - assert!(batch.is_empty()); +fn decode_input_box_address(data_availability: &[u8]) -> Result { + if data_availability.len() != 20 { + return Err(InputReaderError::Provider(format!( + "application getDataAvailability returned {} bytes; expected 20-byte InputBox address", + data_availability.len() + ))); } - #[test] - fn build_indexed_direct_input_batch_contiguous_indices_and_block_numbers() { - let payloads = vec![ - (vec![0x01], 100_u64), - (vec![0x02, 0x03], 101), - (vec![], 102), - ]; - let batch = build_indexed_direct_input_batch(payloads, 5); - assert_eq!(batch.len(), 3); - assert_eq!(batch[0].index, 5); - assert_eq!(batch[0].payload, vec![0x01]); - assert_eq!(batch[0].block_number, 100); - assert_eq!(batch[1].index, 6); - assert_eq!(batch[1].payload, vec![0x02, 0x03]); - assert_eq!(batch[1].block_number, 101); - assert_eq!(batch[2].index, 7); - assert!(batch[2].payload.is_empty()); - assert_eq!(batch[2].block_number, 102); - } + Ok(Address::from_slice(data_availability)) +} - #[test] - fn build_indexed_direct_input_batch_single_item() { - let batch = build_indexed_direct_input_batch(vec![(vec![0xaa, 0xbb], 1_u64)], 0); - 
assert_eq!(batch.len(), 1); - assert_eq!(batch[0].index, 0); - assert_eq!(batch[0].payload, vec![0xaa, 0xbb]); - assert_eq!(batch[0].block_number, 1); - } +async fn latest_safe_block(provider: &impl Provider) -> Result { + let block = provider + .get_block(Safe.into()) + .await + .map_err(|e| InputReaderError::Provider(e.to_string()))? + .ok_or_else(|| InputReaderError::Provider("get_block returned None".to_string()))?; + Ok(block.header.number) +} - // ----- Unit tests: InputReader construction and shutdown ------------------- +#[cfg(test)] +mod tests { + use super::*; + use alloy::node_bindings::Anvil; + use tempfile::NamedTempFile; - #[test] - fn input_reader_new_and_request_shutdown_sets_stop_flag() { - let db_file = NamedTempFile::new().expect("temp file"); - let storage = - crate::storage::Storage::open(db_file.path().to_string_lossy().as_ref(), "NORMAL") - .expect("open storage"); - let config = InputReaderConfig { - rpc_url: "http://127.0.0.1:0".to_string(), - input_box_address: Address::ZERO, - app_address_filter: Address::ZERO, - genesis_block: 0, - poll_interval: Duration::from_secs(1), - long_block_range_error_codes: vec![], - }; - let reader = InputReader::new(config, storage); - assert!(!reader.is_shutdown_requested()); - reader.request_shutdown(); - assert!(reader.is_shutdown_requested()); + fn require_anvil_tests() -> bool { + std::env::var_os("RUN_ANVIL_TESTS").is_some() } - /// Spawn reader, request shutdown via stop token, then join. Handle should return Ok(Ok(())). 
#[tokio::test] - async fn spawn_then_request_shutdown_joins_with_ok() { + async fn start_then_request_shutdown_joins_with_ok() { let db_file = NamedTempFile::new().expect("temp file"); - let storage = - crate::storage::Storage::open(db_file.path().to_string_lossy().as_ref(), "NORMAL") - .expect("open storage"); - let config = InputReaderConfig { - rpc_url: "http://127.0.0.1:0".to_string(), - input_box_address: Address::ZERO, - app_address_filter: Address::ZERO, - genesis_block: 0, - poll_interval: Duration::from_millis(20), - long_block_range_error_codes: vec![], - }; - let reader = InputReader::new(config, storage); - let (handle, stop) = reader.spawn(); - stop.request_shutdown(); + let shutdown = ShutdownSignal::default(); + let handle = InputReader::start( + db_file.path().to_string_lossy().as_ref(), + InputReaderConfig { + rpc_url: "http://127.0.0.1:0".to_string(), + input_box_address: Address::ZERO, + app_address_filter: Address::ZERO, + genesis_block: 0, + poll_interval: Duration::from_millis(20), + long_block_range_error_codes: vec![], + }, + shutdown.clone(), + ) + .expect("start input reader"); + + shutdown.request_shutdown(); let join_result = tokio::time::timeout(Duration::from_secs(2), handle).await; let join_result = join_result.expect("reader should exit within timeout"); assert!( @@ -427,28 +300,31 @@ mod tests { ); } - /// Spawn reader against Anvil, let one advance complete, then request shutdown and join. 
#[tokio::test] - async fn spawn_with_anvil_request_shutdown_then_join_returns_ok() { - let anvil = Anvil::default().block_time(1).spawn(); - let rpc_url = anvil.endpoint_url().to_string(); + async fn start_with_anvil_request_shutdown_then_join_returns_ok() { + if !require_anvil_tests() { + return; + } + + let anvil = Anvil::default().block_time(1).timeout(30_000).spawn(); + let shutdown = ShutdownSignal::default(); let db_file = NamedTempFile::new().expect("temp file"); - let db_path = db_file.path().to_string_lossy(); - let storage = crate::storage::Storage::open(&db_path, "NORMAL").expect("open storage"); - - let config = InputReaderConfig { - rpc_url, - input_box_address: Address::ZERO, - app_address_filter: Address::ZERO, - genesis_block: 0, - poll_interval: Duration::from_millis(50), - long_block_range_error_codes: vec![], - }; - let reader = InputReader::new(config, storage); - let (handle, stop) = reader.spawn(); + let handle = InputReader::start( + db_file.path().to_string_lossy().as_ref(), + InputReaderConfig { + rpc_url: anvil.endpoint_url().to_string(), + input_box_address: Address::ZERO, + app_address_filter: Address::ZERO, + genesis_block: 0, + poll_interval: Duration::from_millis(50), + long_block_range_error_codes: vec![], + }, + shutdown.clone(), + ) + .expect("start input reader"); tokio::time::sleep(Duration::from_millis(200)).await; - stop.request_shutdown(); + shutdown.request_shutdown(); let join_result = tokio::time::timeout(Duration::from_secs(3), handle).await; let join_result = join_result.expect("reader should exit within timeout"); @@ -459,106 +335,151 @@ mod tests { ); } - // ----- Integration tests (Anvil) ----------------------------------------- - - /// Spawn Anvil, run one advance_once with no InputAdded contract (empty events). - /// Asserts the reader connects, reads safe block, and updates last_processed_block when block > 0. 
#[tokio::test] - async fn advance_once_with_anvil_updates_cursor_when_block_available() { - let anvil = Anvil::default().block_time(1).spawn(); - let rpc_url = anvil.endpoint_url().to_string(); + async fn advance_once_with_anvil_updates_safe_head_when_block_available() { + if !require_anvil_tests() { + return; + } + + let anvil = Anvil::default().block_time(1).timeout(30_000).spawn(); let db_file = NamedTempFile::new().expect("temp file"); - let db_path = db_file.path().to_string_lossy(); - let storage = crate::storage::Storage::open(&db_path, "NORMAL").expect("open storage"); - - let config = InputReaderConfig { - rpc_url: rpc_url.to_string(), - input_box_address: Address::ZERO, - app_address_filter: Address::ZERO, - genesis_block: 0, - poll_interval: Duration::from_secs(1), - long_block_range_error_codes: vec![], + let mut reader = InputReader::new( + InputReaderConfig { + rpc_url: anvil.endpoint_url().to_string(), + input_box_address: Address::ZERO, + app_address_filter: Address::ZERO, + genesis_block: 0, + poll_interval: Duration::from_secs(1), + long_block_range_error_codes: vec![], + }, + db_file.path().to_string_lossy().into_owned(), + ShutdownSignal::default(), + ); + let provider = alloy::providers::ProviderBuilder::new() + .connect(anvil.endpoint_url().to_string().as_str()) + .await + .expect("connect provider"); + + reader.advance_once(&provider).await.expect("advance_once"); + let safe_block = reader.current_safe_block().await.expect("read safe block"); + let safe_end = { + let mut storage = Storage::open( + db_file.path().to_string_lossy().as_ref(), + SQLITE_SYNCHRONOUS_PRAGMA, + ) + .expect("open storage"); + storage.safe_input_end_exclusive().expect("safe end") }; - let mut reader = InputReader::new(config, storage); - - reader.advance_once().await.expect("advance_once"); + assert_eq!(safe_end, 0, "no InputAdded contract so no direct inputs"); + let _ = safe_block; + } - let _last = reader - .storage - .input_reader_last_processed_block() - 
.expect("read cursor"); - assert_eq!( - reader.storage.safe_input_end_exclusive().expect("safe end"), - 0, - "no InputAdded contract so no direct inputs" + #[tokio::test] + async fn advance_once_with_genesis_block_uses_genesis_as_effective_prev() { + let db_file = NamedTempFile::new().expect("temp file"); + let genesis_block = 2_u64; + let reader = InputReader::new( + InputReaderConfig { + rpc_url: "http://127.0.0.1:0".to_string(), + input_box_address: Address::ZERO, + app_address_filter: Address::ZERO, + genesis_block, + poll_interval: Duration::from_secs(1), + long_block_range_error_codes: vec![], + }, + db_file.path().to_string_lossy().into_owned(), + ShutdownSignal::default(), ); + + reader + .bootstrap_safe_head() + .await + .expect("bootstrap safe head"); + + let safe_block = reader.current_safe_block().await.expect("read safe block"); + assert_eq!(safe_block, genesis_block - 1); } - /// When genesis_block is set and cursor is 0, effective prev becomes genesis_block - 1, - /// so we never read before genesis_block and never set cursor in [1, genesis_block). 
#[tokio::test] - async fn advance_once_with_genesis_block_uses_genesis_as_effective_prev() { - let anvil = Anvil::default().block_time(1).spawn(); - let rpc_url = anvil.endpoint_url().to_string(); + async fn sync_to_current_safe_head_with_genesis_block_bootstraps_safe_head() { let db_file = NamedTempFile::new().expect("temp file"); - let db_path = db_file.path().to_string_lossy(); - let storage = crate::storage::Storage::open(&db_path, "NORMAL").expect("open storage"); - - let genesis_block = 2u64; - let config = InputReaderConfig { - rpc_url, - input_box_address: Address::ZERO, - app_address_filter: Address::ZERO, - genesis_block, - poll_interval: Duration::from_secs(1), - long_block_range_error_codes: vec![], - }; - let mut reader = InputReader::new(config, storage); + let genesis_block = 5_u64; + + let result = InputReader::sync_to_current_safe_head( + db_file.path().to_string_lossy().as_ref(), + InputReaderConfig { + rpc_url: "http://127.0.0.1:0".to_string(), + input_box_address: Address::ZERO, + app_address_filter: Address::ZERO, + genesis_block, + poll_interval: Duration::from_secs(1), + long_block_range_error_codes: vec![], + }, + ) + .await; - reader.advance_once().await.expect("advance_once"); + assert!(matches!(result, Err(InputReaderError::Provider(_)))); - let cursor = reader - .storage - .input_reader_last_processed_block() - .expect("read cursor"); - assert!( - cursor == 0 || cursor >= genesis_block, - "cursor must be 0 (no advance) or >= genesis_block (never in [1, genesis_block))" + let mut storage = Storage::open( + db_file.path().to_string_lossy().as_ref(), + SQLITE_SYNCHRONOUS_PRAGMA, + ) + .expect("open storage"); + assert_eq!( + storage.current_safe_block().expect("read safe block"), + genesis_block - 1 ); } - /// When storage cursor is already ahead of chain head, advance_once does nothing - /// and does not overwrite last_processed_block. 
#[tokio::test] - async fn advance_once_when_cursor_ahead_of_chain_is_no_op() { - let anvil = Anvil::default().block_time(1).spawn(); - let rpc_url = anvil.endpoint_url().to_string(); + async fn advance_once_when_safe_head_ahead_of_chain_is_no_op() { + if !require_anvil_tests() { + return; + } + + let anvil = Anvil::default().block_time(1).timeout(30_000).spawn(); let db_file = NamedTempFile::new().expect("temp file"); - let db_path = db_file.path().to_string_lossy(); - let mut storage = crate::storage::Storage::open(&db_path, "NORMAL").expect("open storage"); + let db_path = db_file.path().to_string_lossy().into_owned(); + let mut storage = Storage::open(&db_path, SQLITE_SYNCHRONOUS_PRAGMA).expect("open storage"); storage - .input_reader_set_last_processed_block(1000) - .expect("set cursor ahead of chain"); - - let config = InputReaderConfig { - rpc_url, - input_box_address: Address::ZERO, - app_address_filter: Address::ZERO, - genesis_block: 0, - poll_interval: Duration::from_secs(1), - long_block_range_error_codes: vec![], - }; - let mut reader = InputReader::new(config, storage); - - reader.advance_once().await.expect("advance_once"); + .append_safe_direct_inputs(1000, &[]) + .expect("set safe head ahead of chain"); + + let mut reader = InputReader::new( + InputReaderConfig { + rpc_url: anvil.endpoint_url().to_string(), + input_box_address: Address::ZERO, + app_address_filter: Address::ZERO, + genesis_block: 0, + poll_interval: Duration::from_secs(1), + long_block_range_error_codes: vec![], + }, + db_path, + ShutdownSignal::default(), + ); + let provider = alloy::providers::ProviderBuilder::new() + .connect(anvil.endpoint_url().to_string().as_str()) + .await + .expect("connect provider"); + reader.advance_once(&provider).await.expect("advance_once"); assert_eq!( - reader - .storage - .input_reader_last_processed_block() - .expect("read"), + reader.current_safe_block().await.expect("read"), 1000, - "cursor must not be overwritten when chain is behind" + "safe 
head should remain unchanged when already ahead of chain" ); } + + #[test] + fn decode_input_box_address_requires_exactly_20_bytes() { + let err = decode_input_box_address(&[0_u8; 19]).expect_err("short bytes should fail"); + assert!( + err.to_string() + .contains("expected 20-byte InputBox address") + ); + + let address = + decode_input_box_address(&[0x22; 20]).expect("20-byte data availability address"); + assert_eq!(address, Address::from([0x22; 20])); + } } diff --git a/sequencer/src/l2_tx_broadcaster/mod.rs b/sequencer/src/l2_tx_broadcaster/mod.rs deleted file mode 100644 index 9fe3456..0000000 --- a/sequencer/src/l2_tx_broadcaster/mod.rs +++ /dev/null @@ -1,327 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -mod profiling; - -use std::collections::HashMap; -use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; -use std::sync::{Arc, Mutex}; -use std::time::Duration; - -pub use sequencer_core::broadcast::BroadcastTxMessage; -use tokio::sync::mpsc; -use tokio::sync::mpsc::error::TrySendError; -use tracing::warn; - -use self::profiling::{BroadcasterMetrics, FanoutOutcome}; -use crate::storage::Storage; - -#[derive(Debug, Clone, Copy)] -pub struct L2TxBroadcasterConfig { - pub idle_poll_interval: Duration, - pub page_size: usize, - pub subscriber_buffer_capacity: usize, - pub metrics_enabled: bool, - pub metrics_log_interval: Duration, -} - -#[derive(Clone)] -pub struct L2TxBroadcaster { - inner: Arc, -} - -pub struct LiveSubscription { - pub receiver: mpsc::Receiver, - pub live_start_offset: u64, -} - -struct L2TxBroadcasterInner { - db_path: String, - page_size: usize, - subscriber_buffer_capacity: usize, - head_offset: AtomicU64, - next_subscriber_id: AtomicU64, - stop_requested: AtomicBool, - subscribers: Mutex>>, -} - -impl L2TxBroadcaster { - pub fn start( - db_path: String, - config: L2TxBroadcasterConfig, - ) -> std::result::Result { - let mut storage = 
Storage::open_read_only(&db_path) - .map_err(|err| format!("open broadcaster storage failed: {err}"))?; - let head_offset = storage - .ordered_l2_tx_count() - .map_err(|err| format!("load broadcaster head offset failed: {err}"))?; - - let inner = Arc::new(L2TxBroadcasterInner { - db_path, - page_size: config.page_size.max(1), - subscriber_buffer_capacity: config.subscriber_buffer_capacity.max(1), - head_offset: AtomicU64::new(head_offset), - next_subscriber_id: AtomicU64::new(0), - stop_requested: AtomicBool::new(false), - subscribers: Mutex::new(HashMap::new()), - }); - - let worker_inner = Arc::clone(&inner); - tokio::task::spawn_blocking(move || { - run_poller( - worker_inner, - config.idle_poll_interval, - config.metrics_enabled, - config.metrics_log_interval, - ); - }); - - Ok(Self { inner }) - } - - pub fn request_shutdown(&self) { - self.inner.stop_requested.store(true, Ordering::Relaxed); - } - - pub fn subscribe(&self) -> LiveSubscription { - let (tx, rx) = mpsc::channel(self.inner.subscriber_buffer_capacity); - let subscriber_id = self - .inner - .next_subscriber_id - .fetch_add(1, Ordering::Relaxed); - - let mut subscribers = self - .inner - .subscribers - .lock() - .expect("l2 tx broadcaster subscribers mutex poisoned"); - subscribers.insert(subscriber_id, tx); - let live_start_offset = self.inner.head_offset.load(Ordering::Acquire); - - LiveSubscription { - receiver: rx, - live_start_offset, - } - } - - pub fn is_running(&self) -> bool { - !self.inner.stop_requested.load(Ordering::Relaxed) - } - - pub fn db_path(&self) -> String { - self.inner.db_path.clone() - } - - pub fn page_size(&self) -> usize { - self.inner.page_size - } -} - -fn run_poller( - inner: Arc, - idle_poll_interval: Duration, - metrics_enabled: bool, - metrics_log_interval: Duration, -) { - let mut storage = match Storage::open_read_only(inner.db_path.as_str()) { - Ok(storage) => storage, - Err(err) => { - warn!(error = %err, "l2 tx broadcaster failed to open read-only storage"); - 
return; - } - }; - let mut next_offset = inner.head_offset.load(Ordering::Acquire); - let mut metrics = BroadcasterMetrics::new( - metrics_enabled, - metrics_log_interval, - inner.page_size, - inner.subscriber_buffer_capacity, - idle_poll_interval, - ); - - while !inner.stop_requested.load(Ordering::Relaxed) { - metrics.on_loop_start(); - let read_started = metrics.phase_started_at(); - let txs = match storage.load_ordered_l2_txs_page_from(next_offset, inner.page_size) { - Ok(value) => value, - Err(err) => { - metrics.on_read_error(read_started); - warn!( - error = %err, - offset = next_offset, - "l2 tx broadcaster failed to read ordered tx page" - ); - let sleep_started = metrics.phase_started_at(); - std::thread::sleep(idle_poll_interval); - metrics.on_idle_sleep_end(sleep_started); - metrics.maybe_log_window(); - continue; - } - }; - metrics.on_read_end(read_started, txs.len() as u64); - - if txs.is_empty() { - metrics.on_empty_poll(); - let sleep_started = metrics.phase_started_at(); - std::thread::sleep(idle_poll_interval); - metrics.on_idle_sleep_end(sleep_started); - metrics.maybe_log_window(); - continue; - } - - for tx in txs { - let event = BroadcastTxMessage::from_offset_and_tx(next_offset, tx); - next_offset = next_offset.saturating_add(1); - inner.head_offset.store(next_offset, Ordering::Release); - let fanout_started = metrics.phase_started_at(); - let outcome = fanout_event(Arc::as_ref(&inner), event); - metrics.on_fanout_end(fanout_started, outcome); - } - metrics.maybe_log_window(); - } - metrics.log_final(); -} - -fn fanout_event(inner: &L2TxBroadcasterInner, event: BroadcastTxMessage) -> FanoutOutcome { - let mut to_remove = Vec::new(); - let mut subscribers = inner - .subscribers - .lock() - .expect("l2 tx broadcaster subscribers mutex poisoned"); - let subscriber_count_before = subscribers.len(); - let mut dropped_closed = 0_u64; - let mut dropped_full = 0_u64; - let mut delivered = 0_u64; - - for (subscriber_id, sender) in subscribers.iter() { 
- match sender.try_send(event.clone()) { - Ok(()) => delivered = delivered.saturating_add(1), - Err(TrySendError::Closed(_)) => { - to_remove.push(*subscriber_id); - dropped_closed = dropped_closed.saturating_add(1); - warn!(subscriber_id, "l2 tx broadcaster removed closed subscriber"); - } - Err(TrySendError::Full(_)) => { - to_remove.push(*subscriber_id); - dropped_full = dropped_full.saturating_add(1); - warn!( - subscriber_id, - "l2 tx broadcaster dropped slow subscriber due to full channel" - ); - } - } - } - - for subscriber_id in to_remove { - subscribers.remove(&subscriber_id); - } - - FanoutOutcome { - delivered, - dropped_closed, - dropped_full, - subscriber_count_before: subscriber_count_before as u64, - subscriber_count_after: subscribers.len() as u64, - } -} - -#[cfg(test)] -mod tests { - use super::BroadcastTxMessage; - use super::{L2TxBroadcaster, L2TxBroadcasterInner}; - use alloy_primitives::Address; - use sequencer_core::l2_tx::{DirectInput, SequencedL2Tx, ValidUserOp}; - use std::collections::HashMap; - use std::sync::atomic::{AtomicBool, AtomicU64}; - use std::sync::{Arc, Mutex}; - use std::time::Duration; - - #[test] - fn broadcast_user_op_serializes_with_hex_data() { - let msg = BroadcastTxMessage::from_offset_and_tx( - 7, - SequencedL2Tx::UserOp(ValidUserOp { - sender: Address::from_slice(&[0x11; 20]), - fee: 3, - data: vec![0xaa, 0xbb], - }), - ); - let json = serde_json::to_string(&msg).expect("serialize"); - assert!(json.contains("\"kind\":\"user_op\"")); - assert!(json.contains("\"offset\":7")); - assert!(json.contains("\"fee\":3")); - assert!(json.contains("\"data\":\"0xaabb\"")); - } - - #[test] - fn broadcast_direct_input_serializes_with_hex_payload() { - let msg = BroadcastTxMessage::from_offset_and_tx( - 9, - SequencedL2Tx::Direct(DirectInput { - payload: vec![0xcc, 0xdd], - }), - ); - let json = serde_json::to_string(&msg).expect("serialize"); - assert!(json.contains("\"kind\":\"direct_input\"")); - 
assert!(json.contains("\"offset\":9")); - assert!(json.contains("\"payload\":\"0xccdd\"")); - } - - #[test] - fn subscribe_observes_live_start_after_registering_subscriber() { - let broadcaster = L2TxBroadcaster { - inner: Arc::new(L2TxBroadcasterInner { - db_path: ":memory:".to_string(), - page_size: 1, - subscriber_buffer_capacity: 1, - head_offset: AtomicU64::new(0), - next_subscriber_id: AtomicU64::new(0), - stop_requested: AtomicBool::new(false), - subscribers: Mutex::new(HashMap::new()), - }), - }; - - for _ in 0..16 { - broadcaster - .inner - .head_offset - .store(0, std::sync::atomic::Ordering::Release); - broadcaster - .inner - .subscribers - .lock() - .expect("subscribers mutex") - .clear(); - - let guard = broadcaster - .inner - .subscribers - .lock() - .expect("subscribers mutex"); - let (tx, rx) = std::sync::mpsc::channel(); - let cloned = broadcaster.clone(); - let join = std::thread::spawn(move || { - let subscription = cloned.subscribe(); - tx.send(subscription.live_start_offset) - .expect("send live start offset"); - }); - - std::thread::sleep(Duration::from_millis(2)); - broadcaster - .inner - .head_offset - .store(1, std::sync::atomic::Ordering::Release); - drop(guard); - - let observed = rx - .recv_timeout(Duration::from_secs(1)) - .expect("recv live start offset"); - join.join().expect("join subscribe thread"); - - assert_eq!( - observed, 1, - "subscriber must observe current head after it is visible in subscriber set" - ); - } - } -} diff --git a/sequencer/src/l2_tx_broadcaster/profiling.rs b/sequencer/src/l2_tx_broadcaster/profiling.rs deleted file mode 100644 index 0110e92..0000000 --- a/sequencer/src/l2_tx_broadcaster/profiling.rs +++ /dev/null @@ -1,199 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use std::time::{Duration, Instant}; -use tracing::info; - -#[derive(Debug, Clone, Copy)] -pub(super) struct FanoutOutcome { - pub(super) delivered: u64, - pub(super) 
dropped_closed: u64, - pub(super) dropped_full: u64, - pub(super) subscriber_count_before: u64, - pub(super) subscriber_count_after: u64, -} - -#[derive(Debug)] -pub(super) struct BroadcasterMetrics { - enabled: bool, - log_interval: Duration, - page_size: usize, - subscriber_buffer_capacity: usize, - idle_poll_interval: Duration, - window_started_at: Instant, - loops: u64, - empty_polls: u64, - read_errors: u64, - loaded_txs: u64, - fanout_delivered: u64, - dropped_closed: u64, - dropped_full: u64, - max_subscribers_before: u64, - max_subscribers_after: u64, - read_phase: Duration, - fanout_phase: Duration, - idle_sleep: Duration, -} - -impl BroadcasterMetrics { - pub(super) fn new( - enabled: bool, - log_interval: Duration, - page_size: usize, - subscriber_buffer_capacity: usize, - idle_poll_interval: Duration, - ) -> Self { - Self { - enabled, - log_interval, - page_size, - subscriber_buffer_capacity, - idle_poll_interval, - window_started_at: Instant::now(), - loops: 0, - empty_polls: 0, - read_errors: 0, - loaded_txs: 0, - fanout_delivered: 0, - dropped_closed: 0, - dropped_full: 0, - max_subscribers_before: 0, - max_subscribers_after: 0, - read_phase: Duration::ZERO, - fanout_phase: Duration::ZERO, - idle_sleep: Duration::ZERO, - } - } - - pub(super) fn phase_started_at(&self) -> Option { - self.enabled.then(Instant::now) - } - - pub(super) fn on_loop_start(&mut self) { - if !self.enabled { - return; - } - self.loops = self.loops.saturating_add(1); - } - - pub(super) fn on_empty_poll(&mut self) { - if !self.enabled { - return; - } - self.empty_polls = self.empty_polls.saturating_add(1); - } - - pub(super) fn on_read_error(&mut self, started_at: Option) { - if !self.enabled { - return; - } - self.read_errors = self.read_errors.saturating_add(1); - self.read_phase = self.read_phase.saturating_add(elapsed_or_zero(started_at)); - } - - pub(super) fn on_read_end(&mut self, started_at: Option, loaded_txs: u64) { - if !self.enabled { - return; - } - self.loaded_txs 
= self.loaded_txs.saturating_add(loaded_txs); - self.read_phase = self.read_phase.saturating_add(elapsed_or_zero(started_at)); - } - - pub(super) fn on_fanout_end(&mut self, started_at: Option, outcome: FanoutOutcome) { - if !self.enabled { - return; - } - self.fanout_delivered = self.fanout_delivered.saturating_add(outcome.delivered); - self.dropped_closed = self.dropped_closed.saturating_add(outcome.dropped_closed); - self.dropped_full = self.dropped_full.saturating_add(outcome.dropped_full); - self.max_subscribers_before = self - .max_subscribers_before - .max(outcome.subscriber_count_before); - self.max_subscribers_after = self - .max_subscribers_after - .max(outcome.subscriber_count_after); - self.fanout_phase = self - .fanout_phase - .saturating_add(elapsed_or_zero(started_at)); - } - - pub(super) fn on_idle_sleep_end(&mut self, started_at: Option) { - if !self.enabled { - return; - } - self.idle_sleep = self.idle_sleep.saturating_add(elapsed_or_zero(started_at)); - } - - pub(super) fn maybe_log_window(&mut self) { - if !self.enabled { - return; - } - let elapsed = self.window_started_at.elapsed(); - if elapsed < self.log_interval { - return; - } - self.log_window(elapsed, false); - self.reset_window(); - } - - pub(super) fn log_final(&mut self) { - if !self.enabled { - return; - } - let elapsed = self.window_started_at.elapsed(); - if elapsed.is_zero() && self.loops == 0 { - return; - } - self.log_window(elapsed, true); - } - - fn log_window(&self, elapsed: Duration, final_window: bool) { - let elapsed_secs = elapsed.as_secs_f64(); - let loaded_tps = if elapsed_secs > 0.0 { - self.loaded_txs as f64 / elapsed_secs - } else { - 0.0 - }; - info!( - final_window, - window_ms = elapsed.as_millis() as u64, - page_size = self.page_size, - subscriber_buffer_capacity = self.subscriber_buffer_capacity, - idle_poll_interval_ms = self.idle_poll_interval.as_millis() as u64, - loops = self.loops, - empty_polls = self.empty_polls, - read_errors = self.read_errors, - 
loaded_txs = self.loaded_txs, - loaded_tps = loaded_tps, - fanout_delivered = self.fanout_delivered, - dropped_closed = self.dropped_closed, - dropped_full = self.dropped_full, - max_subscribers_before = self.max_subscribers_before, - max_subscribers_after = self.max_subscribers_after, - read_phase_ms = self.read_phase.as_millis() as u64, - fanout_phase_ms = self.fanout_phase.as_millis() as u64, - idle_sleep_ms = self.idle_sleep.as_millis() as u64, - "l2 tx broadcaster metrics" - ); - } - - fn reset_window(&mut self) { - self.window_started_at = Instant::now(); - self.loops = 0; - self.empty_polls = 0; - self.read_errors = 0; - self.loaded_txs = 0; - self.fanout_delivered = 0; - self.dropped_closed = 0; - self.dropped_full = 0; - self.max_subscribers_before = 0; - self.max_subscribers_after = 0; - self.read_phase = Duration::ZERO; - self.fanout_phase = Duration::ZERO; - self.idle_sleep = Duration::ZERO; - } -} - -fn elapsed_or_zero(started_at: Option) -> Duration { - started_at.map_or(Duration::ZERO, |value| value.elapsed()) -} diff --git a/sequencer/src/l2_tx_feed/error.rs b/sequencer/src/l2_tx_feed/error.rs new file mode 100644 index 0000000..778dc39 --- /dev/null +++ b/sequencer/src/l2_tx_feed/error.rs @@ -0,0 +1,48 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use thiserror::Error; + +use crate::storage::StorageOpenError; + +#[derive(Debug, Error)] +pub enum SubscribeError { + #[error("cannot open subscription storage")] + OpenStorage { + #[source] + source: StorageOpenError, + }, + #[error("cannot load feed head offset")] + LoadHeadOffset { + #[source] + source: rusqlite::Error, + }, + #[error( + "catch-up window exceeded: requested offset {requested_offset}, live start {live_start_offset}, max {max_catchup_events}" + )] + CatchUpWindowExceeded { + requested_offset: u64, + live_start_offset: u64, + max_catchup_events: u64, + }, +} + +#[derive(Debug, Error)] +pub enum SubscriptionError { + 
#[error("cannot open subscription storage")] + OpenStorage { + #[source] + source: StorageOpenError, + }, + #[error("cannot load ordered tx page from offset {offset}")] + LoadReplay { + offset: u64, + #[source] + source: rusqlite::Error, + }, + #[error("subscription task join error: {source}")] + Join { + #[source] + source: tokio::task::JoinError, + }, +} diff --git a/sequencer/src/l2_tx_feed/feed.rs b/sequencer/src/l2_tx_feed/feed.rs new file mode 100644 index 0000000..abece8a --- /dev/null +++ b/sequencer/src/l2_tx_feed/feed.rs @@ -0,0 +1,170 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use std::time::Duration; + +pub use sequencer_core::broadcast::BroadcastTxMessage; +use tokio::sync::mpsc; + +use super::{SubscribeError, SubscriptionError}; +use crate::shutdown::ShutdownSignal; +use crate::storage::Storage; + +#[derive(Debug, Clone, Copy)] +pub struct L2TxFeedConfig { + pub idle_poll_interval: Duration, + pub page_size: usize, +} + +#[derive(Clone)] +pub struct L2TxFeed { + db_path: String, + page_size: usize, + idle_poll_interval: Duration, + shutdown: ShutdownSignal, +} + +pub struct Subscription { + receiver: mpsc::Receiver, + task: Option, + shutdown: ShutdownSignal, +} + +type SubscriptionTask = tokio::task::JoinHandle>; + +const DEFAULT_IDLE_POLL_INTERVAL: Duration = Duration::from_millis(20); +const DEFAULT_PAGE_SIZE: usize = 256; +const SUBSCRIPTION_BUFFER_CAPACITY: usize = 1024; + +impl Default for L2TxFeedConfig { + fn default() -> Self { + Self { + idle_poll_interval: DEFAULT_IDLE_POLL_INTERVAL, + page_size: DEFAULT_PAGE_SIZE, + } + } +} + +impl L2TxFeed { + pub fn new(db_path: String, shutdown: ShutdownSignal, config: L2TxFeedConfig) -> Self { + Self { + db_path, + page_size: config.page_size.max(1), + idle_poll_interval: config.idle_poll_interval, + shutdown, + } + } + + pub fn subscribe_from( + &self, + from_offset: u64, + max_catchup_events: u64, + ) -> Result { + let head_offset 
= load_head_offset(self.db_path.as_str())?; + let catchup_events = head_offset.saturating_sub(from_offset); + if catchup_events > max_catchup_events { + return Err(SubscribeError::CatchUpWindowExceeded { + requested_offset: from_offset, + live_start_offset: head_offset, + max_catchup_events, + }); + } + + let (events_tx, events_rx) = mpsc::channel(SUBSCRIPTION_BUFFER_CAPACITY); + let db_path = self.db_path.clone(); + let page_size = self.page_size; + let idle_poll_interval = self.idle_poll_interval; + let shutdown = self.shutdown.clone(); + let task = tokio::task::spawn_blocking(move || { + run_subscription( + db_path.as_str(), + page_size, + idle_poll_interval, + from_offset, + shutdown, + events_tx, + ) + }); + + Ok(Subscription { + receiver: events_rx, + task: Some(task), + shutdown: self.shutdown.clone(), + }) + } +} + +impl Subscription { + pub async fn recv(&mut self) -> Option { + tokio::select! { + _ = self.shutdown.wait_for_shutdown() => None, + maybe_event = self.receiver.recv() => maybe_event, + } + } + + pub async fn finish(mut self) -> Result<(), SubscriptionError> { + let task = self.task.take(); + self.receiver.close(); + drop(self.receiver); + + let Some(task) = task else { + return Ok(()); + }; + + match task.await { + Ok(result) => result, + Err(source) => Err(SubscriptionError::Join { source }), + } + } +} + +fn load_head_offset(db_path: &str) -> Result { + let mut storage = Storage::open_read_only(db_path) + .map_err(|source| SubscribeError::OpenStorage { source })?; + storage + .ordered_l2_tx_count() + .map_err(|source| SubscribeError::LoadHeadOffset { source }) +} + +fn run_subscription( + db_path: &str, + page_size: usize, + idle_poll_interval: Duration, + from_offset: u64, + shutdown: ShutdownSignal, + events_tx: mpsc::Sender, +) -> Result<(), SubscriptionError> { + let mut storage = Storage::open_read_only(db_path) + .map_err(|source| SubscriptionError::OpenStorage { source })?; + let mut next_offset = from_offset; + + loop { + if 
shutdown.is_shutdown_requested() || events_tx.is_closed() { + return Ok(()); + } + + let txs = storage + .load_ordered_l2_txs_page_from(next_offset, page_size) + .map_err(|source| SubscriptionError::LoadReplay { + offset: next_offset, + source, + })?; + + if txs.is_empty() { + std::thread::sleep(idle_poll_interval); + continue; + } + + for tx in txs { + if shutdown.is_shutdown_requested() || events_tx.is_closed() { + return Ok(()); + } + + let event = BroadcastTxMessage::from_offset_and_tx(next_offset, tx); + next_offset = next_offset.saturating_add(1); + if events_tx.blocking_send(event).is_err() { + return Ok(()); + } + } + } +} diff --git a/sequencer/src/l2_tx_feed/mod.rs b/sequencer/src/l2_tx_feed/mod.rs new file mode 100644 index 0000000..7c78a45 --- /dev/null +++ b/sequencer/src/l2_tx_feed/mod.rs @@ -0,0 +1,11 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +mod error; +mod feed; + +#[cfg(test)] +mod tests; + +pub use error::{SubscribeError, SubscriptionError}; +pub use feed::{BroadcastTxMessage, L2TxFeed, L2TxFeedConfig, Subscription}; diff --git a/sequencer/src/l2_tx_feed/tests.rs b/sequencer/src/l2_tx_feed/tests.rs new file mode 100644 index 0000000..3a6e6ec --- /dev/null +++ b/sequencer/src/l2_tx_feed/tests.rs @@ -0,0 +1,170 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use std::time::{Duration, SystemTime}; + +use alloy_primitives::{Address, Signature}; +use tempfile::TempDir; +use tokio::sync::oneshot; + +use super::{BroadcastTxMessage, L2TxFeed, L2TxFeedConfig, SubscribeError}; +use crate::inclusion_lane::{PendingUserOp, SequencerError}; +use crate::shutdown::ShutdownSignal; +use crate::storage::{DirectInputRange, Storage, StoredDirectInput}; +use sequencer_core::l2_tx::{DirectInput, SequencedL2Tx, ValidUserOp}; +use sequencer_core::user_op::UserOp; + +#[test] +fn broadcast_user_op_serializes_with_hex_data() { + let msg = 
BroadcastTxMessage::from_offset_and_tx( + 7, + SequencedL2Tx::UserOp(ValidUserOp { + sender: Address::from_slice(&[0x11; 20]), + fee: 3, + data: vec![0xaa, 0xbb], + }), + ); + let json = serde_json::to_string(&msg).expect("serialize"); + assert!(json.contains("\"kind\":\"user_op\"")); + assert!(json.contains("\"offset\":7")); + assert!(json.contains("\"fee\":3")); + assert!(json.contains("\"data\":\"0xaabb\"")); +} + +#[test] +fn broadcast_direct_input_serializes_with_hex_payload() { + let msg = BroadcastTxMessage::from_offset_and_tx( + 9, + SequencedL2Tx::Direct(DirectInput { + payload: vec![0xcc, 0xdd], + }), + ); + let json = serde_json::to_string(&msg).expect("serialize"); + assert!(json.contains("\"kind\":\"direct_input\"")); + assert!(json.contains("\"offset\":9")); + assert!(json.contains("\"payload\":\"0xccdd\"")); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn subscribe_from_rejects_catchup_window() { + let db = test_db("catchup-window"); + seed_ordered_txs(db.path.as_str()); + let feed = test_feed(db.path.as_str(), ShutdownSignal::default()); + + let result = feed.subscribe_from(0, 1); + + assert!(matches!( + result, + Err(SubscribeError::CatchUpWindowExceeded { + requested_offset: 0, + live_start_offset: 2, + max_catchup_events: 1, + }) + )); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn subscription_replays_existing_rows_in_order() { + let db = test_db("replay-existing"); + seed_ordered_txs(db.path.as_str()); + let feed = test_feed(db.path.as_str(), ShutdownSignal::default()); + + let mut subscription = feed.subscribe_from(0, u64::MAX).expect("subscribe"); + + let first = tokio::time::timeout(Duration::from_secs(1), subscription.recv()) + .await + .expect("wait first event") + .expect("first event"); + let second = tokio::time::timeout(Duration::from_secs(1), subscription.recv()) + .await + .expect("wait second event") + .expect("second event"); + + assert_eq!(first.offset(), 0); + 
assert_eq!(second.offset(), 1); + + subscription.finish().await.expect("finish subscription"); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn shutdown_signal_closes_subscription() { + let db = test_db("shutdown-closes"); + seed_ordered_txs(db.path.as_str()); + let shutdown = ShutdownSignal::default(); + let feed = test_feed(db.path.as_str(), shutdown.clone()); + + let mut subscription = feed.subscribe_from(u64::MAX, u64::MAX).expect("subscribe"); + + shutdown.request_shutdown(); + + assert!( + tokio::time::timeout(Duration::from_secs(1), subscription.recv()) + .await + .expect("wait for subscription close") + .is_none() + ); + subscription.finish().await.expect("clean shutdown"); +} + +fn test_feed(db_path: &str, shutdown: ShutdownSignal) -> L2TxFeed { + L2TxFeed::new( + db_path.to_string(), + shutdown, + L2TxFeedConfig { + idle_poll_interval: Duration::from_millis(2), + page_size: 64, + }, + ) +} + +fn test_db(label: &str) -> TestDb { + let dir = TempDir::new().expect("create temp dir"); + let path = dir.path().join(format!("{label}.db")); + TestDb { + _dir: dir, + path: path.to_string_lossy().into_owned(), + } +} + +fn seed_ordered_txs(db_path: &str) { + let mut storage = Storage::open(db_path, "NORMAL").expect("open storage"); + let mut head = storage + .initialize_open_state(0, DirectInputRange::empty_at(0)) + .expect("initialize open state"); + + let (respond_to, _recv) = oneshot::channel::>(); + let pending = PendingUserOp { + signed: sequencer_core::user_op::SignedUserOp { + sender: Address::from_slice(&[0x11; 20]), + signature: Signature::test_signature(), + user_op: UserOp { + nonce: 0, + max_fee: 3, + data: vec![0x42].into(), + }, + }, + respond_to, + received_at: SystemTime::now(), + }; + + storage + .append_user_ops_chunk(&mut head, &[pending]) + .expect("append user-op chunk"); + storage + .append_safe_direct_inputs( + 10, + &[StoredDirectInput { + payload: vec![0xaa], + block_number: 10, + }], + ) + .expect("append direct 
input"); + storage + .close_frame_only(&mut head, 10, DirectInputRange::new(0, 1)) + .expect("close frame with one drained direct input"); +} + +struct TestDb { + _dir: TempDir, + path: String, +} diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index e0fa2ff..935cb88 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -6,7 +6,13 @@ //! Flow: API -> inclusion lane -> SQLite -> catch-up replay. //! The inclusion lane is the single writer that defines execution order. pub mod api; +pub mod config; pub mod inclusion_lane; pub mod input_reader; -pub mod l2_tx_broadcaster; +pub mod l2_tx_feed; +mod runtime; +pub mod shutdown; pub mod storage; + +pub use config::RunConfig; +pub use runtime::{RunError, run}; diff --git a/sequencer/src/main.rs b/sequencer/src/main.rs index f9ddf05..a1ff863 100644 --- a/sequencer/src/main.rs +++ b/sequencer/src/main.rs @@ -1,831 +1,19 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) -use std::sync::Arc; -use std::time::Duration; - -use alloy_primitives::{Address, U256}; -use alloy_sol_types::Eip712Domain; use app_core::application::{WalletApp, WalletConfig}; -use clap::{Args, Parser, Subcommand, ValueEnum}; -use sequencer_core::api::TxRequest; -use sequencer_core::user_op::SignedUserOp; -use serde::Serialize; -use tokio::sync::Semaphore; +use clap::Parser; +use sequencer::{RunConfig, run}; use tracing_subscriber::EnvFilter; -use sequencer::api::AppState; -use sequencer::inclusion_lane::{ - InclusionLane, InclusionLaneConfig, InclusionLaneError, InclusionLaneInput, -}; -use sequencer::input_reader::{InputReader, InputReaderConfig}; -use sequencer::l2_tx_broadcaster::{L2TxBroadcaster, L2TxBroadcasterConfig}; -use sequencer::storage; - -const DEFAULT_HTTP_ADDR: &str = "127.0.0.1:3000"; -const DEFAULT_DB_PATH: &str = "sequencer.db"; -const DEFAULT_QUEUE_CAP: usize = 8192; -const DEFAULT_MAX_USER_OPS_PER_CHUNK: usize = 1024; -const DEFAULT_SAFE_DIRECT_BUFFER_CAPACITY: 
usize = 2048; -const DEFAULT_MAX_BATCH_OPEN_DURATION: Duration = Duration::from_secs(2 * 60 * 60); -const DEFAULT_MAX_BATCH_USER_OP_BYTES: usize = 1_048_576; // 1 MiB -const DEFAULT_INCLUSION_LANE_IDLE_POLL_INTERVAL: Duration = Duration::from_millis(2); -const DEFAULT_BROADCASTER_IDLE_POLL_INTERVAL: Duration = Duration::from_millis(20); -const DEFAULT_INPUT_READER_POLL_INTERVAL: Duration = Duration::from_secs(12); -const DEFAULT_BROADCASTER_PAGE_SIZE: usize = 256; -const DEFAULT_BROADCASTER_SUBSCRIBER_BUFFER_CAPACITY: usize = 32_768; -const DEFAULT_WS_MAX_SUBSCRIBERS: usize = 64; -const DEFAULT_WS_MAX_CATCHUP_EVENTS: u64 = 50_000; -const DEFAULT_OVERLOAD_MAX_INFLIGHT_MULTIPLIER: usize = 2; -const DEFAULT_RUNTIME_METRICS_ENABLED: bool = false; -const DEFAULT_RUNTIME_METRICS_LOG_INTERVAL: Duration = Duration::from_secs(5); -const DEFAULT_MAX_BODY_BYTES: usize = TxRequest::MAX_JSON_BYTES_RECOMMENDED; -const DEFAULT_DOMAIN_NAME: &str = "CartesiAppSequencer"; -const DEFAULT_DOMAIN_VERSION: &str = "1"; -const DEFAULT_DOMAIN_CHAIN_ID: u64 = 1; -const DEFAULT_DOMAIN_VERIFYING_CONTRACT: &str = "0x0000000000000000000000000000000000000000"; -/// Default RPC URL for the input reader (Anvil's default HTTP endpoint). 
-const DEFAULT_INPUT_READER_RPC_URL: &str = "http://127.0.0.1:8545"; - -fn default_overload_max_inflight_submissions(queue_capacity: usize) -> usize { - queue_capacity - .saturating_mul(DEFAULT_OVERLOAD_MAX_INFLIGHT_MULTIPLIER) - .max(1) -} - #[tokio::main] -async fn main() -> Result<(), Box> { - let cli = Cli::parse(); - if let Some(command) = cli.command { - return handle_config_command(command); - } - - run(cli.run).await -} - -async fn run(args: RunArgs) -> Result<(), Box> { +async fn main() -> Result<(), sequencer::RunError> { tracing_subscriber::fmt() .with_env_filter( EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")), ) .init(); - let config = Config::from_run_args(args) - .map_err(|reason| std::io::Error::other(format!("invalid configuration: {reason}")))?; - log_effective_config(&config); - let domain = config.build_domain()?; - - let mut storage = storage::Storage::open(&config.db_path, config.sqlite_synchronous.pragma())?; - force_zero_frame_fee_for_now(&mut storage)?; - let (tx, rx) = tokio::sync::mpsc::channel::(config.queue_capacity); - - let storage_ir = storage::Storage::open(&config.db_path, config.sqlite_synchronous.pragma())?; - let reader = InputReader::new(config.input_reader.clone(), storage_ir); - let (mut reader_handle, reader_stop) = reader.spawn(); - tracing::info!( - "input reader started (reference: {})", - config.input_reader.rpc_url - ); - - let inclusion_lane = InclusionLane::new( - rx, - WalletApp::new(WalletConfig), - storage, - InclusionLaneConfig { - max_user_ops_per_chunk: config.max_user_ops_per_chunk, - safe_direct_buffer_capacity: config.safe_direct_buffer_capacity, - max_batch_open: config.max_batch_open, - max_batch_user_op_bytes: config.max_batch_user_op_bytes, - idle_poll_interval: config.inclusion_lane_idle_poll_interval, - metrics_enabled: config.runtime_metrics_enabled, - metrics_log_interval: config.runtime_metrics_log_interval, - }, - ); - let (mut inclusion_lane_handle, inclusion_lane_stop) 
= inclusion_lane.spawn(); - let broadcaster = L2TxBroadcaster::start( - config.db_path.clone(), - L2TxBroadcasterConfig { - idle_poll_interval: config.broadcaster_idle_poll_interval, - page_size: config.broadcaster_page_size, - subscriber_buffer_capacity: config.broadcaster_subscriber_buffer_capacity, - metrics_enabled: config.runtime_metrics_enabled, - metrics_log_interval: config.runtime_metrics_log_interval, - }, - ) - .map_err(|reason| format!("failed to start l2 tx broadcaster: {reason}"))?; - let broadcaster_shutdown = broadcaster.clone(); - - let state = Arc::new(AppState { - tx_sender: tx, - domain, - overload_max_inflight_submissions: config.overload_max_inflight_submissions, - ws_subscriber_limit: Arc::new(Semaphore::new(config.ws_max_subscribers)), - ws_max_catchup_events: config.ws_max_catchup_events, - broadcaster, - }); - - let app = sequencer::api::router(state, config.max_body_bytes); - let listener = tokio::net::TcpListener::bind(&config.http_addr).await?; - - tracing::info!(address = %config.http_addr, "listening"); - tokio::select! 
{ - server_result = axum::serve(listener, app) => { - broadcaster_shutdown.request_shutdown(); - inclusion_lane_stop.request_shutdown(); - reader_stop.request_shutdown(); - let lane_result = inclusion_lane_handle.await; - match lane_result { - Ok(InclusionLaneError::ShutdownRequested) => {} - Ok(err) => return Err(format!("inclusion lane exited during shutdown: {err}").into()), - Err(join_err) => { - return Err(format!("inclusion lane join error during shutdown: {join_err}").into()) - } - } - let reader_result = reader_handle.await; - match reader_result { - Ok(Ok(())) => {} - Ok(Err(e)) => return Err(format!("input reader exited during shutdown: {e}").into()), - Err(join_err) => return Err(format!("input reader join error during shutdown: {join_err}").into()), - } - server_result?; - } - lane_result = &mut inclusion_lane_handle => { - broadcaster_shutdown.request_shutdown(); - reader_stop.request_shutdown(); - match lane_result { - Ok(err) => return Err(format!("inclusion lane exited: {err}").into()), - Err(join_err) => { - return Err(format!("inclusion lane join error: {join_err}").into()) - } - } - } - reader_result = &mut reader_handle => { - broadcaster_shutdown.request_shutdown(); - inclusion_lane_stop.request_shutdown(); - match reader_result { - Ok(Ok(())) => return Err("input reader exited unexpectedly".into()), - Ok(Err(e)) => return Err(format!("input reader exited: {e}").into()), - Err(join_err) => return Err(format!("input reader join error: {join_err}").into()), - } - } - } - - Ok(()) -} - -fn handle_config_command(command: Command) -> Result<(), Box> { - match command { - Command::Config { - command: ConfigCommand::Print(args), - } => { - let config = Config::from_run_args(args).map_err(std::io::Error::other)?; - println!("{}", serde_json::to_string_pretty(&config.effective())?); - Ok(()) - } - Command::Config { - command: ConfigCommand::Validate(args), - } => { - let config = Config::from_run_args(args).map_err(std::io::Error::other)?; - 
println!("configuration is valid"); - println!("{}", serde_json::to_string_pretty(&config.effective())?); - Ok(()) - } - } -} - -fn log_effective_config(config: &Config) { - match serde_json::to_string(&config.effective()) { - Ok(json) => tracing::info!(effective_config = %json, "resolved sequencer config"), - Err(err) => tracing::warn!(%err, "failed to serialize effective sequencer config"), - } -} - -struct Config { - profile: Profile, - http_addr: String, - db_path: String, - queue_capacity: usize, - overload_max_inflight_submissions: usize, - max_user_ops_per_chunk: usize, - safe_direct_buffer_capacity: usize, - max_batch_open: Duration, - max_batch_user_op_bytes: usize, - inclusion_lane_idle_poll_interval: Duration, - broadcaster_idle_poll_interval: Duration, - broadcaster_page_size: usize, - broadcaster_subscriber_buffer_capacity: usize, - ws_max_subscribers: usize, - ws_max_catchup_events: u64, - runtime_metrics_enabled: bool, - runtime_metrics_log_interval: Duration, - max_body_bytes: usize, - sqlite_synchronous: SqliteSynchronous, - domain_name: String, - domain_version: String, - domain_chain_id: u64, - domain_verifying_contract: String, - /// InputReader config (required). Feeds safe inputs from a reference node into storage. 
- input_reader: InputReaderConfig, -} - -#[derive(Debug, Parser)] -#[command( - name = "sequencer", - about = "Deterministic sequencer prototype with low-latency soft confirmations", - version, - after_help = "Examples:\n sequencer --profile dev\n sequencer --profile bench --max-user-ops-per-chunk 4096 --max-batch-open 30m\n sequencer config print --profile safe\n sequencer config validate --sqlite-synchronous FULL" -)] -struct Cli { - #[command(subcommand)] - command: Option, - #[command(flatten)] - run: RunArgs, -} - -#[derive(Debug, Subcommand)] -enum Command { - Config { - #[command(subcommand)] - command: ConfigCommand, - }, -} - -#[derive(Debug, Subcommand)] -enum ConfigCommand { - Print(RunArgs), - Validate(RunArgs), -} - -#[derive(Debug, Clone, Args)] -struct RunArgs { - #[arg(long, env = "SEQ_PROFILE", value_enum, default_value_t = Profile::Dev)] - profile: Profile, - #[arg(long, env = "SEQ_HTTP_ADDR")] - http_addr: Option, - #[arg(long, env = "SEQ_DB_PATH")] - db_path: Option, - #[arg(long, env = "SEQ_QUEUE_CAP")] - queue_capacity: Option, - #[arg(long, env = "SEQ_OVERLOAD_MAX_INFLIGHT_SUBMISSIONS")] - overload_max_inflight_submissions: Option, - #[arg(long, env = "SEQ_MAX_USER_OPS_PER_CHUNK")] - max_user_ops_per_chunk: Option, - #[arg(long, env = "SEQ_MAX_BATCH", hide = true)] - legacy_max_batch: Option, - #[arg(long, env = "SEQ_SAFE_DIRECT_BUFFER_CAPACITY")] - safe_direct_buffer_capacity: Option, - #[arg( - long, - env = "SEQ_MAX_BATCH_OPEN_MS", - value_name = "DURATION", - value_parser = parse_duration_ms_or_unit - )] - max_batch_open: Option, - #[arg(long, env = "SEQ_MAX_BATCH_USER_OP_BYTES")] - max_batch_user_op_bytes: Option, - #[arg( - long, - env = "SEQ_INCLUSION_LANE_IDLE_POLL_INTERVAL_MS", - value_name = "DURATION", - value_parser = parse_duration_ms_or_unit - )] - inclusion_lane_idle_poll_interval: Option, - #[arg( - long, - env = "SEQ_INCLUSION_LANE_TICK_INTERVAL_MS", - hide = true, - value_parser = parse_duration_ms_or_unit - )] - 
legacy_inclusion_lane_tick_interval: Option, - #[arg( - long, - env = "SEQ_COMMIT_LANE_TICK_INTERVAL_MS", - hide = true, - value_parser = parse_duration_ms_or_unit - )] - legacy_commit_lane_tick_interval: Option, - #[arg( - long, - env = "SEQ_BROADCASTER_IDLE_POLL_INTERVAL_MS", - value_name = "DURATION", - value_parser = parse_duration_ms_or_unit - )] - broadcaster_idle_poll_interval: Option, - #[arg(long, env = "SEQ_BROADCASTER_PAGE_SIZE")] - broadcaster_page_size: Option, - #[arg(long, env = "SEQ_BROADCASTER_SUBSCRIBER_BUFFER_CAPACITY")] - broadcaster_subscriber_buffer_capacity: Option, - #[arg(long, env = "SEQ_WS_MAX_SUBSCRIBERS")] - ws_max_subscribers: Option, - #[arg(long, env = "SEQ_WS_MAX_CATCHUP_EVENTS")] - ws_max_catchup_events: Option, - #[arg(long, env = "SEQ_RUNTIME_METRICS_ENABLED")] - runtime_metrics_enabled: Option, - #[arg( - long, - env = "SEQ_RUNTIME_METRICS_LOG_INTERVAL_MS", - value_name = "DURATION", - value_parser = parse_duration_ms_or_unit - )] - runtime_metrics_log_interval: Option, - #[arg(long, env = "SEQ_MAX_BODY_BYTES")] - max_body_bytes: Option, - #[arg(long, env = "SEQ_SQLITE_SYNCHRONOUS", value_enum)] - sqlite_synchronous: Option, - #[arg(long, env = "SEQ_DOMAIN_NAME")] - domain_name: Option, - #[arg(long, env = "SEQ_DOMAIN_VERSION")] - domain_version: Option, - #[arg(long, env = "SEQ_DOMAIN_CHAIN_ID")] - domain_chain_id: Option, - #[arg(long, env = "SEQ_DOMAIN_VERIFYING_CONTRACT")] - domain_verifying_contract: Option, - #[arg( - long, - env = "SEQ_INPUT_READER_RPC_URL", - help = "Ethereum RPC URL for input reader (default: Anvil endpoint)" - )] - input_reader_rpc_url: Option, - #[arg(long, env = "SEQ_INPUT_READER_INPUT_BOX_ADDRESS", required = true)] - input_reader_input_box_address: String, - #[arg(long, env = "SEQ_INPUT_READER_APP_ADDRESS", required = true)] - input_reader_app_address: String, - #[arg(long, env = "SEQ_INPUT_READER_GENESIS_BLOCK")] - input_reader_genesis_block: Option, - #[arg( - long, - env = 
"SEQ_INPUT_READER_POLL_INTERVAL_MS", - value_name = "DURATION", - value_parser = parse_duration_ms_or_unit - )] - input_reader_poll_interval: Option, -} - -#[derive(Debug, Clone, Copy, Serialize, ValueEnum)] -#[serde(rename_all = "snake_case")] -enum Profile { - Dev, - Bench, - Safe, -} - -#[derive(Debug, Clone, Copy, Serialize, ValueEnum)] -#[serde(rename_all = "UPPERCASE")] -enum SqliteSynchronous { - #[value(name = "OFF", alias = "off")] - Off, - #[value(name = "NORMAL", alias = "normal")] - Normal, - #[value(name = "FULL", alias = "full")] - Full, - #[value(name = "EXTRA", alias = "extra")] - Extra, -} - -impl SqliteSynchronous { - fn pragma(self) -> &'static str { - match self { - Self::Off => "OFF", - Self::Normal => "NORMAL", - Self::Full => "FULL", - Self::Extra => "EXTRA", - } - } -} - -struct ProfileDefaults { - queue_capacity: usize, - max_user_ops_per_chunk: usize, - safe_direct_buffer_capacity: usize, - max_batch_open: Duration, - max_batch_user_op_bytes: usize, - inclusion_lane_idle_poll_interval: Duration, - broadcaster_idle_poll_interval: Duration, - input_reader_poll_interval: Duration, - broadcaster_page_size: usize, - broadcaster_subscriber_buffer_capacity: usize, - ws_max_subscribers: usize, - ws_max_catchup_events: u64, - runtime_metrics_enabled: bool, - runtime_metrics_log_interval: Duration, - max_body_bytes: usize, - sqlite_synchronous: SqliteSynchronous, -} - -impl Profile { - fn defaults(self) -> ProfileDefaults { - match self { - Self::Dev => ProfileDefaults { - queue_capacity: DEFAULT_QUEUE_CAP, - max_user_ops_per_chunk: DEFAULT_MAX_USER_OPS_PER_CHUNK, - safe_direct_buffer_capacity: DEFAULT_SAFE_DIRECT_BUFFER_CAPACITY, - max_batch_open: DEFAULT_MAX_BATCH_OPEN_DURATION, - max_batch_user_op_bytes: DEFAULT_MAX_BATCH_USER_OP_BYTES, - inclusion_lane_idle_poll_interval: DEFAULT_INCLUSION_LANE_IDLE_POLL_INTERVAL, - broadcaster_idle_poll_interval: DEFAULT_BROADCASTER_IDLE_POLL_INTERVAL, - input_reader_poll_interval: 
DEFAULT_INPUT_READER_POLL_INTERVAL, - broadcaster_page_size: DEFAULT_BROADCASTER_PAGE_SIZE, - broadcaster_subscriber_buffer_capacity: - DEFAULT_BROADCASTER_SUBSCRIBER_BUFFER_CAPACITY, - ws_max_subscribers: DEFAULT_WS_MAX_SUBSCRIBERS, - ws_max_catchup_events: DEFAULT_WS_MAX_CATCHUP_EVENTS, - runtime_metrics_enabled: DEFAULT_RUNTIME_METRICS_ENABLED, - runtime_metrics_log_interval: DEFAULT_RUNTIME_METRICS_LOG_INTERVAL, - max_body_bytes: DEFAULT_MAX_BODY_BYTES, - sqlite_synchronous: SqliteSynchronous::Normal, - }, - Self::Bench => ProfileDefaults { - queue_capacity: 32_768, - max_user_ops_per_chunk: 4_096, - safe_direct_buffer_capacity: 8_192, - max_batch_open: DEFAULT_MAX_BATCH_OPEN_DURATION, - max_batch_user_op_bytes: 1_572_864, // 1.5 MiB - inclusion_lane_idle_poll_interval: Duration::from_millis(1), - broadcaster_idle_poll_interval: Duration::from_millis(5), - input_reader_poll_interval: DEFAULT_INPUT_READER_POLL_INTERVAL, - broadcaster_page_size: 1_024, - broadcaster_subscriber_buffer_capacity: 131_072, - ws_max_subscribers: DEFAULT_WS_MAX_SUBSCRIBERS, - ws_max_catchup_events: DEFAULT_WS_MAX_CATCHUP_EVENTS, - runtime_metrics_enabled: DEFAULT_RUNTIME_METRICS_ENABLED, - runtime_metrics_log_interval: DEFAULT_RUNTIME_METRICS_LOG_INTERVAL, - max_body_bytes: DEFAULT_MAX_BODY_BYTES, - sqlite_synchronous: SqliteSynchronous::Normal, - }, - Self::Safe => ProfileDefaults { - queue_capacity: DEFAULT_QUEUE_CAP, - max_user_ops_per_chunk: DEFAULT_MAX_USER_OPS_PER_CHUNK, - safe_direct_buffer_capacity: DEFAULT_SAFE_DIRECT_BUFFER_CAPACITY, - max_batch_open: Duration::from_secs(60 * 60), - max_batch_user_op_bytes: DEFAULT_MAX_BATCH_USER_OP_BYTES, - inclusion_lane_idle_poll_interval: Duration::from_millis(5), - broadcaster_idle_poll_interval: Duration::from_millis(25), - input_reader_poll_interval: DEFAULT_INPUT_READER_POLL_INTERVAL, - broadcaster_page_size: DEFAULT_BROADCASTER_PAGE_SIZE, - broadcaster_subscriber_buffer_capacity: - DEFAULT_BROADCASTER_SUBSCRIBER_BUFFER_CAPACITY, - 
ws_max_subscribers: DEFAULT_WS_MAX_SUBSCRIBERS, - ws_max_catchup_events: DEFAULT_WS_MAX_CATCHUP_EVENTS, - runtime_metrics_enabled: DEFAULT_RUNTIME_METRICS_ENABLED, - runtime_metrics_log_interval: DEFAULT_RUNTIME_METRICS_LOG_INTERVAL, - max_body_bytes: DEFAULT_MAX_BODY_BYTES, - sqlite_synchronous: SqliteSynchronous::Full, - }, - } - } -} - -impl Config { - fn from_run_args(args: RunArgs) -> Result { - let defaults = args.profile.defaults(); - let queue_capacity = args.queue_capacity.unwrap_or(defaults.queue_capacity); - let overload_max_inflight_submissions = args - .overload_max_inflight_submissions - .unwrap_or_else(|| default_overload_max_inflight_submissions(queue_capacity)); - - let config = Self { - profile: args.profile, - http_addr: args - .http_addr - .as_deref() - .unwrap_or(DEFAULT_HTTP_ADDR) - .to_string(), - db_path: args - .db_path - .as_deref() - .unwrap_or(DEFAULT_DB_PATH) - .to_string(), - queue_capacity, - overload_max_inflight_submissions, - max_user_ops_per_chunk: args - .max_user_ops_per_chunk - .or(args.legacy_max_batch) - .unwrap_or(defaults.max_user_ops_per_chunk), - safe_direct_buffer_capacity: args - .safe_direct_buffer_capacity - .unwrap_or(defaults.safe_direct_buffer_capacity), - max_batch_open: args.max_batch_open.unwrap_or(defaults.max_batch_open), - max_batch_user_op_bytes: args - .max_batch_user_op_bytes - .unwrap_or(defaults.max_batch_user_op_bytes), - inclusion_lane_idle_poll_interval: args - .inclusion_lane_idle_poll_interval - .or(args.legacy_inclusion_lane_tick_interval) - .or(args.legacy_commit_lane_tick_interval) - .unwrap_or(defaults.inclusion_lane_idle_poll_interval), - broadcaster_idle_poll_interval: args - .broadcaster_idle_poll_interval - .unwrap_or(defaults.broadcaster_idle_poll_interval), - broadcaster_page_size: args - .broadcaster_page_size - .unwrap_or(defaults.broadcaster_page_size), - broadcaster_subscriber_buffer_capacity: args - .broadcaster_subscriber_buffer_capacity - 
.unwrap_or(defaults.broadcaster_subscriber_buffer_capacity), - ws_max_subscribers: args - .ws_max_subscribers - .unwrap_or(defaults.ws_max_subscribers), - ws_max_catchup_events: args - .ws_max_catchup_events - .unwrap_or(defaults.ws_max_catchup_events), - runtime_metrics_enabled: args - .runtime_metrics_enabled - .unwrap_or(defaults.runtime_metrics_enabled), - runtime_metrics_log_interval: args - .runtime_metrics_log_interval - .unwrap_or(defaults.runtime_metrics_log_interval), - max_body_bytes: args.max_body_bytes.unwrap_or(defaults.max_body_bytes), - sqlite_synchronous: args - .sqlite_synchronous - .unwrap_or(defaults.sqlite_synchronous), - domain_name: args - .domain_name - .as_deref() - .unwrap_or(DEFAULT_DOMAIN_NAME) - .to_string(), - domain_version: args - .domain_version - .as_deref() - .unwrap_or(DEFAULT_DOMAIN_VERSION) - .to_string(), - domain_chain_id: args.domain_chain_id.unwrap_or(DEFAULT_DOMAIN_CHAIN_ID), - domain_verifying_contract: args - .domain_verifying_contract - .as_deref() - .unwrap_or(DEFAULT_DOMAIN_VERIFYING_CONTRACT) - .to_string(), - input_reader: input_reader_config_from_run_args( - &args, - args.input_reader_poll_interval - .unwrap_or(defaults.input_reader_poll_interval), - )?, - }; - config.validate()?; - Ok(config) - } - - fn validate(&self) -> Result<(), String> { - if self.http_addr.trim().is_empty() { - return Err("http_addr cannot be empty".to_string()); - } - if self.db_path.trim().is_empty() { - return Err("db_path cannot be empty".to_string()); - } - if self.queue_capacity == 0 { - return Err("queue_capacity must be > 0".to_string()); - } - if self.overload_max_inflight_submissions == 0 { - return Err("overload_max_inflight_submissions must be > 0".to_string()); - } - if self.max_user_ops_per_chunk == 0 { - return Err("max_user_ops_per_chunk must be > 0".to_string()); - } - if self.safe_direct_buffer_capacity == 0 { - return Err("safe_direct_buffer_capacity must be > 0".to_string()); - } - if self.max_batch_open.is_zero() { - 
return Err("max_batch_open must be > 0".to_string()); - } - if self.max_batch_user_op_bytes == 0 { - return Err("max_batch_user_op_bytes must be > 0".to_string()); - } - if self.max_batch_user_op_bytes < SignedUserOp::max_batch_bytes_upper_bound() { - return Err(format!( - "max_batch_user_op_bytes must be >= {} (one max-sized user op)", - SignedUserOp::max_batch_bytes_upper_bound() - )); - } - if self.inclusion_lane_idle_poll_interval.is_zero() { - return Err("inclusion_lane_idle_poll_interval must be > 0".to_string()); - } - if self.broadcaster_idle_poll_interval.is_zero() { - return Err("broadcaster_idle_poll_interval must be > 0".to_string()); - } - if self.input_reader.poll_interval.is_zero() { - return Err("input_reader_poll_interval must be > 0".to_string()); - } - if self.broadcaster_page_size == 0 { - return Err("broadcaster_page_size must be > 0".to_string()); - } - if self.broadcaster_subscriber_buffer_capacity == 0 { - return Err("broadcaster_subscriber_buffer_capacity must be > 0".to_string()); - } - if self.ws_max_subscribers == 0 { - return Err("ws_max_subscribers must be > 0".to_string()); - } - if self.ws_max_catchup_events == 0 { - return Err("ws_max_catchup_events must be > 0".to_string()); - } - if self.runtime_metrics_log_interval.is_zero() { - return Err("runtime_metrics_log_interval must be > 0".to_string()); - } - if self.max_body_bytes == 0 { - return Err("max_body_bytes must be > 0".to_string()); - } - if self.domain_name.trim().is_empty() { - return Err("domain_name cannot be empty".to_string()); - } - if self.domain_version.trim().is_empty() { - return Err("domain_version cannot be empty".to_string()); - } - Ok(()) - } - - fn build_domain(&self) -> Result { - let verifying_contract = parse_address(&self.domain_verifying_contract)?; - Ok(Eip712Domain { - name: Some(self.domain_name.clone().into()), - version: Some(self.domain_version.clone().into()), - chain_id: Some(U256::from(self.domain_chain_id)), - verifying_contract: 
Some(verifying_contract), - salt: None, - }) - } - - fn effective(&self) -> EffectiveConfig { - EffectiveConfig { - profile: self.profile, - http_addr: self.http_addr.clone(), - db_path: self.db_path.clone(), - queue_capacity: self.queue_capacity, - overload_max_inflight_submissions: self.overload_max_inflight_submissions, - max_user_ops_per_chunk: self.max_user_ops_per_chunk, - safe_direct_buffer_capacity: self.safe_direct_buffer_capacity, - max_batch_open: DurationValue::from(self.max_batch_open), - max_batch_user_op_bytes: self.max_batch_user_op_bytes, - inclusion_lane_idle_poll_interval: DurationValue::from( - self.inclusion_lane_idle_poll_interval, - ), - broadcaster_idle_poll_interval: DurationValue::from( - self.broadcaster_idle_poll_interval, - ), - broadcaster_page_size: self.broadcaster_page_size, - broadcaster_subscriber_buffer_capacity: self.broadcaster_subscriber_buffer_capacity, - ws_max_subscribers: self.ws_max_subscribers, - ws_max_catchup_events: self.ws_max_catchup_events, - runtime_metrics_enabled: self.runtime_metrics_enabled, - runtime_metrics_log_interval: DurationValue::from(self.runtime_metrics_log_interval), - max_body_bytes: self.max_body_bytes, - sqlite_synchronous: self.sqlite_synchronous, - domain_name: self.domain_name.clone(), - domain_version: self.domain_version.clone(), - domain_chain_id: self.domain_chain_id, - domain_verifying_contract: self.domain_verifying_contract.clone(), - } - } -} - -#[derive(Debug, Clone, Serialize)] -struct EffectiveConfig { - profile: Profile, - http_addr: String, - db_path: String, - queue_capacity: usize, - overload_max_inflight_submissions: usize, - max_user_ops_per_chunk: usize, - safe_direct_buffer_capacity: usize, - max_batch_open: DurationValue, - max_batch_user_op_bytes: usize, - inclusion_lane_idle_poll_interval: DurationValue, - broadcaster_idle_poll_interval: DurationValue, - broadcaster_page_size: usize, - broadcaster_subscriber_buffer_capacity: usize, - ws_max_subscribers: usize, - 
ws_max_catchup_events: u64, - runtime_metrics_enabled: bool, - runtime_metrics_log_interval: DurationValue, - max_body_bytes: usize, - sqlite_synchronous: SqliteSynchronous, - domain_name: String, - domain_version: String, - domain_chain_id: u64, - domain_verifying_contract: String, -} - -#[derive(Debug, Clone, Serialize)] -struct DurationValue { - ms: u64, - human: String, -} - -impl From for DurationValue { - fn from(value: Duration) -> Self { - Self { - ms: duration_millis_u64(value), - human: format_duration(value), - } - } -} - -fn parse_address(value: &str) -> Result { - if !value.starts_with("0x") { - return Err("address must be 0x-prefixed hex".to_string()); - } - let bytes = - alloy_primitives::hex::decode(value).map_err(|e| format!("invalid address hex: {e}"))?; - if bytes.len() != 20 { - return Err("address must be 20 bytes".to_string()); - } - Ok(Address::from_slice(&bytes)) -} - -fn input_reader_config_from_run_args( - args: &RunArgs, - poll_interval: Duration, -) -> Result { - let input_box_address = parse_address(&args.input_reader_input_box_address)?; - let app_address_filter = parse_address(&args.input_reader_app_address)?; - let genesis_block = args.input_reader_genesis_block.unwrap_or(0); - Ok(InputReaderConfig { - rpc_url: args - .input_reader_rpc_url - .as_deref() - .unwrap_or(DEFAULT_INPUT_READER_RPC_URL) - .to_string(), - input_box_address, - app_address_filter, - genesis_block, - poll_interval, - long_block_range_error_codes: vec!["rate limit".into(), "too many".into()], - }) -} - -fn parse_duration_ms_or_unit(raw: &str) -> Result { - let value = raw.trim(); - if value.is_empty() { - return Err("duration cannot be empty".to_string()); - } - - if let Ok(ms) = value.parse::() { - return Ok(Duration::from_millis(ms)); - } - - if let Some(ms) = value.strip_suffix("ms") { - let value = ms - .trim() - .parse::() - .map_err(|_| format!("invalid milliseconds duration: {raw}"))?; - return Ok(Duration::from_millis(value)); - } - if let Some(seconds) = 
value.strip_suffix('s') { - let value = seconds - .trim() - .parse::() - .map_err(|_| format!("invalid seconds duration: {raw}"))?; - return Ok(Duration::from_secs(value)); - } - if let Some(minutes) = value.strip_suffix('m') { - let value = minutes - .trim() - .parse::() - .map_err(|_| format!("invalid minutes duration: {raw}"))?; - let seconds = value - .checked_mul(60) - .ok_or_else(|| format!("duration overflow: {raw}"))?; - return Ok(Duration::from_secs(seconds)); - } - if let Some(hours) = value.strip_suffix('h') { - let value = hours - .trim() - .parse::() - .map_err(|_| format!("invalid hours duration: {raw}"))?; - let seconds = value - .checked_mul(60 * 60) - .ok_or_else(|| format!("duration overflow: {raw}"))?; - return Ok(Duration::from_secs(seconds)); - } - - Err(format!( - "invalid duration '{raw}'. Use plain milliseconds (e.g. 250) or suffix: ms, s, m, h" - )) -} - -fn format_duration(value: Duration) -> String { - let ms = duration_millis_u64(value); - if ms.is_multiple_of(60 * 60 * 1000) { - return format!("{}h", ms / (60 * 60 * 1000)); - } - if ms.is_multiple_of(60 * 1000) { - return format!("{}m", ms / (60 * 1000)); - } - if ms.is_multiple_of(1000) { - return format!("{}s", ms / 1000); - } - format!("{ms}ms") -} - -fn duration_millis_u64(value: Duration) -> u64 { - u64::try_from(value.as_millis()).unwrap_or(u64::MAX) -} - -fn force_zero_frame_fee_for_now( - storage: &mut storage::Storage, -) -> Result<(), Box> { - // Temporary prototype policy: keep sequencer frame fee at zero until fee estimation lands. 
- storage.set_recommended_fee(0)?; - let mut head = storage.load_open_state()?; - if head.frame_fee != 0 { - storage.close_frame_only(&mut head, 0, 0)?; - } - Ok(()) + let config = RunConfig::parse(); + run(WalletApp::new(WalletConfig), config).await } diff --git a/sequencer/src/runtime.rs b/sequencer/src/runtime.rs new file mode 100644 index 0000000..7b4f3f6 --- /dev/null +++ b/sequencer/src/runtime.rs @@ -0,0 +1,294 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use thiserror::Error; +use tracing::warn; + +use crate::api::{self, ApiConfig}; +use crate::config::RunConfig; +use crate::inclusion_lane::{InclusionLane, InclusionLaneConfig, InclusionLaneError}; +use crate::input_reader::{InputReader, InputReaderConfig, InputReaderError}; +use crate::l2_tx_feed::{L2TxFeed, L2TxFeedConfig}; +use crate::shutdown::ShutdownSignal; +use crate::storage::{self, StorageOpenError}; +use sequencer_core::application::Application; + +const SQLITE_SYNCHRONOUS_PRAGMA: &str = "NORMAL"; +const QUEUE_CAPACITY: usize = 8192; +const INPUT_READER_POLL_INTERVAL: std::time::Duration = std::time::Duration::from_secs(2); + +#[derive(Debug, Error)] +pub enum RunError { + #[error(transparent)] + OpenStorage(#[from] StorageOpenError), + #[error(transparent)] + Io(#[from] std::io::Error), + #[error("server stopped unexpectedly")] + ServerStoppedUnexpectedly, + #[error("server join error: {source}")] + ServerJoin { + #[source] + source: tokio::task::JoinError, + }, + #[error("inclusion lane stopped unexpectedly")] + InclusionLaneStoppedUnexpectedly, + #[error("inclusion lane exited: {source}")] + InclusionLane { + #[source] + source: InclusionLaneError, + }, + #[error("inclusion lane join error: {source}")] + InclusionLaneJoin { + #[source] + source: tokio::task::JoinError, + }, + #[error("input reader stopped unexpectedly")] + InputReaderStoppedUnexpectedly, + #[error("input reader exited: {source}")] + InputReader { + #[source] + 
source: InputReaderError, + }, + #[error("input reader join error: {source}")] + InputReaderJoin { + #[source] + source: tokio::task::JoinError, + }, +} + +enum FirstExit { + Signal(Option), + Server(RunError), + InclusionLane(RunError), + InputReader(RunError), +} + +pub async fn run(app: A, config: RunConfig) -> Result<(), RunError> +where + A: Application + 'static, +{ + let domain = config.build_domain(); + let shutdown = ShutdownSignal::default(); + let input_box_address = + InputReader::discover_input_box(&config.eth_rpc_url, config.domain_verifying_contract) + .await + .map_err(|source| RunError::InputReader { source })?; + let input_reader_genesis_block = + InputReader::discover_input_box_deployment_block(&config.eth_rpc_url, input_box_address) + .await + .map_err(|source| RunError::InputReader { source })?; + let input_reader_config = + build_input_reader_config(&config, input_box_address, input_reader_genesis_block); + InputReader::sync_to_current_safe_head(&config.db_path, input_reader_config.clone()) + .await + .map_err(|source| RunError::InputReader { source })?; + + tracing::info!( + http_addr = %config.http_addr, + db_path = %config.db_path, + eth_rpc_url = %config.eth_rpc_url, + input_box_address = %input_box_address, + input_reader_genesis_block, + domain_chain_id = config.domain_chain_id, + domain_verifying_contract = %config.domain_verifying_contract, + "starting sequencer" + ); + + let storage = storage::Storage::open(&config.db_path, SQLITE_SYNCHRONOUS_PRAGMA)?; + let (tx, mut inclusion_lane_handle) = InclusionLane::start( + QUEUE_CAPACITY, + shutdown.clone(), + app, + storage, + InclusionLaneConfig::for_app::(), + ); + let mut input_reader_handle = + InputReader::start(&config.db_path, input_reader_config, shutdown.clone())?; + + let tx_feed = L2TxFeed::new( + config.db_path.clone(), + shutdown.clone(), + L2TxFeedConfig::default(), + ); + + let mut server_task = api::start( + &config.http_addr, + tx, + domain, + A::MAX_METHOD_PAYLOAD_BYTES, + 
shutdown.clone(), + tx_feed, + ApiConfig::default(), + ) + .await?; + + tracing::info!(address = %config.http_addr, "listening"); + + let shutdown_signal = tokio::signal::ctrl_c(); + tokio::pin!(shutdown_signal); + + let first_exit = tokio::select! { + signal_result = &mut shutdown_signal => { + FirstExit::Signal(signal_result.err().map(RunError::from)) + } + server_result = &mut server_task => { + FirstExit::Server(map_server_exit(server_result)) + } + lane_result = &mut inclusion_lane_handle => { + FirstExit::InclusionLane(map_lane_exit(lane_result)) + } + reader_result = &mut input_reader_handle => { + FirstExit::InputReader(map_input_reader_exit(reader_result)) + } + }; + + begin_runtime_shutdown(&shutdown); + finish_runtime( + first_exit, + server_task, + inclusion_lane_handle, + input_reader_handle, + ) + .await +} + +fn begin_runtime_shutdown(shutdown: &ShutdownSignal) { + shutdown.request_shutdown(); +} + +async fn wait_for_clean_shutdown( + server_task: tokio::task::JoinHandle>, + inclusion_lane_handle: tokio::task::JoinHandle>, + input_reader_handle: tokio::task::JoinHandle>, +) -> Result<(), RunError> { + wait_for_server_shutdown(server_task).await?; + wait_for_lane_shutdown(inclusion_lane_handle).await?; + wait_for_input_reader_shutdown(input_reader_handle).await?; + Ok(()) +} + +async fn finish_runtime( + first_exit: FirstExit, + server_task: tokio::task::JoinHandle>, + inclusion_lane_handle: tokio::task::JoinHandle>, + input_reader_handle: tokio::task::JoinHandle>, +) -> Result<(), RunError> { + match first_exit { + FirstExit::Signal(signal_error) => { + let shutdown_result = + wait_for_clean_shutdown(server_task, inclusion_lane_handle, input_reader_handle) + .await; + match (signal_error, shutdown_result) { + (Some(err), _) => Err(err), + (None, Ok(())) => Ok(()), + (None, Err(err)) => Err(err), + } + } + FirstExit::Server(primary) => { + log_cleanup_result( + "inclusion lane", + wait_for_lane_shutdown(inclusion_lane_handle).await, + ); + 
log_cleanup_result( + "input reader", + wait_for_input_reader_shutdown(input_reader_handle).await, + ); + Err(primary) + } + FirstExit::InclusionLane(primary) => { + log_cleanup_result("server", wait_for_server_shutdown(server_task).await); + log_cleanup_result( + "input reader", + wait_for_input_reader_shutdown(input_reader_handle).await, + ); + Err(primary) + } + FirstExit::InputReader(primary) => { + log_cleanup_result("server", wait_for_server_shutdown(server_task).await); + log_cleanup_result( + "inclusion lane", + wait_for_lane_shutdown(inclusion_lane_handle).await, + ); + Err(primary) + } + } +} + +async fn wait_for_server_shutdown( + server_task: tokio::task::JoinHandle>, +) -> Result<(), RunError> { + match server_task.await { + Ok(Ok(())) => Ok(()), + Ok(Err(source)) => Err(RunError::Io(source)), + Err(source) => Err(RunError::ServerJoin { source }), + } +} + +async fn wait_for_lane_shutdown( + inclusion_lane_handle: tokio::task::JoinHandle>, +) -> Result<(), RunError> { + match inclusion_lane_handle.await { + Ok(Ok(())) => Ok(()), + Ok(Err(source)) => Err(RunError::InclusionLane { source }), + Err(source) => Err(RunError::InclusionLaneJoin { source }), + } +} + +async fn wait_for_input_reader_shutdown( + input_reader_handle: tokio::task::JoinHandle>, +) -> Result<(), RunError> { + match input_reader_handle.await { + Ok(Ok(())) => Ok(()), + Ok(Err(source)) => Err(RunError::InputReader { source }), + Err(source) => Err(RunError::InputReaderJoin { source }), + } +} + +fn map_server_exit(result: Result, tokio::task::JoinError>) -> RunError { + match result { + Ok(Ok(())) => RunError::ServerStoppedUnexpectedly, + Ok(Err(source)) => RunError::Io(source), + Err(source) => RunError::ServerJoin { source }, + } +} + +fn map_lane_exit( + result: Result, tokio::task::JoinError>, +) -> RunError { + match result { + Ok(Ok(())) => RunError::InclusionLaneStoppedUnexpectedly, + Ok(Err(source)) => RunError::InclusionLane { source }, + Err(source) => 
RunError::InclusionLaneJoin { source }, + } +} + +fn map_input_reader_exit( + result: Result, tokio::task::JoinError>, +) -> RunError { + match result { + Ok(Ok(())) => RunError::InputReaderStoppedUnexpectedly, + Ok(Err(source)) => RunError::InputReader { source }, + Err(source) => RunError::InputReaderJoin { source }, + } +} + +fn build_input_reader_config( + config: &RunConfig, + input_box_address: alloy_primitives::Address, + genesis_block: u64, +) -> InputReaderConfig { + InputReaderConfig { + rpc_url: config.eth_rpc_url.clone(), + input_box_address, + app_address_filter: config.domain_verifying_contract, + genesis_block, + poll_interval: INPUT_READER_POLL_INTERVAL, + long_block_range_error_codes: config.long_block_range_error_codes.clone(), + } +} + +fn log_cleanup_result(component: &str, result: Result<(), RunError>) { + if let Err(err) = result { + warn!(component, error = %err, "component shutdown after primary failure also errored"); + } +} diff --git a/sequencer/src/shutdown.rs b/sequencer/src/shutdown.rs new file mode 100644 index 0000000..22e8215 --- /dev/null +++ b/sequencer/src/shutdown.rs @@ -0,0 +1,43 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; + +use tokio::sync::Notify; + +#[derive(Clone, Default)] +pub struct ShutdownSignal { + is_shutting_down: Arc, + notify: Arc, +} + +impl ShutdownSignal { + pub fn request_shutdown(&self) { + let was_shutting_down = self.is_shutting_down.swap(true, Ordering::SeqCst); + if !was_shutting_down { + self.notify.notify_waiters(); + } + } + + pub fn is_shutdown_requested(&self) -> bool { + self.is_shutting_down.load(Ordering::SeqCst) + } + + pub async fn wait_for_shutdown(&self) { + if self.is_shutdown_requested() { + return; + } + + loop { + let notified = self.notify.notified(); + if self.is_shutdown_requested() { + return; + } + notified.await; + if self.is_shutdown_requested() { 
+ return; + } + } + } +} diff --git a/sequencer/src/storage/db.rs b/sequencer/src/storage/db.rs index 4b37305..0f4fb67 100644 --- a/sequencer/src/storage/db.rs +++ b/sequencer/src/storage/db.rs @@ -7,16 +7,15 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use super::sql::{ sql_count_user_ops_for_frame, sql_insert_direct_inputs_batch, sql_insert_open_batch, - sql_insert_open_frame, sql_insert_sequenced_direct_inputs_for_frame, - sql_insert_user_ops_and_sequenced_batch, sql_select_last_processed_block, + sql_insert_open_batch_with_index, sql_insert_open_frame, + sql_insert_sequenced_direct_inputs_for_frame, sql_insert_user_ops_and_sequenced_batch, sql_select_latest_batch_with_user_op_count, sql_select_latest_frame_in_batch_for_batch, sql_select_max_direct_input_index, sql_select_ordered_l2_tx_count, sql_select_ordered_l2_txs_from_offset, sql_select_ordered_l2_txs_page_from_offset, - sql_select_recommended_fee, sql_select_safe_inputs_range, - sql_select_total_drained_direct_inputs, sql_update_last_processed_block, - sql_update_recommended_fee, + sql_select_recommended_fee, sql_select_safe_block, sql_select_safe_inputs_range, + sql_select_total_drained_direct_inputs, sql_update_recommended_fee, sql_update_safe_block, }; -use super::{IndexedDirectInput, StorageOpenError, WriteHead}; +use super::{DirectInputRange, SafeFrontier, StorageOpenError, StoredDirectInput, WriteHead}; use crate::inclusion_lane::PendingUserOp; use alloy_primitives::Address; use sequencer_core::l2_tx::{DirectInput, SequencedL2Tx, ValidUserOp}; @@ -87,26 +86,44 @@ impl Storage { Ok(i64_to_u64(value)) } - /// Last block number from which safe (direct) inputs have been read. Used by InputReader to resume chain sync. 
- pub fn input_reader_last_processed_block(&mut self) -> Result { - let value = sql_select_last_processed_block(&self.conn)?; + pub fn safe_input_end_exclusive(&mut self) -> Result { + let value = sql_select_max_direct_input_index(&self.conn)?; + Ok(match value { + Some(last_index) => i64_to_u64(last_index).saturating_add(1), + None => 0, + }) + } + + pub fn current_safe_block(&mut self) -> Result { + let value = sql_select_safe_block(&self.conn)?; Ok(i64_to_u64(value)) } - /// Set the last block number processed by the InputReader. Callers must pass a block greater than the current value. - pub fn input_reader_set_last_processed_block(&mut self, block: u64) -> Result<()> { - let changed = sql_update_last_processed_block(&self.conn, u64_to_i64(block))?; - if changed != 1 { - return Err(rusqlite::Error::StatementChangedRows(changed)); + pub fn ensure_minimum_safe_block(&mut self, minimum_safe_block: u64) -> Result<()> { + let tx = self + .conn + .transaction_with_behavior(TransactionBehavior::Immediate)?; + let current_safe_block = query_current_safe_block(&tx)?; + if current_safe_block < minimum_safe_block { + let changed_rows = sql_update_safe_block(&tx, u64_to_i64(minimum_safe_block))?; + if changed_rows != 1 { + return Err(rusqlite::Error::StatementChangedRows(changed_rows)); + } } + tx.commit()?; Ok(()) } - pub fn safe_input_end_exclusive(&mut self) -> Result { - let value = sql_select_max_direct_input_index(&self.conn)?; - Ok(match value { - Some(last_index) => i64_to_u64(last_index).saturating_add(1), - None => 0, + pub fn load_safe_frontier(&mut self) -> Result { + let tx = self + .conn + .transaction_with_behavior(TransactionBehavior::Deferred)?; + let safe_block = query_current_safe_block(&tx)?; + let end_exclusive = query_latest_direct_input_index_exclusive(&tx)?; + tx.commit()?; + Ok(SafeFrontier { + safe_block, + end_exclusive, }) } @@ -114,7 +131,7 @@ impl Storage { &mut self, from_inclusive: u64, to_exclusive: u64, - out: &mut Vec, + out: &mut Vec, ) 
-> Result<()> { assert!( from_inclusive <= to_exclusive, @@ -141,8 +158,7 @@ impl Storage { "non-contiguous safe-input index: expected {expected}, found {index}" ); - out.push(IndexedDirectInput { - index, + out.push(StoredDirectInput { payload: row.payload, block_number: i64_to_u64(row.block_number), }); @@ -158,63 +174,37 @@ impl Storage { Ok(()) } - pub fn append_safe_direct_inputs(&mut self, inputs: &[IndexedDirectInput]) -> Result<()> { - if inputs.is_empty() { - return Ok(()); - } - - let tx = self - .conn - .transaction_with_behavior(TransactionBehavior::Immediate)?; - - let mut next_expected = query_latest_direct_input_index_exclusive(&tx)?; - for input in inputs { - assert_eq!( - input.index, next_expected, - "direct input index must be contiguous from storage head" - ); - next_expected = next_expected.saturating_add(1); - } - - sql_insert_direct_inputs_batch(&tx, inputs)?; - - tx.commit()?; - Ok(()) - } - - /// Appends safe direct inputs and advances the input-reader cursor in a single transaction. - /// Use this when persisting inputs from the chain so cursor and inputs stay in sync on failure. 
- pub fn append_safe_inputs_and_advance_cursor( + pub fn append_safe_direct_inputs( &mut self, - inputs: &[IndexedDirectInput], - new_last_processed_block: u64, + safe_block: u64, + inputs: &[StoredDirectInput], ) -> Result<()> { let tx = self .conn .transaction_with_behavior(TransactionBehavior::Immediate)?; - if !inputs.is_empty() { - let mut next_expected = query_latest_direct_input_index_exclusive(&tx)?; - for input in inputs { - assert_eq!( - input.index, next_expected, - "direct input index must be contiguous from storage head" - ); - next_expected = next_expected.saturating_add(1); - } - sql_insert_direct_inputs_batch(&tx, inputs)?; - } + let current_safe_block = query_current_safe_block(&tx)?; + assert!( + safe_block >= current_safe_block, + "safe block regressed: current={current_safe_block}, next={safe_block}" + ); + assert!( + safe_block > current_safe_block || inputs.is_empty(), + "safe block must advance when appending new safe direct inputs" + ); - let changed = sql_update_last_processed_block(&tx, u64_to_i64(new_last_processed_block))?; - if changed != 1 { - return Err(rusqlite::Error::StatementChangedRows(changed)); + let next_expected = query_latest_direct_input_index_exclusive(&tx)?; + sql_insert_direct_inputs_batch(&tx, next_expected, inputs)?; + let changed_rows = sql_update_safe_block(&tx, u64_to_i64(safe_block))?; + if changed_rows != 1 { + return Err(rusqlite::Error::StatementChangedRows(changed_rows)); } tx.commit()?; Ok(()) } - pub fn load_open_state(&mut self) -> Result { + pub fn load_open_state(&mut self) -> Result> { let tx = self .conn .transaction_with_behavior(TransactionBehavior::Deferred)?; @@ -223,6 +213,37 @@ impl Storage { Ok(head) } + pub fn initialize_open_state( + &mut self, + safe_block: u64, + leading_direct_range: DirectInputRange, + ) -> Result { + let tx = self + .conn + .transaction_with_behavior(TransactionBehavior::Immediate)?; + assert!( + load_current_write_head(&tx)?.is_none(), + "open state already exists" + ); + 
+ let now_ms = now_unix_ms(); + let frame_fee = query_recommended_fee(&tx)?; + insert_open_batch_with_index(&tx, 0, now_ms)?; + insert_open_frame(&tx, 0, 0, now_ms, frame_fee, safe_block)?; + persist_frame_direct_sequence(&tx, 0, 0, leading_direct_range)?; + tx.commit()?; + + Ok(WriteHead { + batch_index: 0, + batch_created_at: from_unix_ms(now_ms), + frame_fee, + safe_block, + batch_user_op_count: 0, + open_frame_user_op_count: 0, + frame_in_batch: 0, + }) + } + pub fn recommended_fee(&mut self) -> Result { let value = sql_select_recommended_fee(&self.conn)?; Ok(i64_to_u64(value)) @@ -252,14 +273,11 @@ impl Storage { // observe the same database snapshot. assert_write_head_matches_open_state(&tx, head)?; - let frame_user_op_count = - query_frame_user_op_count(&tx, head.batch_index, head.frame_in_batch)?; - sql_insert_user_ops_and_sequenced_batch( &tx, u64_to_i64(head.batch_index), i64::from(head.frame_in_batch), - frame_user_op_count, + head.open_frame_user_op_count, user_ops, )?; @@ -271,8 +289,8 @@ impl Storage { pub fn close_frame_only( &mut self, head: &mut WriteHead, - drained_direct_start_index: u64, - drained_direct_count: usize, + next_safe_block: u64, + leading_direct_range: DirectInputRange, ) -> Result<()> { let tx = self .conn @@ -280,7 +298,6 @@ impl Storage { assert_write_head_matches_open_state(&tx, head)?; let now_ms = now_unix_ms(); let next_frame_fee = query_recommended_fee(&tx)?; - persist_frame_direct_sequence(&tx, head, drained_direct_start_index, drained_direct_count)?; let next_frame_in_batch = head.frame_in_batch.saturating_add(1); insert_open_frame( &tx, @@ -288,17 +305,23 @@ impl Storage { next_frame_in_batch, now_ms, next_frame_fee, + next_safe_block, + )?; + persist_frame_direct_sequence( + &tx, + head.batch_index, + next_frame_in_batch, + leading_direct_range, )?; tx.commit()?; - head.advance_frame(next_frame_fee); + head.advance_frame(next_frame_fee, next_safe_block); Ok(()) } pub fn close_frame_and_batch( &mut self, head: &mut 
WriteHead, - drained_direct_start_index: u64, - drained_direct_count: usize, + next_safe_block: u64, ) -> Result<()> { let tx = self .conn @@ -308,16 +331,27 @@ impl Storage { // Frame fee is committed here: we sample the current recommendation once and // assign it to the newly opened frame. let next_frame_fee = query_recommended_fee(&tx)?; - persist_frame_direct_sequence(&tx, head, drained_direct_start_index, drained_direct_count)?; let next_batch_index = insert_open_batch(&tx, now_ms)?; - insert_open_frame(&tx, next_batch_index, 0, now_ms, next_frame_fee)?; + insert_open_frame( + &tx, + next_batch_index, + 0, + now_ms, + next_frame_fee, + next_safe_block, + )?; tx.commit()?; - head.move_to_next_batch(next_batch_index, from_unix_ms(now_ms), next_frame_fee); + head.move_to_next_batch( + next_batch_index, + from_unix_ms(now_ms), + next_frame_fee, + next_safe_block, + ); Ok(()) } pub fn load_ordered_l2_txs_from(&mut self, offset: u64) -> Result> { - // Read the persisted total order used by catch-up and downstream broadcasters. + // Read the persisted total order used by catch-up and downstream feed readers. let rows = sql_select_ordered_l2_txs_from_offset(&self.conn, u64_to_i64(offset))?; Ok(decode_ordered_l2_txs(rows)) } @@ -375,20 +409,25 @@ fn decode_ordered_l2_txs(rows: Vec) -> Vec) -> Result { - let (batch_index, batch_created_at, batch_user_op_count) = query_latest_batch(tx)?; - let (frame_in_batch, frame_fee) = query_latest_frame_in_batch(tx, batch_index)?; - Ok(WriteHead { +fn load_current_write_head(tx: &Transaction<'_>) -> Result> { + let Some((batch_index, batch_created_at, batch_user_op_count)) = query_latest_batch(tx)? 
else { + return Ok(None); + }; + let (frame_in_batch, frame_fee, safe_block) = query_latest_frame_in_batch(tx, batch_index)?; + let open_frame_user_op_count = query_frame_user_op_count(tx, batch_index, frame_in_batch)?; + Ok(Some(WriteHead { batch_index, batch_created_at, frame_fee, + safe_block, batch_user_op_count, + open_frame_user_op_count, frame_in_batch, - }) + })) } fn assert_write_head_matches_open_state(tx: &Transaction<'_>, expected: &WriteHead) -> Result<()> { - let actual = load_current_write_head(tx)?; + let actual = load_current_write_head(tx)?.expect("stale WriteHead: storage has no open state"); assert_eq!( expected.batch_index, actual.batch_index, "stale WriteHead: batch_index mismatch" @@ -401,10 +440,18 @@ fn assert_write_head_matches_open_state(tx: &Transaction<'_>, expected: &WriteHe expected.batch_user_op_count, actual.batch_user_op_count, "stale WriteHead: batch_user_op_count mismatch" ); + assert_eq!( + expected.open_frame_user_op_count, actual.open_frame_user_op_count, + "stale WriteHead: open_frame_user_op_count mismatch" + ); assert_eq!( expected.frame_fee, actual.frame_fee, "stale WriteHead: frame_fee mismatch" ); + assert_eq!( + expected.safe_block, actual.safe_block, + "stale WriteHead: safe_block mismatch" + ); assert_eq!( to_unix_ms(expected.batch_created_at), to_unix_ms(actual.batch_created_at), @@ -413,20 +460,26 @@ fn assert_write_head_matches_open_state(tx: &Transaction<'_>, expected: &WriteHe Ok(()) } -fn query_latest_batch(tx: &Transaction<'_>) -> Result<(u64, SystemTime, u64)> { - let (batch_index, batch_created_at_ms, batch_user_op_count) = - sql_select_latest_batch_with_user_op_count(tx)?; - Ok(( - i64_to_u64(batch_index), - from_unix_ms(batch_created_at_ms), - i64_to_u64(batch_user_op_count), - )) +fn query_latest_batch(tx: &Transaction<'_>) -> Result> { + match sql_select_latest_batch_with_user_op_count(tx) { + Ok((batch_index, batch_created_at_ms, batch_user_op_count)) => Ok(Some(( + i64_to_u64(batch_index), + 
from_unix_ms(batch_created_at_ms), + i64_to_u64(batch_user_op_count), + ))), + Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), + Err(source) => Err(source), + } } -fn query_latest_frame_in_batch(tx: &Transaction<'_>, batch_index: u64) -> Result<(u32, u64)> { - let (frame_in_batch, frame_fee) = +fn query_latest_frame_in_batch(tx: &Transaction<'_>, batch_index: u64) -> Result<(u32, u64, u64)> { + let (frame_in_batch, frame_fee, safe_block) = sql_select_latest_frame_in_batch_for_batch(tx, u64_to_i64(batch_index))?; - Ok((i64_to_u32(frame_in_batch), i64_to_u64(frame_fee))) + Ok(( + i64_to_u32(frame_in_batch), + i64_to_u64(frame_fee), + i64_to_u64(safe_block), + )) } fn query_frame_user_op_count( @@ -447,6 +500,11 @@ fn query_latest_direct_input_index_exclusive(tx: &Connection) -> Result { }) } +fn query_current_safe_block(tx: &Connection) -> Result { + let value = sql_select_safe_block(tx)?; + Ok(i64_to_u64(value)) +} + fn query_recommended_fee(tx: &Transaction<'_>) -> Result { let value = sql_select_recommended_fee(tx)?; Ok(i64_to_u64(value)) @@ -454,16 +512,15 @@ fn query_recommended_fee(tx: &Transaction<'_>) -> Result { fn persist_frame_direct_sequence( tx: &Transaction<'_>, - head: &WriteHead, - drained_direct_start_index: u64, - drained_direct_count: usize, + batch_index: u64, + frame_in_batch: u32, + drained_direct_range: DirectInputRange, ) -> Result<()> { sql_insert_sequenced_direct_inputs_for_frame( tx, - u64_to_i64(head.batch_index), - i64::from(head.frame_in_batch), - drained_direct_start_index, - drained_direct_count, + u64_to_i64(batch_index), + i64::from(frame_in_batch), + drained_direct_range, ) } @@ -472,12 +529,22 @@ fn insert_open_batch(tx: &Transaction<'_>, created_at_ms: i64) -> Result { Ok(i64_to_u64(tx.last_insert_rowid())) } +fn insert_open_batch_with_index( + tx: &Transaction<'_>, + batch_index: u64, + created_at_ms: i64, +) -> Result<()> { + sql_insert_open_batch_with_index(tx, u64_to_i64(batch_index), created_at_ms)?; + Ok(()) +} + fn 
insert_open_frame( tx: &Transaction<'_>, batch_index: u64, frame_in_batch: u32, created_at_ms: i64, frame_fee: u64, + safe_block: u64, ) -> Result<()> { sql_insert_open_frame( tx, @@ -485,6 +552,7 @@ fn insert_open_frame( i64::from(frame_in_batch), created_at_ms, u64_to_i64(frame_fee), + u64_to_i64(safe_block), )?; Ok(()) } @@ -525,7 +593,7 @@ fn i64_to_u32(value: i64) -> u32 { #[cfg(test)] mod tests { use super::Storage; - use crate::storage::IndexedDirectInput; + use crate::storage::{DirectInputRange, StoredDirectInput}; use sequencer_core::l2_tx::SequencedL2Tx; use tempfile::TempDir; @@ -551,8 +619,21 @@ mod tests { let db = temp_db("open-state"); let mut storage = Storage::open(db.path.as_str(), "NORMAL").expect("open storage"); - let head_a = storage.load_open_state().expect("load open state"); - let head_b = storage.load_open_state().expect("load existing open state"); + assert!( + storage + .load_open_state() + .expect("load open state") + .is_none(), + "fresh storage should not have an open frame yet" + ); + + let head_a = storage + .initialize_open_state(0, DirectInputRange::empty_at(0)) + .expect("initialize open state"); + let head_b = storage + .load_open_state() + .expect("load existing open state") + .expect("open state should now exist"); assert_eq!(head_a.batch_index, head_b.batch_index); assert_eq!(head_a.frame_in_batch, head_b.frame_in_batch); @@ -560,37 +641,22 @@ mod tests { assert_eq!(head_a.frame_fee, 0); let mut head_c = head_b; + let next_safe_block = head_c.safe_block; storage - .close_frame_only(&mut head_c, 0, 0) + .close_frame_only(&mut head_c, next_safe_block, DirectInputRange::empty_at(0)) .expect("rotate within same batch"); assert_eq!(head_c.batch_index, head_b.batch_index); assert_eq!(head_c.frame_in_batch, 1); let mut head_d = head_c; + let next_safe_block = head_d.safe_block; storage - .close_frame_and_batch(&mut head_d, 0, 0) + .close_frame_and_batch(&mut head_d, next_safe_block) .expect("close batch and rotate"); 
assert!(head_d.batch_index > head_c.batch_index); assert_eq!(head_d.frame_in_batch, 0); } - #[test] - fn input_reader_last_processed_block_defaults_and_advances() { - let db = temp_db("input-reader-state"); - let mut storage = Storage::open(db.path.as_str(), "NORMAL").expect("open storage"); - assert_eq!( - storage.input_reader_last_processed_block().expect("read"), - 0 - ); - storage - .input_reader_set_last_processed_block(100) - .expect("set"); - assert_eq!( - storage.input_reader_last_processed_block().expect("read"), - 100 - ); - } - #[test] fn next_frame_fee_comes_from_recommended_fee_singleton() { let db = temp_db("recommended-fee"); @@ -599,9 +665,12 @@ mod tests { storage.set_recommended_fee(7).expect("set recommended fee"); - let mut head = storage.load_open_state().expect("load open state"); + let mut head = storage + .initialize_open_state(0, DirectInputRange::empty_at(0)) + .expect("initialize open state"); + let next_safe_block = head.safe_block; storage - .close_frame_and_batch(&mut head, 0, 0) + .close_frame_and_batch(&mut head, next_safe_block) .expect("rotate batch"); assert_eq!(head.frame_fee, 7); @@ -612,26 +681,30 @@ mod tests { fn replay_returns_direct_inputs_in_drain_order() { let db = temp_db("replay-order"); let mut storage = Storage::open(db.path.as_str(), "NORMAL").expect("open storage"); - let head = storage.load_open_state().expect("load open state"); + let head = storage + .initialize_open_state(0, DirectInputRange::empty_at(0)) + .expect("initialize open state"); let drained = vec![ - IndexedDirectInput { - index: 0, + StoredDirectInput { payload: vec![0xaa], - block_number: 0, + block_number: 10, }, - IndexedDirectInput { - index: 1, + StoredDirectInput { payload: vec![0xbb], - block_number: 0, + block_number: 10, }, ]; storage - .append_safe_direct_inputs(drained.as_slice()) + .append_safe_direct_inputs(10, drained.as_slice()) .expect("insert direct inputs"); let mut head = head; storage - .close_frame_only(&mut head, 0, 
drained.len()) + .close_frame_only( + &mut head, + 10, + DirectInputRange::new(0, drained.len() as u64), + ) .expect("close frame with directs"); let replay = storage.load_ordered_l2_txs_from(0).expect("load replay"); @@ -657,25 +730,29 @@ mod tests { 0 ); - let head = storage.load_open_state().expect("load open state"); + let head = storage + .initialize_open_state(0, DirectInputRange::empty_at(0)) + .expect("initialize open state"); let drained = vec![ - IndexedDirectInput { - index: 0, + StoredDirectInput { payload: vec![0x01], - block_number: 0, + block_number: 10, }, - IndexedDirectInput { - index: 1, + StoredDirectInput { payload: vec![0x02], - block_number: 0, + block_number: 10, }, ]; storage - .append_safe_direct_inputs(drained.as_slice()) + .append_safe_direct_inputs(10, drained.as_slice()) .expect("insert direct inputs"); let mut head = head; storage - .close_frame_only(&mut head, 0, drained.len()) + .close_frame_only( + &mut head, + 10, + DirectInputRange::new(0, drained.len() as u64), + ) .expect("close frame with directs"); assert_eq!( @@ -699,19 +776,17 @@ mod tests { assert!(out.is_empty()); let inserted = vec![ - IndexedDirectInput { - index: 0, + StoredDirectInput { payload: vec![0xa0], - block_number: 0, + block_number: 10, }, - IndexedDirectInput { - index: 1, + StoredDirectInput { payload: vec![0xb1], - block_number: 0, + block_number: 10, }, ]; storage - .append_safe_direct_inputs(inserted.as_slice()) + .append_safe_direct_inputs(10, inserted.as_slice()) .expect("insert safe directs"); assert_eq!(storage.safe_input_end_exclusive().expect("safe head"), 2); @@ -727,4 +802,42 @@ mod tests { .expect("query empty half-open interval"); assert!(out.is_empty()); } + + #[test] + fn ensure_minimum_safe_block_only_moves_forward() { + let db = temp_db("ensure-min-safe-block"); + let mut storage = Storage::open(db.path.as_str(), "NORMAL").expect("open storage"); + + storage + .ensure_minimum_safe_block(7) + .expect("advance bootstrap safe head"); + 
assert_eq!(storage.current_safe_block().expect("read advanced"), 7); + + storage + .ensure_minimum_safe_block(3) + .expect("do not regress bootstrap safe head"); + assert_eq!(storage.current_safe_block().expect("read unchanged"), 7); + } + + #[test] + fn initialize_open_state_creates_first_real_batch_and_frame() { + let db = temp_db("initialize-open-state"); + let mut storage = Storage::open(db.path.as_str(), "NORMAL").expect("open storage"); + + let head = storage + .initialize_open_state(12, DirectInputRange::empty_at(0)) + .expect("initialize open state"); + + assert_eq!(head.batch_index, 0); + assert_eq!(head.frame_in_batch, 0); + assert_eq!(head.safe_block, 12); + + let loaded = storage + .load_open_state() + .expect("load open state") + .expect("open state should exist"); + assert_eq!(loaded.batch_index, 0); + assert_eq!(loaded.frame_in_batch, 0); + assert_eq!(loaded.safe_block, 12); + } } diff --git a/sequencer/src/storage/migrations/0001_schema.sql b/sequencer/src/storage/migrations/0001_schema.sql index 257c25b..0587652 100644 --- a/sequencer/src/storage/migrations/0001_schema.sql +++ b/sequencer/src/storage/migrations/0001_schema.sql @@ -9,6 +9,8 @@ CREATE TABLE IF NOT EXISTS frames ( created_at_ms INTEGER NOT NULL, -- Fee committed by the sequencer for this whole frame. fee INTEGER NOT NULL CHECK (fee >= 0), + -- Claimed safe L1 block frontier for this frame. + safe_block INTEGER NOT NULL CHECK (safe_block >= 0), PRIMARY KEY(batch_index, frame_in_batch) ); @@ -31,11 +33,11 @@ CREATE TABLE IF NOT EXISTS direct_inputs ( direct_input_index INTEGER PRIMARY KEY, payload BLOB NOT NULL, -- Block number of the chain block where this direct input was included (e.g. InputAdded event block). - block_number INTEGER NOT NULL DEFAULT 0 + block_number INTEGER NOT NULL CHECK (block_number >= 0) ); CREATE TABLE IF NOT EXISTS sequenced_l2_txs ( - -- Global append-only replay order consumed by catch-up and broadcaster. 
+ -- Global append-only replay order consumed by catch-up and feed readers. offset INTEGER PRIMARY KEY, batch_index INTEGER NOT NULL, frame_in_batch INTEGER NOT NULL, @@ -68,26 +70,20 @@ CREATE TABLE IF NOT EXISTS sequenced_l2_txs ( CREATE INDEX IF NOT EXISTS idx_sequenced_l2_txs_frame ON sequenced_l2_txs(batch_index, frame_in_batch); +CREATE TABLE IF NOT EXISTS l1_safe_head ( + singleton_id INTEGER PRIMARY KEY CHECK (singleton_id = 0), + -- Highest L1 safe block the input reader has observed and atomically synced into storage. + block_number INTEGER NOT NULL CHECK (block_number >= 0) +); + +INSERT OR IGNORE INTO l1_safe_head (singleton_id, block_number) +VALUES (0, 0); + CREATE TABLE IF NOT EXISTS recommended_fees ( singleton_id INTEGER PRIMARY KEY CHECK (singleton_id = 0), -- Mutable recommendation consumed when opening the next frame. fee INTEGER NOT NULL CHECK (fee >= 0) ); --- Input reader: chain sync cursor (last safe block from which direct inputs have been read). -CREATE TABLE IF NOT EXISTS input_reader_state ( - singleton_id INTEGER PRIMARY KEY CHECK (singleton_id = 0), - last_processed_block INTEGER NOT NULL CHECK (last_processed_block >= 0) -); - INSERT OR IGNORE INTO recommended_fees (singleton_id, fee) VALUES (0, 0); - -INSERT OR IGNORE INTO input_reader_state (singleton_id, last_processed_block) -VALUES (0, 0); - -INSERT OR IGNORE INTO batches (batch_index, created_at_ms) -VALUES (0, 0); - -INSERT OR IGNORE INTO frames (batch_index, frame_in_batch, created_at_ms, fee) -VALUES (0, 0, 0, 0); diff --git a/sequencer/src/storage/mod.rs b/sequencer/src/storage/mod.rs index 4677da7..9b3011a 100644 --- a/sequencer/src/storage/mod.rs +++ b/sequencer/src/storage/mod.rs @@ -10,13 +10,49 @@ use thiserror::Error; pub use db::Storage; #[derive(Debug, Clone, PartialEq, Eq)] -pub struct IndexedDirectInput { - pub index: u64, +pub struct StoredDirectInput { pub payload: Vec, /// Chain block number where this input was included (e.g. InputAdded event block). 
pub block_number: u64, } +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct DirectInputRange { + pub start_inclusive: u64, + pub end_exclusive: u64, +} + +impl DirectInputRange { + pub fn new(start_inclusive: u64, end_exclusive: u64) -> Self { + assert!( + end_exclusive >= start_inclusive, + "direct-input range must be half-open and non-negative: start={start_inclusive}, end={end_exclusive}" + ); + Self { + start_inclusive, + end_exclusive, + } + } + + pub fn empty_at(index: u64) -> Self { + Self::new(index, index) + } + + pub fn advance_to(self, end_exclusive: u64) -> Self { + Self::new(self.end_exclusive, end_exclusive) + } + + pub fn is_empty(self) -> bool { + self.start_inclusive == self.end_exclusive + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct SafeFrontier { + pub safe_block: u64, + pub end_exclusive: u64, +} + #[derive(Debug, Error)] pub enum StorageOpenError { #[error(transparent)] @@ -31,18 +67,27 @@ pub struct WriteHead { pub batch_created_at: SystemTime, // Sequencer-chosen fee committed for this open frame. 
pub frame_fee: u64, + pub safe_block: u64, pub batch_user_op_count: u64, + pub open_frame_user_op_count: u32, pub frame_in_batch: u32, } impl WriteHead { pub fn increment_batch_user_op_count(&mut self, count: usize) { self.batch_user_op_count = self.batch_user_op_count.saturating_add(count as u64); + self.open_frame_user_op_count = self.open_frame_user_op_count.saturating_add(count as u32); + } + + pub fn open_frame_has_user_ops(&self) -> bool { + self.open_frame_user_op_count > 0 } - pub fn advance_frame(&mut self, frame_fee: u64) { + pub fn advance_frame(&mut self, frame_fee: u64, safe_block: u64) { self.frame_in_batch = self.frame_in_batch.saturating_add(1); self.frame_fee = frame_fee; + self.safe_block = safe_block; + self.open_frame_user_op_count = 0; } pub fn move_to_next_batch( @@ -50,11 +95,14 @@ impl WriteHead { batch_index: u64, batch_created_at: SystemTime, frame_fee: u64, + safe_block: u64, ) { self.batch_index = batch_index; self.batch_created_at = batch_created_at; self.frame_fee = frame_fee; + self.safe_block = safe_block; self.batch_user_op_count = 0; + self.open_frame_user_op_count = 0; self.frame_in_batch = 0; } } diff --git a/sequencer/src/storage/queries/select_latest_frame_in_batch_for_batch.sql b/sequencer/src/storage/queries/select_latest_frame_in_batch_for_batch.sql index 759aff6..c2b5a43 100644 --- a/sequencer/src/storage/queries/select_latest_frame_in_batch_for_batch.sql +++ b/sequencer/src/storage/queries/select_latest_frame_in_batch_for_batch.sql @@ -1,6 +1,7 @@ SELECT f.frame_in_batch, - f.fee + f.fee, + f.safe_block FROM frames f WHERE f.batch_index = ?1 ORDER BY f.frame_in_batch DESC diff --git a/sequencer/src/storage/queries/select_ordered_l2_txs_from_offset.sql b/sequencer/src/storage/queries/select_ordered_l2_txs_from_offset.sql index 941fcd6..271fcb8 100644 --- a/sequencer/src/storage/queries/select_ordered_l2_txs_from_offset.sql +++ b/sequencer/src/storage/queries/select_ordered_l2_txs_from_offset.sql @@ -14,5 +14,5 @@ LEFT JOIN 
frames f AND f.frame_in_batch = s.frame_in_batch LEFT JOIN direct_inputs d ON d.direct_input_index = s.direct_input_index -WHERE s.offset >= ?1 +WHERE s.offset > ?1 ORDER BY s.offset ASC diff --git a/sequencer/src/storage/queries/select_ordered_l2_txs_page_from_offset.sql b/sequencer/src/storage/queries/select_ordered_l2_txs_page_from_offset.sql index ded3cce..3f752ff 100644 --- a/sequencer/src/storage/queries/select_ordered_l2_txs_page_from_offset.sql +++ b/sequencer/src/storage/queries/select_ordered_l2_txs_page_from_offset.sql @@ -14,6 +14,6 @@ LEFT JOIN frames f AND f.frame_in_batch = s.frame_in_batch LEFT JOIN direct_inputs d ON d.direct_input_index = s.direct_input_index -WHERE s.offset >= ?1 +WHERE s.offset > ?1 ORDER BY s.offset ASC LIMIT ?2 diff --git a/sequencer/src/storage/sql.rs b/sequencer/src/storage/sql.rs index 76ab9ec..5e5cbe8 100644 --- a/sequencer/src/storage/sql.rs +++ b/sequencer/src/storage/sql.rs @@ -4,7 +4,7 @@ use rusqlite::{Connection, Result, Row, Transaction, params}; use std::time::{SystemTime, UNIX_EPOCH}; -use super::IndexedDirectInput; +use super::{DirectInputRange, StoredDirectInput}; use crate::inclusion_lane::PendingUserOp; const SQL_SELECT_SAFE_INPUTS_RANGE: &str = include_str!("queries/select_safe_inputs_range.sql"); @@ -22,6 +22,8 @@ const SQL_SELECT_MAX_DIRECT_INPUT_INDEX: &str = "SELECT MAX(direct_input_index) const SQL_SELECT_ORDERED_L2_TX_COUNT: &str = "SELECT COUNT(*) FROM sequenced_l2_txs"; const SQL_SELECT_RECOMMENDED_FEE: &str = "SELECT fee FROM recommended_fees WHERE singleton_id = 0 LIMIT 1"; +const SQL_SELECT_SAFE_BLOCK: &str = + "SELECT block_number FROM l1_safe_head WHERE singleton_id = 0 LIMIT 1"; const SQL_INSERT_DIRECT_INPUT: &str = "INSERT INTO direct_inputs (direct_input_index, payload, block_number) VALUES (?1, ?2, ?3)"; const SQL_INSERT_USER_OP: &str = include_str!("queries/insert_user_op.sql"); @@ -30,10 +32,8 @@ const SQL_INSERT_SEQUENCED_DIRECT_INPUT: &str = 
include_str!("queries/insert_sequenced_direct_input.sql"); const SQL_UPDATE_RECOMMENDED_FEE: &str = "UPDATE recommended_fees SET fee = ?1 WHERE singleton_id = 0"; -const SQL_SELECT_LAST_PROCESSED_BLOCK: &str = - "SELECT last_processed_block FROM input_reader_state WHERE singleton_id = 0 LIMIT 1"; -const SQL_UPDATE_LAST_PROCESSED_BLOCK: &str = - "UPDATE input_reader_state SET last_processed_block = ?1 WHERE singleton_id = 0"; +const SQL_UPDATE_SAFE_BLOCK: &str = + "UPDATE l1_safe_head SET block_number = ?1 WHERE singleton_id = 0"; #[derive(Debug, Clone)] pub(super) struct OrderedL2TxRow { @@ -72,12 +72,12 @@ pub(super) fn sql_update_recommended_fee(conn: &Connection, fee: i64) -> Result< conn.execute(SQL_UPDATE_RECOMMENDED_FEE, params![fee]) } -pub(super) fn sql_select_last_processed_block(conn: &Connection) -> Result { - conn.query_row(SQL_SELECT_LAST_PROCESSED_BLOCK, [], |row| row.get(0)) +pub(super) fn sql_select_safe_block(conn: &Connection) -> Result { + conn.query_row(SQL_SELECT_SAFE_BLOCK, [], |row| row.get(0)) } -pub(super) fn sql_update_last_processed_block(conn: &Connection, block: i64) -> Result { - conn.execute(SQL_UPDATE_LAST_PROCESSED_BLOCK, params![block]) +pub(super) fn sql_update_safe_block(conn: &Connection, safe_block: i64) -> Result { + conn.execute(SQL_UPDATE_SAFE_BLOCK, params![safe_block]) } pub(super) fn sql_select_safe_inputs_range( @@ -95,18 +95,19 @@ pub(super) fn sql_select_safe_inputs_range( pub(super) fn sql_insert_direct_inputs_batch( tx: &Transaction<'_>, - direct_inputs: &[IndexedDirectInput], + start_index: u64, + direct_inputs: &[StoredDirectInput], ) -> Result<()> { if direct_inputs.is_empty() { return Ok(()); } let mut stmt = tx.prepare_cached(SQL_INSERT_DIRECT_INPUT)?; - for input in direct_inputs { + for (offset, input) in direct_inputs.iter().enumerate() { stmt.execute(params![ - u64_to_i64(input.index), + u64_to_i64(start_index.saturating_add(offset as u64)), input.payload.as_slice(), - u64_to_i64(input.block_number), + 
u64_to_i64(input.block_number) ])?; } Ok(()) @@ -152,16 +153,14 @@ pub(super) fn sql_insert_sequenced_direct_inputs( tx: &Transaction<'_>, batch_index: i64, frame_in_batch: i64, - direct_start_index: u64, - direct_input_count: usize, + direct_range: DirectInputRange, ) -> Result<()> { - if direct_input_count == 0 { + if direct_range.is_empty() { return Ok(()); } let mut stmt = tx.prepare_cached(SQL_INSERT_SEQUENCED_DIRECT_INPUT)?; - for offset in 0..direct_input_count { - let direct_input_index = direct_start_index.saturating_add(offset as u64); + for direct_input_index in direct_range.start_inclusive..direct_range.end_exclusive { stmt.execute(params![ batch_index, frame_in_batch, @@ -207,11 +206,11 @@ pub(super) fn sql_select_latest_batch_with_user_op_count( pub(super) fn sql_select_latest_frame_in_batch_for_batch( tx: &Transaction<'_>, batch_index: i64, -) -> Result<(i64, i64)> { +) -> Result<(i64, i64, i64)> { tx.query_row( SQL_SELECT_LATEST_FRAME_IN_BATCH_FOR_BATCH, params![batch_index], - |row| Ok((row.get(0)?, row.get(1)?)), + |row| Ok((row.get(0)?, row.get(1)?, row.get(2)?)), ) } @@ -231,16 +230,9 @@ pub(super) fn sql_insert_sequenced_direct_inputs_for_frame( tx: &Transaction<'_>, batch_index: i64, frame_in_batch: i64, - direct_start_index: u64, - direct_input_count: usize, + direct_range: DirectInputRange, ) -> Result<()> { - sql_insert_sequenced_direct_inputs( - tx, - batch_index, - frame_in_batch, - direct_start_index, - direct_input_count, - ) + sql_insert_sequenced_direct_inputs(tx, batch_index, frame_in_batch, direct_range) } pub(super) fn sql_insert_open_batch(tx: &Transaction<'_>, created_at_ms: i64) -> Result { @@ -248,17 +240,27 @@ pub(super) fn sql_insert_open_batch(tx: &Transaction<'_>, created_at_ms: i64) -> tx.execute(SQL, params![created_at_ms]) } +pub(super) fn sql_insert_open_batch_with_index( + tx: &Transaction<'_>, + batch_index: i64, + created_at_ms: i64, +) -> Result { + const SQL: &str = "INSERT INTO batches (batch_index, created_at_ms) 
VALUES (?1, ?2)"; + tx.execute(SQL, params![batch_index, created_at_ms]) +} + pub(super) fn sql_insert_open_frame( tx: &Transaction<'_>, batch_index: i64, frame_in_batch: i64, created_at_ms: i64, fee: i64, + safe_block: i64, ) -> Result { - const SQL: &str = "INSERT INTO frames (batch_index, frame_in_batch, created_at_ms, fee) VALUES (?1, ?2, ?3, ?4)"; + const SQL: &str = "INSERT INTO frames (batch_index, frame_in_batch, created_at_ms, fee, safe_block) VALUES (?1, ?2, ?3, ?4, ?5)"; tx.execute( SQL, - params![batch_index, frame_in_batch, created_at_ms, fee], + params![batch_index, frame_in_batch, created_at_ms, fee, safe_block], ) } @@ -304,18 +306,18 @@ fn u64_to_i64(value: u64) -> i64 { mod tests { use super::{ SQL_INSERT_DIRECT_INPUT, SQL_INSERT_SEQUENCED_DIRECT_INPUT, SQL_INSERT_SEQUENCED_USER_OP, - SQL_INSERT_USER_OP, sql_count_user_ops_for_frame, sql_insert_direct_inputs_batch, - sql_insert_open_batch, sql_insert_open_frame, sql_insert_sequenced_direct_inputs_for_frame, - sql_insert_user_ops_and_sequenced_batch, sql_select_latest_batch_with_user_op_count, - sql_select_latest_frame_in_batch_for_batch, sql_select_max_direct_input_index, + SQL_INSERT_USER_OP, sql_insert_direct_inputs_batch, sql_insert_open_batch, + sql_insert_open_batch_with_index, sql_insert_open_frame, + sql_insert_sequenced_direct_inputs_for_frame, sql_insert_user_ops_and_sequenced_batch, + sql_select_latest_batch_with_user_op_count, sql_select_max_direct_input_index, sql_select_ordered_l2_tx_count, sql_select_ordered_l2_txs_from_offset, sql_select_ordered_l2_txs_page_from_offset, sql_select_recommended_fee, - sql_select_safe_inputs_range, sql_select_total_drained_direct_inputs, - sql_update_recommended_fee, + sql_select_safe_block, sql_select_safe_inputs_range, + sql_select_total_drained_direct_inputs, sql_update_recommended_fee, sql_update_safe_block, }; use crate::inclusion_lane::PendingUserOp; - use crate::storage::IndexedDirectInput; use crate::storage::db::Storage; + use 
crate::storage::{DirectInputRange, StoredDirectInput}; use alloy_primitives::{Address, Signature}; use rusqlite::{Connection, params}; use sequencer_core::user_op::{SignedUserOp, UserOp}; @@ -347,6 +349,13 @@ mod tests { } } + fn seed_open_batch0_frame0(conn: &mut Connection) { + let tx = conn.transaction().expect("start tx"); + sql_insert_open_batch_with_index(&tx, 0, 123).expect("insert batch 0"); + sql_insert_open_frame(&tx, 0, 0, 123, 0, 0).expect("insert frame 0"); + tx.commit().expect("commit tx"); + } + #[test] fn max_index_helpers_work_for_empty_and_non_empty_tables() { let mut conn = setup_conn(); @@ -362,12 +371,12 @@ mod tests { conn.execute( SQL_INSERT_DIRECT_INPUT, - params![0_i64, vec![0xaa_u8], 0_i64], + params![0_i64, vec![0xaa_u8], 10_i64], ) .expect("insert direct input 0"); conn.execute( SQL_INSERT_DIRECT_INPUT, - params![1_i64, vec![0xbb_u8], 0_i64], + params![1_i64, vec![0xbb_u8], 11_i64], ) .expect("insert direct input 1"); assert_eq!( @@ -375,6 +384,7 @@ mod tests { Some(1) ); + seed_open_batch0_frame0(&mut conn); let tx = conn.transaction().expect("start tx"); tx.execute( SQL_INSERT_SEQUENCED_DIRECT_INPUT, @@ -401,17 +411,17 @@ mod tests { conn.execute( SQL_INSERT_DIRECT_INPUT, - params![0_i64, vec![0xaa_u8], 0_i64], + params![0_i64, vec![0xaa_u8], 10_i64], ) .expect("insert direct input 0"); conn.execute( SQL_INSERT_DIRECT_INPUT, - params![1_i64, vec![0xbb_u8], 0_i64], + params![1_i64, vec![0xbb_u8], 11_i64], ) .expect("insert direct input 1"); conn.execute( SQL_INSERT_DIRECT_INPUT, - params![2_i64, vec![0xcc_u8], 0_i64], + params![2_i64, vec![0xcc_u8], 12_i64], ) .expect("insert direct input 2"); @@ -425,8 +435,9 @@ mod tests { } #[test] - fn ordered_l2_query_returns_user_ops_before_drained_directs_in_frame() { - let conn = setup_conn(); + fn ordered_l2_query_follows_sequenced_offset_order() { + let mut conn = setup_conn(); + seed_open_batch0_frame0(&mut conn); conn.execute( SQL_INSERT_USER_OP, @@ -445,7 +456,7 @@ mod tests { 
.expect("insert user op"); conn.execute( SQL_INSERT_DIRECT_INPUT, - params![0_i64, vec![0xaa_u8], 0_i64], + params![0_i64, vec![0xaa_u8], 10_i64], ) .expect("insert direct input"); conn.execute(SQL_INSERT_SEQUENCED_USER_OP, params![0_i64, 0_i64, 0_i64]) @@ -463,7 +474,7 @@ mod tests { assert_eq!(rows[1].kind, 1); assert_eq!(rows[1].fee, None); - let paged = sql_select_ordered_l2_txs_page_from_offset(&conn, 2, 1).expect("query page"); + let paged = sql_select_ordered_l2_txs_page_from_offset(&conn, 1, 1).expect("query page"); assert_eq!(paged.len(), 1); assert_eq!(paged[0].kind, 1); assert_eq!( @@ -473,23 +484,12 @@ mod tests { } #[test] - fn batch_and_frame_helpers_reflect_bootstrapped_open_state() { + fn batch_and_frame_helpers_start_empty_before_lane_initialization() { let mut conn = setup_conn(); let tx = conn.transaction().expect("start tx"); - let (batch_index, _created_at_ms, user_op_count) = - sql_select_latest_batch_with_user_op_count(&tx).expect("query latest batch"); - assert_eq!(batch_index, 0); - assert_eq!(user_op_count, 0); - - let (frame_in_batch, frame_fee) = - sql_select_latest_frame_in_batch_for_batch(&tx, batch_index).expect("latest frame"); - assert_eq!(frame_in_batch, 0); - assert_eq!(frame_fee, 0); - - let frame_user_op_count = - sql_count_user_ops_for_frame(&tx, batch_index, frame_in_batch).expect("count user ops"); - assert_eq!(frame_user_op_count, 0); + let err = sql_select_latest_batch_with_user_op_count(&tx).expect_err("no batch yet"); + assert!(matches!(err, rusqlite::Error::QueryReturnedNoRows)); } #[test] @@ -499,7 +499,7 @@ mod tests { sql_insert_open_batch(&tx, 123).expect("insert open batch"); let new_batch = tx.last_insert_rowid(); - sql_insert_open_frame(&tx, new_batch, 0, 123, 7).expect("insert open frame"); + sql_insert_open_frame(&tx, new_batch, 0, 123, 7, 9).expect("insert open frame"); tx.commit().expect("commit tx"); let batch_count: i64 = conn @@ -508,8 +508,8 @@ mod tests { let frame_count: i64 = conn .query_row("SELECT 
COUNT(*) FROM frames", [], |row| row.get(0)) .expect("count frames"); - assert!(batch_count >= 2); - assert!(frame_count >= 2); + assert_eq!(batch_count, 1); + assert_eq!(frame_count, 1); } #[test] @@ -523,24 +523,31 @@ mod tests { assert_eq!(sql_select_recommended_fee(&conn).expect("read updated"), 9); } + #[test] + fn l1_safe_head_helpers_read_and_update_singleton() { + let conn = setup_conn(); + assert_eq!(sql_select_safe_block(&conn).expect("read safe block"), 0); + sql_update_safe_block(&conn, 12).expect("update safe block"); + assert_eq!(sql_select_safe_block(&conn).expect("read updated"), 12); + } + #[test] fn batch_insert_helpers_insert_multiple_rows() { let mut conn = setup_conn(); + seed_open_batch0_frame0(&mut conn); let tx = conn.transaction().expect("start tx"); let direct_inputs = vec![ - IndexedDirectInput { - index: 0, + StoredDirectInput { payload: vec![0xaa_u8], - block_number: 0, + block_number: 10, }, - IndexedDirectInput { - index: 1, + StoredDirectInput { payload: vec![0xbb_u8], - block_number: 0, + block_number: 11, }, ]; - sql_insert_direct_inputs_batch(&tx, direct_inputs.as_slice()) + sql_insert_direct_inputs_batch(&tx, 0, direct_inputs.as_slice()) .expect("insert direct inputs batch"); let user_ops = vec![ @@ -550,8 +557,13 @@ mod tests { sql_insert_user_ops_and_sequenced_batch(&tx, 0, 0, 0, user_ops.as_slice()) .expect("insert user ops + sequenced batch"); - sql_insert_sequenced_direct_inputs_for_frame(&tx, 0, 0, 0, direct_inputs.len()) - .expect("insert sequenced direct inputs batch"); + sql_insert_sequenced_direct_inputs_for_frame( + &tx, + 0, + 0, + DirectInputRange::new(0, direct_inputs.len() as u64), + ) + .expect("insert sequenced direct inputs batch"); tx.commit().expect("commit tx"); @@ -574,7 +586,8 @@ mod tests { #[test] fn user_op_uniqueness_is_sender_nonce() { - let conn = setup_conn(); + let mut conn = setup_conn(); + seed_open_batch0_frame0(&mut conn); // Same nonce with different senders should be accepted. 
conn.execute( diff --git a/sequencer/tests/e2e_sequencer.rs b/sequencer/tests/e2e_sequencer.rs index bb5737a..afd78a4 100644 --- a/sequencer/tests/e2e_sequencer.rs +++ b/sequencer/tests/e2e_sequencer.rs @@ -2,28 +2,27 @@ // SPDX-License-Identifier: Apache-2.0 (see LICENSE) use std::io::ErrorKind; -use std::sync::Arc; use std::time::Duration; use alloy_primitives::{Address, Signature, U256}; use alloy_sol_types::{Eip712Domain, SolStruct}; -use app_core::application::{WalletApp, WalletConfig}; +use app_core::application::{ + MAX_METHOD_PAYLOAD_BYTES, Method, WalletApp, WalletConfig, Withdrawal, +}; use futures_util::StreamExt; use k256::ecdsa::SigningKey; use k256::ecdsa::signature::hazmat::PrehashSigner; -use sequencer::api::{AppState, router}; -use sequencer::inclusion_lane::{ - InclusionLane, InclusionLaneConfig, InclusionLaneError, InclusionLaneInput, -}; -use sequencer::l2_tx_broadcaster::{L2TxBroadcaster, L2TxBroadcasterConfig}; -use sequencer::storage::Storage; +use sequencer::api::{self, ApiConfig}; +use sequencer::inclusion_lane::{InclusionLane, InclusionLaneConfig, PendingUserOp}; +use sequencer::l2_tx_feed::{L2TxFeed, L2TxFeedConfig}; +use sequencer::shutdown::ShutdownSignal; +use sequencer::storage::{DirectInputRange, Storage}; use sequencer_core::api::{TxRequest, TxResponse, WsTxMessage}; -use sequencer_core::application::{Method, Withdrawal}; use sequencer_core::user_op::UserOp; use sequencer_rust_client::SequencerClient; use tempfile::TempDir; use tokio::io::{AsyncReadExt, AsyncWriteExt}; -use tokio::sync::{Semaphore, mpsc, oneshot}; +use tokio::sync::mpsc; use tokio_tungstenite::connect_async; use tokio_tungstenite::tungstenite::Message; @@ -222,13 +221,13 @@ async fn api_rejects_malformed_json_as_bad_request() { } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn api_returns_429_when_tx_middleware_concurrency_is_exceeded() { - let db = temp_db("tx-middleware-overload"); +async fn api_returns_429_when_queue_is_full() { + let db = 
temp_db("queue-full-overload"); let domain = test_domain(); bootstrap_open_frame_fee_zero(db.path.as_str()); let Some(runtime) = - start_api_only_server(db.path.as_str(), domain.clone(), 128 * 1024, 8, 1).await + start_api_only_server(db.path.as_str(), domain.clone(), 128 * 1024, 1).await else { return; }; @@ -243,10 +242,14 @@ async fn api_returns_429_when_tx_middleware_concurrency_is_exceeded() { tokio::time::sleep(Duration::from_millis(50)).await; let (status, body) = post_raw_json(runtime.addr, request_json.as_str()).await; - assert_eq!(status, 429, "expected 429 for middleware overload: {body}"); + assert_eq!(status, 429, "expected 429 for queue-full overload: {body}"); + assert!( + body.contains("queue full"), + "expected queue full message in overload response: {body}" + ); assert!( body.contains("OVERLOADED"), - "expected OVERLOADED code for middleware overload: {body}" + "expected OVERLOADED code for queue-full overload: {body}" ); first.abort(); @@ -254,60 +257,65 @@ async fn api_returns_429_when_tx_middleware_concurrency_is_exceeded() { } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn api_returns_429_when_queue_is_full() { - let db = temp_db("queue-full-overload"); +async fn api_rejects_user_op_payloads_above_application_limit() { + let db = temp_db("user-op-payload-too-large"); let domain = test_domain(); bootstrap_open_frame_fee_zero(db.path.as_str()); - let Some(runtime) = - start_api_only_server(db.path.as_str(), domain.clone(), 128 * 1024, 1, 8).await - else { + let Some(runtime) = start_full_server(db.path.as_str(), domain.clone()).await else { return; }; - let request = make_valid_request(&domain); - let request_json = serde_json::to_string(&request).expect("serialize valid request"); - let first = tokio::spawn({ - let body = request_json.clone(); - let addr = runtime.addr; - async move { post_raw_json(addr, body.as_str()).await } - }); - tokio::time::sleep(Duration::from_millis(50)).await; + let endpoint = 
format!("http://{}", runtime.addr); + let client = SequencerClient::new_with_timeout(endpoint, Duration::from_secs(2)) + .expect("build sequencer client"); - let (status, body) = post_raw_json(runtime.addr, request_json.as_str()).await; - assert_eq!(status, 429, "expected 429 for queue-full overload: {body}"); + let signing_key = SigningKey::from_bytes((&[7_u8; 32]).into()).expect("create signing key"); + let sender = address_from_signing_key(&signing_key); + let user_op = UserOp { + nonce: 0, + max_fee: 0, + data: vec![0_u8; MAX_METHOD_PAYLOAD_BYTES + 1].into(), + }; + let request = TxRequest { + signature: sign_user_op_hex(&domain, &user_op, &signing_key), + sender: sender.to_string(), + message: user_op, + }; + + let (status, body) = client + .submit_tx_with_status(&request) + .await + .expect("submit oversized tx"); + + assert_eq!( + status, 400, + "unexpected status for oversized payload: {body}" + ); assert!( - body.contains("queue full"), - "expected queue full message in overload response: {body}" + body.contains("user op payload too large"), + "expected payload-size validation message, got: {body}" ); assert!( - body.contains("OVERLOADED"), - "expected OVERLOADED code for queue-full overload: {body}" + body.contains(&MAX_METHOD_PAYLOAD_BYTES.to_string()), + "expected max payload size in error body, got: {body}" ); - first.abort(); shutdown_runtime(runtime).await; } struct FullServerRuntime { addr: std::net::SocketAddr, - broadcaster: L2TxBroadcaster, - shutdown_tx: Option>, - server_task: Option>, - lane_stop: Option, - lane_handle: Option>, - _parked_rx: Option>, + shutdown: ShutdownSignal, + server_task: Option, + lane_handle: + Option>>, + _parked_rx: Option>, } impl Drop for FullServerRuntime { fn drop(&mut self) { - self.broadcaster.request_shutdown(); - if let Some(tx) = self.shutdown_tx.take() { - let _ = tx.send(()); - } - if let Some(stop) = self.lane_stop.take() { - stop.request_shutdown(); - } + self.shutdown.request_shutdown(); if let 
Some(task) = self.server_task.take() { task.abort(); } @@ -339,10 +347,11 @@ async fn start_full_server_with_max_body( let addr = listener.local_addr().expect("read listener addr"); let storage = Storage::open(db_path, "NORMAL").expect("open storage"); - let (tx, rx) = mpsc::channel::(128); + let shutdown = ShutdownSignal::default(); - let inclusion_lane = InclusionLane::new( - rx, + let (tx, lane_handle) = InclusionLane::start( + 128, + shutdown.clone(), WalletApp::new(WalletConfig), storage, InclusionLaneConfig { @@ -351,48 +360,35 @@ async fn start_full_server_with_max_body( max_batch_open: Duration::from_secs(60 * 60), max_batch_user_op_bytes: 1_048_576, idle_poll_interval: Duration::from_millis(2), - metrics_enabled: false, - metrics_log_interval: Duration::from_secs(5), }, ); - let (lane_handle, lane_stop) = inclusion_lane.spawn(); - let broadcaster = L2TxBroadcaster::start( + let tx_feed = L2TxFeed::new( db_path.to_string(), - L2TxBroadcasterConfig { + shutdown.clone(), + L2TxFeedConfig { idle_poll_interval: Duration::from_millis(2), page_size: 64, - subscriber_buffer_capacity: 256, - metrics_enabled: false, - metrics_log_interval: Duration::from_secs(5), }, - ) - .expect("start broadcaster"); + ); - let state = Arc::new(AppState { - tx_sender: tx, + let server_task = api::start_on_listener( + listener, + tx, domain, - overload_max_inflight_submissions: 256, - ws_subscriber_limit: Arc::new(Semaphore::new(64)), - ws_max_catchup_events: 50_000, - broadcaster: broadcaster.clone(), - }); - let app = router(state, max_body_bytes); - - let (shutdown_tx, shutdown_rx) = oneshot::channel::<()>(); - let server = axum::serve(listener, app).with_graceful_shutdown(async move { - let _ = shutdown_rx.await; - }); - let server_task = tokio::spawn(async move { - server.await.expect("run test server"); - }); + MAX_METHOD_PAYLOAD_BYTES, + shutdown.clone(), + tx_feed, + ApiConfig { + max_body_bytes, + ..ApiConfig::default() + }, + ); Some(FullServerRuntime { addr, - 
broadcaster, - shutdown_tx: Some(shutdown_tx), + shutdown, server_task: Some(server_task), - lane_stop: Some(lane_stop), lane_handle: Some(lane_handle), _parked_rx: None, }) @@ -403,7 +399,6 @@ async fn start_api_only_server( domain: Eip712Domain, max_body_bytes: usize, queue_capacity: usize, - overload_max_inflight_submissions: usize, ) -> Option { let listener = match tokio::net::TcpListener::bind("127.0.0.1:0").await { Ok(value) => value, @@ -416,69 +411,58 @@ async fn start_api_only_server( let addr = listener.local_addr().expect("read listener addr"); let _storage = Storage::open(db_path, "NORMAL").expect("open storage"); - let (tx, rx) = mpsc::channel::(queue_capacity); - let broadcaster = L2TxBroadcaster::start( + let (tx, rx) = mpsc::channel::(queue_capacity); + let shutdown = ShutdownSignal::default(); + let tx_feed = L2TxFeed::new( db_path.to_string(), - L2TxBroadcasterConfig { + shutdown.clone(), + L2TxFeedConfig { idle_poll_interval: Duration::from_millis(2), page_size: 64, - subscriber_buffer_capacity: 256, - metrics_enabled: false, - metrics_log_interval: Duration::from_secs(5), }, - ) - .expect("start broadcaster"); - let state = Arc::new(AppState { - tx_sender: tx, + ); + let server_task = api::start_on_listener( + listener, + tx, domain, - overload_max_inflight_submissions, - ws_subscriber_limit: Arc::new(Semaphore::new(64)), - ws_max_catchup_events: 50_000, - broadcaster: broadcaster.clone(), - }); - let app = router(state, max_body_bytes); - - let (shutdown_tx, shutdown_rx) = oneshot::channel::<()>(); - let server = axum::serve(listener, app).with_graceful_shutdown(async move { - let _ = shutdown_rx.await; - }); - let server_task = tokio::spawn(async move { - server.await.expect("run test server"); - }); + MAX_METHOD_PAYLOAD_BYTES, + shutdown.clone(), + tx_feed, + ApiConfig { + max_body_bytes, + ..ApiConfig::default() + }, + ); Some(FullServerRuntime { addr, - broadcaster, - shutdown_tx: Some(shutdown_tx), + shutdown, server_task: 
Some(server_task), - lane_stop: None, lane_handle: None, _parked_rx: Some(rx), }) } async fn shutdown_runtime(mut runtime: FullServerRuntime) { - runtime.broadcaster.request_shutdown(); - if let Some(stop) = runtime.lane_stop.take() { - stop.request_shutdown(); - } - if let Some(tx) = runtime.shutdown_tx.take() { - let _ = tx.send(()); - } - if let Some(task) = runtime.server_task.take() { - tokio::time::timeout(Duration::from_secs(3), task) - .await - .expect("wait for server task") - .expect("join server task"); - } + runtime.shutdown.request_shutdown(); if let Some(task) = runtime.lane_handle.take() { let lane_result = tokio::time::timeout(Duration::from_secs(3), task) .await .expect("wait for inclusion lane") .expect("join inclusion lane task"); assert!( - matches!(lane_result, InclusionLaneError::ShutdownRequested), - "expected shutdown result, got {lane_result}" + lane_result.is_ok(), + "expected clean shutdown, got {lane_result:?}" + ); + } + if let Some(task) = runtime.server_task.take() { + let server_result = tokio::time::timeout(Duration::from_secs(3), task) + .await + .expect("wait for server task") + .expect("join server task"); + assert!( + server_result.is_ok(), + "expected clean server shutdown, got {server_result:?}" ); } } @@ -486,10 +470,9 @@ async fn shutdown_runtime(mut runtime: FullServerRuntime) { fn bootstrap_open_frame_fee_zero(db_path: &str) { let mut storage = Storage::open(db_path, "NORMAL").expect("open storage"); storage.set_recommended_fee(0).expect("set recommended fee"); - let mut head = storage.load_open_state().expect("load open state"); - storage - .close_frame_and_batch(&mut head, 0, 0) - .expect("rotate batch to fee=0"); + let head = storage + .initialize_open_state(0, DirectInputRange::empty_at(0)) + .expect("initialize open state"); assert_eq!(head.frame_fee, 0); } diff --git a/sequencer/tests/ws_broadcaster.rs b/sequencer/tests/ws_broadcaster.rs index e5aca5d..5f8da0d 100644 --- a/sequencer/tests/ws_broadcaster.rs +++ 
b/sequencer/tests/ws_broadcaster.rs @@ -2,37 +2,28 @@ // SPDX-License-Identifier: Apache-2.0 (see LICENSE) use std::io::ErrorKind; -use std::sync::Arc; use std::time::{Duration, SystemTime}; use alloy_primitives::{Address, Signature}; use alloy_sol_types::Eip712Domain; +use app_core::application::MAX_METHOD_PAYLOAD_BYTES; use futures_util::{SinkExt, StreamExt}; -use sequencer::api::{AppState, router}; -use sequencer::inclusion_lane::{InclusionLaneInput, PendingUserOp, SequencerError}; -use sequencer::l2_tx_broadcaster::{L2TxBroadcaster, L2TxBroadcasterConfig}; -use sequencer::storage::{IndexedDirectInput, Storage}; +use sequencer::api::{self, ApiConfig}; +use sequencer::inclusion_lane::{PendingUserOp, SequencerError}; +use sequencer::l2_tx_feed::{L2TxFeed, L2TxFeedConfig}; +use sequencer::shutdown::ShutdownSignal; +use sequencer::storage::{DirectInputRange, Storage, StoredDirectInput}; use sequencer_core::api::WsTxMessage; use sequencer_core::l2_tx::SequencedL2Tx; use sequencer_core::user_op::{SignedUserOp, UserOp}; use sequencer_rust_client::SequencerClient; use tempfile::TempDir; -use tokio::sync::{Semaphore, mpsc, oneshot}; +use tokio::sync::{mpsc, oneshot}; use tokio_tungstenite::connect_async; use tokio_tungstenite::tungstenite::Message; -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] -async fn ws_subscribe_tests_sequential() { - scenario_streams_ordered_txs_from_offset_zero().await; - scenario_resumes_from_given_offset().await; - scenario_receives_live_events_after_subscribing().await; - scenario_fanout_delivers_live_event_to_multiple_subscribers().await; - scenario_replies_with_pong_on_ping().await; - scenario_rejects_when_subscriber_limit_is_reached().await; - scenario_closes_when_catchup_window_exceeds_limit().await; -} - -async fn scenario_streams_ordered_txs_from_offset_zero() { +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn ws_subscribe_streams_ordered_txs_from_offset_zero() { let db = temp_db("ws-subscribe-zero"); 
seed_ordered_txs(db.path.as_str()); let expected = load_ordered_l2_txs_page(db.path.as_str(), 0, 2); @@ -57,7 +48,8 @@ async fn scenario_streams_ordered_txs_from_offset_zero() { assert_ws_message_matches_tx(second, &expected[1], 1); } -async fn scenario_resumes_from_given_offset() { +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn ws_subscribe_resumes_from_given_offset() { let db = temp_db("ws-subscribe-resume"); seed_ordered_txs(db.path.as_str()); let expected = load_ordered_l2_txs_page(db.path.as_str(), 1, 1); @@ -84,7 +76,8 @@ async fn scenario_resumes_from_given_offset() { assert_ws_message_matches_tx(first, &expected[0], 1); } -async fn scenario_receives_live_events_after_subscribing() { +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn ws_subscribe_receives_live_events_after_subscribing() { let db = temp_db("ws-subscribe-live"); seed_ordered_txs(db.path.as_str()); let base_offset = ordered_l2_tx_count(db.path.as_str()); @@ -100,7 +93,7 @@ async fn scenario_receives_live_events_after_subscribing() { .expect("timeout connecting websocket") .expect("connect websocket"); - append_drained_direct_input(db.path.as_str(), 1, vec![0xbb]); + append_drained_direct_input(db.path.as_str(), vec![0xbb]); let expected = load_ordered_l2_txs_page(db.path.as_str(), base_offset, 1); assert_eq!( expected.len(), @@ -115,7 +108,8 @@ async fn scenario_receives_live_events_after_subscribing() { assert_ws_message_matches_tx(live, &expected[0], base_offset); } -async fn scenario_fanout_delivers_live_event_to_multiple_subscribers() { +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn ws_subscribe_fanout_delivers_live_event_to_multiple_subscribers() { let db = temp_db("ws-subscribe-fanout"); seed_ordered_txs(db.path.as_str()); let base_offset = ordered_l2_tx_count(db.path.as_str()); @@ -134,7 +128,7 @@ async fn scenario_fanout_delivers_live_event_to_multiple_subscribers() { .expect("timeout connecting websocket B") 
.expect("connect websocket B"); - append_drained_direct_input(db.path.as_str(), 1, vec![0xcd]); + append_drained_direct_input(db.path.as_str(), vec![0xcd]); let expected = load_ordered_l2_txs_page(db.path.as_str(), base_offset, 1); assert_eq!( expected.len(), @@ -153,7 +147,8 @@ async fn scenario_fanout_delivers_live_event_to_multiple_subscribers() { assert_ws_message_matches_tx(event_b, &expected[0], base_offset); } -async fn scenario_replies_with_pong_on_ping() { +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn ws_subscribe_replies_with_pong_on_ping() { let db = temp_db("ws-subscribe-ping-pong"); seed_ordered_txs(db.path.as_str()); // Use a far-future offset so this test validates ping/pong without @@ -185,7 +180,8 @@ async fn scenario_replies_with_pong_on_ping() { } } -async fn scenario_rejects_when_subscriber_limit_is_reached() { +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn ws_subscribe_rejects_when_subscriber_limit_is_reached() { let db = temp_db("ws-subscriber-limit"); seed_ordered_txs(db.path.as_str()); let base_offset = ordered_l2_tx_count(db.path.as_str()); @@ -215,7 +211,8 @@ async fn scenario_rejects_when_subscriber_limit_is_reached() { shutdown_runtime(runtime).await; } -async fn scenario_closes_when_catchup_window_exceeds_limit() { +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn ws_subscribe_closes_when_catchup_window_exceeds_limit() { let db = temp_db("ws-catchup-limit"); seed_ordered_txs(db.path.as_str()); @@ -245,9 +242,45 @@ async fn scenario_closes_when_catchup_window_exceeds_limit() { shutdown_runtime(runtime).await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn ws_subscribe_closes_on_oversized_inbound_message() { + let db = temp_db("ws-oversized-inbound"); + seed_ordered_txs(db.path.as_str()); + + let Some(runtime) = start_test_server(db.path.as_str()).await else { + return; + }; + + let url = ws_subscribe_url(runtime.addr, u64::MAX); + let (mut ws, 
_) = tokio::time::timeout(Duration::from_secs(5), connect_async(url)) + .await + .expect("timeout connecting websocket") + .expect("connect websocket"); + + ws.send(Message::Text("x".repeat(64 * 1024).into())) + .await + .expect("send oversized text frame"); + + let received = tokio::time::timeout(Duration::from_secs(2), ws.next()) + .await + .expect("wait for websocket close after oversized inbound message"); + + match received { + Some(Ok(Message::Close(_))) | Some(Err(_)) | None => {} + other => { + panic!("expected websocket to close after oversized inbound message, got {other:?}") + } + } + + drop(ws); + shutdown_runtime(runtime).await; +} + fn seed_ordered_txs(db_path: &str) { let mut storage = Storage::open(db_path, "NORMAL").expect("open storage"); - let mut head = storage.load_open_state().expect("load open state"); + let mut head = storage + .initialize_open_state(0, DirectInputRange::empty_at(0)) + .expect("initialize open state"); let (respond_to, _recv) = oneshot::channel::>(); let pending = PendingUserOp { @@ -268,45 +301,59 @@ fn seed_ordered_txs(db_path: &str) { .append_user_ops_chunk(&mut head, &[pending]) .expect("append user-op chunk"); storage - .append_safe_direct_inputs(&[IndexedDirectInput { - index: 0, - payload: vec![0xaa], - block_number: 0, - }]) + .append_safe_direct_inputs( + 10, + &[StoredDirectInput { + payload: vec![0xaa], + block_number: 10, + }], + ) .expect("append direct input"); storage - .close_frame_only(&mut head, 0, 1) + .close_frame_only(&mut head, 10, DirectInputRange::new(0, 1)) .expect("close frame with one drained direct input"); } -fn append_drained_direct_input(db_path: &str, index: u64, payload: Vec) { +fn append_drained_direct_input(db_path: &str, payload: Vec) { let mut storage = Storage::open(db_path, "NORMAL").expect("open storage"); - let mut head = storage.load_open_state().expect("load open state"); + let mut head = storage + .load_open_state() + .expect("load open state") + .expect("open state should exist"); 
+ let safe_block = storage + .current_safe_block() + .expect("read current safe block") + .saturating_add(1); + let next_direct_index = storage + .safe_input_end_exclusive() + .expect("read next direct input index"); storage - .append_safe_direct_inputs(&[IndexedDirectInput { - index, - payload, - block_number: 0, - }]) + .append_safe_direct_inputs( + safe_block, + &[StoredDirectInput { + payload, + block_number: safe_block, + }], + ) .expect("append direct input"); storage - .close_frame_only(&mut head, index, 1) + .close_frame_only( + &mut head, + safe_block, + DirectInputRange::new(next_direct_index, next_direct_index.saturating_add(1)), + ) .expect("close frame with one drained direct input"); } struct WsServerRuntime { addr: std::net::SocketAddr, - broadcaster: L2TxBroadcaster, - shutdown_tx: Option>, - server_task: Option>, + shutdown: ShutdownSignal, + server_task: Option, } impl Drop for WsServerRuntime { fn drop(&mut self) { - self.broadcaster.request_shutdown(); - if let Some(tx) = self.shutdown_tx.take() { - let _ = tx.send(()); - } + self.shutdown.request_shutdown(); if let Some(task) = self.server_task.take() { task.abort(); } @@ -334,60 +381,54 @@ async fn start_test_server_with_limits( }; let addr = listener.local_addr().expect("read listener addr"); - let (tx_sender, _rx) = mpsc::channel::(1); - let broadcaster = L2TxBroadcaster::start( + let (tx_sender, _rx) = mpsc::channel::(1); + let shutdown = ShutdownSignal::default(); + let tx_feed = L2TxFeed::new( db_path.to_string(), - L2TxBroadcasterConfig { + shutdown.clone(), + L2TxFeedConfig { idle_poll_interval: Duration::from_millis(2), page_size: 64, - subscriber_buffer_capacity: 256, - metrics_enabled: false, - metrics_log_interval: Duration::from_secs(5), }, - ) - .expect("start broadcaster"); - let state = Arc::new(AppState { + ); + let task = api::start_on_listener( + listener, tx_sender, - domain: Eip712Domain { + Eip712Domain { name: None, version: None, chain_id: None, verifying_contract: None, 
salt: None, }, - overload_max_inflight_submissions: 16, - ws_subscriber_limit: Arc::new(Semaphore::new(ws_max_subscribers)), - ws_max_catchup_events, - broadcaster: broadcaster.clone(), - }); - let app = router(state, 128 * 1024); - let (shutdown_tx, shutdown_rx) = oneshot::channel::<()>(); - - let server = axum::serve(listener, app).with_graceful_shutdown(async move { - let _ = shutdown_rx.await; - }); - let task = tokio::spawn(async move { - server.await.expect("run test server"); - }); + MAX_METHOD_PAYLOAD_BYTES, + shutdown.clone(), + tx_feed, + ApiConfig { + ws_max_subscribers, + ws_max_catchup_events, + ..ApiConfig::default() + }, + ); Some(WsServerRuntime { addr, - broadcaster, - shutdown_tx: Some(shutdown_tx), + shutdown, server_task: Some(task), }) } async fn shutdown_runtime(mut runtime: WsServerRuntime) { - runtime.broadcaster.request_shutdown(); - if let Some(tx) = runtime.shutdown_tx.take() { - let _ = tx.send(()); - } + runtime.shutdown.request_shutdown(); if let Some(task) = runtime.server_task.take() { - tokio::time::timeout(Duration::from_secs(3), task) + let server_result = tokio::time::timeout(Duration::from_secs(3), task) .await .expect("wait for server task") .expect("join server task"); + assert!( + server_result.is_ok(), + "expected clean server shutdown, got {server_result:?}" + ); } } @@ -396,7 +437,7 @@ async fn recv_tx_message( tokio_tungstenite::MaybeTlsStream, >, ) -> WsTxMessage { - let received = tokio::time::timeout(Duration::from_secs(5), ws.next()) + let received = tokio::time::timeout(Duration::from_secs(2), ws.next()) .await .expect("wait for websocket message") .expect("websocket stream ended") @@ -415,7 +456,7 @@ async fn recv_raw_message( tokio_tungstenite::MaybeTlsStream, >, ) -> Message { - tokio::time::timeout(Duration::from_secs(5), ws.next()) + tokio::time::timeout(Duration::from_secs(2), ws.next()) .await .expect("wait for websocket message") .expect("websocket stream ended") @@ -490,7 +531,7 @@ struct TestDb { fn 
temp_db(name: &str) -> TestDb { let dir = tempfile::Builder::new() - .prefix(format!("sequencer-ws-broadcaster-{name}-").as_str()) + .prefix(format!("sequencer-ws-feed-{name}-").as_str()) .tempdir() .expect("create temporary test directory"); let path = dir.path().join("sequencer.sqlite");