diff --git a/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Builtin/Empty.hs b/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Builtin/Empty.hs index 33040869fcf..36bb32c2d3d 100644 --- a/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Builtin/Empty.hs +++ b/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Builtin/Empty.hs @@ -63,7 +63,7 @@ fastDuration = ciTestDuration :: Types.Profile -> Types.Profile ciTestDuration = - V.timescaleCompressed . P.shutdownOnBlock 8 + V.timescaleCompressed . P.shutdownOnBlock 180 -- TODO: dummy "generator.epochs" ignored in favor of "--shutdown-on". -- Create a "time.epochs" or "time.blocks" or similar, IDK! -- This applies to all profiles! diff --git a/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Vocabulary.hs b/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Vocabulary.hs index f9aa0f2af83..1566a4ffc06 100644 --- a/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Vocabulary.hs +++ b/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Vocabulary.hs @@ -116,11 +116,11 @@ genesisVariantVoltaire = genesisVariantLatest -- Defined in the "genesis" property and it's for the tx-generator. fundsDefault :: Types.Profile -> Types.Profile -fundsDefault = P.poolBalance 1000000000000000 . P.funds 10000000000000 . P.utxoKeys 1 +fundsDefault = P.poolBalance 1000000000000000 . P.funds 10000000000000 . P.utxoKeys (52*500*3) -- Some profiles have a higher `funds_balance` in `Genesis`. Needed? Fix it? fundsDouble :: Types.Profile -> Types.Profile -fundsDouble = P.poolBalance 1000000000000000 . P.funds 20000000000000 . P.utxoKeys 1 +fundsDouble = P.poolBalance 1000000000000000 . P.funds 20000000000000 . P.utxoKeys (52*500*3) fundsVoting :: Types.Profile -> Types.Profile fundsVoting = P.poolBalance 1000000000000000 . P.funds 40000000000000 . 
P.utxoKeys 2 diff --git a/bench/tx-centrifuge/LICENSE b/bench/tx-centrifuge/LICENSE new file mode 100644 index 00000000000..f433b1a53f5 --- /dev/null +++ b/bench/tx-centrifuge/LICENSE @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/bench/tx-centrifuge/NOTICE b/bench/tx-centrifuge/NOTICE new file mode 100644 index 00000000000..df6a765c219 --- /dev/null +++ b/bench/tx-centrifuge/NOTICE @@ -0,0 +1,14 @@ +Copyright 2019-2023 Input Output Global Inc (IOG), 2023-2026 Intersect. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + diff --git a/bench/tx-centrifuge/README.md b/bench/tx-centrifuge/README.md new file mode 100644 index 00000000000..debec26ff29 --- /dev/null +++ b/bench/tx-centrifuge/README.md @@ -0,0 +1,447 @@ +# Tx Centrifuge & Pull-Fiction + +`tx-centrifuge` is a high-performance load generator for Cardano, built on top of the protocol-agnostic **Pull-Fiction** library. + +Unlike traditional load generators that "push" data at a fixed rate, this system is designed for **pull-based protocols**. It does not generate load by itself; instead, it acts as a **policer** that reacts to requests from downstream consumers, admitting or delaying them to enforce a configured rate ceiling. 
+ +### Minimal Configuration Example + +A basic configuration defines how to load initial resources, how to build payloads, the desired rate, and where to send the results: + +```json +{ + "initial_inputs": { + "type": "genesis_utxo_keys", + "params": { + "network_magic": 42, + "signing_keys_file": "funds.json" + } + }, + "builder": { + "type": "value", + "params": { "fee": 1000000 }, + "recycle": { "type": "on_pull" } + }, + "rate_limit": { "type": "token_bucket", "params": { "tps": 10 } }, + "workloads": { + "group-A": { + "targets": { + "node-1": { "addr": "127.0.0.1", "port": 30000 }, + "node-2": { "addr": "127.0.0.1", "port": 30001 } + } + } + }, + "nodeConfig": "node-config.json", + "protocol_parameters": { + "epoch_length": 600, + "min_fee_a": 44, + "min_fee_b": 155381 + } +} +``` + +## Core Concepts: The Pull-Fiction Engine + +The underlying `pull-fiction` library implements a reactive rate-limiting strategy. It only produces data when a consumer asks for it, and only as fast as the rate limiter allows. 
+ +### Architecture + +``` + ┌───────────────┐ + │ Initial UTxOs │ + └───────┬───────┘ + │ + (partitioned) + │ + ┌─────────────────────────────┼─────────────────────────────┐ + │ │ │ + ▼ ▼ ▼ +┌─────────────────────────┐ ┌─────────────────────────┐ ┌─────────────────────────┐ +│ Workload A │ │ Workload B │ │ Workload N │ +├─────────────────────────┤ ├─────────────────────────┤ ├─────────────────────────┤ +│ │ │ │ │ │ +│ ┌─────────────────┐ │ │ ┌─────────────────┐ │ │ ┌─────────────────┐ │ +│ │ Input Queue │ │ │ │ Input Queue │ │ │ │ Input Queue │ │ +│ │ (unbounded) │ │ │ │ (unbounded) │ │ │ │ (unbounded) │ │ +│ └────────┬────────┘ │ │ └────────┬────────┘ │ │ └────────┬────────┘ │ +│ │ │ │ │ │ │ │ │ +│ ▼ │ │ ▼ │ │ ▼ │ +│ ┌─────────────────┐ │ │ ┌─────────────────┐ │ │ ┌─────────────────┐ │ +│ │ Builder │ │ │ │ Builder │ │ │ │ Builder │ │ +│ │ (build & sign) │ │ │ │ (build & sign) │ │ │ │ (build & sign) │ │ +│ └────────┬────────┘ │ │ └────────┬────────┘ │ │ └────────┬────────┘ │ +│ │ │ │ │ │ │ │ │ +│ ▼ │ │ ▼ │ │ ▼ │ +│ ┌─────────────────┐ │ │ ┌─────────────────┐ │ │ ┌─────────────────┐ │ +│ │ Payload Queue │ │ │ │ Payload Queue │ │ │ │ Payload Queue │ │ +│ │ (bounded) │ │ │ │ (bounded) │ │ │ │ (bounded) │ │ +│ └────────┬────────┘ │ │ └────────┬────────┘ │ │ └────────┬────────┘ │ +│ │ │ │ │ │ │ │ │ +│ ┌─────┴─────┐ │ │ ┌─────┴─────┐ │ │ ┌─────┴─────┐ │ +│ ▼ ▼ │ │ ▼ ▼ │ │ ▼ ▼ │ +│ ┌────────┐ ┌────────┐ │ │ ┌────────┐ ┌────────┐ │ │ ┌────────┐ ┌────────┐ │ +│ │Worker 1│ │Worker 2│ │ │ │Worker 1│ │Worker 2│ │ │ │Worker 1│ │Worker 2│ │ +│ └───┬────┘ └───┬────┘ │ │ └───┬────┘ └───┬────┘ │ │ └───┬────┘ └───┬────┘ │ +│ │ │ │ │ │ │ │ │ │ │ │ +│ ▼ ▼ │ │ ▼ ▼ │ │ ▼ ▼ │ +│ NodeToNode ... │ │ NodeToNode ... │ │ NodeToNode ... 
│ +│ (multiplexed) │ │ (multiplexed) │ │ (multiplexed) │ +│ │ │ │ │ │ │ │ │ │ │ │ +│ ▼ ▼ │ │ ▼ ▼ │ │ ▼ ▼ │ +│ ┌──────┐ ┌──────┐ │ │ ┌──────┐ ┌──────┐ │ │ ┌──────┐ ┌──────┐ │ +│ │Node 1│ │Node 2│ │ │ │Node 3│ │Node 4│ │ │ │Node 5│ │Node 6│ │ +│ └──────┘ └──────┘ │ │ └──────┘ └──────┘ │ │ └──────┘ └──────┘ │ +│ │ │ │ │ │ +│ ◄── recycle outputs ───┤ │ ◄── recycle outputs ───┤ │ ◄── recycle outputs ───┤ +│ │ │ │ │ │ +└─────────────────────────┘ └─────────────────────────┘ └─────────────────────────┘ +``` + +**Pipeline flow:** +1. **Initial UTxOs** are loaded and **partitioned** across workloads +2. Each workload's share enters its **Input Queue** (unbounded) +3. A **Builder** (one per workload) pulls inputs, assembles and signs transactions, and pushes `(tx, outputs)` to the **Payload Queue** (bounded — sole source of backpressure) +4. **Workers** (one per target) pull from the Payload Queue via rate-limited fetchers +5. Workers connect to Cardano nodes via a **multiplexed NodeToNode** connection running **TxSubmission2** and **KeepAlive** mini-protocols (optionally **ChainSync** + **BlockFetch** for confirmation-based recycling) +6. **Outputs are recycled** back to the workload's Input Queue according to the configured `recycle` strategy, enabling indefinite-duration runs + +### Reactive Rate Limiting +- **Downstream Driven**: Load is only dispensed in response to an explicit pull from a target. If the target doesn't ask, the engine stays idle. +- **Ceiling Enforcement**: The rate limiter enforces a tokens-per-second (TPS) ceiling. Even if a consumer pulls aggressively, the engine ensures the dispensed items never exceed the configured limit. +- **Fairness**: Token slots are claimed in a single atomic STM transaction, providing FIFO-fair scheduling across multiple workers sharing the same limiter. 
+ +### Workloads and Targets +The configuration is organized into a hierarchy that defines the concurrency model: + +- **Target**: A single network endpoint (e.g., a Cardano node). Each target has a dedicated **Worker thread** that manages the network connection and handles requests. +- **Workload**: A logical grouping of targets. + - All targets within a workload share the same **Builder thread** and the same **Payload Queue**. + - **Transaction Profiles**: Each workload can define its own `builder` configuration. This allows you to generate different "profiles" of transactions (e.g., different sizes, complexities, or fees) for different groups of nodes. + - **Isolation**: By using multiple workloads, you can isolate different groups of targets. For example, one workload could simulate high-volume "small" transactions for one group of nodes, while another generates "heavy" transactions for another. + +### Pipeline Architecture +The engine operates as a decoupled production pipeline using generic `input` and `payload` types: +1. **Initial Inputs**: Starting resources are partitioned across workloads and bulk-loaded into each workload's Input Queue at startup. +2. **Input Queue (Unbounded)**: Holds available inputs. +3. **Builder (One per Workload)**: A dedicated thread that pulls inputs, produces a payload, and handles recycling according to the configured strategy (see [Resource Recycling](#resource-recycling)). It pushes the payload to the payload queue. +4. **Payload Queue (Bounded)**: The sole source of **backpressure**. The builder blocks here if consumers are slower than the production rate. +5. **Workers (One per Target)**: Threads that manage the consumer connection. They pull from the payload queue via a rate-limited fetcher. + +### Resource Recycling +To enable indefinite-duration runs with finite resources, inputs must be returned to the `Input Queue`. The `recycle` field on the `builder` selects when this happens. There are three strategies: + +1. 
**`on_build`** — The builder immediately returns resources to the `Input Queue` as soon as the payload is constructed, before it even enters the payload queue. This is the highest-throughput mode but assumes the payload will be successfully processed downstream. + ```json + "recycle": { "type": "on_build" } + ``` +2. **`on_pull`** — The builder pairs the payload with the resources to be recycled as a `(payload, [input])` tuple in the payload queue. When a worker **fetches** this tuple (triggered by a downstream request), the library returns those resources to the `Input Queue` in a separate STM transaction before handing the payload to the worker. Note: recycling happens on fetch, not on downstream acknowledgement — if the worker is killed between fetch and delivery, those inputs are lost. + ```json + "recycle": { "type": "on_pull" } + ``` +3. **`on_confirm`** — Resources stay in a pending map until an **observer** (ChainSync + BlockFetch) confirms the transaction on-chain at the configured confirmation depth. The builder enqueues the payload without any inputs; a background recycler async reads confirmations from the observer's broadcast channel and recycles matching inputs. This is the safest mode for long-running benchmarks where mempool eviction is a concern. + ```json + "recycle": { "type": "on_confirm", "params": "my-observer" } + ``` + +## Configuration + +### Initial Inputs (`initial_inputs`) +The generator requires a set of initial UTxOs, configured in the `initial_inputs` section of the main configuration file. + +- **`type`**: The input loader variant (e.g., `"genesis_utxo_keys"`). +- **`params`**: + - **`network_magic`**: Required for deriving UTxO references from keys (e.g., `42` for testnet). + - **`signing_keys_file`**: Path to a JSON file (e.g., `funds.json`) containing the actual fund data. + +#### `funds.json` entry types +The file contains an array of fund objects. There are two distinct types: + +1. 
**Genesis Funds** (Key-only): Identified only by their signing key. The `TxIn` is derived automatically. + ```json + { "signing_key": "genesis.skey", "value": 1500000000000 } + ``` +2. **Payment Funds** (Explicit UTxO): Requires a specific transaction reference. + ```json + { "signing_key": "payment.skey", "value": 1000000, "tx_in": "df6...#0" } + ``` + +**Design Note**: The `funds.json` format is designed to be compatible with the output of `cardano-cli conway create-testnet-data --utxo-keys`. This allows you to immediately use an arbitrarily large set of Shelley genesis keys created during testnet bootstrapping as the initial fund pool for the generator, without needing to manually create UTxOs once the network is live. + +### Rate Limiting (`rate_limit`) +The `rate_limit` field can be set at the **top level** or at the **workload level** (but not both — setting it at both levels is a validation error). If omitted entirely, targets run **unlimited** (no rate ceiling). + +The `scope` determines the granularity of the TPS ceiling. Available scopes depend on where the rate limit is defined: + +**Top-level scopes:** +- **`shared`** (default): A single rate limiter shared by all targets across all workloads. The configured TPS is the aggregate ceiling. +- **`per_workload`**: Each workload gets its own independent rate limiter at the full configured TPS (shared by its targets). +- **`per_target`**: Every target gets its own independent rate limiter at the full configured TPS. E.g., 10 TPS with 50 targets = 500 TPS aggregate. + +**Workload-level scopes:** +- **`shared`** (default): One rate limiter shared by all targets in the workload. The configured TPS is the aggregate ceiling for the workload. +- **`per_target`**: Every target in the workload gets its own independent rate limiter at the full configured TPS. + +### Cascading Defaults + +Most configuration fields can be set at multiple levels. The most specific value wins: + +- **`builder`**: workload > top-level. 
Setting it at **both** levels is a validation error. At least one must be set (no default). +- **`rate_limit`**: workload > top-level > **unlimited**. Setting it at **both** levels is a validation error. +- **`max_batch_size`**: target > workload > top-level > **1**. +- **`on_exhaustion`**: target > workload > top-level > **`block`**. + +Workload and target names must be non-empty and must not contain `.` or start with `@` (reserved for internal rate-limiter cache keys). + +### Batching and Flow Control +- **`max_batch_size`**: Limits the number of items (e.g., transactions) the generator will announce to a target in a single protocol request. + - This acts as a safety cap: even if a target's protocol allows for 500 items, a `max_batch_size` of 100 ensures the generator doesn't commit too much capacity to a single connection at once. + - This helps distribute the available "payload queue" more evenly across multiple targets and prevents a single aggressive node from starving others. +- **`on_exhaustion`**: + - `block`: The worker thread waits until the builder produces a new payload. + - `error`: The generator fails immediately if the builder cannot keep up with the requested TPS. + +## Cardano Implementation (`tx-centrifuge`) + +### Value Builder Parameters +These parameters define the **transaction profile** for a workload: +- `inputs_per_tx` / `outputs_per_tx`: Controls the transaction structure (size and complexity). +- `fee`: Fixed Lovelace fee per transaction. +- `recycle` (optional): Controls when output UTxOs are returned to the input queue. See [Resource Recycling](#resource-recycling) for the three strategies (`on_build`, `on_pull`, `on_confirm`). When omitted, outputs are **not recycled** — the generator consumes initial funds and eventually exhausts them. + +## Usage + +```bash +tx-centrifuge config.json +``` + +## Detailed Examples + +### 1. High-Throughput (On-Build Recycling) +Optimized for maximum TPS using simple 1-in/1-out transactions. 
Outputs are recycled immediately after building (`on_build`), before the transaction enters the payload queue. + +**`config.json` snippet:** +```json +{ + "initial_inputs": { + "type": "genesis_utxo_keys", + "params": { + "network_magic": 42, + "signing_keys_file": "funds.1.json" + } + }, + "builder": { + "type": "value", + "params": { + "inputs_per_tx": 1, + "outputs_per_tx": 1, + "fee": 1000000 + }, + "recycle": { "type": "on_build" } + }, + "rate_limit": { + "type": "token_bucket", + "scope": "shared", + "params": { "tps": 1000 } + }, + "workloads": { + "simulation": { + "targets": { + "node-0": { "addr": "127.0.0.1", "port": 30000 } + } + } + }, + "nodeConfig": "node-config.json", + "protocol_parameters": { + "epoch_length": 600, + "min_fee_a": 44, + "min_fee_b": 155381 + } +} +``` + +**`funds.1.json` snippet:** +```json +[ + {"signing_key": "utxo1.skey", "value": 1500000000000}, + {"signing_key": "utxo2.skey", "value": 1500000000000} +] +``` + +### 2. Large Transactions (Target-Specific Limits) +Uses complex transactions with independent rate limits for each target connection. Outputs are recycled on fetch (`on_pull`). 
+ +**`config.json` snippet:** +```json +{ + "initial_inputs": { + "type": "genesis_utxo_keys", + "params": { + "network_magic": 42, + "signing_keys_file": "funds.2.json" + } + }, + "builder": { + "type": "value", + "params": { + "inputs_per_tx": 5, + "outputs_per_tx": 5, + "fee": 2000000 + }, + "recycle": { "type": "on_pull" } + }, + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { "tps": 5 } + }, + "max_batch_size": 50, + "on_exhaustion": "block", + "workloads": { + "heavy-load": { + "targets": { + "edge-node": { "addr": "192.168.1.10", "port": 30001 } + } + } + }, + "nodeConfig": "node-config.json", + "protocol_parameters": { + "epoch_length": 600, + "min_fee_a": 44, + "min_fee_b": 155381 + } +} +``` + +**`funds.2.json` snippet:** +```json +[ + {"signing_key": "utxo1.skey", "value": 1000000000}, + {"signing_key": "utxo2.skey", "value": 1000000000}, + {"signing_key": "utxo3.skey", "value": 1000000000} +] +``` + +### 3. Confirmation-Based Recycling (On-Confirm with Observer) +Uses an observer that follows the chain via ChainSync + BlockFetch to track when submitted transactions are confirmed on-chain. Outputs are only recycled back to the input queue after the transaction reaches a confirmation depth of 2 blocks, protecting against mempool eviction and short rollbacks. 
+ +**`config.json` snippet:** +```json +{ + "initial_inputs": { + "type": "genesis_utxo_keys", + "params": { + "network_magic": 42, + "signing_keys_file": "funds.3.json" + } + }, + "observers": { + "chain-follower": { + "type": "nodetonode", + "params": { + "addr": "127.0.0.1", + "port": 30000, + "confirmation_depth": 2 + } + } + }, + "builder": { + "type": "value", + "params": { + "inputs_per_tx": 2, + "outputs_per_tx": 2, + "fee": 1000000 + }, + "recycle": { "type": "on_confirm", "params": "chain-follower" } + }, + "rate_limit": { + "type": "token_bucket", + "scope": "shared", + "params": { "tps": 50 } + }, + "max_batch_size": 500, + "on_exhaustion": "error", + "workloads": { + "confirmed-load": { + "targets": { + "node-0": { "addr": "127.0.0.1", "port": 30000 } + } + } + }, + "nodeConfig": "node-config.json", + "protocol_parameters": { + "epoch_length": 600, + "min_fee_a": 44, + "min_fee_b": 155381 + } +} +``` + +**`funds.3.json` snippet:** +```json +[ + {"signing_key": "utxo1.skey", "value": 1500000000000}, + {"signing_key": "utxo2.skey", "value": 1500000000000}, + {"signing_key": "utxo3.skey", "value": 1500000000000} +] +``` + +With `on_confirm`, the generator needs enough initial funds to cover the in-flight transactions between submission and confirmation. At 50 TPS with a 2-block confirmation depth (~40 seconds on a 20-second slot), roughly 2000 transactions will be pending at any time, so the initial fund pool should have at least that many UTxOs. 
+ +## Internals + +### Package Structure + +``` +tx-centrifuge/ +├── tx-centrifuge.cabal # Package definition +├── app/ +│ └── Main.hs # Executable entry point +├── lib/ +│ ├── pull-fiction/ # Domain-independent load generation library +│ │ └── Cardano/Benchmarking/PullFiction/ +│ │ ├── Config/ +│ │ │ ├── Raw.hs # JSON parsing (no validation) +│ │ │ ├── Validated.hs # Validation + cascading defaults +│ │ │ └── Runtime.hs # STM resources, rate limiters, builder spawning +│ │ ├── Clock.hs # Monotonic time source +│ │ ├── WorkloadRunner.hs # Rate-limited workload execution +│ │ └── Internal/ +│ │ └── RateLimiter.hs # GCRA token bucket +│ │ +│ └── tx-centrifuge/ # Cardano-specific library +│ └── Cardano/Benchmarking/TxCentrifuge/ +│ ├── Fund.hs # UTxO/fund loading from JSON +│ ├── NodeToNode.hs # Multiplexed N2N connection (4 mini-protocols) +│ ├── NodeToNode/ +│ │ ├── KeepAlive.hs # KeepAlive mini-protocol client +│ │ ├── TxIdSync.hs # ChainSync + BlockFetch tx confirmation +│ │ └── TxSubmission.hs # TxSubmission2 mini-protocol client +│ ├── TxAssembly.hs # Transaction building and signing +│ ├── Tracing.hs # Structured logging via trace-dispatcher +│ └── Tracing/ +│ └── Orphans.hs # LogFormatting/MetaTrace instances +│ +├── test/ # Test suites +│ ├── pull-fiction/ # Pull-fiction unit tests +│ └── tx-centrifuge/ # Tx-centrifuge unit tests +│ +└── bench/ # Benchmarks + └── Bench.hs +``` + +### Data Flow + +``` +Raw JSON → Validated Config → Runtime (STM queues, rate limiters, builder asyncs) + │ + ┌──────────────────────────────┘ + ↓ + [Builder Async] per workload + reads TQueue(inputs) → buildTx → TBQueue(payloads, 8192 cap) + ↓ + [Worker Asyncs] per target + GCRA rate-limited fetch → TxSubmission2 pull protocol → cardano-node + ↓ + [Recycler] closed-loop + outputs → back to TQueue(inputs) +``` diff --git a/bench/tx-centrifuge/app/Main.hs b/bench/tx-centrifuge/app/Main.hs new file mode 100644 index 00000000000..1b7a45e8e7f --- /dev/null +++ 
b/bench/tx-centrifuge/app/Main.hs @@ -0,0 +1,565 @@ +{-# LANGUAGE GADTs #-} +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TypeApplications #-} + +-------------------------------------------------------------------------------- + +module Main (main) where + +-------------------------------------------------------------------------------- + +---------- +-- base -- +---------- +import Control.Exception (finally) +import Control.Monad (when) +import Data.Bifunctor (first) +import Data.List.NonEmpty qualified as NE +import Data.Maybe (fromMaybe) +import Data.Monoid (Last(..)) +import Numeric.Natural (Natural) +import System.Environment (getArgs) +import System.Exit (die) +import System.IO (hPutStrLn, stderr) +import Text.Printf (printf) +----------- +-- aeson -- +----------- +import Data.Aeson ((.:), (.:?)) +import Data.Aeson qualified as Aeson +import Data.Aeson.Types qualified as Aeson.Types +----------- +-- async -- +----------- +import Control.Concurrent.Async qualified as Async +---------------- +-- bytestring -- +---------------- +import Data.ByteString.Char8 qualified as BS8 +----------------- +-- cardano-api -- +----------------- +import Cardano.Api qualified as Api +------------------------- +-- cardano-ledger-core -- +------------------------- +import Cardano.Ledger.Coin qualified as L +------------------ +-- cardano-node -- +------------------ +import Cardano.Node.Configuration.POM + ( parseNodeConfigurationFP + , makeNodeConfiguration + , defaultPartialNodeConfiguration + , PartialNodeConfiguration(..) + , NodeConfiguration + , ncProtocolConfig + ) +import Cardano.Node.Handlers.Shutdown (ShutdownConfig(..)) +import Cardano.Node.Protocol.Cardano (mkSomeConsensusProtocolCardano) +import Cardano.Node.Protocol.Types (SomeConsensusProtocol(..)) +import Cardano.Node.Types + ( ConfigYamlFilePath(..) + , NodeProtocolConfiguration(..) + , ProtocolFilepaths(..) 
+ ) +---------------- +-- containers -- +---------------- +import Data.Map.Strict qualified as Map +------------- +-- network -- +------------- +import Network.Socket qualified as Socket +------------------------- +-- ouroboros-consensus -- +------------------------- +import Ouroboros.Consensus.Block.Abstract (CodecConfig) +import Ouroboros.Consensus.Config (configBlock, configCodec) +import Ouroboros.Consensus.Config.SupportsNode (getNetworkMagic) +import Ouroboros.Consensus.Node.ProtocolInfo (ProtocolInfo(..)) +--------------------------------- +-- ouroboros-network-framework -- +--------------------------------- +import Ouroboros.Network.IOManager (withIOManager) +--------- +-- stm -- +--------- +import Control.Concurrent.STM qualified as STM +------------------ +-- transformers -- +------------------ +import Control.Monad.Trans.Except (runExceptT) +------------------ +-- pull-fiction -- +------------------ +import Cardano.Benchmarking.PullFiction.Config.Raw qualified as Raw +import Cardano.Benchmarking.PullFiction.Config.Runtime qualified as Runtime +import Cardano.Benchmarking.PullFiction.Config.Validated qualified as Validated +import Cardano.Benchmarking.PullFiction.WorkloadRunner (runWorkload) +------------------- +-- tx-centrifuge -- +------------------- +import Cardano.Benchmarking.TxCentrifuge.NodeToNode qualified as N2N +import Cardano.Benchmarking.TxCentrifuge.NodeToNode.KeepAlive + qualified as KeepAlive +import Cardano.Benchmarking.TxCentrifuge.NodeToNode.TxIdSync + qualified as TxIdSync +import Cardano.Benchmarking.TxCentrifuge.NodeToNode.TxSubmission + qualified as TxSubmission +import Cardano.Benchmarking.TxCentrifuge.Fund qualified as Fund +import Cardano.Benchmarking.TxCentrifuge.Tracing qualified as Tracing +import Cardano.Benchmarking.TxCentrifuge.TxAssembly qualified as TxAssembly + +-------------------------------------------------------------------------------- + +main :: IO () +main = do + + -- Config. 
+ ---------- + + (validated, codecConfig, networkId, networkMagic, tracers) <- loadConfig + + -- Callbacks. + ------------- + + -- From 'String' (address) and 'Int' (port) to 'AddrInfo'. + let resolveAddr ip port = do + let hints = Socket.defaultHints + { Socket.addrSocketType = Socket.Stream + , Socket.addrFamily = Socket.AF_INET + } + addrs <- Socket.getAddrInfo + (Just hints) + (Just ip) + (Just (show port)) + case addrs of + [] -> die $ "Cannot resolve target: " ++ ip ++ ":" ++ show port + (a:_) -> pure a + + -- Observer factory: called by Runtime.resolve for each observer in the + -- config. Creates a NodeToNode connection with ChainSync + BlockFetch for + -- transaction confirmation tracking, and returns an ObserverHandle. + -- Takes the IOManager as first argument (partial-applied below). + let mkObserver ioManager _observerIndex observerName rawObserver = do + -- From JSON/Aeson.Value to the cardano-node specific observer. + observer <- case interpretObserver rawObserver of + Left err -> die $ "Observer " ++ observerName ++ ": " ++ err + Right o -> pure o + -- BlockFetch + ChainSync state and config. + syncState <- TxIdSync.emptyState + TxIdSync.Config + { TxIdSync.confirmationDepth = observerConfirmationDepth observer } + -- Protocol clients including KeepAlive. 
+ keepAlive <- KeepAlive.keepAliveClient 10 + let clients = N2N.emptyClients + { N2N.clientChainSync = Just $ TxIdSync.chainSyncClient syncState + , N2N.clientBlockFetch = Just $ TxIdSync.blockFetchClient syncState + , N2N.clientKeepAlive = Just keepAlive + } + addrInfo <- resolveAddr (observerAddr observer) (observerPort observer) + pure Runtime.ObserverHandle + { Runtime.ohRun = do + result <- N2N.connect + ioManager + codecConfig + networkMagic + tracers + addrInfo + clients + case result of + Left err -> die $ "observer " ++ observerName ++ ": " ++ err + Right () -> pure () + , Runtime.ohSubscribe = STM.atomically $ + STM.dupTChan (TxIdSync.stateBroadcast syncState) + , -- Transform from whatever the observer returns to something useful. + Runtime.ohExtractKey = TxIdSync.confirmedTxId + } + + -- Builder factory passed to 'Runtime.resolve'. Given a zero-based index, + -- the builder name, and the opaque builder config, returns a BuilderHandle. + let mkBuilder builderIndex builderName rawBuilder = do + -- From JSON/Aeson.Value to the cardano-node specific builder. + builder <- interpretBuilder rawBuilder + -- Based on index we create a new unique address/key for each builder. + let (signingKey, signingAddr) = createSigningKeyAndAddress + networkId + builderIndex + pure Runtime.BuilderHandle + { -- The number of inputs to wait for. + Runtime.bhInputsPerBatch = inputsPerTx builder + -- Build and sign. + , Runtime.bhBuildPayload = \inputFunds -> do + let buildTxAns = TxAssembly.buildTx + signingAddr signingKey + inputFunds (outputsPerTx builder) + (L.Coin (fee builder)) + case buildTxAns of + Left err -> die $ "TxAssembly.buildTx: " ++ err + Right (tx, outputFunds) -> do + -- Trace the building action. + Tracing.traceWith + (Tracing.trBuilder tracers) + (Tracing.mkBuilderNewTx + builderName tx inputFunds outputFunds + ) + -- The TxID is needed for the "on_confirm" recycling strategy. 
+ let txId = Api.getTxId (Api.getTxBody tx) + pure (txId, tx, outputFunds) + } + + -- IOManager: no-op on POSIX, required on Windows for IOCP. All network I/O + -- and cleanup must live inside this block — the handle is invalidated when + -- 'withIOManager' returns. + withIOManager $ \ioManager -> do + -- Resolve runtime: creates observers (via mkObserver), pipes, rate + -- limiters, and spawns builders. All asyncs are linked and tracked. + runtime <- Runtime.resolve + mkBuilder + (mkObserver ioManager) + (\name funds -> + Tracing.traceWith + (Tracing.trBuilder tracers) + (Tracing.mkBuilderRecycle name funds) + ) + validated + -- The 'TargetWorker' callback, called once per 'Target'. + let targetWorker target fetchTx tryFetchTx = do + addrInfo <- resolveAddr + (Runtime.targetAddr target) + (Runtime.targetPort target) + keepAliveClient <- KeepAlive.keepAliveClient 10 + result <- N2N.connect ioManager codecConfig networkMagic tracers addrInfo + N2N.emptyClients + { N2N.clientKeepAlive = Just keepAliveClient + , N2N.clientTxSubmission = Just $ + TxSubmission.txSubmissionClient + (Tracing.trTxSubmission tracers) + (Runtime.targetName target) + (Runtime.maxBatchSize target) + fetchTx tryFetchTx + } + case result of + Left err -> die $ Runtime.targetName target ++ ": " ++ err + Right () -> pure () + -- For each 'Workload'. + workers <- concat <$> mapM + (\workload -> runWorkload workload targetWorker) + (Map.elems $ Runtime.workloads runtime) + -- runWorkload returns unlinked asyncs; link them here so failures + -- propagate to the main thread immediately. + mapM_ Async.link workers + -- All asyncs (builders and workers) are linked to the main thread and run + -- forever. ANY completion — whether by exception or normal return — is + -- fatal: either the pipeline starved ('QueueStarved'), a connection + -- dropped, or a builder failed. + -- + -- 'waitAnyCatch' returns as soon as the first async finishes (without + -- re-throwing, so we keep control). 
'finally cancelAll' then cancels every + -- remaining async before the program exits. + -- + -- 'Async.link' is still needed: if the main thread is blocked in + -- 'waitAnyCatch' waiting on async A but async B dies, 'link' delivers the + -- exception asynchronously, unblocking 'waitAnyCatch' immediately instead + -- of waiting for A to finish first. + let allAsyncs = Runtime.asyncs runtime ++ workers + cancelAll = mapM_ Async.cancel allAsyncs + (_, result) <- flip finally cancelAll $ + Async.waitAnyCatch allAsyncs + case result of + Left ex -> + die $ show ex + Right () -> + die "async terminated unexpectedly" + +-------------------------------------------------------------------------------- +-- Initial funds +-------------------------------------------------------------------------------- + +-- | How to load initial funds for the generator. +-- +-- This type is node-specific (it references signing keys and network magic), +-- so it lives here rather than in the @pull-fiction@ sub-library. The raw JSON +-- config stores this as an opaque 'Aeson.Value'; @Main@ parses it into this ADT +-- and loads funds before passing them to 'Validated.validate'. +data InitialFunds + = GenesisUTxOKeys + !Natural -- ^ Network magic. + !FilePath -- ^ Path to signing keys file. + +instance Aeson.FromJSON InitialFunds where + parseJSON = Aeson.withObject "InitialFunds" $ \o -> do + ty <- o .: "type" :: Aeson.Types.Parser String + case ty of + "genesis_utxo_keys" -> do + p <- o .: "params" + GenesisUTxOKeys <$> p .: "network_magic" <*> p .: "signing_keys_file" + _ -> fail $ "InitialFunds: unknown type " ++ show ty + ++ ", expected \"genesis_utxo_keys\"" + +-------------------------------------------------------------------------------- +-- Builder interpretation +-------------------------------------------------------------------------------- + +-- | Interpreted "value" builder configuration with defaults applied. 
+data ValueBuilder + = ValueBuilder + { inputsPerTx :: !Natural + , outputsPerTx :: !Natural + , fee :: !Integer + } + +-- | Interpret a 'Raw.Builder' (opaque type + params) into a concrete +-- 'ValueBuilder'. Applies defaults (@inputs_per_tx@ = 1, @outputs_per_tx@ = 1) +-- and validates invariants. +interpretBuilder :: Raw.Builder -> IO ValueBuilder +interpretBuilder raw = case Raw.builderType raw of + "value" -> + case Aeson.Types.parseEither parseValueParams (Raw.builderParams raw) of + Left err -> die $ "Builder params: " ++ err + Right (maybeInputs, maybeOutputs, rawFee) -> do + let nInputs = fromMaybe 1 maybeInputs + nOutputs = fromMaybe 1 maybeOutputs + when (nInputs == 0) $ die "Builder: inputs_per_tx must be >= 1" + when (nOutputs == 0) $ die "Builder: outputs_per_tx must be >= 1" + when (rawFee < 0) $ die "Builder: fee must be >= 0" + pure ValueBuilder + { inputsPerTx = nInputs + , outputsPerTx = nOutputs + , fee = rawFee + } + other -> die $ + "Builder: unknown type " ++ show other ++ ", expected \"value\"" + where + parseValueParams = Aeson.withObject "ValueParams" $ \o -> + (,,) <$> o .:? "inputs_per_tx" + <*> o .:? "outputs_per_tx" + <*> o .: "fee" + +-------------------------------------------------------------------------------- +-- Observer interpretation +-------------------------------------------------------------------------------- + +-- | Interpreted observer. +data Observer + -- | A concrete chain-following endpoint for transaction confirmation tracking + -- via ChainSync + BlockFetch. + = NodeToNode + { observerAddr :: !String + , observerPort :: !Int + , observerConfirmationDepth :: !Natural + } + +-- | Interpret 'Raw.Observer' (opaque type + params) into a concrete 'Observer'. 
+interpretObserver :: Raw.Observer -> Either String Observer +interpretObserver raw = case Raw.observerType raw of + "nodetonode" -> + case Aeson.Types.parseEither parseParams (Raw.observerParams raw) of + Left err -> Left $ "Observer params: " ++ err + Right n2n -> Right n2n + other -> Left $ + "Observer: unknown \"type\" " ++ show other ++ ", expected \"nodetonode\"" + where + parseParams = Aeson.withObject "ObserverParams" $ \o -> + NodeToNode <$> o .: "addr" + <*> o .: "port" + <*> o .: "confirmation_depth" + +-------------------------------------------------------------------------------- +-- Signing key loading +-------------------------------------------------------------------------------- + +-- | Load a signing key from a hex string, applying an integer suffix to the +-- last 3 hex characters, and derive its address. +createSigningKeyAndAddress + :: Api.NetworkId + -> Int + -- Signing key used for all generated transactions. + -- Destination address derived from the signing key. + -> (Api.SigningKey Api.PaymentKey, Api.AddressInEra Api.ConwayEra) +createSigningKeyAndAddress networkId n + | n < 0 || n > 999 = + error $ "createSigningKeyAndAddress: out of range (0-999): " ++ show n + | otherwise = + let -- Hex string (32 bytes = 64 hex chars). + -- We use 61 chars + 3 chars suffix = 64 chars total. + -- If the input string is a CBOR-encoded hex string (e.g. from an + -- .skey file), strip the first 4 characters ("5820") which represent + -- the CBOR type and length prefix for 32 bytes of raw data. 
+ prefix = "bed03030fd08a600647d99fa7cd94dae3ddab99b199c3f08f81949db3e422" + suffix = printf "%03d" n + hex = prefix ++ suffix + in case Api.deserialiseFromRawBytesHex @(Api.SigningKey Api.PaymentKey) (BS8.pack hex) of + Left err -> + error $ "createSigningKeyAndAddress: Failed to deserialise: " ++ show err + Right signingKey -> + let signingAddr = + Api.shelleyAddressInEra + (Api.shelleyBasedEra @Api.ConwayEra) $ + Api.makeShelleyAddress networkId + (Api.PaymentCredentialByKey + (Api.verificationKeyHash + (Api.getVerificationKey signingKey))) + Api.NoStakeAddress + in (signingKey, signingAddr) + +-------------------------------------------------------------------------------- +-- Cardano parameters +-------------------------------------------------------------------------------- + +{-- TODO: Construct a minimal protocol parameters, see Tx.hs last line. +data ProtocolParameters = ProtocolParameters + { epochLength :: Integer + , minFeeA :: Integer + , minFeeB :: Integer + } + +instance Aeson.FromJSON ProtocolParameters where + parseJSON = Aeson.withObject "ProtocolParameters" $ \o -> do + pp <- o .: "params" + ProtocolParameters <$> pp .: "epoch_length" <*> pp .: "min_fee_a" <*> pp .: "min_fee_b" +--} + +-------------------------------------------------------------------------------- +-- Initialization +-------------------------------------------------------------------------------- + +-- | Parse CLI args, load all configuration files, create protocol, +-- generate a signing key, load initial funds, and validate config. +-- +-- Returns a 'Validated.Config' (validated but not yet resolved into a +-- 'Runtime.Runtime'). The caller is responsible for calling +-- 'Runtime.resolve' to create STM resources. +loadConfig + :: IO ( -- | Validated configuration (no STM resources yet). + Validated.Config Fund.Fund + -- | Codec config for serialising blocks on the wire. + , CodecConfig N2N.CardanoBlock + , Api.NetworkId + -- | Network magic for the handshake with cardano-node. 
+ , Api.NetworkMagic + -- | Logging / metrics tracers. + , Tracing.Tracers + ) +loadConfig = do + args <- getArgs + configFile <- case args of + [f] -> pure f + _ -> die "Usage: tx-centrifuge " + + hPutStrLn stderr "=== Tx Centrifuge ===" + hPutStrLn stderr "" + + -- Decode the full JSON object once; extract node-specific paths here (like + -- setupTracers reads trace config from the same file independently) and pass + -- the rest to the Raw → Validated → Runtime pipeline. + hPutStrLn stderr $ "Loading config from: " ++ configFile + rawValue <- Aeson.eitherDecodeFileStrict' configFile + >>= either (\e -> die $ "JSON: " ++ e) pure + let parseField field = + case Aeson.Types.parseEither (Aeson.withObject "Config" (.: field)) rawValue of + Left err -> die $ "Config: " ++ err + Right v -> pure v + nodeConfigPath <- parseField "nodeConfig" + raw <- case Aeson.fromJSON rawValue of + Aeson.Error err -> die $ "JSON: " ++ err + Aeson.Success cfg -> pure cfg + + -- Load initial funds. + -- Parse the opaque initialInputs JSON into the node-level InitialFunds ADT, + -- then load actual UTxO funds before validation. + funds <- case Aeson.fromJSON (Raw.initialInputs raw) of + Aeson.Error err -> die $ "initialInputs: " ++ err + Aeson.Success (GenesisUTxOKeys magic path) -> do + hPutStrLn stderr $ "Loading funds from: " ++ path + result <- Fund.loadFunds (magicToNetworkId magic) path + case result of + Left err -> die ("Fund.loadFunds: " ++ err) + Right [] -> die "Fund.loadFunds: no funds loaded" + Right (f:fs) -> do + let allFunds = f NE.:| fs + hPutStrLn stderr $ " Loaded " ++ show (NE.length allFunds) ++ " funds" + pure allFunds + -- Validate config. + -- Pipeline: Raw → Validated (with pre-loaded funds). + validated <- either die pure $ Validated.validate raw funds + + -- Load node configuration and create consensus protocol. 
+ hPutStrLn stderr $ "Loading node config from: " ++ nodeConfigPath + nodeConfig <- mkNodeConfig nodeConfigPath >>= either die pure + protocol <- mkConsensusProtocol nodeConfig >>= either die pure + let codecConfig = protocolToCodecConfig protocol + networkId = protocolToNetworkId protocol + networkMagic = protocolToNetworkMagic protocol + + -- Tracers. + tracers <- Tracing.setupTracers configFile + + pure ( validated, codecConfig, networkId, networkMagic, tracers ) + +-------------------------------------------------------------------------------- +-- Protocol helpers (inlined from NodeConfig.hs and OuroborosImports.hs) +-------------------------------------------------------------------------------- + +mkNodeConfig :: FilePath -> IO (Either String NodeConfiguration) +mkNodeConfig configFp_ = do + configYamlPc <- parseNodeConfigurationFP . Just $ configFp + pure $ first show $ makeNodeConfiguration (configYamlPc <> filesPc) + where + configFp = ConfigYamlFilePath configFp_ + filesPc :: PartialNodeConfiguration + filesPc = defaultPartialNodeConfiguration + { pncProtocolFiles = Last . 
Just $ + ProtocolFilepaths + { byronCertFile = Just "" + , byronKeyFile = Just "" + , shelleyKESFile = Just "" + , shelleyVRFFile = Just "" + , shelleyCertFile = Just "" + , shelleyBulkCredsFile = Just "" + } + , pncShutdownConfig = Last $ Just $ ShutdownConfig Nothing Nothing + , pncConfigFile = Last $ Just configFp + } + +mkConsensusProtocol + :: NodeConfiguration -> IO (Either String SomeConsensusProtocol) +mkConsensusProtocol nodeConfig = + case ncProtocolConfig nodeConfig of + NodeProtocolConfigurationCardano + byronCfg shelleyCfg alonzoCfg conwayCfg + dijkstraCfg hardforkCfg checkpointsCfg -> + first show <$> + runExceptT (mkSomeConsensusProtocolCardano + byronCfg shelleyCfg alonzoCfg conwayCfg + dijkstraCfg hardforkCfg checkpointsCfg Nothing) + +protocolToCodecConfig :: SomeConsensusProtocol -> CodecConfig N2N.CardanoBlock +protocolToCodecConfig (SomeConsensusProtocol Api.CardanoBlockType info) = + configCodec $ pInfoConfig $ fst $ Api.protocolInfo @IO info +protocolToCodecConfig _ = + error "protocolToCodecConfig: non-Cardano protocol" + +-- | Derive NetworkId from the consensus config. Mainnet uses a +-- well-known magic number; everything else is a testnet. +protocolToNetworkId :: SomeConsensusProtocol -> Api.NetworkId +protocolToNetworkId proto = case protocolToNetworkMagic proto of + Api.NetworkMagic 764824073 -> Api.Mainnet + nm -> Api.Testnet nm + +protocolToNetworkMagic :: SomeConsensusProtocol -> Api.NetworkMagic +protocolToNetworkMagic + (SomeConsensusProtocol Api.CardanoBlockType info) = + getNetworkMagic $ configBlock $ pInfoConfig $ + fst $ Api.protocolInfo @IO info +protocolToNetworkMagic _ = + error "protocolToNetworkMagic: non-Cardano protocol" + +-- | Convert a raw network magic number to a 'Api.NetworkId'. +-- Mainnet uses the well-known magic 764824073; everything else is a testnet. 
+magicToNetworkId :: Natural -> Api.NetworkId +magicToNetworkId 764824073 = Api.Mainnet +magicToNetworkId n = Api.Testnet (Api.NetworkMagic (fromIntegral n)) diff --git a/bench/tx-centrifuge/bench/Bench.hs b/bench/tx-centrifuge/bench/Bench.hs new file mode 100644 index 00000000000..756aab580b1 --- /dev/null +++ b/bench/tx-centrifuge/bench/Bench.hs @@ -0,0 +1,52 @@ +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE NumericUnderscores #-} + +-------------------------------------------------------------------------------- + +module Main where + +-------------------------------------------------------------------------------- + +--------------- +-- criterion -- +--------------- +import Criterion.Main qualified as Criterion +------------- +-- deepseq -- +------------- +import Control.DeepSeq (NFData (..), deepseq) +--------------------- +-- tx-centrifuge -- +--------------------- +import Paths_tx_centrifuge qualified as Paths +import Test.PullFiction.Harness qualified as Harness + +-------------------------------------------------------------------------------- + +-- | Local wrapper so Criterion can force benchmark results without requiring an +-- NFData instance in the test-harness library. 
+newtype BenchResult = BenchResult Harness.TestResult + +instance NFData BenchResult where + rnf (BenchResult result) = + Harness.elapsedSeconds result `seq` + Harness.targetCounts result `deepseq` () + +-------------------------------------------------------------------------------- + +main :: IO () +main = do + sharedPath <- Paths.getDataFileName "data/config-shared-100k.json" + perTargetPath <- Paths.getDataFileName "data/config-per-target-200.json" + Criterion.defaultMain + [ Criterion.bgroup "generator-throughput" + [ Criterion.bench + "shared-limiter-100k-tps-50-targets" + $ Criterion.nfIO + $ BenchResult <$> Harness.runTpsTest sharedPath 5.0 + , Criterion.bench + "per-target-limiter-200-tps-50-targets" + $ Criterion.nfIO + $ BenchResult <$> Harness.runTpsTest perTargetPath 5.0 + ] + ] diff --git a/bench/tx-centrifuge/data/config-multi-group.json b/bench/tx-centrifuge/data/config-multi-group.json new file mode 100644 index 00000000000..974d84b4569 --- /dev/null +++ b/bench/tx-centrifuge/data/config-multi-group.json @@ -0,0 +1,819 @@ +{ + "initial_inputs": { + "type": "genesis_utxo_keys", + "params": { + "network_magic": 0, + "signing_keys_file": "/dev/null" + } + }, + "builder": { + "type": "value", + "params": { + "fee": 200000 + }, + "recycle": { "type": "on_pull" } + }, + + "workloads": { + "group-01": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-01": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-02": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-02": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-03": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-03": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + 
"group-04": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-04": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-05": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-05": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-06": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-06": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-07": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-07": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-08": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-08": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-09": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-09": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-10": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-10": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-11": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-11": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-12": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-12": { + "addr": 
"127.0.0.1", + "port": 3001 + } + } + }, + "group-13": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-13": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-14": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-14": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-15": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-15": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-16": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-16": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-17": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-17": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-18": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-18": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-19": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-19": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-20": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-20": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-21": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + 
"targets": { + "group-21": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-22": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-22": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-23": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-23": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-24": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-24": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-25": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-25": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-26": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-26": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-27": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-27": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-28": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-28": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-29": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-29": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-30": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + 
} + }, + "max_batch_size": 500, + "targets": { + "group-30": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-31": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-31": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-32": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-32": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-33": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-33": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-34": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-34": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-35": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-35": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-36": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-36": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-37": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-37": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-38": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-38": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-39": { + "rate_limit": { + "type": "token_bucket", + "scope": 
"per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-39": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-40": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-40": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-41": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-41": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-42": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-42": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-43": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-43": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-44": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-44": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-45": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-45": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-46": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-46": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-47": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-47": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-48": { + "rate_limit": { + 
"type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-48": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-49": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-49": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-50": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-50": { + "addr": "127.0.0.1", + "port": 3001 + } + } + } + } +} diff --git a/bench/tx-centrifuge/data/config-per-target-0_2.json b/bench/tx-centrifuge/data/config-per-target-0_2.json new file mode 100644 index 00000000000..45f8686964c --- /dev/null +++ b/bench/tx-centrifuge/data/config-per-target-0_2.json @@ -0,0 +1,231 @@ +{ + "initial_inputs": { + "type": "genesis_utxo_keys", + "params": { + "network_magic": 0, + "signing_keys_file": "/dev/null" + } + }, + "builder": { + "type": "value", + "params": { + "fee": 200000 + }, + "recycle": { "type": "on_pull" } + }, + + "workloads": { + "default": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 0.2 + } + }, + "max_batch_size": 500, + "targets": { + "node-01": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-02": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-03": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-04": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-05": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-06": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-07": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-08": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-09": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-10": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-11": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-12": { + "addr": 
"127.0.0.1", + "port": 3001 + }, + "node-13": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-14": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-15": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-16": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-17": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-18": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-19": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-20": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-21": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-22": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-23": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-24": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-25": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-26": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-27": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-28": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-29": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-30": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-31": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-32": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-33": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-34": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-35": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-36": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-37": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-38": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-39": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-40": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-41": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-42": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-43": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-44": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-45": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-46": { + "addr": "127.0.0.1", + "port": 3001 
+ }, + "node-47": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-48": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-49": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-50": { + "addr": "127.0.0.1", + "port": 3001 + } + } + } + } +} diff --git a/bench/tx-centrifuge/data/config-per-target-200.json b/bench/tx-centrifuge/data/config-per-target-200.json new file mode 100644 index 00000000000..e43fc99f555 --- /dev/null +++ b/bench/tx-centrifuge/data/config-per-target-200.json @@ -0,0 +1,231 @@ +{ + "initial_inputs": { + "type": "genesis_utxo_keys", + "params": { + "network_magic": 0, + "signing_keys_file": "/dev/null" + } + }, + "builder": { + "type": "value", + "params": { + "fee": 200000 + }, + "recycle": { "type": "on_pull" } + }, + + "workloads": { + "default": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 4000 + } + }, + "max_batch_size": 500, + "targets": { + "node-01": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-02": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-03": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-04": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-05": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-06": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-07": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-08": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-09": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-10": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-11": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-12": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-13": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-14": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-15": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-16": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-17": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-18": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-19": { + 
"addr": "127.0.0.1", + "port": 3001 + }, + "node-20": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-21": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-22": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-23": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-24": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-25": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-26": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-27": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-28": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-29": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-30": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-31": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-32": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-33": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-34": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-35": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-36": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-37": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-38": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-39": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-40": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-41": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-42": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-43": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-44": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-45": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-46": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-47": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-48": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-49": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-50": { + "addr": "127.0.0.1", + "port": 3001 + } + } + } + } +} diff --git a/bench/tx-centrifuge/data/config-per-target-2k.json b/bench/tx-centrifuge/data/config-per-target-2k.json new file mode 100644 index 
00000000000..b7c51b87e20 --- /dev/null +++ b/bench/tx-centrifuge/data/config-per-target-2k.json @@ -0,0 +1,231 @@ +{ + "initial_inputs": { + "type": "genesis_utxo_keys", + "params": { + "network_magic": 0, + "signing_keys_file": "/dev/null" + } + }, + "builder": { + "type": "value", + "params": { + "fee": 200000 + }, + "recycle": { "type": "on_pull" } + }, + + "workloads": { + "default": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 2000 + } + }, + "max_batch_size": 500, + "targets": { + "node-01": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-02": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-03": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-04": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-05": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-06": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-07": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-08": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-09": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-10": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-11": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-12": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-13": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-14": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-15": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-16": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-17": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-18": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-19": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-20": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-21": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-22": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-23": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-24": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-25": { + "addr": "127.0.0.1", + "port": 3001 + }, + 
"node-26": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-27": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-28": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-29": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-30": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-31": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-32": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-33": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-34": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-35": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-36": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-37": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-38": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-39": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-40": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-41": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-42": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-43": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-44": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-45": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-46": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-47": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-48": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-49": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-50": { + "addr": "127.0.0.1", + "port": 3001 + } + } + } + } +} diff --git a/bench/tx-centrifuge/data/config-shared-10.json b/bench/tx-centrifuge/data/config-shared-10.json new file mode 100644 index 00000000000..469fa559e82 --- /dev/null +++ b/bench/tx-centrifuge/data/config-shared-10.json @@ -0,0 +1,231 @@ +{ + "initial_inputs": { + "type": "genesis_utxo_keys", + "params": { + "network_magic": 0, + "signing_keys_file": "/dev/null" + } + }, + "builder": { + "type": "value", + "params": { + "fee": 200000 + }, + "recycle": { "type": "on_pull" } + }, + + "workloads": { + "default": { + 
"rate_limit": { + "type": "token_bucket", + "scope": "shared", + "params": { + "tps": 10 + } + }, + "max_batch_size": 500, + "targets": { + "node-01": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-02": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-03": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-04": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-05": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-06": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-07": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-08": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-09": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-10": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-11": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-12": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-13": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-14": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-15": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-16": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-17": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-18": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-19": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-20": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-21": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-22": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-23": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-24": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-25": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-26": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-27": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-28": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-29": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-30": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-31": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-32": { + "addr": "127.0.0.1", + "port": 3001 + }, + 
"node-33": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-34": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-35": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-36": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-37": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-38": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-39": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-40": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-41": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-42": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-43": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-44": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-45": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-46": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-47": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-48": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-49": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-50": { + "addr": "127.0.0.1", + "port": 3001 + } + } + } + } +} diff --git a/bench/tx-centrifuge/data/config-shared-100k.json b/bench/tx-centrifuge/data/config-shared-100k.json new file mode 100644 index 00000000000..cdbc551457e --- /dev/null +++ b/bench/tx-centrifuge/data/config-shared-100k.json @@ -0,0 +1,231 @@ +{ + "initial_inputs": { + "type": "genesis_utxo_keys", + "params": { + "network_magic": 0, + "signing_keys_file": "/dev/null" + } + }, + "builder": { + "type": "value", + "params": { + "fee": 200000 + }, + "recycle": { "type": "on_pull" } + }, + + "workloads": { + "default": { + "rate_limit": { + "type": "token_bucket", + "scope": "shared", + "params": { + "tps": 100000 + } + }, + "max_batch_size": 500, + "targets": { + "node-01": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-02": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-03": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-04": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-05": { + "addr": 
"127.0.0.1", + "port": 3001 + }, + "node-06": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-07": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-08": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-09": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-10": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-11": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-12": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-13": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-14": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-15": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-16": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-17": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-18": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-19": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-20": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-21": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-22": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-23": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-24": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-25": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-26": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-27": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-28": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-29": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-30": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-31": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-32": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-33": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-34": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-35": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-36": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-37": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-38": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-39": { + "addr": "127.0.0.1", + "port": 3001 
+ }, + "node-40": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-41": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-42": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-43": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-44": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-45": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-46": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-47": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-48": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-49": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-50": { + "addr": "127.0.0.1", + "port": 3001 + } + } + } + } +} diff --git a/bench/tx-centrifuge/data/protocol-parameters.ci-test.json b/bench/tx-centrifuge/data/protocol-parameters.ci-test.json new file mode 100644 index 00000000000..832d72f2f1e --- /dev/null +++ b/bench/tx-centrifuge/data/protocol-parameters.ci-test.json @@ -0,0 +1,461 @@ +{ + "collateralPercentage": 150, + "costModels": { + "PlutusV1": [ + 197209, + 0, + 1, + 1, + 396231, + 621, + 0, + 1, + 150000, + 1000, + 0, + 1, + 150000, + 32, + 2477736, + 29175, + 4, + 29773, + 100, + 29773, + 100, + 29773, + 100, + 29773, + 100, + 29773, + 100, + 29773, + 100, + 100, + 100, + 29773, + 100, + 150000, + 32, + 150000, + 32, + 150000, + 32, + 150000, + 1000, + 0, + 1, + 150000, + 32, + 150000, + 1000, + 0, + 8, + 148000, + 425507, + 118, + 0, + 1, + 1, + 150000, + 1000, + 0, + 8, + 150000, + 112536, + 247, + 1, + 150000, + 10000, + 1, + 136542, + 1326, + 1, + 1000, + 150000, + 1000, + 1, + 150000, + 32, + 150000, + 32, + 150000, + 32, + 1, + 1, + 150000, + 1, + 150000, + 4, + 103599, + 248, + 1, + 103599, + 248, + 1, + 145276, + 1366, + 1, + 179690, + 497, + 1, + 150000, + 32, + 150000, + 32, + 150000, + 32, + 150000, + 32, + 150000, + 32, + 150000, + 32, + 148000, + 425507, + 118, + 0, + 1, + 1, + 61516, + 11218, + 0, + 1, + 150000, + 32, + 148000, + 425507, + 118, + 0, + 1, + 1, + 148000, + 425507, + 118, + 0, + 1, + 1, + 
2477736, + 29175, + 4, + 0, + 82363, + 4, + 150000, + 5000, + 0, + 1, + 150000, + 32, + 197209, + 0, + 1, + 1, + 150000, + 32, + 150000, + 32, + 150000, + 32, + 150000, + 32, + 150000, + 32, + 150000, + 32, + 150000, + 32, + 3345831, + 1, + 1 + ], + "PlutusV3": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + }, + "decentralization": null, + "executionUnitPrices": { + "priceMemory": 5.77e-2, + "priceSteps": 7.21e-5 + }, + "extraPraosEntropy": null, + "maxBlockBodySize": 65536, + "maxBlockExecutionUnits": { + "memory": 50000000, + "steps": 40000000000 + }, + "maxBlockHeaderSize": 1100, + "maxCollateralInputs": 3, + "maxTxExecutionUnits": { + "memory": 10000000, + "steps": 10000000000 + }, + "maxTxSize": 16384, + "maxValueSize": 5000, + "minPoolCost": 340000000, + 
"minUTxOValue": null, + "monetaryExpansion": 3.0e-3, + "poolPledgeInfluence": 0.3, + "poolRetireMaxEpoch": 18, + "protocolVersion": { + "major": 6, + "minor": 0 + }, + "stakeAddressDeposit": 2000000, + "stakePoolDeposit": 500000000, + "stakePoolTargetNum": 500, + "treasuryCut": 0.2, + "txFeeFixed": 155381, + "txFeePerByte": 44, + "utxoCostPerByte": 4310 +} \ No newline at end of file diff --git a/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Clock.hs b/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Clock.hs new file mode 100644 index 00000000000..67912f5aebb --- /dev/null +++ b/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Clock.hs @@ -0,0 +1,69 @@ +{-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE ImportQualifiedPost #-} + +-------------------------------------------------------------------------------- + +-- | Single source of truth for monotonic time across the pull-fiction library. +-- +-- Every module in the package must obtain timestamps through this module rather +-- than importing @System.Clock@ directly. This guarantees that all call sites +-- use the same clock ('Clock.MonotonicRaw') and prevents hard-to-diagnose bugs +-- caused by accidentally mixing different clocks (e.g. 'Clock.Monotonic' vs +-- 'Clock.MonotonicRaw'), which can produce negative deltas or phantom drift on +-- systems where NTP adjusts the non-raw monotonic source. +-- +-- 'TimeSpec' is a @newtype@ over 'Clock.TimeSpec' so that code importing +-- @System.Clock@ directly cannot accidentally pass its timestamps to functions +-- expecting this module's 'TimeSpec', and vice versa. + +module Cardano.Benchmarking.PullFiction.Clock + ( -- * Types. + TimeSpec + -- * Reading the clock. + , getTime + -- * Conversions. 
+ , toNanoSecs + , fromNanoSecs + ) where + +-------------------------------------------------------------------------------- + +----------- +-- clock -- +----------- +import System.Clock qualified as Clock + +-------------------------------------------------------------------------------- + +-- | Opaque monotonic timestamp. +-- +-- A @newtype@ wrapper that ensures only timestamps obtained via 'getTime' +-- (which always reads 'Clock.MonotonicRaw') are used in the core library. +-- +-- Internally a 'Clock.TimeSpec' stores two fields: @sec@ (seconds) and +-- @nsec@ (nanoseconds within the current second, 0–999 999 999). The derived +-- 'Num' instance normalizes after every operation: carries and borrows +-- between @sec@ and @nsec@ are handled automatically, so @timeA - timeB@ +-- always produces a correctly normalized result even when the nanoseconds +-- component underflows. +newtype TimeSpec = TimeSpec Clock.TimeSpec + deriving (Eq, Ord, Show, Num) + +-- | Read the monotonic raw clock. All timing in the package goes through this +-- function so a single clock source is used everywhere. +getTime :: IO TimeSpec +getTime = TimeSpec <$> Clock.getTime Clock.MonotonicRaw + +-- | Convert a 'TimeSpec' to __total__ nanoseconds. +-- +-- Returns @sec * 1 000 000 000 + nsec@, not just the @nsec@ field. +-- For example, @TimeSpec 2 500000000@ (2.5 s) yields @2 500 000 000@. +toNanoSecs :: TimeSpec -> Integer +toNanoSecs (TimeSpec ts) = Clock.toNanoSecs ts + +-- | Convert total nanoseconds to a 'TimeSpec'. +-- +-- Splits via @divMod@ into @sec@ and @nsec@ so the result is always +-- normalized (e.g. @fromNanoSecs 2500000000@ gives @TimeSpec 2 500000000@). +fromNanoSecs :: Integer -> TimeSpec +fromNanoSecs = TimeSpec . 
Clock.fromNanoSecs diff --git a/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Config/Raw.hs b/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Config/Raw.hs new file mode 100644 index 00000000000..405631f7276 --- /dev/null +++ b/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Config/Raw.hs @@ -0,0 +1,343 @@ +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE OverloadedStrings #-} + +-------------------------------------------------------------------------------- + +-- | Raw load-generator configuration parsed from JSON. +-- +-- A plain Aeson parser with no extra logic. Each 'FromJSON' instance is a +-- direct transformation from JSON values to Haskell base types ('String', +-- 'Natural', 'Double', 'Int', etc.); optional fields are 'Maybe' and named +-- collections are @'Map' 'String'@. No defaults are applied, no business rules +-- are checked, and no cross-field relationships are enforced. +-- All of that is the responsibility of +-- "Cardano.Benchmarking.PullFiction.Config.Validated". +-- +-- All data constructors and fields are exported so that test code can build +-- configuration values directly without going through JSON. +-- +-- __Import qualified.__ Field names clash with +-- "Cardano.Benchmarking.PullFiction.Config.Validated" and +-- "Cardano.Benchmarking.PullFiction.Config.Runtime". +module Cardano.Benchmarking.PullFiction.Config.Raw + ( + -- * Config. + Config (..) + + -- * Observer. + , Observer (..) + + -- * Builder. + , Builder (..) + + -- * Recycle strategy. + , RecycleStrategy (..) + + -- * RateLimit. + , RateLimit (..) + -- ** TopLevelScope. + , TopLevelScope (..) + -- ** WorkloadScope. + , WorkloadScope (..) + + -- * OnExhaustion. + , OnExhaustion (..) + + -- * Workload. + , Workload (..) + + -- * Target. + , Target (..) 
+ + ) where + +-------------------------------------------------------------------------------- + +---------- +-- base -- +---------- +import Control.Monad (when) +import Numeric.Natural (Natural) +----------- +-- aeson -- +----------- +import Data.Aeson qualified as Aeson +import Data.Aeson ((.:), (.:?)) +import Data.Aeson.Types qualified as Aeson.Types +---------------- +-- containers -- +---------------- +import Data.Map.Strict (Map) + +-------------------------------------------------------------------------------- + +-- | Top-level configuration as parsed from JSON. +-- +-- No invariants are enforced. Use 'validate' from +-- "Cardano.Benchmarking.PullFiction.Config.Validated" to apply business +-- rules and cascading defaults. +data Config = Config + { -- | Raw JSON value describing how to load initial inputs. + -- Interpretation is left to the caller (e.g. @Main.hs@). + initialInputs :: !Aeson.Value + -- | Optional @\"observers\"@ map (keyed by name). + -- Because Aeson decodes JSON objects into a 'Map', duplicate observer names + -- are silently discarded (last value wins). + , maybeObservers :: !(Maybe (Map String Observer)) + -- | Optional top level @\"builder\"@. + , maybeTopLevelBuilder :: !(Maybe Builder) + -- | Optional top-level @\"rate_limit\"@. + , maybeTopLevelRateLimit :: !(Maybe (Maybe TopLevelScope, RateLimit)) + -- | Optional top-level @\"max_batch_size\"@. + , maybeTopLevelMaxBatchSize :: !(Maybe Natural) + -- | Optional top-level @\"on_exhaustion\"@. + , maybeTopLevelOnExhaustion :: !(Maybe OnExhaustion) + -- | Optional generator workloads keyed by name. + -- Because Aeson decodes JSON objects into a 'Map', duplicate workload names + -- are silently discarded (last value wins). + , maybeWorkloads :: !(Maybe (Map String Workload)) + } + deriving (Show, Eq) + +instance Aeson.FromJSON Config where + parseJSON = Aeson.withObject "Config" $ \o -> + Config + <$> o .: "initial_inputs" + <*> o .:? "observers" + <*> o .:? 
        "builder"
    <*> Aeson.Types.explicitParseFieldMaybe parseTopLevelRateLimit o
        "rate_limit"
    <*> o .:? "max_batch_size"
    <*> o .:? "on_exhaustion"
    <*> o .:? "workloads"

--------------------------------------------------------------------------------

-- | Opaque observer configuration.
--
-- Carries a @\"type\"@ discriminator and an opaque @\"params\"@ object.
-- Interpretation of the params is the caller's responsibility (see @Main.hs@),
-- just like 'initialInputs' and 'Builder'.
data Observer = Observer
  { -- | Observer variant name (e.g. @\"nodetonode\"@). Non-empty.
    observerType :: !String
    -- | Opaque params object for the variant.
  , observerParams :: !Aeson.Value
  }
  deriving (Show, Eq)

instance Aeson.FromJSON Observer where
  parseJSON = Aeson.withObject "Observer" $ \o -> do
    -- The annotation pins the field to String; without it the FromJSON
    -- instance to use would be ambiguous.
    ty <- o .: "type" :: Aeson.Types.Parser String
    when (null ty) $ fail "Observer: \"type\" must be non-empty"
    Observer ty <$> o .: "params"

--------------------------------------------------------------------------------

-- | Opaque builder configuration.
--
-- Carries a @\"type\"@ discriminator and an opaque @\"params\"@ object.
-- Interpretation of the params is the caller's responsibility (see @Main.hs@),
-- just like 'initialInputs'.
data Builder = Builder
  { -- | Builder variant name (e.g. @\"value\"@). Non-empty.
    builderType :: !String
    -- | Opaque params object for the variant.
  , builderParams :: !Aeson.Value
    -- | Optional recycle strategy. 'Nothing' means no recycling.
  , builderRecycle :: !(Maybe RecycleStrategy)
  }
  deriving (Show, Eq)

instance Aeson.FromJSON Builder where
  parseJSON = Aeson.withObject "Builder" $ \o -> do
    ty <- o .: "type" :: Aeson.Types.Parser String
    when (null ty) $ fail "Builder: \"type\" must be non-empty"
    -- ".:?" yields 'Nothing' both when the key is absent and when it is null.
    Builder ty <$> o .: "params"
               <*> o .:? "recycle"

--------------------------------------------------------------------------------

-- | When to recycle transaction outputs back to the input queue.
data RecycleStrategy
  -- | Recycle immediately after building, before entering the payload queue.
  = RecycleOnBuild
  -- | Recycle when a worker fetches the payload from the queue.
  | RecycleOnPull
  -- | Recycle when an observer confirms the payload. Carries the observer name.
  | RecycleOnConfirm !String
  deriving (Show, Eq)

instance Aeson.FromJSON RecycleStrategy where
  parseJSON = Aeson.withObject "RecycleStrategy" $ \o -> do
    ty <- o .: "type" :: Aeson.Types.Parser String
    case ty of
      "on_build" -> pure RecycleOnBuild
      "on_pull" -> pure RecycleOnPull
      -- Only "on_confirm" needs "params" (the observer name).
      "on_confirm" ->
        RecycleOnConfirm <$> o .: "params"
      _ -> fail $ "RecycleStrategy: unknown \"type\" " ++ show ty
        ++ ", expected \"on_build\", \"on_pull\", or \"on_confirm\""

--------------------------------------------------------------------------------

-- | Scope of a top-level rate limiter.
--
-- There is no @Distributed@ scope. A \"distributed\" mode would be equivalent
-- to 'TopPerWorkload' or 'TopPerTarget' but with the TPS divided internally by
-- the number of sub-entities. We avoid that: the config should state the
-- per-entity TPS directly so the value is explicit and auditable.
data TopLevelScope
  -- | One rate limiter shared by all targets across all workloads.
  = TopShared
  -- | Each workload gets its own rate limiter at the full configured TPS.
  | TopPerWorkload
  -- | Each target gets its own rate limiter at the full configured TPS.
  | TopPerTarget
  deriving (Show, Eq)

-- | Scope of a workload-level rate limiter.
--
-- 'TopPerWorkload' is not valid here (we are already at the workload level).
data WorkloadScope
  -- | One rate limiter shared by all targets in the workload.
  = WorkloadShared
  -- | Each target gets its own rate limiter at the full configured TPS.
  | WorkloadPerTarget
  deriving (Show, Eq)

-- | Rate limit configuration.
--
-- Scope is not part of the rate limit itself; it is carried alongside the
-- 'RateLimit' in the enclosing tuple (e.g. @(TopLevelScope, RateLimit)@).
--
-- The JSON representation uses @\"type\"@ + @\"params\"@ at the same level;
-- the parser flattens the nested @\"params\"@ object into the constructor.
data RateLimit
  = TokenBucket
    { -- | Target tokens per second.
      tps :: !Double
    }
  deriving (Show, Eq)

-- | Parse a rate limit from JSON using a context-specific scope parser.
--
-- Scope is optional (defaults to @\"shared\"@ at validation time) and parsed
-- first; it is not part of 'RateLimit'.
--
-- At the top level, use 'parseTopLevelRateLimit' (accepts @\"shared\"@,
-- @\"per_workload\"@, @\"per_target\"@).
-- At the workload level, use 'parseWorkloadRateLimit' (accepts @\"shared\"@,
-- @\"per_target\"@).
parseRateLimit
  :: (String -> Aeson.Types.Parser scope)
  -> Aeson.Value
  -> Aeson.Types.Parser (Maybe scope, RateLimit)
parseRateLimit scopeParser = Aeson.withObject "RateLimit" $ \o -> do
  maybeScopeStr <- o .:? "scope"
  maybeScope <- case maybeScopeStr of
    Nothing -> pure Nothing
    Just s -> Just <$> scopeParser s
  ty <- o .: "type" :: Aeson.Types.Parser String
  case ty of
    "token_bucket" -> do
      -- Flatten the nested "params" object into the constructor.
      p <- o .: "params"
      rl <- TokenBucket <$> p .: "tps"
      pure (maybeScope, rl)
    _ -> fail $
      "RateLimit: unknown \"type\" " ++ show ty ++ ", expected \"token_bucket\""

parseTopLevelRateLimit :: Aeson.Value
                       -> Aeson.Types.Parser (Maybe TopLevelScope, RateLimit)
parseTopLevelRateLimit = parseRateLimit topLevelScopeParser

parseWorkloadRateLimit :: Aeson.Value
                       -> Aeson.Types.Parser (Maybe WorkloadScope, RateLimit)
parseWorkloadRateLimit = parseRateLimit workloadScopeParser

topLevelScopeParser :: String -> Aeson.Types.Parser TopLevelScope
topLevelScopeParser "shared" = pure TopShared
topLevelScopeParser "per_workload" = pure TopPerWorkload
topLevelScopeParser "per_target" = pure TopPerTarget
topLevelScopeParser s = fail $ "RateLimit: unknown scope " ++ show s

workloadScopeParser :: String -> Aeson.Types.Parser WorkloadScope
workloadScopeParser "shared" = pure WorkloadShared
workloadScopeParser "per_target" = pure WorkloadPerTarget
workloadScopeParser s = fail $
  "RateLimit: unknown scope " ++ show s
  ++ "; at workload level, only \"shared\" and \"per_target\" are valid"

--------------------------------------------------------------------------------

-- | What to do when the payload queue, the output of the builder stage, is
-- exhausted.
data OnExhaustion
  -- | Block / wait.
  = Block
  -- | Fail immediately with an error.
  | Error
  deriving (Show, Eq)

instance Aeson.FromJSON OnExhaustion where
  parseJSON = Aeson.withText "OnExhaustion" $ \t -> case t of
    "block" -> pure Block
    "error" -> pure Error
    _ -> fail $
      "OnExhaustion: expected \"block\" or \"error\", got " ++ show t

--------------------------------------------------------------------------------

-- | Configuration for a single workload as parsed from JSON.
--
-- The workload name is the 'Map' key in the parent 'Config'; it is not stored
-- inside the record.
data Workload = Workload
  { -- | Optional builder for this workload.
    maybeBuilder :: !(Maybe Builder)
    -- | Optional rate limit for this workload.
  , maybeRateLimit :: !(Maybe (Maybe WorkloadScope, RateLimit))
    -- | Optional max tokens per request.
  , maybeMaxBatchSize :: !(Maybe Natural)
    -- | Optional on-exhaustion behaviour.
  , maybeOnExhaustion :: !(Maybe OnExhaustion)
    -- | Targets keyed by name.
    -- Because Aeson decodes JSON objects into a 'Map', duplicate target names
    -- are silently discarded (last value wins).
  , targets :: !(Map String Target)
  }
  deriving (Show, Eq)

instance Aeson.FromJSON Workload where
  parseJSON = Aeson.withObject "Workload" $ \o ->
    Workload
      <$> o .:? "builder"
      -- Workload-level scope grammar differs from the top level, hence the
      -- explicit field parser instead of a FromJSON instance.
      <*> Aeson.Types.explicitParseFieldMaybe parseWorkloadRateLimit o
          "rate_limit"
      <*> o .:? "max_batch_size"
      <*> o .:? "on_exhaustion"
      <*> o .: "targets"

--------------------------------------------------------------------------------

-- | A target endpoint to connect to.
--
-- The target name is the 'Map' key in the parent 'Workload'; it is not stored
-- inside the record.
data Target = Target
  { -- | Optional per-target @\"max_batch_size\"@ override.
    maybeTargetMaxBatchSize :: !(Maybe Natural)
    -- | Optional per-target @\"on_exhaustion\"@ override.
  , maybeTargetOnExhaustion :: !(Maybe OnExhaustion)
    -- | IP address or hostname of the target endpoint.
  , addr :: !String
    -- | Port number of the target endpoint.
  , port :: !Int
  }
  deriving (Show, Eq)

instance Aeson.FromJSON Target where
  parseJSON = Aeson.withObject "Target" $ \o ->
    Target
      <$> o .:? "max_batch_size"
      <*> o .:? "on_exhaustion"
      <*> o .: "addr"
      <*> o .: "port"
diff --git a/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Config/Runtime.hs b/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Config/Runtime.hs
new file mode 100644
index 00000000000..95f8868747c
--- /dev/null
+++ b/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Config/Runtime.hs
@@ -0,0 +1,534 @@
{-# LANGUAGE ImportQualifiedPost #-}
{-# LANGUAGE NumericUnderscores #-}

--------------------------------------------------------------------------------

-- | Resolved runtime configuration — creates STM resources (rate limiters,
-- pipeline queues) from a 'Validated.Config'.
--
-- Rate-limiter sharing is keyed by 'Validated.rateLimitKey':
--
-- * @\@global@: one limiter for all targets across all workloads.
-- * @workloadName@: one per workload.
-- * @workloadName.targetName@: one per target.
-- * No rate limit: 'RL.newUnlimited'.
module Cardano.Benchmarking.PullFiction.Config.Runtime
  ( -- * Runtime.
    Runtime
  , config, observers, builders, workloads, asyncs
  -- User supplied handles needed to create the runtime "object".
  , ObserverHandle (..)
  , BuilderHandle (..)
  -- * Pipe.
  , Pipe
  , pipeInputQueue, pipePayloadQueue, pipeRecycle
  -- * Observer.
  , Observer
  , observerName, observerAsync
  -- * Builder.
  , Builder
  , builderName, builderPipe, builderAsync, recyclerAsync
  -- * Workload.
  , Workload
  , workloadName, targets
  -- * OnExhaustion.
  , Raw.OnExhaustion (..)
  -- * Target.
  , Target
  , targetName
  , targetPipe
  , rateLimiter
  , maxBatchSize, onExhaustion
  , targetAddr, targetPort
  -- * Resolution.
  , resolve
  ) where

--------------------------------------------------------------------------------

----------
-- base --
----------
import Control.Concurrent (myThreadId)
import Control.Monad (forever, replicateM)
import Data.Foldable (foldlM, toList)
import GHC.Conc (labelThread)
import Numeric.Natural (Natural)
-----------
-- async --
-----------
import Control.Concurrent.Async (Async)
import Control.Concurrent.Async qualified as Async
----------------
-- containers --
----------------
import Data.Map.Strict (Map)
import Data.Map.Strict qualified as Map
---------
-- stm --
---------
import Control.Concurrent.STM qualified as STM
------------------
-- pull-fiction --
------------------
import Cardano.Benchmarking.PullFiction.Config.Raw qualified as Raw
import Cardano.Benchmarking.PullFiction.Config.Validated qualified as Validated
import Cardano.Benchmarking.PullFiction.Internal.RateLimiter qualified as RL

--------------------------------------------------------------------------------

-- | Fully resolved top-level configuration.
data Runtime input payload = Runtime
  { -- | The original validated configuration.
    config :: !(Validated.Config input)
    -- | Resolved observers, keyed by name.
  , observers :: !(Map String Observer)
    -- | One per workload, alphabetical order (as 'Map.elems' on 'workloads').
  , builders :: [Builder input payload]
    -- | Resolved workloads, keyed by name.
  , workloads :: !(Map String (Workload input payload))
    -- | All asyncs (observers + builders + recyclers), linked.
    -- Caller should append their own worker asyncs for cleanup.
    -- NOTE(review): the two list fields are lazy while the rest are strict —
    -- confirm this asymmetry is intentional.
  , asyncs :: [Async ()]
  }

--------------------------------------------------------------------------------

-- | Caller-provided observer handle. 'resolve' spawns 'ohRun' in a labeled,
-- linked async and uses the subscription for 'Raw.RecycleOnConfirm' recycling.
-- 'ohExtractKey' must produce the same @key@ type used by 'BuilderHandle'.
data ObserverHandle key confirmed = ObserverHandle
  { -- | IO action that runs the observer (e.g. a NodeToNode connection).
    ohRun :: !(IO ())
    -- | Subscribe to the observer's confirmation broadcast channel.
  , ohSubscribe :: !(IO (STM.TChan confirmed))
    -- | Extract the recycling key from a confirmation event.
  , ohExtractKey :: !(confirmed -> key)
  }

-- | Caller-provided builder handle. 'resolve' spawns a builder async that
-- pulls inputs via 'bhInputsPerBatch' and produces payloads.
data BuilderHandle key input payload = BuilderHandle
  { -- | How many inputs the builder needs per call to 'bhBuildPayload'.
    bhInputsPerBatch :: !Natural
    -- | Returns @(confirmation key, payload, recyclable outputs)@.
  , bhBuildPayload :: [input] -> IO (key, payload, [input])
  }

--------------------------------------------------------------------------------

-- | A resolved observer with its lifecycle managed by 'resolve'.
data Observer = Observer
  { -- | Key from the config's @\"observers\"@ object.
    observerName :: !String
    -- | Linked async running the observer connection.
  , observerAsync :: !(Async ())
  }

-- | Pipeline queues for a workload. All targets share the same 'Pipe'.
data Pipe input payload = Pipe
  { -- | Unbounded: must never block on write (bulk-load at startup, recycle
    -- bursts at steady-state). Backpressure comes from 'pipePayloadQueue'.
    pipeInputQueue :: !(STM.TQueue input)
    -- | Bounded (capacity 8192); sole source of backpressure.
  , pipePayloadQueue :: !(STM.TBQueue (payload, [input]))
    -- | Recycle consumed inputs back to 'pipeInputQueue'.
    --
    -- NOTE: recycling happens on /delivery/, not on downstream /confirmation/.
    -- The pipeline assumes delivered payloads will eventually be confirmed —
    -- an accepted trade-off that enables indefinite-duration runs without
    -- pre-generating all payloads.
  , pipeRecycle :: [input] -> STM.STM ()
  }

-- | Builder resources for one workload (exactly one per workload).
data Builder input payload = Builder
  { -- | Same as the workload name.
    builderName :: !String
    -- | Shared with all targets in the workload.
  , builderPipe :: !(Pipe input payload)
    -- | Linked async running the builder loop.
  , builderAsync :: !(Async ())
    -- | 'Raw.RecycleOnConfirm' recycler; 'Nothing' for other strategies.
  , recyclerAsync :: !(Maybe (Async ()))
  }

-- | Fully resolved workload. Builder resources live in 'Builder' on the
-- 'Runtime', not here.
data Workload input payload = Workload
  { -- | Unique name identifying this workload.
    workloadName :: !String
    -- | Resolved targets, keyed by name.
  , targets :: !(Map String (Target input payload))
  }

-- | A fully resolved target. Targets in the same workload share a 'Pipe';
-- targets with the same 'Validated.rateLimitKey' share a 'RL.RateLimiter'.
data Target input payload = Target
  { -- | Unique name identifying this target.
    targetName :: !String
    -- | Shared with all targets in the same workload.
  , targetPipe :: !(Pipe input payload)
    -- | Shared when 'Validated.rateLimitKey' matches.
  , rateLimiter :: !RL.RateLimiter
    -- | Resolved max tokens per request for this target.
  , maxBatchSize :: !Natural
    -- | What to do when the payload queue is exhausted.
  , onExhaustion :: !Raw.OnExhaustion
    -- | IP address or hostname of the target endpoint.
  , targetAddr :: !String
    -- | Port number of the target endpoint.
  , targetPort :: !Int
  }

--------------------------------------------------------------------------------
-- Resolution.
--------------------------------------------------------------------------------

-- | Limiter cache: maps a sharing key to an already-created rate limiter.
--
-- Threaded across workloads so that top-level Shared limiters are reused.
type LimiterCache = Map String RL.RateLimiter

-- | Resolve a 'Validated.Config' into a 'Runtime': create observers, rate
-- limiters, pipeline queues, and spawn builder asyncs.
-- Initial inputs are partitioned equally across workloads (last absorbs
-- remainder).
resolve
  :: Ord key
  => (Int -> String -> Raw.Builder -> IO (BuilderHandle key input payload))
  -- ^ Builder factory (index, name, config).
  -> (Int -> String -> Raw.Observer -> IO (ObserverHandle key confirmed))
  -- ^ Observer factory (index, name, config).
  -> (String -> [input] -> IO ())
  -- ^ Recycle callback (e.g. for tracing).
  -> Validated.Config input
  -> IO (Runtime input payload)
resolve mkBuilderFn mkObserverFn onRecycle validatedConfig = do
  -- Create all observers via the factory callback. The ObserverHandle's run
  -- action is spawned in a labeled, linked async (same pattern as builders).
  -- The subscription pair is kept locally for RecycleOnConfirm.
  handleResults <- Map.fromAscList <$> mapM
    (\(ix, (name, rawObs)) -> do
      handle <- mkObserverFn ix name rawObs
      obsAsync <- Async.async $ do
        tid <- myThreadId
        labelThread tid ("observer/" ++ name)
        ohRun handle
      -- link: an observer crash propagates to this (parent) thread.
      Async.link obsAsync
      pure ( name
           , ( Observer { observerName = name
                        , observerAsync = obsAsync
                        }
             , handle
             )
           )
    )
    (zip
      [0..]
      (Map.toAscList (Validated.observers validatedConfig))
    )
  let resolvedObservers = Map.map fst handleResults
  -- Internal lookup for RecycleOnConfirm: subscribe to named observer.
  -- 'fail' here raises an IOError during resolution.
  let resolveObserver name =
        case Map.lookup name handleResults of
          Nothing -> fail $ "resolve: builder references unknown observer "
            ++ show name
          Just (_, handle) -> do
            chan <- ohSubscribe handle
            pure (chan, ohExtractKey handle)
  let validatedWorkloadsMap = Validated.workloads validatedConfig
  -- Distribute initial inputs equally across workloads, keyed by workload name.
  -- Both Maps share the same ascending key order, so zip + fromAscList is safe.
  let inputsByWorkloadMap =
        let workloadsCount = Map.size validatedWorkloadsMap
            inputChunks = partitionInputs
              workloadsCount
              (toList (Validated.initialInputs validatedConfig))
        in Map.fromAscList
             (zip (Map.keys validatedWorkloadsMap) inputChunks)
  -- Resolve builders first: each builder creates its own Pipe (input queue,
  -- payload queue, recycle action) and loads initial inputs.
  resolvedBuilders <- Map.fromAscList <$> mapM
    (\(ix, (wlName, validatedWorkload)) -> do
      -- Total lookup: inputsByWorkloadMap was built from the same key set.
      let workloadInputs = inputsByWorkloadMap Map.! wlName
          workloadBuilder = Validated.builder validatedWorkload
      builder <- resolveBuilder mkBuilderFn resolveObserver onRecycle
        ix validatedWorkload workloadBuilder workloadInputs
      pure (wlName, builder)
    )
    (zip
      [0..]
      (Map.toAscList validatedWorkloadsMap)
    )
  -- Resolve workloads: assign the pipe from each builder to its targets and
  -- resolve each target's rate limiter. The limiter cache is threaded as a
  -- pure accumulator so that top-level Shared limiters are reused.
  (resolvedWorkloads, _) <-
    foldlM
      (\(acc, cache) (wlName, validatedWorkload) -> do
        let resolvedBuilder = resolvedBuilders Map.! wlName
        (resolved, cache') <-
          resolveWorkload validatedWorkload cache (builderPipe resolvedBuilder)
        pure (Map.insert wlName resolved acc, cache')
      )
      (Map.empty, Map.empty)
      (Map.toAscList validatedWorkloadsMap)
  -- Collect all asyncs: observers + builders + recyclers.
  let builderList = Map.elems resolvedBuilders
      observerAsyncs = map observerAsync (Map.elems resolvedObservers)
      builderAsyncs = concatMap
        (\b -> builderAsync b : maybe [] pure (recyclerAsync b))
        builderList
  -- Assemble the final runtime.
  pure Runtime
    { config = validatedConfig
    , observers = resolvedObservers
    , builders = builderList
    , workloads = resolvedWorkloads
    , asyncs = observerAsyncs ++ builderAsyncs
    }

--------------------------------------------------------------------------------
-- Builder resolution.
--------------------------------------------------------------------------------

-- | Create builder resources for one workload (queues, recycle action, builder
-- async). The returned 'Pipe' is shared with all targets.
resolveBuilder
  :: Ord key
  => (Int -> String -> Raw.Builder -> IO (BuilderHandle key input payload))
  -> (String -> IO (STM.TChan confirmed, confirmed -> key))
  -> (String -> [input] -> IO ())
  -> Int
  -> Validated.Workload
  -> Raw.Builder
  -- | Initial inputs for this workload.
  -> [input]
  -> IO (Builder input payload)
resolveBuilder mkBuilderFn resolveObserver onRecycle
               builderIndex validatedWorkload validatedBuilder initialInputs = do
  -- Input queue: unbounded (TQueue) so that bulk-loading initial inputs and
  -- recycling outputs never block. See 'Pipe' for the full rationale.
  inputQueue <- STM.newTQueueIO
  STM.atomically $ mapM_ (STM.writeTQueue inputQueue) initialInputs
  -- Payload queue: bounded (TBQueue, capacity 8192); the sole source of
  -- backpressure. The builder blocks here when workers cannot consume fast
  -- enough. The capacity must be large enough to absorb GC pauses at high TPS
  -- (e.g. 100k TPS drains 8192 entries in ~82 ms).
  payloadQ <- STM.newTBQueueIO 8192
  let thePipe = Pipe
        { pipeInputQueue = inputQueue
        , pipePayloadQueue = payloadQ
        -- pipeRecycle: write recycled inputs back to the unbounded input
        -- queue. Because TQueue has no capacity limit, this can never stall
        -- the worker thread inside STM.
        , pipeRecycle = \is -> mapM_ (STM.writeTQueue inputQueue) is
        }
  let name = Validated.workloadName validatedWorkload
      recycle = Raw.builderRecycle validatedBuilder
  -- Resolve confirm source for RecycleOnConfirm.
  mConfirmSource <- case recycle of
    Just (Raw.RecycleOnConfirm obsName) ->
      Just <$> resolveObserver obsName
    _ -> pure Nothing
  -- Set up recycling and get the enqueue action + optional recycler async.
  (enqueue, mRecycler) <-
    builderRunner recycle thePipe name mConfirmSource (onRecycle name)
  -- Create the caller's build handle.
  builderHandle <- mkBuilderFn builderIndex name validatedBuilder
  -- Spawn the builder async: forever pull inputs, build, enqueue.
  async <- Async.async $ do
    tid <- myThreadId
    -- NOTE(review): label is the bare workload name; observers are labeled
    -- "observer/" ++ name — consider "builder/" ++ name for consistency.
    labelThread tid name
    forever $ do
      -- Blocks (retries) until a full batch of inputs is available.
      inputs <- STM.atomically $
        replicateM (fromIntegral (bhInputsPerBatch builderHandle))
          (STM.readTQueue (pipeInputQueue thePipe))
      (key, payload, outputInputs) <- bhBuildPayload builderHandle inputs
      enqueue payload key outputInputs
  Async.link async
  -- Link the recycler async (consistent with builder async linking above).
  case mRecycler of
    Just r -> Async.link r
    Nothing -> pure ()
  pure Builder
    { builderName = name
    , builderPipe = thePipe
    , builderAsync = async
    , recyclerAsync = mRecycler
    }

--------------------------------------------------------------------------------
-- Workload resolution.
--------------------------------------------------------------------------------

-- | Resolve a single workload: assign the pre-created 'Pipe' to each target
-- and resolve each target's rate limiter.
--
-- The 'Pipe' is created by 'resolveBuilder' and passed in so that the 'Builder'
-- and all targets share the same underlying queues.
--
-- Cascading defaults and conflict checks have already been performed by
-- "Cardano.Benchmarking.PullFiction.Config.Validated"; this function only
-- creates rate limiters.
resolveWorkload
  :: Validated.Workload
  -- | Limiter cache (threaded as a pure accumulator).
  -> LimiterCache
  -- | Pipe for this workload (created by 'resolveBuilder').
  -> Pipe input payload
  -> IO (Workload input payload, LimiterCache)
resolveWorkload validatedWorkload cache0 thePipe = do
  let wlName = Validated.workloadName validatedWorkload
      validatedTargets = Validated.targets validatedWorkload
  (resolvedTargets, cache') <-
    foldlM
      (\(acc, cache) (tName, validatedTarget) -> do
        (resolved, cache'') <-
          resolveTarget cache thePipe validatedTarget
        pure (Map.insert tName resolved acc, cache'')
      )
      (Map.empty, cache0)
      (Map.toAscList validatedTargets)
  pure ( Workload { workloadName = wlName
                  , targets = resolvedTargets
                  }
       , cache'
       )

--------------------------------------------------------------------------------
-- Target resolution.
--------------------------------------------------------------------------------

-- | Resolve a single target: look up or create its rate limiter from the
-- cache, then build the 'Target' record.
resolveTarget
  :: LimiterCache
  -> Pipe input payload
  -> Validated.Target
  -> IO (Target input payload, LimiterCache)
resolveTarget cache thePipe validatedTarget = do
  (limiter, cache') <- getOrCreateLimiter cache validatedTarget
  pure ( Target
         { targetName = Validated.targetName validatedTarget
         , targetPipe = thePipe
         , rateLimiter = limiter
         , maxBatchSize = Validated.maxBatchSize validatedTarget
         , onExhaustion = Validated.onExhaustion validatedTarget
         , targetAddr = Validated.addr validatedTarget
         , targetPort = Validated.port validatedTarget
         }
       , cache'
       )

-- | Look up or create a 'RL.RateLimiter' for a target.
--
-- * 'Nothing' rate limit source → 'RL.newUnlimited' (no cache entry).
-- * Otherwise, use the pre-computed 'Validated.rateLimitKey' as the cache
--   key. If the key already exists the existing limiter is reused; otherwise a
--   'RL.newTokenBucket' is created and inserted.
getOrCreateLimiter
  :: LimiterCache -> Validated.Target
  -> IO (RL.RateLimiter, LimiterCache)
getOrCreateLimiter cache target =
  case Validated.rateLimitSource target of
    -- Unlimited targets never enter the cache.
    Nothing -> pure (RL.newUnlimited, cache)
    Just src -> do
      let key = Validated.rateLimitKey src
          tpsValue = Raw.tps (Validated.rateLimit src)
      case Map.lookup key cache of
        Just existing -> pure (existing, cache)
        Nothing -> do
          limiter <- RL.newTokenBucket tpsValue
          pure (limiter, Map.insert key limiter cache)

--------------------------------------------------------------------------------
-- Builder runner.
--------------------------------------------------------------------------------

-- | Set up recycling infrastructure for a builder and return an enqueue action.
--
-- The returned action is called by the builder loop after each payload is built.
-- It handles enqueueing the payload and recycling inputs according to the
-- configured strategy:
--
-- * 'Nothing': enqueue @(payload, [])@ — no recycling at all.
-- * 'Raw.RecycleOnBuild': enqueue @(payload, [])@, recycle inputs immediately.
-- * 'Raw.RecycleOnPull': enqueue @(payload, inputs)@ — recycled on fetch.
-- * 'Raw.RecycleOnConfirm': enqueue @(payload, [])@, track @key → inputs@ in
--   a pending map; a background recycler async reads confirmations from the
--   provided channel and recycles matching inputs.
--
-- The @key@ parameter of the returned action is only used by 'RecycleOnConfirm'
-- and ignored otherwise.
builderRunner
  :: Ord key
  => Maybe Raw.RecycleStrategy
  -> Pipe input payload
  -> String
  -- | For 'RecycleOnConfirm': pre-subscribed broadcast channel and a function
  -- to extract the confirmation key. Ignored for other strategies.
  -> Maybe (STM.TChan confirmed, confirmed -> key)
  -- | Callback invoked each time inputs are recycled (e.g. for tracing).
  -> ([input] -> IO ())
  -> IO (payload -> key -> [input] -> IO (), Maybe (Async ()))
builderRunner strategy pipe name mConfirmSource onRecycle = do
  pendingRecycle <- STM.newTVarIO Map.empty
  -- For RecycleOnConfirm, spawn a recycler that reads confirmations and
  -- recycles the matching inputs. The recycler async is returned to the
  -- caller ('resolveBuilder') which links it.
  -- NOTE(review): if strategy is RecycleOnConfirm but mConfirmSource is
  -- Nothing, no recycler is spawned yet enqueue still inserts into the pending
  -- map, which would then grow unboundedly. 'resolveBuilder' always supplies
  -- the source for this strategy, so the case should be unreachable — confirm.
  mRecycler <- case (strategy, mConfirmSource) of
    (Just (Raw.RecycleOnConfirm _), Just (chan, extractKey)) -> do
      recycler <- Async.async $ do
        tid <- myThreadId
        labelThread tid (name ++ "/recycler")
        forever $ do
          confirmed <- STM.atomically $ STM.readTChan chan
          let k = extractKey confirmed
          -- Atomically remove the pending entry for this key, if any.
          mInputs <- STM.atomically $ do
            m <- STM.readTVar pendingRecycle
            case Map.lookup k m of
              Nothing -> pure Nothing
              Just inputs -> do
                STM.writeTVar pendingRecycle (Map.delete k m)
                pure (Just inputs)
          case mInputs of
            Nothing -> pure ()
            Just inputs -> do
              STM.atomically $ pipeRecycle pipe inputs
              onRecycle inputs
      pure (Just recycler)
    _ -> pure Nothing
  -- Return the enqueue action and the optional recycler async.
  let enqueue payload key inputs =
        case strategy of
          Nothing ->
            STM.atomically $ STM.writeTBQueue (pipePayloadQueue pipe) (payload, [])
          Just Raw.RecycleOnBuild -> do
            STM.atomically $ STM.writeTBQueue (pipePayloadQueue pipe) (payload, [])
            STM.atomically $ pipeRecycle pipe inputs
            onRecycle inputs
          Just Raw.RecycleOnPull ->
            STM.atomically $ STM.writeTBQueue (pipePayloadQueue pipe) (payload, inputs)
          Just (Raw.RecycleOnConfirm _) -> STM.atomically $ do
            STM.writeTBQueue (pipePayloadQueue pipe) (payload, [])
            -- NOTE(review): Map.insert overwrites an existing entry for the
            -- same key, silently dropping its inputs — confirm keys are unique
            -- per in-flight payload.
            STM.modifyTVar' pendingRecycle (Map.insert key inputs)
  pure (enqueue, mRecycler)

--------------------------------------------------------------------------------
-- Input partitioning.
--------------------------------------------------------------------------------

-- | Split a list into @n@ contiguous chunks of roughly equal size.
-- The last chunk absorbs any remainder.
--
-- For @n <= 0@ no chunks are produced (zero chunks were requested);
-- previously this case wrongly returned the whole list as a single chunk.
-- When the list is shorter than @n@, leading chunks are empty and the last
-- chunk takes everything; 'Validated.validate' rejects configs with fewer
-- inputs than workloads, so 'resolve' never hits that case.
partitionInputs :: Int -> [a] -> [[a]]
partitionInputs n xs
  | n <= 0    = []    -- zero (or negative) chunks requested
  | n == 1    = [xs]
  | otherwise = go xs n
  where
    -- Base size of every chunk but the last.
    chunkSize = length xs `div` n
    go remaining 1 = [remaining]  -- last chunk absorbs the remainder
    go remaining k =
      let (chunk, rest) = splitAt chunkSize remaining
      in  chunk : go rest (k - 1)
diff --git a/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Config/Validated.hs b/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Config/Validated.hs
new file mode 100644
index 00000000000..cbb2f572199
--- /dev/null
+++ b/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Config/Validated.hs
@@ -0,0 +1,432 @@
{-# LANGUAGE ImportQualifiedPost #-}

--------------------------------------------------------------------------------

-- | Validated load-generator configuration with cascading defaults applied.
--
-- Types mirror "Cardano.Benchmarking.PullFiction.Config.Raw" but with hidden
-- data constructors. The only way to obtain values is through 'validate', which
-- guarantees that every value has passed validation (e.g. @tps > 0@,
-- @max_batch_size >= 1@, valid names).
--
-- Cascading defaults are resolved here:
--
-- * @builder@: setting it at both the top level and the workload level is an
--   error; otherwise workload value > top level value > error.
-- * @rate_limit@: setting it at both the top level and the workload level is an
--   error; otherwise the workload inherits the top-level value (or 'Nothing'
--   for unlimited).
-- * @max_batch_size@: target value > workload value > top-level value >
--   default (1).
-- * @on_exhaustion@: target value > workload value > top-level value >
--   default (@\"block\"@).
--
-- After 'validate', every 'Target' has a concrete @maxBatchSize@ and every
-- 'Workload' has a concrete @builder@ (no 'Maybe').
--
-- 'Workload' and 'Config' store their children in 'Map's keyed by name
-- (alphabetical order; JSON object key order is not preserved).
--
-- __Import qualified.__ Field names clash with
-- "Cardano.Benchmarking.PullFiction.Config.Raw" and
-- "Cardano.Benchmarking.PullFiction.Config.Runtime".
module Cardano.Benchmarking.PullFiction.Config.Validated
  (
  -- * Config.
    Config
  , initialInputs, observers, workloads

  -- * Workload.
  , Workload
  , workloadName, builder, targets

  -- * RateLimitSource.
  , RateLimitSource (..)

  -- * Target.
  , Target
  , targetName
  , rateLimitSource
  , maxBatchSize, onExhaustion
  , addr, port

  -- * Validation.
  , validate

  ) where

--------------------------------------------------------------------------------

----------
-- base --
----------
import Control.Monad (when)
import Data.List.NonEmpty (NonEmpty)
import Data.Maybe (fromMaybe)
import Numeric.Natural (Natural)
----------------
-- containers --
----------------
import Data.Map.Strict (Map)
import Data.Map.Strict qualified as Map
------------------
-- pull-fiction --
------------------
import Cardano.Benchmarking.PullFiction.Config.Raw qualified as Raw

--------------------------------------------------------------------------------
-- Defaults.
--------------------------------------------------------------------------------

-- | Default scope for a top-level rate limiter when not specified in JSON.
defaultTopLevelScope :: Raw.TopLevelScope
defaultTopLevelScope = Raw.TopShared

-- | Default scope for a workload-level rate limiter when not specified in JSON.
defaultWorkloadScope :: Raw.WorkloadScope
defaultWorkloadScope = Raw.WorkloadShared

-- | Default maximum batch size when neither the workload nor the top-level
-- config specifies one.
defaultMaxBatchSize :: Natural
defaultMaxBatchSize = 1

-- | Default on-exhaustion behaviour when not specified at any level.
defaultOnExhaustion :: Raw.OnExhaustion
defaultOnExhaustion = Raw.Block

--------------------------------------------------------------------------------

-- | Top-level configuration.
--
-- See 'Raw.Config' for field semantics. All invariants have been checked and
-- cascading defaults applied by 'validate'.
data Config input = Config
  { -- | Initial inputs provided by the caller and stored by 'validate'.
    initialInputs :: !(NonEmpty input)
    -- | Observers (keyed by name).
    -- Opaque; interpretation is the caller's responsibility.
  , observers :: !(Map String Raw.Observer)
    -- | Workloads keyed by name. Iteration order is alphabetical (Map order).
  , workloads :: !(Map String Workload)
  }
  deriving (Show, Eq)

--------------------------------------------------------------------------------

-- | A single workload with cascading defaults applied.
--
-- 'builder' is always concrete (no 'Maybe'); cascading from the top level
-- config is performed by 'validate'.
data Workload = Workload
  { -- | User provided name.
    workloadName :: !String
    -- | Resolved builder: workload value > top level value.
    -- Opaque; interpretation is the caller's responsibility.
  , builder :: !Raw.Builder
    -- | Targets keyed by name. Iteration order is alphabetical.
  , targets :: !(Map String Target)
  }
  deriving (Show, Eq)

--------------------------------------------------------------------------------

-- | Resolved rate limit for a target, with its sharing key pre-computed.
--
-- The 'rateLimitKey' encodes the sharing boundary using fully qualified names:
--
-- * @\@global@: one limiter shared by all targets across all workloads.
-- * @workloadName@: one limiter per workload (each shared by all its targets).
-- * @workloadName.targetName@: one limiter per target.
--
-- Because workload and target names may not start with @\@@ or contain @.@
-- (enforced at validation time), these keys are guaranteed to be unique.
data RateLimitSource = RateLimitSource
  { -- | Cache key for limiter sharing (the fully qualified name).
    rateLimitKey :: !String
    -- | Validated rate limit parameters.
  , rateLimit :: !Raw.RateLimit
  }
  deriving (Show, Eq)

--------------------------------------------------------------------------------

-- | A target endpoint to connect to.
--
-- 'maxBatchSize' and 'onExhaustion' are concrete (no 'Maybe'). Cascading
-- defaults have been applied by 'validate'.
data Target = Target
  { -- | User provided name.
    targetName :: !String
    -- | Resolved rate limit source ('Nothing' means unlimited).
  , rateLimitSource :: !(Maybe RateLimitSource)
    -- | Resolved max batch size.
    -- target value > workload value > top-level value > default (1).
  , maxBatchSize :: !Natural
    -- | Resolved on-exhaustion behaviour.
    -- target value > workload value > top-level value > default (block).
  , onExhaustion :: !Raw.OnExhaustion
    -- How to connect to the target.
  , addr :: !String
  , port :: !Int
  }
  deriving (Show, Eq)

--------------------------------------------------------------------------------
-- Validation.
--------------------------------------------------------------------------------

-- | Validate a 'Raw.Config', enforce all business rules, and cascade top-level
-- defaults into workloads.
--
-- Input loading is the caller's responsibility; pass the already-loaded
-- inputs directly. This keeps the validation layer pure and decouples it from
-- IO concerns like key loading and network magic interpretation.
--
-- Returns 'Left' with a descriptive error message on the first violation
-- ('when' short-circuits in the @Either String@ applicative).
validate
  -- | Raw configuration as parsed from JSON.
  :: Raw.Config
  -- | Initial inputs (already loaded by the caller).
  -> NonEmpty input
  -> Either String (Config input)
validate raw inputs = do
  -- Observers. Always top-level and by name.
  -- (opaque; passed through without interpretation).
  let resolvedObservers = fromMaybe
        Map.empty
        (Raw.maybeObservers raw)
  -- Top level builder. Future iterations will have builders by name.
  -- (opaque; passed through without interpretation).
  let maybeTopBuilder = Raw.maybeTopLevelBuilder raw
  -- Top level rate limit.
  maybeTopRateLimit <-
    case Raw.maybeTopLevelRateLimit raw of
      Nothing -> pure Nothing
      Just (maybeTopScope, rawRL) -> do
        let topScope = fromMaybe
              defaultTopLevelScope
              maybeTopScope
        validatedRL <- validateRateLimit rawRL
        pure (Just (topScope, validatedRL))
  -- Max batch size.
  let topMaxBatchSize = fromMaybe
        defaultMaxBatchSize
        (Raw.maybeTopLevelMaxBatchSize raw)
  when (topMaxBatchSize == 0) $
    Left "Config: max_batch_size must be >= 1"
  -- On-exhaustion behaviour.
  let topOnExhaustion = fromMaybe
        defaultOnExhaustion
        (Raw.maybeTopLevelOnExhaustion raw)
  -- Workloads.
  let rawWorkloads = fromMaybe
        Map.empty
        (Raw.maybeWorkloads raw)
  when (Map.null rawWorkloads) $
    Left "Config: at least one workload is required"
  workloadsMap <- Map.traverseWithKey
    (\name workload ->
      validateWorkload
        name
        maybeTopBuilder
        maybeTopRateLimit
        topMaxBatchSize
        topOnExhaustion
        workload
    )
    rawWorkloads
  -- Inputs must cover all workloads: Runtime.partitionInputs splits them into
  -- contiguous chunks, so fewer inputs than workloads leaves some with zero.
  let inputCount = length inputs
  when (inputCount < Map.size workloadsMap) $
    Left $ "Config: not enough initial inputs (" ++ show inputCount
      ++ ") for " ++ show (Map.size workloadsMap) ++ " workload(s)"
  -- Final validated config.
  pure Config
    { initialInputs = inputs
    , observers = resolvedObservers
    , workloads = workloadsMap
    }

--------------------------------------------------------------------------------

-- Returns 'Left' with a descriptive error message on the first violation.
validateWorkload
  -- | Workload name (from Map key).
  :: String
  -- | Top level builder (opaque).
+ -> Maybe Raw.Builder + -- | Validated top-level scope / rate limit. + -> Maybe (Raw.TopLevelScope, Raw.RateLimit) + -- | Resolved top-level max batch size. + -> Natural + -- | Resolved top-level on-exhaustion behaviour. + -> Raw.OnExhaustion + -- | The parsed workload from JSON. + -> Raw.Workload + -> Either String Workload +validateWorkload name + maybeTopBuilder + maybeTopRateLimit + topMaxBatchSize + topOnExhaustion + rawWorkload = do + -- Name. + validateName "Workload" name + -- Builder conflict: setting at both levels is ambiguous. + case (maybeTopBuilder, Raw.maybeBuilder rawWorkload) of + (Just _, Just _) -> + Left $ "builder set at both the top level and in workload: " ++ show name + _ -> pure () + -- Resolve builder: workload level > top level > error. + resolvedBuilder <- do + case Raw.maybeBuilder rawWorkload of + Just parsedBuilder -> do + -- The top level builder gets ignored in favor of the workload builder. + pure parsedBuilder + Nothing -> do + case maybeTopBuilder of + Just topLevelBuilder -> pure topLevelBuilder + Nothing -> Left $ + "Workload " ++ show name + ++ ": builder is required (no workload or top level default)" + -- Rate-limit conflict: setting at both levels is ambiguous. + case (maybeTopRateLimit, Raw.maybeRateLimit rawWorkload) of + (Just _, Just _) -> + Left $ + "rate_limit is set at both the top level and in workload: " ++ show name + _ -> pure () + -- Resolve effective rate limit: workload-level > top-level > unlimited. + -- The scope and validated rate limit are cascaded to validateTarget, which + -- computes the final RateLimitSource (including the cache key). + effectiveRateLimit <- do + case Raw.maybeRateLimit rawWorkload of + -- There is a rate limit at the workload level. + Just (maybeWlScope, rawRL) -> do + validatedRL <- validateRateLimit rawRL + let wlScope = fromMaybe + defaultWorkloadScope + maybeWlScope + -- `Right` workload scope. 
+ pure (Just (Right wlScope, validatedRL)) + -- There is no rate limit at the workload level. + Nothing -> do + case maybeTopRateLimit of + Just (topScope, validatedTopRL) -> do + -- `Left` top level scope. + pure (Just (Left topScope, validatedTopRL)) + Nothing -> do + pure Nothing + -- Cascade max_batch_size: workload > top-level (always concrete). + -- The per-target override is applied inside validateTarget. + case Raw.maybeMaxBatchSize rawWorkload of + Just 0 -> Left "Workload: max_batch_size must be >= 1" + _ -> pure () + let workloadBatchSize = fromMaybe + topMaxBatchSize + (Raw.maybeMaxBatchSize rawWorkload) + -- Cascade on_exhaustion: workload > top-level. + let workloadOnExhaustion = fromMaybe + topOnExhaustion + (Raw.maybeOnExhaustion rawWorkload) + -- Targets. + when (Map.null (Raw.targets rawWorkload)) $ + Left $ "Workload " ++ show name ++ ": targets must not be empty" + targetsMap <- Map.traverseWithKey + (\tName target -> validateTarget + name tName effectiveRateLimit workloadBatchSize workloadOnExhaustion target + ) + (Raw.targets rawWorkload) + -- Final validated workload. + pure Workload + { workloadName = name + , builder = resolvedBuilder + , targets = targetsMap + } + +-- Returns 'Left' with a descriptive error message on the first violation. +validateTarget + -- | Workload name (for cache key computation). + :: String + -- | Target name (from Map key). + -> String + -- | If 'Just': 'Left' is top level scope, 'Right' is workload scope. + -> Maybe (Either Raw.TopLevelScope Raw.WorkloadScope, Raw.RateLimit) + -- | Resolved max batch size. + -> Natural + -- | Resolved on-exhaustion behaviour. + -> Raw.OnExhaustion + -- The target parsed from JSON. + -> Raw.Target + -> Either String Target +validateTarget wlName tgtName effectiveRateLimit workloadBatchSize workloadOnExhaustion rawTarget = do + -- Name. + validateName "Target" tgtName + -- Resolve rate limit source with pre-computed cache key. 
+ -- The key scheme uses fully-qualified names: + -- @global → one limiter for everything + -- workloadName → one per workload + -- workloadName.target → one per target + let maybeRateLimitSource = + case effectiveRateLimit of + Nothing -> Nothing + Just (scope, rl) -> Just $ case scope of + -- Using the scope set at the top level rate limit. + Left Raw.TopShared -> RateLimitSource "@global" rl + Left Raw.TopPerWorkload -> RateLimitSource wlName rl + Left Raw.TopPerTarget -> RateLimitSource (wlName++"."++tgtName) rl + -- Using scope set at the workload level rate limit. + Right Raw.WorkloadShared -> RateLimitSource wlName rl + Right Raw.WorkloadPerTarget -> RateLimitSource (wlName++"."++tgtName) rl + -- Cascade max_batch_size: target > workload (always concrete). + case Raw.maybeTargetMaxBatchSize rawTarget of + Just 0 -> Left $ + "Target " ++ show tgtName + ++ ": max_batch_size must be >= 1" + _ -> pure () + -- Cascade max_batch_size: target > workload (always concrete). + let resolvedMaxBatchSize = fromMaybe + workloadBatchSize + (Raw.maybeTargetMaxBatchSize rawTarget) + -- Cascade on_exhaustion: target > workload (always concrete). + let resolvedOnExhaustion = fromMaybe + workloadOnExhaustion + (Raw.maybeTargetOnExhaustion rawTarget) + -- Final validated target. 
+  pure Target
+    { targetName = tgtName
+    , rateLimitSource = maybeRateLimitSource
+    , maxBatchSize = resolvedMaxBatchSize
+    , onExhaustion = resolvedOnExhaustion
+    , addr = Raw.addr rawTarget
+    , port = Raw.port rawTarget
+    }
+
+--------------------------------------------------------------------------------
+
+validateRateLimit :: Raw.RateLimit -> Either String Raw.RateLimit
+validateRateLimit rl@(Raw.TokenBucket rawTps) = do
+  when (isNaN rawTps) $
+    Left "RateLimit: tps must be a number, got NaN"
+  when (isInfinite rawTps) $
+    Left "RateLimit: tps must be finite"
+  when (rawTps <= 0) $
+    Left "RateLimit: tps must be > 0"
+  pure rl
+
+-- | Validate that a name does not start with @\'\@\'@ or contain @\'.\'@.
+--
+-- These characters are reserved for the rate-limit cache key scheme
+-- (see 'RateLimitSource').
+validateName :: String -> String -> Either String ()
+validateName context name = do
+  case name of
+    [] ->
+      Left $ context ++ ": name must be non-empty"
+    ('@':_) ->
+      Left $ context ++ ": name must not start with '@', got " ++ show name
+    _ -> pure ()
+  when ('.' `elem` name) $
+    Left $ context ++ ": name must not contain '.', got " ++ show name
+
diff --git a/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Internal/RateLimiter.hs b/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Internal/RateLimiter.hs
new file mode 100644
index 00000000000..2da7ac25406
--- /dev/null
+++ b/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Internal/RateLimiter.hs
@@ -0,0 +1,187 @@
+{-# LANGUAGE BangPatterns #-}
+{-# LANGUAGE ImportQualifiedPost #-}
+{-# LANGUAGE NumericUnderscores #-}
+
+--------------------------------------------------------------------------------
+
+-- | Server-side GCRA rate limiter for pull-based token dispensing.
+-- +-- Computes delays but never sleeps — the caller is responsible for sleeping +-- outside the STM transaction (keeps the critical section short and the limiter +-- testable in pure STM). +-- +-- The 'TBQueue' is an explicit parameter so that queue reads and rate-limit +-- accounting are atomic while the limiter stays decoupled from any particular +-- queue. +module Cardano.Benchmarking.PullFiction.Internal.RateLimiter + ( RateLimiter, newTokenBucket, newUnlimited + , waitToken, tryWaitToken + ) where + +-------------------------------------------------------------------------------- + +--------- +-- stm -- +--------- +import Control.Concurrent.STM qualified as STM +------------------ +-- pull-fiction -- +------------------ +import Cardano.Benchmarking.PullFiction.Clock qualified as Clock + +-------------------------------------------------------------------------------- + +-- | 'TokenBucket' for a configured TPS ceiling, or 'Unlimited'. +data RateLimiter + = TokenBucket + -- | Emission interval T in nanoseconds (cached). + !Integer + -- | Start time (set on first claim). + !(STM.TVar (Maybe Clock.TimeSpec)) + -- | Tokens sent so far. + !(STM.TVar Integer) + -- | No rate limit. + | Unlimited + +-- | Create a token-bucket rate limiter targeting @tps@ tokens per second. +-- +-- Uses the Generic Cell Rate Algorithm (GCRA), also known as the virtual +-- scheduling algorithm (ITU-T I.371). Equivalent to Turner's leaky bucket as a +-- meter (Turner 1986, "New Directions in Communications", IEEE Comm. Mag. +-- 24(10)). +-- +-- The algorithm tracks a /Theoretical Arrival Time/ (TAT), the earliest time +-- the next token is allowed: +-- +-- @ +-- TAT(0) = now -- first token, no delay +-- TAT(N+1) = max(TAT(N), now) + T -- T = emission interval = 1\/rate +-- allow iff TAT <= now + τ -- τ = burst tolerance +-- @ +-- +-- With @τ = 0@ (the current implementation) no burst is allowed: each token +-- must wait until its scheduled time. 
Adding @τ > 0@ would permit up to @τ / T@ +-- tokens to arrive ahead of schedule (the dual token-bucket formulation with +-- bucket depth @τ / T@). +-- +-- TODO: Add a @maxBurst@ parameter to the rate limit config. The burst +-- tolerance becomes @τ = maxBurst * T@, and the admission check becomes +-- @TAT <= now + τ@. +-- +-- The start time is captured on the first token claim, so any delay between +-- limiter creation and the first request does not cause a burst of catch-up +-- tokens. +-- +-- Performance: @nanosPerToken@ (the emission interval @T@) is pre-computed +-- once at construction via @round (1e9 / tps)@. This trades a tiny rounding +-- error (at most +/-0.5 ns per token) for O(1) integer multiplication in +-- 'nextTokenTargetTime', avoiding 'Rational' division that would otherwise +-- dominate at high token counts. +newTokenBucket :: Double -> IO RateLimiter +newTokenBucket tps = do + startVar <- STM.newTVarIO Nothing + countVar <- STM.newTVarIO 0 + let !nanosPerToken = round (1_000_000_000 / tps) :: Integer + pure (TokenBucket nanosPerToken startVar countVar) + +-- | An unlimited rate limiter (never blocks on rate). +newUnlimited :: RateLimiter +newUnlimited = Unlimited + +-------------------------------------------------------------------------------- + +-- | @targetTime(N) = startTime + N * nanosPerToken@. +-- Token 0 is special-cased in 'waitToken' (delay 0). +-- O(1) integer multiply + add — no division on the hot path. +nextTokenTargetTime :: Integer -> Clock.TimeSpec -> Integer -> Clock.TimeSpec +nextTokenTargetTime nanosPerToken startTime tokensSent = + let !offset = Clock.fromNanoSecs (tokensSent * nanosPerToken) + in startTime + offset + +-------------------------------------------------------------------------------- + +-- | Try to claim the next token. Runs entirely in STM, never retries. +-- +-- @Just (token, delay)@ when a token is available; 'Nothing' when the queue is +-- empty (caller sleeps and retries). 
+-- +-- __Fairness__: consume + slot-claim are one STM transaction, so concurrent +-- threads see a strictly increasing @tokensSent@ counter — FIFO-fair. +-- +-- Never blocks inside STM, so the caller-captured @timeNow@ stays accurate +-- (no stale-clock TPS drift). +waitToken :: Clock.TimeSpec + -> RateLimiter + -> STM.TBQueue token + -> STM.STM (Maybe (token, Clock.TimeSpec)) +-- No TPS: try to read a token without blocking. +waitToken _ Unlimited queue = do + maybeToken <- STM.tryReadTBQueue queue + case maybeToken of + Nothing -> pure Nothing + Just token -> pure (Just (token, 0)) +-- With a TPS. +waitToken timeNow (TokenBucket nanosPerToken startTVar countTVar) queue = do + maybeToken <- STM.tryReadTBQueue queue + case maybeToken of + Nothing -> pure Nothing + Just token -> do + maybeStartTime <- STM.readTVar startTVar + case maybeStartTime of + -- Rate limiter running, claim a rate-limit slot. + Just startTime -> do + tokensSent <- STM.readTVar countTVar + STM.writeTVar countTVar (tokensSent + 1) + let !targetTime = nextTokenTargetTime + nanosPerToken startTime tokensSent + !delay = max 0 (targetTime - timeNow) + pure (Just (token, delay)) + -- First call, record start time. + Nothing -> do + STM.writeTVar startTVar (Just timeNow) + STM.writeTVar countTVar 1 + pure (Just (token, 0)) + +-- | Non-blocking variant: checks the rate limit /first/ and returns +-- @Left delay@ without touching the queue when ahead of schedule. +-- +-- @Right Nothing@: not rate-limited but queue empty. +-- @Right (Just token)@: token claimed. +tryWaitToken :: Clock.TimeSpec + -> RateLimiter + -> STM.TBQueue token + -> STM.STM (Either Clock.TimeSpec (Maybe token)) +-- No TPS. +tryWaitToken _ Unlimited queue = Right <$> STM.tryReadTBQueue queue +-- With a TPS. +tryWaitToken timeNow + (TokenBucket nanosPerToken startTVar countTVar) + queue = do + maybeStartTime <- STM.readTVar startTVar + case maybeStartTime of + -- Rate limiter running, check if ahead of schedule. 
+ Just startTime -> do + tokensSent <- STM.readTVar countTVar + let !targetTime = nextTokenTargetTime + nanosPerToken startTime tokensSent + if targetTime > timeNow + -- Ahead of schedule. + then pure (Left (targetTime - timeNow)) + -- Available headroom. + else do + maybeToken <- STM.tryReadTBQueue queue + case maybeToken of + Nothing -> pure (Right Nothing) + Just token -> do + STM.writeTVar countTVar (tokensSent + 1) + pure (Right (Just token)) + -- First call, no rate limit to check. + Nothing -> do + maybeToken <- STM.tryReadTBQueue queue + case maybeToken of + Nothing -> pure (Right Nothing) + Just token -> do + -- Record the time only if a token was available. + STM.writeTVar startTVar (Just timeNow) + STM.writeTVar countTVar 1 + pure (Right (Just token)) diff --git a/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/WorkloadRunner.hs b/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/WorkloadRunner.hs new file mode 100644 index 00000000000..27b19d77918 --- /dev/null +++ b/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/WorkloadRunner.hs @@ -0,0 +1,293 @@ +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE NumericUnderscores #-} + +-------------------------------------------------------------------------------- + +module Cardano.Benchmarking.PullFiction.WorkloadRunner + ( TargetWorker, runWorkload + , QueueStarved(..) 
+ ) where + +-------------------------------------------------------------------------------- + +---------- +-- base -- +---------- +import Control.Concurrent (myThreadId, threadDelay) +import Control.Exception (Exception, throwIO) +import Control.Monad (when) +import GHC.Conc (labelThread) +----------- +-- async -- +----------- +import Control.Concurrent.Async qualified as Async +---------------- +-- containers -- +---------------- +import Data.Map.Strict qualified as Map +--------- +-- stm -- +--------- +import Control.Concurrent.STM qualified as STM +------------------ +-- pull-fiction -- +------------------ +import Cardano.Benchmarking.PullFiction.Config.Raw qualified as Raw +import Cardano.Benchmarking.PullFiction.Config.Runtime qualified as Runtime +import Cardano.Benchmarking.PullFiction.Clock qualified as Clock +import Cardano.Benchmarking.PullFiction.Internal.RateLimiter qualified as RL + +-------------------------------------------------------------------------------- +-- Queue-starved exception. +-------------------------------------------------------------------------------- + +-- | Fatal exception thrown (in 'Raw.Error' on-exhaustion mode) when the payload +-- queue is empty and the rate limiter has authorized a fetch. This means the +-- payload builder cannot produce payloads fast enough for the configured TPS +-- demand. The caller must either reduce TPS, increase the number of initial +-- inputs, increase the payload queue capacity, or parallelise the builder. +-- +-- In 'Raw.Block' mode, 'blockingFetch' retries until a token becomes +-- available; 'nonBlockingFetch' returns 'Nothing'. +data QueueStarved = QueueStarved !String + deriving (Show) + +instance Exception QueueStarved + +-------------------------------------------------------------------------------- +-- RateLimitedFetcher. +-------------------------------------------------------------------------------- + +-- | Rate-limited fetch interface over caller-supplied queues. 
+-- +-- A 'RateLimitedFetcher' encapsulates rate-limiting state but has __no opinion +-- about which queue to read from__. The 'TBQueue' is a plain parameter on every +-- call, so the caller is free to pass: +-- +-- * the same shared queue on every call, +-- * a different per-target queue for each worker, +-- * or even a different queue from one call to the next. +-- +-- The fetcher never captures, stores, or inspects the queue; it only reads one +-- token from whatever queue it receives and applies the rate limit. +-- +-- Two fetch modes are provided for pull-based protocols that distinguish +-- between "I need at least one item" and "give me more if available": +-- +-- * 'blockingFetch': claims a rate-limit slot, sleeps for the required delay, +-- and returns the token. When the queue is empty, behaviour depends on the +-- 'Raw.OnExhaustion' mode: 'Raw.Error' throws 'QueueStarved'; 'Raw.Block' +-- retries until a token becomes available. +-- +-- * 'nonBlockingFetch': returns 'Nothing' when rate-limited (ahead of schedule) +-- or when the queue is empty in 'Raw.Block' mode. In 'Raw.Error' mode, throws +-- 'QueueStarved' if not rate-limited but the queue is empty. +data RateLimitedFetcher token = RateLimitedFetcher + { -- | Claim a rate-limit slot, sleep for the computed delay, return one token + -- from the given queue. Retries or throws on empty queue depending on the + -- 'Raw.OnExhaustion' mode. + blockingFetch :: STM.TBQueue token -> IO token + -- | Return @Just token@ if the rate limit allows, 'Nothing' if ahead of + -- schedule or if the queue is empty in 'Raw.Block' mode. Throws + -- 'QueueStarved' on empty queue in 'Raw.Error' mode. + , nonBlockingFetch :: STM.TBQueue token -> IO (Maybe token) + } + +-- | Build a 'RateLimitedFetcher' from a 'RL.RateLimiter'. +-- +-- On the hot path both modes use non-blocking STM ('tryReadTBQueue', never +-- 'readTBQueue' / 'retry'), so no thread parks inside STM while tokens are +-- flowing. 
The sole exception is the 'Raw.Block' starvation fallback in +-- 'blockingFetch', which uses 'peekTBQueue' as an event-driven gate (see the +-- inline comment for the trade-off analysis). See +-- "Cardano.Benchmarking.PullFiction.Internal.RateLimiter" for the full design. +mkRateLimitedFetcher :: Raw.OnExhaustion + -> RL.RateLimiter + -> RateLimitedFetcher token +mkRateLimitedFetcher onExhaustion rateLimiter = RateLimitedFetcher + { blockingFetch = goBlocking + , nonBlockingFetch = goNonBlocking + } + where + goBlocking queue = do + now <- Clock.getTime + result <- STM.atomically $ RL.waitToken now rateLimiter queue + case result of + Just (token, delay) -> do + -- Delays this thread and not the global RateLimiter. + threadDelayNanos (Clock.toNanoSecs delay) + pure token + -- The queue is empty. + Nothing -> case onExhaustion of + Raw.Error -> + -- The payload queue is empty. The payload builder cannot keep up + -- with the configured TPS demand. At this stage of the library we + -- treat this as a fatal error rather than silently degrading + -- throughput; the user must either reduce TPS, increase the number + -- of initial inputs, or parallelise the builder. + throwIO $ QueueStarved + "blockingFetch: payload queue empty, cannot keep up with TPS." + Raw.Block -> do + -- Gate: park until the builder produces at least one payload. + -- + -- 'peekTBQueue' retries (parks the thread via STM retry) until the + -- queue is non-empty, then succeeds without consuming the item. + -- This is event-driven: the thread uses zero CPU while parked and + -- wakes as soon as the builder writes. + -- + -- The stale-clock concern documented in 'RL.waitToken' does not + -- apply here: 'goBlocking' captures a fresh timestamp on every + -- iteration, so the rate limiter always sees an accurate clock. + -- Fairness is likewise unaffected: the rate limiter's FIFO property + -- comes from the atomic slot claiming inside 'waitToken', not from + -- the retry mechanism. 
+ -- + -- Trade-off: when N workers are starved on the same queue, a single + -- builder write wakes all N (GHC's STM wake-all). N-1 fail + -- 'tryReadTBQueue' inside 'waitToken' and re-park. This is bounded + -- by the number of targets per workload and is far cheaper than + -- polling ('threadDelay' would cause N wakeups per requested sleep + -- time regardless of builder activity). + _ <- STM.atomically $ STM.peekTBQueue queue + goBlocking queue + goNonBlocking queue = do + now <- Clock.getTime + result <- STM.atomically $ RL.tryWaitToken now rateLimiter queue + case result of + -- Rate limited, discard the nanoseconds and return. + Left _ -> pure Nothing + -- Not rate limited and the queue was not empty. + Right (Just token) -> pure (Just token) + -- The queue is empty. + Right Nothing -> case onExhaustion of + Raw.Error -> + -- The payload queue is empty. The payload builder cannot keep up + -- with the configured TPS demand. At this stage of the library we + -- treat this as a fatal error rather than silently degrading + -- throughput; the user must either reduce TPS, increase the number + -- of initial inputs, or parallelise the builder. + throwIO $ QueueStarved + "nonBlockingFetch: payload queue empty, cannot keep up with TPS." + Raw.Block -> pure Nothing + +-- | Safely sleep for a duration in nanoseconds. +-- +-- Converts nanoseconds to microseconds for 'threadDelay'. To prevent integer +-- overflow on 32-bit systems (where 'Int' maxes out at ~2147s), the delay is +-- clamped to 'maxBound :: Int'. This ensures that even with extremely low TPS +-- configurations (TPS below ~0.0005), the generator sleeps for the maximum +-- representable period rather than wrapping around to a small or negative value +-- and triggering an accidental token burst. +-- Replaces: `threadDelay (fromIntegral (Clock.toNanoSecs nanos `div` 1_000))`. 
+threadDelayNanos :: Integer -> IO () +threadDelayNanos nanos = + let micros = nanos `div` 1_000 + clamped = fromIntegral (min (fromIntegral (maxBound :: Int)) micros) + in when (clamped > 0) $ threadDelay clamped + +-------------------------------------------------------------------------------- +-- Workload runner. +-------------------------------------------------------------------------------- + +-- | A worker callback that runs inside a labeled 'Async.Async'. +-- +-- 'runWorkload' builds the rate-limited, recycling fetch functions for each +-- target and spawns a labeled async that calls this callback. The callback +-- receives: +-- +-- 1. The fully resolved 'Runtime.Target' (carries addr, port, batch size, +-- and target name for error attribution). +-- 2. @fetchPayload@: blocking fetch that claims one rate-limit slot, reads a +-- @(payload, [input])@ pair from the payload queue, writes the @[input]@ +-- component back to the workload's input queue, and returns the @payload@. +-- 3. @tryFetchPayload@: non-blocking variant that returns @Nothing@ when +-- rate-limited. On success, writes inputs back and returns the payload the +-- same way. +-- +-- Both fetch functions handle the @[input]@ recycling automatically: whatever +-- inputs the builder pairs with the payload are written back to the input queue +-- after each fetch. The callback must not write to the input queue itself +-- (doing so would duplicate inputs). Its only responsibilities are delivering +-- the payload and any application-level bookkeeping. +-- +-- The thread is already labeled @workloadName\/targetName@ by 'runWorkload'. +-- The callback body runs for the lifetime of the generator. It should not +-- create its own async or label its own thread; 'runWorkload' handles both. +type TargetWorker input payload + = Runtime.Target input payload -- ^ The resolved target. + -> IO payload -- ^ Blocking fetch (rate-limited, recycles inputs). 
+ -> IO (Maybe payload) -- ^ Non-blocking fetch (rate-limited, recycles inputs). + -> IO () -- ^ Worker body (runs inside labeled async). + +-- | Run a load-generation workload: for each target, build rate-limited fetch +-- functions that recycle consumed inputs, spawn a labeled async, and call the +-- worker callback inside it. +-- +-- Rate limiter creation and the shared\/independent decision are handled by +-- 'Runtime.resolve'. This function simply activates the pre-built limiters. +-- +-- For each target the function: +-- +-- 1. Builds a 'RateLimitedFetcher' from the target's 'Runtime.rateLimiter'. +-- 2. Wraps it with pipe recycling to produce @fetchPayload :: IO payload@ and +-- @tryFetchPayload :: IO (Maybe payload)@. +-- 3. Computes a thread label: @workloadName ++ \"\/\" ++ targetName@. +-- 4. Creates an 'Async.Async' that labels the thread, then runs the worker +-- callback. +-- +-- Returns the list of worker asyncs (__unlinked__). Callers decide how to +-- monitor them: 'Main.hs' links them for immediate propagation; the test +-- harness polls synchronously so Tasty's 'withResource' can cache the +-- exception. +runWorkload + :: Runtime.Workload input payload + -> TargetWorker input payload + -> IO [Async.Async ()] +runWorkload workload targetWorker = + mapM + (\target -> do + let fetcher = mkRateLimitedFetcher + (Runtime.onExhaustion target) + (Runtime.rateLimiter target) + pipe = Runtime.targetPipe target + -- Fetch one payload (blocking), write its [input] back, return payload. + fetchPayload = do + (payload, recycledInputs) <- + (blockingFetch fetcher) (Runtime.pipePayloadQueue pipe) + -- Recycling is a separate STM transaction from the fetch above. 
+ -- Merging both into one transaction would widen the critical + -- section: the combined transaction would hold the rate-limiter + -- TVars and the payload queue while also writing to the input + -- queue, increasing contention and risking that a slow recycle + -- (many inputs) stalls other workers competing for rate-limit + -- slots. Keeping them separate means the fetch-and-claim is short; + -- the recycle cannot delay other submissions. + -- + -- Trade-off: between the two transactions, recycled inputs are held + -- only in memory. If the thread is killed in this window, those + -- inputs are lost. This is acceptable; recycling happens on + -- delivery, not on downstream confirmation (see + -- 'Runtime.pipeRecycle' for the full rationale). + STM.atomically $ Runtime.pipeRecycle pipe recycledInputs + pure payload + -- Try to fetch one payload (non-blocking). + tryFetchPayload = do + result <- (nonBlockingFetch fetcher) (Runtime.pipePayloadQueue pipe) + case result of + Nothing -> pure Nothing + Just (payload, recycledInputs) -> do + -- See fetchPayload above for why recycle is a separate + -- transaction. + STM.atomically $ Runtime.pipeRecycle pipe recycledInputs + pure (Just payload) + -- Always labeled threads. + threadLabel = + Runtime.workloadName workload ++ "/" ++ Runtime.targetName target + -- Return async (unlinked, caller decides monitoring strategy). 
+ async <- Async.async $ do + tid <- myThreadId + labelThread tid threadLabel + targetWorker target fetchPayload tryFetchPayload + pure async + ) + (Map.elems (Runtime.targets workload)) diff --git a/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Fund.hs b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Fund.hs new file mode 100644 index 00000000000..62a05770e0f --- /dev/null +++ b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Fund.hs @@ -0,0 +1,186 @@ +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TypeApplications #-} + +-------------------------------------------------------------------------------- + +module Cardano.Benchmarking.TxCentrifuge.Fund + ( Fund (..) + , loadFunds + , genesisTxIn + , castToGenesisUTxOKey + ) where + +-------------------------------------------------------------------------------- + +---------- +-- base -- +---------- +import Data.Bifunctor (first) +import Data.IORef qualified as IORef +import Text.Read (readMaybe) +----------- +-- aeson -- +----------- +import Data.Aeson qualified as Aeson +import Data.Aeson ((.:), (.:?)) +import Data.Aeson.Types qualified as Aeson +----------------- +-- cardano-api -- +----------------- +import Cardano.Api qualified as Api +---------------- +-- containers -- +---------------- +import Data.Map.Strict qualified as Map +---------- +-- text -- +---------- +import Data.Text qualified as T +import Data.Text.Encoding qualified as T + +-------------------------------------------------------------------------------- + +-- | A spendable fund: a UTxO reference, its Lovelace value, and the signing key +-- required to spend it. +data Fund = Fund + { fundTxIn :: !Api.TxIn + -- | Lovelace amount. + , fundValue :: !Integer + -- | Key to spend this UTxO. 
+  , fundSignKey :: !(Api.SigningKey Api.PaymentKey)
+  }
+
+--------------------------------------------------------------------------------
+-- JSON loading
+--------------------------------------------------------------------------------
+
+-- | Internal: JSON-parseable fund entry. Two variants:
+--
+-- * 'FundEntryPayment': a regular fund with an explicit UTxO reference.
+--   @{ "tx_in": "txid#ix", "value": 1000000, "signing_key": "payment.skey" }@
+--
+-- * 'FundEntryGenesis': a genesis UTxO fund identified only by its key.
+--   The TxIn is derived via 'Api.genesisUTxOPseudoTxIn' (always TxIx 0).
+--   @{ "signing_key": "genesis.skey", "value": 1000000 }@
+data FundEntry
+  = FundEntryPayment !Api.TxIn !Integer !FilePath
+  | FundEntryGenesis !Integer !FilePath
+
+instance Aeson.FromJSON FundEntry where
+  parseJSON = Aeson.withObject "Fund" $ \o -> do
+    -- Common fields.
+    val <- o .: "value"
+    keyPath <- o .: "signing_key"
+    -- If it has a "tx_in" field it is a 'FundEntryPayment'.
+    mbTxInStr <- o .:? "tx_in"
+    case mbTxInStr of
+      Just txInStr -> do
+        txIn <- parseTxIn txInStr
+        pure (FundEntryPayment txIn val keyPath)
+      Nothing -> do
+        pure (FundEntryGenesis val keyPath)
+
+-- | Parse @"txid#ix"@ format. Both parts are required.
+parseTxIn :: T.Text -> Aeson.Parser Api.TxIn
+parseTxIn text =
+  let (txIdHex, rest) = T.breakOn "#" text
+  in case T.uncons rest of
+    Just ('#', ds) ->
+      case Api.deserialiseFromRawBytesHex @Api.TxId (T.encodeUtf8 txIdHex) of
+        Left err -> fail $ "Invalid TxId: " ++ show err
+        Right txId -> case readMaybe (T.unpack ds) of
+          Nothing -> fail $ "Invalid TxIx: expected an integer, got " ++ show ds
+          Just ix -> pure $ Api.TxIn txId (Api.TxIx ix)
+    _ -> fail "Invalid TxIn: expected \"txid#ix\" format"
+
+-- | Load funds from a JSON file and return them as a list.
+-- The JSON file should contain an array of fund objects, each with a
+-- @"signing_key"@ field pointing to a @.skey@ file.
+-- Signing keys are cached by path to avoid redundant disk reads. +-- +-- For key-only entries (no @"tx_in"@), the genesis UTxO pseudo-TxIn is derived +-- from the signing key using the provided 'Api.NetworkId'. +-- +-- NOTE: the entire JSON array is decoded into memory before returning. +-- For very large fund files a streaming parser (e.g. json-stream) could yield +-- funds incrementally so the caller can start filling queues before the file +-- is fully read. +loadFunds :: Api.NetworkId -> FilePath -> IO (Either String [Fund]) +loadFunds networkId path = do + result <- Aeson.eitherDecodeFileStrict' path + case result of + Left err -> pure (Left err) + Right (entries :: [FundEntry]) -> do + keyCache <- IORef.newIORef Map.empty + eFunds <- mapM (entryToFund networkId keyCache) entries + case sequence eFunds of + Left err -> pure (Left err) + Right funds -> pure (Right funds) + +-- | Convert a JSON entry to a Fund by loading its signing key (cached). +entryToFund + :: Api.NetworkId + -> IORef.IORef (Map.Map FilePath (Api.SigningKey Api.PaymentKey)) + -> FundEntry + -> IO (Either String Fund) +entryToFund networkId cacheRef entry = do + let keyPath = entryKeyPath entry + cache <- IORef.readIORef cacheRef + case Map.lookup keyPath cache of + Just key -> pure $ Right $ mkFund key + Nothing -> do + eKey <- readSigningKey keyPath + case eKey of + Left err -> pure $ Left $ + "Failed to load signing key " + ++ keyPath ++ ": " ++ err + Right key -> do + IORef.modifyIORef' cacheRef (Map.insert keyPath key) + pure $ Right $ mkFund key + where + + entryKeyPath :: FundEntry -> FilePath + entryKeyPath (FundEntryPayment _ _ p) = p + entryKeyPath (FundEntryGenesis _ p) = p + + mkFund :: Api.SigningKey Api.PaymentKey -> Fund + mkFund key = case entry of + FundEntryPayment txIn val _ -> Fund txIn val key + FundEntryGenesis val _ -> Fund (genesisTxIn networkId key) val key + +-- | Derive the genesis UTxO pseudo-TxIn from a payment signing key.
+-- Casts to 'Api.GenesisUTxOKey' to compute the key hash expected by +-- 'Api.genesisUTxOPseudoTxIn'. +genesisTxIn :: Api.NetworkId -> Api.SigningKey Api.PaymentKey -> Api.TxIn +genesisTxIn networkId + = Api.genesisUTxOPseudoTxIn networkId + . Api.verificationKeyHash + . Api.getVerificationKey + . castToGenesisUTxOKey + +-- | Cast a 'Api.PaymentKey' signing key to a 'Api.GenesisUTxOKey' signing key. +-- Both key types use the same underlying ed25519 representation; this cast +-- enables computing the genesis UTxO pseudo-TxIn via +-- 'Api.genesisUTxOPseudoTxIn'. +castToGenesisUTxOKey + :: Api.SigningKey Api.PaymentKey + -> Api.SigningKey Api.GenesisUTxOKey +castToGenesisUTxOKey (Api.PaymentSigningKey skey) = + Api.GenesisUTxOSigningKey skey + +-- | Read a signing key from a text envelope file. +-- Accepts both @PaymentSigningKey_ed25519@ and +-- @GenesisUTxOSigningKey_ed25519@ key types. +-- Genesis UTxO keys are cast to payment keys. +readSigningKey :: FilePath -> IO (Either String (Api.SigningKey Api.PaymentKey)) +readSigningKey fp = do + result <- Api.readFileTextEnvelopeAnyOf + [ Api.FromSomeType (Api.AsSigningKey Api.AsPaymentKey) id + , Api.FromSomeType + (Api.AsSigningKey Api.AsGenesisUTxOKey) + Api.castSigningKey + ] + (Api.File fp) + pure $ first show result diff --git a/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/NodeToNode.hs b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/NodeToNode.hs new file mode 100644 index 00000000000..d6fe35702ff --- /dev/null +++ b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/NodeToNode.hs @@ -0,0 +1,388 @@ +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE NumericUnderscores #-} +{-# LANGUAGE TypeApplications #-} + +-------------------------------------------------------------------------------- + +module Cardano.Benchmarking.TxCentrifuge.NodeToNode + ( CardanoBlock + -- * Client bundle.
+ , Clients (..), emptyClients + -- * Connection. + , connect + ) where + +-------------------------------------------------------------------------------- + +---------- +-- base -- +---------- +import Data.Maybe (catMaybes) +import Data.Proxy (Proxy (..)) +import Data.Void (Void) +---------------- +-- bytestring -- +---------------- +import Data.ByteString.Lazy qualified as BSL +---------------- +-- containers -- +---------------- +import Data.Map.Strict qualified as Map +------------- +-- network -- +------------- +import Network.Socket qualified as Socket +----------------- +-- network-mux -- +----------------- +import Network.Mux qualified as Mux +-------------------------- +-- ouroboros-consensus -- +-------------------------- +import Ouroboros.Consensus.Block.Abstract qualified as Block +import Ouroboros.Consensus.Cardano qualified as Consensus (CardanoBlock) +import Ouroboros.Consensus.Network.NodeToNode qualified as NetN2N +import Ouroboros.Consensus.Node.NetworkProtocolVersion qualified as NetVer +import Ouroboros.Consensus.Node.Run () +import Ouroboros.Consensus.Shelley.Eras qualified as Eras +-- Orphan instances needed for +-- RunNode / SupportedNetworkProtocolVersion CardanoBlock +import Ouroboros.Consensus.Shelley.Ledger.SupportsProtocol () +----------------------- +-- ouroboros-network -- +----------------------- +import Ouroboros.Network.Context qualified as NetCtx +import Ouroboros.Network.Driver qualified as Driver +import Ouroboros.Network.Magic qualified as Magic +import Ouroboros.Network.Mux qualified as NetMux +import Ouroboros.Network.NodeToNode qualified as NtN +import Ouroboros.Network.PeerSelection.PeerSharing qualified as PeerSharing +import Ouroboros.Network.PeerSelection.PeerSharing.Codec qualified as PSCodec +import Ouroboros.Network.Protocol.BlockFetch.Client qualified as BFClient +import Ouroboros.Network.Protocol.ChainSync.Client qualified as CSClient +import Ouroboros.Network.Protocol.Handshake.Version qualified as Handshake +import 
Ouroboros.Network.Protocol.KeepAlive.Client qualified as KAClient +import Ouroboros.Network.Protocol.KeepAlive.Codec qualified as KACodec +import Ouroboros.Network.Protocol.TxSubmission2.Client qualified as TxSub +import Ouroboros.Network.Snocket qualified as Snocket +--------------------------------- +-- ouroboros-network-framework -- +--------------------------------- +import Ouroboros.Network.IOManager qualified as IOManager +--------------- +-- serialise -- +--------------- +import Codec.Serialise qualified as Serialise +------------------- +-- tx-centrifuge -- +------------------- +import Cardano.Benchmarking.TxCentrifuge.NodeToNode.KeepAlive + qualified as KeepAlive +import Cardano.Benchmarking.TxCentrifuge.NodeToNode.TxIdSync + qualified as TxIdSync +import Cardano.Benchmarking.TxCentrifuge.NodeToNode.TxSubmission + qualified as TxSubmission +import Cardano.Benchmarking.TxCentrifuge.Tracing qualified as Tracing + +-------------------------------------------------------------------------------- + +type CardanoBlock = Consensus.CardanoBlock Eras.StandardCrypto + +-------------------------------------------------------------------------------- +-- Client bundle. +-------------------------------------------------------------------------------- + +-- | Bundle of mini-protocol clients for a NodeToNode connection. +-- +-- All clients are optional ('Maybe'). When 'Nothing', the protocol is not +-- included in the connection at all, the mux simply doesn't run that +-- mini-protocol. This is the proper way to disable protocols per the +-- ouroboros-network design (using @[]@ / 'mempty' in the protocol bundle). +-- +-- This allows callers to selectively enable protocols: +-- +-- * TxSubmission only: for submitting transactions without chain following. +-- * ChainSync + BlockFetch: for tracking transaction confirmations. +-- * All: for full closed-loop operation with confirmation-based recycling. 
+data Clients = Clients + { clientBlockFetch :: !(Maybe TxIdSync.BlockFetchClient) + , clientChainSync :: !(Maybe TxIdSync.ChainSyncClient) + , clientKeepAlive :: !(Maybe KeepAlive.KeepAliveClient) + , clientTxSubmission :: !(Maybe TxSubmission.TxSubmissionClient) + } + +-- | Empty clients: all protocols disabled (excluded from the connection). +emptyClients :: Clients +emptyClients = Clients + { clientBlockFetch = Nothing + , clientChainSync = Nothing + , clientKeepAlive = Nothing + , clientTxSubmission = Nothing + } + +-------------------------------------------------------------------------------- +-- Mini-protocol builders. +-------------------------------------------------------------------------------- + +-- | Protocol limits matching cardano-diffusion defaults. +-- See Cardano.Network.NodeToNode.defaultMiniProtocolParameters. + +blockFetchLimits :: NetMux.MiniProtocolLimits +blockFetchLimits = NetMux.MiniProtocolLimits + { NetMux.maximumIngressQueue = 20_000_000 } + +chainSyncLimits :: NetMux.MiniProtocolLimits +chainSyncLimits = NetMux.MiniProtocolLimits + { NetMux.maximumIngressQueue = 300_000 } + +keepAliveLimits :: NetMux.MiniProtocolLimits +keepAliveLimits = NetMux.MiniProtocolLimits + { NetMux.maximumIngressQueue = 1_500 } + +txSubmissionLimits :: NetMux.MiniProtocolLimits +txSubmissionLimits = NetMux.MiniProtocolLimits + { NetMux.maximumIngressQueue = 10_000_000 } + +-- | Build a BlockFetch mini-protocol.
+mkBlockFetchMiniProtocol + :: NetN2N.Codecs CardanoBlock NtN.RemoteAddress + Serialise.DeserialiseFailure IO + BSL.ByteString BSL.ByteString BSL.ByteString BSL.ByteString + BSL.ByteString BSL.ByteString BSL.ByteString + -> TxIdSync.BlockFetchClient + -> NetMux.MiniProtocol + 'Mux.InitiatorMode + (NetCtx.MinimalInitiatorContext NtN.RemoteAddress) + (NetCtx.ResponderContext NtN.RemoteAddress) + BSL.ByteString IO () Void +mkBlockFetchMiniProtocol codecs client = NetMux.MiniProtocol + { NetMux.miniProtocolNum = NetMux.MiniProtocolNum 3 + , NetMux.miniProtocolStart = Mux.StartOnDemand + , NetMux.miniProtocolLimits = blockFetchLimits + , NetMux.miniProtocolRun = NetMux.InitiatorProtocolOnly + $ NetMux.MiniProtocolCb $ \_ctx channel -> + Driver.runPeer mempty (NetN2N.cBlockFetchCodec codecs) channel + (BFClient.blockFetchClientPeer client) + } + +-- | Build a ChainSync mini-protocol. +mkChainSyncMiniProtocol + :: NetN2N.Codecs CardanoBlock NtN.RemoteAddress + Serialise.DeserialiseFailure IO + BSL.ByteString BSL.ByteString BSL.ByteString BSL.ByteString + BSL.ByteString BSL.ByteString BSL.ByteString + -> TxIdSync.ChainSyncClient + -> NetMux.MiniProtocol + 'Mux.InitiatorMode + (NetCtx.MinimalInitiatorContext NtN.RemoteAddress) + (NetCtx.ResponderContext NtN.RemoteAddress) + BSL.ByteString IO () Void +mkChainSyncMiniProtocol codecs client = NetMux.MiniProtocol + { NetMux.miniProtocolNum = NetMux.MiniProtocolNum 2 + , NetMux.miniProtocolStart = Mux.StartOnDemand + , NetMux.miniProtocolLimits = chainSyncLimits + , NetMux.miniProtocolRun = NetMux.InitiatorProtocolOnly + $ NetMux.MiniProtocolCb $ \_ctx channel -> + Driver.runPeer mempty (NetN2N.cChainSyncCodec codecs) channel + (CSClient.chainSyncClientPeer client) + } + +-- | Build a KeepAlive mini-protocol. 
+mkKeepAliveMiniProtocol + :: NetN2N.Codecs CardanoBlock NtN.RemoteAddress + Serialise.DeserialiseFailure IO + BSL.ByteString BSL.ByteString BSL.ByteString BSL.ByteString + BSL.ByteString BSL.ByteString BSL.ByteString + -> Tracing.Tracers + -> KeepAlive.KeepAliveClient + -> NetMux.MiniProtocol + 'Mux.InitiatorMode + (NetCtx.MinimalInitiatorContext NtN.RemoteAddress) + (NetCtx.ResponderContext NtN.RemoteAddress) + BSL.ByteString IO () Void +mkKeepAliveMiniProtocol codecs tracers client = NetMux.MiniProtocol + { NetMux.miniProtocolNum = NetMux.MiniProtocolNum 8 + , NetMux.miniProtocolStart = Mux.StartOnDemandAny + , NetMux.miniProtocolLimits = keepAliveLimits + , NetMux.miniProtocolRun = NetMux.InitiatorProtocolOnly + $ NetMux.MiniProtocolCb $ \_ctx channel -> + Driver.runPeerWithLimits + (Tracing.trKeepAlive tracers) + (NetN2N.cKeepAliveCodec codecs) + (KACodec.byteLimitsKeepAlive (const 0)) + KACodec.timeLimitsKeepAlive + channel + $ KAClient.keepAliveClientPeer client + } + +-- | Build a TxSubmission mini-protocol. 
+mkTxSubmissionMiniProtocol + :: NetN2N.Codecs CardanoBlock NtN.RemoteAddress + Serialise.DeserialiseFailure IO + BSL.ByteString BSL.ByteString BSL.ByteString BSL.ByteString + BSL.ByteString BSL.ByteString BSL.ByteString + -> Tracing.Tracers + -> TxSubmission.TxSubmissionClient + -> NetMux.MiniProtocol + 'Mux.InitiatorMode + (NetCtx.MinimalInitiatorContext NtN.RemoteAddress) + (NetCtx.ResponderContext NtN.RemoteAddress) + BSL.ByteString IO () Void +mkTxSubmissionMiniProtocol codecs tracers client = NetMux.MiniProtocol + { NetMux.miniProtocolNum = NetMux.MiniProtocolNum 4 + , NetMux.miniProtocolStart = Mux.StartOnDemand + , NetMux.miniProtocolLimits = txSubmissionLimits + , NetMux.miniProtocolRun = NetMux.InitiatorProtocolOnly + $ NetMux.MiniProtocolCb $ \_ctx channel -> + Driver.runPeer (Tracing.trTxSubmission2 tracers) + (NetN2N.cTxSubmission2Codec codecs) channel + (TxSub.txSubmissionClientPeer client) + } + +-------------------------------------------------------------------------------- + +-- | Connect to a remote cardano-node via NodeToNode protocols. +-- +-- Establishes a multiplexed connection running the enabled subset of the +-- ChainSync, BlockFetch, TxSubmission2 and KeepAlive mini-protocols. +-- +-- For any client set to 'Nothing' in 'Clients', the corresponding +-- mini-protocol is not included in the connection at all. +-- +-- Returns @Left msg@ on handshake failure or unexpected connection termination. +-- The @Right@ case is unreachable (the mux never returns successfully).
+connect + :: IOManager.IOManager + -> Block.CodecConfig CardanoBlock + -> Magic.NetworkMagic + -> Tracing.Tracers + -> Socket.AddrInfo + -> Clients + -> IO (Either String ()) +connect + ioManager + codecConfig + networkMagic + tracers + remoteAddr + clients = do + done <- NtN.connectTo (Snocket.socketSnocket ioManager) + NtN.NetworkConnectTracers + { NtN.nctMuxTracers = Mux.nullTracers + , NtN.nctHandshakeTracer = mempty + } + peerMultiplex + Nothing + (Socket.addrAddress remoteAddr) + case done of + Left err -> pure $ Left $ + "handshake failed: " ++ show err + Right choice -> case choice of + Left () -> pure $ Left + "connection terminated unexpectedly" + Right {} -> error "connect: unreachable (Void)" + + where + + n2nVer :: NetVer.NodeToNodeVersion + n2nVer = NetVer.NodeToNodeV_14 + + blkN2nVer :: NetVer.BlockNodeToNodeVersion CardanoBlock + blkN2nVer = case Map.lookup n2nVer supportedVers of + Just v -> v + Nothing -> error $ + "NodeToNode.connect: " ++ show n2nVer + ++ " is not in supportedNodeToNodeVersions. 
" + ++ "Supported: " ++ show (Map.keys supportedVers) + + supportedVers + :: Map.Map + NetVer.NodeToNodeVersion + ( NetVer.BlockNodeToNodeVersion + CardanoBlock + ) + supportedVers = + NetVer.supportedNodeToNodeVersions (Proxy @CardanoBlock) + + myCodecs + :: NetN2N.Codecs + CardanoBlock + NtN.RemoteAddress + Serialise.DeserialiseFailure + IO + BSL.ByteString + BSL.ByteString + BSL.ByteString + BSL.ByteString + BSL.ByteString + BSL.ByteString + BSL.ByteString + myCodecs = + NetN2N.defaultCodecs + codecConfig + blkN2nVer + PSCodec.encodeRemoteAddress + PSCodec.decodeRemoteAddress + n2nVer + + peerMultiplex + :: NtN.Versions + NetVer.NodeToNodeVersion + NtN.NodeToNodeVersionData + ( NetMux.OuroborosApplication + 'Mux.InitiatorMode + ( NetCtx.MinimalInitiatorContext + NtN.RemoteAddress + ) + ( NetCtx.ResponderContext + NtN.RemoteAddress + ) + BSL.ByteString + IO + () + Void + ) + peerMultiplex = + Handshake.simpleSingletonVersions + n2nVer + ( NtN.NodeToNodeVersionData + { NtN.networkMagic = networkMagic + , NtN.diffusionMode = NtN.InitiatorOnlyDiffusionMode + , NtN.peerSharing = PeerSharing.PeerSharingDisabled + , NtN.query = False + } + ) + $ \_n2nData -> bundleToApp protocolBundle + + -- | Build the protocol bundle with conditional protocol inclusion. + -- Protocols with 'Nothing' clients are excluded (empty list). + protocolBundle :: NetMux.OuroborosBundle + 'Mux.InitiatorMode + (NetCtx.MinimalInitiatorContext NtN.RemoteAddress) + (NetCtx.ResponderContext NtN.RemoteAddress) + BSL.ByteString + IO + () + Void + protocolBundle = NetMux.TemperatureBundle + -- Hot protocols: ChainSync, BlockFetch, TxSubmission (conditional). + (NetMux.WithHot $ catMaybes + [ mkChainSyncMiniProtocol myCodecs <$> clientChainSync clients + , mkBlockFetchMiniProtocol myCodecs <$> clientBlockFetch clients + , mkTxSubmissionMiniProtocol myCodecs tracers <$> clientTxSubmission clients + ]) + -- Warm protocols: none. + (NetMux.WithWarm []) + -- Established protocols: KeepAlive (conditional). 
+ (NetMux.WithEstablished $ catMaybes + [ mkKeepAliveMiniProtocol myCodecs tracers <$> clientKeepAlive clients + ]) + + -- | Convert bundle to application by folding all protocols. + bundleToApp :: NetMux.OuroborosBundle mode initiatorCtx responderCtx bs m a b + -> NetMux.OuroborosApplication mode initiatorCtx responderCtx bs m a b + bundleToApp (NetMux.TemperatureBundle (NetMux.WithHot h) (NetMux.WithWarm w) (NetMux.WithEstablished e)) = + NetMux.OuroborosApplication (h <> w <> e) + diff --git a/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/NodeToNode/KeepAlive.hs b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/NodeToNode/KeepAlive.hs new file mode 100644 index 00000000000..3d1cd2d9858 --- /dev/null +++ b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/NodeToNode/KeepAlive.hs @@ -0,0 +1,78 @@ +{-# LANGUAGE ImportQualifiedPost #-} + +-------------------------------------------------------------------------------- + +-- TODO TODO TODO: Add support for latency metrics, very useful for benchmarks. + +-- | KeepAlive client for maintaining connection liveness. +-- +-- This module provides a KeepAlive protocol client that sends periodic +-- keepalive messages to prevent idle connection timeouts. 
+-- +-- == Usage +-- @ +-- client <- keepAliveClient 10 -- 10 seconds between keepalives +-- -- Use client with NodeToNode.connect +-- @ +module Cardano.Benchmarking.TxCentrifuge.NodeToNode.KeepAlive + ( KeepAliveClient + , keepAliveClient + ) where + +-------------------------------------------------------------------------------- + +---------- +-- base -- +---------- +import Data.Proxy (Proxy (..)) +---------------- +-- containers -- +---------------- +import Data.Map.Strict qualified as Map +---------- +-- time -- +---------- +import Data.Time.Clock (DiffTime) +----------------------- +-- ouroboros-network -- +----------------------- +import Ouroboros.Network.ControlMessage qualified as ControlMsg +import Ouroboros.Network.KeepAlive qualified as KeepAlive +import Ouroboros.Network.Protocol.KeepAlive.Client qualified as KAClient +--------- +-- stm -- +--------- +import Control.Concurrent.Class.MonadSTM.Strict qualified as StrictSTM +------------ +-- random -- +------------ +import System.Random qualified as Random + +-------------------------------------------------------------------------------- + +-- | KeepAlive client for maintaining connection liveness. +type KeepAliveClient = KAClient.KeepAliveClient IO () + +-- | Create a KeepAlive client that sends periodic keepalive messages. +-- +-- The client runs indefinitely, sending keepalive cookies at the specified +-- interval (in seconds). This keeps the connection alive and allows the remote +-- peer to detect connection failures. +-- +-- Note: This client does not track peer GSV (latency) metrics. For advanced +-- use cases requiring GSV tracking, construct the client directly using +-- 'Ouroboros.Network.KeepAlive.keepAliveClient' with appropriate parameters. +keepAliveClient + -- | Interval between keepalive messages (in seconds).
+ :: DiffTime + -> IO KeepAliveClient +keepAliveClient interval = do + rng <- Random.newStdGen + dummyGSVMap <- StrictSTM.newTVarIO Map.empty + pure $ KeepAlive.keepAliveClient + mempty -- tracer (no tracing in default client) + rng + (ControlMsg.continueForever (Proxy :: Proxy IO)) + () -- dummy peer address (GSV tracking not used) + dummyGSVMap + (KeepAlive.KeepAliveInterval interval) diff --git a/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/NodeToNode/TxIdSync.hs b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/NodeToNode/TxIdSync.hs new file mode 100644 index 00000000000..345a8cc386b --- /dev/null +++ b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/NodeToNode/TxIdSync.hs @@ -0,0 +1,409 @@ +{-# LANGUAGE BangPatterns #-} +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE LambdaCase #-} + +-------------------------------------------------------------------------------- + +-- | Transaction confirmation tracking via ChainSync and BlockFetch. +-- +-- This module provides clients for the ChainSync and BlockFetch mini-protocols +-- that work together to: +-- 1. Follow the chain tip via ChainSync (receives headers, detects rollbacks). +-- 2. Fetch block bodies via BlockFetch. +-- 3. Extract transactions from blocks. +-- 4. Broadcast when transactions reach the configured confirmation depth. +-- +-- == Header Queue (@stateHeaders@) +-- +-- The header queue is written to by ChainSync and read by BlockFetch. +-- Both protocols can remove headers from it: +-- * __ChainSync__ filters out headers past the rollback point on +-- @MsgRollBackward@. +-- * __BlockFetch__ claims the head on @MsgBlock@ (block received) or filters +-- it out on @MsgNoBlocks@ (block no longer available on the node). 
+-- +-- == Confirmation Depth +-- +-- The @confirmationDepth@ parameter controls when transactions are considered +-- confirmed: +-- * @0@: Immediate, broadcast as soon as a block is seen (no reorg +-- protection). +-- * @N@: Wait for N additional blocks on top before broadcasting. +-- +-- == Rollback Handling +-- +-- When ChainSync announces a rollback: +-- * All blocks after the rollback point are discarded from the pending queue. +-- * Transactions in discarded blocks are never broadcast. +-- * Transactions already broadcast (buried deeper than rollback) are unaffected. +-- +-- == Usage +-- +-- Create the sync state, pass the clients to a NodeToNode connection, and +-- subscribe to the broadcast channel to receive confirmed transactions: +-- +-- @ +-- -- 1. Create shared state with a confirmation depth of 6 blocks. +-- state <- emptyState Config { confirmationDepth = 6 } +-- +-- -- 2. Subscribe to the broadcast channel (before connecting, so no +-- -- confirmations are missed). +-- sub <- atomically $ dupTChan (stateBroadcast state) +-- +-- -- 3. Connect to a node, running both clients over the same mux. +-- N2N.connect ioManager codecConfig networkMagic tracers addrInfo +-- N2N.emptyClients +-- { N2N.clientChainSync = Just $ chainSyncClient state +-- , N2N.clientBlockFetch = Just $ blockFetchClient state +-- } +-- +-- -- 4. Read confirmed transactions as they arrive. +-- confirmed <- atomically $ readTChan sub +-- putStrLn $ "Confirmed: " ++ show (confirmedTxId confirmed) +-- @ +module Cardano.Benchmarking.TxCentrifuge.NodeToNode.TxIdSync + ( -- * Configuration + Config (..) + -- * State + , State, emptyState + -- * Subscription. + , ConfirmedTx (..) 
+ , stateBroadcast + -- * Client types + , BlockFetchClient + , ChainSyncClient + -- * Protocol Clients + , chainSyncClient + , blockFetchClient + ) where + +-------------------------------------------------------------------------------- + +---------- +-- base -- +---------- +import Control.Monad (forM_) +import Data.Foldable (toList) +import Data.Functor.Const (Const (..)) +import Numeric.Natural (Natural) +----------------- +-- cardano-api -- +----------------- +import Cardano.Api qualified as Api +-------------------- +-- cardano-ledger -- +-------------------- +import Cardano.Ledger.Block qualified as LedgerBlock +import Cardano.Ledger.Core qualified as Core +---------------- +-- containers -- +---------------- +import Data.Sequence (Seq) +import Data.Sequence qualified as Seq +------------------------- +-- ouroboros-consensus -- +------------------------- +import Ouroboros.Consensus.Block qualified as Block +import Ouroboros.Consensus.Cardano.Block qualified as Cardano +import Ouroboros.Consensus.Shelley.Eras qualified as Eras +import Ouroboros.Consensus.Shelley.Ledger qualified as Shelley +----------------------- +-- ouroboros-network -- +----------------------- +import Ouroboros.Network.Block qualified as Net +import Ouroboros.Network.Protocol.BlockFetch.Client qualified as BF +import Ouroboros.Network.Protocol.BlockFetch.Type qualified as BFType +import Ouroboros.Network.Protocol.ChainSync.Client qualified as CS +--------- +-- stm -- +--------- +import Control.Concurrent.STM qualified as STM + +-------------------------------------------------------------------------------- +-- Types. +-------------------------------------------------------------------------------- + +type CardanoBlock = Cardano.CardanoBlock Eras.StandardCrypto + +-- | BlockFetch client for retrieving full block bodies. +type BlockFetchClient = + BF.BlockFetchClient + CardanoBlock + (Net.Point CardanoBlock) + IO + () + +-- | ChainSync client for following the chain tip (receives headers). 
+type ChainSyncClient = + CS.ChainSyncClient + (Block.Header CardanoBlock) + (Net.Point CardanoBlock) + (Net.Tip CardanoBlock) + IO + () + +-------------------------------------------------------------------------------- +-- Internal State. +-------------------------------------------------------------------------------- + +-- | Configuration for transaction synchronization. +data Config = Config + { -- | Number of blocks to wait before confirming. + -- 0 = immediate (no reorg protection), N = wait for N blocks on top. + confirmationDepth :: !Natural + } + +-- | Information of a confirmed transaction, according to the 'Config'. +data ConfirmedTx = ConfirmedTx + { -- | The confirmed transaction ID. + confirmedTxId :: !Api.TxId + -- | Block number where this transaction was included. + , confirmedTxBlockNo :: !Block.BlockNo + -- | Slot number where this transaction was included. + , confirmedTxSlotNo :: !Block.SlotNo + } + deriving (Show, Eq) + +-- | Shared state between ChainSync and BlockFetch clients. +data State = State + { -- | Configuration. + stateConfig :: !Config + -- | Current chain tip as reported by the node. + , stateCurrentTip :: !(STM.TVar (Net.Tip CardanoBlock)) + -- | Headers received from ChainSync, waiting for BlockFetch. + -- Removed by both ChainSync (rollback filter) and BlockFetch (claim on + -- block received, filter on block unavailable). + -- Uses @TVar (Seq …)@ instead of @TBQueue@ because both removal patterns + -- need filtered access, which TBQueue does not support. + , stateHeaders :: !(STM.TVar (Seq (Block.Header CardanoBlock))) + -- | Blocks fetched but not yet confirmed (ordered by block number). + , statePendingBlocks :: !(STM.TVar (Seq CardanoBlock)) + -- | Broadcast channel for confirmed transactions. + -- Write-only end; subscribers obtain a read-end via 'subscribe'. + , stateBroadcast :: !(STM.TChan ConfirmedTx) + } + +-- | Create initial sync state. 
+emptyState :: Config -> IO State +emptyState config = do + currentTip <- STM.newTVarIO Net.TipGenesis + headerQueue <- STM.newTVarIO Seq.empty + pendingBlocks <- STM.newTVarIO Seq.empty + broadcast <- STM.newBroadcastTChanIO + pure State + { stateConfig = config + , stateCurrentTip = currentTip + , stateHeaders = headerQueue + , statePendingBlocks = pendingBlocks + , stateBroadcast = broadcast + } + +-------------------------------------------------------------------------------- +-- ChainSync Client. +-------------------------------------------------------------------------------- + +-- | ChainSync client that follows the chain and adds headers to a queue. +-- +-- On @MsgRollForward@: queues the header for BlockFetch. +-- On @MsgRollBackward@: discards pending blocks after the rollback point. +chainSyncClient :: State -> ChainSyncClient +chainSyncClient state = CS.ChainSyncClient $ pure clientStIdle + where + -- Request the next update from the server. + clientStIdle = CS.SendMsgRequestNext + (pure ()) -- Action when server says "await". + clientStNext -- Handler for the roll-forward / roll-backward response. + -- Handle the server's roll-forward or roll-backward response. + clientStNext = CS.ClientStNext + { -- Advance the tip and queue the new header for BlockFetch to use. + CS.recvMsgRollForward = \header tip -> CS.ChainSyncClient $ do + STM.atomically $ do + ---------- STM START ---------- + STM.writeTVar (stateCurrentTip state) tip + STM.modifyTVar' (stateHeaders state) + -- Append the new header at the end! + (\q -> q <> Seq.singleton header) + ---------- STM ENDED ---------- + -- Continue following the chain. + pure clientStIdle + , -- Discard pending blocks and queued headers past the rollback point. 
+ CS.recvMsgRollBackward = \rollbackPoint tip -> CS.ChainSyncClient $ do + let keepBlock block = case rollbackPoint of + Net.BlockPoint newSlot _ -> Block.blockSlot block <= newSlot + Net.GenesisPoint -> False + keepHeader header = case rollbackPoint of + Net.BlockPoint newSlot _ -> Block.blockSlot header <= newSlot + Net.GenesisPoint -> False + STM.atomically $ do + ---------- STM START ---------- + STM.writeTVar (stateCurrentTip state) tip + STM.modifyTVar' (stateHeaders state) (Seq.filter keepHeader) + STM.modifyTVar' (statePendingBlocks state) (Seq.filter keepBlock) + ---------- STM ENDED ---------- + pure clientStIdle + } + +-------------------------------------------------------------------------------- +-- BlockFetch Client. +-------------------------------------------------------------------------------- + +-- | BlockFetch client that fetches blocks and processes transactions. +-- +-- Continuously peeks at headers from the queue (without consuming them), +-- fetches their blocks, and processes transactions. Headers stay in +-- @stateHeaders@ until the block body arrives or the node reports the block is +-- unavailable, so that concurrent rollbacks (via ChainSync) can still filter +-- them out. This closes the in-flight gap that would otherwise allow a +-- rolled-back block to be silently inserted into @statePendingBlocks@. +blockFetchClient :: State -> BlockFetchClient +blockFetchClient state = BF.BlockFetchClient $ do + -- Peek at the next header without consuming it. + -- The header stays in stateHeaders so rollbacks can still filter it out. + header <- STM.atomically $ do + ---------- STM START ---------- + headersSeq <- STM.readTVar (stateHeaders state) + if Seq.null headersSeq + then STM.retry + else pure (Seq.index headersSeq 0) + ---------- STM ENDED ---------- + -- We ask for only one block, using a [point..point] range. + let !point = Net.BlockPoint + (Block.blockSlot header) + (Block.blockHash header) + -- The actual request. 
+ pure $ BF.SendMsgRequestRange + (BFType.ChainRange point point) + (BF.BlockFetchResponse + { -- MsgStartBatch: the node has the block and will send it next via + -- MsgBlock, followed by MsgBatchDone. + BF.handleStartBatch = pure BF.BlockFetchReceiver + -- MsgStartBatch → MsgBlock → MsgBatchDone. + { BF.handleBlock = \block -> do + STM.atomically $ do + ---------- STM START ---------- + -- False if a ChainSync rollback already removed this header. + notRolledBack <- claimHeader state point + if notRolledBack + then processBlock state block + else pure () + ---------- STM ENDED ---------- + -- Single-block range: no further blocks expected. + pure BF.BlockFetchReceiver + { BF.handleBlock = \_ -> + error "blockFetchClient: unexpected second block." + , BF.handleBatchDone = pure () + } + , BF.handleBatchDone = pure () + } + -- MsgNoBlocks: the node no longer has the requested block (e.g. it + -- was pruned or belongs to a fork that the node has since rolled + -- back). + , BF.handleNoBlocks = STM.atomically $ + ---------- STM START ---------- + -- Filter the peeked header out of stateHeaders (the other removal + -- path is ChainSync's rollback filter; see module header). + STM.modifyTVar' + (stateHeaders state) + (Seq.filter + (\h -> + let headerPoint = Net.BlockPoint + (Block.blockSlot h) + (Block.blockHash h) + in headerPoint /= point + ) + ) + ---------- STM ENDED ---------- + } + ) + -- Recursion. The continuation. Start the peek all over again. + (blockFetchClient state) + +-------------------------------------------------------------------------------- +-- Block Processing. +-------------------------------------------------------------------------------- + +-- | Claim the previously peeked header from the head of @stateHeaders@. +-- +-- Returns @True@ if the header was found and removed (slot and hash match the +-- head of the queue). 
Returns @False@ if the header is no longer present either +-- because a rollback already filtered it out, or because a previous call +-- already claimed it. +claimHeader :: State -> Net.Point CardanoBlock -> STM.STM Bool +claimHeader state point = do + headersSeq <- STM.readTVar (stateHeaders state) + if Seq.null headersSeq + then do + -- ChainSync rolled back the header. + pure False + else do + -- Get the first header in the sequence (like a queue). + let header = Seq.index headersSeq 0 + headerPoint = Net.BlockPoint + (Block.blockSlot header) + (Block.blockHash header) + -- As new headers are added at the end of the sequence, we check that the + -- first one is still the one we peeked. + if headerPoint == point + then do + -- We can remove it and let BlockFetch process it. + STM.writeTVar (stateHeaders state) (Seq.drop 1 headersSeq) + pure True + else do + -- ChainSync rolled back the header. + pure False + +-- | Add a block to @statePendingBlocks@ and broadcast any transactions that +-- have reached the configured confirmation depth. +-- +-- Must be called inside an @atomically@ block together with 'claimHeader' +-- so that the header consumption and block insertion are a single atomic step. +processBlock :: State -> CardanoBlock -> STM.STM () +processBlock state newBlock = do + let depth = fromIntegral (confirmationDepth (stateConfig state)) :: Block.BlockNo + tip <- STM.readTVar (stateCurrentTip state) + pending <- STM.readTVar (statePendingBlocks state) + let isConfirmed block = case tip of + Net.TipGenesis -> False + Net.Tip _ _ tipBlock -> tipBlock >= Block.blockNo block + depth + (confirmed, remaining) = + Seq.spanl isConfirmed (pending <> Seq.singleton newBlock) + STM.writeTVar (statePendingBlocks state) remaining + -- Broadcast each confirmed transaction. 
+  forM_ (toList confirmed) $ \block ->
+    forM_ (extractTxIds block) $ \txId ->
+      STM.writeTChan (stateBroadcast state) ConfirmedTx
+        { confirmedTxId = txId
+        , confirmedTxBlockNo = Block.blockNo block
+        , confirmedTxSlotNo = Block.blockSlot block
+        }
+
+--------------------------------------------------------------------------------
+-- Transaction ID Extraction.
+--------------------------------------------------------------------------------
+
+-- | Extract all transaction IDs from a Cardano block.
+--
+-- Byron blocks contribute no ids (different transaction format, not relevant
+-- for benchmarking); every Shelley-based era is handled uniformly by
+-- 'extractFromShelleyBlock'.
+extractTxIds :: CardanoBlock -> [Api.TxId]
+extractTxIds cardanoBlock = case cardanoBlock of
+  -- Byron era: skip.
+  Cardano.BlockByron _ -> []
+  -- Shelley-based eras: extract TxIds.
+  Cardano.BlockShelley b -> extractFromShelleyBlock b
+  Cardano.BlockAllegra b -> extractFromShelleyBlock b
+  Cardano.BlockMary b -> extractFromShelleyBlock b
+  Cardano.BlockAlonzo b -> extractFromShelleyBlock b
+  Cardano.BlockBabbage b -> extractFromShelleyBlock b
+  Cardano.BlockConway b -> extractFromShelleyBlock b
+  Cardano.BlockDijkstra b -> extractFromShelleyBlock b
+
+-- | Extract transaction IDs from a Shelley-based block.
+extractFromShelleyBlock + :: Core.EraBlockBody ledgerEra + => Shelley.ShelleyBlock proto ledgerEra + -> [Api.TxId] +extractFromShelleyBlock shelleyBlock = + case Shelley.shelleyBlockRaw shelleyBlock of + LedgerBlock.Block _ body -> + let txSeq = getConst (Core.txSeqBlockBodyL Const body) + in map toTxId (toList txSeq) + where + toTxId tx = Api.fromShelleyTxId (Core.txIdTx tx) diff --git a/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/NodeToNode/TxSubmission.hs b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/NodeToNode/TxSubmission.hs new file mode 100644 index 00000000000..53dde4ef629 --- /dev/null +++ b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/NodeToNode/TxSubmission.hs @@ -0,0 +1,385 @@ +{-# LANGUAGE BangPatterns #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE PackageImports #-} +{-# LANGUAGE ScopedTypeVariables #-} + +-------------------------------------------------------------------------------- + +module Cardano.Benchmarking.TxCentrifuge.NodeToNode.TxSubmission + ( TxSubmissionClient + , txSubmissionClient + ) where + +-------------------------------------------------------------------------------- + +---------- +-- base -- +---------- +import Data.Foldable (toList) +import Numeric.Natural (Natural) +import Data.List.NonEmpty qualified as NE +---------------- +-- bytestring -- +---------------- +import Data.ByteString qualified as BS +----------------- +-- cardano-api -- +----------------- +import Cardano.Api qualified as Api +---------------- +-- containers -- +---------------- +import Data.Sequence qualified as Seq +import Data.Set qualified as Set +------------------- +-- contra-tracer -- +------------------- +import "contra-tracer" Control.Tracer (Tracer, traceWith) +------------------------- +-- ouroboros-consensus -- +------------------------- +import Ouroboros.Consensus.Cardano qualified as Consensus 
(CardanoBlock) +import Ouroboros.Consensus.Cardano.Block qualified as Block +import Ouroboros.Consensus.Ledger.SupportsMempool qualified as Mempool +import Ouroboros.Consensus.Shelley.Eras qualified as Eras +import Ouroboros.Consensus.Shelley.Ledger.Mempool + qualified as Mempool (TxId(ShelleyTxId)) +----------------------- +-- ouroboros-network -- +----------------------- +import Ouroboros.Network.Protocol.TxSubmission2.Client qualified as TxSub +import Ouroboros.Network.Protocol.TxSubmission2.Type qualified as TxSub +import Ouroboros.Network.SizeInBytes qualified as Net +------------------- +-- tx-centrifuge -- +------------------- +import Cardano.Benchmarking.TxCentrifuge.Tracing qualified as Tracing + +-------------------------------------------------------------------------------- + +type CardanoBlock = Consensus.CardanoBlock Eras.StandardCrypto + +-- | TxSubmission2 client for submitting transactions. +type TxSubmissionClient = + TxSub.TxSubmissionClient + (Mempool.GenTxId CardanoBlock) + (Mempool.GenTx CardanoBlock) + IO + () + +-- | A pre-computed entry in the unacknowledged sequence. +-- +-- All protocol-ready values are computed once at entry time (via 'toEntry') +-- rather than re-derived on every protocol round-trip. +data UnAckedEntry = UnAckedEntry + { -- | For protocol announcement ('MsgReplyTxIds') and matching ('MsgRequestTxs'). + uaeGenTxId :: !(Mempool.GenTxId CardanoBlock) + -- | For protocol body delivery ('MsgReplyTxs'). + , uaeGenTx :: !(Mempool.GenTx CardanoBlock) + -- | For protocol announcement ('MsgReplyTxIds'). + , uaeSize :: !Net.SizeInBytes + } + +-- | Internal state: the unacknowledged tx sequence (oldest first, matching the +-- server's FIFO). Acks remove elements from the front; new announcements are +-- appended at the back. +-- Uses 'Seq' for O(1) length and O(log n) take/drop (vs O(n) for lists). 
+type UnAcked = Seq.Seq UnAckedEntry
+
+--------------------------------------------------------------------------------
+
+-- | Convert a cardano-api Tx to an 'UnAckedEntry', pre-computing every
+-- protocol-ready value once. This is the single boundary crossing: all
+-- subsequent protocol handlers work with native consensus types only.
+toEntry :: Api.Tx Api.ConwayEra -> UnAckedEntry
+toEntry tx = UnAckedEntry
+  { uaeGenTxId = genTxId
+  , uaeGenTx = genTx
+  , uaeSize = byteSize
+  }
+  where
+    -- All fields of 'UnAckedEntry' are strict, so these are forced on
+    -- construction exactly as in a banged let-binding.
+    !genTx = toGenTx tx
+    !genTxId = Mempool.txId genTx
+    !byteSize =
+      Net.SizeInBytes (fromIntegral (BS.length (Api.serialiseToCBOR tx)))
+
+-- | Extract the protocol announcement pair from a pre-computed entry.
+entryToIdSize :: UnAckedEntry -> (Mempool.GenTxId CardanoBlock, Net.SizeInBytes)
+entryToIdSize entry = (uaeGenTxId entry, uaeSize entry)
+
+--------------------------------------------------------------------------------
+
+-- | Create a TxSubmission2 client that pulls txs from caller-supplied IO
+-- actions. No intermediate queue, the blocking action is called for the first
+-- mandatory tx, and the non-blocking action drains the rest up to the
+-- requested count, capped by @maxBatchSize@.
+txSubmissionClient
+  -- | Tracer for structured TxSubmission2 events.
+  :: Tracer IO Tracing.TxSubmission
+  -- | Target name (remote node identifier).
+  -> String
+  -- | Max batch size per request.
+  -> Natural
+  -- | Blocking: wait for a token (must not fail).
+  -> IO (Api.Tx Api.ConwayEra)
+  -- | NonBlocking: poll for a token.
+  -> IO (Maybe (Api.Tx Api.ConwayEra))
+  -> TxSubmissionClient
+txSubmissionClient tracer targetName maxBatchSize blockingFetch nonBlockingFetch =
+  TxSub.TxSubmissionClient $ pure $ TxSub.ClientStIdle
+    { TxSub.recvMsgRequestTxIds =
+        requestTxIds
+          tracer
+          targetName maxBatchSize
+          blockingFetch nonBlockingFetch
+          Seq.empty
+    , TxSub.recvMsgRequestTxs =
+        requestTxs
+          tracer
+          targetName maxBatchSize
+          blockingFetch nonBlockingFetch
+          Seq.empty
+    }
+
+--------------------------------------------------------------------------------
+
+-- | Drain up to @n@ tokens without blocking.
+-- This is the primary token consumption path for both 'SingBlocking' (after the
+-- first mandatory tx) and 'SingNonBlocking' requests. Stops as soon as the
+-- callback returns 'Nothing' (rate-limited).
+--
+-- Any request of zero or fewer tokens yields the empty list. The base case
+-- guards on @n <= 0@ rather than matching the literal @0@: the blocking path
+-- in 'requestTxIds' calls this with @effectiveReq - 1@, which is negative when
+-- the configured batch size is zero, and a literal-@0@ base case would then be
+-- skipped forever, recursing for as long as the callback keeps yielding
+-- 'Just'.
+drainUpTo :: Int
+  -> IO (Maybe (Api.Tx Api.ConwayEra))
+  -> IO [Api.Tx Api.ConwayEra]
+drainUpTo n _ | n <= 0 = pure []
+drainUpTo n fetch = fetch >>= \case
+  Nothing -> pure []
+  Just x -> (x :) <$> drainUpTo (n - 1) fetch
+
+-- | Handle @MsgRequestTxIds@.
+--
+-- TxSubmission2 protocol semantics:
+-- SingBlocking → must return at least 1 tx; may block.
+-- SingNonBlocking → return 0..reqNum txs; must not block.
+--
+-- In both cases, after satisfying the minimum (1 for blocking, 0 for
+-- non-blocking), 'drainUpTo' fills the rest via non-blocking calls.
+-- Under sustained load a Cardano node operates at near-full mempool capacity
+-- and almost exclusively issues 'SingNonBlocking' requests, so the
+-- non-blocking path is the dominant token consumption path.
+-- See the fairness analysis in WorkloadRunner.runWorkload for details.
+requestTxIds
+  :: forall blocking.
+  -- | Tracer for structured TxSubmission2 events.
+  Tracer IO Tracing.TxSubmission
+  -- | Target name (remote node identifier).
+  -> String
+  -- | Max batch size per request.
+  -> Natural
+  -- | Blocking: wait for a token (must not fail).
+  -> IO (Api.Tx Api.ConwayEra)
+  -- | NonBlocking: poll for a token.
+ -> IO (Maybe (Api.Tx Api.ConwayEra)) + -- | Unacknowledged transactions (oldest first). + -> UnAcked + -- | Blocking style singleton: + -- * 'SingBlocking': (must return >= 1 tx). + -- * 'SingNonBlocking': (may return 0). + -> TxSub.SingBlockingStyle blocking + -- | Number of tx IDs to ACK. + -> TxSub.NumTxIdsToAck + -- | Number of tx IDs requested. + -> TxSub.NumTxIdsToReq + -> IO ( TxSub.ClientStTxIds + blocking + (Mempool.GenTxId CardanoBlock) + (Mempool.GenTx CardanoBlock) + IO + () + ) +requestTxIds + tracer + targetName maxBatchSize + blockingFetch nonBlockingFetch + unacked + blocking + (TxSub.NumTxIdsToAck ackNum) + (TxSub.NumTxIdsToReq reqNum) + = do + -- Trace: node asked for tx id announcements. + --------------------------------------------- + traceWith tracer $ + Tracing.RequestTxIds + targetName + -- TxIds not yet acknowledged. + (map + (fromGenTxId . uaeGenTxId) + (toList unacked) + ) + (fromIntegral ackNum) -- How many the node is ACKing. + (fromIntegral reqNum) -- How many new TxIds it wants. + -- Pull txs from the callbacks, capped by maxBatchSize. + ------------------------------------------------------- + newTxs <- do + let !effectiveReq = min + (fromIntegral reqNum) + (fromIntegral maxBatchSize :: Int) + case blocking of + TxSub.SingBlocking -> do + -- Block for exactly one tx (protocol minimum), then + -- remaining up to effectiveReq-1 without blocking. + tx1 <- blockingFetch + rest <- drainUpTo (effectiveReq - 1) nonBlockingFetch + pure (tx1 : rest) + TxSub.SingNonBlocking -> do + -- Return whatever is available up to effectiveReq. + drainUpTo effectiveReq nonBlockingFetch + -- Convert to protocol-ready entries (single boundary crossing). + ---------------------------------------------------------------- + let !newEntries = map toEntry newTxs + -- Drop acknowledged entries. + ----------------------------- + -- Drop acknowledged entries from the front (oldest first, matching the + -- server's FIFO), then append new announcements at the back. 
+ let !unacked' = + let !remaining = Seq.drop (fromIntegral ackNum) unacked + in remaining Seq.>< Seq.fromList newEntries + -- Trace: we replied with tx id announcements. + ---------------------------------------------- + traceWith tracer $ + Tracing.ReplyTxIds + targetName + (fromIntegral ackNum) -- how many the node is ACKing. + (fromIntegral reqNum) -- how many new TxIds it wants. + (map (fromGenTxId . uaeGenTxId) (toList unacked')) -- updated unacked after ACK + new. + (map (fromGenTxId . uaeGenTxId) newEntries) -- TxIds we announced in this reply. + -- Build the protocol continuation. + ----------------------------------- + let nextIdle = TxSub.ClientStIdle + -- Continues the protocol loop with the updated unacked list. + { TxSub.recvMsgRequestTxIds = + requestTxIds + tracer + targetName maxBatchSize + blockingFetch nonBlockingFetch + unacked' + , TxSub.recvMsgRequestTxs = + requestTxs + tracer + targetName maxBatchSize + blockingFetch nonBlockingFetch + unacked' + } + -- Answer with what we obtained from the callbacks. + --------------------------------------------------- + case blocking of + TxSub.SingBlocking -> do + case NE.nonEmpty newEntries of + Nothing -> error "requestTxIds: blocking fetch returned empty list!" + Just entries -> do + pure $ TxSub.SendMsgReplyTxIds + (TxSub.BlockingReply $ fmap entryToIdSize entries) + nextIdle + TxSub.SingNonBlocking -> do + pure $ TxSub.SendMsgReplyTxIds + (TxSub.NonBlockingReply $ fmap entryToIdSize newEntries) + nextIdle + +-- | Handle @MsgRequestTxs@: look up requested tx ids in the unacked list and +-- send back the matching transactions. +requestTxs + -- | Tracer for structured TxSubmission2 events. + :: Tracer IO Tracing.TxSubmission + -- | Target name (remote node identifier). + -> String + -- | Max batch size per request. + -> Natural + -- | Blocking: wait for a token (must not fail). + -> IO (Api.Tx Api.ConwayEra) + -- | NonBlocking: poll for a token. 
+ -> IO (Maybe (Api.Tx Api.ConwayEra)) + -- | Unacknowledged transactions (oldest first). + -> UnAcked + -- | Transaction IDs the node is requesting full bodies for. + -> [Mempool.GenTxId CardanoBlock] + -> IO ( TxSub.ClientStTxs + (Mempool.GenTxId CardanoBlock) + (Mempool.GenTx CardanoBlock) + IO + () + ) +requestTxs + tracer + targetName maxBatchSize + blockingFetch nonBlockingFetch + unacked + requestedTxIds + = do + -- Trace: node asked for full transactions by TxId. + --------------------------------------------------- + traceWith tracer $ + Tracing.RequestTxs + targetName + (map fromGenTxId requestedTxIds) -- TxIds the node requested. + -- Build response. + ------------------ + -- Match directly on consensus GenTxId (native protocol type). + let requestedSet = Set.fromList requestedTxIds + entriesToSend = toList $ Seq.filter + (\e -> uaeGenTxId e `Set.member` requestedSet) + unacked + -- Trace: we replied with the matching transactions. + ---------------------------------------------------- + traceWith tracer $ + Tracing.ReplyTxs + targetName + -- TxIds the node requested. + (map fromGenTxId requestedTxIds) + -- TxIds we actually sent. + (map (fromGenTxId . uaeGenTxId) entriesToSend) + -- Response and protocol continuation. + -------------------------------------- + pure $ TxSub.SendMsgReplyTxs (map uaeGenTx entriesToSend) $ TxSub.ClientStIdle + -- Continues the protocol loop with no changes to the unacked list. + { TxSub.recvMsgRequestTxIds = + requestTxIds tracer targetName + maxBatchSize blockingFetch nonBlockingFetch + unacked + , TxSub.recvMsgRequestTxs = + requestTxs tracer targetName + maxBatchSize blockingFetch nonBlockingFetch + unacked + } + +-- Protocol boundaries. 
+-------------------------------------------------------------------------------- + +-- The internal pipeline (builder, TxAssembly, fund management) uses cardano-api +-- types ('Api.Tx', 'Api.TxId'), while the TxSubmission2 wire protocol speaks +-- consensus types ('Mempool.GenTx', 'Mempool.GenTxId'). +-- 'toEntry' is the single boundary crossing: it converts a cardano-api 'Api.Tx' +-- into an 'UnAckedEntry' with all protocol-ready values pre-computed, so that +-- protocol handlers work with native consensus types without repeated +-- conversions. + +-- | Convert a cardano-api 'Api.Tx' to a consensus 'Mempool.GenTx'. +toGenTx :: Api.Tx Api.ConwayEra -> Mempool.GenTx CardanoBlock +toGenTx tx = Api.toConsensusGenTx $ Api.TxInMode Api.shelleyBasedEra tx + +-- | Convert a consensus 'Mempool.GenTxId' to a cardano-api 'Api.TxId'. +-- Used only for trace rendering (the protocol handlers match on 'GenTxId' +-- directly via 'uaeGenTxId'). +-- +-- All Shelley-based eras use the same 'Mempool.ShelleyTxId' wrapper, so a +-- single 'Api.fromShelleyTxId' covers every post-Byron era. 
+fromGenTxId :: Mempool.GenTxId CardanoBlock -> Api.TxId +fromGenTxId (Block.GenTxIdShelley (Mempool.ShelleyTxId i)) = Api.fromShelleyTxId i +fromGenTxId (Block.GenTxIdAllegra (Mempool.ShelleyTxId i)) = Api.fromShelleyTxId i +fromGenTxId (Block.GenTxIdMary (Mempool.ShelleyTxId i)) = Api.fromShelleyTxId i +fromGenTxId (Block.GenTxIdAlonzo (Mempool.ShelleyTxId i)) = Api.fromShelleyTxId i +fromGenTxId (Block.GenTxIdBabbage (Mempool.ShelleyTxId i)) = Api.fromShelleyTxId i +fromGenTxId (Block.GenTxIdConway (Mempool.ShelleyTxId i)) = Api.fromShelleyTxId i +fromGenTxId (Block.GenTxIdDijkstra (Mempool.ShelleyTxId i)) = Api.fromShelleyTxId i +fromGenTxId (Block.GenTxIdByron _) = + error "fromGenTxId: Byron transactions not supported" diff --git a/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Tracing.hs b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Tracing.hs new file mode 100644 index 00000000000..7632c6c3420 --- /dev/null +++ b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Tracing.hs @@ -0,0 +1,487 @@ +{-# LANGUAGE BangPatterns #-} +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PackageImports #-} +{-# LANGUAGE ScopedTypeVariables #-} + +-------------------------------------------------------------------------------- + +-- | Tracer setup for the tx-centrifuge. Creates configured contra-tracers +-- backed by trace-dispatcher and reads optional @TraceOptions@ from the +-- generator config file. +module Cardano.Benchmarking.TxCentrifuge.Tracing + ( Tracers (..) + , setupTracers, nullTracers + , BuilderTrace (..) + , mkBuilderNewTx + , mkBuilderRecycle + , TxSubmission (..) 
+ -- * Re-exports + , traceWith + ) where + +-------------------------------------------------------------------------------- + +---------- +-- base -- +---------- +import Control.Exception (SomeException, try) +----------- +-- aeson -- +----------- +import Data.Aeson (Value (String), (.=), object) +----------------- +-- cardano-api -- +----------------- +import Cardano.Api qualified as Api +----------------- +-- containers -- +----------------- +import Data.Map.Strict qualified as Map +------------------- +-- contra-tracer -- +------------------- +import "contra-tracer" Control.Tracer (Tracer (..), traceWith) +------------------------- +-- ouroboros-consensus -- +------------------------- +import Ouroboros.Consensus.Cardano qualified as Consensus (CardanoBlock) +import Ouroboros.Consensus.Ledger.SupportsMempool qualified as Mempool +import Ouroboros.Consensus.Shelley.Eras qualified as Eras +-- Orphan instances needed for LedgerSupportsProtocol (ShelleyBlock ...) +import Ouroboros.Consensus.Shelley.Ledger.SupportsProtocol () +----------------------- +-- ouroboros-network -- +----------------------- +import Ouroboros.Network.Driver.Simple qualified as Simple +import Ouroboros.Network.Protocol.KeepAlive.Type qualified as KA +import Ouroboros.Network.Protocol.TxSubmission2.Type qualified as STX +---------- +-- text -- +---------- +import Data.Text qualified as Text +---------------------- +-- trace-dispatcher -- +---------------------- +import Cardano.Logging qualified as Logging +------------------- +-- tx-centrifuge -- +------------------- +import Cardano.Benchmarking.TxCentrifuge.Fund qualified as Fund +-- Imported for its orphan LogFormatting / MetaTrace instances. 
+import Cardano.Benchmarking.TxCentrifuge.Tracing.Orphans () + +-------------------------------------------------------------------------------- +-- Tracers +-------------------------------------------------------------------------------- + +type CardanoBlock = Consensus.CardanoBlock Eras.StandardCrypto + +data Tracers = Tracers + { -- | Builder trace: transaction construction and recycling events. + trBuilder + :: !(Tracer IO BuilderTrace) + -- | Clean, structured TxSubmission2 trace emitted by TxSubmission.hs. + , trTxSubmission + :: !(Tracer IO TxSubmission) + -- | Low-level protocol trace from ouroboros-network's Driver.runPeer. + , trTxSubmission2 + :: !( Tracer + IO + ( Simple.TraceSendRecv + ( STX.TxSubmission2 + (Mempool.GenTxId CardanoBlock) + (Mempool.GenTx CardanoBlock) + ) + ) + ) + , trKeepAlive + :: !( Tracer + IO + (Simple.TraceSendRecv KA.KeepAlive) + ) + } + +-- | All-silent tracers. +nullTracers :: Tracers +nullTracers = Tracers + { trBuilder = Tracer (\_ -> pure ()) + , trTxSubmission = Tracer (\_ -> pure ()) + , trTxSubmission2 = Tracer (\_ -> pure ()) + , trKeepAlive = Tracer (\_ -> pure ()) + } + +-------------------------------------------------------------------------------- +-- Tracer setup +-------------------------------------------------------------------------------- + +-- | Create configured tracers from the tx-centrifuge config file. If the file +-- contains a @TraceOptions@ section, those settings are used. Otherwise falls +-- back to a sensible default (stdout, machine format, severity Debug). +setupTracers :: FilePath -> IO Tracers +setupTracers configFile = do + trConfig <- + either + (\(_ :: SomeException) -> defaultTraceConfig) + id + <$> try (Logging.readConfiguration configFile) + configReflection <- Logging.emptyConfigReflection + stdoutTrace <- Logging.standardTracer + let trForward = mempty + mbTrEkg = Nothing + -- Builder (TxCentrifuge.Builder.NewTx, TxCentrifuge.Builder.Recycle). 
+ !builderTr <- + Logging.mkCardanoTracer + stdoutTrace trForward mbTrEkg ["TxCentrifuge", "Builder"] + Logging.configureTracers + configReflection trConfig [builderTr] + -- TxSubmission (TxCentrifuge.TxSubmission.*). + !txSubTraceTr <- + Logging.mkCardanoTracer + stdoutTrace trForward mbTrEkg ["TxCentrifuge", "TxSubmission"] + Logging.configureTracers + configReflection trConfig [txSubTraceTr] + -- TxSubmission2 (low-level protocol trace). + !txSub2Trace <- + Logging.mkCardanoTracer + stdoutTrace trForward mbTrEkg ["TxSubmission2"] + Logging.configureTracers + configReflection trConfig [txSub2Trace] + -- KeepAlive. + !keepAliveTr <- + Logging.mkCardanoTracer + stdoutTrace trForward mbTrEkg ["KeepAlive"] + Logging.configureTracers + configReflection trConfig [keepAliveTr] + pure Tracers + { trBuilder = + Tracer $ Logging.traceWith builderTr + , trTxSubmission = + Tracer $ Logging.traceWith txSubTraceTr + , trTxSubmission2 = + Tracer $ Logging.traceWith txSub2Trace + , trKeepAlive = + Tracer $ Logging.traceWith keepAliveTr + } + +-- | Default config: stdout machine format, severity Debug for all namespaces. +defaultTraceConfig :: Logging.TraceConfig +defaultTraceConfig = Logging.emptyTraceConfig + { Logging.tcOptions = Map.fromList + [ ( [] + , [ Logging.ConfSeverity + (Logging.SeverityF (Just Logging.Debug)) + , Logging.ConfBackend + [Logging.Stdout Logging.MachineFormat] + ] + ) + ] + } + +-------------------------------------------------------------------------------- +-- Builder trace messages +-------------------------------------------------------------------------------- + +-- | Trace messages emitted by the payload builder thread. +-- +-- == Builder pipeline +-- +-- The builder consumes input UTxOs (unspent funds) from the input queue, builds +-- and signs a transaction, and enqueues the result for workers to submit. Each +-- transaction produces new output UTxOs. 
After submission, these outputs can be +-- recycled at different points back to the input queue, forming a closed loop: +-- +-- @ +-- inputs --> [builder: build & sign tx] --> (tx, outputs) --> [do something] +-- ^ | +-- +---------------------maybe recycle outputs --------------------+ +-- @ +-- +-- == Cardano identifiers +-- +-- The Cardano ledger uses a UTxO (Unspent Transaction Output) model. Every +-- transaction consumes existing UTxOs as /inputs/ and produces new UTxOs as +-- /outputs/. Three types from @cardano-api@ identify these objects: +-- +-- === 'Api.TxId' — transaction identifier +-- +-- A Blake2b-256 hash of the serialised transaction body ('Api.TxBody'). +-- Uniquely identifies a transaction on the blockchain. Rendered as a +-- 64-character hex string via 'Api.serialiseToRawBytesHexText'. +-- +-- === 'Api.TxIx' — output index +-- +-- A zero-based index selecting one output within a transaction. +-- +-- === 'Api.TxIn' — UTxO reference +-- +-- A @('Api.TxId', 'Api.TxIx')@ pair that uniquely identifies a single UTxO on +-- the ledger. The standard display format is @\"\#\\"@, +-- produced by 'Api.renderTxIn'. +-- +-- A transaction's /input/ 'Api.TxIn's reference existing UTxOs being spent. Its +-- /output/ 'Api.TxIn's are derived from the new 'Api.TxId' paired with +-- sequential indices (0, 1, 2, ...). +-- +-- In the tx-centrifuge, each 'Fund' record wraps a 'Api.TxIn' (the UTxO +-- reference), its Lovelace value, and the signing key needed to spend it. +data BuilderTrace + = -- | A new transaction was built. + -- + -- * 'String' — builder name (the workload name, see 'Runtime.builderName'). + -- + -- * 'Api.TxId' — Blake2b-256 hash identifying the new transaction. + -- Obtain via @'Api.getTxId' ('Api.getTxBody' signedTx)@. + -- + -- * @['Fund.Fund']@ (inputs) — funds consumed by this transaction. Each + -- fund's 'Fund.fundTxIn' is a 'Api.TxIn' pointing to an existing UTxO + -- on the ledger. 
+    --
+    -- * @['Fund.Fund']@ (outputs) — funds produced by this transaction. Each
+    --   fund's 'Fund.fundTxIn' is derived from the new 'Api.TxId' and a
+    --   sequential 'Api.TxIx' index (0, 1, 2, ...).
+    BuilderNewTx !String !Api.TxId [Fund.Fund] [Fund.Fund]
+    -- | Output funds were recycled back to the workload's input queue.
+    --
+    -- * 'String' — builder name (the workload name, see 'Runtime.builderName').
+    --
+    -- In the tx-centrifuge's closed-loop pipeline, output funds of a
+    -- transaction are recycled so they can be consumed by future transactions,
+    -- enabling indefinite-duration runs without pre-generating all UTxOs.
+    --
+    -- * @['Fund.Fund']@ — the recycled output funds.
+  | BuilderRecycle !String [Fund.Fund]
+
+-- | Build a 'BuilderNewTx' trace from the builder name, a signed transaction,
+-- and its input and output funds. Extracts the 'Api.TxId' from the transaction
+-- body.
+mkBuilderNewTx :: String -- ^ Builder name.
+  -> Api.Tx Api.ConwayEra -- ^ Signed transaction.
+  -> [Fund.Fund] -- ^ Input funds (consumed).
+  -> [Fund.Fund] -- ^ Output funds (produced).
+  -> BuilderTrace
+mkBuilderNewTx name tx = BuilderNewTx name (Api.getTxId (Api.getTxBody tx))
+
+-- | Build a 'BuilderRecycle' trace from the builder name and the recycled
+-- output funds.
+mkBuilderRecycle :: String -- ^ Builder name.
+  -> [Fund.Fund] -- ^ Recycled output funds.
+  -> BuilderTrace
+mkBuilderRecycle = BuilderRecycle
+
+-- | Machine-readable ('forMachine') and human-readable ('forHuman') rendering
+-- of 'BuilderTrace' messages.
+--
+-- Machine format ('Logging.DNormal'):
+--
+-- @
+-- { \"builder\": \"workload-name\"
+-- , \"txId\": \"\<64-char hex\>\"
+-- , \"inputs\": [\"\<TxId\>#\<TxIx\>\", ...]
+-- , \"outputs\": [\"\<TxId\>#\<TxIx\>\", ...]
+-- }
+-- @
+--
+-- Machine format ('Logging.DDetailed' and above):
+--
+-- @
+-- { \"builder\": \"workload-name\"
+-- , \"txId\": \"\<64-char hex\>\"
+-- , \"inputs\": [{\"utxo\": \"\<TxId\>#\<TxIx\>\", \"lovelace\": 1000000}, ...]
+-- , \"outputs\": [{\"utxo\": \"\<TxId\>#\<TxIx\>\", \"lovelace\": 500000}, ...]
+-- }
+-- @
+--
+-- Human format:
+--
+-- @
+-- NewTx [workload-name] \<txId\> inputs=[\<TxId\>#\<TxIx\>,...] outputs=[\<TxId\>#\<TxIx\>,...]
+-- @
+instance Logging.LogFormatting BuilderTrace where
+  forMachine dtal (BuilderNewTx name txId inputs outputs) = mconcat
+    [ "builder" .= name
+    , "txId" .= String (Api.serialiseToRawBytesHexText txId)
+    , "inputs" .= map (renderFund dtal) inputs
+    , "outputs" .= map (renderFund dtal) outputs
+    ]
+  forMachine dtal (BuilderRecycle name outputs) = mconcat
+    [ "builder" .= name
+    , "outputs" .= map (renderFund dtal) outputs
+    ]
+  forHuman (BuilderNewTx name txId inputs outputs) =
+    "NewTx [" <> Text.pack name <> "] "
+      <> Api.serialiseToRawBytesHexText txId
+      <> " inputs=[" <> renderFundTxIns inputs <> "]"
+      <> " outputs=[" <> renderFundTxIns outputs <> "]"
+  forHuman (BuilderRecycle name outputs) =
+    "Recycle [" <> Text.pack name <> "]"
+      <> " outputs=[" <> renderFundTxIns outputs <> "]"
+
+-- | Render a single fund for 'forMachine' output.
+--
+-- * 'Logging.DMinimal', 'Logging.DNormal': just the UTxO reference as a
+--   string (@\"\<TxId\>#\<TxIx\>\"@).
+-- * 'Logging.DDetailed', 'Logging.DMaximum': a JSON object with @\"utxo\"@
+--   and @\"lovelace\"@ fields.
+renderFund :: Logging.DetailLevel -> Fund.Fund -> Value
+renderFund dtal fund
+  | dtal >= Logging.DDetailed =
+      object [ "utxo" .= Api.renderTxIn (Fund.fundTxIn fund)
+             , "lovelace" .= Fund.fundValue fund
+             ]
+  | otherwise =
+      String (Api.renderTxIn (Fund.fundTxIn fund))
+
+-- | Render a list of funds as comma-separated @\"\<TxId\>#\<TxIx\>\"@ references.
+renderFundTxIns :: [Fund.Fund] -> Text.Text
+renderFundTxIns = Text.intercalate "," . map (Api.renderTxIn . Fund.fundTxIn)
+
+-- | Namespace: @TxCentrifuge.Builder.NewTx@ and @TxCentrifuge.Builder.Recycle@.
+-- The outer prefix @[\"TxCentrifuge\", \"Builder\"]@ is set when creating the
+-- tracer via 'Logging.mkCardanoTracer' in 'setupTracers'.
+instance Logging.MetaTrace BuilderTrace where + namespaceFor BuilderNewTx{} = Logging.Namespace [] ["NewTx"] + namespaceFor BuilderRecycle{} = Logging.Namespace [] ["Recycle"] + severityFor (Logging.Namespace _ ["NewTx"]) _ = Just Logging.Info + severityFor (Logging.Namespace _ ["Recycle"]) _ = Just Logging.Info + severityFor _ _ = Nothing + documentFor (Logging.Namespace _ ["NewTx"]) = Just + "A new transaction was built from input UTxOs, producing output UTxOs." + documentFor (Logging.Namespace _ ["Recycle"]) = Just + "Output UTxOs were recycled back to the workload's input queue for reuse." + documentFor _ = Nothing + allNamespaces = + [ Logging.Namespace [] ["NewTx"] + , Logging.Namespace [] ["Recycle"] + ] + +-------------------------------------------------------------------------------- +-- TxSubmission trace messages +-------------------------------------------------------------------------------- + +-- | Clean, structured trace of the TxSubmission2 protocol as seen from the +-- generator side. Replaces the verbose @Show@-based tracing in +-- @ouroboros-network@'s @TraceSendRecv@ with fields that are easy to parse and +-- verify. +-- +-- Every constructor carries a @target@ field identifying the remote node (the +-- 'Runtime.targetName' of the 'Runtime.Target'). +data TxSubmission + = -- | The node asked us to announce transaction identifiers + -- (@MsgRequestTxIds@). + -- + -- * 'String': target node name. + -- * @['Api.TxId']@: TxIds we have not yet received an ACK for. + -- * 'Int': number of TxIds the node is acknowledging (ACK). + -- * 'Int': number of new TxIds the node is requesting (REQ). + RequestTxIds !String [Api.TxId] !Int !Int + -- | We replied to @MsgRequestTxIds@ with TxId\/size pairs. + -- + -- * 'String': target node name. + -- * 'Int': number of TxIds the node is acknowledging (ACK). + -- * 'Int': number of new TxIds the node is requesting (REQ). + -- * @['Api.TxId']@: updated unacked TxIds (after ACK + new announcements). 
+ -- * @['Api.TxId']@: TxIds we announced in this reply. + | ReplyTxIds !String !Int !Int [Api.TxId] [Api.TxId] + -- | The node asked for full transactions by TxId (@MsgRequestTxs@). + -- + -- * 'String': target node name. + -- * @['Api.TxId']@: TxIds the node requested. + | RequestTxs !String [Api.TxId] + -- | We replied to @MsgRequestTxs@ with the requested transactions. + -- + -- * 'String': target node name. + -- * @['Api.TxId']@: TxIds the node requested. + -- * @['Api.TxId']@: TxIds we actually sent (subset of requested; a TxId is + -- missing if it was not in the unacked list). + | ReplyTxs !String [Api.TxId] [Api.TxId] + +-- | Machine-readable and human-readable rendering. All TxId lists are omitted +-- below 'Logging.DDetailed' to avoid the cost of hex-encoding every transaction +-- identifier on every protocol round-trip. +-- +-- Machine format ('Logging.DNormal'): +-- +-- @ +-- { \"target\": \"n\", \"ack\": 0, \"req\": 3 } +-- { \"target\": \"n\" } +-- { \"target\": \"n\" } +-- { \"target\": \"n\" } +-- @ +-- +-- Machine format ('Logging.DDetailed' and above): +-- +-- @ +-- { \"target\": \"n\", \"ack\": 0, \"req\": 3, \"unacked\": [\"ab..\"] } +-- { \"target\": \"n\", \"ack\": 0, \"req\": 3, \"txIds\": [\"ab..\"], \"unacked\": [\"ab..\"] } +-- { \"target\": \"n\", \"txIds\": [\"ab..\"] } +-- { \"target\": \"n\", \"txIds\": [\"ab..\"], \"requested\": [\"ab..\"] } +-- @ +instance Logging.LogFormatting TxSubmission where + forMachine dtal (RequestTxIds target unacked ack req) = mconcat $ + [ "target" .= target + , "ack" .= ack + , "req" .= req + ] + ++ [ "unacked" .= map Api.serialiseToRawBytesHexText unacked + | dtal >= Logging.DDetailed ] + forMachine dtal (ReplyTxIds target ack req unacked announced) = mconcat $ + [ "target" .= target ] + ++ [ "ack" .= ack + | dtal >= Logging.DDetailed ] + ++ [ "req" .= req + | dtal >= Logging.DDetailed ] + ++ [ "txIds" .= map Api.serialiseToRawBytesHexText announced + | dtal >= Logging.DDetailed ] + ++ [ "unacked" .= 
map Api.serialiseToRawBytesHexText unacked + | dtal >= Logging.DDetailed ] + forMachine dtal (RequestTxs target txIds) = mconcat $ + [ "target" .= target ] + ++ [ "txIds" .= map Api.serialiseToRawBytesHexText txIds + | dtal >= Logging.DDetailed ] + forMachine dtal (ReplyTxs target requested sent) = mconcat $ + [ "target" .= target ] + ++ [ "txIds" .= map Api.serialiseToRawBytesHexText sent + | dtal >= Logging.DDetailed ] + ++ [ "requested" .= map Api.serialiseToRawBytesHexText requested + | dtal >= Logging.DDetailed ] + forHuman (RequestTxIds target _unacked ack req) = + "RequestTxIds [" <> Text.pack target <> "]" + <> " ack=" <> Text.pack (show ack) + <> " req=" <> Text.pack (show req) + forHuman (ReplyTxIds target ack req _unacked _announced) = + "ReplyTxIds [" <> Text.pack target <> "]" + <> " ack=" <> Text.pack (show ack) + <> " req=" <> Text.pack (show req) + forHuman (RequestTxs target _txIds) = + "RequestTxs [" <> Text.pack target <> "]" + forHuman (ReplyTxs target _requested _sent) = + "ReplyTxs [" <> Text.pack target <> "]" + +-- | Namespace: @TxCentrifuge.TxSubmission.*@. The outer prefix is set via +-- 'Logging.mkCardanoTracer' in 'setupTracers'. +instance Logging.MetaTrace TxSubmission where + namespaceFor RequestTxIds{} = Logging.Namespace [] ["RequestTxIds"] + namespaceFor ReplyTxIds{} = Logging.Namespace [] ["ReplyTxIds"] + namespaceFor RequestTxs{} = Logging.Namespace [] ["RequestTxs"] + namespaceFor ReplyTxs{} = Logging.Namespace [] ["ReplyTxs"] + severityFor (Logging.Namespace _ ["RequestTxIds"]) _ = Just Logging.Info + severityFor (Logging.Namespace _ ["ReplyTxIds"]) _ = Just Logging.Info + severityFor (Logging.Namespace _ ["RequestTxs"]) _ = Just Logging.Info + severityFor (Logging.Namespace _ ["ReplyTxs"]) _ = Just Logging.Info + severityFor _ _ = Nothing + documentFor (Logging.Namespace _ ["RequestTxIds"]) = Just + "Node requested tx id announcements (blocking or non-blocking)." 
+ documentFor (Logging.Namespace _ ["ReplyTxIds"]) = Just + "We replied with tx id announcements and sizes." + documentFor (Logging.Namespace _ ["RequestTxs"]) = Just + "Node requested full transactions by TxId." + documentFor (Logging.Namespace _ ["ReplyTxs"]) = Just + "We sent the requested transactions." + documentFor _ = Nothing + allNamespaces = + [ Logging.Namespace [] ["RequestTxIds"] + , Logging.Namespace [] ["ReplyTxIds"] + , Logging.Namespace [] ["RequestTxs"] + , Logging.Namespace [] ["ReplyTxs"] + ] diff --git a/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Tracing/Orphans.hs b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Tracing/Orphans.hs new file mode 100644 index 00000000000..6338f893757 --- /dev/null +++ b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Tracing/Orphans.hs @@ -0,0 +1,404 @@ +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE UndecidableInstances #-} + +{-# OPTIONS_GHC -Wno-orphans #-} + +-------------------------------------------------------------------------------- + +-- | Orphan 'LogFormatting' and 'MetaTrace' instances copied from @cardano-node@ +-- (@NodeToNode.hs@ and @NodeToClient.hs@) so that trace-dispatcher can format +-- TxSubmission2, KeepAlive, and TraceSendRecv messages. +module Cardano.Benchmarking.TxCentrifuge.Tracing.Orphans () where + +-------------------------------------------------------------------------------- + +----------- +-- aeson -- +----------- +import Data.Aeson (Value (String), (.=)) +---------- +-- text -- +---------- +import Data.Text (pack) +----------------------- +-- ouroboros-network -- +----------------------- +-- First two using same qualified as "typed-protocol" imports below. 
+-- These two imports are the "NodeToClient.hs" `TraceSendMsg` instances, copied unmodified. +import Ouroboros.Network.Driver.Simple qualified as Simple +import Ouroboros.Network.Driver.Stateful qualified as Stateful +import Ouroboros.Network.Protocol.KeepAlive.Type qualified as KA +import Ouroboros.Network.Protocol.TxSubmission2.Type qualified as STX +---------------------- +-- trace-dispatcher -- +---------------------- +-- We prefer the qualified imports above, but they are used to copy instances unmodified. +import Cardano.Logging + ( LogFormatting (..) + , MetaTrace (..) + , Namespace (..) + , SeverityS (..) + , nsCast + , nsPrependInner + ) +--------------------- +-- typed-protocols -- +--------------------- +-- The first one copies the `TxSubmissionNode2` instance definition unmodified. +import Network.TypedProtocol.Codec (AnyMessage (AnyMessageAndAgency)) +import Network.TypedProtocol.Codec qualified as Simple +import Network.TypedProtocol.Stateful.Codec qualified as Stateful +
+-- Copied instances: from cardano-node NodeToClient.hs +-------------------------------------------------------------------------------- +-- Driver Simple. 
+-------------------------------------------------------------------------------- + +instance LogFormatting (Simple.AnyMessage ps) + => LogFormatting (Simple.TraceSendRecv ps) where + forMachine dtal (Simple.TraceSendMsg m) = mconcat + [ "kind" .= String "Send" , "msg" .= forMachine dtal m ] + forMachine dtal (Simple.TraceRecvMsg m) = mconcat + [ "kind" .= String "Recv" , "msg" .= forMachine dtal m ] + + forHuman (Simple.TraceSendMsg m) = "Send: " <> forHuman m + forHuman (Simple.TraceRecvMsg m) = "Receive: " <> forHuman m + + asMetrics (Simple.TraceSendMsg m) = asMetrics m + asMetrics (Simple.TraceRecvMsg m) = asMetrics m + +instance LogFormatting (Stateful.AnyMessage ps f) + => LogFormatting (Stateful.TraceSendRecv ps f) where + forMachine dtal (Stateful.TraceSendMsg m) = mconcat + [ "kind" .= String "Send" , "msg" .= forMachine dtal m ] + forMachine dtal (Stateful.TraceRecvMsg m) = mconcat + [ "kind" .= String "Recv" , "msg" .= forMachine dtal m ] + + forHuman (Stateful.TraceSendMsg m) = "Send: " <> forHuman m + forHuman (Stateful.TraceRecvMsg m) = "Receive: " <> forHuman m + + asMetrics (Stateful.TraceSendMsg m) = asMetrics m + asMetrics (Stateful.TraceRecvMsg m) = asMetrics m + +instance MetaTrace (Simple.AnyMessage ps) => + MetaTrace (Simple.TraceSendRecv ps) where + namespaceFor (Simple.TraceSendMsg msg) = + nsPrependInner "Send" (namespaceFor msg) + namespaceFor (Simple.TraceRecvMsg msg) = + nsPrependInner "Receive" (namespaceFor msg) + + severityFor (Namespace out ("Send" : tl)) (Just (Simple.TraceSendMsg msg)) = + severityFor (Namespace out tl) (Just msg) + severityFor (Namespace out ("Send" : tl)) Nothing = + severityFor (Namespace out tl :: Namespace (Simple.AnyMessage ps)) Nothing + severityFor (Namespace out ("Receive" : tl)) (Just (Simple.TraceRecvMsg msg)) = + severityFor (Namespace out tl) (Just msg) + severityFor (Namespace out ("Receive" : tl)) Nothing = + severityFor (Namespace out tl :: Namespace (Simple.AnyMessage ps)) Nothing + severityFor _ 
_ = Nothing + + privacyFor (Namespace out ("Send" : tl)) (Just (Simple.TraceSendMsg msg)) = + privacyFor (Namespace out tl) (Just msg) + privacyFor (Namespace out ("Send" : tl)) Nothing = + privacyFor (Namespace out tl :: Namespace (Simple.AnyMessage ps)) Nothing + privacyFor (Namespace out ("Receive" : tl)) (Just (Simple.TraceRecvMsg msg)) = + privacyFor (Namespace out tl) (Just msg) + privacyFor (Namespace out ("Receive" : tl)) Nothing = + privacyFor (Namespace out tl :: Namespace (Simple.AnyMessage ps)) Nothing + privacyFor _ _ = Nothing + + detailsFor (Namespace out ("Send" : tl)) (Just (Simple.TraceSendMsg msg)) = + detailsFor (Namespace out tl) (Just msg) + detailsFor (Namespace out ("Send" : tl)) Nothing = + detailsFor (Namespace out tl :: Namespace (Simple.AnyMessage ps)) Nothing + detailsFor (Namespace out ("Receive" : tl)) (Just (Simple.TraceRecvMsg msg)) = + detailsFor (Namespace out tl) (Just msg) + detailsFor (Namespace out ("Receive" : tl)) Nothing = + detailsFor (Namespace out tl :: Namespace (Simple.AnyMessage ps)) Nothing + detailsFor _ _ = Nothing + + metricsDocFor (Namespace out ("Send" : tl)) = + metricsDocFor (nsCast (Namespace out tl) :: Namespace (Simple.AnyMessage ps)) + metricsDocFor (Namespace out ("Receive" : tl)) = + metricsDocFor (nsCast (Namespace out tl) :: Namespace (Simple.AnyMessage ps)) + metricsDocFor _ = [] + + documentFor (Namespace out ("Send" : tl)) = + documentFor (nsCast (Namespace out tl) :: Namespace (Simple.AnyMessage ps)) + documentFor (Namespace out ("Receive" : tl)) = + documentFor (nsCast (Namespace out tl) :: Namespace (Simple.AnyMessage ps)) + documentFor _ = Nothing + + allNamespaces = + let cn = allNamespaces :: [Namespace (Simple.AnyMessage ps)] + in fmap (nsPrependInner "Send") cn ++ fmap (nsPrependInner "Receive") cn + +instance MetaTrace (Stateful.AnyMessage ps f) => + MetaTrace (Stateful.TraceSendRecv ps f) where + namespaceFor (Stateful.TraceSendMsg msg) = + nsPrependInner "Send" (namespaceFor msg) + 
namespaceFor (Stateful.TraceRecvMsg msg) = + nsPrependInner "Receive" (namespaceFor msg) + + severityFor (Namespace out ("Send" : tl)) (Just (Stateful.TraceSendMsg msg)) = + severityFor (Namespace out tl) (Just msg) + severityFor (Namespace out ("Send" : tl)) Nothing = + severityFor (Namespace out tl :: Namespace (Stateful.AnyMessage ps f)) Nothing + + severityFor (Namespace out ("Receive" : tl)) (Just (Stateful.TraceRecvMsg msg)) = + severityFor (Namespace out tl) (Just msg) + severityFor (Namespace out ("Receive" : tl)) Nothing = + severityFor (Namespace out tl :: Namespace (Stateful.AnyMessage ps f)) Nothing + severityFor _ _ = Nothing + + privacyFor (Namespace out ("Send" : tl)) (Just (Stateful.TraceSendMsg msg)) = + privacyFor (Namespace out tl) (Just msg) + privacyFor (Namespace out ("Send" : tl)) Nothing = + privacyFor (Namespace out tl :: Namespace (Stateful.AnyMessage ps f)) Nothing + privacyFor (Namespace out ("Receive" : tl)) (Just (Stateful.TraceRecvMsg msg)) = + privacyFor (Namespace out tl) (Just msg) + privacyFor (Namespace out ("Receive" : tl)) Nothing = + privacyFor (Namespace out tl :: Namespace (Stateful.AnyMessage ps f)) Nothing + privacyFor _ _ = Nothing + + detailsFor (Namespace out ("Send" : tl)) (Just (Stateful.TraceSendMsg msg)) = + detailsFor (Namespace out tl) (Just msg) + detailsFor (Namespace out ("Send" : tl)) Nothing = + detailsFor (Namespace out tl :: Namespace (Stateful.AnyMessage ps f)) Nothing + detailsFor (Namespace out ("Receive" : tl)) (Just (Stateful.TraceRecvMsg msg)) = + detailsFor (Namespace out tl) (Just msg) + detailsFor (Namespace out ("Receive" : tl)) Nothing = + detailsFor (Namespace out tl :: Namespace (Stateful.AnyMessage ps f)) Nothing + detailsFor _ _ = Nothing + + metricsDocFor (Namespace out ("Send" : tl)) = + metricsDocFor (nsCast (Namespace out tl) :: Namespace (Stateful.AnyMessage ps f)) + metricsDocFor (Namespace out ("Receive" : tl)) = + metricsDocFor (nsCast (Namespace out tl) :: Namespace 
(Stateful.AnyMessage ps f)) + metricsDocFor _ = [] + + documentFor (Namespace out ("Send" : tl)) = + documentFor (nsCast (Namespace out tl) :: Namespace (Stateful.AnyMessage ps f)) + documentFor (Namespace out ("Receive" : tl)) = + documentFor (nsCast (Namespace out tl) :: Namespace (Stateful.AnyMessage ps f)) + documentFor _ = Nothing + + allNamespaces = + let cn = allNamespaces :: [Namespace (Stateful.AnyMessage ps f)] + in fmap (nsPrependInner "Send") cn ++ fmap (nsPrependInner "Receive") cn + +-- Copied instances: from cardano-node NodeToNode.hs +-------------------------------------------------------------------------------- +-- TxSubmissionNode2 Tracer +-------------------------------------------------------------------------------- + +instance (Show txid, Show tx) + => LogFormatting (AnyMessage (STX.TxSubmission2 txid tx)) where + forMachine _dtal (AnyMessageAndAgency stok STX.MsgInit) = + mconcat + [ "kind" .= String "MsgInit" + , "agency" .= String (pack $ show stok) + ] + forMachine _dtal (AnyMessageAndAgency stok STX.MsgRequestTxIds {}) = + mconcat + [ "kind" .= String "MsgRequestTxIds" + , "agency" .= String (pack $ show stok) + ] + forMachine _dtal (AnyMessageAndAgency stok (STX.MsgReplyTxIds _)) = + mconcat + [ "kind" .= String "MsgReplyTxIds" + , "agency" .= String (pack $ show stok) + ] + forMachine _dtal (AnyMessageAndAgency stok (STX.MsgRequestTxs txids)) = + mconcat + [ "kind" .= String "MsgRequestTxs" + , "agency" .= String (pack $ show stok) + , "txIds" .= String (pack $ show txids) + ] + forMachine _dtal (AnyMessageAndAgency stok (STX.MsgReplyTxs txs)) = + mconcat + [ "kind" .= String "MsgReplyTxs" + , "agency" .= String (pack $ show stok) + , "txs" .= String (pack $ show txs) + ] + forMachine _dtal (AnyMessageAndAgency stok STX.MsgDone) = + mconcat + [ "kind" .= String "MsgDone" + , "agency" .= String (pack $ show stok) + ] + +instance MetaTrace (AnyMessage (STX.TxSubmission2 txid tx)) where + namespaceFor (AnyMessageAndAgency _stok 
STX.MsgInit {}) = + Namespace [] ["MsgInit"] + namespaceFor (AnyMessageAndAgency _stok STX.MsgRequestTxIds {}) = + Namespace [] ["RequestTxIds"] + namespaceFor (AnyMessageAndAgency _stok STX.MsgReplyTxIds {}) = + Namespace [] ["ReplyTxIds"] + namespaceFor (AnyMessageAndAgency _stok STX.MsgRequestTxs {}) = + Namespace [] ["RequestTxs"] + namespaceFor (AnyMessageAndAgency _stok STX.MsgReplyTxs {}) = + Namespace [] ["ReplyTxs"] + namespaceFor (AnyMessageAndAgency _stok STX.MsgDone {}) = + Namespace [] ["Done"] + + severityFor (Namespace _ ["MsgInit"]) _ = Just Debug + severityFor (Namespace _ ["RequestTxIds"]) _ = Just Debug + severityFor (Namespace _ ["ReplyTxIds"]) _ = Just Debug + severityFor (Namespace _ ["RequestTxs"]) _ = Just Debug + severityFor (Namespace _ ["ReplyTxs"]) _ = Just Debug + severityFor (Namespace _ ["Done"]) _ = Just Debug + severityFor _ _ = Nothing + + documentFor (Namespace _ ["MsgInit"]) = Just + "Client side hello message." + documentFor (Namespace _ ["RequestTxIds"]) = Just $ mconcat + [ "Request a non-empty list of transaction identifiers from the client, " + , "and confirm a number of outstanding transaction identifiers. " + , "\n " + , "With 'TokBlocking' this is a a blocking operation: the response will " + , "always have at least one transaction identifier, and it does not expect " + , "a prompt response: there is no timeout. This covers the case when there " + , "is nothing else to do but wait. For example this covers leaf nodes that " + , "rarely, if ever, create and submit a transaction. " + , "\n " + , "With 'TokNonBlocking' this is a non-blocking operation: the response " + , "may be an empty list and this does expect a prompt response. This " + , "covers high throughput use cases where we wish to pipeline, by " + , "interleaving requests for additional transaction identifiers with " + , "requests for transactions, which requires these requests not block. 
" + , "\n " + , "The request gives the maximum number of transaction identifiers that " + , "can be accepted in the response. This must be greater than zero in the " + , "'TokBlocking' case. In the 'TokNonBlocking' case either the numbers " + , "acknowledged or the number requested must be non-zero. In either case, " + , "the number requested must not put the total outstanding over the fixed " + , "protocol limit. " + , "\n" + , "The request also gives the number of outstanding transaction " + , "identifiers that can now be acknowledged. The actual transactions " + , "to acknowledge are known to the peer based on the FIFO order in which " + , "they were provided. " + , "\n " + , "There is no choice about when to use the blocking case versus the " + , "non-blocking case, it depends on whether there are any remaining " + , "unacknowledged transactions (after taking into account the ones " + , "acknowledged in this message): " + , "\n " + , "* The blocking case must be used when there are zero remaining " + , " unacknowledged transactions. " + , "\n " + , "* The non-blocking case must be used when there are non-zero remaining " + , " unacknowledged transactions." + ] + documentFor (Namespace _ ["ReplyTxIds"]) = Just $ mconcat + [ "Reply with a list of transaction identifiers for available " + , "transactions, along with the size of each transaction. " + , "\n " + , "The list must not be longer than the maximum number requested. " + , "\n " + , "In the 'StTxIds' 'StBlocking' state the list must be non-empty while " + , "in the 'StTxIds' 'StNonBlocking' state the list may be empty. " + , "\n " + , "These transactions are added to the notional FIFO of outstanding " + , "transaction identifiers for the protocol. " + , "\n " + , "The order in which these transaction identifiers are returned must be " + , "the order in which they are submitted to the mempool, to preserve " + , "dependent transactions." 
+ ] + documentFor (Namespace _ ["RequestTxs"]) = Just $ mconcat + [ "Request one or more transactions corresponding to the given " + , "transaction identifiers. " + , "\n " + , "While it is the responsibility of the replying peer to keep within " + , "pipelining in-flight limits, the sender must also cooperate by keeping " + , "the total requested across all in-flight requests within the limits. " + , "\n" + , "It is an error to ask for transaction identifiers that were not " + , "previously announced (via 'MsgReplyTxIds'). " + , "\n" + , "It is an error to ask for transaction identifiers that are not " + , "outstanding or that were already asked for." + ] + documentFor (Namespace _ ["ReplyTxs"]) = Just $ mconcat + [ "Reply with the requested transactions, or implicitly discard." + , "\n" + , "Transactions can become invalid between the time the transaction " + , "identifier was sent and the transaction being requested. Invalid " + , "(including committed) transactions do not need to be sent." + , "\n" + , "Any transaction identifiers requested but not provided in this reply " + , "should be considered as if this peer had never announced them. (Note " + , "that this is no guarantee that the transaction is invalid, it may still " + , "be valid and available from another peer)." + ] + documentFor (Namespace _ ["Done"]) = Just $ mconcat + [ "Termination message, initiated by the client when the server is " + , "making a blocking call for more transaction identifiers." 
+ ] + documentFor _ = Nothing + + allNamespaces = [ + Namespace [] ["MsgInit"] + , Namespace [] ["RequestTxIds"] + , Namespace [] ["ReplyTxIds"] + , Namespace [] ["RequestTxs"] + , Namespace [] ["ReplyTxs"] + , Namespace [] ["Done"] + ] + +-- Copied instances: from cardano-node NodeToNode.hs +-------------------------------------------------------------------------------- +-- KeepAlive Tracer +-------------------------------------------------------------------------------- + +instance LogFormatting (AnyMessage KA.KeepAlive) where + forMachine _dtal (AnyMessageAndAgency stok KA.MsgKeepAlive {}) = + mconcat + [ "kind" .= String "KeepAlive" + , "agency" .= String (pack $ show stok) + ] + forMachine _dtal (AnyMessageAndAgency stok KA.MsgKeepAliveResponse {}) = + mconcat + [ "kind" .= String "KeepAliveResponse" + , "agency" .= String (pack $ show stok) + ] + forMachine _dtal (AnyMessageAndAgency stok KA.MsgDone) = + mconcat + [ "kind" .= String "Done" + , "agency" .= String (pack $ show stok) + ] + +instance MetaTrace (AnyMessage KA.KeepAlive) where + namespaceFor (AnyMessageAndAgency _stok KA.MsgKeepAlive {}) = + Namespace [] ["KeepAlive"] + namespaceFor (AnyMessageAndAgency _stok KA.MsgKeepAliveResponse {}) = + Namespace [] ["KeepAliveResponse"] + namespaceFor (AnyMessageAndAgency _stok KA.MsgDone) = + Namespace [] ["Done"] + + severityFor (Namespace _ ["KeepAlive"]) _ = Just Debug + severityFor (Namespace _ ["KeepAliveResponse"]) _ = Just Debug + severityFor (Namespace _ ["Done"]) _ = Just Debug + severityFor _ _ = Nothing + + documentFor (Namespace _ ["KeepAlive"]) = Just + "Client side message to keep the connection alive." + documentFor (Namespace _ ["KeepAliveResponse"]) = Just $ mconcat + [ "Server side response to a previous client KeepAlive message." + ] + documentFor (Namespace _ ["Done"]) = Just $ mconcat + [ "Termination message, initiated by the client." 
+ ] + documentFor _ = Nothing + + allNamespaces = [ + Namespace [] ["KeepAlive"] + , Namespace [] ["KeepAliveResponse"] + , Namespace [] ["Done"] + ] diff --git a/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/TxAssembly.hs b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/TxAssembly.hs new file mode 100644 index 00000000000..504cd91746e --- /dev/null +++ b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/TxAssembly.hs @@ -0,0 +1,169 @@ +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE TypeApplications #-} + +-------------------------------------------------------------------------------- + +module Cardano.Benchmarking.TxCentrifuge.TxAssembly + ( buildTx + ) where + +-------------------------------------------------------------------------------- + +---------- +-- base -- +---------- +import Data.Function ((&)) +import Data.List (nubBy) +import Numeric.Natural (Natural) +----------------- +-- cardano-api -- +----------------- +import Cardano.Api qualified as Api +------------------------- +-- cardano-ledger-core -- +------------------------- +import Cardano.Ledger.Coin qualified as L +------------------- +-- tx-centrifuge -- +------------------- +import Cardano.Benchmarking.TxCentrifuge.Fund ( Fund(..) ) + +-------------------------------------------------------------------------------- + +-- | Build and sign a transaction consuming the given funds and producing +-- @numOutputs@ outputs to @destAddr@. Returns the signed transaction and +-- recycled funds (one per output, keyed with @outKey@ for future spending). +-- +-- Signing keys are extracted from the input funds. If inputs belong to +-- different keys, all unique keys are used as witnesses. +-- +-- Fixed to ConwayEra. No Plutus, no metadata, fixed fee. +buildTx + -- | Destination address for outputs (embeds the network identifier). + :: Api.AddressInEra Api.ConwayEra + -- | Signing key for recycled output funds. 
+ -> Api.SigningKey Api.PaymentKey + -- | Input funds. + -> [Fund] + -- | Number of outputs. + -> Natural + -- | Fee. + -> L.Coin + -> Either String (Api.Tx Api.ConwayEra, [Fund]) +buildTx destAddr outKey inFunds numOutputs fee + | null inFunds = Left "buildTx: no input funds" + | numOutputs == 0 = Left "buildTx: outputs_per_tx must be >= 1" + | feeLovelace < 0 = Left "buildTx: fee must be >= 0" + | changeTotal <= 0 = Left $ "buildTx: insufficient funds — total inputs (" + ++ show totalIn ++ " lovelace) do not cover fee (" + ++ show feeLovelace ++ " lovelace)" + -- Guard against outputs that would be below the Cardano minimum UTxO + -- value. We cannot check the actual protocol-parameter minimum here (it + -- depends on the serialised output size and the current coinsPerUTxOByte), + -- but we can catch the obviously-invalid case where integer division + -- produces zero-value or negative outputs. A real minimum UTxO check + -- should be added once the protocol parameters are threaded through to this + -- function. + | minOutputLovelace <= 0 = Left $ "buildTx: output value too low — " + ++ show numOutputs ++ " outputs from " + ++ show changeTotal ++ " lovelace change yields " + ++ show minOutputLovelace ++ " lovelace per output" + | otherwise = + let maybeTxBody = Api.createTransactionBody + (Api.shelleyBasedEra @Api.ConwayEra) + txBodyContent + in case maybeTxBody of + Left err -> Left ("buildTx: " ++ show err) + Right txBody -> + let signedTx = Api.signShelleyTransaction + (Api.shelleyBasedEra @Api.ConwayEra) + txBody + (map Api.WitnessPaymentKey uniqueKeys) + txId = Api.getTxId txBody + outFunds = [ Fund { fundTxIn = Api.TxIn txId (Api.TxIx ix) + , fundValue = amt + , fundSignKey = outKey + } + | (ix, amt) <- zip [0..] 
outAmounts + ] + in Right (signedTx, outFunds) + where + + totalIn :: Integer + totalIn = sum (map fundValue inFunds) + + feeLovelace :: Integer + feeLovelace = let L.Coin c = fee in c + + changeTotal :: Integer + changeTotal = totalIn - feeLovelace + + -- Minimum per-output lovelace amount (used for the zero-value guard above). + minOutputLovelace :: Integer + minOutputLovelace = changeTotal `div` fromIntegral numOutputs + + -- Split change evenly; first output absorbs the remainder. + outAmounts :: [Integer] + outAmounts = + let base = changeTotal `div` fromIntegral numOutputs + remainder = changeTotal `mod` fromIntegral numOutputs + in (base + remainder) : replicate (fromIntegral numOutputs - 1) base + + -- Unique signing keys from input funds + -- (deduplicated by verification key hash). + uniqueKeys :: [Api.SigningKey Api.PaymentKey] + uniqueKeys = nubBy sameKey (map fundSignKey inFunds) + where + sameKey + :: Api.SigningKey Api.PaymentKey + -> Api.SigningKey Api.PaymentKey + -> Bool + sameKey a b = Api.verificationKeyHash (Api.getVerificationKey a) + == Api.verificationKeyHash (Api.getVerificationKey b) + + txIns + :: [ ( Api.TxIn + , Api.BuildTxWith Api.BuildTx + (Api.Witness Api.WitCtxTxIn Api.ConwayEra) + ) + ] + txIns = map + (\f -> + ( fundTxIn f + , Api.BuildTxWith + (Api.KeyWitness Api.KeyWitnessForSpending) + ) + ) inFunds + + mkTxOut :: Integer -> Api.TxOut Api.CtxTx Api.ConwayEra + mkTxOut lovelace = Api.TxOut + destAddr + ( Api.shelleyBasedEraConstraints + (Api.shelleyBasedEra @Api.ConwayEra) $ + Api.lovelaceToTxOutValue + (Api.shelleyBasedEra @Api.ConwayEra) + (Api.Coin lovelace) + ) + Api.TxOutDatumNone + Api.ReferenceScriptNone + + txBodyContent :: Api.TxBodyContent Api.BuildTx Api.ConwayEra + txBodyContent = Api.defaultTxBodyContent Api.ShelleyBasedEraConway + & Api.setTxIns txIns + & Api.setTxInsCollateral Api.TxInsCollateralNone + & Api.setTxOuts (map mkTxOut outAmounts) + & Api.setTxFee + ( Api.TxFeeExplicit + (Api.shelleyBasedEra 
@Api.ConwayEra) + (Api.Coin feeLovelace) + ) + & Api.setTxValidityLowerBound Api.TxValidityNoLowerBound + & Api.setTxValidityUpperBound + ( Api.defaultTxValidityUpperBound + Api.ShelleyBasedEraConway + ) + & Api.setTxMetadata Api.TxMetadataNone + -- We are using an explicit fee! + -- Using `Nothing` instead of `ledgerPP :: Api.LedgerProtocolParameters Api.ConwayEra`. + -- TODO: Will need something else for plutus scripts! + & Api.setTxProtocolParams (Api.BuildTxWith Nothing) diff --git a/bench/tx-centrifuge/test/lib/Test/PullFiction/Harness.hs b/bench/tx-centrifuge/test/lib/Test/PullFiction/Harness.hs new file mode 100644 index 00000000000..cc1ff1ca270 --- /dev/null +++ b/bench/tx-centrifuge/test/lib/Test/PullFiction/Harness.hs @@ -0,0 +1,507 @@ +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE NumericUnderscores #-} + + +-------------------------------------------------------------------------------- + +module Test.PullFiction.Harness + ( -- * Test results + TestResult(..) + -- * Naming helpers + , targetName + , nodeName + -- * Running tests + , resolveConfig + , loadConfig + , runTest + , runTpsTest + , runPipelineIsolationTest + -- * Metrics & formatting + , getDuration + , formatMetrics + , formatDuration + -- * Assertions (pure) + , checkElapsedTolerance + , checkTpsTolerance + , checkTargetFairness + ) where + +-------------------------------------------------------------------------------- + +---------- +-- base -- +---------- +import Control.Concurrent (threadDelay) +import Control.Exception (onException, throwIO) +import Control.Monad (forever, when) +import Data.IORef qualified as IORef +import Data.List (intercalate) +import Data.List.NonEmpty qualified as NE +import System.Environment (lookupEnv) +import Text.Read (readMaybe) +----------- +-- aeson -- +----------- +import Data.Aeson qualified as Aeson +----------- +-- async -- +----------- +import Control.Concurrent.Async qualified as Async +----------- +-- clock -- +----------- +-- NOTE: 
System.Clock is used directly here (rather than PullFiction.Clock) +-- intentionally. The harness measures overall test wall-clock time, which is +-- independent of the rate-limiter's internal clock. Keeping them separate +-- ensures that test timing cannot be influenced by any future changes to +-- PullFiction.Clock. +import System.Clock qualified as Clock +---------------- +-- containers -- +---------------- +import Data.Map.Strict qualified as Map +--------------------- +-- pull-fiction -- +--------------------- +import Cardano.Benchmarking.PullFiction.Config.Runtime qualified as Runtime +import Cardano.Benchmarking.PullFiction.Config.Validated qualified as Validated +import Cardano.Benchmarking.PullFiction.WorkloadRunner (runWorkload) + +-------------------------------------------------------------------------------- + +-- | Aggregate results from a TPS test run. +data TestResult = TestResult + { -- | Wall-clock time the test actually ran. + elapsedSeconds :: !Double + -- | Actual token count per target, keyed by target name. + , targetCounts :: !(Map.Map String Int) + } deriving (Show) + +-------------------------------------------------------------------------------- +-- Naming helpers +-------------------------------------------------------------------------------- + +-- | Qualified target name: @\"workload\/target\"@. This is the key format used +-- by 'runTpsTest' for per-target counters. +targetName :: String -> String -> String +targetName workload target = workload ++ "/" ++ target + +-- | Node name matching the config's @\"node-NN\"@ zero-padded naming scheme +-- (e.g. @\"node-01\"@, @\"node-50\"@). 
+nodeName :: Int -> String +nodeName i = "node-" ++ (if i < 10 then "0" else "") ++ show i + +-------------------------------------------------------------------------------- +-- Running tests +-------------------------------------------------------------------------------- + +-- | Decode a JSON config with pre-built inputs and resolve into a +-- 'Runtime.Runtime'. +-- +-- This is the common entry point for tests that need a resolved pipeline. +-- Uses a trivial builder (1 input per batch; the input itself is the payload) +-- so the pipeline exercises rate limiting and recycling without real +-- transaction building. 'loadConfig' is a thin wrapper for the common case +-- of @()@ inputs. +resolveConfig :: FilePath -> NE.NonEmpty input -> IO (Runtime.Runtime input input) +resolveConfig path inputs = do + raw <- Aeson.eitherDecodeFileStrict path >>= either fail pure + validated <- either fail pure $ Validated.validate raw inputs + Runtime.resolve + -- mkBuilder: 1 input per batch; input IS the payload; recycle the same + -- input as output. + (\_ _ _ -> pure Runtime.BuilderHandle + { Runtime.bhInputsPerBatch = 1 + , Runtime.bhBuildPayload = \is -> pure ((), head is, is) + }) + -- mkObserver: no test config uses observers. + (\_ name _ -> fail $ "resolveConfig: unexpected observer: " ++ name) + -- onRecycle: no-op. + (\_ _ -> pure ()) + validated + +-- | Load a generator config from a JSON file with dummy inputs and resolve into +-- a 'Runtime.Runtime'. +-- +-- Useful for tests that only need config metadata (rate limits, targets) and do +-- not use the input pipeline. +loadConfig :: FilePath -> IO (Runtime.Runtime () ()) +loadConfig path = resolveConfig path (() NE.:| []) + +-- | Run the pipeline scaffolding shared by all test runners. +-- +-- 'Runtime.resolve' has already spawned a builder async per workload (each +-- reads from the input queue, produces payloads, and enqueues them) and loaded +-- initial inputs. 
This function spawns workers via 'runWorkload', races +-- them against the configured duration, then cancels all asyncs (builders and +-- workers). +-- +-- @payload = input@ — the builder treats the input itself as the payload. +runTest + :: Runtime.Runtime input input + -> Double -- ^ Duration in seconds. + -> (Runtime.Workload input input -- ^ Workload the worker belongs to. + -> Runtime.Target input input -- ^ Target the worker serves. + -> IO input -- ^ Blocking fetch (rate-limited). + -> IO (Maybe input) -- ^ Non-blocking fetch. + -> IO () -- ^ Worker body. + ) + -> IO Double -- ^ Elapsed wall-clock seconds. +runTest runtime durationSecs workerBody = do + let allWorkloads = Map.elems (Runtime.workloads runtime) + -- Start time. + start <- Clock.getTime Clock.MonotonicRaw + -- Spawn workers via runWorkload, passing the caller-supplied callbacks. + -- Runtime asyncs (builders, recyclers) are already running. + workers <- concat <$> mapM + (\workload -> runWorkload workload $ + \target fetchPayload tryFetchPayload -> workerBody workload target fetchPayload tryFetchPayload + ) + allWorkloads + -- Race the test duration against any async dying. Exceptions are thrown + -- synchronously (not via Async.link) so Tasty's withResource can properly + -- cache and propagate them to all test cases in the group. + let allAsyncs = Runtime.asyncs runtime ++ workers + cancelAll = mapM_ Async.cancel allAsyncs + winner <- Async.race + (threadDelay (round (durationSecs * 1_000_000))) + (Async.waitAnyCatch allAsyncs) + `onException` cancelAll + -- End time. + end <- Clock.getTime Clock.MonotonicRaw + cancelAll + case winner of + Right (_, Left ex) -> throwIO ex + _ -> pure () + -- Return with the elapsed time. + pure $ fromIntegral (Clock.toNanoSecs (end - start)) / 1e9 + +-- | Decode a JSON config, create @()@ inputs, resolve into a +-- 'Runtime.Runtime', then run the pipeline, collecting per-target token +-- counts. 
+-- +-- The pipeline is trivial: a builder thread reads @()@ from the input queue +-- and writes @((), [()])@ to the payload queue; 'runWorkload' handles rate +-- limiting and input recycling; the worker callback just increments a +-- per-target counter. +-- +-- The caller is responsible for checking the returned 'TestResult' against its +-- own expected TPS map via 'checkTpsTolerance', 'checkTargetFairness', etc. +runTpsTest + -- | Path to the JSON config file. + :: FilePath + -- | Test duration in seconds. + -> Double + -> IO TestResult +runTpsTest configPath durationSecs = do + runtime <- resolveConfig configPath (() NE.:| replicate 99_999 ()) + -- Per-target counters keyed by "workloadName/targetName". + let allTargets = concatMap + (\wl -> map + (\rt -> + targetName (Runtime.workloadName wl) (Runtime.targetName rt) + ) + (Map.elems (Runtime.targets wl)) + ) + (Map.elems (Runtime.workloads runtime)) + counters <- Map.fromList <$> mapM + (\key -> do + ref <- IORef.newIORef (0 :: Int) + pure (key, ref) + ) + allTargets + -- Each worker calls fetchPayload in a loop, increments its counter, and + -- recycles the input back to the pipe for the builder to reuse. + elapsed <- runTest runtime durationSecs $ + \workload target fetchPayload _tryFetchPayload -> do + let key = targetName (Runtime.workloadName workload) + (Runtime.targetName target) + ref = counters Map.! key + forever $ do + _ <- fetchPayload + IORef.atomicModifyIORef' ref (\c -> (c + 1, ())) + -- Collect results. + perTarget <- Map.fromList <$> mapM + (\(key, ref) -> do + c <- IORef.readIORef ref + pure (key, c) + ) + (Map.toList counters) + -- Returns the map with the tokens per target. + pure TestResult + { elapsedSeconds = elapsed + , targetCounts = perTarget + } + +-- | Run a pipeline isolation test that verifies each workload's input recycling +-- loop is closed: inputs tagged for workload N are only ever observed by +-- workload N's workers, never by another workload. 
+-- +-- Inputs are @(Int, Int)@ tuples where the first element is the workload index +-- and the second is an input identifier within that workload. +-- 'Runtime.resolve' partitions inputs in ascending workload-key order, so +-- workload @i@ (0-based by key order) receives only inputs whose first element +-- is @i@. This also tests the partition logic itself. +-- +-- If any worker observes an input with a foreign workload tag, the test fails +-- immediately. Both 'fetchPayload' (blocking) and 'tryFetchPayload' +-- (non-blocking) paths are exercised on every iteration. +runPipelineIsolationTest + -- | Path to the JSON config file. + :: FilePath + -- | Number of workloads (must match config). + -> Int + -- | Test duration in seconds. + -> Double + -> IO () +runPipelineIsolationTest configPath nWorkloads durationSecs = do + let inputsPerWorkload = 2000 + taggedInputs = + [ (i, j) + | i <- [0 :: Int .. nWorkloads - 1] + , j <- [0 :: Int .. inputsPerWorkload - 1] + ] + inputs <- case taggedInputs of + (t:ts) -> pure (t NE.:| ts) + [] -> fail "runPipelineIsolationTest: nWorkloads must be >= 1" + runtime <- resolveConfig configPath inputs + -- Workloads are stored in a Map, so keys are ascending. + -- resolve partitions contiguous chunks in the same order. + let nameToTag = Map.fromList $ + zip (Map.keys (Runtime.workloads runtime)) [0 :: Int ..] + -- Workers: fetch payload (= input tag), assert it matches the workload. + -- fetchPayload and tryFetchPayload recycle consumed inputs automatically + -- (see 'TargetWorker'); the worker only checks the tag. Both blocking and + -- non-blocking paths are exercised on every iteration, verifying closed-loop + -- recycling in both code paths. + _ <- runTest runtime durationSecs $ + \workload _target fetchPayload tryFetchPayload -> do + let wlName = Runtime.workloadName workload + expectedTag = nameToTag Map.! 
wlName + check (wlIdx, _) = + when (wlIdx /= expectedTag) $ + fail $ "Input leakage: workload " ++ wlName + ++ " (tag " ++ show expectedTag + ++ ") received input tagged " ++ show wlIdx + forever $ do + tag <- fetchPayload + check tag + mTag <- tryFetchPayload + case mTag of + Nothing -> pure () + Just tag' -> check tag' + pure () + +-------------------------------------------------------------------------------- +-- Metrics & formatting +-------------------------------------------------------------------------------- + +-- | Default test duration in seconds +-- (overridable via PULL_FICTION_TEST_DURATION_SECS). +defaultDuration :: Double +defaultDuration = 60.0 + +-- | Read test duration from the @PULL_FICTION_TEST_DURATION_SECS@ environment +-- variable, falling back to 'defaultDuration' (60 s). +getDuration :: IO Double +getDuration = do + env <- lookupEnv "PULL_FICTION_TEST_DURATION_SECS" + pure $ maybe defaultDuration + (\s -> maybe defaultDuration id (readMaybe s)) env + +-- | Format a duration as a compact string for test group titles (e.g. @60.0@ +-- becomes @\"60s\"@, @5.0@ becomes @\"5s\"@). +formatDuration :: Double -> String +formatDuration d = show (round d :: Int) ++ "s" + +-- | Format a full metrics summary as a string. +-- Suitable for use as the result description in 'testCaseInfo'. +formatMetrics + :: Double -- ^ Configured test duration in seconds. + -> Map.Map String Double -- ^ Expected TPS per target. 
+ -> TestResult -> String +formatMetrics cfgDuration expectedTps r = intercalate "\n" + [ "Global" + , " targets: " ++ show (Map.size (targetCounts r)) + , " duration: " ++ formatFixed 2 dur ++ " s" + ++ " (target " ++ formatFixed 0 cfgDuration + ++ " s, " ++ formatSignedPct durErr ++ "%)" + , " configured TPS: " ++ show (round cfgTps :: Int) + , " actual TPS: " ++ show (round actualTps :: Int) + ++ " (" ++ formatSignedPct tpsErr ++ "%)" + , " total tokens: " ++ show totalTokens + ++ " (expected " ++ show expected + ++ ", " ++ formatSignedPct tokenErr ++ "%)" + , "Per-target tokens" + , " mean: " ++ show (round tMean :: Int) + ++ biasT (round tMean :: Int) + , " min: " ++ show tMin ++ biasT tMin + , " max: " ++ show tMax ++ biasT tMax + , " spread (max-min): " ++ show tSpread + ++ " (" ++ formatFixed 1 tSpreadPct ++ "% of ideal)" + , " worst deviation: " ++ formatFixed 1 tWorstDev ++ "% from mean" + , " std deviation: " ++ show (round tStddev :: Int) + , " CV: " ++ formatFixed 2 tCv ++ "%" + , "Per-target TPS" + , " mean: " ++ formatFixed tpsDp sMean ++ biasS sMean + , " min: " ++ formatFixed tpsDp sMin ++ biasS sMin + , " max: " ++ formatFixed tpsDp sMax ++ biasS sMax + , " spread (max-min): " ++ formatFixed tpsDp sSpread + ++ " (" ++ formatFixed 1 sSpreadPct ++ "% of ideal)" + , " worst deviation: " ++ formatFixed 1 sWorstDev ++ "% from mean" + , " std deviation: " ++ formatFixed tpsDp sStddev + , " CV: " ++ formatFixed 2 sCv ++ "%" + ] + where + durErr = (elapsedSeconds r - cfgDuration) + / cfgDuration * 100 + totalTokens = sum (Map.elems (targetCounts r)) + cfgTps = sum (Map.elems expectedTps) + actualTps = fromIntegral totalTokens / elapsedSeconds r + tpsErr = (actualTps - cfgTps) / cfgTps * 100 + expected = round (cfgTps * elapsedSeconds r) :: Int + tokenErr = (fromIntegral totalTokens - fromIntegral expected) + / fromIntegral expected * 100 :: Double + counts = Map.elems (targetCounts r) + dur = elapsedSeconds r + n = fromIntegral (length counts) :: Double + 
-- Ideal per-target values (mean of expectedTps). + idealTps = sum (Map.elems expectedTps) + / fromIntegral (Map.size expectedTps) + idealTokens = idealTps * dur + -- Token stats + tMean = fromIntegral (sum counts) / n + tMin = minimum counts + tMax = maximum counts + tSpread = tMax - tMin + tSpreadPct = fromIntegral tSpread / idealTokens * 100 + tWorstDev = maximum + (map (\c -> abs (fromIntegral c - tMean) + / tMean) counts) * 100 + tVariance = sum (map (\c -> (fromIntegral c - tMean) ** 2) counts) / n + tStddev = sqrt tVariance + tCv = tStddev / tMean * 100 + biasT v = let d = fromIntegral v - idealTokens + p' = d / idealTokens * 100 + in " (ideal " ++ show (round idealTokens :: Int) + ++ ", " ++ formatSignedPct p' ++ "%)" + -- TPS stats (tokens / duration per target) + tpsList = map (\c -> fromIntegral c / dur) counts :: [Double] + sMean = sum tpsList / n + sMin = minimum tpsList + sMax = maximum tpsList + sSpread = sMax - sMin + sSpreadPct = sSpread / idealTps * 100 + sWorstDev = maximum (map (\s -> abs (s - sMean) / sMean) tpsList) * 100 + sVariance = sum (map (\s -> (s - sMean) ** 2) tpsList) / n + sStddev = sqrt sVariance + sCv = sStddev / sMean * 100 + -- Decimal places: use 0 when per-target TPS >= 1, otherwise enough to show + -- the leading significant digit plus one extra for resolution (e.g. 0.2 + -- TPS -> 2 dp so min/max/spread are distinguishable). + tpsDp = if idealTps >= 1 then 0 + else max 1 (ceiling (negate (logBase 10 idealTps)) + 1 :: Int) + biasS v = let d = v - idealTps + p' = d / idealTps * 100 + in " (ideal " ++ formatFixed tpsDp idealTps + ++ ", " ++ formatSignedPct p' ++ "%)" + +-- | Format a 'Double' with exactly @n@ decimal places, rounding half-up. 
+-- +-- >>> formatFixed 2 3.1415 +-- "3.14" +-- >>> formatFixed 0 99.7 +-- "100" +formatFixed :: Int -> Double -> String +formatFixed 0 x = show (round x :: Int) +formatFixed decimals x = + let factor = 10 ^ decimals :: Int + scaled = round (x * fromIntegral factor) :: Int + (whole, frac) = scaled `quotRem` factor + fracStr = let s = show (abs frac) + in replicate (decimals - length s) '0' ++ s + in (if x < 0 && whole == 0 then "-" else "") ++ show whole ++ "." ++ fracStr + +-- | Format a percentage value with a leading sign (@+@ or @-@) and one decimal +-- place. Used in metrics output to show relative deviations. +-- +-- >>> formatSignedPct 3.14 +-- "+3.1" +-- >>> formatSignedPct (-0.5) +-- "-0.5" +formatSignedPct :: Double -> String +formatSignedPct x = (if x >= 0 then "+" else "") ++ formatFixed 1 x + +-------------------------------------------------------------------------------- +-- Assertions (pure) +-------------------------------------------------------------------------------- + +-- | Check that the elapsed wall-clock time is within the given relative +-- tolerance of the configured duration. Returns 'Nothing' on success, or 'Just' +-- an error message on failure. +-- +-- A test that overshoots significantly (e.g. 231s vs 60s) indicates that the +-- rate-limiting mechanism cannot keep up: the feeder loop overhead exceeds the +-- target inter-tick delay. +checkElapsedTolerance + :: Double -- ^ Tolerance (e.g. 0.05 for 5%). + -> Double -- ^ Configured test duration in seconds. 
+ -> TestResult -> Maybe String +checkElapsedTolerance tolerance cfgDuration result + | abs pctErr / 100 <= tolerance = Nothing + | otherwise = Just $ + "elapsed " ++ formatFixed 1 actual ++ " s (" + ++ (if pctErr >= 0 then "+" else "") ++ show (round pctErr :: Int) + ++ "%) vs target " ++ formatFixed 0 cfgDuration ++ " s" + where + actual = elapsedSeconds result + pctErr = (actual - cfgDuration) / cfgDuration * 100 + +-- | Check that actual TPS is within the given relative tolerance of configured +-- TPS. Returns 'Nothing' on success, or 'Just' an error message on failure. +checkTpsTolerance + :: Double -- ^ Tolerance (e.g. 0.05 for 5%). + -> Map.Map String Double -- ^ Expected TPS per target. + -> TestResult -> Maybe String +checkTpsTolerance tolerance expectedTps result + | abs pctErr / 100 <= tolerance = Nothing + | otherwise = Just $ + "actual " ++ show (round actualTps :: Int) ++ " TPS (" + ++ (if pctErr >= 0 then "+" else "") ++ show (round pctErr :: Int) + ++ "%) vs target " ++ show (round cfgTps :: Int) + where + totalTokens = sum (Map.elems (targetCounts result)) + cfgTps = sum (Map.elems expectedTps) + actualTps = fromIntegral totalTokens / elapsedSeconds result + pctErr = (actualTps - cfgTps) / cfgTps * 100 + +-- | Check a single target's token count against its expected TPS. Returns +-- 'Nothing' on success, or 'Just' an error message on failure. +-- +-- Applies a per-target discrete-distribution continuity correction: the actual +-- token count is an integer, so even a perfect system deviates from a +-- non-integer expected count by at least the rounding distance. We subtract +-- this /quantization floor/ so that the tolerance measures only the /excess/ +-- deviation attributable to the scheduling algorithm, not to integer +-- arithmetic. +checkTargetFairness + :: Double -- ^ Tolerance (e.g. 0.10 for 10%). + -> Map.Map String Double -- ^ Expected TPS per target. 
+ -> TestResult -> String -> Maybe String +checkTargetFairness tolerance expectedTps result name + | excessDev <= tolerance = Nothing + | otherwise = Just $ + show (round (dev * 100) :: Int) ++ "% from expected " + ++ show (round expectedCount :: Int) + ++ " (actual " ++ show actual ++ ")" + where + actual = Map.findWithDefault 0 name (targetCounts result) + elapsed = elapsedSeconds result + eTps = Map.findWithDefault 0 name expectedTps + expectedCount = eTps * elapsed + dev = abs (fromIntegral actual - expectedCount) / expectedCount + frac = expectedCount - fromIntegral (floor expectedCount :: Int) + qFloor + | frac == 0 = 0 + | otherwise = min frac (1 - frac) / expectedCount + excessDev = max 0 (dev - qFloor) diff --git a/bench/tx-centrifuge/test/pull-fiction/Main.hs b/bench/tx-centrifuge/test/pull-fiction/Main.hs new file mode 100644 index 00000000000..94717d6e689 --- /dev/null +++ b/bench/tx-centrifuge/test/pull-fiction/Main.hs @@ -0,0 +1,25 @@ +{-# LANGUAGE ImportQualifiedPost #-} + +module Main where + +----------- +-- tasty -- +----------- +import Test.Tasty qualified as Tasty +import Test.Tasty.Runners qualified as Tasty +--------------------- +-- pull-fiction -- +--------------------- +import Test.PullFiction.GeneratorTest qualified as GeneratorTest +import Test.PullFiction.Harness qualified as Harness +import Test.PullFiction.PipelineTest qualified as PipelineTest + +main :: IO () +main = do + dur <- Harness.getDuration + Tasty.defaultMain + $ Tasty.localOption (Tasty.NumThreads 1) + $ Tasty.testGroup "pull-fiction" + [ GeneratorTest.generatorTests dur + , PipelineTest.pipelineTests dur + ] diff --git a/bench/tx-centrifuge/test/pull-fiction/Test/PullFiction/GeneratorTest.hs b/bench/tx-centrifuge/test/pull-fiction/Test/PullFiction/GeneratorTest.hs new file mode 100644 index 00000000000..c93c88328fc --- /dev/null +++ b/bench/tx-centrifuge/test/pull-fiction/Test/PullFiction/GeneratorTest.hs @@ -0,0 +1,127 @@ +{-# LANGUAGE ImportQualifiedPost #-} +{-# 
LANGUAGE NumericUnderscores #-} + +-------------------------------------------------------------------------------- + +module Test.PullFiction.GeneratorTest + ( generatorTests + ) where + +-------------------------------------------------------------------------------- + +---------------- +-- containers -- +---------------- +import Data.Map.Strict qualified as Map +----------- +-- tasty -- +----------- +import Test.Tasty qualified as Tasty +----------------- +-- tasty-hunit -- +----------------- +import Test.Tasty.HUnit qualified as HUnit +--------------------- +-- pull-fiction -- +--------------------- +import Paths_tx_centrifuge qualified as Paths +import Test.PullFiction.Harness qualified as Harness + +-------------------------------------------------------------------------------- + +generatorTests :: Double -> Tasty.TestTree +generatorTests duration = Tasty.testGroup "TPS" + [ -- A "shared" global rate limiter. + tpsTestGroup "Shared-limiter mode (50 targets, 10 TPS" + "data/config-shared-10.json" + (Map.fromList + [ (Harness.targetName "default" (Harness.nodeName i), 0.2) + | i <- [1..50] + ] + ) + duration + 0.05 + 0.15 + , tpsTestGroup "Shared-limiter mode (50 targets, 100k TPS" + "data/config-shared-100k.json" + (Map.fromList + [ (Harness.targetName "default" (Harness.nodeName i), 2_000) + | i <- [1..50] + ] + ) + duration + 0.05 + 0.15 + -- A "per_target" scoped rate limiter. Lower per-target tolerance. + , tpsTestGroup "Per-target-limiter mode (50 targets, 0.2 TPS/target" + "data/config-per-target-0_2.json" + (Map.fromList + [ (Harness.targetName "default" (Harness.nodeName i), 0.20) + | i <- [1..50] + ] + ) + duration + 0.05 + 0.05 + , tpsTestGroup "Per-target-limiter mode (50 targets, 2k TPS/target" + "data/config-per-target-2k.json" + (Map.fromList + [(Harness.targetName "default" (Harness.nodeName i), 2_000) + | i <- [1..50] + ] + ) + duration + 0.05 + 0.05 + ] + +tpsTestGroup + :: String -- ^ Test group label (duration is appended). 
+ -> String -- ^ Data-file config name. + -> Map.Map String Double -- ^ Expected TPS per target, keyed by name. + -> Double -- ^ Test duration in seconds. + -> Double -- ^ Global TPS tolerance. + -> Double -- ^ Per-target fairness tolerance. + -> Tasty.TestTree +tpsTestGroup label configName expectedMap duration globalTol fairnessTol = + Tasty.withResource + (do path <- Paths.getDataFileName configName + Harness.runTpsTest path duration + ) + (const $ pure ()) + $ \getResult -> + Tasty.testGroup + (label ++ ", " ++ Harness.formatDuration duration ++ ")") + [ -- Total elapsed time. + HUnit.testCase + ("Elapsed time within " + ++ show (round (globalTol * 100) :: Int) ++ "% of target" + ) $ do + result <- getResult + case Harness.checkElapsedTolerance globalTol duration result of + Nothing -> pure () + Just err -> HUnit.assertFailure err + -- Total TPS. + , HUnit.testCaseInfo + ("Global TPS within " + ++ show (round (globalTol * 100) :: Int) ++ "% tolerance" + ) $ do + result <- getResult + let metrics = Harness.formatMetrics duration expectedMap result + case Harness.checkTpsTolerance globalTol expectedMap result of + Nothing -> pure metrics + Just err -> HUnit.assertFailure + (err ++ "\n" ++ metrics) + -- TPS per target. 
+ , Tasty.testGroup + ("Per-target TPS within " + ++ show (round (fairnessTol * 100) :: Int) ++ "% of expected" + ) + [ HUnit.testCase name $ do + result <- getResult + case Harness.checkTargetFairness fairnessTol expectedMap result name of + Nothing -> pure () + Just err -> HUnit.assertFailure err + | name <- Map.keys expectedMap + ] + ] diff --git a/bench/tx-centrifuge/test/pull-fiction/Test/PullFiction/PipelineTest.hs b/bench/tx-centrifuge/test/pull-fiction/Test/PullFiction/PipelineTest.hs new file mode 100644 index 00000000000..6140aa30a86 --- /dev/null +++ b/bench/tx-centrifuge/test/pull-fiction/Test/PullFiction/PipelineTest.hs @@ -0,0 +1,59 @@ +{-# LANGUAGE ImportQualifiedPost #-} + +-------------------------------------------------------------------------------- + +module Test.PullFiction.PipelineTest + ( pipelineTests + ) where + +-------------------------------------------------------------------------------- + +----------- +-- tasty -- +----------- +import Test.Tasty qualified as Tasty +----------------- +-- tasty-hunit -- +----------------- +import Test.Tasty.HUnit qualified as HUnit +--------------------- +-- pull-fiction -- +--------------------- +import Paths_tx_centrifuge qualified as Paths +import Test.PullFiction.Harness qualified as Harness + +-------------------------------------------------------------------------------- + +pipelineTests :: Double -> Tasty.TestTree +pipelineTests dur = Tasty.testGroup "pipeline" + [ + -- Pipeline test 1: single-group, per-group input queue. + -- ------------------------------------------------- + -- + -- 1 workload with 50 targets sharing one input queue. Recycled inputs + -- return to the same queue. Exercises Runtime.resolve with 1 workload, + -- verifying that closed-loop recycling delivers tokens to every target and + -- inputs stay within the workload. 
+ + HUnit.testCase + ("Single-group pipeline (50 targets, " ++ Harness.formatDuration dur ++ ")") $ do + path <- Paths.getDataFileName "data/config-per-target-0_2.json" + Harness.runPipelineIsolationTest path 1 dur + + -- Pipeline test 2: multi-group, per-group input queues. + -- ---------------------------------------------------- + -- + -- 50 workloads, each with 1 target at 1 TPS (50 TPS aggregate). + -- Each workload has its own input queue; recycled inputs must return to the + -- originating workload's queue and never leak to another group. + -- + -- Inputs are tagged with (workloadIndex, inputIndex) tuples. If any worker + -- ever observes an input with a foreign workload tag, the test fails + -- immediately. This also exercises resolve's partition logic. + + , HUnit.testCase + "Multi-group pipeline isolation (50 groups x 1 TPS, 10s)" $ do + path <- Paths.getDataFileName "data/config-multi-group.json" + Harness.runPipelineIsolationTest path 50 10 + + ] diff --git a/bench/tx-centrifuge/test/tx-centrifuge/Main.hs b/bench/tx-centrifuge/test/tx-centrifuge/Main.hs new file mode 100644 index 00000000000..3c0c71bd929 --- /dev/null +++ b/bench/tx-centrifuge/test/tx-centrifuge/Main.hs @@ -0,0 +1,17 @@ +{-# LANGUAGE ImportQualifiedPost #-} + +module Main where + +----------- +-- tasty -- +----------- +import Test.Tasty qualified as Tasty +--------------------- +-- tx-centrifuge -- +--------------------- +import Test.TxCentrifuge.TxTest qualified as TxTest + +main :: IO () +main = Tasty.defaultMain $ Tasty.testGroup "tx-centrifuge" + [ TxTest.txTests + ] diff --git a/bench/tx-centrifuge/test/tx-centrifuge/Test/TxCentrifuge/TxTest.hs b/bench/tx-centrifuge/test/tx-centrifuge/Test/TxCentrifuge/TxTest.hs new file mode 100644 index 00000000000..eecba963c6e --- /dev/null +++ b/bench/tx-centrifuge/test/tx-centrifuge/Test/TxCentrifuge/TxTest.hs @@ -0,0 +1,223 @@ +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE NumericUnderscores #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# 
LANGUAGE TypeApplications #-} + +-------------------------------------------------------------------------------- + +module Test.TxCentrifuge.TxTest + ( txTests + , testSetup + , mkDummyFund + ) where + +-------------------------------------------------------------------------------- + +---------- +-- base -- +---------- +import System.IO (hFlush, hPutStrLn, stderr) +----------- +-- aeson -- +----------- +import Data.Aeson qualified as Aeson +----------------- +-- cardano-api -- +----------------- +import Cardano.Api qualified as Api +------------------------- +-- cardano-ledger-core -- +------------------------- +import Cardano.Ledger.Coin qualified as L +----------- +-- clock -- +----------- +import System.Clock qualified as Clock +----------- +-- tasty -- +----------- +import Test.Tasty qualified as Tasty +----------------- +-- tasty-hunit -- +----------------- +import Test.Tasty.HUnit ((@?=)) +import Test.Tasty.HUnit qualified as HUnit +------------------ +-- tx-generator -- +------------------ +import Cardano.TxGenerator.ProtocolParameters qualified as PP +--------------------- +-- tx-centrifuge -- +--------------------- +import Cardano.Benchmarking.TxCentrifuge.Fund qualified as Fund +import Cardano.Benchmarking.TxCentrifuge.TxAssembly qualified as TxAssembly +import Paths_tx_centrifuge qualified as Paths + +-------------------------------------------------------------------------------- + +txTests :: Tasty.TestTree +txTests = Tasty.testGroup "node" + [ HUnit.testCase "buildTx: simple 1-in-1-out transaction" $ do + (_ledgerPP, signKey, addr) <- testSetup + let fund = mkDummyFund signKey 0 10_000_000 + fee = L.Coin 200_000 + case TxAssembly.buildTx {-- ledgerPP --} addr signKey [fund] 1 fee of + Left err -> + HUnit.assertFailure $ "buildTx failed: " ++ err + Right (tx, outFunds) -> do + -- One output fund recycled. + length outFunds @?= 1 + -- Output value = input - fee. 
+ Fund.fundValue (head outFunds) @?= (10_000_000 - 200_000) + -- The recycled fund's TxIn references the new tx. + let txId = Api.getTxId (Api.getTxBody tx) + Fund.fundTxIn (head outFunds) + @?= Api.TxIn txId (Api.TxIx 0) + + , HUnit.testCase "buildTx: 2-in-3-out transaction" $ do + (_ledgerPP, signKey, addr) <- testSetup + let fund1 = mkDummyFund signKey 0 5_000_000 + fund2 = mkDummyFund signKey 1 5_000_000 + fee = L.Coin 200_000 + case TxAssembly.buildTx {-- ledgerPP --} addr signKey + [fund1, fund2] 3 fee of + Left err -> + HUnit.assertFailure $ "buildTx failed: " ++ err + Right (_tx, outFunds) -> do + -- Three output funds. + length outFunds @?= 3 + -- Total output = total input - fee. + let totalOut = sum (map Fund.fundValue outFunds) + totalOut @?= (10_000_000 - 200_000) + + , HUnit.testCase "buildTx: insufficient funds" $ do + (_ledgerPP, signKey, addr) <- testSetup + let fund = mkDummyFund signKey 0 100_000 + fee = L.Coin 200_000 + case TxAssembly.buildTx {-- ledgerPP --} addr signKey [fund] 1 fee of + Left _ -> pure () -- expected + Right _ -> + HUnit.assertFailure + "buildTx should fail when funds < fee" + + , HUnit.testCase "buildTx: no input funds" $ do + (_ledgerPP, signKey, addr) <- testSetup + let fee = L.Coin 200_000 + case TxAssembly.buildTx {-- ledgerPP --} addr signKey [] 1 fee of + Left _ -> pure () -- expected + Right _ -> + HUnit.assertFailure + "buildTx should fail with no inputs" + + , HUnit.testCase "buildTx: zero outputs" $ do + (_ledgerPP, signKey, addr) <- testSetup + let fund = mkDummyFund signKey 0 10_000_000 + fee = L.Coin 200_000 + case TxAssembly.buildTx {-- ledgerPP --} addr signKey [fund] 0 fee of + Left _ -> pure () -- expected + Right _ -> + HUnit.assertFailure + "buildTx should fail with 0 outputs" + + , HUnit.testCase + "buildTx: signing throughput (single-threaded)" $ do + (ledgerPP, signKey, addr) <- testSetup + -- Build N transactions sequentially and measure wall-clock + -- time. 
This quantifies the single-threaded builder bottleneck. + let n = 10_000 :: Int + fee = L.Coin 200_000 + -- Use a large initial fund so recycling doesn't deplete it. + initialFund = mkDummyFund signKey 0 1_000_000_000_000 + + start <- Clock.getTime Clock.MonotonicRaw + go n initialFund ledgerPP addr signKey fee + end <- Clock.getTime Clock.MonotonicRaw + + let elapsedNs = Clock.toNanoSecs (end - start) + elapsedS = fromIntegral elapsedNs / 1e9 :: Double + tps = fromIntegral n / elapsedS + + hPutStrLn stderr "" + hPutStrLn stderr + " --- Single-threaded buildTx throughput ---" + hPutStrLn stderr $ " txs built: " ++ show n + hPutStrLn stderr $ " elapsed: " ++ show elapsedS ++ " s" + hPutStrLn stderr $ + " throughput: " ++ show (round tps :: Int) ++ " tx/s" + hFlush stderr + + -- Sanity: we should be able to sign at least 1000 tx/s on any + -- reasonable hardware. This is not a hard performance target, + -- just a smoke test that buildTx isn't catastrophically slow. + HUnit.assertBool + ("buildTx throughput too low: " + ++ show (round tps :: Int) ++ " tx/s") + (tps > 1000) + ] + where + -- Build N txs sequentially, recycling the first output each time. + go :: Int -> Fund.Fund + -> Api.LedgerProtocolParameters Api.ConwayEra + -> Api.AddressInEra Api.ConwayEra + -> Api.SigningKey Api.PaymentKey -> L.Coin -> IO () + go 0 _ _ _ _ _ = pure () + go remaining fund ledgerPP addr signKey fee = + case TxAssembly.buildTx {-- ledgerPP --} addr signKey [fund] 1 fee of + Left err -> + error $ "throughput test: buildTx failed at iteration " + ++ show remaining ++ ": " ++ err + Right (_, outFunds) -> + go (remaining - 1) (head outFunds) + ledgerPP addr signKey fee + +-------------------------------------------------------------------------------- +-- Test helpers +-------------------------------------------------------------------------------- + +-- | Load protocol parameters and create common test fixtures. 
+testSetup + :: IO ( Api.LedgerProtocolParameters Api.ConwayEra + , Api.SigningKey Api.PaymentKey + , Api.AddressInEra Api.ConwayEra + ) +testSetup = do + -- Load protocol parameters from the CI test file. + ppPath <- Paths.getDataFileName "data/protocol-parameters.ci-test.json" + protocolParameters <- + Aeson.eitherDecodeFileStrict' ppPath >>= either fail pure + ledgerPP <- + case PP.convertToLedgerProtocolParameters + Api.ShelleyBasedEraConway protocolParameters of + Left err -> + fail $ "convertToLedgerProtocolParameters: " ++ show err + Right pp -> pure pp + + -- Generate a fresh signing key and derive its address. + signKey <- Api.generateSigningKey Api.AsPaymentKey + let networkId = Api.Testnet (Api.NetworkMagic 42) + addr = Api.shelleyAddressInEra + (Api.shelleyBasedEra @Api.ConwayEra) + $ Api.makeShelleyAddress networkId + (Api.PaymentCredentialByKey + (Api.verificationKeyHash + (Api.getVerificationKey signKey))) + Api.NoStakeAddress + + pure (ledgerPP, signKey, addr) + +-- | Create a dummy fund with a synthetic TxIn. Uses the signing key's +-- verification key hash to derive a deterministic TxId (via +-- 'Api.genesisUTxOPseudoTxIn') and the caller-supplied @index@ as the +-- 'Api.TxIx'. Each distinct @index@ produces a unique 'Api.TxIn', so +-- multi-input tests can create several funds from the same key without +-- accidentally producing duplicate inputs. 
+mkDummyFund :: Api.SigningKey Api.PaymentKey -> Word -> Integer -> Fund.Fund +mkDummyFund signKey index lovelace = Fund.Fund + { Fund.fundTxIn = + let Api.TxIn txId _ = Fund.genesisTxIn + (Api.Testnet (Api.NetworkMagic 42)) + signKey + in Api.TxIn txId (Api.TxIx index) + , Fund.fundValue = lovelace + , Fund.fundSignKey = signKey + } diff --git a/bench/tx-centrifuge/tx-centrifuge.cabal b/bench/tx-centrifuge/tx-centrifuge.cabal new file mode 100644 index 00000000000..b83a30b9259 --- /dev/null +++ b/bench/tx-centrifuge/tx-centrifuge.cabal @@ -0,0 +1,211 @@ +cabal-version: 3.0 + +name: tx-centrifuge +version: 0.1.0.0 +synopsis: Standalone transaction generator for Cardano benchmarking +description: Pull-based transaction generator targeting the higher TPS + rates and workload isolation that Leios benchmarking requires. + Built from scratch so that tx-generator's historical baselines + remain untouched. +category: Cardano, + Benchmark, +copyright: 2026 Intersect MBO. +author: Federico Mastellone (210034+fmaste@users.noreply.github.com) +license: Apache-2.0 +license-files: LICENSE + NOTICE +build-type: Simple + +extra-doc-files: README.md +data-files: data/config-shared-10.json + data/config-shared-100k.json + data/config-per-target-0_2.json + data/config-per-target-2k.json + data/config-per-target-200.json + data/config-multi-group.json + data/protocol-parameters.ci-test.json + +-------------------------------------------------------------------------------- + +common project-config + default-language: Haskell2010 + +common ghc-warnings + ghc-options: -Wall + -Wcompat + -Wincomplete-record-updates + -Wincomplete-uni-patterns + -Wno-prepositive-qualified-module + -Wno-unticked-promoted-constructors + -Wpartial-fields + -Wredundant-constraints + -fobject-code -fno-ignore-interface-pragmas + -fno-omit-interface-pragmas + +-- -N: multicore runtime capabilities (critical for high-TPS CPU load). +-- -A64m: larger nursery to reduce minor-GC frequency in the hot path. 
+-- -T: RTS stats available for tuning/regression checks. +common rts-defaults + ghc-options: -threaded + -rtsopts + "-with-rtsopts=-N -A64m -T" + +-------------------------------------------------------------------------------- + +executable tx-centrifuge + import: project-config, ghc-warnings, rts-defaults + hs-source-dirs: app + main-is: Main.hs + build-depends: base + , aeson + , async + , bytestring + , cardano-api + , cardano-ledger-core + , cardano-node + , containers + , network + , ouroboros-consensus + , ouroboros-consensus-cardano + , ouroboros-network-framework + , stm + , text + , transformers + , tx-generator + , tx-centrifuge:pull-fiction + , tx-centrifuge:tx-centrifuge-lib + +-------------------------------------------------------------------------------- + +-- | Domain-independent, pull-based load generation engine. +-- +-- Provides rate-limited pipeline management (input queue, payload queue, +-- closed-loop recycling), GCRA-based admission control, and workload +-- orchestration. Zero Cardano dependencies — the library is parameterised over +-- abstract input and payload types so it can drive any pull-based protocol +-- (e.g. Cardano's TxSubmission2 mini-protocol). +library pull-fiction + import: project-config, ghc-warnings + hs-source-dirs: lib/pull-fiction + visibility: public + exposed-modules: Cardano.Benchmarking.PullFiction.Config.Raw + Cardano.Benchmarking.PullFiction.Config.Runtime + Cardano.Benchmarking.PullFiction.Config.Validated + Cardano.Benchmarking.PullFiction.Clock + Cardano.Benchmarking.PullFiction.WorkloadRunner + other-modules: Cardano.Benchmarking.PullFiction.Internal.RateLimiter + build-depends: base >=4.12 && <5 + , aeson + , async + , clock + , containers + , stm + , text + +-- Sub-library with node functionality decoupled from the core library above. 
+library tx-centrifuge-lib + import: project-config, ghc-warnings + hs-source-dirs: lib/tx-centrifuge + visibility: public + exposed-modules: Cardano.Benchmarking.TxCentrifuge.Fund + Cardano.Benchmarking.TxCentrifuge.NodeToNode + Cardano.Benchmarking.TxCentrifuge.NodeToNode.KeepAlive + Cardano.Benchmarking.TxCentrifuge.NodeToNode.TxIdSync + Cardano.Benchmarking.TxCentrifuge.NodeToNode.TxSubmission + Cardano.Benchmarking.TxCentrifuge.Tracing + Cardano.Benchmarking.TxCentrifuge.Tracing.Orphans + Cardano.Benchmarking.TxCentrifuge.TxAssembly + build-depends: base >=4.12 && <5 + , aeson + , bytestring + , cardano-api ^>= 10.24.1 + , cardano-ledger-api + , cardano-ledger-core + , containers + , io-classes:{io-classes, strict-stm} + , network + , network-mux + , ouroboros-consensus ^>= 0.30 + , ouroboros-consensus-cardano ^>= 0.26 + , ouroboros-consensus-diffusion ^>= 0.26 + , ouroboros-network ^>= 0.22.6 + , ouroboros-network-api ^>= 0.16 + , ouroboros-network-framework ^>= 0.19.3 + , ouroboros-network-protocols ^>= 0.15.2 + , random + , serialise + , stm + , contra-tracer + , text + , time + , trace-dispatcher + , typed-protocols:{typed-protocols, stateful} >= 1.0 + +-------------------------------------------------------------------------------- + +-- Test suites import rts-defaults so performance/fairness behavior matches +-- production runtime defaults instead of a different RTS profile. 
+ +test-suite pull-fiction-test + import: project-config, ghc-warnings, rts-defaults + type: exitcode-stdio-1.0 + hs-source-dirs: test/pull-fiction + main-is: Main.hs + other-modules: Test.PullFiction.GeneratorTest + Test.PullFiction.PipelineTest + Paths_tx_centrifuge + autogen-modules: Paths_tx_centrifuge + build-depends: base >=4.12 && <5 + , containers + , tasty + , tasty-hunit + , tx-centrifuge:pull-fiction + , tx-centrifuge:test-harness + +test-suite tx-centrifuge-test + import: project-config, ghc-warnings, rts-defaults + type: exitcode-stdio-1.0 + hs-source-dirs: test/tx-centrifuge + main-is: Main.hs + other-modules: Test.TxCentrifuge.TxTest + Paths_tx_centrifuge + autogen-modules: Paths_tx_centrifuge + build-depends: base >=4.12 && <5 + , aeson + , cardano-api + , cardano-ledger-core + , clock + , tasty + , tasty-hunit + , tx-centrifuge:tx-centrifuge-lib + , tx-generator + +library test-harness + import: project-config, ghc-warnings + visibility: private + hs-source-dirs: test/lib + exposed-modules: Test.PullFiction.Harness + build-depends: base >=4.12 && <5 + , aeson + , async + , clock + , containers + , tx-centrifuge:pull-fiction + +-- Bench imports rts-defaults so benchmark numbers are measured with the same +-- RTS configuration used by the executable and tests. 
+-------------------------------------------------------------------------------- + +benchmark core-bench + import: project-config, ghc-warnings, rts-defaults + type: exitcode-stdio-1.0 + hs-source-dirs: bench + main-is: Bench.hs + other-modules: Paths_tx_centrifuge + autogen-modules: Paths_tx_centrifuge + build-depends: base >=4.12 && <5 + , containers + , criterion + , deepseq + , tx-centrifuge:pull-fiction + , tx-centrifuge:test-harness diff --git a/bench/tx-generator/src/Cardano/Benchmarking/Wallet.hs b/bench/tx-generator/src/Cardano/Benchmarking/Wallet.hs index bf5739208ef..8ba0c542372 100644 --- a/bench/tx-generator/src/Cardano/Benchmarking/Wallet.hs +++ b/bench/tx-generator/src/Cardano/Benchmarking/Wallet.hs @@ -18,6 +18,7 @@ import Cardano.TxGenerator.Tx import Cardano.TxGenerator.Types import Cardano.TxGenerator.UTxO +import Data.List (foldl') import Prelude import Control.Concurrent.MVar @@ -64,13 +65,13 @@ askWalletRef r f = do -- | This does an insertion into the `MVar` contents. walletRefInsertFund :: WalletRef -> Fund -> IO () -walletRefInsertFund ref fund = modifyMVar_ ref $ \w -> return $ FundQueue.insertFund w fund +walletRefInsertFund ref fund = modifyMVar_ ref $ \w -> return $! FundQueue.insertFund w fund -- | 'mkWalletFundStoreList' hides its second argument in -- 'FundToStoreList'. This is not used anywhere. mkWalletFundStoreList :: WalletRef -> FundToStoreList IO mkWalletFundStoreList walletRef funds = modifyMVar_ walletRef - $ \wallet -> return (foldl FundQueue.insertFund wallet funds) + $ \wallet -> return $! foldl' FundQueue.insertFund wallet funds -- | 'mkWalletFundStore' hides its second argument in 'FundToStore'. -- This is only ever called in tandem with 'createAndStore' in @@ -79,16 +80,16 @@ mkWalletFundStoreList walletRef funds = modifyMVar_ walletRef -- 'WalletRef' 'MVar' by side effect. 
mkWalletFundStore :: WalletRef -> FundToStore IO mkWalletFundStore walletRef fund = modifyMVar_ walletRef - $ \wallet -> return $ FundQueue.insertFund wallet fund + $ \wallet -> return $! FundQueue.insertFund wallet fund -- | 'walletSource' is only ever used in -- 'Cardano.Benchmarking.Script.Core.evalGenerator' to pass -- to 'Cardano.TxGenerator.Tx.sourceToStoreTransaction' and -- its associated functions. walletSource :: WalletRef -> Int -> FundSource IO -walletSource ref munch = modifyMVar ref $ \fifo -> return $ case removeFunds munch fifo of - Nothing -> (fifo, Left $ TxGenError "WalletSource: out of funds") - Just (newFifo, funds) -> (newFifo, Right funds) +walletSource ref munch = modifyMVar ref $ \fifo -> case removeFunds munch fifo of + Nothing -> return (fifo, Left $ TxGenError "WalletSource: out of funds") + Just (newFifo, funds) -> return (newFifo, Right funds) -- | Just a preview of the wallet's funds; wallet remains unmodified. walletPreview :: WalletRef -> Int -> IO [Fund] diff --git a/bench/tx-generator/src/Cardano/TxGenerator/Fund.hs b/bench/tx-generator/src/Cardano/TxGenerator/Fund.hs index a2235ac3b5a..d1d6443e132 100644 --- a/bench/tx-generator/src/Cardano/TxGenerator/Fund.hs +++ b/bench/tx-generator/src/Cardano/TxGenerator/Fund.hs @@ -36,7 +36,7 @@ import Data.Function (on) -- use of lenses. 
data FundInEra era = FundInEra { _fundTxIn :: !TxIn - , _fundWitness :: Witness WitCtxTxIn era + , _fundWitness :: !(Witness WitCtxTxIn era) , _fundVal :: !(TxOutValue era) , _fundSigningKey :: !(Maybe (SigningKey PaymentKey)) } diff --git a/bench/tx-generator/src/Cardano/TxGenerator/Internal/Fifo.hs b/bench/tx-generator/src/Cardano/TxGenerator/Internal/Fifo.hs index eaa0b9f27df..a04ceec7cb0 100644 --- a/bench/tx-generator/src/Cardano/TxGenerator/Internal/Fifo.hs +++ b/bench/tx-generator/src/Cardano/TxGenerator/Internal/Fifo.hs @@ -1,4 +1,3 @@ -{-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} {-| Module : Cardano.TxGenerator.Internal.Fifo Description : FIFO/queue data structure. @@ -48,7 +47,9 @@ remove :: Fifo a -> Maybe (Fifo a, a) remove fifo = case fifo of Fifo [] [] -> Nothing Fifo (h:t) y -> Just (Fifo t y, h) - Fifo [] y -> let ~(h:t) = reverse y in Just (Fifo t [], h) + Fifo [] y -> case reverse y of + (h:t) -> Just (Fifo t [], h) + [] -> Nothing -- | Dequeueing /n/ items just iterates calling remove within the -- `Maybe` monad. Removing n from a Fifo of length k when k < n is diff --git a/cabal.project b/cabal.project index 1a821ddc7db..2881fb5dd3e 100644 --- a/cabal.project +++ b/cabal.project @@ -31,6 +31,7 @@ packages: bench/cardano-topology bench/locli bench/plutus-scripts-bench + bench/tx-centrifuge bench/tx-generator bench/cardano-recon-framework trace-dispatcher diff --git a/nix/workbench/backend/nomad-job.nix b/nix/workbench/backend/nomad-job.nix index f012eeafb07..8b18f0127c6 100644 --- a/nix/workbench/backend/nomad-job.nix +++ b/nix/workbench/backend/nomad-job.nix @@ -895,7 +895,7 @@ let in # Recreate the "run-script.json" with IPs and ports that are # nomad template variables. - (runScriptToGoTemplate + (runScriptToGoTemplate2 runScript # Just the node names. 
(lib.attrsets.mapAttrsToList @@ -1394,6 +1394,32 @@ let '' ; + runScriptToGoTemplate2 = runScript: _: builtins.replaceStrings + ( + (builtins.genList + (i: ''__addr_${toString i}__'') + 100 + ) + ++ + (builtins.genList + (i: ''"__port_${toString i}__"'') + 100 + ) + ) + ( + (builtins.genList + (i: ''{{range nomadService "${(nodeNameToServicePortName "node-${toString i}")}"}}{{.Address}}{{end}}'') + 100 + ) + ++ + (builtins.genList + (i: ''{{range nomadService "${(nodeNameToServicePortName "node-${toString i}")}"}}{{.Port}}{{end}}'') + 100 + ) + ) + (lib.generators.toJSON {} runScript) + ; + # Convert from generator's "run-script.json" with all addresses being # "127.0.0.01" to one with all addresses being a placeholder like # "{{NOMAD_IP_node-X}}". diff --git a/nix/workbench/backend/nomad.nix b/nix/workbench/backend/nomad.nix index 9b272b86b29..38111f5abb5 100644 --- a/nix/workbench/backend/nomad.nix +++ b/nix/workbench/backend/nomad.nix @@ -176,6 +176,12 @@ let # Avoid nix cache misses on every commit because of `set-git-rev`. flake-output = "cardanoNodePackages.tx-generator.passthru.noGitRev"; }; + tx-centrifuge = rec { + # Local reference only used if not "cloud". + nix-store-path = haskellProject.exes.tx-centrifuge; + flake-reference = "github:intersectmbo/cardano-node"; + flake-output = "cardanoNodePackages.tx-centrifuge"; + }; } ; diff --git a/nix/workbench/service/generator.nix b/nix/workbench/service/generator.nix index 2f4a294edc3..6945f818c3c 100644 --- a/nix/workbench/service/generator.nix +++ b/nix/workbench/service/generator.nix @@ -147,6 +147,129 @@ let let serviceConfig = generatorServiceConfig nodeSpecs; service = generatorServiceConfigService serviceConfig; + genesisFunds = + (let + # create-testnet-data --testnet-magic 42 --total-supply 2010000000000000 --utxo-keys 100 --genesis-keys 0 --delegated-supply 2000000000000000 --pools 2 --stake-delegators 2 --drep-keys 0 --stuffed-utxo 000000 + # Ends with 90000000000 each utxo-key. 
+ # value = (profile.genesis.funds_balance - profile.genesis.shelley.protocolParams.poolDeposit * profile.composition.n_pools) / profile.genesis.utxo_keys; + value = (profile.derived.supply_total - profile.derived.supply_delegated) * 9 / (profile.genesis.utxo_keys * 10); + in +__toJSON + (builtins.genList + (i: + { signing_key = "../genesis/utxo-keys/utxo${toString (i+1)}.skey"; # Key index is not zero based =) + inherit value; + } + ) + profile.genesis.utxo_keys + ) + ) + ; + txCentrifugeConfig = + { # pull-fiction parameters. + ########################## + initial_inputs = + { type = "genesis_utxo_keys"; + params = + { network_magic = profile.genesis.network_magic; + signing_keys_file = "./funds.json"; + } + ; + } + ; + builder = + { type = "value"; + params = + { inputs_per_tx = 2; + outputs_per_tx = 2; + fee = 1000000; + optimistic_recycle = false; + } + ; + } + ; + rate_limit = + { scope = "shared"; + type = "token_bucket"; + params = { tps = 12; }; + } + ; + max_batch_size = 500; + on_exhaustion = "error"; + # One node per-workload. + workloads = + builtins.listToAttrs + (builtins.genList + (i: + { name = "node-${toString i}"; + value = + { targets = + { "${toString i}" = + # { addr = "127.0.0.1"; + # port = (30000 + i); + # } + { addr = "__addr_${toString i}__"; + port = "__port_${toString i}__"; + } + ; + } + ; + } + ; + } + ) + profile.composition.n_pool_hosts + ) + ; + # tx-centrifuge parameters. + ########################### + nodeConfig = "../${runningNode}/config.json"; + protocol_parameters = + { epoch_length = profile.genesis.shelley.epochLength; + min_fee_a = profile.genesis.shelley.protocolParams.minFeeA; + min_fee_b = profile.genesis.shelley.protocolParams.minFeeB; + } + ; + # Tracing parameters. + ##################### + TraceOptions = + { "" = + { backends = [ "Stdout MachineFormat" ]; + detail = "DNormal"; + severity = "Debug"; + }; + # ouroboros-network traces. 
+ "KeepAlive" = { severity="Silence";}; + "KeepAlive.Receive.KeepAliveResponse" = { severity="Silence";}; + "KeepAlive.Send.KeepAlive" = { severity="Silence";}; + "TxSubmission2" = { severity="Silence";}; + "TxSubmission2.Receive" = { severity="Silence";}; + "TxSubmission2.Receive.MsgInit" = { severity="Silence";}; + "TxSubmission2.Receive.RequestTxIds" = { severity="Silence";}; + "TxSubmission2.Receive.RequestTxs" = { severity="Silence";}; + "TxSubmission2.Receive.Done" = { severity="Silence";}; + "TxSubmission2.Send" = { severity="Silence";}; + "TxSubmission2.Send.MsgInit" = { severity="Silence";}; + "TxSubmission2.Send.ReplyTxIds" = { severity="Silence";}; + "TxSubmission2.Send.ReplyTxs" = { severity="Silence";}; + "TxSubmission2.Send.Done" = { severity="Silence";}; + # tx-centrifuge traces. + "TxCentrifuge.Builder.NewTx" = { severity="Debug";detail="DDetailed";}; + "TxCentrifuge.Builder.Recycle" = { severity="Debug";detail="DDetailed";}; + "TxCentrifuge.TxSubmission.RequestTxIds" = + { severity="Debug";detail="DDetailed";}; + "TxCentrifuge.TxSubmission.ReplyTxIds" = + { severity="Debug";detail="DDetailed";}; + "TxCentrifuge.TxSubmission.RequestTxs" = + { severity="Debug";detail="DDetailed";}; + "TxCentrifuge.TxSubmission.ReplyTxs" = + { severity="Debug";detail="DDetailed";}; + }; + TurnOnLogMetrics = false; + TurnOnLogging = true; + TraceOptionNodeName = "leios-generator"; + } + ; in { start = '' @@ -189,11 +312,12 @@ let # Extra workloads end ####################### ############################################# - ${service.script} + echo ${__toJSON genesisFunds} > ./funds.json + ${haskellProject.exes.tx-centrifuge}/bin/tx-centrifuge run-script.json '' ; - config = (service.decideRunScript service); + config = txCentrifugeConfig; # Not present on every profile. # Don't create a derivation to a file containing "null" !!!