diff --git a/.eslintrc.json b/.eslintrc.json deleted file mode 100644 index 3e961d5..0000000 --- a/.eslintrc.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "extends": ["next", "prettier", "next/core-web-vitals"], - "plugins": ["react", "react-hooks", "prettier"], - "rules": { - "no-unused-vars": [ - "error", - { - "args": "after-used", - "caughtErrors": "none", - "ignoreRestSiblings": true, - "vars": "all" - } - ] - } -} diff --git a/.gitignore b/.gitignore index ab94bdd..48f78b2 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,3 @@ -# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. - # dependencies /node_modules /.pnp @@ -9,12 +7,9 @@ # testing /coverage -# next.js -/.next/ -/out/ - -# production -/build +# build output +/dist/ +/.astro/ # misc .DS_Store @@ -28,16 +23,9 @@ yarn-error.log* # local env files .env*.local -# vercel -.vercel - # typescript *.tsbuildinfo -next-env.d.ts package-lock.json .idea/ - -# fumadocs generated -.source/ diff --git a/.prettierrc b/.prettierrc index bc786df..62675af 100644 --- a/.prettierrc +++ b/.prettierrc @@ -2,7 +2,8 @@ "singleQuote": true, "arrowParens": "always", "trailingComma": "none", - "jsxBracketSameLine": true, + "bracketSameLine": true, + "plugins": ["prettier-plugin-astro"], "printWidth": 100, "tabWidth": 2, "semi": false diff --git a/astro.config.mjs b/astro.config.mjs new file mode 100644 index 0000000..92310bf --- /dev/null +++ b/astro.config.mjs @@ -0,0 +1,9 @@ +import { defineConfig } from 'astro/config' +import react from '@astrojs/react' +import sitemap from '@astrojs/sitemap' + +export default defineConfig({ + site: 'https://evolve.com', + output: 'static', + integrations: [react(), sitemap()] +}) diff --git a/bun.lock b/bun.lock new file mode 100644 index 0000000..861644e --- /dev/null +++ b/bun.lock @@ -0,0 +1,1646 @@ +{ + "lockfileVersion": 1, + "configVersion": 1, + "workspaces": { + "": { + "name": "site", + "dependencies": { + "@astrojs/react": "^5.0.2", + 
"@astrojs/sitemap": "^3.7.2", + "@rive-app/react-webgl2": "^4.27.0", + "astro": "^6.1.1", + "motion": "^12.35.0", + "react": "^19.2.3", + "react-dom": "^19.2.3", + "react-fast-marquee": "^1.6.5", + "react-markdown": "^10.1.0", + "tailwind-merge": "^3.5.0", + }, + "devDependencies": { + "@commitlint/cli": "^20.3.0", + "@commitlint/config-conventional": "^20.3.0", + "@playwright/test": "^1.58.2", + "@tailwindcss/postcss": "^4.1.18", + "@types/node": "^25", + "@types/react": "^19.2.14", + "@types/react-dom": "^19.2.3", + "@typescript-eslint/eslint-plugin": "^8.57.2", + "@typescript-eslint/parser": "^8.57.2", + "autoprefixer": "^10.4.23", + "commitizen": "^4.3.1", + "cz-conventional-changelog": "^3.3.0", + "eslint": "^9.39.2", + "eslint-config-prettier": "^10.1.8", + "eslint-plugin-prettier": "^5.5.4", + "husky": "^9.1.7", + "lint-staged": "^16.2.7", + "postcss": "^8", + "prettier": "^3.7.4", + "prettier-plugin-astro": "^0.14.1", + "tailwindcss": "^4.1.18", + "typescript": "^5", + }, + }, + }, + "packages": { + "@alloc/quick-lru": ["@alloc/quick-lru@5.2.0", "", {}, "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw=="], + + "@astrojs/compiler": ["@astrojs/compiler@2.13.1", "", {}, "sha512-f3FN83d2G/v32ipNClRKgYv30onQlMZX1vCeZMjPsMMPl1mDpmbl0+N5BYo4S/ofzqJyS5hvwacEo0CCVDn/Qg=="], + + "@astrojs/internal-helpers": ["@astrojs/internal-helpers@0.8.0", "", { "dependencies": { "picomatch": "^4.0.3" } }, "sha512-J56GrhEiV+4dmrGLPNOl2pZjpHXAndWVyiVDYGDuw6MWKpBSEMLdFxHzeM/6sqaknw9M+HFfHZAcvi3OfT3D/w=="], + + "@astrojs/markdown-remark": ["@astrojs/markdown-remark@7.1.0", "", { "dependencies": { "@astrojs/internal-helpers": "0.8.0", "@astrojs/prism": "4.0.1", "github-slugger": "^2.0.0", "hast-util-from-html": "^2.0.3", "hast-util-to-text": "^4.0.2", "js-yaml": "^4.1.1", "mdast-util-definitions": "^6.0.0", "rehype-raw": "^7.0.0", "rehype-stringify": "^10.0.1", "remark-gfm": "^4.0.1", "remark-parse": "^11.0.0", "remark-rehype": "^11.1.2", 
"remark-smartypants": "^3.0.2", "retext-smartypants": "^6.2.0", "shiki": "^4.0.0", "smol-toml": "^1.6.0", "unified": "^11.0.5", "unist-util-remove-position": "^5.0.0", "unist-util-visit": "^5.1.0", "unist-util-visit-parents": "^6.0.2", "vfile": "^6.0.3" } }, "sha512-P+HnCsu2js3BoTc8kFmu+E9gOcFeMdPris75g+Zl4sY8+bBRbSQV6xzcBDbZ27eE7yBGEGQoqjpChx+KJYIPYQ=="], + + "@astrojs/prism": ["@astrojs/prism@4.0.1", "", { "dependencies": { "prismjs": "^1.30.0" } }, "sha512-nksZQVjlferuWzhPsBpQ1JE5XuKAf1id1/9Hj4a9KG4+ofrlzxUUwX4YGQF/SuDiuiGKEnzopGOt38F3AnVWsQ=="], + + "@astrojs/react": ["@astrojs/react@5.0.2", "", { "dependencies": { "@astrojs/internal-helpers": "0.8.0", "@vitejs/plugin-react": "^5.2.0", "devalue": "^5.6.4", "ultrahtml": "^1.6.0", "vite": "^7.3.1" }, "peerDependencies": { "@types/react": "^17.0.50 || ^18.0.21 || ^19.0.0", "@types/react-dom": "^17.0.17 || ^18.0.6 || ^19.0.0", "react": "^17.0.2 || ^18.0.0 || ^19.0.0", "react-dom": "^17.0.2 || ^18.0.0 || ^19.0.0" } }, "sha512-BDpPrapV3Wgp9sD7aTMvP+ORH0jFEue9OmkBu98KcBbTlsQCnvisDW3m7PQrMptXwEDlX5HGfP/CHmkEVY2tZA=="], + + "@astrojs/sitemap": ["@astrojs/sitemap@3.7.2", "", { "dependencies": { "sitemap": "^9.0.0", "stream-replace-string": "^2.0.0", "zod": "^4.3.6" } }, "sha512-PqkzkcZTb5ICiyIR8VoKbIAP/laNRXi5tw616N1Ckk+40oNB8Can1AzVV56lrbC5GKSZFCyJYUVYqVivMisvpA=="], + + "@astrojs/telemetry": ["@astrojs/telemetry@3.3.0", "", { "dependencies": { "ci-info": "^4.2.0", "debug": "^4.4.0", "dlv": "^1.1.3", "dset": "^3.1.4", "is-docker": "^3.0.0", "is-wsl": "^3.1.0", "which-pm-runs": "^1.1.0" } }, "sha512-UFBgfeldP06qu6khs/yY+q1cDAaArM2/7AEIqQ9Cuvf7B1hNLq0xDrZkct+QoIGyjq56y8IaE2I3CTvG99mlhQ=="], + + "@babel/code-frame": ["@babel/code-frame@7.29.0", "", { "dependencies": { "@babel/helper-validator-identifier": "^7.28.5", "js-tokens": "^4.0.0", "picocolors": "^1.1.1" } }, "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw=="], + + "@babel/compat-data": ["@babel/compat-data@7.29.0", "", 
{}, "sha512-T1NCJqT/j9+cn8fvkt7jtwbLBfLC/1y1c7NtCeXFRgzGTsafi68MRv8yzkYSapBnFA6L3U2VSc02ciDzoAJhJg=="], + + "@babel/core": ["@babel/core@7.29.0", "", { "dependencies": { "@babel/code-frame": "^7.29.0", "@babel/generator": "^7.29.0", "@babel/helper-compilation-targets": "^7.28.6", "@babel/helper-module-transforms": "^7.28.6", "@babel/helpers": "^7.28.6", "@babel/parser": "^7.29.0", "@babel/template": "^7.28.6", "@babel/traverse": "^7.29.0", "@babel/types": "^7.29.0", "@jridgewell/remapping": "^2.3.5", "convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", "json5": "^2.2.3", "semver": "^6.3.1" } }, "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA=="], + + "@babel/generator": ["@babel/generator@7.29.1", "", { "dependencies": { "@babel/parser": "^7.29.0", "@babel/types": "^7.29.0", "@jridgewell/gen-mapping": "^0.3.12", "@jridgewell/trace-mapping": "^0.3.28", "jsesc": "^3.0.2" } }, "sha512-qsaF+9Qcm2Qv8SRIMMscAvG4O3lJ0F1GuMo5HR/Bp02LopNgnZBC/EkbevHFeGs4ls/oPz9v+Bsmzbkbe+0dUw=="], + + "@babel/helper-compilation-targets": ["@babel/helper-compilation-targets@7.28.6", "", { "dependencies": { "@babel/compat-data": "^7.28.6", "@babel/helper-validator-option": "^7.27.1", "browserslist": "^4.24.0", "lru-cache": "^5.1.1", "semver": "^6.3.1" } }, "sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA=="], + + "@babel/helper-globals": ["@babel/helper-globals@7.28.0", "", {}, "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw=="], + + "@babel/helper-module-imports": ["@babel/helper-module-imports@7.28.6", "", { "dependencies": { "@babel/traverse": "^7.28.6", "@babel/types": "^7.28.6" } }, "sha512-l5XkZK7r7wa9LucGw9LwZyyCUscb4x37JWTPz7swwFE/0FMQAGpiWUZn8u9DzkSBWEcK25jmvubfpw2dnAMdbw=="], + + "@babel/helper-module-transforms": ["@babel/helper-module-transforms@7.28.6", "", { "dependencies": { "@babel/helper-module-imports": 
"^7.28.6", "@babel/helper-validator-identifier": "^7.28.5", "@babel/traverse": "^7.28.6" }, "peerDependencies": { "@babel/core": "^7.0.0" } }, "sha512-67oXFAYr2cDLDVGLXTEABjdBJZ6drElUSI7WKp70NrpyISso3plG9SAGEF6y7zbha/wOzUByWWTJvEDVNIUGcA=="], + + "@babel/helper-plugin-utils": ["@babel/helper-plugin-utils@7.28.6", "", {}, "sha512-S9gzZ/bz83GRysI7gAD4wPT/AI3uCnY+9xn+Mx/KPs2JwHJIz1W8PZkg2cqyt3RNOBM8ejcXhV6y8Og7ly/Dug=="], + + "@babel/helper-string-parser": ["@babel/helper-string-parser@7.27.1", "", {}, "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA=="], + + "@babel/helper-validator-identifier": ["@babel/helper-validator-identifier@7.28.5", "", {}, "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q=="], + + "@babel/helper-validator-option": ["@babel/helper-validator-option@7.27.1", "", {}, "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg=="], + + "@babel/helpers": ["@babel/helpers@7.29.2", "", { "dependencies": { "@babel/template": "^7.28.6", "@babel/types": "^7.29.0" } }, "sha512-HoGuUs4sCZNezVEKdVcwqmZN8GoHirLUcLaYVNBK2J0DadGtdcqgr3BCbvH8+XUo4NGjNl3VOtSjEKNzqfFgKw=="], + + "@babel/parser": ["@babel/parser@7.29.2", "", { "dependencies": { "@babel/types": "^7.29.0" }, "bin": "./bin/babel-parser.js" }, "sha512-4GgRzy/+fsBa72/RZVJmGKPmZu9Byn8o4MoLpmNe1m8ZfYnz5emHLQz3U4gLud6Zwl0RZIcgiLD7Uq7ySFuDLA=="], + + "@babel/plugin-transform-react-jsx-self": ["@babel/plugin-transform-react-jsx-self@7.27.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw=="], + + "@babel/plugin-transform-react-jsx-source": ["@babel/plugin-transform-react-jsx-source@7.27.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, 
"sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw=="], + + "@babel/template": ["@babel/template@7.28.6", "", { "dependencies": { "@babel/code-frame": "^7.28.6", "@babel/parser": "^7.28.6", "@babel/types": "^7.28.6" } }, "sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ=="], + + "@babel/traverse": ["@babel/traverse@7.29.0", "", { "dependencies": { "@babel/code-frame": "^7.29.0", "@babel/generator": "^7.29.0", "@babel/helper-globals": "^7.28.0", "@babel/parser": "^7.29.0", "@babel/template": "^7.28.6", "@babel/types": "^7.29.0", "debug": "^4.3.1" } }, "sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA=="], + + "@babel/types": ["@babel/types@7.29.0", "", { "dependencies": { "@babel/helper-string-parser": "^7.27.1", "@babel/helper-validator-identifier": "^7.28.5" } }, "sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A=="], + + "@capsizecss/unpack": ["@capsizecss/unpack@4.0.0", "", { "dependencies": { "fontkitten": "^1.0.0" } }, "sha512-VERIM64vtTP1C4mxQ5thVT9fK0apjPFobqybMtA1UdUujWka24ERHbRHFGmpbbhp73MhV+KSsHQH9C6uOTdEQA=="], + + "@clack/core": ["@clack/core@1.1.0", "", { "dependencies": { "sisteransi": "^1.0.5" } }, "sha512-SVcm4Dqm2ukn64/8Gub2wnlA5nS2iWJyCkdNHcvNHPIeBTGojpdJ+9cZKwLfmqy7irD4N5qLteSilJlE0WLAtA=="], + + "@clack/prompts": ["@clack/prompts@1.1.0", "", { "dependencies": { "@clack/core": "1.1.0", "sisteransi": "^1.0.5" } }, "sha512-pkqbPGtohJAvm4Dphs2M8xE29ggupihHdy1x84HNojZuMtFsHiUlRvqD24tM2+XmI+61LlfNceM3Wr7U5QES5g=="], + + "@commitlint/cli": ["@commitlint/cli@20.5.0", "", { "dependencies": { "@commitlint/format": "^20.5.0", "@commitlint/lint": "^20.5.0", "@commitlint/load": "^20.5.0", "@commitlint/read": "^20.5.0", "@commitlint/types": "^20.5.0", "tinyexec": "^1.0.0", "yargs": "^17.0.0" }, "bin": { "commitlint": "./cli.js" } }, 
"sha512-yNkyN/tuKTJS3wdVfsZ2tXDM4G4Gi7z+jW54Cki8N8tZqwKBltbIvUUrSbT4hz1bhW/h0CdR+5sCSpXD+wMKaQ=="], + + "@commitlint/config-conventional": ["@commitlint/config-conventional@20.5.0", "", { "dependencies": { "@commitlint/types": "^20.5.0", "conventional-changelog-conventionalcommits": "^9.2.0" } }, "sha512-t3Ni88rFw1XMa4nZHgOKJ8fIAT9M2j5TnKyTqJzsxea7FUetlNdYFus9dz+MhIRZmc16P0PPyEfh6X2d/qw8SA=="], + + "@commitlint/config-validator": ["@commitlint/config-validator@20.5.0", "", { "dependencies": { "@commitlint/types": "^20.5.0", "ajv": "^8.11.0" } }, "sha512-T/Uh6iJUzyx7j35GmHWdIiGRQB+ouZDk0pwAaYq4SXgB54KZhFdJ0vYmxiW6AMYICTIWuyMxDBl1jK74oFp/Gw=="], + + "@commitlint/ensure": ["@commitlint/ensure@20.5.0", "", { "dependencies": { "@commitlint/types": "^20.5.0", "lodash.camelcase": "^4.3.0", "lodash.kebabcase": "^4.1.1", "lodash.snakecase": "^4.1.1", "lodash.startcase": "^4.4.0", "lodash.upperfirst": "^4.3.1" } }, "sha512-IpHqAUesBeW1EDDdjzJeaOxU9tnogLAyXLRBn03SHlj1SGENn2JGZqSWGkFvBJkJzfXAuCNtsoYzax+ZPS+puw=="], + + "@commitlint/execute-rule": ["@commitlint/execute-rule@20.0.0", "", {}, "sha512-xyCoOShoPuPL44gVa+5EdZsBVao/pNzpQhkzq3RdtlFdKZtjWcLlUFQHSWBuhk5utKYykeJPSz2i8ABHQA+ZZw=="], + + "@commitlint/format": ["@commitlint/format@20.5.0", "", { "dependencies": { "@commitlint/types": "^20.5.0", "picocolors": "^1.1.1" } }, "sha512-TI9EwFU/qZWSK7a5qyXMpKPPv3qta7FO4tKW+Wt2al7sgMbLWTsAcDpX1cU8k16TRdsiiet9aOw0zpvRXNJu7Q=="], + + "@commitlint/is-ignored": ["@commitlint/is-ignored@20.5.0", "", { "dependencies": { "@commitlint/types": "^20.5.0", "semver": "^7.6.0" } }, "sha512-JWLarAsurHJhPozbuAH6GbP4p/hdOCoqS9zJMfqwswne+/GPs5V0+rrsfOkP68Y8PSLphwtFXV0EzJ+GTXTTGg=="], + + "@commitlint/lint": ["@commitlint/lint@20.5.0", "", { "dependencies": { "@commitlint/is-ignored": "^20.5.0", "@commitlint/parse": "^20.5.0", "@commitlint/rules": "^20.5.0", "@commitlint/types": "^20.5.0" } }, "sha512-jiM3hNUdu04jFBf1VgPdjtIPvbuVfDTBAc6L98AWcoLjF5sYqkulBHBzlVWll4rMF1T5zeQFB6r//a+s+BBKlA=="], + + 
"@commitlint/load": ["@commitlint/load@20.5.0", "", { "dependencies": { "@commitlint/config-validator": "^20.5.0", "@commitlint/execute-rule": "^20.0.0", "@commitlint/resolve-extends": "^20.5.0", "@commitlint/types": "^20.5.0", "cosmiconfig": "^9.0.1", "cosmiconfig-typescript-loader": "^6.1.0", "is-plain-obj": "^4.1.0", "lodash.mergewith": "^4.6.2", "picocolors": "^1.1.1" } }, "sha512-sLhhYTL/KxeOTZjjabKDhwidGZan84XKK1+XFkwDYL/4883kIajcz/dZFAhBJmZPtL8+nBx6bnkzA95YxPeDPw=="], + + "@commitlint/message": ["@commitlint/message@20.4.3", "", {}, "sha512-6akwCYrzcrFcTYz9GyUaWlhisY4lmQ3KvrnabmhoeAV8nRH4dXJAh4+EUQ3uArtxxKQkvxJS78hNX2EU3USgxQ=="], + + "@commitlint/parse": ["@commitlint/parse@20.5.0", "", { "dependencies": { "@commitlint/types": "^20.5.0", "conventional-changelog-angular": "^8.2.0", "conventional-commits-parser": "^6.3.0" } }, "sha512-SeKWHBMk7YOTnnEWUhx+d1a9vHsjjuo6Uo1xRfPNfeY4bdYFasCH1dDpAv13Lyn+dDPOels+jP6D2GRZqzc5fA=="], + + "@commitlint/read": ["@commitlint/read@20.5.0", "", { "dependencies": { "@commitlint/top-level": "^20.4.3", "@commitlint/types": "^20.5.0", "git-raw-commits": "^5.0.0", "minimist": "^1.2.8", "tinyexec": "^1.0.0" } }, "sha512-JDEIJ2+GnWpK8QqwfmW7O42h0aycJEWNqcdkJnyzLD11nf9dW2dWLTVEa8Wtlo4IZFGLPATjR5neA5QlOvIH1w=="], + + "@commitlint/resolve-extends": ["@commitlint/resolve-extends@20.5.0", "", { "dependencies": { "@commitlint/config-validator": "^20.5.0", "@commitlint/types": "^20.5.0", "global-directory": "^4.0.1", "import-meta-resolve": "^4.0.0", "lodash.mergewith": "^4.6.2", "resolve-from": "^5.0.0" } }, "sha512-3SHPWUW2v0tyspCTcfSsYml0gses92l6TlogwzvM2cbxDgmhSRc+fldDjvGkCXJrjSM87BBaWYTPWwwyASZRrg=="], + + "@commitlint/rules": ["@commitlint/rules@20.5.0", "", { "dependencies": { "@commitlint/ensure": "^20.5.0", "@commitlint/message": "^20.4.3", "@commitlint/to-lines": "^20.0.0", "@commitlint/types": "^20.5.0" } }, "sha512-5NdQXQEdnDPT5pK8O39ZA7HohzPRHEsDGU23cyVCNPQy4WegAbAwrQk3nIu7p2sl3dutPk8RZd91yKTrMTnRkQ=="], + + 
"@commitlint/to-lines": ["@commitlint/to-lines@20.0.0", "", {}, "sha512-2l9gmwiCRqZNWgV+pX1X7z4yP0b3ex/86UmUFgoRt672Ez6cAM2lOQeHFRUTuE6sPpi8XBCGnd8Kh3bMoyHwJw=="], + + "@commitlint/top-level": ["@commitlint/top-level@20.4.3", "", { "dependencies": { "escalade": "^3.2.0" } }, "sha512-qD9xfP6dFg5jQ3NMrOhG0/w5y3bBUsVGyJvXxdWEwBm8hyx4WOk3kKXw28T5czBYvyeCVJgJJ6aoJZUWDpaacQ=="], + + "@commitlint/types": ["@commitlint/types@20.5.0", "", { "dependencies": { "conventional-commits-parser": "^6.3.0", "picocolors": "^1.1.1" } }, "sha512-ZJoS8oSq2CAZEpc/YI9SulLrdiIyXeHb/OGqGrkUP6Q7YV+0ouNAa7GjqRdXeQPncHQIDz/jbCTlHScvYvO/gA=="], + + "@conventional-changelog/git-client": ["@conventional-changelog/git-client@2.6.0", "", { "dependencies": { "@simple-libs/child-process-utils": "^1.0.0", "@simple-libs/stream-utils": "^1.2.0", "semver": "^7.5.2" }, "peerDependencies": { "conventional-commits-filter": "^5.0.0", "conventional-commits-parser": "^6.3.0" }, "optionalPeers": ["conventional-commits-filter", "conventional-commits-parser"] }, "sha512-T+uPDciKf0/ioNNDpMGc8FDsehJClZP0yR3Q5MN6wE/Y/1QZ7F+80OgznnTCOlMEG4AV0LvH2UJi3C/nBnaBUg=="], + + "@emnapi/runtime": ["@emnapi/runtime@1.9.1", "", { "dependencies": { "tslib": "^2.4.0" } }, "sha512-VYi5+ZVLhpgK4hQ0TAjiQiZ6ol0oe4mBx7mVv7IflsiEp0OWoVsp/+f9Vc1hOhE0TtkORVrI1GvzyreqpgWtkA=="], + + "@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.27.4", "", { "os": "aix", "cpu": "ppc64" }, "sha512-cQPwL2mp2nSmHHJlCyoXgHGhbEPMrEEU5xhkcy3Hs/O7nGZqEpZ2sUtLaL9MORLtDfRvVl2/3PAuEkYZH0Ty8Q=="], + + "@esbuild/android-arm": ["@esbuild/android-arm@0.27.4", "", { "os": "android", "cpu": "arm" }, "sha512-X9bUgvxiC8CHAGKYufLIHGXPJWnr0OCdR0anD2e21vdvgCI8lIfqFbnoeOz7lBjdrAGUhqLZLcQo6MLhTO2DKQ=="], + + "@esbuild/android-arm64": ["@esbuild/android-arm64@0.27.4", "", { "os": "android", "cpu": "arm64" }, "sha512-gdLscB7v75wRfu7QSm/zg6Rx29VLdy9eTr2t44sfTW7CxwAtQghZ4ZnqHk3/ogz7xao0QAgrkradbBzcqFPasw=="], + + "@esbuild/android-x64": ["@esbuild/android-x64@0.27.4", "", { "os": 
"android", "cpu": "x64" }, "sha512-PzPFnBNVF292sfpfhiyiXCGSn9HZg5BcAz+ivBuSsl6Rk4ga1oEXAamhOXRFyMcjwr2DVtm40G65N3GLeH1Lvw=="], + + "@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.27.4", "", { "os": "darwin", "cpu": "arm64" }, "sha512-b7xaGIwdJlht8ZFCvMkpDN6uiSmnxxK56N2GDTMYPr2/gzvfdQN8rTfBsvVKmIVY/X7EM+/hJKEIbbHs9oA4tQ=="], + + "@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.27.4", "", { "os": "darwin", "cpu": "x64" }, "sha512-sR+OiKLwd15nmCdqpXMnuJ9W2kpy0KigzqScqHI3Hqwr7IXxBp3Yva+yJwoqh7rE8V77tdoheRYataNKL4QrPw=="], + + "@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.27.4", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-jnfpKe+p79tCnm4GVav68A7tUFeKQwQyLgESwEAUzyxk/TJr4QdGog9sqWNcUbr/bZt/O/HXouspuQDd9JxFSw=="], + + "@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.27.4", "", { "os": "freebsd", "cpu": "x64" }, "sha512-2kb4ceA/CpfUrIcTUl1wrP/9ad9Atrp5J94Lq69w7UwOMolPIGrfLSvAKJp0RTvkPPyn6CIWrNy13kyLikZRZQ=="], + + "@esbuild/linux-arm": ["@esbuild/linux-arm@0.27.4", "", { "os": "linux", "cpu": "arm" }, "sha512-aBYgcIxX/wd5n2ys0yESGeYMGF+pv6g0DhZr3G1ZG4jMfruU9Tl1i2Z+Wnj9/KjGz1lTLCcorqE2viePZqj4Eg=="], + + "@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.27.4", "", { "os": "linux", "cpu": "arm64" }, "sha512-7nQOttdzVGth1iz57kxg9uCz57dxQLHWxopL6mYuYthohPKEK0vU0C3O21CcBK6KDlkYVcnDXY099HcCDXd9dA=="], + + "@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.27.4", "", { "os": "linux", "cpu": "ia32" }, "sha512-oPtixtAIzgvzYcKBQM/qZ3R+9TEUd1aNJQu0HhGyqtx6oS7qTpvjheIWBbes4+qu1bNlo2V4cbkISr8q6gRBFA=="], + + "@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.27.4", "", { "os": "linux", "cpu": "none" }, "sha512-8mL/vh8qeCoRcFH2nM8wm5uJP+ZcVYGGayMavi8GmRJjuI3g1v6Z7Ni0JJKAJW+m0EtUuARb6Lmp4hMjzCBWzA=="], + + "@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.27.4", "", { "os": "linux", "cpu": "none" }, "sha512-1RdrWFFiiLIW7LQq9Q2NES+HiD4NyT8Itj9AUeCl0IVCA459WnPhREKgwrpaIfTOe+/2rdntisegiPWn/r/aAw=="], + + "@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.27.4", "", { "os": 
"linux", "cpu": "ppc64" }, "sha512-tLCwNG47l3sd9lpfyx9LAGEGItCUeRCWeAx6x2Jmbav65nAwoPXfewtAdtbtit/pJFLUWOhpv0FpS6GQAmPrHA=="], + + "@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.27.4", "", { "os": "linux", "cpu": "none" }, "sha512-BnASypppbUWyqjd1KIpU4AUBiIhVr6YlHx/cnPgqEkNoVOhHg+YiSVxM1RLfiy4t9cAulbRGTNCKOcqHrEQLIw=="], + + "@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.27.4", "", { "os": "linux", "cpu": "s390x" }, "sha512-+eUqgb/Z7vxVLezG8bVB9SfBie89gMueS+I0xYh2tJdw3vqA/0ImZJ2ROeWwVJN59ihBeZ7Tu92dF/5dy5FttA=="], + + "@esbuild/linux-x64": ["@esbuild/linux-x64@0.27.4", "", { "os": "linux", "cpu": "x64" }, "sha512-S5qOXrKV8BQEzJPVxAwnryi2+Iq5pB40gTEIT69BQONqR7JH1EPIcQ/Uiv9mCnn05jff9umq/5nqzxlqTOg9NA=="], + + "@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.27.4", "", { "os": "none", "cpu": "arm64" }, "sha512-xHT8X4sb0GS8qTqiwzHqpY00C95DPAq7nAwX35Ie/s+LO9830hrMd3oX0ZMKLvy7vsonee73x0lmcdOVXFzd6Q=="], + + "@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.27.4", "", { "os": "none", "cpu": "x64" }, "sha512-RugOvOdXfdyi5Tyv40kgQnI0byv66BFgAqjdgtAKqHoZTbTF2QqfQrFwa7cHEORJf6X2ht+l9ABLMP0dnKYsgg=="], + + "@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.27.4", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-2MyL3IAaTX+1/qP0O1SwskwcwCoOI4kV2IBX1xYnDDqthmq5ArrW94qSIKCAuRraMgPOmG0RDTA74mzYNQA9ow=="], + + "@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.27.4", "", { "os": "openbsd", "cpu": "x64" }, "sha512-u8fg/jQ5aQDfsnIV6+KwLOf1CmJnfu1ShpwqdwC0uA7ZPwFws55Ngc12vBdeUdnuWoQYx/SOQLGDcdlfXhYmXQ=="], + + "@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.27.4", "", { "os": "none", "cpu": "arm64" }, "sha512-JkTZrl6VbyO8lDQO3yv26nNr2RM2yZzNrNHEsj9bm6dOwwu9OYN28CjzZkH57bh4w0I2F7IodpQvUAEd1mbWXg=="], + + "@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.27.4", "", { "os": "sunos", "cpu": "x64" }, "sha512-/gOzgaewZJfeJTlsWhvUEmUG4tWEY2Spp5M20INYRg2ZKl9QPO3QEEgPeRtLjEWSW8FilRNacPOg8R1uaYkA6g=="], + + "@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.27.4", "", { "os": 
"win32", "cpu": "arm64" }, "sha512-Z9SExBg2y32smoDQdf1HRwHRt6vAHLXcxD2uGgO/v2jK7Y718Ix4ndsbNMU/+1Qiem9OiOdaqitioZwxivhXYg=="], + + "@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.27.4", "", { "os": "win32", "cpu": "ia32" }, "sha512-DAyGLS0Jz5G5iixEbMHi5KdiApqHBWMGzTtMiJ72ZOLhbu/bzxgAe8Ue8CTS3n3HbIUHQz/L51yMdGMeoxXNJw=="], + + "@esbuild/win32-x64": ["@esbuild/win32-x64@0.27.4", "", { "os": "win32", "cpu": "x64" }, "sha512-+knoa0BDoeXgkNvvV1vvbZX4+hizelrkwmGJBdT17t8FNPwG2lKemmuMZlmaNQ3ws3DKKCxpb4zRZEIp3UxFCg=="], + + "@eslint-community/eslint-utils": ["@eslint-community/eslint-utils@4.9.1", "", { "dependencies": { "eslint-visitor-keys": "^3.4.3" }, "peerDependencies": { "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" } }, "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ=="], + + "@eslint-community/regexpp": ["@eslint-community/regexpp@4.12.2", "", {}, "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew=="], + + "@eslint/config-array": ["@eslint/config-array@0.21.2", "", { "dependencies": { "@eslint/object-schema": "^2.1.7", "debug": "^4.3.1", "minimatch": "^3.1.5" } }, "sha512-nJl2KGTlrf9GjLimgIru+V/mzgSK0ABCDQRvxw5BjURL7WfH5uoWmizbH7QB6MmnMBd8cIC9uceWnezL1VZWWw=="], + + "@eslint/config-helpers": ["@eslint/config-helpers@0.4.2", "", { "dependencies": { "@eslint/core": "^0.17.0" } }, "sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw=="], + + "@eslint/core": ["@eslint/core@0.17.0", "", { "dependencies": { "@types/json-schema": "^7.0.15" } }, "sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ=="], + + "@eslint/eslintrc": ["@eslint/eslintrc@3.3.5", "", { "dependencies": { "ajv": "^6.14.0", "debug": "^4.3.2", "espree": "^10.0.1", "globals": "^14.0.0", "ignore": "^5.2.0", "import-fresh": "^3.2.1", "js-yaml": "^4.1.1", "minimatch": "^3.1.5", "strip-json-comments": "^3.1.1" } }, 
"sha512-4IlJx0X0qftVsN5E+/vGujTRIFtwuLbNsVUe7TO6zYPDR1O6nFwvwhIKEKSrl6dZchmYBITazxKoUYOjdtjlRg=="], + + "@eslint/js": ["@eslint/js@9.39.4", "", {}, "sha512-nE7DEIchvtiFTwBw4Lfbu59PG+kCofhjsKaCWzxTpt4lfRjRMqG6uMBzKXuEcyXhOHoUp9riAm7/aWYGhXZ9cw=="], + + "@eslint/object-schema": ["@eslint/object-schema@2.1.7", "", {}, "sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA=="], + + "@eslint/plugin-kit": ["@eslint/plugin-kit@0.4.1", "", { "dependencies": { "@eslint/core": "^0.17.0", "levn": "^0.4.1" } }, "sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA=="], + + "@humanfs/core": ["@humanfs/core@0.19.1", "", {}, "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA=="], + + "@humanfs/node": ["@humanfs/node@0.16.7", "", { "dependencies": { "@humanfs/core": "^0.19.1", "@humanwhocodes/retry": "^0.4.0" } }, "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ=="], + + "@humanwhocodes/module-importer": ["@humanwhocodes/module-importer@1.0.1", "", {}, "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA=="], + + "@humanwhocodes/retry": ["@humanwhocodes/retry@0.4.3", "", {}, "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ=="], + + "@img/colour": ["@img/colour@1.1.0", "", {}, "sha512-Td76q7j57o/tLVdgS746cYARfSyxk8iEfRxewL9h4OMzYhbW4TAcppl0mT4eyqXddh6L/jwoM75mo7ixa/pCeQ=="], + + "@img/sharp-darwin-arm64": ["@img/sharp-darwin-arm64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-darwin-arm64": "1.2.4" }, "os": "darwin", "cpu": "arm64" }, "sha512-imtQ3WMJXbMY4fxb/Ndp6HBTNVtWCUI0WdobyheGf5+ad6xX8VIDO8u2xE4qc/fr08CKG/7dDseFtn6M6g/r3w=="], + + "@img/sharp-darwin-x64": ["@img/sharp-darwin-x64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-darwin-x64": "1.2.4" }, "os": "darwin", "cpu": "x64" }, 
"sha512-YNEFAF/4KQ/PeW0N+r+aVVsoIY0/qxxikF2SWdp+NRkmMB7y9LBZAVqQ4yhGCm/H3H270OSykqmQMKLBhBJDEw=="], + + "@img/sharp-libvips-darwin-arm64": ["@img/sharp-libvips-darwin-arm64@1.2.4", "", { "os": "darwin", "cpu": "arm64" }, "sha512-zqjjo7RatFfFoP0MkQ51jfuFZBnVE2pRiaydKJ1G/rHZvnsrHAOcQALIi9sA5co5xenQdTugCvtb1cuf78Vf4g=="], + + "@img/sharp-libvips-darwin-x64": ["@img/sharp-libvips-darwin-x64@1.2.4", "", { "os": "darwin", "cpu": "x64" }, "sha512-1IOd5xfVhlGwX+zXv2N93k0yMONvUlANylbJw1eTah8K/Jtpi15KC+WSiaX/nBmbm2HxRM1gZ0nSdjSsrZbGKg=="], + + "@img/sharp-libvips-linux-arm": ["@img/sharp-libvips-linux-arm@1.2.4", "", { "os": "linux", "cpu": "arm" }, "sha512-bFI7xcKFELdiNCVov8e44Ia4u2byA+l3XtsAj+Q8tfCwO6BQ8iDojYdvoPMqsKDkuoOo+X6HZA0s0q11ANMQ8A=="], + + "@img/sharp-libvips-linux-arm64": ["@img/sharp-libvips-linux-arm64@1.2.4", "", { "os": "linux", "cpu": "arm64" }, "sha512-excjX8DfsIcJ10x1Kzr4RcWe1edC9PquDRRPx3YVCvQv+U5p7Yin2s32ftzikXojb1PIFc/9Mt28/y+iRklkrw=="], + + "@img/sharp-libvips-linux-ppc64": ["@img/sharp-libvips-linux-ppc64@1.2.4", "", { "os": "linux", "cpu": "ppc64" }, "sha512-FMuvGijLDYG6lW+b/UvyilUWu5Ayu+3r2d1S8notiGCIyYU/76eig1UfMmkZ7vwgOrzKzlQbFSuQfgm7GYUPpA=="], + + "@img/sharp-libvips-linux-riscv64": ["@img/sharp-libvips-linux-riscv64@1.2.4", "", { "os": "linux", "cpu": "none" }, "sha512-oVDbcR4zUC0ce82teubSm+x6ETixtKZBh/qbREIOcI3cULzDyb18Sr/Wcyx7NRQeQzOiHTNbZFF1UwPS2scyGA=="], + + "@img/sharp-libvips-linux-s390x": ["@img/sharp-libvips-linux-s390x@1.2.4", "", { "os": "linux", "cpu": "s390x" }, "sha512-qmp9VrzgPgMoGZyPvrQHqk02uyjA0/QrTO26Tqk6l4ZV0MPWIW6LTkqOIov+J1yEu7MbFQaDpwdwJKhbJvuRxQ=="], + + "@img/sharp-libvips-linux-x64": ["@img/sharp-libvips-linux-x64@1.2.4", "", { "os": "linux", "cpu": "x64" }, "sha512-tJxiiLsmHc9Ax1bz3oaOYBURTXGIRDODBqhveVHonrHJ9/+k89qbLl0bcJns+e4t4rvaNBxaEZsFtSfAdquPrw=="], + + "@img/sharp-libvips-linuxmusl-arm64": ["@img/sharp-libvips-linuxmusl-arm64@1.2.4", "", { "os": "linux", "cpu": "arm64" }, 
"sha512-FVQHuwx1IIuNow9QAbYUzJ+En8KcVm9Lk5+uGUQJHaZmMECZmOlix9HnH7n1TRkXMS0pGxIJokIVB9SuqZGGXw=="], + + "@img/sharp-libvips-linuxmusl-x64": ["@img/sharp-libvips-linuxmusl-x64@1.2.4", "", { "os": "linux", "cpu": "x64" }, "sha512-+LpyBk7L44ZIXwz/VYfglaX/okxezESc6UxDSoyo2Ks6Jxc4Y7sGjpgU9s4PMgqgjj1gZCylTieNamqA1MF7Dg=="], + + "@img/sharp-linux-arm": ["@img/sharp-linux-arm@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-arm": "1.2.4" }, "os": "linux", "cpu": "arm" }, "sha512-9dLqsvwtg1uuXBGZKsxem9595+ujv0sJ6Vi8wcTANSFpwV/GONat5eCkzQo/1O6zRIkh0m/8+5BjrRr7jDUSZw=="], + + "@img/sharp-linux-arm64": ["@img/sharp-linux-arm64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-arm64": "1.2.4" }, "os": "linux", "cpu": "arm64" }, "sha512-bKQzaJRY/bkPOXyKx5EVup7qkaojECG6NLYswgktOZjaXecSAeCWiZwwiFf3/Y+O1HrauiE3FVsGxFg8c24rZg=="], + + "@img/sharp-linux-ppc64": ["@img/sharp-linux-ppc64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-ppc64": "1.2.4" }, "os": "linux", "cpu": "ppc64" }, "sha512-7zznwNaqW6YtsfrGGDA6BRkISKAAE1Jo0QdpNYXNMHu2+0dTrPflTLNkpc8l7MUP5M16ZJcUvysVWWrMefZquA=="], + + "@img/sharp-linux-riscv64": ["@img/sharp-linux-riscv64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-riscv64": "1.2.4" }, "os": "linux", "cpu": "none" }, "sha512-51gJuLPTKa7piYPaVs8GmByo7/U7/7TZOq+cnXJIHZKavIRHAP77e3N2HEl3dgiqdD/w0yUfiJnII77PuDDFdw=="], + + "@img/sharp-linux-s390x": ["@img/sharp-linux-s390x@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-s390x": "1.2.4" }, "os": "linux", "cpu": "s390x" }, "sha512-nQtCk0PdKfho3eC5MrbQoigJ2gd1CgddUMkabUj+rBevs8tZ2cULOx46E7oyX+04WGfABgIwmMC0VqieTiR4jg=="], + + "@img/sharp-linux-x64": ["@img/sharp-linux-x64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-x64": "1.2.4" }, "os": "linux", "cpu": "x64" }, "sha512-MEzd8HPKxVxVenwAa+JRPwEC7QFjoPWuS5NZnBt6B3pu7EG2Ge0id1oLHZpPJdn3OQK+BQDiw9zStiHBTJQQQQ=="], + + "@img/sharp-linuxmusl-arm64": 
["@img/sharp-linuxmusl-arm64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linuxmusl-arm64": "1.2.4" }, "os": "linux", "cpu": "arm64" }, "sha512-fprJR6GtRsMt6Kyfq44IsChVZeGN97gTD331weR1ex1c1rypDEABN6Tm2xa1wE6lYb5DdEnk03NZPqA7Id21yg=="], + + "@img/sharp-linuxmusl-x64": ["@img/sharp-linuxmusl-x64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linuxmusl-x64": "1.2.4" }, "os": "linux", "cpu": "x64" }, "sha512-Jg8wNT1MUzIvhBFxViqrEhWDGzqymo3sV7z7ZsaWbZNDLXRJZoRGrjulp60YYtV4wfY8VIKcWidjojlLcWrd8Q=="], + + "@img/sharp-wasm32": ["@img/sharp-wasm32@0.34.5", "", { "dependencies": { "@emnapi/runtime": "^1.7.0" }, "cpu": "none" }, "sha512-OdWTEiVkY2PHwqkbBI8frFxQQFekHaSSkUIJkwzclWZe64O1X4UlUjqqqLaPbUpMOQk6FBu/HtlGXNblIs0huw=="], + + "@img/sharp-win32-arm64": ["@img/sharp-win32-arm64@0.34.5", "", { "os": "win32", "cpu": "arm64" }, "sha512-WQ3AgWCWYSb2yt+IG8mnC6Jdk9Whs7O0gxphblsLvdhSpSTtmu69ZG1Gkb6NuvxsNACwiPV6cNSZNzt0KPsw7g=="], + + "@img/sharp-win32-ia32": ["@img/sharp-win32-ia32@0.34.5", "", { "os": "win32", "cpu": "ia32" }, "sha512-FV9m/7NmeCmSHDD5j4+4pNI8Cp3aW+JvLoXcTUo0IqyjSfAZJ8dIUmijx1qaJsIiU+Hosw6xM5KijAWRJCSgNg=="], + + "@img/sharp-win32-x64": ["@img/sharp-win32-x64@0.34.5", "", { "os": "win32", "cpu": "x64" }, "sha512-+29YMsqY2/9eFEiW93eqWnuLcWcufowXewwSNIT6UwZdUUCrM3oFjMWH/Z6/TMmb4hlFenmfAVbpWeup2jryCw=="], + + "@jridgewell/gen-mapping": ["@jridgewell/gen-mapping@0.3.13", "", { "dependencies": { "@jridgewell/sourcemap-codec": "^1.5.0", "@jridgewell/trace-mapping": "^0.3.24" } }, "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA=="], + + "@jridgewell/remapping": ["@jridgewell/remapping@2.3.5", "", { "dependencies": { "@jridgewell/gen-mapping": "^0.3.5", "@jridgewell/trace-mapping": "^0.3.24" } }, "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ=="], + + "@jridgewell/resolve-uri": ["@jridgewell/resolve-uri@3.1.2", "", {}, 
"sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw=="], + + "@jridgewell/source-map": ["@jridgewell/source-map@0.3.11", "", { "dependencies": { "@jridgewell/gen-mapping": "^0.3.5", "@jridgewell/trace-mapping": "^0.3.25" } }, "sha512-ZMp1V8ZFcPG5dIWnQLr3NSI1MiCU7UETdS/A0G8V/XWHvJv3ZsFqutJn1Y5RPmAPX6F3BiE397OqveU/9NCuIA=="], + + "@jridgewell/sourcemap-codec": ["@jridgewell/sourcemap-codec@1.5.5", "", {}, "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og=="], + + "@jridgewell/trace-mapping": ["@jridgewell/trace-mapping@0.3.31", "", { "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", "@jridgewell/sourcemap-codec": "^1.4.14" } }, "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw=="], + + "@oslojs/encoding": ["@oslojs/encoding@1.1.0", "", {}, "sha512-70wQhgYmndg4GCPxPPxPGevRKqTIJ2Nh4OkiMWmDAVYsTQ+Ta7Sq+rPevXyXGdzr30/qZBnyOalCszoMxlyldQ=="], + + "@parcel/watcher": ["@parcel/watcher@2.5.6", "", { "dependencies": { "detect-libc": "^2.0.3", "is-glob": "^4.0.3", "node-addon-api": "^7.0.0", "picomatch": "^4.0.3" }, "optionalDependencies": { "@parcel/watcher-android-arm64": "2.5.6", "@parcel/watcher-darwin-arm64": "2.5.6", "@parcel/watcher-darwin-x64": "2.5.6", "@parcel/watcher-freebsd-x64": "2.5.6", "@parcel/watcher-linux-arm-glibc": "2.5.6", "@parcel/watcher-linux-arm-musl": "2.5.6", "@parcel/watcher-linux-arm64-glibc": "2.5.6", "@parcel/watcher-linux-arm64-musl": "2.5.6", "@parcel/watcher-linux-x64-glibc": "2.5.6", "@parcel/watcher-linux-x64-musl": "2.5.6", "@parcel/watcher-win32-arm64": "2.5.6", "@parcel/watcher-win32-ia32": "2.5.6", "@parcel/watcher-win32-x64": "2.5.6" } }, "sha512-tmmZ3lQxAe/k/+rNnXQRawJ4NjxO2hqiOLTHvWchtGZULp4RyFeh6aU4XdOYBFe2KE1oShQTv4AblOs2iOrNnQ=="], + + "@parcel/watcher-android-arm64": ["@parcel/watcher-android-arm64@2.5.6", "", { "os": "android", "cpu": "arm64" }, 
"sha512-YQxSS34tPF/6ZG7r/Ih9xy+kP/WwediEUsqmtf0cuCV5TPPKw/PQHRhueUo6JdeFJaqV3pyjm0GdYjZotbRt/A=="], + + "@parcel/watcher-darwin-arm64": ["@parcel/watcher-darwin-arm64@2.5.6", "", { "os": "darwin", "cpu": "arm64" }, "sha512-Z2ZdrnwyXvvvdtRHLmM4knydIdU9adO3D4n/0cVipF3rRiwP+3/sfzpAwA/qKFL6i1ModaabkU7IbpeMBgiVEA=="], + + "@parcel/watcher-darwin-x64": ["@parcel/watcher-darwin-x64@2.5.6", "", { "os": "darwin", "cpu": "x64" }, "sha512-HgvOf3W9dhithcwOWX9uDZyn1lW9R+7tPZ4sug+NGrGIo4Rk1hAXLEbcH1TQSqxts0NYXXlOWqVpvS1SFS4fRg=="], + + "@parcel/watcher-freebsd-x64": ["@parcel/watcher-freebsd-x64@2.5.6", "", { "os": "freebsd", "cpu": "x64" }, "sha512-vJVi8yd/qzJxEKHkeemh7w3YAn6RJCtYlE4HPMoVnCpIXEzSrxErBW5SJBgKLbXU3WdIpkjBTeUNtyBVn8TRng=="], + + "@parcel/watcher-linux-arm-glibc": ["@parcel/watcher-linux-arm-glibc@2.5.6", "", { "os": "linux", "cpu": "arm" }, "sha512-9JiYfB6h6BgV50CCfasfLf/uvOcJskMSwcdH1PHH9rvS1IrNy8zad6IUVPVUfmXr+u+Km9IxcfMLzgdOudz9EQ=="], + + "@parcel/watcher-linux-arm-musl": ["@parcel/watcher-linux-arm-musl@2.5.6", "", { "os": "linux", "cpu": "arm" }, "sha512-Ve3gUCG57nuUUSyjBq/MAM0CzArtuIOxsBdQ+ftz6ho8n7s1i9E1Nmk/xmP323r2YL0SONs1EuwqBp2u1k5fxg=="], + + "@parcel/watcher-linux-arm64-glibc": ["@parcel/watcher-linux-arm64-glibc@2.5.6", "", { "os": "linux", "cpu": "arm64" }, "sha512-f2g/DT3NhGPdBmMWYoxixqYr3v/UXcmLOYy16Bx0TM20Tchduwr4EaCbmxh1321TABqPGDpS8D/ggOTaljijOA=="], + + "@parcel/watcher-linux-arm64-musl": ["@parcel/watcher-linux-arm64-musl@2.5.6", "", { "os": "linux", "cpu": "arm64" }, "sha512-qb6naMDGlbCwdhLj6hgoVKJl2odL34z2sqkC7Z6kzir8b5W65WYDpLB6R06KabvZdgoHI/zxke4b3zR0wAbDTA=="], + + "@parcel/watcher-linux-x64-glibc": ["@parcel/watcher-linux-x64-glibc@2.5.6", "", { "os": "linux", "cpu": "x64" }, "sha512-kbT5wvNQlx7NaGjzPFu8nVIW1rWqV780O7ZtkjuWaPUgpv2NMFpjYERVi0UYj1msZNyCzGlaCWEtzc+exjMGbQ=="], + + "@parcel/watcher-linux-x64-musl": ["@parcel/watcher-linux-x64-musl@2.5.6", "", { "os": "linux", "cpu": "x64" }, 
"sha512-1JRFeC+h7RdXwldHzTsmdtYR/Ku8SylLgTU/reMuqdVD7CtLwf0VR1FqeprZ0eHQkO0vqsbvFLXUmYm/uNKJBg=="], + + "@parcel/watcher-win32-arm64": ["@parcel/watcher-win32-arm64@2.5.6", "", { "os": "win32", "cpu": "arm64" }, "sha512-3ukyebjc6eGlw9yRt678DxVF7rjXatWiHvTXqphZLvo7aC5NdEgFufVwjFfY51ijYEWpXbqF5jtrK275z52D4Q=="], + + "@parcel/watcher-win32-ia32": ["@parcel/watcher-win32-ia32@2.5.6", "", { "os": "win32", "cpu": "ia32" }, "sha512-k35yLp1ZMwwee3Ez/pxBi5cf4AoBKYXj00CZ80jUz5h8prpiaQsiRPKQMxoLstNuqe2vR4RNPEAEcjEFzhEz/g=="], + + "@parcel/watcher-win32-x64": ["@parcel/watcher-win32-x64@2.5.6", "", { "os": "win32", "cpu": "x64" }, "sha512-hbQlYcCq5dlAX9Qx+kFb0FHue6vbjlf0FrNzSKdYK2APUf7tGfGxQCk2ihEREmbR6ZMc0MVAD5RIX/41gpUzTw=="], + + "@pkgr/core": ["@pkgr/core@0.2.9", "", {}, "sha512-QNqXyfVS2wm9hweSYD2O7F0G06uurj9kZ96TRQE5Y9hU7+tgdZwIkbAKc5Ocy1HxEY2kuDQa6cQ1WRs/O5LFKA=="], + + "@playwright/test": ["@playwright/test@1.58.2", "", { "dependencies": { "playwright": "1.58.2" }, "bin": { "playwright": "cli.js" } }, "sha512-akea+6bHYBBfA9uQqSYmlJXn61cTa+jbO87xVLCWbTqbWadRVmhxlXATaOjOgcBaWU4ePo0wB41KMFv3o35IXA=="], + + "@rive-app/react-webgl2": ["@rive-app/react-webgl2@4.27.3", "", { "dependencies": { "@rive-app/webgl2": "2.35.4" }, "peerDependencies": { "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0" } }, "sha512-f+i0w/iipxwJWOH2J+0u2se0OdTM5PNWqwndRMO6EwOG5YsHHOc9opmFQCViiwkzT6Sr3vU/hqdrZy8tno6exw=="], + + "@rive-app/webgl2": ["@rive-app/webgl2@2.35.4", "", {}, "sha512-1dX1axIC6WgrvCkBsH1tjHjPz3xXGoDUuu3CZMdnlCOtGmASq4WM7LOPvuggmLgGl5mLHX9VF7Nr/AakpcMeUw=="], + + "@rolldown/pluginutils": ["@rolldown/pluginutils@1.0.0-rc.3", "", {}, "sha512-eybk3TjzzzV97Dlj5c+XrBFW57eTNhzod66y9HrBlzJ6NsCrWCp/2kaPS3K9wJmurBC0Tdw4yPjXKZqlznim3Q=="], + + "@rollup/pluginutils": ["@rollup/pluginutils@5.3.0", "", { "dependencies": { "@types/estree": "^1.0.0", "estree-walker": "^2.0.2", "picomatch": "^4.0.2" }, "peerDependencies": { "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" }, "optionalPeers": 
["rollup"] }, "sha512-5EdhGZtnu3V88ces7s53hhfK5KSASnJZv8Lulpc04cWO3REESroJXg73DFsOmgbU2BhwV0E20bu2IDZb3VKW4Q=="], + + "@rollup/rollup-android-arm-eabi": ["@rollup/rollup-android-arm-eabi@4.60.1", "", { "os": "android", "cpu": "arm" }, "sha512-d6FinEBLdIiK+1uACUttJKfgZREXrF0Qc2SmLII7W2AD8FfiZ9Wjd+rD/iRuf5s5dWrr1GgwXCvPqOuDquOowA=="], + + "@rollup/rollup-android-arm64": ["@rollup/rollup-android-arm64@4.60.1", "", { "os": "android", "cpu": "arm64" }, "sha512-YjG/EwIDvvYI1YvYbHvDz/BYHtkY4ygUIXHnTdLhG+hKIQFBiosfWiACWortsKPKU/+dUwQQCKQM3qrDe8c9BA=="], + + "@rollup/rollup-darwin-arm64": ["@rollup/rollup-darwin-arm64@4.60.1", "", { "os": "darwin", "cpu": "arm64" }, "sha512-mjCpF7GmkRtSJwon+Rq1N8+pI+8l7w5g9Z3vWj4T7abguC4Czwi3Yu/pFaLvA3TTeMVjnu3ctigusqWUfjZzvw=="], + + "@rollup/rollup-darwin-x64": ["@rollup/rollup-darwin-x64@4.60.1", "", { "os": "darwin", "cpu": "x64" }, "sha512-haZ7hJ1JT4e9hqkoT9R/19XW2QKqjfJVv+i5AGg57S+nLk9lQnJ1F/eZloRO3o9Scy9CM3wQ9l+dkXtcBgN5Ew=="], + + "@rollup/rollup-freebsd-arm64": ["@rollup/rollup-freebsd-arm64@4.60.1", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-czw90wpQq3ZsAVBlinZjAYTKduOjTywlG7fEeWKUA7oCmpA8xdTkxZZlwNJKWqILlq0wehoZcJYfBvOyhPTQ6w=="], + + "@rollup/rollup-freebsd-x64": ["@rollup/rollup-freebsd-x64@4.60.1", "", { "os": "freebsd", "cpu": "x64" }, "sha512-KVB2rqsxTHuBtfOeySEyzEOB7ltlB/ux38iu2rBQzkjbwRVlkhAGIEDiiYnO2kFOkJp+Z7pUXKyrRRFuFUKt+g=="], + + "@rollup/rollup-linux-arm-gnueabihf": ["@rollup/rollup-linux-arm-gnueabihf@4.60.1", "", { "os": "linux", "cpu": "arm" }, "sha512-L+34Qqil+v5uC0zEubW7uByo78WOCIrBvci69E7sFASRl0X7b/MB6Cqd1lky/CtcSVTydWa2WZwFuWexjS5o6g=="], + + "@rollup/rollup-linux-arm-musleabihf": ["@rollup/rollup-linux-arm-musleabihf@4.60.1", "", { "os": "linux", "cpu": "arm" }, "sha512-n83O8rt4v34hgFzlkb1ycniJh7IR5RCIqt6mz1VRJD6pmhRi0CXdmfnLu9dIUS6buzh60IvACM842Ffb3xd6Gg=="], + + "@rollup/rollup-linux-arm64-gnu": ["@rollup/rollup-linux-arm64-gnu@4.60.1", "", { "os": "linux", "cpu": "arm64" }, 
"sha512-Nql7sTeAzhTAja3QXeAI48+/+GjBJ+QmAH13snn0AJSNL50JsDqotyudHyMbO2RbJkskbMbFJfIJKWA6R1LCJQ=="], + + "@rollup/rollup-linux-arm64-musl": ["@rollup/rollup-linux-arm64-musl@4.60.1", "", { "os": "linux", "cpu": "arm64" }, "sha512-+pUymDhd0ys9GcKZPPWlFiZ67sTWV5UU6zOJat02M1+PiuSGDziyRuI/pPue3hoUwm2uGfxdL+trT6Z9rxnlMA=="], + + "@rollup/rollup-linux-loong64-gnu": ["@rollup/rollup-linux-loong64-gnu@4.60.1", "", { "os": "linux", "cpu": "none" }, "sha512-VSvgvQeIcsEvY4bKDHEDWcpW4Yw7BtlKG1GUT4FzBUlEKQK0rWHYBqQt6Fm2taXS+1bXvJT6kICu5ZwqKCnvlQ=="], + + "@rollup/rollup-linux-loong64-musl": ["@rollup/rollup-linux-loong64-musl@4.60.1", "", { "os": "linux", "cpu": "none" }, "sha512-4LqhUomJqwe641gsPp6xLfhqWMbQV04KtPp7/dIp0nzPxAkNY1AbwL5W0MQpcalLYk07vaW9Kp1PBhdpZYYcEw=="], + + "@rollup/rollup-linux-ppc64-gnu": ["@rollup/rollup-linux-ppc64-gnu@4.60.1", "", { "os": "linux", "cpu": "ppc64" }, "sha512-tLQQ9aPvkBxOc/EUT6j3pyeMD6Hb8QF2BTBnCQWP/uu1lhc9AIrIjKnLYMEroIz/JvtGYgI9dF3AxHZNaEH0rw=="], + + "@rollup/rollup-linux-ppc64-musl": ["@rollup/rollup-linux-ppc64-musl@4.60.1", "", { "os": "linux", "cpu": "ppc64" }, "sha512-RMxFhJwc9fSXP6PqmAz4cbv3kAyvD1etJFjTx4ONqFP9DkTkXsAMU4v3Vyc5BgzC+anz7nS/9tp4obsKfqkDHg=="], + + "@rollup/rollup-linux-riscv64-gnu": ["@rollup/rollup-linux-riscv64-gnu@4.60.1", "", { "os": "linux", "cpu": "none" }, "sha512-QKgFl+Yc1eEk6MmOBfRHYF6lTxiiiV3/z/BRrbSiW2I7AFTXoBFvdMEyglohPj//2mZS4hDOqeB0H1ACh3sBbg=="], + + "@rollup/rollup-linux-riscv64-musl": ["@rollup/rollup-linux-riscv64-musl@4.60.1", "", { "os": "linux", "cpu": "none" }, "sha512-RAjXjP/8c6ZtzatZcA1RaQr6O1TRhzC+adn8YZDnChliZHviqIjmvFwHcxi4JKPSDAt6Uhf/7vqcBzQJy0PDJg=="], + + "@rollup/rollup-linux-s390x-gnu": ["@rollup/rollup-linux-s390x-gnu@4.60.1", "", { "os": "linux", "cpu": "s390x" }, "sha512-wcuocpaOlaL1COBYiA89O6yfjlp3RwKDeTIA0hM7OpmhR1Bjo9j31G1uQVpDlTvwxGn2nQs65fBFL5UFd76FcQ=="], + + "@rollup/rollup-linux-x64-gnu": ["@rollup/rollup-linux-x64-gnu@4.60.1", "", { "os": "linux", "cpu": "x64" }, 
"sha512-77PpsFQUCOiZR9+LQEFg9GClyfkNXj1MP6wRnzYs0EeWbPcHs02AXu4xuUbM1zhwn3wqaizle3AEYg5aeoohhg=="], + + "@rollup/rollup-linux-x64-musl": ["@rollup/rollup-linux-x64-musl@4.60.1", "", { "os": "linux", "cpu": "x64" }, "sha512-5cIATbk5vynAjqqmyBjlciMJl1+R/CwX9oLk/EyiFXDWd95KpHdrOJT//rnUl4cUcskrd0jCCw3wpZnhIHdD9w=="], + + "@rollup/rollup-openbsd-x64": ["@rollup/rollup-openbsd-x64@4.60.1", "", { "os": "openbsd", "cpu": "x64" }, "sha512-cl0w09WsCi17mcmWqqglez9Gk8isgeWvoUZ3WiJFYSR3zjBQc2J5/ihSjpl+VLjPqjQ/1hJRcqBfLjssREQILw=="], + + "@rollup/rollup-openharmony-arm64": ["@rollup/rollup-openharmony-arm64@4.60.1", "", { "os": "none", "cpu": "arm64" }, "sha512-4Cv23ZrONRbNtbZa37mLSueXUCtN7MXccChtKpUnQNgF010rjrjfHx3QxkS2PI7LqGT5xXyYs1a7LbzAwT0iCA=="], + + "@rollup/rollup-win32-arm64-msvc": ["@rollup/rollup-win32-arm64-msvc@4.60.1", "", { "os": "win32", "cpu": "arm64" }, "sha512-i1okWYkA4FJICtr7KpYzFpRTHgy5jdDbZiWfvny21iIKky5YExiDXP+zbXzm3dUcFpkEeYNHgQ5fuG236JPq0g=="], + + "@rollup/rollup-win32-ia32-msvc": ["@rollup/rollup-win32-ia32-msvc@4.60.1", "", { "os": "win32", "cpu": "ia32" }, "sha512-u09m3CuwLzShA0EYKMNiFgcjjzwqtUMLmuCJLeZWjjOYA3IT2Di09KaxGBTP9xVztWyIWjVdsB2E9goMjZvTQg=="], + + "@rollup/rollup-win32-x64-gnu": ["@rollup/rollup-win32-x64-gnu@4.60.1", "", { "os": "win32", "cpu": "x64" }, "sha512-k+600V9Zl1CM7eZxJgMyTUzmrmhB/0XZnF4pRypKAlAgxmedUA+1v9R+XOFv56W4SlHEzfeMtzujLJD22Uz5zg=="], + + "@rollup/rollup-win32-x64-msvc": ["@rollup/rollup-win32-x64-msvc@4.60.1", "", { "os": "win32", "cpu": "x64" }, "sha512-lWMnixq/QzxyhTV6NjQJ4SFo1J6PvOX8vUx5Wb4bBPsEb+8xZ89Bz6kOXpfXj9ak9AHTQVQzlgzBEc1SyM27xQ=="], + + "@shikijs/core": ["@shikijs/core@4.0.2", "", { "dependencies": { "@shikijs/primitive": "4.0.2", "@shikijs/types": "4.0.2", "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4", "hast-util-to-html": "^9.0.5" } }, "sha512-hxT0YF4ExEqB8G/qFdtJvpmHXBYJ2lWW7qTHDarVkIudPFE6iCIrqdgWxGn5s+ppkGXI0aEGlibI0PAyzP3zlw=="], + + "@shikijs/engine-javascript": 
["@shikijs/engine-javascript@4.0.2", "", { "dependencies": { "@shikijs/types": "4.0.2", "@shikijs/vscode-textmate": "^10.0.2", "oniguruma-to-es": "^4.3.4" } }, "sha512-7PW0Nm49DcoUIQEXlJhNNBHyoGMjalRETTCcjMqEaMoJRLljy1Bi/EGV3/qLBgLKQejdspiiYuHGQW6dX94Nag=="], + + "@shikijs/engine-oniguruma": ["@shikijs/engine-oniguruma@4.0.2", "", { "dependencies": { "@shikijs/types": "4.0.2", "@shikijs/vscode-textmate": "^10.0.2" } }, "sha512-UpCB9Y2sUKlS9z8juFSKz7ZtysmeXCgnRF0dlhXBkmQnek7lAToPte8DkxmEYGNTMii72zU/lyXiCB6StuZeJg=="], + + "@shikijs/langs": ["@shikijs/langs@4.0.2", "", { "dependencies": { "@shikijs/types": "4.0.2" } }, "sha512-KaXby5dvoeuZzN0rYQiPMjFoUrz4hgwIE+D6Du9owcHcl6/g16/yT5BQxSW5cGt2MZBz6Hl0YuRqf12omRfUUg=="], + + "@shikijs/primitive": ["@shikijs/primitive@4.0.2", "", { "dependencies": { "@shikijs/types": "4.0.2", "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4" } }, "sha512-M6UMPrSa3fN5ayeJwFVl9qWofl273wtK1VG8ySDZ1mQBfhCpdd8nEx7nPZ/tk7k+TYcpqBZzj/AnwxT9lO+HJw=="], + + "@shikijs/themes": ["@shikijs/themes@4.0.2", "", { "dependencies": { "@shikijs/types": "4.0.2" } }, "sha512-mjCafwt8lJJaVSsQvNVrJumbnnj1RI8jbUKrPKgE6E3OvQKxnuRoBaYC51H4IGHePsGN/QtALglWBU7DoKDFnA=="], + + "@shikijs/types": ["@shikijs/types@4.0.2", "", { "dependencies": { "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4" } }, "sha512-qzbeRooUTPnLE+sHD/Z8DStmaDgnbbc/pMrU203950aRqjX/6AFHeDYT+j00y2lPdz0ywJKx7o/7qnqTivtlXg=="], + + "@shikijs/vscode-textmate": ["@shikijs/vscode-textmate@10.0.2", "", {}, "sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg=="], + + "@simple-libs/child-process-utils": ["@simple-libs/child-process-utils@1.0.2", "", { "dependencies": { "@simple-libs/stream-utils": "^1.2.0" } }, "sha512-/4R8QKnd/8agJynkNdJmNw2MBxuFTRcNFnE5Sg/G+jkSsV8/UBgULMzhizWWW42p8L5H7flImV2ATi79Ove2Tw=="], + + "@simple-libs/stream-utils": ["@simple-libs/stream-utils@1.2.0", "", {}, 
"sha512-KxXvfapcixpz6rVEB6HPjOUZT22yN6v0vI0urQSk1L8MlEWPDFCZkhw2xmkyoTGYeFw7tWTZd7e3lVzRZRN/EA=="], + + "@tailwindcss/node": ["@tailwindcss/node@4.2.2", "", { "dependencies": { "@jridgewell/remapping": "^2.3.5", "enhanced-resolve": "^5.19.0", "jiti": "^2.6.1", "lightningcss": "1.32.0", "magic-string": "^0.30.21", "source-map-js": "^1.2.1", "tailwindcss": "4.2.2" } }, "sha512-pXS+wJ2gZpVXqFaUEjojq7jzMpTGf8rU6ipJz5ovJV6PUGmlJ+jvIwGrzdHdQ80Sg+wmQxUFuoW1UAAwHNEdFA=="], + + "@tailwindcss/oxide": ["@tailwindcss/oxide@4.2.2", "", { "optionalDependencies": { "@tailwindcss/oxide-android-arm64": "4.2.2", "@tailwindcss/oxide-darwin-arm64": "4.2.2", "@tailwindcss/oxide-darwin-x64": "4.2.2", "@tailwindcss/oxide-freebsd-x64": "4.2.2", "@tailwindcss/oxide-linux-arm-gnueabihf": "4.2.2", "@tailwindcss/oxide-linux-arm64-gnu": "4.2.2", "@tailwindcss/oxide-linux-arm64-musl": "4.2.2", "@tailwindcss/oxide-linux-x64-gnu": "4.2.2", "@tailwindcss/oxide-linux-x64-musl": "4.2.2", "@tailwindcss/oxide-wasm32-wasi": "4.2.2", "@tailwindcss/oxide-win32-arm64-msvc": "4.2.2", "@tailwindcss/oxide-win32-x64-msvc": "4.2.2" } }, "sha512-qEUA07+E5kehxYp9BVMpq9E8vnJuBHfJEC0vPC5e7iL/hw7HR61aDKoVoKzrG+QKp56vhNZe4qwkRmMC0zDLvg=="], + + "@tailwindcss/oxide-android-arm64": ["@tailwindcss/oxide-android-arm64@4.2.2", "", { "os": "android", "cpu": "arm64" }, "sha512-dXGR1n+P3B6748jZO/SvHZq7qBOqqzQ+yFrXpoOWWALWndF9MoSKAT3Q0fYgAzYzGhxNYOoysRvYlpixRBBoDg=="], + + "@tailwindcss/oxide-darwin-arm64": ["@tailwindcss/oxide-darwin-arm64@4.2.2", "", { "os": "darwin", "cpu": "arm64" }, "sha512-iq9Qjr6knfMpZHj55/37ouZeykwbDqF21gPFtfnhCCKGDcPI/21FKC9XdMO/XyBM7qKORx6UIhGgg6jLl7BZlg=="], + + "@tailwindcss/oxide-darwin-x64": ["@tailwindcss/oxide-darwin-x64@4.2.2", "", { "os": "darwin", "cpu": "x64" }, "sha512-BlR+2c3nzc8f2G639LpL89YY4bdcIdUmiOOkv2GQv4/4M0vJlpXEa0JXNHhCHU7VWOKWT/CjqHdTP8aUuDJkuw=="], + + "@tailwindcss/oxide-freebsd-x64": ["@tailwindcss/oxide-freebsd-x64@4.2.2", "", { "os": "freebsd", "cpu": "x64" }, 
"sha512-YUqUgrGMSu2CDO82hzlQ5qSb5xmx3RUrke/QgnoEx7KvmRJHQuZHZmZTLSuuHwFf0DJPybFMXMYf+WJdxHy/nQ=="], + + "@tailwindcss/oxide-linux-arm-gnueabihf": ["@tailwindcss/oxide-linux-arm-gnueabihf@4.2.2", "", { "os": "linux", "cpu": "arm" }, "sha512-FPdhvsW6g06T9BWT0qTwiVZYE2WIFo2dY5aCSpjG/S/u1tby+wXoslXS0kl3/KXnULlLr1E3NPRRw0g7t2kgaQ=="], + + "@tailwindcss/oxide-linux-arm64-gnu": ["@tailwindcss/oxide-linux-arm64-gnu@4.2.2", "", { "os": "linux", "cpu": "arm64" }, "sha512-4og1V+ftEPXGttOO7eCmW7VICmzzJWgMx+QXAJRAhjrSjumCwWqMfkDrNu1LXEQzNAwz28NCUpucgQPrR4S2yw=="], + + "@tailwindcss/oxide-linux-arm64-musl": ["@tailwindcss/oxide-linux-arm64-musl@4.2.2", "", { "os": "linux", "cpu": "arm64" }, "sha512-oCfG/mS+/+XRlwNjnsNLVwnMWYH7tn/kYPsNPh+JSOMlnt93mYNCKHYzylRhI51X+TbR+ufNhhKKzm6QkqX8ag=="], + + "@tailwindcss/oxide-linux-x64-gnu": ["@tailwindcss/oxide-linux-x64-gnu@4.2.2", "", { "os": "linux", "cpu": "x64" }, "sha512-rTAGAkDgqbXHNp/xW0iugLVmX62wOp2PoE39BTCGKjv3Iocf6AFbRP/wZT/kuCxC9QBh9Pu8XPkv/zCZB2mcMg=="], + + "@tailwindcss/oxide-linux-x64-musl": ["@tailwindcss/oxide-linux-x64-musl@4.2.2", "", { "os": "linux", "cpu": "x64" }, "sha512-XW3t3qwbIwiSyRCggeO2zxe3KWaEbM0/kW9e8+0XpBgyKU4ATYzcVSMKteZJ1iukJ3HgHBjbg9P5YPRCVUxlnQ=="], + + "@tailwindcss/oxide-wasm32-wasi": ["@tailwindcss/oxide-wasm32-wasi@4.2.2", "", { "dependencies": { "@emnapi/core": "^1.8.1", "@emnapi/runtime": "^1.8.1", "@emnapi/wasi-threads": "^1.1.0", "@napi-rs/wasm-runtime": "^1.1.1", "@tybys/wasm-util": "^0.10.1", "tslib": "^2.8.1" }, "cpu": "none" }, "sha512-eKSztKsmEsn1O5lJ4ZAfyn41NfG7vzCg496YiGtMDV86jz1q/irhms5O0VrY6ZwTUkFy/EKG3RfWgxSI3VbZ8Q=="], + + "@tailwindcss/oxide-win32-arm64-msvc": ["@tailwindcss/oxide-win32-arm64-msvc@4.2.2", "", { "os": "win32", "cpu": "arm64" }, "sha512-qPmaQM4iKu5mxpsrWZMOZRgZv1tOZpUm+zdhhQP0VhJfyGGO3aUKdbh3gDZc/dPLQwW4eSqWGrrcWNBZWUWaXQ=="], + + "@tailwindcss/oxide-win32-x64-msvc": ["@tailwindcss/oxide-win32-x64-msvc@4.2.2", "", { "os": "win32", "cpu": "x64" }, 
"sha512-1T/37VvI7WyH66b+vqHj/cLwnCxt7Qt3WFu5Q8hk65aOvlwAhs7rAp1VkulBJw/N4tMirXjVnylTR72uI0HGcA=="], + + "@tailwindcss/postcss": ["@tailwindcss/postcss@4.2.2", "", { "dependencies": { "@alloc/quick-lru": "^5.2.0", "@tailwindcss/node": "4.2.2", "@tailwindcss/oxide": "4.2.2", "postcss": "^8.5.6", "tailwindcss": "4.2.2" } }, "sha512-n4goKQbW8RVXIbNKRB/45LzyUqN451deQK0nzIeauVEqjlI49slUlgKYJM2QyUzap/PcpnS7kzSUmPb1sCRvYQ=="], + + "@types/babel__core": ["@types/babel__core@7.20.5", "", { "dependencies": { "@babel/parser": "^7.20.7", "@babel/types": "^7.20.7", "@types/babel__generator": "*", "@types/babel__template": "*", "@types/babel__traverse": "*" } }, "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA=="], + + "@types/babel__generator": ["@types/babel__generator@7.27.0", "", { "dependencies": { "@babel/types": "^7.0.0" } }, "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg=="], + + "@types/babel__template": ["@types/babel__template@7.4.4", "", { "dependencies": { "@babel/parser": "^7.1.0", "@babel/types": "^7.0.0" } }, "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A=="], + + "@types/babel__traverse": ["@types/babel__traverse@7.28.0", "", { "dependencies": { "@babel/types": "^7.28.2" } }, "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q=="], + + "@types/debug": ["@types/debug@4.1.13", "", { "dependencies": { "@types/ms": "*" } }, "sha512-KSVgmQmzMwPlmtljOomayoR89W4FynCAi3E8PPs7vmDVPe84hT+vGPKkJfThkmXs0x0jAaa9U8uW8bbfyS2fWw=="], + + "@types/estree": ["@types/estree@1.0.8", "", {}, "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w=="], + + "@types/estree-jsx": ["@types/estree-jsx@1.0.5", "", { "dependencies": { "@types/estree": "*" } }, "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg=="], + + 
"@types/hast": ["@types/hast@3.0.4", "", { "dependencies": { "@types/unist": "*" } }, "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ=="], + + "@types/json-schema": ["@types/json-schema@7.0.15", "", {}, "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA=="], + + "@types/mdast": ["@types/mdast@4.0.4", "", { "dependencies": { "@types/unist": "*" } }, "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA=="], + + "@types/ms": ["@types/ms@2.1.0", "", {}, "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA=="], + + "@types/nlcst": ["@types/nlcst@2.0.3", "", { "dependencies": { "@types/unist": "*" } }, "sha512-vSYNSDe6Ix3q+6Z7ri9lyWqgGhJTmzRjZRqyq15N0Z/1/UnVsno9G/N40NBijoYx2seFDIl0+B2mgAb9mezUCA=="], + + "@types/node": ["@types/node@25.5.0", "", { "dependencies": { "undici-types": "~7.18.0" } }, "sha512-jp2P3tQMSxWugkCUKLRPVUpGaL5MVFwF8RDuSRztfwgN1wmqJeMSbKlnEtQqU8UrhTmzEmZdu2I6v2dpp7XIxw=="], + + "@types/react": ["@types/react@19.2.14", "", { "dependencies": { "csstype": "^3.2.2" } }, "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w=="], + + "@types/react-dom": ["@types/react-dom@19.2.3", "", { "peerDependencies": { "@types/react": "^19.2.0" } }, "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ=="], + + "@types/sax": ["@types/sax@1.2.7", "", { "dependencies": { "@types/node": "*" } }, "sha512-rO73L89PJxeYM3s3pPPjiPgVVcymqU490g0YO5n5By0k2Erzj6tay/4lr1CHAAU4JyOWd1rpQ8bCf6cZfHU96A=="], + + "@types/unist": ["@types/unist@3.0.3", "", {}, "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q=="], + + "@typescript-eslint/eslint-plugin": ["@typescript-eslint/eslint-plugin@8.57.2", "", { "dependencies": { "@eslint-community/regexpp": "^4.12.2", 
"@typescript-eslint/scope-manager": "8.57.2", "@typescript-eslint/type-utils": "8.57.2", "@typescript-eslint/utils": "8.57.2", "@typescript-eslint/visitor-keys": "8.57.2", "ignore": "^7.0.5", "natural-compare": "^1.4.0", "ts-api-utils": "^2.4.0" }, "peerDependencies": { "@typescript-eslint/parser": "^8.57.2", "eslint": "^8.57.0 || ^9.0.0 || ^10.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-NZZgp0Fm2IkD+La5PR81sd+g+8oS6JwJje+aRWsDocxHkjyRw0J5L5ZTlN3LI1LlOcGL7ph3eaIUmTXMIjLk0w=="], + + "@typescript-eslint/parser": ["@typescript-eslint/parser@8.57.2", "", { "dependencies": { "@typescript-eslint/scope-manager": "8.57.2", "@typescript-eslint/types": "8.57.2", "@typescript-eslint/typescript-estree": "8.57.2", "@typescript-eslint/visitor-keys": "8.57.2", "debug": "^4.4.3" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0 || ^10.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-30ScMRHIAD33JJQkgfGW1t8CURZtjc2JpTrq5n2HFhOefbAhb7ucc7xJwdWcrEtqUIYJ73Nybpsggii6GtAHjA=="], + + "@typescript-eslint/project-service": ["@typescript-eslint/project-service@8.57.2", "", { "dependencies": { "@typescript-eslint/tsconfig-utils": "^8.57.2", "@typescript-eslint/types": "^8.57.2", "debug": "^4.4.3" }, "peerDependencies": { "typescript": ">=4.8.4 <6.0.0" } }, "sha512-FuH0wipFywXRTHf+bTTjNyuNQQsQC3qh/dYzaM4I4W0jrCqjCVuUh99+xd9KamUfmCGPvbO8NDngo/vsnNVqgw=="], + + "@typescript-eslint/scope-manager": ["@typescript-eslint/scope-manager@8.57.2", "", { "dependencies": { "@typescript-eslint/types": "8.57.2", "@typescript-eslint/visitor-keys": "8.57.2" } }, "sha512-snZKH+W4WbWkrBqj4gUNRIGb/jipDW3qMqVJ4C9rzdFc+wLwruxk+2a5D+uoFcKPAqyqEnSb4l2ULuZf95eSkw=="], + + "@typescript-eslint/tsconfig-utils": ["@typescript-eslint/tsconfig-utils@8.57.2", "", { "peerDependencies": { "typescript": ">=4.8.4 <6.0.0" } }, "sha512-3Lm5DSM+DCowsUOJC+YqHHnKEfFh5CoGkj5Z31NQSNF4l5wdOwqGn99wmwN/LImhfY3KJnmordBq/4+VDe2eKw=="], + + "@typescript-eslint/type-utils": ["@typescript-eslint/type-utils@8.57.2", "", { 
"dependencies": { "@typescript-eslint/types": "8.57.2", "@typescript-eslint/typescript-estree": "8.57.2", "@typescript-eslint/utils": "8.57.2", "debug": "^4.4.3", "ts-api-utils": "^2.4.0" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0 || ^10.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-Co6ZCShm6kIbAM/s+oYVpKFfW7LBc6FXoPXjTRQ449PPNBY8U0KZXuevz5IFuuUj2H9ss40atTaf9dlGLzbWZg=="], + + "@typescript-eslint/types": ["@typescript-eslint/types@8.57.2", "", {}, "sha512-/iZM6FnM4tnx9csuTxspMW4BOSegshwX5oBDznJ7S4WggL7Vczz5d2W11ecc4vRrQMQHXRSxzrCsyG5EsPPTbA=="], + + "@typescript-eslint/typescript-estree": ["@typescript-eslint/typescript-estree@8.57.2", "", { "dependencies": { "@typescript-eslint/project-service": "8.57.2", "@typescript-eslint/tsconfig-utils": "8.57.2", "@typescript-eslint/types": "8.57.2", "@typescript-eslint/visitor-keys": "8.57.2", "debug": "^4.4.3", "minimatch": "^10.2.2", "semver": "^7.7.3", "tinyglobby": "^0.2.15", "ts-api-utils": "^2.4.0" }, "peerDependencies": { "typescript": ">=4.8.4 <6.0.0" } }, "sha512-2MKM+I6g8tJxfSmFKOnHv2t8Sk3T6rF20A1Puk0svLK+uVapDZB/4pfAeB7nE83uAZrU6OxW+HmOd5wHVdXwXA=="], + + "@typescript-eslint/utils": ["@typescript-eslint/utils@8.57.2", "", { "dependencies": { "@eslint-community/eslint-utils": "^4.9.1", "@typescript-eslint/scope-manager": "8.57.2", "@typescript-eslint/types": "8.57.2", "@typescript-eslint/typescript-estree": "8.57.2" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0 || ^10.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-krRIbvPK1ju1WBKIefiX+bngPs+odIQUtR7kymzPfo1POVw3jlF+nLkmexdSSd4UCbDcQn+wMBATOOmpBbqgKg=="], + + "@typescript-eslint/visitor-keys": ["@typescript-eslint/visitor-keys@8.57.2", "", { "dependencies": { "@typescript-eslint/types": "8.57.2", "eslint-visitor-keys": "^5.0.0" } }, "sha512-zhahknjobV2FiD6Ee9iLbS7OV9zi10rG26odsQdfBO/hjSzUQbkIYgda+iNKK1zNiW2ey+Lf8MU5btN17V3dUw=="], + + "@ungap/structured-clone": ["@ungap/structured-clone@1.3.0", "", {}, 
"sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g=="], + + "@vitejs/plugin-react": ["@vitejs/plugin-react@5.2.0", "", { "dependencies": { "@babel/core": "^7.29.0", "@babel/plugin-transform-react-jsx-self": "^7.27.1", "@babel/plugin-transform-react-jsx-source": "^7.27.1", "@rolldown/pluginutils": "1.0.0-rc.3", "@types/babel__core": "^7.20.5", "react-refresh": "^0.18.0" }, "peerDependencies": { "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0" } }, "sha512-YmKkfhOAi3wsB1PhJq5Scj3GXMn3WvtQ/JC0xoopuHoXSdmtdStOpFrYaT1kie2YgFBcIe64ROzMYRjCrYOdYw=="], + + "acorn": ["acorn@8.16.0", "", { "bin": { "acorn": "bin/acorn" } }, "sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw=="], + + "acorn-jsx": ["acorn-jsx@5.3.2", "", { "peerDependencies": { "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" } }, "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ=="], + + "ajv": ["ajv@6.14.0", "", { "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", "json-schema-traverse": "^0.4.1", "uri-js": "^4.2.2" } }, "sha512-IWrosm/yrn43eiKqkfkHis7QioDleaXQHdDVPKg0FSwwd/DuvyX79TZnFOnYpB7dcsFAMmtFztZuXPDvSePkFw=="], + + "ansi-escapes": ["ansi-escapes@4.3.2", "", { "dependencies": { "type-fest": "^0.21.3" } }, "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ=="], + + "ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="], + + "ansi-styles": ["ansi-styles@3.2.1", "", { "dependencies": { "color-convert": "^1.9.0" } }, "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA=="], + + "anymatch": ["anymatch@3.1.3", "", { "dependencies": { "normalize-path": "^3.0.0", "picomatch": "^2.0.4" } }, 
"sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw=="], + + "arg": ["arg@5.0.2", "", {}, "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg=="], + + "argparse": ["argparse@2.0.1", "", {}, "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q=="], + + "aria-query": ["aria-query@5.3.2", "", {}, "sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw=="], + + "array-ify": ["array-ify@1.0.0", "", {}, "sha512-c5AMf34bKdvPhQ7tBGhqkgKNUzMr4WUs+WDtC2ZUGOUncbxKMTvqxYctiseW3+L4bA8ec+GcZ6/A/FW4m8ukng=="], + + "array-iterate": ["array-iterate@2.0.1", "", {}, "sha512-I1jXZMjAgCMmxT4qxXfPXa6SthSoE8h6gkSI9BGGNv8mP8G/v0blc+qFnZu6K42vTOiuME596QaLO0TP3Lk0xg=="], + + "astro": ["astro@6.1.1", "", { "dependencies": { "@astrojs/compiler": "^3.0.1", "@astrojs/internal-helpers": "0.8.0", "@astrojs/markdown-remark": "7.1.0", "@astrojs/telemetry": "3.3.0", "@capsizecss/unpack": "^4.0.0", "@clack/prompts": "^1.1.0", "@oslojs/encoding": "^1.1.0", "@rollup/pluginutils": "^5.3.0", "aria-query": "^5.3.2", "axobject-query": "^4.1.0", "ci-info": "^4.4.0", "clsx": "^2.1.1", "common-ancestor-path": "^2.0.0", "cookie": "^1.1.1", "devalue": "^5.6.3", "diff": "^8.0.3", "dlv": "^1.1.3", "dset": "^3.1.4", "es-module-lexer": "^2.0.0", "esbuild": "^0.27.3", "flattie": "^1.1.1", "fontace": "~0.4.1", "github-slugger": "^2.0.0", "html-escaper": "3.0.3", "http-cache-semantics": "^4.2.0", "js-yaml": "^4.1.1", "magic-string": "^0.30.21", "magicast": "^0.5.2", "mrmime": "^2.0.1", "neotraverse": "^0.6.18", "obug": "^2.1.1", "p-limit": "^7.3.0", "p-queue": "^9.1.0", "package-manager-detector": "^1.6.0", "piccolore": "^0.1.3", "picomatch": "^4.0.3", "rehype": "^13.0.2", "semver": "^7.7.4", "shiki": "^4.0.2", "smol-toml": "^1.6.0", "svgo": "^4.0.1", "tinyclip": "^0.1.12", "tinyexec": "^1.0.4", "tinyglobby": "^0.2.15", "tsconfck": "^3.1.6", "ultrahtml": 
"^1.6.0", "unifont": "~0.7.4", "unist-util-visit": "^5.1.0", "unstorage": "^1.17.4", "vfile": "^6.0.3", "vite": "^7.3.1", "vitefu": "^1.1.2", "xxhash-wasm": "^1.1.0", "yargs-parser": "^22.0.0", "zod": "^4.3.6" }, "optionalDependencies": { "sharp": "^0.34.0" }, "bin": { "astro": "bin/astro.mjs" } }, "sha512-vq8sHpu1JsY1fWAunn+tdKNbVDmLQNiVdyuGsVT2csgITdFGXXVAyEXFWc1DzkMN0ehElPeiHnqItyQOJK+GqA=="], + + "at-least-node": ["at-least-node@1.0.0", "", {}, "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg=="], + + "autoprefixer": ["autoprefixer@10.4.27", "", { "dependencies": { "browserslist": "^4.28.1", "caniuse-lite": "^1.0.30001774", "fraction.js": "^5.3.4", "picocolors": "^1.1.1", "postcss-value-parser": "^4.2.0" }, "peerDependencies": { "postcss": "^8.1.0" }, "bin": { "autoprefixer": "bin/autoprefixer" } }, "sha512-NP9APE+tO+LuJGn7/9+cohklunJsXWiaWEfV3si4Gi/XHDwVNgkwr1J3RQYFIvPy76GmJ9/bW8vyoU1LcxwKHA=="], + + "aws4fetch": ["aws4fetch@1.0.20", "", {}, "sha512-/djoAN709iY65ETD6LKCtyyEI04XIBP5xVvfmNxsEP0uJB5tyaGBztSryRr4HqMStr9R06PisQE7m9zDTXKu6g=="], + + "axobject-query": ["axobject-query@4.1.0", "", {}, "sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ=="], + + "bail": ["bail@2.0.2", "", {}, "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw=="], + + "balanced-match": ["balanced-match@1.0.2", "", {}, "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="], + + "base64-js": ["base64-js@1.5.1", "", {}, "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA=="], + + "baseline-browser-mapping": ["baseline-browser-mapping@2.10.12", "", { "bin": { "baseline-browser-mapping": "dist/cli.cjs" } }, "sha512-qyq26DxfY4awP2gIRXhhLWfwzwI+N5Nxk6iQi8EFizIaWIjqicQTE4sLnZZVdeKPRcVNoJOkkpfzoIYuvCKaIQ=="], + + "bl": ["bl@4.1.0", "", { "dependencies": { "buffer": "^5.5.0", 
"inherits": "^2.0.4", "readable-stream": "^3.4.0" } }, "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w=="], + + "boolbase": ["boolbase@1.0.0", "", {}, "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww=="], + + "brace-expansion": ["brace-expansion@1.1.13", "", { "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" } }, "sha512-9ZLprWS6EENmhEOpjCYW2c8VkmOvckIJZfkr7rBW6dObmfgJ/L1GpSYW5Hpo9lDz4D1+n0Ckz8rU7FwHDQiG/w=="], + + "braces": ["braces@3.0.3", "", { "dependencies": { "fill-range": "^7.1.1" } }, "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA=="], + + "browserslist": ["browserslist@4.28.1", "", { "dependencies": { "baseline-browser-mapping": "^2.9.0", "caniuse-lite": "^1.0.30001759", "electron-to-chromium": "^1.5.263", "node-releases": "^2.0.27", "update-browserslist-db": "^1.2.0" }, "bin": { "browserslist": "cli.js" } }, "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA=="], + + "buffer": ["buffer@5.7.1", "", { "dependencies": { "base64-js": "^1.3.1", "ieee754": "^1.1.13" } }, "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ=="], + + "buffer-from": ["buffer-from@1.1.2", "", {}, "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ=="], + + "cachedir": ["cachedir@2.3.0", "", {}, "sha512-A+Fezp4zxnit6FanDmv9EqXNAi3vt9DWp51/71UEhXukb7QUuvtv9344h91dyAxuTLoSYJFU299qzR3tzwPAhw=="], + + "callsites": ["callsites@3.1.0", "", {}, "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ=="], + + "caniuse-lite": ["caniuse-lite@1.0.30001782", "", {}, "sha512-dZcaJLJeDMh4rELYFw1tvSn1bhZWYFOt468FcbHHxx/Z/dFidd1I6ciyFdi3iwfQCyOjqo9upF6lGQYtMiJWxw=="], + + "ccount": ["ccount@2.0.1", "", {}, 
"sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg=="], + + "chalk": ["chalk@2.4.2", "", { "dependencies": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", "supports-color": "^5.3.0" } }, "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ=="], + + "character-entities": ["character-entities@2.0.2", "", {}, "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ=="], + + "character-entities-html4": ["character-entities-html4@2.1.0", "", {}, "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA=="], + + "character-entities-legacy": ["character-entities-legacy@3.0.0", "", {}, "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ=="], + + "character-reference-invalid": ["character-reference-invalid@2.0.1", "", {}, "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw=="], + + "chardet": ["chardet@0.7.0", "", {}, "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA=="], + + "chokidar": ["chokidar@5.0.0", "", { "dependencies": { "readdirp": "^5.0.0" } }, "sha512-TQMmc3w+5AxjpL8iIiwebF73dRDF4fBIieAqGn9RGCWaEVwQ6Fb2cGe31Yns0RRIzii5goJ1Y7xbMwo1TxMplw=="], + + "ci-info": ["ci-info@4.4.0", "", {}, "sha512-77PSwercCZU2Fc4sX94eF8k8Pxte6JAwL4/ICZLFjJLqegs7kCuAsqqj/70NQF6TvDpgFjkubQB2FW2ZZddvQg=="], + + "cli-cursor": ["cli-cursor@3.1.0", "", { "dependencies": { "restore-cursor": "^3.1.0" } }, "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw=="], + + "cli-spinners": ["cli-spinners@2.9.2", "", {}, "sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg=="], + + "cli-truncate": ["cli-truncate@5.2.0", "", { "dependencies": { "slice-ansi": "^8.0.0", "string-width": "^8.2.0" } }, 
"sha512-xRwvIOMGrfOAnM1JYtqQImuaNtDEv9v6oIYAs4LIHwTiKee8uwvIi363igssOC0O5U04i4AlENs79LQLu9tEMw=="], + + "cli-width": ["cli-width@3.0.0", "", {}, "sha512-FxqpkPPwu1HjuN93Omfm4h8uIanXofW0RxVEW3k5RKx+mJJYSthzNhp32Kzxxy3YAEZ/Dc/EWN1vZRY0+kOhbw=="], + + "cliui": ["cliui@8.0.1", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.1", "wrap-ansi": "^7.0.0" } }, "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ=="], + + "clone": ["clone@1.0.4", "", {}, "sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg=="], + + "clsx": ["clsx@2.1.1", "", {}, "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA=="], + + "color-convert": ["color-convert@1.9.3", "", { "dependencies": { "color-name": "1.1.3" } }, "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg=="], + + "color-name": ["color-name@1.1.3", "", {}, "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw=="], + + "colorette": ["colorette@2.0.20", "", {}, "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w=="], + + "comma-separated-tokens": ["comma-separated-tokens@2.0.3", "", {}, "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg=="], + + "commander": ["commander@14.0.3", "", {}, "sha512-H+y0Jo/T1RZ9qPP4Eh1pkcQcLRglraJaSLoyOtHxu6AapkjWVCy2Sit1QQ4x3Dng8qDlSsZEet7g5Pq06MvTgw=="], + + "commitizen": ["commitizen@4.3.1", "", { "dependencies": { "cachedir": "2.3.0", "cz-conventional-changelog": "3.3.0", "dedent": "0.7.0", "detect-indent": "6.1.0", "find-node-modules": "^2.1.2", "find-root": "1.1.0", "fs-extra": "9.1.0", "glob": "7.2.3", "inquirer": "8.2.5", "is-utf8": "^0.2.1", "lodash": "4.17.21", "minimist": "1.2.7", "strip-bom": "4.0.0", "strip-json-comments": "3.1.1" }, "bin": { "cz": "bin/git-cz", "git-cz": 
"bin/git-cz", "commitizen": "bin/commitizen" } }, "sha512-gwAPAVTy/j5YcOOebcCRIijn+mSjWJC+IYKivTu6aG8Ei/scoXgfsMRnuAk6b0GRste2J4NGxVdMN3ZpfNaVaw=="], + + "common-ancestor-path": ["common-ancestor-path@2.0.0", "", {}, "sha512-dnN3ibLeoRf2HNC+OlCiNc5d2zxbLJXOtiZUudNFSXZrNSydxcCsSpRzXwfu7BBWCIfHPw+xTayeBvJCP/D8Ng=="], + + "compare-func": ["compare-func@2.0.0", "", { "dependencies": { "array-ify": "^1.0.0", "dot-prop": "^5.1.0" } }, "sha512-zHig5N+tPWARooBnb0Zx1MFcdfpyJrfTJ3Y5L+IFvUm8rM74hHz66z0gw0x4tijh5CorKkKUCnW82R2vmpeCRA=="], + + "concat-map": ["concat-map@0.0.1", "", {}, "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg=="], + + "conventional-changelog-angular": ["conventional-changelog-angular@8.3.1", "", { "dependencies": { "compare-func": "^2.0.0" } }, "sha512-6gfI3otXK5Ph5DfCOI1dblr+kN3FAm5a97hYoQkqNZxOaYa5WKfXH+AnpsmS+iUH2mgVC2Cg2Qw9m5OKcmNrIg=="], + + "conventional-changelog-conventionalcommits": ["conventional-changelog-conventionalcommits@9.3.1", "", { "dependencies": { "compare-func": "^2.0.0" } }, "sha512-dTYtpIacRpcZgrvBYvBfArMmK2xvIpv2TaxM0/ZI5CBtNUzvF2x0t15HsbRABWprS6UPmvj+PzHVjSx4qAVKyw=="], + + "conventional-commit-types": ["conventional-commit-types@3.0.0", "", {}, "sha512-SmmCYnOniSsAa9GqWOeLqc179lfr5TRu5b4QFDkbsrJ5TZjPJx85wtOr3zn+1dbeNiXDKGPbZ72IKbPhLXh/Lg=="], + + "conventional-commits-parser": ["conventional-commits-parser@6.4.0", "", { "dependencies": { "@simple-libs/stream-utils": "^1.2.0", "meow": "^13.0.0" }, "bin": { "conventional-commits-parser": "dist/cli/index.js" } }, "sha512-tvRg7FIBNlyPzjdG8wWRlPHQJJHI7DylhtRGeU9Lq+JuoPh5BKpPRX83ZdLrvXuOSu5Eo/e7SzOQhU4Hd2Miuw=="], + + "convert-source-map": ["convert-source-map@2.0.0", "", {}, "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg=="], + + "cookie": ["cookie@1.1.1", "", {}, "sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ=="], + + "cookie-es": 
["cookie-es@1.2.2", "", {}, "sha512-+W7VmiVINB+ywl1HGXJXmrqkOhpKrIiVZV6tQuV54ZyQC7MMuBt81Vc336GMLoHBq5hV/F9eXgt5Mnx0Rha5Fg=="], + + "cosmiconfig": ["cosmiconfig@9.0.1", "", { "dependencies": { "env-paths": "^2.2.1", "import-fresh": "^3.3.0", "js-yaml": "^4.1.0", "parse-json": "^5.2.0" }, "peerDependencies": { "typescript": ">=4.9.5" }, "optionalPeers": ["typescript"] }, "sha512-hr4ihw+DBqcvrsEDioRO31Z17x71pUYoNe/4h6Z0wB72p7MU7/9gH8Q3s12NFhHPfYBBOV3qyfUxmr/Yn3shnQ=="], + + "cosmiconfig-typescript-loader": ["cosmiconfig-typescript-loader@6.2.0", "", { "dependencies": { "jiti": "^2.6.1" }, "peerDependencies": { "@types/node": "*", "cosmiconfig": ">=9", "typescript": ">=5" } }, "sha512-GEN39v7TgdxgIoNcdkRE3uiAzQt3UXLyHbRHD6YoL048XAeOomyxaP+Hh/+2C6C2wYjxJ2onhJcsQp+L4YEkVQ=="], + + "cross-spawn": ["cross-spawn@7.0.6", "", { "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", "which": "^2.0.1" } }, "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA=="], + + "crossws": ["crossws@0.3.5", "", { "dependencies": { "uncrypto": "^0.1.3" } }, "sha512-ojKiDvcmByhwa8YYqbQI/hg7MEU0NC03+pSdEq4ZUnZR9xXpwk7E43SMNGkn+JxJGPFtNvQ48+vV2p+P1ml5PA=="], + + "css-select": ["css-select@5.2.2", "", { "dependencies": { "boolbase": "^1.0.0", "css-what": "^6.1.0", "domhandler": "^5.0.2", "domutils": "^3.0.1", "nth-check": "^2.0.1" } }, "sha512-TizTzUddG/xYLA3NXodFM0fSbNizXjOKhqiQQwvhlspadZokn1KDy0NZFS0wuEubIYAV5/c1/lAr0TaaFXEXzw=="], + + "css-tree": ["css-tree@3.2.1", "", { "dependencies": { "mdn-data": "2.27.1", "source-map-js": "^1.2.1" } }, "sha512-X7sjQzceUhu1u7Y/ylrRZFU2FS6LRiFVp6rKLPg23y3x3c3DOKAwuXGDp+PAGjh6CSnCjYeAul8pcT8bAl+lSA=="], + + "css-what": ["css-what@6.2.2", "", {}, "sha512-u/O3vwbptzhMs3L1fQE82ZSLHQQfto5gyZzwteVIEyeaY5Fc7R4dapF/BvRoSYFeqfBk4m0V1Vafq5Pjv25wvA=="], + + "csso": ["csso@5.0.5", "", { "dependencies": { "css-tree": "~2.2.0" } }, 
"sha512-0LrrStPOdJj+SPCCrGhzryycLjwcgUSHBtxNA8aIDxf0GLsRh1cKYhB00Gd1lDOS4yGH69+SNn13+TWbVHETFQ=="], + + "csstype": ["csstype@3.2.3", "", {}, "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ=="], + + "cz-conventional-changelog": ["cz-conventional-changelog@3.3.0", "", { "dependencies": { "chalk": "^2.4.1", "commitizen": "^4.0.3", "conventional-commit-types": "^3.0.0", "lodash.map": "^4.5.1", "longest": "^2.0.1", "word-wrap": "^1.0.3" }, "optionalDependencies": { "@commitlint/load": ">6.1.1" } }, "sha512-U466fIzU5U22eES5lTNiNbZ+d8dfcHcssH4o7QsdWaCcRs/feIPCxKYSWkYBNs5mny7MvEfwpTLWjvbm94hecw=="], + + "debug": ["debug@4.4.3", "", { "dependencies": { "ms": "^2.1.3" } }, "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA=="], + + "decode-named-character-reference": ["decode-named-character-reference@1.3.0", "", { "dependencies": { "character-entities": "^2.0.0" } }, "sha512-GtpQYB283KrPp6nRw50q3U9/VfOutZOe103qlN7BPP6Ad27xYnOIWv4lPzo8HCAL+mMZofJ9KEy30fq6MfaK6Q=="], + + "dedent": ["dedent@0.7.0", "", {}, "sha512-Q6fKUPqnAHAyhiUgFU7BUzLiv0kd8saH9al7tnu5Q/okj6dnupxyTgFIBjVzJATdfIAm9NAsvXNzjaKa+bxVyA=="], + + "deep-is": ["deep-is@0.1.4", "", {}, "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ=="], + + "defaults": ["defaults@1.0.4", "", { "dependencies": { "clone": "^1.0.2" } }, "sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A=="], + + "defu": ["defu@6.1.4", "", {}, "sha512-mEQCMmwJu317oSz8CwdIOdwf3xMif1ttiM8LTufzc3g6kR+9Pe236twL8j3IYT1F7GfRgGcW6MWxzZjLIkuHIg=="], + + "dequal": ["dequal@2.0.3", "", {}, "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA=="], + + "destr": ["destr@2.0.5", "", {}, "sha512-ugFTXCtDZunbzasqBxrK93Ik/DRYsO6S/fedkWEMKqt04xZ4csmnmwGDBAb07QWNaGMAmnTIemsYZCksjATwsA=="], + + "detect-file": ["detect-file@1.0.0", "", {}, 
"sha512-DtCOLG98P007x7wiiOmfI0fi3eIKyWiLTGJ2MDnVi/E04lWGbf+JzrRHMm0rgIIZJGtHpKpbVgLWHrv8xXpc3Q=="], + + "detect-indent": ["detect-indent@6.1.0", "", {}, "sha512-reYkTUJAZb9gUuZ2RvVCNhVHdg62RHnJ7WJl8ftMi4diZ6NWlciOzQN88pUhSELEwflJht4oQDv0F0BMlwaYtA=="], + + "detect-libc": ["detect-libc@2.1.2", "", {}, "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ=="], + + "devalue": ["devalue@5.6.4", "", {}, "sha512-Gp6rDldRsFh/7XuouDbxMH3Mx8GMCcgzIb1pDTvNyn8pZGQ22u+Wa+lGV9dQCltFQ7uVw0MhRyb8XDskNFOReA=="], + + "devlop": ["devlop@1.1.0", "", { "dependencies": { "dequal": "^2.0.0" } }, "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA=="], + + "diff": ["diff@8.0.4", "", {}, "sha512-DPi0FmjiSU5EvQV0++GFDOJ9ASQUVFh5kD+OzOnYdi7n3Wpm9hWWGfB/O2blfHcMVTL5WkQXSnRiK9makhrcnw=="], + + "dlv": ["dlv@1.1.3", "", {}, "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA=="], + + "dom-serializer": ["dom-serializer@2.0.0", "", { "dependencies": { "domelementtype": "^2.3.0", "domhandler": "^5.0.2", "entities": "^4.2.0" } }, "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg=="], + + "domelementtype": ["domelementtype@2.3.0", "", {}, "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw=="], + + "domhandler": ["domhandler@5.0.3", "", { "dependencies": { "domelementtype": "^2.3.0" } }, "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w=="], + + "domutils": ["domutils@3.2.2", "", { "dependencies": { "dom-serializer": "^2.0.0", "domelementtype": "^2.3.0", "domhandler": "^5.0.3" } }, "sha512-6kZKyUajlDuqlHKVX1w7gyslj9MPIXzIFiz/rGu35uC1wMi+kMhQwGhl4lt9unC9Vb9INnY9Z3/ZA3+FhASLaw=="], + + "dot-prop": ["dot-prop@5.3.0", "", { "dependencies": { "is-obj": "^2.0.0" } }, 
"sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q=="], + + "dset": ["dset@3.1.4", "", {}, "sha512-2QF/g9/zTaPDc3BjNcVTGoBbXBgYfMTTceLaYcFJ/W9kggFUkhxD/hMEeuLKbugyef9SqAx8cpgwlIP/jinUTA=="], + + "electron-to-chromium": ["electron-to-chromium@1.5.328", "", {}, "sha512-QNQ5l45DzYytThO21403XN3FvK0hOkWDG8viNf6jqS42msJ8I4tGDSpBCgvDRRPnkffafiwAym2X2eHeGD2V0w=="], + + "emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="], + + "enhanced-resolve": ["enhanced-resolve@5.20.1", "", { "dependencies": { "graceful-fs": "^4.2.4", "tapable": "^2.3.0" } }, "sha512-Qohcme7V1inbAfvjItgw0EaxVX5q2rdVEZHRBrEQdRZTssLDGsL8Lwrznl8oQ/6kuTJONLaDcGjkNP247XEhcA=="], + + "entities": ["entities@6.0.1", "", {}, "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g=="], + + "env-paths": ["env-paths@2.2.1", "", {}, "sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A=="], + + "environment": ["environment@1.1.0", "", {}, "sha512-xUtoPkMggbz0MPyPiIWr1Kp4aeWJjDZ6SMvURhimjdZgsRuDplF5/s9hcgGhyXMhs+6vpnuoiZ2kFiu3FMnS8Q=="], + + "error-ex": ["error-ex@1.3.4", "", { "dependencies": { "is-arrayish": "^0.2.1" } }, "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ=="], + + "es-module-lexer": ["es-module-lexer@2.0.0", "", {}, "sha512-5POEcUuZybH7IdmGsD8wlf0AI55wMecM9rVBTI/qEAy2c1kTOm3DjFYjrBdI2K3BaJjJYfYFeRtM0t9ssnRuxw=="], + + "esbuild": ["esbuild@0.27.4", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.27.4", "@esbuild/android-arm": "0.27.4", "@esbuild/android-arm64": "0.27.4", "@esbuild/android-x64": "0.27.4", "@esbuild/darwin-arm64": "0.27.4", "@esbuild/darwin-x64": "0.27.4", "@esbuild/freebsd-arm64": "0.27.4", "@esbuild/freebsd-x64": "0.27.4", "@esbuild/linux-arm": "0.27.4", "@esbuild/linux-arm64": "0.27.4", "@esbuild/linux-ia32": "0.27.4", 
"@esbuild/linux-loong64": "0.27.4", "@esbuild/linux-mips64el": "0.27.4", "@esbuild/linux-ppc64": "0.27.4", "@esbuild/linux-riscv64": "0.27.4", "@esbuild/linux-s390x": "0.27.4", "@esbuild/linux-x64": "0.27.4", "@esbuild/netbsd-arm64": "0.27.4", "@esbuild/netbsd-x64": "0.27.4", "@esbuild/openbsd-arm64": "0.27.4", "@esbuild/openbsd-x64": "0.27.4", "@esbuild/openharmony-arm64": "0.27.4", "@esbuild/sunos-x64": "0.27.4", "@esbuild/win32-arm64": "0.27.4", "@esbuild/win32-ia32": "0.27.4", "@esbuild/win32-x64": "0.27.4" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-Rq4vbHnYkK5fws5NF7MYTU68FPRE1ajX7heQ/8QXXWqNgqqJ/GkmmyxIzUnf2Sr/bakf8l54716CcMGHYhMrrQ=="], + + "escalade": ["escalade@3.2.0", "", {}, "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA=="], + + "escape-string-regexp": ["escape-string-regexp@4.0.0", "", {}, "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA=="], + + "eslint": ["eslint@9.39.4", "", { "dependencies": { "@eslint-community/eslint-utils": "^4.8.0", "@eslint-community/regexpp": "^4.12.1", "@eslint/config-array": "^0.21.2", "@eslint/config-helpers": "^0.4.2", "@eslint/core": "^0.17.0", "@eslint/eslintrc": "^3.3.5", "@eslint/js": "9.39.4", "@eslint/plugin-kit": "^0.4.1", "@humanfs/node": "^0.16.6", "@humanwhocodes/module-importer": "^1.0.1", "@humanwhocodes/retry": "^0.4.2", "@types/estree": "^1.0.6", "ajv": "^6.14.0", "chalk": "^4.0.0", "cross-spawn": "^7.0.6", "debug": "^4.3.2", "escape-string-regexp": "^4.0.0", "eslint-scope": "^8.4.0", "eslint-visitor-keys": "^4.2.1", "espree": "^10.4.0", "esquery": "^1.5.0", "esutils": "^2.0.2", "fast-deep-equal": "^3.1.3", "file-entry-cache": "^8.0.0", "find-up": "^5.0.0", "glob-parent": "^6.0.2", "ignore": "^5.2.0", "imurmurhash": "^0.1.4", "is-glob": "^4.0.0", "json-stable-stringify-without-jsonify": "^1.0.1", "lodash.merge": "^4.6.2", "minimatch": "^3.1.5", "natural-compare": "^1.4.0", "optionator": "^0.9.3" }, 
"peerDependencies": { "jiti": "*" }, "optionalPeers": ["jiti"], "bin": { "eslint": "bin/eslint.js" } }, "sha512-XoMjdBOwe/esVgEvLmNsD3IRHkm7fbKIUGvrleloJXUZgDHig2IPWNniv+GwjyJXzuNqVjlr5+4yVUZjycJwfQ=="], + + "eslint-config-prettier": ["eslint-config-prettier@10.1.8", "", { "peerDependencies": { "eslint": ">=7.0.0" }, "bin": { "eslint-config-prettier": "bin/cli.js" } }, "sha512-82GZUjRS0p/jganf6q1rEO25VSoHH0hKPCTrgillPjdI/3bgBhAE1QzHrHTizjpRvy6pGAvKjDJtk2pF9NDq8w=="], + + "eslint-plugin-prettier": ["eslint-plugin-prettier@5.5.5", "", { "dependencies": { "prettier-linter-helpers": "^1.0.1", "synckit": "^0.11.12" }, "peerDependencies": { "@types/eslint": ">=8.0.0", "eslint": ">=8.0.0", "eslint-config-prettier": ">= 7.0.0 <10.0.0 || >=10.1.0", "prettier": ">=3.0.0" }, "optionalPeers": ["@types/eslint", "eslint-config-prettier"] }, "sha512-hscXkbqUZ2sPithAuLm5MXL+Wph+U7wHngPBv9OMWwlP8iaflyxpjTYZkmdgB4/vPIhemRlBEoLrH7UC1n7aUw=="], + + "eslint-scope": ["eslint-scope@8.4.0", "", { "dependencies": { "esrecurse": "^4.3.0", "estraverse": "^5.2.0" } }, "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg=="], + + "eslint-visitor-keys": ["eslint-visitor-keys@4.2.1", "", {}, "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ=="], + + "espree": ["espree@10.4.0", "", { "dependencies": { "acorn": "^8.15.0", "acorn-jsx": "^5.3.2", "eslint-visitor-keys": "^4.2.1" } }, "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ=="], + + "esquery": ["esquery@1.7.0", "", { "dependencies": { "estraverse": "^5.1.0" } }, "sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g=="], + + "esrecurse": ["esrecurse@4.3.0", "", { "dependencies": { "estraverse": "^5.2.0" } }, "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag=="], + + "estraverse": ["estraverse@5.3.0", "", {}, 
"sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA=="], + + "estree-util-is-identifier-name": ["estree-util-is-identifier-name@3.0.0", "", {}, "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg=="], + + "estree-walker": ["estree-walker@2.0.2", "", {}, "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w=="], + + "esutils": ["esutils@2.0.3", "", {}, "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g=="], + + "eventemitter3": ["eventemitter3@5.0.4", "", {}, "sha512-mlsTRyGaPBjPedk6Bvw+aqbsXDtoAyAzm5MO7JgU+yVRyMQ5O8bD4Kcci7BS85f93veegeCPkL8R4GLClnjLFw=="], + + "expand-tilde": ["expand-tilde@2.0.2", "", { "dependencies": { "homedir-polyfill": "^1.0.1" } }, "sha512-A5EmesHW6rfnZ9ysHQjPdJRni0SRar0tjtG5MNtm9n5TUvsYU8oozprtRD4AqHxcZWWlVuAmQo2nWKfN9oyjTw=="], + + "extend": ["extend@3.0.2", "", {}, "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g=="], + + "external-editor": ["external-editor@3.1.0", "", { "dependencies": { "chardet": "^0.7.0", "iconv-lite": "^0.4.24", "tmp": "^0.0.33" } }, "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew=="], + + "fast-deep-equal": ["fast-deep-equal@3.1.3", "", {}, "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="], + + "fast-diff": ["fast-diff@1.3.0", "", {}, "sha512-VxPP4NqbUjj6MaAOafWeUn2cXWLcCtljklUtZf0Ind4XQ+QPtmA0b18zZy0jIQx+ExRVCR/ZQpBmik5lXshNsw=="], + + "fast-json-stable-stringify": ["fast-json-stable-stringify@2.1.0", "", {}, "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw=="], + + "fast-levenshtein": ["fast-levenshtein@2.0.6", "", {}, "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw=="], + + "fast-uri": ["fast-uri@3.1.0", "", {}, 
"sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA=="], + + "fdir": ["fdir@6.5.0", "", { "peerDependencies": { "picomatch": "^3 || ^4" }, "optionalPeers": ["picomatch"] }, "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg=="], + + "figures": ["figures@3.2.0", "", { "dependencies": { "escape-string-regexp": "^1.0.5" } }, "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg=="], + + "file-entry-cache": ["file-entry-cache@8.0.0", "", { "dependencies": { "flat-cache": "^4.0.0" } }, "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ=="], + + "fill-range": ["fill-range@7.1.1", "", { "dependencies": { "to-regex-range": "^5.0.1" } }, "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg=="], + + "find-node-modules": ["find-node-modules@2.1.3", "", { "dependencies": { "findup-sync": "^4.0.0", "merge": "^2.1.1" } }, "sha512-UC2I2+nx1ZuOBclWVNdcnbDR5dlrOdVb7xNjmT/lHE+LsgztWks3dG7boJ37yTS/venXw84B/mAW9uHVoC5QRg=="], + + "find-root": ["find-root@1.1.0", "", {}, "sha512-NKfW6bec6GfKc0SGx1e07QZY9PE99u0Bft/0rzSD5k3sO/vwkVUpDUKVm5Gpp5Ue3YfShPFTX2070tDs5kB9Ng=="], + + "find-up": ["find-up@5.0.0", "", { "dependencies": { "locate-path": "^6.0.0", "path-exists": "^4.0.0" } }, "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng=="], + + "findup-sync": ["findup-sync@4.0.0", "", { "dependencies": { "detect-file": "^1.0.0", "is-glob": "^4.0.0", "micromatch": "^4.0.2", "resolve-dir": "^1.0.1" } }, "sha512-6jvvn/12IC4quLBL1KNokxC7wWTvYncaVUYSoxWw7YykPLuRrnv4qdHcSOywOI5RpkOVGeQRtWM8/q+G6W6qfQ=="], + + "flat-cache": ["flat-cache@4.0.1", "", { "dependencies": { "flatted": "^3.2.9", "keyv": "^4.5.4" } }, "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw=="], + + "flatted": ["flatted@3.4.2", "", 
{}, "sha512-PjDse7RzhcPkIJwy5t7KPWQSZ9cAbzQXcafsetQoD7sOJRQlGikNbx7yZp2OotDnJyrDcbyRq3Ttb18iYOqkxA=="], + + "flattie": ["flattie@1.1.1", "", {}, "sha512-9UbaD6XdAL97+k/n+N7JwX46K/M6Zc6KcFYskrYL8wbBV/Uyk0CTAMY0VT+qiK5PM7AIc9aTWYtq65U7T+aCNQ=="], + + "fontace": ["fontace@0.4.1", "", { "dependencies": { "fontkitten": "^1.0.2" } }, "sha512-lDMvbAzSnHmbYMTEld5qdtvNH2/pWpICOqpean9IgC7vUbUJc3k+k5Dokp85CegamqQpFbXf0rAVkbzpyTA8aw=="], + + "fontkitten": ["fontkitten@1.0.3", "", { "dependencies": { "tiny-inflate": "^1.0.3" } }, "sha512-Wp1zXWPVUPBmfoa3Cqc9ctaKuzKAV6uLstRqlR56kSjplf5uAce+qeyYym7F+PHbGTk+tCEdkCW6RD7DX/gBZw=="], + + "fraction.js": ["fraction.js@5.3.4", "", {}, "sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ=="], + + "framer-motion": ["framer-motion@12.38.0", "", { "dependencies": { "motion-dom": "^12.38.0", "motion-utils": "^12.36.0", "tslib": "^2.4.0" }, "peerDependencies": { "@emotion/is-prop-valid": "*", "react": "^18.0.0 || ^19.0.0", "react-dom": "^18.0.0 || ^19.0.0" }, "optionalPeers": ["@emotion/is-prop-valid", "react", "react-dom"] }, "sha512-rFYkY/pigbcswl1XQSb7q424kSTQ8q6eAC+YUsSKooHQYuLdzdHjrt6uxUC+PRAO++q5IS7+TamgIw1AphxR+g=="], + + "fs-extra": ["fs-extra@9.1.0", "", { "dependencies": { "at-least-node": "^1.0.0", "graceful-fs": "^4.2.0", "jsonfile": "^6.0.1", "universalify": "^2.0.0" } }, "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ=="], + + "fs.realpath": ["fs.realpath@1.0.0", "", {}, "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw=="], + + "fsevents": ["fsevents@2.3.2", "", { "os": "darwin" }, "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA=="], + + "gensync": ["gensync@1.0.0-beta.2", "", {}, "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg=="], + + "get-caller-file": ["get-caller-file@2.0.5", "", {}, 
"sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg=="], + + "get-east-asian-width": ["get-east-asian-width@1.5.0", "", {}, "sha512-CQ+bEO+Tva/qlmw24dCejulK5pMzVnUOFOijVogd3KQs07HnRIgp8TGipvCCRT06xeYEbpbgwaCxglFyiuIcmA=="], + + "git-raw-commits": ["git-raw-commits@5.0.1", "", { "dependencies": { "@conventional-changelog/git-client": "^2.6.0", "meow": "^13.0.0" }, "bin": { "git-raw-commits": "src/cli.js" } }, "sha512-Y+csSm2GD/PCSh6Isd/WiMjNAydu0VBiG9J7EdQsNA5P9uXvLayqjmTsNlK5Gs9IhblFZqOU0yid5Il5JPoLiQ=="], + + "github-slugger": ["github-slugger@2.0.0", "", {}, "sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw=="], + + "glob": ["glob@7.2.3", "", { "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", "inherits": "2", "minimatch": "^3.1.1", "once": "^1.3.0", "path-is-absolute": "^1.0.0" } }, "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q=="], + + "glob-parent": ["glob-parent@6.0.2", "", { "dependencies": { "is-glob": "^4.0.3" } }, "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A=="], + + "global-directory": ["global-directory@4.0.1", "", { "dependencies": { "ini": "4.1.1" } }, "sha512-wHTUcDUoZ1H5/0iVqEudYW4/kAlN5cZ3j/bXn0Dpbizl9iaUVeWSHqiOjsgk6OW2bkLclbBjzewBz6weQ1zA2Q=="], + + "global-modules": ["global-modules@1.0.0", "", { "dependencies": { "global-prefix": "^1.0.1", "is-windows": "^1.0.1", "resolve-dir": "^1.0.0" } }, "sha512-sKzpEkf11GpOFuw0Zzjzmt4B4UZwjOcG757PPvrfhxcLFbq0wpsgpOqxpxtxFiCG4DtG93M6XRVbF2oGdev7bg=="], + + "global-prefix": ["global-prefix@1.0.2", "", { "dependencies": { "expand-tilde": "^2.0.2", "homedir-polyfill": "^1.0.1", "ini": "^1.3.4", "is-windows": "^1.0.1", "which": "^1.2.14" } }, "sha512-5lsx1NUDHtSjfg0eHlmYvZKv8/nVqX4ckFbM+FrGcQ+04KWcWFo9P5MxPZYSzUvyzmdTbI7Eix8Q4IbELDqzKg=="], + + "globals": ["globals@14.0.0", "", {}, 
"sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ=="], + + "graceful-fs": ["graceful-fs@4.2.11", "", {}, "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ=="], + + "h3": ["h3@1.15.10", "", { "dependencies": { "cookie-es": "^1.2.2", "crossws": "^0.3.5", "defu": "^6.1.4", "destr": "^2.0.5", "iron-webcrypto": "^1.2.1", "node-mock-http": "^1.0.4", "radix3": "^1.1.2", "ufo": "^1.6.3", "uncrypto": "^0.1.3" } }, "sha512-YzJeWSkDZxAhvmp8dexjRK5hxziRO7I9m0N53WhvYL5NiWfkUkzssVzY9jvGu0HBoLFW6+duYmNSn6MaZBCCtg=="], + + "has-flag": ["has-flag@3.0.0", "", {}, "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw=="], + + "hast-util-from-html": ["hast-util-from-html@2.0.3", "", { "dependencies": { "@types/hast": "^3.0.0", "devlop": "^1.1.0", "hast-util-from-parse5": "^8.0.0", "parse5": "^7.0.0", "vfile": "^6.0.0", "vfile-message": "^4.0.0" } }, "sha512-CUSRHXyKjzHov8yKsQjGOElXy/3EKpyX56ELnkHH34vDVw1N1XSQ1ZcAvTyAPtGqLTuKP/uxM+aLkSPqF/EtMw=="], + + "hast-util-from-parse5": ["hast-util-from-parse5@8.0.3", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/unist": "^3.0.0", "devlop": "^1.0.0", "hastscript": "^9.0.0", "property-information": "^7.0.0", "vfile": "^6.0.0", "vfile-location": "^5.0.0", "web-namespaces": "^2.0.0" } }, "sha512-3kxEVkEKt0zvcZ3hCRYI8rqrgwtlIOFMWkbclACvjlDw8Li9S2hk/d51OI0nr/gIpdMHNepwgOKqZ/sy0Clpyg=="], + + "hast-util-is-element": ["hast-util-is-element@3.0.0", "", { "dependencies": { "@types/hast": "^3.0.0" } }, "sha512-Val9mnv2IWpLbNPqc/pUem+a7Ipj2aHacCwgNfTiK0vJKl0LF+4Ba4+v1oPHFpf3bLYmreq0/l3Gud9S5OH42g=="], + + "hast-util-parse-selector": ["hast-util-parse-selector@4.0.0", "", { "dependencies": { "@types/hast": "^3.0.0" } }, "sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A=="], + + "hast-util-raw": ["hast-util-raw@9.1.0", "", { "dependencies": { "@types/hast": "^3.0.0", 
"@types/unist": "^3.0.0", "@ungap/structured-clone": "^1.0.0", "hast-util-from-parse5": "^8.0.0", "hast-util-to-parse5": "^8.0.0", "html-void-elements": "^3.0.0", "mdast-util-to-hast": "^13.0.0", "parse5": "^7.0.0", "unist-util-position": "^5.0.0", "unist-util-visit": "^5.0.0", "vfile": "^6.0.0", "web-namespaces": "^2.0.0", "zwitch": "^2.0.0" } }, "sha512-Y8/SBAHkZGoNkpzqqfCldijcuUKh7/su31kEBp67cFY09Wy0mTRgtsLYsiIxMJxlu0f6AA5SUTbDR8K0rxnbUw=="], + + "hast-util-to-html": ["hast-util-to-html@9.0.5", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/unist": "^3.0.0", "ccount": "^2.0.0", "comma-separated-tokens": "^2.0.0", "hast-util-whitespace": "^3.0.0", "html-void-elements": "^3.0.0", "mdast-util-to-hast": "^13.0.0", "property-information": "^7.0.0", "space-separated-tokens": "^2.0.0", "stringify-entities": "^4.0.0", "zwitch": "^2.0.4" } }, "sha512-OguPdidb+fbHQSU4Q4ZiLKnzWo8Wwsf5bZfbvu7//a9oTYoqD/fWpe96NuHkoS9h0ccGOTe0C4NGXdtS0iObOw=="], + + "hast-util-to-jsx-runtime": ["hast-util-to-jsx-runtime@2.3.6", "", { "dependencies": { "@types/estree": "^1.0.0", "@types/hast": "^3.0.0", "@types/unist": "^3.0.0", "comma-separated-tokens": "^2.0.0", "devlop": "^1.0.0", "estree-util-is-identifier-name": "^3.0.0", "hast-util-whitespace": "^3.0.0", "mdast-util-mdx-expression": "^2.0.0", "mdast-util-mdx-jsx": "^3.0.0", "mdast-util-mdxjs-esm": "^2.0.0", "property-information": "^7.0.0", "space-separated-tokens": "^2.0.0", "style-to-js": "^1.0.0", "unist-util-position": "^5.0.0", "vfile-message": "^4.0.0" } }, "sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg=="], + + "hast-util-to-parse5": ["hast-util-to-parse5@8.0.1", "", { "dependencies": { "@types/hast": "^3.0.0", "comma-separated-tokens": "^2.0.0", "devlop": "^1.0.0", "property-information": "^7.0.0", "space-separated-tokens": "^2.0.0", "web-namespaces": "^2.0.0", "zwitch": "^2.0.0" } }, 
"sha512-MlWT6Pjt4CG9lFCjiz4BH7l9wmrMkfkJYCxFwKQic8+RTZgWPuWxwAfjJElsXkex7DJjfSJsQIt931ilUgmwdA=="], + + "hast-util-to-text": ["hast-util-to-text@4.0.2", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/unist": "^3.0.0", "hast-util-is-element": "^3.0.0", "unist-util-find-after": "^5.0.0" } }, "sha512-KK6y/BN8lbaq654j7JgBydev7wuNMcID54lkRav1P0CaE1e47P72AWWPiGKXTJU271ooYzcvTAn/Zt0REnvc7A=="], + + "hast-util-whitespace": ["hast-util-whitespace@3.0.0", "", { "dependencies": { "@types/hast": "^3.0.0" } }, "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw=="], + + "hastscript": ["hastscript@9.0.1", "", { "dependencies": { "@types/hast": "^3.0.0", "comma-separated-tokens": "^2.0.0", "hast-util-parse-selector": "^4.0.0", "property-information": "^7.0.0", "space-separated-tokens": "^2.0.0" } }, "sha512-g7df9rMFX/SPi34tyGCyUBREQoKkapwdY/T04Qn9TDWfHhAYt4/I0gMVirzK5wEzeUqIjEB+LXC/ypb7Aqno5w=="], + + "homedir-polyfill": ["homedir-polyfill@1.0.3", "", { "dependencies": { "parse-passwd": "^1.0.0" } }, "sha512-eSmmWE5bZTK2Nou4g0AI3zZ9rswp7GRKoKXS1BLUkvPviOqs4YTN1djQIqrXy9k5gEtdLPy86JjRwsNM9tnDcA=="], + + "html-escaper": ["html-escaper@3.0.3", "", {}, "sha512-RuMffC89BOWQoY0WKGpIhn5gX3iI54O6nRA0yC124NYVtzjmFWBIiFd8M0x+ZdX0P9R4lADg1mgP8C7PxGOWuQ=="], + + "html-url-attributes": ["html-url-attributes@3.0.1", "", {}, "sha512-ol6UPyBWqsrO6EJySPz2O7ZSr856WDrEzM5zMqp+FJJLGMW35cLYmmZnl0vztAZxRUoNZJFTCohfjuIJ8I4QBQ=="], + + "html-void-elements": ["html-void-elements@3.0.0", "", {}, "sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg=="], + + "http-cache-semantics": ["http-cache-semantics@4.2.0", "", {}, "sha512-dTxcvPXqPvXBQpq5dUr6mEMJX4oIEFv6bwom3FDwKRDsuIjjJGANqhBuoAn9c1RQJIdAKav33ED65E2ys+87QQ=="], + + "husky": ["husky@9.1.7", "", { "bin": { "husky": "bin.js" } }, "sha512-5gs5ytaNjBrh5Ow3zrvdUUY+0VxIuWVL4i9irt6friV+BqdCfmV11CQTWMiBYWHbXhco+J1kHfTOUkePhCDvMA=="], + + "iconv-lite": 
["iconv-lite@0.4.24", "", { "dependencies": { "safer-buffer": ">= 2.1.2 < 3" } }, "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA=="], + + "ieee754": ["ieee754@1.2.1", "", {}, "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA=="], + + "ignore": ["ignore@7.0.5", "", {}, "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg=="], + + "immutable": ["immutable@5.1.5", "", {}, "sha512-t7xcm2siw+hlUM68I+UEOK+z84RzmN59as9DZ7P1l0994DKUWV7UXBMQZVxaoMSRQ+PBZbHCOoBt7a2wxOMt+A=="], + + "import-fresh": ["import-fresh@3.3.1", "", { "dependencies": { "parent-module": "^1.0.0", "resolve-from": "^4.0.0" } }, "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ=="], + + "import-meta-resolve": ["import-meta-resolve@4.2.0", "", {}, "sha512-Iqv2fzaTQN28s/FwZAoFq0ZSs/7hMAHJVX+w8PZl3cY19Pxk6jFFalxQoIfW2826i/fDLXv8IiEZRIT0lDuWcg=="], + + "imurmurhash": ["imurmurhash@0.1.4", "", {}, "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA=="], + + "inflight": ["inflight@1.0.6", "", { "dependencies": { "once": "^1.3.0", "wrappy": "1" } }, "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA=="], + + "inherits": ["inherits@2.0.4", "", {}, "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="], + + "ini": ["ini@4.1.1", "", {}, "sha512-QQnnxNyfvmHFIsj7gkPcYymR8Jdw/o7mp5ZFihxn6h8Ci6fh3Dx4E1gPjpQEpIuPo9XVNY/ZUwh4BPMjGyL01g=="], + + "inline-style-parser": ["inline-style-parser@0.2.7", "", {}, "sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA=="], + + "inquirer": ["inquirer@8.2.5", "", { "dependencies": { "ansi-escapes": "^4.2.1", "chalk": "^4.1.1", "cli-cursor": "^3.1.0", "cli-width": "^3.0.0", "external-editor": "^3.0.3", "figures": "^3.0.0", "lodash": 
"^4.17.21", "mute-stream": "0.0.8", "ora": "^5.4.1", "run-async": "^2.4.0", "rxjs": "^7.5.5", "string-width": "^4.1.0", "strip-ansi": "^6.0.0", "through": "^2.3.6", "wrap-ansi": "^7.0.0" } }, "sha512-QAgPDQMEgrDssk1XiwwHoOGYF9BAbUcc1+j+FhEvaOt8/cKRqyLn0U5qA6F74fGhTMGxf92pOvPBeh29jQJDTQ=="], + + "iron-webcrypto": ["iron-webcrypto@1.2.1", "", {}, "sha512-feOM6FaSr6rEABp/eDfVseKyTMDt+KGpeB35SkVn9Tyn0CqvVsY3EwI0v5i8nMHyJnzCIQf7nsy3p41TPkJZhg=="], + + "is-alphabetical": ["is-alphabetical@2.0.1", "", {}, "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ=="], + + "is-alphanumerical": ["is-alphanumerical@2.0.1", "", { "dependencies": { "is-alphabetical": "^2.0.0", "is-decimal": "^2.0.0" } }, "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw=="], + + "is-arrayish": ["is-arrayish@0.2.1", "", {}, "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg=="], + + "is-decimal": ["is-decimal@2.0.1", "", {}, "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A=="], + + "is-docker": ["is-docker@3.0.0", "", { "bin": { "is-docker": "cli.js" } }, "sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ=="], + + "is-extglob": ["is-extglob@2.1.1", "", {}, "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ=="], + + "is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="], + + "is-glob": ["is-glob@4.0.3", "", { "dependencies": { "is-extglob": "^2.1.1" } }, "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg=="], + + "is-hexadecimal": ["is-hexadecimal@2.0.1", "", {}, "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg=="], + + "is-inside-container": 
["is-inside-container@1.0.0", "", { "dependencies": { "is-docker": "^3.0.0" }, "bin": { "is-inside-container": "cli.js" } }, "sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA=="], + + "is-interactive": ["is-interactive@1.0.0", "", {}, "sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w=="], + + "is-number": ["is-number@7.0.0", "", {}, "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng=="], + + "is-obj": ["is-obj@2.0.0", "", {}, "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w=="], + + "is-plain-obj": ["is-plain-obj@4.1.0", "", {}, "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg=="], + + "is-unicode-supported": ["is-unicode-supported@0.1.0", "", {}, "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw=="], + + "is-utf8": ["is-utf8@0.2.1", "", {}, "sha512-rMYPYvCzsXywIsldgLaSoPlw5PfoB/ssr7hY4pLfcodrA5M/eArza1a9VmTiNIBNMjOGr1Ow9mTyU2o69U6U9Q=="], + + "is-windows": ["is-windows@1.0.2", "", {}, "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA=="], + + "is-wsl": ["is-wsl@3.1.1", "", { "dependencies": { "is-inside-container": "^1.0.0" } }, "sha512-e6rvdUCiQCAuumZslxRJWR/Doq4VpPR82kqclvcS0efgt430SlGIk05vdCN58+VrzgtIcfNODjozVielycD4Sw=="], + + "isexe": ["isexe@2.0.0", "", {}, "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="], + + "jiti": ["jiti@2.6.1", "", { "bin": { "jiti": "lib/jiti-cli.mjs" } }, "sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ=="], + + "js-tokens": ["js-tokens@4.0.0", "", {}, "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="], + + "js-yaml": ["js-yaml@4.1.1", "", { "dependencies": { "argparse": "^2.0.1" }, 
"bin": { "js-yaml": "bin/js-yaml.js" } }, "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA=="], + + "jsesc": ["jsesc@3.1.0", "", { "bin": { "jsesc": "bin/jsesc" } }, "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA=="], + + "json-buffer": ["json-buffer@3.0.1", "", {}, "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ=="], + + "json-parse-even-better-errors": ["json-parse-even-better-errors@2.3.1", "", {}, "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w=="], + + "json-schema-traverse": ["json-schema-traverse@0.4.1", "", {}, "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg=="], + + "json-stable-stringify-without-jsonify": ["json-stable-stringify-without-jsonify@1.0.1", "", {}, "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw=="], + + "json5": ["json5@2.2.3", "", { "bin": { "json5": "lib/cli.js" } }, "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg=="], + + "jsonfile": ["jsonfile@6.2.0", "", { "dependencies": { "universalify": "^2.0.0" }, "optionalDependencies": { "graceful-fs": "^4.1.6" } }, "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg=="], + + "keyv": ["keyv@4.5.4", "", { "dependencies": { "json-buffer": "3.0.1" } }, "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw=="], + + "levn": ["levn@0.4.1", "", { "dependencies": { "prelude-ls": "^1.2.1", "type-check": "~0.4.0" } }, "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ=="], + + "lightningcss": ["lightningcss@1.32.0", "", { "dependencies": { "detect-libc": "^2.0.3" }, "optionalDependencies": { "lightningcss-android-arm64": "1.32.0", "lightningcss-darwin-arm64": 
"1.32.0", "lightningcss-darwin-x64": "1.32.0", "lightningcss-freebsd-x64": "1.32.0", "lightningcss-linux-arm-gnueabihf": "1.32.0", "lightningcss-linux-arm64-gnu": "1.32.0", "lightningcss-linux-arm64-musl": "1.32.0", "lightningcss-linux-x64-gnu": "1.32.0", "lightningcss-linux-x64-musl": "1.32.0", "lightningcss-win32-arm64-msvc": "1.32.0", "lightningcss-win32-x64-msvc": "1.32.0" } }, "sha512-NXYBzinNrblfraPGyrbPoD19C1h9lfI/1mzgWYvXUTe414Gz/X1FD2XBZSZM7rRTrMA8JL3OtAaGifrIKhQ5yQ=="], + + "lightningcss-android-arm64": ["lightningcss-android-arm64@1.32.0", "", { "os": "android", "cpu": "arm64" }, "sha512-YK7/ClTt4kAK0vo6w3X+Pnm0D2cf2vPHbhOXdoNti1Ga0al1P4TBZhwjATvjNwLEBCnKvjJc2jQgHXH0NEwlAg=="], + + "lightningcss-darwin-arm64": ["lightningcss-darwin-arm64@1.32.0", "", { "os": "darwin", "cpu": "arm64" }, "sha512-RzeG9Ju5bag2Bv1/lwlVJvBE3q6TtXskdZLLCyfg5pt+HLz9BqlICO7LZM7VHNTTn/5PRhHFBSjk5lc4cmscPQ=="], + + "lightningcss-darwin-x64": ["lightningcss-darwin-x64@1.32.0", "", { "os": "darwin", "cpu": "x64" }, "sha512-U+QsBp2m/s2wqpUYT/6wnlagdZbtZdndSmut/NJqlCcMLTWp5muCrID+K5UJ6jqD2BFshejCYXniPDbNh73V8w=="], + + "lightningcss-freebsd-x64": ["lightningcss-freebsd-x64@1.32.0", "", { "os": "freebsd", "cpu": "x64" }, "sha512-JCTigedEksZk3tHTTthnMdVfGf61Fky8Ji2E4YjUTEQX14xiy/lTzXnu1vwiZe3bYe0q+SpsSH/CTeDXK6WHig=="], + + "lightningcss-linux-arm-gnueabihf": ["lightningcss-linux-arm-gnueabihf@1.32.0", "", { "os": "linux", "cpu": "arm" }, "sha512-x6rnnpRa2GL0zQOkt6rts3YDPzduLpWvwAF6EMhXFVZXD4tPrBkEFqzGowzCsIWsPjqSK+tyNEODUBXeeVHSkw=="], + + "lightningcss-linux-arm64-gnu": ["lightningcss-linux-arm64-gnu@1.32.0", "", { "os": "linux", "cpu": "arm64" }, "sha512-0nnMyoyOLRJXfbMOilaSRcLH3Jw5z9HDNGfT/gwCPgaDjnx0i8w7vBzFLFR1f6CMLKF8gVbebmkUN3fa/kQJpQ=="], + + "lightningcss-linux-arm64-musl": ["lightningcss-linux-arm64-musl@1.32.0", "", { "os": "linux", "cpu": "arm64" }, "sha512-UpQkoenr4UJEzgVIYpI80lDFvRmPVg6oqboNHfoH4CQIfNA+HOrZ7Mo7KZP02dC6LjghPQJeBsvXhJod/wnIBg=="], + + 
"lightningcss-linux-x64-gnu": ["lightningcss-linux-x64-gnu@1.32.0", "", { "os": "linux", "cpu": "x64" }, "sha512-V7Qr52IhZmdKPVr+Vtw8o+WLsQJYCTd8loIfpDaMRWGUZfBOYEJeyJIkqGIDMZPwPx24pUMfwSxxI8phr/MbOA=="], + + "lightningcss-linux-x64-musl": ["lightningcss-linux-x64-musl@1.32.0", "", { "os": "linux", "cpu": "x64" }, "sha512-bYcLp+Vb0awsiXg/80uCRezCYHNg1/l3mt0gzHnWV9XP1W5sKa5/TCdGWaR/zBM2PeF/HbsQv/j2URNOiVuxWg=="], + + "lightningcss-win32-arm64-msvc": ["lightningcss-win32-arm64-msvc@1.32.0", "", { "os": "win32", "cpu": "arm64" }, "sha512-8SbC8BR40pS6baCM8sbtYDSwEVQd4JlFTOlaD3gWGHfThTcABnNDBda6eTZeqbofalIJhFx0qKzgHJmcPTnGdw=="], + + "lightningcss-win32-x64-msvc": ["lightningcss-win32-x64-msvc@1.32.0", "", { "os": "win32", "cpu": "x64" }, "sha512-Amq9B/SoZYdDi1kFrojnoqPLxYhQ4Wo5XiL8EVJrVsB8ARoC1PWW6VGtT0WKCemjy8aC+louJnjS7U18x3b06Q=="], + + "lines-and-columns": ["lines-and-columns@1.2.4", "", {}, "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg=="], + + "lint-staged": ["lint-staged@16.4.0", "", { "dependencies": { "commander": "^14.0.3", "listr2": "^9.0.5", "picomatch": "^4.0.3", "string-argv": "^0.3.2", "tinyexec": "^1.0.4", "yaml": "^2.8.2" }, "bin": { "lint-staged": "bin/lint-staged.js" } }, "sha512-lBWt8hujh/Cjysw5GYVmZpFHXDCgZzhrOm8vbcUdobADZNOK/bRshr2kM3DfgrrtR1DQhfupW9gnIXOfiFi+bw=="], + + "listr2": ["listr2@9.0.5", "", { "dependencies": { "cli-truncate": "^5.0.0", "colorette": "^2.0.20", "eventemitter3": "^5.0.1", "log-update": "^6.1.0", "rfdc": "^1.4.1", "wrap-ansi": "^9.0.0" } }, "sha512-ME4Fb83LgEgwNw96RKNvKV4VTLuXfoKudAmm2lP8Kk87KaMK0/Xrx/aAkMWmT8mDb+3MlFDspfbCs7adjRxA2g=="], + + "locate-path": ["locate-path@6.0.0", "", { "dependencies": { "p-locate": "^5.0.0" } }, "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw=="], + + "lodash": ["lodash@4.17.21", "", {}, "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg=="], + + 
"lodash.camelcase": ["lodash.camelcase@4.3.0", "", {}, "sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA=="], + + "lodash.kebabcase": ["lodash.kebabcase@4.1.1", "", {}, "sha512-N8XRTIMMqqDgSy4VLKPnJ/+hpGZN+PHQiJnSenYqPaVV/NCqEogTnAdZLQiGKhxX+JCs8waWq2t1XHWKOmlY8g=="], + + "lodash.map": ["lodash.map@4.6.0", "", {}, "sha512-worNHGKLDetmcEYDvh2stPCrrQRkP20E4l0iIS7F8EvzMqBBi7ltvFN5m1HvTf1P7Jk1txKhvFcmYsCr8O2F1Q=="], + + "lodash.merge": ["lodash.merge@4.6.2", "", {}, "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ=="], + + "lodash.mergewith": ["lodash.mergewith@4.6.2", "", {}, "sha512-GK3g5RPZWTRSeLSpgP8Xhra+pnjBC56q9FZYe1d5RN3TJ35dbkGy3YqBSMbyCrlbi+CM9Z3Jk5yTL7RCsqboyQ=="], + + "lodash.snakecase": ["lodash.snakecase@4.1.1", "", {}, "sha512-QZ1d4xoBHYUeuouhEq3lk3Uq7ldgyFXGBhg04+oRLnIz8o9T65Eh+8YdroUwn846zchkA9yDsDl5CVVaV2nqYw=="], + + "lodash.startcase": ["lodash.startcase@4.4.0", "", {}, "sha512-+WKqsK294HMSc2jEbNgpHpd0JfIBhp7rEV4aqXWqFr6AlXov+SlcgB1Fv01y2kGe3Gc8nMW7VA0SrGuSkRfIEg=="], + + "lodash.upperfirst": ["lodash.upperfirst@4.3.1", "", {}, "sha512-sReKOYJIJf74dhJONhU4e0/shzi1trVbSWDOhKYE5XV2O+H7Sb2Dihwuc7xWxVl+DgFPyTqIN3zMfT9cq5iWDg=="], + + "log-symbols": ["log-symbols@4.1.0", "", { "dependencies": { "chalk": "^4.1.0", "is-unicode-supported": "^0.1.0" } }, "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg=="], + + "log-update": ["log-update@6.1.0", "", { "dependencies": { "ansi-escapes": "^7.0.0", "cli-cursor": "^5.0.0", "slice-ansi": "^7.1.0", "strip-ansi": "^7.1.0", "wrap-ansi": "^9.0.0" } }, "sha512-9ie8ItPR6tjY5uYJh8K/Zrv/RMZ5VOlOWvtZdEHYSTFKZfIBPQa9tOAEeAWhd+AnIneLJ22w5fjOYtoutpWq5w=="], + + "longest": ["longest@2.0.1", "", {}, "sha512-Ajzxb8CM6WAnFjgiloPsI3bF+WCxcvhdIG3KNA2KN962+tdBsHcuQ4k4qX/EcS/2CRkcc0iAkR956Nib6aXU/Q=="], + + "longest-streak": ["longest-streak@3.1.0", "", {}, 
"sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g=="], + + "lru-cache": ["lru-cache@11.2.7", "", {}, "sha512-aY/R+aEsRelme17KGQa/1ZSIpLpNYYrhcrepKTZgE+W3WM16YMCaPwOHLHsmopZHELU0Ojin1lPVxKR0MihncA=="], + + "magic-string": ["magic-string@0.30.21", "", { "dependencies": { "@jridgewell/sourcemap-codec": "^1.5.5" } }, "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ=="], + + "magicast": ["magicast@0.5.2", "", { "dependencies": { "@babel/parser": "^7.29.0", "@babel/types": "^7.29.0", "source-map-js": "^1.2.1" } }, "sha512-E3ZJh4J3S9KfwdjZhe2afj6R9lGIN5Pher1pF39UGrXRqq/VDaGVIGN13BjHd2u8B61hArAGOnso7nBOouW3TQ=="], + + "markdown-table": ["markdown-table@3.0.4", "", {}, "sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw=="], + + "mdast-util-definitions": ["mdast-util-definitions@6.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", "unist-util-visit": "^5.0.0" } }, "sha512-scTllyX6pnYNZH/AIp/0ePz6s4cZtARxImwoPJ7kS42n+MnVsI4XbnG6d4ibehRIldYMWM2LD7ImQblVhUejVQ=="], + + "mdast-util-find-and-replace": ["mdast-util-find-and-replace@3.0.2", "", { "dependencies": { "@types/mdast": "^4.0.0", "escape-string-regexp": "^5.0.0", "unist-util-is": "^6.0.0", "unist-util-visit-parents": "^6.0.0" } }, "sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg=="], + + "mdast-util-from-markdown": ["mdast-util-from-markdown@2.0.3", "", { "dependencies": { "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", "decode-named-character-reference": "^1.0.0", "devlop": "^1.0.0", "mdast-util-to-string": "^4.0.0", "micromark": "^4.0.0", "micromark-util-decode-numeric-character-reference": "^2.0.0", "micromark-util-decode-string": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0", "unist-util-stringify-position": "^4.0.0" } }, 
"sha512-W4mAWTvSlKvf8L6J+VN9yLSqQ9AOAAvHuoDAmPkz4dHf553m5gVj2ejadHJhoJmcmxEnOv6Pa8XJhpxE93kb8Q=="], + + "mdast-util-gfm": ["mdast-util-gfm@3.1.0", "", { "dependencies": { "mdast-util-from-markdown": "^2.0.0", "mdast-util-gfm-autolink-literal": "^2.0.0", "mdast-util-gfm-footnote": "^2.0.0", "mdast-util-gfm-strikethrough": "^2.0.0", "mdast-util-gfm-table": "^2.0.0", "mdast-util-gfm-task-list-item": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ=="], + + "mdast-util-gfm-autolink-literal": ["mdast-util-gfm-autolink-literal@2.0.1", "", { "dependencies": { "@types/mdast": "^4.0.0", "ccount": "^2.0.0", "devlop": "^1.0.0", "mdast-util-find-and-replace": "^3.0.0", "micromark-util-character": "^2.0.0" } }, "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ=="], + + "mdast-util-gfm-footnote": ["mdast-util-gfm-footnote@2.1.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "devlop": "^1.1.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0" } }, "sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ=="], + + "mdast-util-gfm-strikethrough": ["mdast-util-gfm-strikethrough@2.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg=="], + + "mdast-util-gfm-table": ["mdast-util-gfm-table@2.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "devlop": "^1.0.0", "markdown-table": "^3.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg=="], + + "mdast-util-gfm-task-list-item": ["mdast-util-gfm-task-list-item@2.0.0", "", { 
"dependencies": { "@types/mdast": "^4.0.0", "devlop": "^1.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ=="], + + "mdast-util-mdx-expression": ["mdast-util-mdx-expression@2.0.1", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "devlop": "^1.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ=="], + + "mdast-util-mdx-jsx": ["mdast-util-mdx-jsx@3.2.0", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", "ccount": "^2.0.0", "devlop": "^1.1.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0", "parse-entities": "^4.0.0", "stringify-entities": "^4.0.0", "unist-util-stringify-position": "^4.0.0", "vfile-message": "^4.0.0" } }, "sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q=="], + + "mdast-util-mdxjs-esm": ["mdast-util-mdxjs-esm@2.0.1", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "devlop": "^1.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg=="], + + "mdast-util-phrasing": ["mdast-util-phrasing@4.1.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "unist-util-is": "^6.0.0" } }, "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w=="], + + "mdast-util-to-hast": ["mdast-util-to-hast@13.2.1", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "@ungap/structured-clone": "^1.0.0", "devlop": "^1.0.0", "micromark-util-sanitize-uri": "^2.0.0", "trim-lines": 
"^3.0.0", "unist-util-position": "^5.0.0", "unist-util-visit": "^5.0.0", "vfile": "^6.0.0" } }, "sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA=="], + + "mdast-util-to-markdown": ["mdast-util-to-markdown@2.1.2", "", { "dependencies": { "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", "longest-streak": "^3.0.0", "mdast-util-phrasing": "^4.0.0", "mdast-util-to-string": "^4.0.0", "micromark-util-classify-character": "^2.0.0", "micromark-util-decode-string": "^2.0.0", "unist-util-visit": "^5.0.0", "zwitch": "^2.0.0" } }, "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA=="], + + "mdast-util-to-string": ["mdast-util-to-string@4.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0" } }, "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg=="], + + "mdn-data": ["mdn-data@2.27.1", "", {}, "sha512-9Yubnt3e8A0OKwxYSXyhLymGW4sCufcLG6VdiDdUGVkPhpqLxlvP5vl1983gQjJl3tqbrM731mjaZaP68AgosQ=="], + + "meow": ["meow@13.2.0", "", {}, "sha512-pxQJQzB6djGPXh08dacEloMFopsOqGVRKFPYvPOt9XDZ1HasbgDZA74CJGreSU4G3Ak7EFJGoiH2auq+yXISgA=="], + + "merge": ["merge@2.1.1", "", {}, "sha512-jz+Cfrg9GWOZbQAnDQ4hlVnQky+341Yk5ru8bZSe6sIDTCIg8n9i/u7hSQGSVOF3C7lH6mGtqjkiT9G4wFLL0w=="], + + "micromark": ["micromark@4.0.2", "", { "dependencies": { "@types/debug": "^4.0.0", "debug": "^4.0.0", "decode-named-character-reference": "^1.0.0", "devlop": "^1.0.0", "micromark-core-commonmark": "^2.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-combine-extensions": "^2.0.0", "micromark-util-decode-numeric-character-reference": "^2.0.0", "micromark-util-encode": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-resolve-all": "^2.0.0", "micromark-util-sanitize-uri": "^2.0.0", "micromark-util-subtokenize": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": 
"^2.0.0" } }, "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA=="], + + "micromark-core-commonmark": ["micromark-core-commonmark@2.0.3", "", { "dependencies": { "decode-named-character-reference": "^1.0.0", "devlop": "^1.0.0", "micromark-factory-destination": "^2.0.0", "micromark-factory-label": "^2.0.0", "micromark-factory-space": "^2.0.0", "micromark-factory-title": "^2.0.0", "micromark-factory-whitespace": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-classify-character": "^2.0.0", "micromark-util-html-tag-name": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-resolve-all": "^2.0.0", "micromark-util-subtokenize": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg=="], + + "micromark-extension-gfm": ["micromark-extension-gfm@3.0.0", "", { "dependencies": { "micromark-extension-gfm-autolink-literal": "^2.0.0", "micromark-extension-gfm-footnote": "^2.0.0", "micromark-extension-gfm-strikethrough": "^2.0.0", "micromark-extension-gfm-table": "^2.0.0", "micromark-extension-gfm-tagfilter": "^2.0.0", "micromark-extension-gfm-task-list-item": "^2.0.0", "micromark-util-combine-extensions": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w=="], + + "micromark-extension-gfm-autolink-literal": ["micromark-extension-gfm-autolink-literal@2.1.0", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-sanitize-uri": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw=="], + + "micromark-extension-gfm-footnote": ["micromark-extension-gfm-footnote@2.1.0", "", { "dependencies": { "devlop": 
"^1.0.0", "micromark-core-commonmark": "^2.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-sanitize-uri": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw=="], + + "micromark-extension-gfm-strikethrough": ["micromark-extension-gfm-strikethrough@2.1.0", "", { "dependencies": { "devlop": "^1.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-classify-character": "^2.0.0", "micromark-util-resolve-all": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw=="], + + "micromark-extension-gfm-table": ["micromark-extension-gfm-table@2.1.1", "", { "dependencies": { "devlop": "^1.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg=="], + + "micromark-extension-gfm-tagfilter": ["micromark-extension-gfm-tagfilter@2.0.0", "", { "dependencies": { "micromark-util-types": "^2.0.0" } }, "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg=="], + + "micromark-extension-gfm-task-list-item": ["micromark-extension-gfm-task-list-item@2.1.0", "", { "dependencies": { "devlop": "^1.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw=="], + + "micromark-factory-destination": ["micromark-factory-destination@2.0.1", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", 
"micromark-util-types": "^2.0.0" } }, "sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA=="], + + "micromark-factory-label": ["micromark-factory-label@2.0.1", "", { "dependencies": { "devlop": "^1.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg=="], + + "micromark-factory-space": ["micromark-factory-space@2.0.1", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg=="], + + "micromark-factory-title": ["micromark-factory-title@2.0.1", "", { "dependencies": { "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw=="], + + "micromark-factory-whitespace": ["micromark-factory-whitespace@2.0.1", "", { "dependencies": { "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ=="], + + "micromark-util-character": ["micromark-util-character@2.1.1", "", { "dependencies": { "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q=="], + + "micromark-util-chunked": ["micromark-util-chunked@2.0.1", "", { "dependencies": { "micromark-util-symbol": "^2.0.0" } }, "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA=="], + + "micromark-util-classify-character": ["micromark-util-classify-character@2.0.1", "", { 
"dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q=="], + + "micromark-util-combine-extensions": ["micromark-util-combine-extensions@2.0.1", "", { "dependencies": { "micromark-util-chunked": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg=="], + + "micromark-util-decode-numeric-character-reference": ["micromark-util-decode-numeric-character-reference@2.0.2", "", { "dependencies": { "micromark-util-symbol": "^2.0.0" } }, "sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw=="], + + "micromark-util-decode-string": ["micromark-util-decode-string@2.0.1", "", { "dependencies": { "decode-named-character-reference": "^1.0.0", "micromark-util-character": "^2.0.0", "micromark-util-decode-numeric-character-reference": "^2.0.0", "micromark-util-symbol": "^2.0.0" } }, "sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ=="], + + "micromark-util-encode": ["micromark-util-encode@2.0.1", "", {}, "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw=="], + + "micromark-util-html-tag-name": ["micromark-util-html-tag-name@2.0.1", "", {}, "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA=="], + + "micromark-util-normalize-identifier": ["micromark-util-normalize-identifier@2.0.1", "", { "dependencies": { "micromark-util-symbol": "^2.0.0" } }, "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q=="], + + "micromark-util-resolve-all": ["micromark-util-resolve-all@2.0.1", "", { "dependencies": { "micromark-util-types": "^2.0.0" } }, 
"sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg=="], + + "micromark-util-sanitize-uri": ["micromark-util-sanitize-uri@2.0.1", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-encode": "^2.0.0", "micromark-util-symbol": "^2.0.0" } }, "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ=="], + + "micromark-util-subtokenize": ["micromark-util-subtokenize@2.1.0", "", { "dependencies": { "devlop": "^1.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA=="], + + "micromark-util-symbol": ["micromark-util-symbol@2.0.1", "", {}, "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q=="], + + "micromark-util-types": ["micromark-util-types@2.0.2", "", {}, "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA=="], + + "micromatch": ["micromatch@4.0.8", "", { "dependencies": { "braces": "^3.0.3", "picomatch": "^2.3.1" } }, "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA=="], + + "mimic-fn": ["mimic-fn@2.1.0", "", {}, "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg=="], + + "mimic-function": ["mimic-function@5.0.1", "", {}, "sha512-VP79XUPxV2CigYP3jWwAUFSku2aKqBH7uTAapFWCBqutsbmDo96KY5o8uh6U+/YSIn5OxJnXp73beVkpqMIGhA=="], + + "minimatch": ["minimatch@3.1.5", "", { "dependencies": { "brace-expansion": "^1.1.7" } }, "sha512-VgjWUsnnT6n+NUk6eZq77zeFdpW2LWDzP6zFGrCbHXiYNul5Dzqk2HHQ5uFH2DNW5Xbp8+jVzaeNt94ssEEl4w=="], + + "minimist": ["minimist@1.2.7", "", {}, "sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g=="], + + "motion": ["motion@12.38.0", "", { "dependencies": { "framer-motion": "^12.38.0", 
"tslib": "^2.4.0" }, "peerDependencies": { "@emotion/is-prop-valid": "*", "react": "^18.0.0 || ^19.0.0", "react-dom": "^18.0.0 || ^19.0.0" }, "optionalPeers": ["@emotion/is-prop-valid", "react", "react-dom"] }, "sha512-uYfXzeHlgThchzwz5Te47dlv5JOUC7OB4rjJ/7XTUgtBZD8CchMN8qEJ4ZVsUmTyYA44zjV0fBwsiktRuFnn+w=="], + + "motion-dom": ["motion-dom@12.38.0", "", { "dependencies": { "motion-utils": "^12.36.0" } }, "sha512-pdkHLD8QYRp8VfiNLb8xIBJis1byQ9gPT3Jnh2jqfFtAsWUA3dEepDlsWe/xMpO8McV+VdpKVcp+E+TGJEtOoA=="], + + "motion-utils": ["motion-utils@12.36.0", "", {}, "sha512-eHWisygbiwVvf6PZ1vhaHCLamvkSbPIeAYxWUuL3a2PD/TROgE7FvfHWTIH4vMl798QLfMw15nRqIaRDXTlYRg=="], + + "mrmime": ["mrmime@2.0.1", "", {}, "sha512-Y3wQdFg2Va6etvQ5I82yUhGdsKrcYox6p7FfL1LbK2J4V01F9TGlepTIhnK24t7koZibmg82KGglhA1XK5IsLQ=="], + + "ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="], + + "mute-stream": ["mute-stream@0.0.8", "", {}, "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA=="], + + "nanoid": ["nanoid@3.3.11", "", { "bin": { "nanoid": "bin/nanoid.cjs" } }, "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w=="], + + "natural-compare": ["natural-compare@1.4.0", "", {}, "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw=="], + + "neotraverse": ["neotraverse@0.6.18", "", {}, "sha512-Z4SmBUweYa09+o6pG+eASabEpP6QkQ70yHj351pQoEXIs8uHbaU2DWVmzBANKgflPa47A50PtB2+NgRpQvr7vA=="], + + "nlcst-to-string": ["nlcst-to-string@4.0.0", "", { "dependencies": { "@types/nlcst": "^2.0.0" } }, "sha512-YKLBCcUYKAg0FNlOBT6aI91qFmSiFKiluk655WzPF+DDMA02qIyy8uiRqI8QXtcFpEvll12LpL5MXqEmAZ+dcA=="], + + "node-addon-api": ["node-addon-api@7.1.1", "", {}, "sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ=="], + + "node-fetch-native": ["node-fetch-native@1.6.7", "", {}, 
"sha512-g9yhqoedzIUm0nTnTqAQvueMPVOuIY16bqgAJJC8XOOubYFNwz6IER9qs0Gq2Xd0+CecCKFjtdDTMA4u4xG06Q=="], + + "node-mock-http": ["node-mock-http@1.0.4", "", {}, "sha512-8DY+kFsDkNXy1sJglUfuODx1/opAGJGyrTuFqEoN90oRc2Vk0ZbD4K2qmKXBBEhZQzdKHIVfEJpDU8Ak2NJEvQ=="], + + "node-releases": ["node-releases@2.0.36", "", {}, "sha512-TdC8FSgHz8Mwtw9g5L4gR/Sh9XhSP/0DEkQxfEFXOpiul5IiHgHan2VhYYb6agDSfp4KuvltmGApc8HMgUrIkA=="], + + "normalize-path": ["normalize-path@3.0.0", "", {}, "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA=="], + + "nth-check": ["nth-check@2.1.1", "", { "dependencies": { "boolbase": "^1.0.0" } }, "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w=="], + + "obug": ["obug@2.1.1", "", {}, "sha512-uTqF9MuPraAQ+IsnPf366RG4cP9RtUi7MLO1N3KEc+wb0a6yKpeL0lmk2IB1jY5KHPAlTc6T/JRdC/YqxHNwkQ=="], + + "ofetch": ["ofetch@1.5.1", "", { "dependencies": { "destr": "^2.0.5", "node-fetch-native": "^1.6.7", "ufo": "^1.6.1" } }, "sha512-2W4oUZlVaqAPAil6FUg/difl6YhqhUR7x2eZY4bQCko22UXg3hptq9KLQdqFClV+Wu85UX7hNtdGTngi/1BxcA=="], + + "ohash": ["ohash@2.0.11", "", {}, "sha512-RdR9FQrFwNBNXAr4GixM8YaRZRJ5PUWbKYbE5eOsrwAjJW0q2REGcf79oYPsLyskQCZG1PLN+S/K1V00joZAoQ=="], + + "once": ["once@1.4.0", "", { "dependencies": { "wrappy": "1" } }, "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w=="], + + "onetime": ["onetime@5.1.2", "", { "dependencies": { "mimic-fn": "^2.1.0" } }, "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg=="], + + "oniguruma-parser": ["oniguruma-parser@0.12.1", "", {}, "sha512-8Unqkvk1RYc6yq2WBYRj4hdnsAxVze8i7iPfQr8e4uSP3tRv0rpZcbGUDvxfQQcdwHt/e9PrMvGCsa8OqG9X3w=="], + + "oniguruma-to-es": ["oniguruma-to-es@4.3.5", "", { "dependencies": { "oniguruma-parser": "^0.12.1", "regex": "^6.1.0", "regex-recursion": "^6.0.2" } }, 
"sha512-Zjygswjpsewa0NLTsiizVuMQZbp0MDyM6lIt66OxsF21npUDlzpHi1Mgb/qhQdkb+dWFTzJmFbEWdvZgRho8eQ=="], + + "optionator": ["optionator@0.9.4", "", { "dependencies": { "deep-is": "^0.1.3", "fast-levenshtein": "^2.0.6", "levn": "^0.4.1", "prelude-ls": "^1.2.1", "type-check": "^0.4.0", "word-wrap": "^1.2.5" } }, "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g=="], + + "ora": ["ora@5.4.1", "", { "dependencies": { "bl": "^4.1.0", "chalk": "^4.1.0", "cli-cursor": "^3.1.0", "cli-spinners": "^2.5.0", "is-interactive": "^1.0.0", "is-unicode-supported": "^0.1.0", "log-symbols": "^4.1.0", "strip-ansi": "^6.0.0", "wcwidth": "^1.0.1" } }, "sha512-5b6Y85tPxZZ7QytO+BQzysW31HJku27cRIlkbAXaNx+BdcVi+LlRFmVXzeF6a7JCwJpyw5c4b+YSVImQIrBpuQ=="], + + "os-tmpdir": ["os-tmpdir@1.0.2", "", {}, "sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g=="], + + "p-limit": ["p-limit@7.3.0", "", { "dependencies": { "yocto-queue": "^1.2.1" } }, "sha512-7cIXg/Z0M5WZRblrsOla88S4wAK+zOQQWeBYfV3qJuJXMr+LnbYjaadrFaS0JILfEDPVqHyKnZ1Z/1d6J9VVUw=="], + + "p-locate": ["p-locate@5.0.0", "", { "dependencies": { "p-limit": "^3.0.2" } }, "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw=="], + + "p-queue": ["p-queue@9.1.0", "", { "dependencies": { "eventemitter3": "^5.0.1", "p-timeout": "^7.0.0" } }, "sha512-O/ZPaXuQV29uSLbxWBGGZO1mCQXV2BLIwUr59JUU9SoH76mnYvtms7aafH/isNSNGwuEfP6W/4xD0/TJXxrizw=="], + + "p-timeout": ["p-timeout@7.0.1", "", {}, "sha512-AxTM2wDGORHGEkPCt8yqxOTMgpfbEHqF51f/5fJCmwFC3C/zNcGT63SymH2ttOAaiIws2zVg4+izQCjrakcwHg=="], + + "package-manager-detector": ["package-manager-detector@1.6.0", "", {}, "sha512-61A5ThoTiDG/C8s8UMZwSorAGwMJ0ERVGj2OjoW5pAalsNOg15+iQiPzrLJ4jhZ1HJzmC2PIHT2oEiH3R5fzNA=="], + + "parent-module": ["parent-module@1.0.1", "", { "dependencies": { "callsites": "^3.0.0" } }, 
"sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g=="], + + "parse-entities": ["parse-entities@4.0.2", "", { "dependencies": { "@types/unist": "^2.0.0", "character-entities-legacy": "^3.0.0", "character-reference-invalid": "^2.0.0", "decode-named-character-reference": "^1.0.0", "is-alphanumerical": "^2.0.0", "is-decimal": "^2.0.0", "is-hexadecimal": "^2.0.0" } }, "sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw=="], + + "parse-json": ["parse-json@5.2.0", "", { "dependencies": { "@babel/code-frame": "^7.0.0", "error-ex": "^1.3.1", "json-parse-even-better-errors": "^2.3.0", "lines-and-columns": "^1.1.6" } }, "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg=="], + + "parse-latin": ["parse-latin@7.0.0", "", { "dependencies": { "@types/nlcst": "^2.0.0", "@types/unist": "^3.0.0", "nlcst-to-string": "^4.0.0", "unist-util-modify-children": "^4.0.0", "unist-util-visit-children": "^3.0.0", "vfile": "^6.0.0" } }, "sha512-mhHgobPPua5kZ98EF4HWiH167JWBfl4pvAIXXdbaVohtK7a6YBOy56kvhCqduqyo/f3yrHFWmqmiMg/BkBkYYQ=="], + + "parse-passwd": ["parse-passwd@1.0.0", "", {}, "sha512-1Y1A//QUXEZK7YKz+rD9WydcE1+EuPr6ZBgKecAB8tmoW6UFv0NREVJe1p+jRxtThkcbbKkfwIbWJe/IeE6m2Q=="], + + "parse5": ["parse5@7.3.0", "", { "dependencies": { "entities": "^6.0.0" } }, "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw=="], + + "path-exists": ["path-exists@4.0.0", "", {}, "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w=="], + + "path-is-absolute": ["path-is-absolute@1.0.1", "", {}, "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg=="], + + "path-key": ["path-key@3.1.1", "", {}, "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q=="], + + "piccolore": ["piccolore@0.1.3", "", {}, 
"sha512-o8bTeDWjE086iwKrROaDf31K0qC/BENdm15/uH9usSC/uZjJOKb2YGiVHfLY4GhwsERiPI1jmwI2XrA7ACOxVw=="], + + "picocolors": ["picocolors@1.1.1", "", {}, "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA=="], + + "picomatch": ["picomatch@4.0.4", "", {}, "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A=="], + + "playwright": ["playwright@1.58.2", "", { "dependencies": { "playwright-core": "1.58.2" }, "optionalDependencies": { "fsevents": "2.3.2" }, "bin": { "playwright": "cli.js" } }, "sha512-vA30H8Nvkq/cPBnNw4Q8TWz1EJyqgpuinBcHET0YVJVFldr8JDNiU9LaWAE1KqSkRYazuaBhTpB5ZzShOezQ6A=="], + + "playwright-core": ["playwright-core@1.58.2", "", { "bin": { "playwright-core": "cli.js" } }, "sha512-yZkEtftgwS8CsfYo7nm0KE8jsvm6i/PTgVtB8DL726wNf6H2IMsDuxCpJj59KDaxCtSnrWan2AeDqM7JBaultg=="], + + "postcss": ["postcss@8.5.8", "", { "dependencies": { "nanoid": "^3.3.11", "picocolors": "^1.1.1", "source-map-js": "^1.2.1" } }, "sha512-OW/rX8O/jXnm82Ey1k44pObPtdblfiuWnrd8X7GJ7emImCOstunGbXUpp7HdBrFQX6rJzn3sPT397Wp5aCwCHg=="], + + "postcss-value-parser": ["postcss-value-parser@4.2.0", "", {}, "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ=="], + + "prelude-ls": ["prelude-ls@1.2.1", "", {}, "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g=="], + + "prettier": ["prettier@3.8.1", "", { "bin": { "prettier": "bin/prettier.cjs" } }, "sha512-UOnG6LftzbdaHZcKoPFtOcCKztrQ57WkHDeRD9t/PTQtmT0NHSeWWepj6pS0z/N7+08BHFDQVUrfmfMRcZwbMg=="], + + "prettier-linter-helpers": ["prettier-linter-helpers@1.0.1", "", { "dependencies": { "fast-diff": "^1.1.2" } }, "sha512-SxToR7P8Y2lWmv/kTzVLC1t/GDI2WGjMwNhLLE9qtH8Q13C+aEmuRlzDst4Up4s0Wc8sF2M+J57iB3cMLqftfg=="], + + "prettier-plugin-astro": ["prettier-plugin-astro@0.14.1", "", { "dependencies": { "@astrojs/compiler": "^2.9.1", "prettier": "^3.0.0", "sass-formatter": "^0.7.6" } }, 
"sha512-RiBETaaP9veVstE4vUwSIcdATj6dKmXljouXc/DDNwBSPTp8FRkLGDSGFClKsAFeeg+13SB0Z1JZvbD76bigJw=="], + + "prismjs": ["prismjs@1.30.0", "", {}, "sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw=="], + + "property-information": ["property-information@7.1.0", "", {}, "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ=="], + + "punycode": ["punycode@2.3.1", "", {}, "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg=="], + + "radix3": ["radix3@1.1.2", "", {}, "sha512-b484I/7b8rDEdSDKckSSBA8knMpcdsXudlE/LNL639wFoHKwLbEkQFZHWEYwDC0wa0FKUcCY+GAF73Z7wxNVFA=="], + + "react": ["react@19.2.4", "", {}, "sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ=="], + + "react-dom": ["react-dom@19.2.4", "", { "dependencies": { "scheduler": "^0.27.0" }, "peerDependencies": { "react": "^19.2.4" } }, "sha512-AXJdLo8kgMbimY95O2aKQqsz2iWi9jMgKJhRBAxECE4IFxfcazB2LmzloIoibJI3C12IlY20+KFaLv+71bUJeQ=="], + + "react-fast-marquee": ["react-fast-marquee@1.6.5", "", { "peerDependencies": { "react": ">= 16.8.0 || ^18.0.0", "react-dom": ">= 16.8.0 || ^18.0.0" } }, "sha512-swDnPqrT2XISAih0o74zQVE2wQJFMvkx+9VZXYYNSLb/CUcAzU9pNj637Ar2+hyRw6b4tP6xh4GQZip2ZCpQpg=="], + + "react-markdown": ["react-markdown@10.1.0", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "devlop": "^1.0.0", "hast-util-to-jsx-runtime": "^2.0.0", "html-url-attributes": "^3.0.0", "mdast-util-to-hast": "^13.0.0", "remark-parse": "^11.0.0", "remark-rehype": "^11.0.0", "unified": "^11.0.0", "unist-util-visit": "^5.0.0", "vfile": "^6.0.0" }, "peerDependencies": { "@types/react": ">=18", "react": ">=18" } }, "sha512-qKxVopLT/TyA6BX3Ue5NwabOsAzm0Q7kAPwq6L+wWDwisYs7R8vZ0nRXqq6rkueboxpkjvLGU9fWifiX/ZZFxQ=="], + + "react-refresh": ["react-refresh@0.18.0", "", {}, 
"sha512-QgT5//D3jfjJb6Gsjxv0Slpj23ip+HtOpnNgnb2S5zU3CB26G/IDPGoy4RJB42wzFE46DRsstbW6tKHoKbhAxw=="], + + "readable-stream": ["readable-stream@3.6.2", "", { "dependencies": { "inherits": "^2.0.3", "string_decoder": "^1.1.1", "util-deprecate": "^1.0.1" } }, "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA=="], + + "readdirp": ["readdirp@5.0.0", "", {}, "sha512-9u/XQ1pvrQtYyMpZe7DXKv2p5CNvyVwzUB6uhLAnQwHMSgKMBR62lc7AHljaeteeHXn11XTAaLLUVZYVZyuRBQ=="], + + "regex": ["regex@6.1.0", "", { "dependencies": { "regex-utilities": "^2.3.0" } }, "sha512-6VwtthbV4o/7+OaAF9I5L5V3llLEsoPyq9P1JVXkedTP33c7MfCG0/5NOPcSJn0TzXcG9YUrR0gQSWioew3LDg=="], + + "regex-recursion": ["regex-recursion@6.0.2", "", { "dependencies": { "regex-utilities": "^2.3.0" } }, "sha512-0YCaSCq2VRIebiaUviZNs0cBz1kg5kVS2UKUfNIx8YVs1cN3AV7NTctO5FOKBA+UT2BPJIWZauYHPqJODG50cg=="], + + "regex-utilities": ["regex-utilities@2.3.0", "", {}, "sha512-8VhliFJAWRaUiVvREIiW2NXXTmHs4vMNnSzuJVhscgmGav3g9VDxLrQndI3dZZVVdp0ZO/5v0xmX516/7M9cng=="], + + "rehype": ["rehype@13.0.2", "", { "dependencies": { "@types/hast": "^3.0.0", "rehype-parse": "^9.0.0", "rehype-stringify": "^10.0.0", "unified": "^11.0.0" } }, "sha512-j31mdaRFrwFRUIlxGeuPXXKWQxet52RBQRvCmzl5eCefn/KGbomK5GMHNMsOJf55fgo3qw5tST5neDuarDYR2A=="], + + "rehype-parse": ["rehype-parse@9.0.1", "", { "dependencies": { "@types/hast": "^3.0.0", "hast-util-from-html": "^2.0.0", "unified": "^11.0.0" } }, "sha512-ksCzCD0Fgfh7trPDxr2rSylbwq9iYDkSn8TCDmEJ49ljEUBxDVCzCHv7QNzZOfODanX4+bWQ4WZqLCRWYLfhag=="], + + "rehype-raw": ["rehype-raw@7.0.0", "", { "dependencies": { "@types/hast": "^3.0.0", "hast-util-raw": "^9.0.0", "vfile": "^6.0.0" } }, "sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww=="], + + "rehype-stringify": ["rehype-stringify@10.0.1", "", { "dependencies": { "@types/hast": "^3.0.0", "hast-util-to-html": "^9.0.0", "unified": "^11.0.0" } }, 
"sha512-k9ecfXHmIPuFVI61B9DeLPN0qFHfawM6RsuX48hoqlaKSF61RskNjSm1lI8PhBEM0MRdLxVVm4WmTqJQccH9mA=="], + + "remark-gfm": ["remark-gfm@4.0.1", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-gfm": "^3.0.0", "micromark-extension-gfm": "^3.0.0", "remark-parse": "^11.0.0", "remark-stringify": "^11.0.0", "unified": "^11.0.0" } }, "sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg=="], + + "remark-parse": ["remark-parse@11.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-from-markdown": "^2.0.0", "micromark-util-types": "^2.0.0", "unified": "^11.0.0" } }, "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA=="], + + "remark-rehype": ["remark-rehype@11.1.2", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "mdast-util-to-hast": "^13.0.0", "unified": "^11.0.0", "vfile": "^6.0.0" } }, "sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw=="], + + "remark-smartypants": ["remark-smartypants@3.0.2", "", { "dependencies": { "retext": "^9.0.0", "retext-smartypants": "^6.0.0", "unified": "^11.0.4", "unist-util-visit": "^5.0.0" } }, "sha512-ILTWeOriIluwEvPjv67v7Blgrcx+LZOkAUVtKI3putuhlZm84FnqDORNXPPm+HY3NdZOMhyDwZ1E+eZB/Df5dA=="], + + "remark-stringify": ["remark-stringify@11.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-to-markdown": "^2.0.0", "unified": "^11.0.0" } }, "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw=="], + + "require-directory": ["require-directory@2.1.1", "", {}, "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q=="], + + "require-from-string": ["require-from-string@2.0.2", "", {}, "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw=="], + + "resolve-dir": ["resolve-dir@1.0.1", "", { "dependencies": { "expand-tilde": "^2.0.0", 
"global-modules": "^1.0.0" } }, "sha512-R7uiTjECzvOsWSfdM0QKFNBVFcK27aHOUwdvK53BcW8zqnGdYp0Fbj82cy54+2A4P2tFM22J5kRfe1R+lM/1yg=="], + + "resolve-from": ["resolve-from@5.0.0", "", {}, "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw=="], + + "restore-cursor": ["restore-cursor@3.1.0", "", { "dependencies": { "onetime": "^5.1.0", "signal-exit": "^3.0.2" } }, "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA=="], + + "retext": ["retext@9.0.0", "", { "dependencies": { "@types/nlcst": "^2.0.0", "retext-latin": "^4.0.0", "retext-stringify": "^4.0.0", "unified": "^11.0.0" } }, "sha512-sbMDcpHCNjvlheSgMfEcVrZko3cDzdbe1x/e7G66dFp0Ff7Mldvi2uv6JkJQzdRcvLYE8CA8Oe8siQx8ZOgTcA=="], + + "retext-latin": ["retext-latin@4.0.0", "", { "dependencies": { "@types/nlcst": "^2.0.0", "parse-latin": "^7.0.0", "unified": "^11.0.0" } }, "sha512-hv9woG7Fy0M9IlRQloq/N6atV82NxLGveq+3H2WOi79dtIYWN8OaxogDm77f8YnVXJL2VD3bbqowu5E3EMhBYA=="], + + "retext-smartypants": ["retext-smartypants@6.2.0", "", { "dependencies": { "@types/nlcst": "^2.0.0", "nlcst-to-string": "^4.0.0", "unist-util-visit": "^5.0.0" } }, "sha512-kk0jOU7+zGv//kfjXEBjdIryL1Acl4i9XNkHxtM7Tm5lFiCog576fjNC9hjoR7LTKQ0DsPWy09JummSsH1uqfQ=="], + + "retext-stringify": ["retext-stringify@4.0.0", "", { "dependencies": { "@types/nlcst": "^2.0.0", "nlcst-to-string": "^4.0.0", "unified": "^11.0.0" } }, "sha512-rtfN/0o8kL1e+78+uxPTqu1Klt0yPzKuQ2BfWwwfgIUSayyzxpM1PJzkKt4V8803uB9qSy32MvI7Xep9khTpiA=="], + + "rfdc": ["rfdc@1.4.1", "", {}, "sha512-q1b3N5QkRUWUl7iyylaaj3kOpIT0N2i9MqIEQXP73GVsN9cw3fdx8X63cEmWhJGi2PPCF23Ijp7ktmd39rawIA=="], + + "rollup": ["rollup@4.60.1", "", { "dependencies": { "@types/estree": "1.0.8" }, "optionalDependencies": { "@rollup/rollup-android-arm-eabi": "4.60.1", "@rollup/rollup-android-arm64": "4.60.1", "@rollup/rollup-darwin-arm64": "4.60.1", "@rollup/rollup-darwin-x64": "4.60.1", "@rollup/rollup-freebsd-arm64": "4.60.1", 
"@rollup/rollup-freebsd-x64": "4.60.1", "@rollup/rollup-linux-arm-gnueabihf": "4.60.1", "@rollup/rollup-linux-arm-musleabihf": "4.60.1", "@rollup/rollup-linux-arm64-gnu": "4.60.1", "@rollup/rollup-linux-arm64-musl": "4.60.1", "@rollup/rollup-linux-loong64-gnu": "4.60.1", "@rollup/rollup-linux-loong64-musl": "4.60.1", "@rollup/rollup-linux-ppc64-gnu": "4.60.1", "@rollup/rollup-linux-ppc64-musl": "4.60.1", "@rollup/rollup-linux-riscv64-gnu": "4.60.1", "@rollup/rollup-linux-riscv64-musl": "4.60.1", "@rollup/rollup-linux-s390x-gnu": "4.60.1", "@rollup/rollup-linux-x64-gnu": "4.60.1", "@rollup/rollup-linux-x64-musl": "4.60.1", "@rollup/rollup-openbsd-x64": "4.60.1", "@rollup/rollup-openharmony-arm64": "4.60.1", "@rollup/rollup-win32-arm64-msvc": "4.60.1", "@rollup/rollup-win32-ia32-msvc": "4.60.1", "@rollup/rollup-win32-x64-gnu": "4.60.1", "@rollup/rollup-win32-x64-msvc": "4.60.1", "fsevents": "~2.3.2" }, "bin": { "rollup": "dist/bin/rollup" } }, "sha512-VmtB2rFU/GroZ4oL8+ZqXgSA38O6GR8KSIvWmEFv63pQ0G6KaBH9s07PO8XTXP4vI+3UJUEypOfjkGfmSBBR0w=="], + + "run-async": ["run-async@2.4.1", "", {}, "sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ=="], + + "rxjs": ["rxjs@7.8.2", "", { "dependencies": { "tslib": "^2.1.0" } }, "sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA=="], + + "s.color": ["s.color@0.0.15", "", {}, "sha512-AUNrbEUHeKY8XsYr/DYpl+qk5+aM+DChopnWOPEzn8YKzOhv4l2zH6LzZms3tOZP3wwdOyc0RmTciyi46HLIuA=="], + + "safe-buffer": ["safe-buffer@5.2.1", "", {}, "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ=="], + + "safer-buffer": ["safer-buffer@2.1.2", "", {}, "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="], + + "sass": ["sass@1.98.0", "", { "dependencies": { "chokidar": "^4.0.0", "immutable": "^5.1.5", "source-map-js": ">=0.6.2 <2.0.0" }, "optionalDependencies": { "@parcel/watcher": 
"^2.4.1" }, "bin": { "sass": "sass.js" } }, "sha512-+4N/u9dZ4PrgzGgPlKnaaRQx64RO0JBKs9sDhQ2pLgN6JQZ25uPQZKQYaBJU48Kd5BxgXoJ4e09Dq7nMcOUW3A=="], + + "sass-formatter": ["sass-formatter@0.7.9", "", { "dependencies": { "suf-log": "^2.5.3" } }, "sha512-CWZ8XiSim+fJVG0cFLStwDvft1VI7uvXdCNJYXhDvowiv+DsbD1nXLiQ4zrE5UBvj5DWZJ93cwN0NX5PMsr1Pw=="], + + "sax": ["sax@1.6.0", "", {}, "sha512-6R3J5M4AcbtLUdZmRv2SygeVaM7IhrLXu9BmnOGmmACak8fiUtOsYNWUS4uK7upbmHIBbLBeFeI//477BKLBzA=="], + + "scheduler": ["scheduler@0.27.0", "", {}, "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q=="], + + "semver": ["semver@7.7.4", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA=="], + + "sharp": ["sharp@0.34.5", "", { "dependencies": { "@img/colour": "^1.0.0", "detect-libc": "^2.1.2", "semver": "^7.7.3" }, "optionalDependencies": { "@img/sharp-darwin-arm64": "0.34.5", "@img/sharp-darwin-x64": "0.34.5", "@img/sharp-libvips-darwin-arm64": "1.2.4", "@img/sharp-libvips-darwin-x64": "1.2.4", "@img/sharp-libvips-linux-arm": "1.2.4", "@img/sharp-libvips-linux-arm64": "1.2.4", "@img/sharp-libvips-linux-ppc64": "1.2.4", "@img/sharp-libvips-linux-riscv64": "1.2.4", "@img/sharp-libvips-linux-s390x": "1.2.4", "@img/sharp-libvips-linux-x64": "1.2.4", "@img/sharp-libvips-linuxmusl-arm64": "1.2.4", "@img/sharp-libvips-linuxmusl-x64": "1.2.4", "@img/sharp-linux-arm": "0.34.5", "@img/sharp-linux-arm64": "0.34.5", "@img/sharp-linux-ppc64": "0.34.5", "@img/sharp-linux-riscv64": "0.34.5", "@img/sharp-linux-s390x": "0.34.5", "@img/sharp-linux-x64": "0.34.5", "@img/sharp-linuxmusl-arm64": "0.34.5", "@img/sharp-linuxmusl-x64": "0.34.5", "@img/sharp-wasm32": "0.34.5", "@img/sharp-win32-arm64": "0.34.5", "@img/sharp-win32-ia32": "0.34.5", "@img/sharp-win32-x64": "0.34.5" } }, "sha512-Ou9I5Ft9WNcCbXrU9cMgPBcCK8LiwLqcbywW3t4oDV37n1pzpuNLsYiAV8eODnjbtQlSDwZ2cUEeQz4E54Hltg=="], + + 
"shebang-command": ["shebang-command@2.0.0", "", { "dependencies": { "shebang-regex": "^3.0.0" } }, "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA=="], + + "shebang-regex": ["shebang-regex@3.0.0", "", {}, "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A=="], + + "shiki": ["shiki@4.0.2", "", { "dependencies": { "@shikijs/core": "4.0.2", "@shikijs/engine-javascript": "4.0.2", "@shikijs/engine-oniguruma": "4.0.2", "@shikijs/langs": "4.0.2", "@shikijs/themes": "4.0.2", "@shikijs/types": "4.0.2", "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4" } }, "sha512-eAVKTMedR5ckPo4xne/PjYQYrU3qx78gtJZ+sHlXEg5IHhhoQhMfZVzetTYuaJS0L2Ef3AcCRzCHV8T0WI6nIQ=="], + + "signal-exit": ["signal-exit@3.0.7", "", {}, "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ=="], + + "sisteransi": ["sisteransi@1.0.5", "", {}, "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg=="], + + "sitemap": ["sitemap@9.0.1", "", { "dependencies": { "@types/node": "^24.9.2", "@types/sax": "^1.2.1", "arg": "^5.0.0", "sax": "^1.4.1" }, "bin": { "sitemap": "dist/esm/cli.js" } }, "sha512-S6hzjGJSG3d6if0YoF5kTyeRJvia6FSTBroE5fQ0bu1QNxyJqhhinfUsXi9fH3MgtXODWvwo2BDyQSnhPQ88uQ=="], + + "slice-ansi": ["slice-ansi@8.0.0", "", { "dependencies": { "ansi-styles": "^6.2.3", "is-fullwidth-code-point": "^5.1.0" } }, "sha512-stxByr12oeeOyY2BlviTNQlYV5xOj47GirPr4yA1hE9JCtxfQN0+tVbkxwCtYDQWhEKWFHsEK48ORg5jrouCAg=="], + + "smol-toml": ["smol-toml@1.6.1", "", {}, "sha512-dWUG8F5sIIARXih1DTaQAX4SsiTXhInKf1buxdY9DIg4ZYPZK5nGM1VRIYmEbDbsHt7USo99xSLFu5Q1IqTmsg=="], + + "source-map": ["source-map@0.6.1", "", {}, "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="], + + "source-map-js": ["source-map-js@1.2.1", "", {}, 
"sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA=="], + + "source-map-support": ["source-map-support@0.5.21", "", { "dependencies": { "buffer-from": "^1.0.0", "source-map": "^0.6.0" } }, "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w=="], + + "space-separated-tokens": ["space-separated-tokens@2.0.2", "", {}, "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q=="], + + "stream-replace-string": ["stream-replace-string@2.0.0", "", {}, "sha512-TlnjJ1C0QrmxRNrON00JvaFFlNh5TTG00APw23j74ET7gkQpTASi6/L2fuiav8pzK715HXtUeClpBTw2NPSn6w=="], + + "string-argv": ["string-argv@0.3.2", "", {}, "sha512-aqD2Q0144Z+/RqG52NeHEkZauTAUWJO8c6yTftGJKO3Tja5tUgIfmIl6kExvhtxSDP7fXB6DvzkfMpCd/F3G+Q=="], + + "string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="], + + "string_decoder": ["string_decoder@1.3.0", "", { "dependencies": { "safe-buffer": "~5.2.0" } }, "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA=="], + + "stringify-entities": ["stringify-entities@4.0.4", "", { "dependencies": { "character-entities-html4": "^2.0.0", "character-entities-legacy": "^3.0.0" } }, "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg=="], + + "strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="], + + "strip-bom": ["strip-bom@4.0.0", "", {}, "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w=="], + + "strip-json-comments": ["strip-json-comments@3.1.1", "", {}, 
"sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig=="], + + "style-to-js": ["style-to-js@1.1.21", "", { "dependencies": { "style-to-object": "1.0.14" } }, "sha512-RjQetxJrrUJLQPHbLku6U/ocGtzyjbJMP9lCNK7Ag0CNh690nSH8woqWH9u16nMjYBAok+i7JO1NP2pOy8IsPQ=="], + + "style-to-object": ["style-to-object@1.0.14", "", { "dependencies": { "inline-style-parser": "0.2.7" } }, "sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw=="], + + "suf-log": ["suf-log@2.5.3", "", { "dependencies": { "s.color": "0.0.15" } }, "sha512-KvC8OPjzdNOe+xQ4XWJV2whQA0aM1kGVczMQ8+dStAO6KfEB140JEVQ9dE76ONZ0/Ylf67ni4tILPJB41U0eow=="], + + "supports-color": ["supports-color@5.5.0", "", { "dependencies": { "has-flag": "^3.0.0" } }, "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow=="], + + "svgo": ["svgo@4.0.1", "", { "dependencies": { "commander": "^11.1.0", "css-select": "^5.1.0", "css-tree": "^3.0.1", "css-what": "^6.1.0", "csso": "^5.0.5", "picocolors": "^1.1.1", "sax": "^1.5.0" }, "bin": "./bin/svgo.js" }, "sha512-XDpWUOPC6FEibaLzjfe0ucaV0YrOjYotGJO1WpF0Zd+n6ZGEQUsSugaoLq9QkEZtAfQIxT42UChcssDVPP3+/w=="], + + "synckit": ["synckit@0.11.12", "", { "dependencies": { "@pkgr/core": "^0.2.9" } }, "sha512-Bh7QjT8/SuKUIfObSXNHNSK6WHo6J1tHCqJsuaFDP7gP0fkzSfTxI8y85JrppZ0h8l0maIgc2tfuZQ6/t3GtnQ=="], + + "tailwind-merge": ["tailwind-merge@3.5.0", "", {}, "sha512-I8K9wewnVDkL1NTGoqWmVEIlUcB9gFriAEkXkfCjX5ib8ezGxtR3xD7iZIxrfArjEsH7F1CHD4RFUtxefdqV/A=="], + + "tailwindcss": ["tailwindcss@4.2.2", "", {}, "sha512-KWBIxs1Xb6NoLdMVqhbhgwZf2PGBpPEiwOqgI4pFIYbNTfBXiKYyWoTsXgBQ9WFg/OlhnvHaY+AEpW7wSmFo2Q=="], + + "tapable": ["tapable@2.3.2", "", {}, "sha512-1MOpMXuhGzGL5TTCZFItxCc0AARf1EZFQkGqMm7ERKj8+Hgr5oLvJOVFcC+lRmR8hCe2S3jC4T5D7Vg/d7/fhA=="], + + "terser": ["terser@5.16.9", "", { "dependencies": { "@jridgewell/source-map": "^0.3.2", "acorn": "^8.5.0", "commander": "^2.20.0", 
"source-map-support": "~0.5.20" }, "bin": { "terser": "bin/terser" } }, "sha512-HPa/FdTB9XGI2H1/keLFZHxl6WNvAI4YalHGtDQTlMnJcoqSab1UwL4l1hGEhs6/GmLHBZIg/YgB++jcbzoOEg=="], + + "through": ["through@2.3.8", "", {}, "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg=="], + + "tiny-inflate": ["tiny-inflate@1.0.3", "", {}, "sha512-pkY1fj1cKHb2seWDy0B16HeWyczlJA9/WW3u3c4z/NiWDsO3DOU5D7nhTLE9CF0yXv/QZFY7sEJmj24dK+Rrqw=="], + + "tinyclip": ["tinyclip@0.1.12", "", {}, "sha512-Ae3OVUqifDw0wBriIBS7yVaW44Dp6eSHQcyq4Igc7eN2TJH/2YsicswaW+J/OuMvhpDPOKEgpAZCjkb4hpoyeA=="], + + "tinyexec": ["tinyexec@1.0.4", "", {}, "sha512-u9r3uZC0bdpGOXtlxUIdwf9pkmvhqJdrVCH9fapQtgy/OeTTMZ1nqH7agtvEfmGui6e1XxjcdrlxvxJvc3sMqw=="], + + "tinyglobby": ["tinyglobby@0.2.15", "", { "dependencies": { "fdir": "^6.5.0", "picomatch": "^4.0.3" } }, "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ=="], + + "tmp": ["tmp@0.0.33", "", { "dependencies": { "os-tmpdir": "~1.0.2" } }, "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw=="], + + "to-regex-range": ["to-regex-range@5.0.1", "", { "dependencies": { "is-number": "^7.0.0" } }, "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ=="], + + "trim-lines": ["trim-lines@3.0.1", "", {}, "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg=="], + + "trough": ["trough@2.2.0", "", {}, "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw=="], + + "ts-api-utils": ["ts-api-utils@2.5.0", "", { "peerDependencies": { "typescript": ">=4.8.4" } }, "sha512-OJ/ibxhPlqrMM0UiNHJ/0CKQkoKF243/AEmplt3qpRgkW8VG7IfOS41h7V8TjITqdByHzrjcS/2si+y4lIh8NA=="], + + "tsconfck": ["tsconfck@3.1.6", "", { "peerDependencies": { "typescript": "^5.0.0" }, "optionalPeers": ["typescript"], "bin": { "tsconfck": "bin/tsconfck.js" } }, 
"sha512-ks6Vjr/jEw0P1gmOVwutM3B7fWxoWBL2KRDb1JfqGVawBmO5UsvmWOQFGHBPl5yxYz4eERr19E6L7NMv+Fej4w=="], + + "tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="], + + "type-check": ["type-check@0.4.0", "", { "dependencies": { "prelude-ls": "^1.2.1" } }, "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew=="], + + "type-fest": ["type-fest@0.21.3", "", {}, "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w=="], + + "typescript": ["typescript@5.9.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw=="], + + "ufo": ["ufo@1.6.3", "", {}, "sha512-yDJTmhydvl5lJzBmy/hyOAA0d+aqCBuwl818haVdYCRrWV84o7YyeVm4QlVHStqNrrJSTb6jKuFAVqAFsr+K3Q=="], + + "ultrahtml": ["ultrahtml@1.6.0", "", {}, "sha512-R9fBn90VTJrqqLDwyMph+HGne8eqY1iPfYhPzZrvKpIfwkWZbcYlfpsb8B9dTvBfpy1/hqAD7Wi8EKfP9e8zdw=="], + + "uncrypto": ["uncrypto@0.1.3", "", {}, "sha512-Ql87qFHB3s/De2ClA9e0gsnS6zXG27SkTiSJwjCc9MebbfapQfuPzumMIUMi38ezPZVNFcHI9sUIepeQfw8J8Q=="], + + "undici-types": ["undici-types@7.18.2", "", {}, "sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w=="], + + "unified": ["unified@11.0.5", "", { "dependencies": { "@types/unist": "^3.0.0", "bail": "^2.0.0", "devlop": "^1.0.0", "extend": "^3.0.0", "is-plain-obj": "^4.0.0", "trough": "^2.0.0", "vfile": "^6.0.0" } }, "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA=="], + + "unifont": ["unifont@0.7.4", "", { "dependencies": { "css-tree": "^3.1.0", "ofetch": "^1.5.1", "ohash": "^2.0.11" } }, "sha512-oHeis4/xl42HUIeHuNZRGEvxj5AaIKR+bHPNegRq5LV1gdc3jundpONbjglKpihmJf+dswygdMJn3eftGIMemg=="], + + "unist-util-find-after": ["unist-util-find-after@5.0.0", "", { "dependencies": { "@types/unist": "^3.0.0", 
"unist-util-is": "^6.0.0" } }, "sha512-amQa0Ep2m6hE2g72AugUItjbuM8X8cGQnFoHk0pGfrFeT9GZhzN5SW8nRsiGKK7Aif4CrACPENkA6P/Lw6fHGQ=="], + + "unist-util-is": ["unist-util-is@6.0.1", "", { "dependencies": { "@types/unist": "^3.0.0" } }, "sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g=="], + + "unist-util-modify-children": ["unist-util-modify-children@4.0.0", "", { "dependencies": { "@types/unist": "^3.0.0", "array-iterate": "^2.0.0" } }, "sha512-+tdN5fGNddvsQdIzUF3Xx82CU9sMM+fA0dLgR9vOmT0oPT2jH+P1nd5lSqfCfXAw+93NhcXNY2qqvTUtE4cQkw=="], + + "unist-util-position": ["unist-util-position@5.0.0", "", { "dependencies": { "@types/unist": "^3.0.0" } }, "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA=="], + + "unist-util-remove-position": ["unist-util-remove-position@5.0.0", "", { "dependencies": { "@types/unist": "^3.0.0", "unist-util-visit": "^5.0.0" } }, "sha512-Hp5Kh3wLxv0PHj9m2yZhhLt58KzPtEYKQQ4yxfYFEO7EvHwzyDYnduhHnY1mDxoqr7VUwVuHXk9RXKIiYS1N8Q=="], + + "unist-util-stringify-position": ["unist-util-stringify-position@4.0.0", "", { "dependencies": { "@types/unist": "^3.0.0" } }, "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ=="], + + "unist-util-visit": ["unist-util-visit@5.1.0", "", { "dependencies": { "@types/unist": "^3.0.0", "unist-util-is": "^6.0.0", "unist-util-visit-parents": "^6.0.0" } }, "sha512-m+vIdyeCOpdr/QeQCu2EzxX/ohgS8KbnPDgFni4dQsfSCtpz8UqDyY5GjRru8PDKuYn7Fq19j1CQ+nJSsGKOzg=="], + + "unist-util-visit-children": ["unist-util-visit-children@3.0.0", "", { "dependencies": { "@types/unist": "^3.0.0" } }, "sha512-RgmdTfSBOg04sdPcpTSD1jzoNBjt9a80/ZCzp5cI9n1qPzLZWF9YdvWGN2zmTumP1HWhXKdUWexjy/Wy/lJ7tA=="], + + "unist-util-visit-parents": ["unist-util-visit-parents@6.0.2", "", { "dependencies": { "@types/unist": "^3.0.0", "unist-util-is": "^6.0.0" } }, 
"sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ=="], + + "universalify": ["universalify@2.0.1", "", {}, "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw=="], + + "unstorage": ["unstorage@1.17.5", "", { "dependencies": { "anymatch": "^3.1.3", "chokidar": "^5.0.0", "destr": "^2.0.5", "h3": "^1.15.10", "lru-cache": "^11.2.7", "node-fetch-native": "^1.6.7", "ofetch": "^1.5.1", "ufo": "^1.6.3" }, "peerDependencies": { "@azure/app-configuration": "^1.8.0", "@azure/cosmos": "^4.2.0", "@azure/data-tables": "^13.3.0", "@azure/identity": "^4.6.0", "@azure/keyvault-secrets": "^4.9.0", "@azure/storage-blob": "^12.26.0", "@capacitor/preferences": "^6 || ^7 || ^8", "@deno/kv": ">=0.9.0", "@netlify/blobs": "^6.5.0 || ^7.0.0 || ^8.1.0 || ^9.0.0 || ^10.0.0", "@planetscale/database": "^1.19.0", "@upstash/redis": "^1.34.3", "@vercel/blob": ">=0.27.1", "@vercel/functions": "^2.2.12 || ^3.0.0", "@vercel/kv": "^1 || ^2 || ^3", "aws4fetch": "^1.0.20", "db0": ">=0.2.1", "idb-keyval": "^6.2.1", "ioredis": "^5.4.2", "uploadthing": "^7.4.4" }, "optionalPeers": ["@azure/app-configuration", "@azure/cosmos", "@azure/data-tables", "@azure/identity", "@azure/keyvault-secrets", "@azure/storage-blob", "@capacitor/preferences", "@deno/kv", "@netlify/blobs", "@planetscale/database", "@upstash/redis", "@vercel/blob", "@vercel/functions", "@vercel/kv", "aws4fetch", "db0", "idb-keyval", "ioredis", "uploadthing"] }, "sha512-0i3iqvRfx29hkNntHyQvJTpf5W9dQ9ZadSoRU8+xVlhVtT7jAX57fazYO9EHvcRCfBCyi5YRya7XCDOsbTgkPg=="], + + "update-browserslist-db": ["update-browserslist-db@1.2.3", "", { "dependencies": { "escalade": "^3.2.0", "picocolors": "^1.1.1" }, "peerDependencies": { "browserslist": ">= 4.21.0" }, "bin": { "update-browserslist-db": "cli.js" } }, "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w=="], + + "uri-js": ["uri-js@4.4.1", "", { "dependencies": { "punycode": 
"^2.1.0" } }, "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg=="], + + "util-deprecate": ["util-deprecate@1.0.2", "", {}, "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="], + + "vfile": ["vfile@6.0.3", "", { "dependencies": { "@types/unist": "^3.0.0", "vfile-message": "^4.0.0" } }, "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q=="], + + "vfile-location": ["vfile-location@5.0.3", "", { "dependencies": { "@types/unist": "^3.0.0", "vfile": "^6.0.0" } }, "sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg=="], + + "vfile-message": ["vfile-message@4.0.3", "", { "dependencies": { "@types/unist": "^3.0.0", "unist-util-stringify-position": "^4.0.0" } }, "sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw=="], + + "vite": ["vite@7.3.1", "", { "dependencies": { "esbuild": "^0.27.0", "fdir": "^6.5.0", "picomatch": "^4.0.3", "postcss": "^8.5.6", "rollup": "^4.43.0", "tinyglobby": "^0.2.15" }, "optionalDependencies": { "fsevents": "~2.3.3" }, "peerDependencies": { "@types/node": "^20.19.0 || >=22.12.0", "jiti": ">=1.21.0", "less": "^4.0.0", "lightningcss": "^1.21.0", "sass": "^1.70.0", "sass-embedded": "^1.70.0", "stylus": ">=0.54.8", "sugarss": "^5.0.0", "terser": "^5.16.0", "tsx": "^4.8.1", "yaml": "^2.4.2" }, "optionalPeers": ["@types/node", "jiti", "less", "lightningcss", "sass", "sass-embedded", "stylus", "sugarss", "terser", "tsx", "yaml"], "bin": { "vite": "bin/vite.js" } }, "sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA=="], + + "vitefu": ["vitefu@1.1.2", "", { "peerDependencies": { "vite": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-beta.0" }, "optionalPeers": ["vite"] }, "sha512-zpKATdUbzbsycPFBN71nS2uzBUQiVnFoOrr2rvqv34S1lcAgMKKkjWleLGeiJlZ8lwCXvtWaRn7R3ZC16SYRuw=="], + + 
"wcwidth": ["wcwidth@1.0.1", "", { "dependencies": { "defaults": "^1.0.3" } }, "sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg=="], + + "web-namespaces": ["web-namespaces@2.0.1", "", {}, "sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ=="], + + "which": ["which@2.0.2", "", { "dependencies": { "isexe": "^2.0.0" }, "bin": { "node-which": "./bin/node-which" } }, "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA=="], + + "which-pm-runs": ["which-pm-runs@1.1.0", "", {}, "sha512-n1brCuqClxfFfq/Rb0ICg9giSZqCS+pLtccdag6C2HyufBrh3fBOiy9nb6ggRMvWOVH5GrdJskj5iGTZNxd7SA=="], + + "word-wrap": ["word-wrap@1.2.5", "", {}, "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA=="], + + "wrap-ansi": ["wrap-ansi@7.0.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q=="], + + "wrappy": ["wrappy@1.0.2", "", {}, "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="], + + "xxhash-wasm": ["xxhash-wasm@1.1.0", "", {}, "sha512-147y/6YNh+tlp6nd/2pWq38i9h6mz/EuQ6njIrmW8D1BS5nCqs0P6DG+m6zTGnNz5I+uhZ0SHxBs9BsPrwcKDA=="], + + "y18n": ["y18n@5.0.8", "", {}, "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA=="], + + "yallist": ["yallist@3.1.1", "", {}, "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g=="], + + "yaml": ["yaml@2.8.3", "", { "bin": { "yaml": "bin.mjs" } }, "sha512-AvbaCLOO2Otw/lW5bmh9d/WEdcDFdQp2Z2ZUH3pX9U2ihyUY0nvLv7J6TrWowklRGPYbB/IuIMfYgxaCPg5Bpg=="], + + "yargs": ["yargs@17.7.2", "", { "dependencies": { "cliui": "^8.0.1", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": 
"^4.2.3", "y18n": "^5.0.5", "yargs-parser": "^21.1.1" } }, "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w=="], + + "yargs-parser": ["yargs-parser@22.0.0", "", {}, "sha512-rwu/ClNdSMpkSrUb+d6BRsSkLUq1fmfsY6TOpYzTwvwkg1/NRG85KBy3kq++A8LKQwX6lsu+aWad+2khvuXrqw=="], + + "yocto-queue": ["yocto-queue@1.2.2", "", {}, "sha512-4LCcse/U2MHZ63HAJVE+v71o7yOdIe4cZ70Wpf8D/IyjDKYQLV5GD46B+hSTjJsvV5PztjvHoU580EftxjDZFQ=="], + + "zod": ["zod@4.3.6", "", {}, "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg=="], + + "zwitch": ["zwitch@2.0.4", "", {}, "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A=="], + + "@babel/core/semver": ["semver@6.3.1", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="], + + "@babel/helper-compilation-targets/lru-cache": ["lru-cache@5.1.1", "", { "dependencies": { "yallist": "^3.0.2" } }, "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w=="], + + "@babel/helper-compilation-targets/semver": ["semver@6.3.1", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="], + + "@commitlint/config-validator/ajv": ["ajv@8.18.0", "", { "dependencies": { "fast-deep-equal": "^3.1.3", "fast-uri": "^3.0.1", "json-schema-traverse": "^1.0.0", "require-from-string": "^2.0.2" } }, "sha512-PlXPeEWMXMZ7sPYOHqmDyCJzcfNrUr3fGNKtezX14ykXOEIvyK81d+qydx89KY5O71FKMPaQ2vBfBFI5NHR63A=="], + + "@commitlint/read/minimist": ["minimist@1.2.8", "", {}, "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA=="], + + "@eslint-community/eslint-utils/eslint-visitor-keys": ["eslint-visitor-keys@3.4.3", "", {}, 
"sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag=="], + + "@eslint/eslintrc/ignore": ["ignore@5.3.2", "", {}, "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g=="], + + "@tailwindcss/oxide-wasm32-wasi/@emnapi/core": ["@emnapi/core@1.9.1", "", { "dependencies": { "@emnapi/wasi-threads": "1.2.0", "tslib": "^2.4.0" }, "bundled": true }, "sha512-mukuNALVsoix/w1BJwFzwXBN/dHeejQtuVzcDsfOEsdpCumXb/E9j8w11h5S54tT1xhifGfbbSm/ICrObRb3KA=="], + + "@tailwindcss/oxide-wasm32-wasi/@emnapi/runtime": ["@emnapi/runtime@1.9.1", "", { "dependencies": { "tslib": "^2.4.0" }, "bundled": true }, "sha512-VYi5+ZVLhpgK4hQ0TAjiQiZ6ol0oe4mBx7mVv7IflsiEp0OWoVsp/+f9Vc1hOhE0TtkORVrI1GvzyreqpgWtkA=="], + + "@tailwindcss/oxide-wasm32-wasi/@emnapi/wasi-threads": ["@emnapi/wasi-threads@1.2.0", "", { "dependencies": { "tslib": "^2.4.0" }, "bundled": true }, "sha512-N10dEJNSsUx41Z6pZsXU8FjPjpBEplgH24sfkmITrBED1/U2Esum9F3lfLrMjKHHjmi557zQn7kR9R+XWXu5Rg=="], + + "@tailwindcss/oxide-wasm32-wasi/@napi-rs/wasm-runtime": ["@napi-rs/wasm-runtime@1.1.2", "", { "dependencies": { "@tybys/wasm-util": "^0.10.1" }, "peerDependencies": { "@emnapi/core": "^1.7.1", "@emnapi/runtime": "^1.7.1" }, "bundled": true }, "sha512-sNXv5oLJ7ob93xkZ1XnxisYhGYXfaG9f65/ZgYuAu3qt7b3NadcOEhLvx28hv31PgX8SZJRYrAIPQilQmFpLVw=="], + + "@tailwindcss/oxide-wasm32-wasi/@tybys/wasm-util": ["@tybys/wasm-util@0.10.1", "", { "dependencies": { "tslib": "^2.4.0" }, "bundled": true }, "sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg=="], + + "@tailwindcss/oxide-wasm32-wasi/tslib": ["tslib@2.8.1", "", { "bundled": true }, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="], + + "@typescript-eslint/typescript-estree/minimatch": ["minimatch@10.2.4", "", { "dependencies": { "brace-expansion": "^5.0.2" } }, 
"sha512-oRjTw/97aTBN0RHbYCdtF1MQfvusSIBQM0IZEgzl6426+8jSC0nF1a/GmnVLpfB9yyr6g6FTqWqiZVbxrtaCIg=="], + + "@typescript-eslint/visitor-keys/eslint-visitor-keys": ["eslint-visitor-keys@5.0.1", "", {}, "sha512-tD40eHxA35h0PEIZNeIjkHoDR4YjjJp34biM0mDvplBe//mB+IHCqHDGV7pxF+7MklTvighcCPPZC7ynWyjdTA=="], + + "anymatch/picomatch": ["picomatch@2.3.2", "", {}, "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA=="], + + "astro/@astrojs/compiler": ["@astrojs/compiler@3.0.1", "", {}, "sha512-z97oYbdebO5aoWzuJ/8q5hLK232+17KcLZ7cJ8BCWk6+qNzVxn/gftC0KzMBUTD8WAaBkPpNSQK6PXLnNrZ0CA=="], + + "chalk/escape-string-regexp": ["escape-string-regexp@1.0.5", "", {}, "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg=="], + + "cli-truncate/string-width": ["string-width@8.2.0", "", { "dependencies": { "get-east-asian-width": "^1.5.0", "strip-ansi": "^7.1.2" } }, "sha512-6hJPQ8N0V0P3SNmP6h2J99RLuzrWz2gvT7VnK5tKvrNqJoyS9W4/Fb8mo31UiPvy00z7DQXkP2hnKBVav76thw=="], + + "csso/css-tree": ["css-tree@2.2.1", "", { "dependencies": { "mdn-data": "2.0.28", "source-map-js": "^1.0.1" } }, "sha512-OA0mILzGc1kCOCSJerOeqDxDQ4HOh+G8NbOJFOTgOCzpw7fCBubk0fEyxp8AgOL/jvLgYA/uV0cMbe43ElF1JA=="], + + "dom-serializer/entities": ["entities@4.5.0", "", {}, "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw=="], + + "eslint/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], + + "eslint/ignore": ["ignore@5.3.2", "", {}, "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g=="], + + "figures/escape-string-regexp": ["escape-string-regexp@1.0.5", "", {}, "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg=="], + + "global-prefix/ini": ["ini@1.3.8", "", {}, 
"sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew=="], + + "global-prefix/which": ["which@1.3.1", "", { "dependencies": { "isexe": "^2.0.0" }, "bin": { "which": "./bin/which" } }, "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ=="], + + "import-fresh/resolve-from": ["resolve-from@4.0.0", "", {}, "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g=="], + + "inquirer/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], + + "listr2/wrap-ansi": ["wrap-ansi@9.0.2", "", { "dependencies": { "ansi-styles": "^6.2.1", "string-width": "^7.0.0", "strip-ansi": "^7.1.0" } }, "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww=="], + + "log-symbols/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], + + "log-update/ansi-escapes": ["ansi-escapes@7.3.0", "", { "dependencies": { "environment": "^1.0.0" } }, "sha512-BvU8nYgGQBxcmMuEeUEmNTvrMVjJNSH7RgW24vXexN4Ven6qCvy4TntnvlnwnMLTVlcRQQdbRY8NKnaIoeWDNg=="], + + "log-update/cli-cursor": ["cli-cursor@5.0.0", "", { "dependencies": { "restore-cursor": "^5.0.0" } }, "sha512-aCj4O5wKyszjMmDT4tZj93kxyydN/K5zPWSCe6/0AV/AA1pqe5ZBIw0a2ZfPQV7lL5/yb5HsUreJ6UFAF1tEQw=="], + + "log-update/slice-ansi": ["slice-ansi@7.1.2", "", { "dependencies": { "ansi-styles": "^6.2.1", "is-fullwidth-code-point": "^5.0.0" } }, "sha512-iOBWFgUX7caIZiuutICxVgX1SdxwAVFFKwt1EvMYYec/NWO5meOJ6K5uQxhrYBdQJne4KxiqZc+KptFOWFSI9w=="], + + "log-update/strip-ansi": ["strip-ansi@7.2.0", "", { "dependencies": { "ansi-regex": "^6.2.2" } }, 
"sha512-yDPMNjp4WyfYBkHnjIRLfca1i6KMyGCtsVgoKe/z1+6vukgaENdgGBZt+ZmKPc4gavvEZ5OgHfHdrazhgNyG7w=="], + + "log-update/wrap-ansi": ["wrap-ansi@9.0.2", "", { "dependencies": { "ansi-styles": "^6.2.1", "string-width": "^7.0.0", "strip-ansi": "^7.1.0" } }, "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww=="], + + "mdast-util-find-and-replace/escape-string-regexp": ["escape-string-regexp@5.0.0", "", {}, "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw=="], + + "micromatch/picomatch": ["picomatch@2.3.2", "", {}, "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA=="], + + "ora/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], + + "p-locate/p-limit": ["p-limit@3.1.0", "", { "dependencies": { "yocto-queue": "^0.1.0" } }, "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ=="], + + "parse-entities/@types/unist": ["@types/unist@2.0.11", "", {}, "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA=="], + + "rollup/fsevents": ["fsevents@2.3.3", "", { "os": "darwin" }, "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw=="], + + "sass/chokidar": ["chokidar@4.0.3", "", { "dependencies": { "readdirp": "^4.0.1" } }, "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA=="], + + "sitemap/@types/node": ["@types/node@24.12.0", "", { "dependencies": { "undici-types": "~7.16.0" } }, "sha512-GYDxsZi3ChgmckRT9HPU0WEhKLP08ev/Yfcq2AstjrDASOYCSXeyjDsHg4v5t4jOj7cyDX3vmprafKlWIG9MXQ=="], + + "slice-ansi/ansi-styles": ["ansi-styles@6.2.3", "", {}, "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg=="], 
+ + "slice-ansi/is-fullwidth-code-point": ["is-fullwidth-code-point@5.1.0", "", { "dependencies": { "get-east-asian-width": "^1.3.1" } }, "sha512-5XHYaSyiqADb4RnZ1Bdad6cPp8Toise4TzEjcOYDHZkTCbKgiUl7WTUCpNWHuxmDt91wnsZBc9xinNzopv3JMQ=="], + + "svgo/commander": ["commander@11.1.0", "", {}, "sha512-yPVavfyCcRhmorC7rWlkHn15b4wDVgVmBA7kV4QVBsF7kv/9TKJAbAXVTxvTnwP8HHKjRCJDClKbciiYS7p0DQ=="], + + "terser/commander": ["commander@2.20.3", "", {}, "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ=="], + + "vite/fsevents": ["fsevents@2.3.3", "", { "os": "darwin" }, "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw=="], + + "wrap-ansi/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], + + "yargs/yargs-parser": ["yargs-parser@21.1.1", "", {}, "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw=="], + + "@commitlint/config-validator/ajv/json-schema-traverse": ["json-schema-traverse@1.0.0", "", {}, "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="], + + "@typescript-eslint/typescript-estree/minimatch/brace-expansion": ["brace-expansion@5.0.5", "", { "dependencies": { "balanced-match": "^4.0.2" } }, "sha512-VZznLgtwhn+Mact9tfiwx64fA9erHH/MCXEUfB/0bX/6Fz6ny5EGTXYltMocqg4xFAQZtnO3DHWWXi8RiuN7cQ=="], + + "cli-truncate/string-width/strip-ansi": ["strip-ansi@7.2.0", "", { "dependencies": { "ansi-regex": "^6.2.2" } }, "sha512-yDPMNjp4WyfYBkHnjIRLfca1i6KMyGCtsVgoKe/z1+6vukgaENdgGBZt+ZmKPc4gavvEZ5OgHfHdrazhgNyG7w=="], + + "csso/css-tree/mdn-data": ["mdn-data@2.0.28", "", {}, "sha512-aylIc7Z9y4yzHYAJNuESG3hfhC+0Ibp/MAMiaOZgNv4pmEdFyfZhhhny4MNiAfWdBQ1RQ2mfDWmM1x8SvGyp8g=="], + + "eslint/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": 
"^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], + + "eslint/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], + + "inquirer/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], + + "inquirer/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], + + "listr2/wrap-ansi/ansi-styles": ["ansi-styles@6.2.3", "", {}, "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg=="], + + "listr2/wrap-ansi/string-width": ["string-width@7.2.0", "", { "dependencies": { "emoji-regex": "^10.3.0", "get-east-asian-width": "^1.0.0", "strip-ansi": "^7.1.0" } }, "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ=="], + + "listr2/wrap-ansi/strip-ansi": ["strip-ansi@7.2.0", "", { "dependencies": { "ansi-regex": "^6.2.2" } }, "sha512-yDPMNjp4WyfYBkHnjIRLfca1i6KMyGCtsVgoKe/z1+6vukgaENdgGBZt+ZmKPc4gavvEZ5OgHfHdrazhgNyG7w=="], + + "log-symbols/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], + + "log-symbols/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], + + "log-update/cli-cursor/restore-cursor": ["restore-cursor@5.1.0", "", { "dependencies": { "onetime": "^7.0.0", "signal-exit": "^4.1.0" } }, 
"sha512-oMA2dcrw6u0YfxJQXm342bFKX/E4sG9rbTzO9ptUcR/e8A33cHuvStiYOwH7fszkZlZ1z/ta9AAoPk2F4qIOHA=="], + + "log-update/slice-ansi/ansi-styles": ["ansi-styles@6.2.3", "", {}, "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg=="], + + "log-update/slice-ansi/is-fullwidth-code-point": ["is-fullwidth-code-point@5.1.0", "", { "dependencies": { "get-east-asian-width": "^1.3.1" } }, "sha512-5XHYaSyiqADb4RnZ1Bdad6cPp8Toise4TzEjcOYDHZkTCbKgiUl7WTUCpNWHuxmDt91wnsZBc9xinNzopv3JMQ=="], + + "log-update/strip-ansi/ansi-regex": ["ansi-regex@6.2.2", "", {}, "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg=="], + + "log-update/wrap-ansi/ansi-styles": ["ansi-styles@6.2.3", "", {}, "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg=="], + + "log-update/wrap-ansi/string-width": ["string-width@7.2.0", "", { "dependencies": { "emoji-regex": "^10.3.0", "get-east-asian-width": "^1.0.0", "strip-ansi": "^7.1.0" } }, "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ=="], + + "ora/chalk/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], + + "ora/chalk/supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], + + "p-locate/p-limit/yocto-queue": ["yocto-queue@0.1.0", "", {}, "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q=="], + + "sass/chokidar/readdirp": ["readdirp@4.1.2", "", {}, "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg=="], + + "sitemap/@types/node/undici-types": ["undici-types@7.16.0", "", {}, 
"sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw=="], + + "wrap-ansi/ansi-styles/color-convert": ["color-convert@2.0.1", "", { "dependencies": { "color-name": "~1.1.4" } }, "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ=="], + + "@typescript-eslint/typescript-estree/minimatch/brace-expansion/balanced-match": ["balanced-match@4.0.4", "", {}, "sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA=="], + + "cli-truncate/string-width/strip-ansi/ansi-regex": ["ansi-regex@6.2.2", "", {}, "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg=="], + + "eslint/chalk/ansi-styles/color-convert": ["color-convert@2.0.1", "", { "dependencies": { "color-name": "~1.1.4" } }, "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ=="], + + "eslint/chalk/supports-color/has-flag": ["has-flag@4.0.0", "", {}, "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ=="], + + "inquirer/chalk/ansi-styles/color-convert": ["color-convert@2.0.1", "", { "dependencies": { "color-name": "~1.1.4" } }, "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ=="], + + "inquirer/chalk/supports-color/has-flag": ["has-flag@4.0.0", "", {}, "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ=="], + + "listr2/wrap-ansi/string-width/emoji-regex": ["emoji-regex@10.6.0", "", {}, "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A=="], + + "listr2/wrap-ansi/strip-ansi/ansi-regex": ["ansi-regex@6.2.2", "", {}, "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg=="], + + "log-symbols/chalk/ansi-styles/color-convert": ["color-convert@2.0.1", "", { "dependencies": { "color-name": "~1.1.4" } }, 
"sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ=="], + + "log-symbols/chalk/supports-color/has-flag": ["has-flag@4.0.0", "", {}, "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ=="], + + "log-update/cli-cursor/restore-cursor/onetime": ["onetime@7.0.0", "", { "dependencies": { "mimic-function": "^5.0.0" } }, "sha512-VXJjc87FScF88uafS3JllDgvAm+c/Slfz06lorj2uAY34rlUu0Nt+v8wreiImcrgAjjIHp1rXpTDlLOGw29WwQ=="], + + "log-update/cli-cursor/restore-cursor/signal-exit": ["signal-exit@4.1.0", "", {}, "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw=="], + + "log-update/wrap-ansi/string-width/emoji-regex": ["emoji-regex@10.6.0", "", {}, "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A=="], + + "ora/chalk/ansi-styles/color-convert": ["color-convert@2.0.1", "", { "dependencies": { "color-name": "~1.1.4" } }, "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ=="], + + "ora/chalk/supports-color/has-flag": ["has-flag@4.0.0", "", {}, "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ=="], + + "wrap-ansi/ansi-styles/color-convert/color-name": ["color-name@1.1.4", "", {}, "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="], + + "eslint/chalk/ansi-styles/color-convert/color-name": ["color-name@1.1.4", "", {}, "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="], + + "inquirer/chalk/ansi-styles/color-convert/color-name": ["color-name@1.1.4", "", {}, "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="], + + "log-symbols/chalk/ansi-styles/color-convert/color-name": ["color-name@1.1.4", "", {}, 
"sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="], + + "ora/chalk/ansi-styles/color-convert/color-name": ["color-name@1.1.4", "", {}, "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="], + } +} diff --git a/content/docs/adr/adr-001-node-interface.md b/content/docs/adr/adr-001-node-interface.md deleted file mode 100644 index 7671d9c..0000000 --- a/content/docs/adr/adr-001-node-interface.md +++ /dev/null @@ -1,55 +0,0 @@ -# Using Evolve `Node` as replacement of Tendermint `Node` - -## Changelog - -- 26.02.2021: Initial Draft -- 29.09.2022: Rename Optimint to rollmint -- 22.01.2023: Rename rollmint to Evolve - -Replacing on the `Node` level gives much flexibility. Still, significant amount of code can be reused, and there is no need to refactor lazyledger-core. -Cosmos SDK is tightly coupled with Tendermint with regards to node creation, RPC, app initialization, etc. De-coupling requires big refactoring of cosmos-sdk. - -There are known issues related to Tendermint RPC communication. - -## Replacing Tendermint `Node` - -Tendermint `Node` is a struct. It's used directly in cosmos-sdk (not via interface). -We don't need to introduce common interface `Node`s, because the plan is to use scaffolding tool in the future, so we can make any required changes in cosmos-sdk. 
- -### Interface required by cosmos-sdk - -- BaseService (struct): - - Service (interface) - - Start() - - IsRunning() - - Stop() - - Logger -- Direct access: - - ConfigureRPC() - - EventBus() - -## Alternative approaches - -### Create RPC from scratch - -- Pros: - - May be possible to avoid Tendermint issues - - Should be possible to avoid dependency on Tendermint in Evolve - - Changes probably limited to cosmos-sdk (not required in tendermint/lazyledger-core) -- Cons: - - Reinventing the wheel - - Requires bigger, much more complicated changes in cosmos-sdk - - Probably can't upstream such changes to cosmos-sdk - -## `tendermint` vs `lazyledger-core` - -Right now, either `tendermint` or `lazyledger-core` can be used for base types (including interfaces). -Similarly, vanilla `cosmos-sdk` (not a fork under lazyledger organization) can be used as a base for optimistic client. -`lazyledger-core` is a repository created because of needs related to lazyledger client, not optimistic client. -On the other hand, some of the functionality will be shared between both clients. This will have to be resolved later in time. -Using 'vanilla' repositories (not forks) probably will make easier to upstream changes if required, and will make scaffolding -easier. - -## Development - -`cosmos-sdk-rollmit` is a repository dedicated for maintenance of Evolve-enabled version of Cosmos SDK. diff --git a/content/docs/adr/adr-002-mempool.md b/content/docs/adr/adr-002-mempool.md deleted file mode 100644 index a040ba4..0000000 --- a/content/docs/adr/adr-002-mempool.md +++ /dev/null @@ -1,28 +0,0 @@ -# Mempool - -For now, mempool implementation from lazyledger-core/Tendermint will be used. 
- -## Pros - -* good integration with other re-used code (see ADR-001) -* well tested -* glue code is not required -* it will be updated in case of ABCI++ adoption -* mempool doesn't depend on P2P layer, so it's easy to replace it with libp2p -* mempool does not require any knowledge about the internal structure of the Txs and is already "abci-ready" - -## Cons - -* inherit all limitations of the tendermint mempool - * no prioritization of Txs - * many [open issues](https://github.com/cometbft/cometbft/issues?q=is%3Aissue+is%3Aopen+mempool+label%3AC%3Amempool) -* legacy code base (the tendermint mempool exists for a while now) - -## Alternatives - -* Implementation from scratch - * time consuming - * error prone -* Re-using other mempool (Celo, Prysm, etc) - * different API - * potential licensing issues diff --git a/content/docs/adr/adr-003-peer-discovery.md b/content/docs/adr/adr-003-peer-discovery.md deleted file mode 100644 index 4510013..0000000 --- a/content/docs/adr/adr-003-peer-discovery.md +++ /dev/null @@ -1,40 +0,0 @@ -# Peer Discovery - -## Changelog - -- 31.03.2021: Initial Draft -- 29.09.2022: Rename Optimint to rollmint -- 22.01.2023: Rename rollmint to Rollkit - -Libp2p provides multiple ways to discover peers (DHT, mDNS, PubSub peer exchange). Currently there are no plans to support mDNS (as it's limited to local networks). - -## Proposed network architecture - -1. There will be a set of well-known, application-agnostic seed nodes. Every Evolve client will be able to connect to such node, addresses will be saved in configuration. - - This does not limit applications as they can still create independent networks with separate set of seed nodes. -2. Nodes in the network will serve DHT. It will be used for active peer discovery. Client of each optimistic network will be able to find other peers in this particular network. - - All nodes will cooperate on the same DHT. 
- - ChainID will be used to advertise that client participates in a particular optimistic network. -3. Nodes from multiple networks will help with peer discovery (via single DHT). -4. After connecting to nodes found in DHT, GossipSub will handle peer lists for clients. - -### Pros - -- Shared DHT should make it easier to find peers. -- Use of existing libraries. - -### Cons - -- There may be some overhead for clients to handle DHT requests from other optimistic networks. - -## Alternatives - -1. Joining public IPFS DHT for peer discovery. - - pros: large network - finding peers should be very easy - - cons: we may affect public IPFS network stability in case of misconfiguration, possibly lot of unrelated traffic -2. Custom peer-exchange protocol. - - pros: full flexibility of implementation - - cons: need to create from scratch and test -3. Re-use of existing peer discovery mechanism like `discv5` - - pros: ready & battle-tested software - - cons: use different network stack, requires lot of integration diff --git a/content/docs/adr/adr-004-core-types.md b/content/docs/adr/adr-004-core-types.md deleted file mode 100644 index bf2d5f2..0000000 --- a/content/docs/adr/adr-004-core-types.md +++ /dev/null @@ -1,211 +0,0 @@ -# ADR 004: Blockchain Core Data Types - -## Changelog - -- 19.04.2021: Initial Draft -- 29.09.2022: Rename Optimint to rollmint -- 22.01.2023: Rename rollmint to Rollkit - -## Context - -This document describes the core data structures of any Rollkit-powered blockchain. - -## Alternative Approaches - -Alternatives for ChainID: - -- an integer type like unit64 -- a string that fulfills some basic rules like the ChainID for Cosmos chains - -## Decision - -We design the core data types as minimalistic as possible, i.e. they only contain the absolute necessary -data for an optimistic applications to function properly. 
-If there are any additional fields that conflict with above's claimed minimalism, then they are necessarily inherited -by the ABCI imposed separation between application state machine and consensus/networking (often also referred to as ABCI-server and -client). -Where such tradeoffs are made, we explicitly comment on them. - -## Detailed Design - -### Transactions - -In Rollkit, like in Tendermint, Transactions are just an opaque slice of bytes: - -```go -type Tx []byte -type Txs []Tx -``` - -If necessary `Tx` could be turned into a struct. Currently, there is no need for that though. - -### Block Header - -```go -type NamespaceID [8]byte - -type Header struct { - // Block and App version - Version Version - // NamespaceID identifies this chain e.g. when connected to other chains via IBC. - NamespaceID NamespaceID - - Height uint64 - Time uint64 // time in tai64 format - - // prev block info - LastHeaderHash [32]byte - - // hashes of block data - DataHash [32]byte // Block.Data root aka Transactions - AppHash [32]byte // state after applying txs from the current block - - - // Note that the address can be derived from the pubkey which can be derived - // from the signature when using secp256k. - // We keep this in case users choose another signature format where the - // pubkey can't be recovered by the signature (e.g. ed25519). - ProposerAddress Address // original proposer of the block -} - -// Version captures the consensus rules for processing a block in the blockchain, -// including all blockchain data structures and the rules of the application's -// state transition machine. -// This is equivalent to the tmversion.Consensus type in Tendermint. 
-type Version struct { - Block uint32 - App uint32 -} -``` - -### Block and Block.Data - -```go -type Block struct { - Header Header - Data Data - LastCommit *Commit -} - -type Data struct { - Txs Txs - IntermediateStateRoots IntermediateStateRoots - Evidence EvidenceData -} - -type EvidenceData struct { - Evidence []Evidence -} -``` - -#### Evidence - -`Evidence` represents a go-interface (or oneof in protobuf) of known set of concrete fraud-proofs. -The details for this will be defined in a separated adr/PR. - -Here is an incomplete list of potential evidence types: - -- Same Aggregator signed two different blocks at the same height - - figure out if this is actually malicious / slashable behaviour - e.g. clients could simply accept the last block included in a LL block -- State Transition Fraud Proofs (for previous blocks) - -#### Commit - -```go -type Commit struct { - Height uint64 - HeaderHash [32]byte - Signatures []Signature // most of the time this is a single signature -} -``` - -#### ConsensusParams - -[ConsensusParams](https://docs.tendermint.com/master/spec/core/state.html#consensusparams) can be updated by the application through ABCI. -This could be seen as a state transition and the ConsensusHash in the header would then require a dedicated state fraud proof. -That said, none of the existing default Cosmos-SDK modules actually make use of this functionality though. -Hence, we can treat the ConsensusParams as constants (for the same app version). -We clearly need to communicate this to optimistic application chain developers. -Ideally, we should ensure this programmatically to guarantee that this assumption always holds inside Rollkit. - -The ConsensusParams have the exact same structure as in Tendermint. For the sake of self-containedness we still list them here: - -```go -// ConsensusParams contains consensus critical parameters that determine the -// validity of blocks. 
-type ConsensusParams struct { - Block BlockParams - Evidence EvidenceParams - Validator ValidatorParams - Version VersionParams -} - -// BlockParams contains limits on the block size. -type BlockParams struct { - // Max block size, in bytes. - // Note: must be greater than 0 - MaxBytes int64 - // Max gas per block. - // Note: must be greater or equal to -1 - MaxGas int64 - // Minimum time increment between consecutive blocks (in milliseconds) If the - // block header timestamp is ahead of the system clock, decrease this value. - // - // Not exposed to the application. - TimeIotaMs int64 -} - -// EvidenceParams determine how we handle evidence of malfeasance. -type EvidenceParams struct { - // Max age of evidence, in blocks. - // - // The basic formula for calculating this is: MaxAgeDuration / {average block - // time}. - MaxAgeNumBlocks int64 - // Max age of evidence, in time. - // - // It should correspond with an app's "unbonding period" or other similar - // mechanism for handling [Nothing-At-Stake - // attacks](https://vitalik.eth.limo/general/2017/12/31/pos_faq.html#what-is-the-nothing-at-stake-problem-and-how-can-it-be-fixed). - MaxAgeDuration time.Duration - // This sets the maximum size of total evidence in bytes that can be committed in a single block. - // and should fall comfortably under the max block bytes. - // Default is 1048576 or 1MB - MaxBytes int64 -} - -// ValidatorParams restrict the public key types validators can use. -type ValidatorParams struct { - PubKeyTypes []string -} - -// VersionParams contains the ABCI application version. -type VersionParams struct { - AppVersion uint64 -} -``` - -## Status - -Proposed and partly implemented. - -For finishing the implementation these items need to be tackled at least: - -- [ ] methods on core types (e.g. 
for hashing and basic validation etc) -- [ ] equivalent types for serialization purposes (probably protobuf) -- [ ] conversion from and to protobuf - -## Consequences - -### Positive - -- very close to the original Tendermint types which makes on-boarding devs familiar for the Cosmos-SDK and Tendermint easier - -### Negative - -- dependency on abci types for evidence interface (in the current implementation at least) - -### Neutral - -## References - -- diff --git a/content/docs/adr/adr-005-serialization.md b/content/docs/adr/adr-005-serialization.md deleted file mode 100644 index 2e94a80..0000000 --- a/content/docs/adr/adr-005-serialization.md +++ /dev/null @@ -1,37 +0,0 @@ -# ADR 005: Serialization - -## Changelog - -- 2021-05-31: Created - -## Context - -All the basic data types needs to be efficiently serialized into binary format before saving in KV store or sending to network. - -## Alternative Approaches - -There are countless alternatives to `protobuf`, including `flatbuffers`, `avro`, `ASN.1`, `RLP`. - -## Decision - -`protobuf` is used for data serialization both for storing and network communication. -`protobuf` is used widely in entire Cosmos ecosystem, and we would need to use it anyways. 
- -## Status - -{Accepted} - -## Consequences - -### Positive - -- well known serialization method -- language independent - -### Negative - -- there are known issues with `protobuf` - -### Neutral - -- it's de-facto standard in Cosmos ecosystem diff --git a/content/docs/adr/adr-006-da-interface.md b/content/docs/adr/adr-006-da-interface.md deleted file mode 100644 index 2d69702..0000000 --- a/content/docs/adr/adr-006-da-interface.md +++ /dev/null @@ -1,114 +0,0 @@ -# ADR 006: Data Availability Client Interface - -## Changelog - -- 2021.04.30: Initial draft -- 2021.06.03: Init method added -- 2021.07.09: Added CheckBlockAvailability method, added KVStore to Init method, added missing result types -- 29.09.2022: Rename Optimint to rollmint -- 22.01.2023: Rename rollmint to Rollkit - -## Context - -Rollkit requires data availability layer. Different implementations are expected. - -## Alternative Approaches - -> This section contains information around alternative options that are considered before making a decision. It should contain an explanation on why the alternative approach(es) were not chosen. - -## Decision - -Defined interface should be very generic. -Interface should consist of 5 methods: `Init`, `Start`, `Stop`, `SubmitBlock`, `CheckBlockAvailability`. -There is also optional interface `BlockRetriever` for data availability layer clients that are also able to get block data. -All the details are implementation-specific. - -## Detailed Design - -Definition of interface: - -```go -type DataAvailabilityLayerClient interface { - // Init is called once to allow DA client to read configuration and initialize resources. - Init(config []byte, kvStore store.KVStore, logger log.Logger) error - - Start() error - Stop() error - - // SubmitBlock submits the passed in block to the DA layer. - // This should create a transaction which (potentially) - // triggers a state transition in the DA layer. 
- SubmitBlock(block *types.Block) ResultSubmitBlock - - // CheckBlockAvailability queries DA layer to check block's data availability. - CheckBlockAvailability(block *types.Block) ResultCheckBlock -} - -// BlockRetriever is additional interface that can be implemented by Data Availability Layer Client that is able to retrieve -// block data from DA layer. This gives the ability to use it for block synchronization. -type BlockRetriever interface { - RetrieveBlock(height uint64) ResultRetrieveBlock -} - -// TODO define an enum of different non-happy-path cases -// that might need to be handled by Rollkit independent of -// the underlying DA chain. -type StatusCode uint64 - -// Data Availability return codes. -const ( - StatusUnknown StatusCode = iota - StatusSuccess - StatusTimeout - StatusError -) - -type DAResult struct { - // Code is to determine if the action succeeded. - Code StatusCode - // Message may contain DA layer specific information (like DA block height/hash, detailed error message, etc) - Message string -} - -// ResultSubmitBlock contains information returned from DA layer after block submission. -type ResultSubmitBlock struct { - DAResult - // Not sure if this needs to be bubbled up to other - // parts of Rollkit. - // Hash hash.Hash -} - -// ResultCheckBlock contains information about block availability, returned from DA layer client. -type ResultCheckBlock struct { - DAResult - // DataAvailable is the actual answer whether the block is available or not. - // It can be true if and only if Code is equal to StatusSuccess. - DataAvailable bool -} - -type ResultRetrieveBlock struct { - DAResult - // Block is the full block retrieved from Data Availability Layer. - // If Code is not equal to StatusSuccess, it has to be nil. - Block *types.Block -} -``` -> - -## Status - -Implemented - -## Consequences - -> This section describes the consequences, after applying the decision. All consequences should be summarized here, not just the "positive" ones. 
- -### Positive - -### Negative - -### Neutral - -## References - -> Are there any relevant PR comments, issues that led up to this, or articles referenced for why we made the given design choice? If so link them here! diff --git a/content/docs/adr/adr-007-header-commit-to-shares.md b/content/docs/adr/adr-007-header-commit-to-shares.md deleted file mode 100644 index 98b11cd..0000000 --- a/content/docs/adr/adr-007-header-commit-to-shares.md +++ /dev/null @@ -1,65 +0,0 @@ -# ADR 007: Commit to Shares in Header - -## Changelog - -- 2021-10-01: Initial draft - -## Context - -1. Only [a single data root must be included in the Celestia block header](https://github.com/celestiaorg/celestia-specs/blob/master/src/specs/data_structures.md#header); if more than one data root is included, light nodes will not have any guarantees that the data behind the other data roots is available unless they run a separate sampling process per data root. -1. [PayForMessage transactions](https://github.com/celestiaorg/celestia-specs/blob/master/src/specs/data_structures.md#signedtransactiondatapayformessage) include a commitment of roots of subtrees of the Celestia data root. This is a requirement for compact proofs that a message was or was not included correctly. -1. [Over the wire](https://github.com/celestiaorg/celestia-specs/blob/master/src/specs/networking.md#wiretxpayformessage), PayForMessage transactions include (potentially) multiple signatures for the same message with different sizes of Celestia block, to allow for [non-interactive message inclusion](https://github.com/celestiaorg/celestia-specs/blob/master/src/rationale/message_block_layout.md#non-interactive-default-rules). - -Blocks must follow a similar strategy to the above. 
Specifically, the data root in block headers [must commit to subtree roots in the Celestia data root](https://github.com/evstack/ev-node/issues/133), otherwise nodes would not have any guarantees that the data behind the data root in a block header is available. - -## Alternative Approaches - -### Alternative 1: Don't Commit to Data Root in Block Header - -One proposal is to simply not include any commitment to data in the block header, and use the `MessageShareCommitment` field in the respective PayForMessage transaction to commit to _both_ the block header and block data. - -This may only work securely under certain scenarios, but not in the general case. Since block data is not committed to in the block header, it will not be signed over by the block producer set (e.g. if the block producers are using a Tendermint-like protocol) and can therefore be malleated by these parties. - -### Alternative 2: One Message Per Layout - -Another proposal is having the block header commit to subtree roots. However, since the layout of shares (and thus, the subtree structures) might change depending on the size of the data square, this means the message that is paid for in PayForMessage is different depending on the layout (since the message includes the block header). - -A different message can be included per witness for PayForMessage transactions over the wire. This would lead to unacceptably high networking overhead unfortunately, since messages are expected to be quite large individually. - -### Alternative 3: Auction Off Rows Instead of Shares - -Instead of auctioning off individual shares, rows can be auctioned off entirely. In other words, only a single message can be included per row (though a message may span more than one row), and always begins at the start of a row. This will also simplify various codepaths that no longer need to account for the potential of multiple messages and namespaces per row. 
- -This would not solve anything since the size of the square will still change, and is also a design pivot. It is not yet decided whether this pivot is worth taking. - -An alternative to this alternative would be to make the block size fixed. Combined with auctioning off rows, this would enormously simplify message inclusion logic. - -## Decision - -Resolving this issue is accomplished with two components. - -First, the transactions are committed to _in share form and layout_ in the block header, similar to the commitment in PayForMessage. For this, only transactions are committed to, and begin aligned with power of 2 shares. This commitment will be different than the one in the PayForMessage transaction that pays for this message since that commits to both the transactions and block header. A new block header and commitment is constructed for each of the different block sizes possible. - -To preserve the property that transactions begin aligned at a power of 2 boundary, we append the block header _after_ the transactions, effectively turning it into a footer. Instead of downloading the first X shares of each message to extract the header for DoS prevention, nodes will instead download the last X shares of each message. - -In order to avoid having to include many different messages with PayForMessage over the wire, we modify the WirePayForMessage structure to include a new field, `footers`. The number of footers must be equal to the number of witnesses. When verifying each witness, the associated footer is appended to the message, which combined is the effective message. - -![Proposal.](figures/header_shares_commit.jpg) - -## Detailed Design - -## Status - -Discarded - -## Consequences - -### Positive - -### Negative - -### Neutral - -## References - -1. 
diff --git a/content/docs/adr/adr-008-mempool-optimint-light-client.md b/content/docs/adr/adr-008-mempool-optimint-light-client.md deleted file mode 100644 index 264be83..0000000 --- a/content/docs/adr/adr-008-mempool-optimint-light-client.md +++ /dev/null @@ -1,58 +0,0 @@ -# ADR 008: Light Client Transaction Gossip & Mempool - -## Changelog - -- 20.09.2022: Initial Draft -- 29.09.2022: Rename Optimint to rollmint -- 22.01.2023: Rename rollmint to Rollkit - -## Context - -Rollkit Light Clients cannot validate transactions without a state. Therefore Light Clients should not gossip incoming transactions, and the mempool can be disabled. - -### Explanation - -There is a wish for the light clients to participate in the P2P Layer. One of the ways a full node participates in the network is to gossip valid transactions throughout the network. Each full node that receives a transaction sends an ABCI message, `CheckTx`, to the application layer to check for validity and receives an `abci.ResponseCheckTx` . -There are 2 Types of checks. -Current stateless checks: - -- Check that the size is less than the configured maximum transaction size. -- Call any Pre-Check hooks if defined -- Check if the proxy connection has an error -- Check if the transaction is already in cache / mempool - -Stateful: - -- Checks if transactions and messages are valid based on a committed state - -Light clients cannot do stateful checks because they don't have access to the state. -Light clients can do stateless checks. However, creating invalid transactions that pass the current stateless checks is easy. Light clients could therefore support a DOS attack of the network when they gossip invalid transactions. -If light clients do not check transactions, they do not need the mempool. - -### Libp2p pubsub - -If the transaction originates from the light client i.e. submitting a new transaction, then this transaction must be gossiped to the network. 
- -A light client will use the [fan-out](https://docs.libp2p.io/concepts/publish-subscribe/#fan-out) functionality of pubsub. It will send its transaction to the network but will not subscribe to receiving and propagating other transactions. - -## Alternative Approaches - -- We create more rigorous stateless checks on the transactions that would reduce or prevent the DOS attack and enable transaction gossiping - -## Status - -Proposed - -## Consequences - -### Positive - -- Reduction of complexity and keeping the light client lightweight - -### Negative - -- Light clients do not participate in gossiping transactions - -## References - -Issue #100 [References](https://github.com/evstack/ev-node/issues/100#issuecomment-921848268) diff --git a/content/docs/adr/adr-009-state-fraud-proofs.md b/content/docs/adr/adr-009-state-fraud-proofs.md deleted file mode 100644 index f5beb0d..0000000 --- a/content/docs/adr/adr-009-state-fraud-proofs.md +++ /dev/null @@ -1,283 +0,0 @@ -# ADR 009: State Fraud Proofs - -## Changelog - -- 2022-11-03: Initial draft -- 2023-02-02: Update design with Deep Subtrees and caveats - -## Authors - -Manav Aggarwal (@Manav-Aggarwal) - -## Context - -This ADR introduces a design for state fraud proofs in optimistic cosmos-sdk using Rollkit. -It implements parts of Section 4 (Fraud Proofs) of Al-Bassam et al’s paper “Fraud and Data Availability Proofs: Detecting Invalid Blocks in Light Clients”. -Some previous notes regarding this topic in the context of cosmos-sdk are described in Matthew Di Ferrante's [notes](https://github.com/evstack/ev-node/issues/132). - -Rollkit's design consists of a single sequencer that posts blocks to the DA layer, and multiple (optional) full nodes. Sequencers gossip block headers to full nodes and full nodes fetch posted blocks from the DA layer. Full nodes then execute transactions in these blocks to update their state, and gossip block headers over P2P to Rollkit light nodes. 
Once State Fraud Proofs are enabled, when a block contains a fraudulent state transition, Rollkit full nodes can detect it by comparing intermediate state roots (ISRs) between transactions, and generate a state fraud proof that can be gossiped over P2P to Rollkit light nodes. These Rollkit light nodes can then use this state fraud proof to verify whether a fraudulent state transition occurred or not by themselves. - -Overall, State Fraud Proofs will enable trust-minimization between full nodes and light node as long as there is at least one honest full node in the system that will generate state fraud proofs. - -Note that Rollkit State Fraud Proofs are still a work in progress and will require new methods on top of ABCI, specifically, `GenerateFraudProof`, `VerifyFraudProof`, and `GetAppHash`. - -List of caveats and required modifications to push State Fraud Proofs towards completion: - -- Add ability for light nodes to receive and verify state fraud proofs. -- Add inclusion proofs over transactions so fraud proof verifiers have knowledge over which transaction is being fraud proven. -- Check for badly formatted underlying data before verifying state transition inside the State Machine. -- Limit number of state witnesses permissible in a state fraud proof since state keys accessed by a transaction can be limited by the state machine. -- Write end to end network tests covering different scenarios that can occur in case of state fraud proof submission by a full node. -- Support for multiple sequencers, in which case, fraud proof detection works the same as described above. -- Support more ABCI-compatible State Machines, in addition to the Cosmos SDK state machine. 
- -```mermaid -sequenceDiagram - title State Fraud Proofs - - participant User - participant Block Producer - participant DA Layer - participant Full Node - participant Light Client - - User->>Block Producer: Send Tx - Block Producer->>Block Producer: Generate Block - Block Producer->>Full Node: Gossip Header - Full Node->>Full Node: Verify Header - Full Node->>Light Client: Gossip Header - - Block Producer->>Full Node: Gossip Block - Block Producer->>DA Layer: Publish Block - DA Layer->>Full Node: Retrieve Block - Full Node->>Full Node: Verify Block - Full Node->>Full Node: Generate Fraud Proof - Full Node->>Light Client: Gossip Fraud Proof - Light Client->>Light Client: Verify Fraud Proof -``` - -## Alternative Approaches - -For light clients to detect invalid blocks and verify it themselves, alternatives include downloading the whole state themselves or using zero-knowledge validity proof systems. - -## Detailed Design - -### Detecting Fraudulent State Transitions - -Rollkit blocks contain a field called `Intermediate State Roots` in block data: - -```go - -// Data defines Rollkit block data. -type Data struct { - Txs Txs - IntermediateStateRoots IntermediateStateRoots -} -``` - -These Intermediate State Roots (ISRs) are initially generated by a Rollkit sequencer during block execution which uses the ABCI interface. 
- -The following ABCI methods are called during block execution: -`BeginBlock` at the start of a block -`DeliverTx` for each transaction -`EndBlock` at the end of a block - -After each of the above ABCI method calls, we generate an intermediate state root using a new ABCI method we introduce: - -```protobuf -service ABCIApplication { - rpc GetAppHash(RequestGetAppHash) returns (ResponseGetAppHash); -} - -message RequestGetAppHash {} - -message ResponseGetAppHash { - bytes app_hash = 1; -} -``` - -This `GetAppHash` ABCI method returns an equivalent of `CommitID` hash in the ABCI method `Commit` and thus provides a way to extract ISRs from an app without doing any disk write operations. - -Full nodes use these ISRs to detect fraudulent state transitions. A full node must also execute all state transitions (`BeginBlock`, `DeliverTx`, and `EndBlock` calls) and compute its own Intermediate State Roots (ISRs). After each state transition, a full node compares the corresponding ISR with the ISR given by the Sequencer. If it finds a mismatch between its own computed ISR and one given by the Sequencer, a fraudulent transition is detected and it moves on to generate a State Fraud Proof. - -### Generating State Fraud Proofs - -Note: Starting from this section, this ADR refers to State Fraud Proofs simply as Fraud Proofs. - -We introduce the following ABCI method to enable Fraud Proof Generation in the Cosmos SDK: - -```protobuf -service ABCIApplication { - rpc GenerateFraudProof(RequestGenerateFraudProof) returns (ResponseGenerateFraudProof); -} -``` - -With this new ABCI method, a Rollkit Full Node can send a request to a Cosmos SDK app to generate a Fraud Proof. In this request, it includes a list of all the state transitions from the start of the block upto the fraudulent state transition. The last non-nil state transition in this list corresponds to the fraudulent state transition. 
- -```protobuf -message RequestGenerateFraudProof { - // All state transitions upto and including the fraudulent state transition - RequestBeginBlock begin_block_request = 1 [(gogoproto.nullable) = false]; - repeated RequestDeliverTx deliver_tx_requests = 2; - RequestEndBlock end_block_request = 3; -} -``` - -The `GenerateFraudProof` method in the Cosmos SDK app receives this list of state transitions and takes the following steps to generate a fraud proof: - -- Revert local state to the last committed state -- Execute all the non-fraudulent state transitions -- Enable tracing and execute the fraudulent state transition. Tracing stores logs of what state, specifically key/value pairs, is accessed during this fraudulent state transition and generates corresponding merkle inclusion proofs of each action (read, write, delete) log. These logs correspond to state witnesses needed to re-execute this state transition. -- Revert local state back to the last committed state -- Execute all the non-fraudulent state transitions again -- Construct a State Fraud Proof with the state witnesses generated earlier which looks like this: - -```protobuf - -// Represents a single-round state fraudProof -message FraudProof { - // The block height during which the fraudulent state transition occurred - uint64 block_height = 1; - // Intermediate State Root right before the fraudulent state transition - bytes pre_state_app_hash = 2; - // Intermediate State Root right after the fraudulent state transition - bytes expected_valid_app_hash = 3; - - // Map from an app module name to a State Witness - map state_witness = 4; - - // Fraudulent state transition has to be one of these - // Only one of these three can be non-nil - RequestBeginBlock fraudulent_begin_block = 5; - RequestDeliverTx fraudulent_deliver_tx = 6; - RequestEndBlock fraudulent_end_block = 7; -} - -// State witness with a list of all witness data -message StateWitness { - // store level merkle inclusion proof - 
tendermint.crypto.ProofOp proof = 1; - // merkle root hash of the substore - bytes root_hash = 2; - // List of witness data - repeated WitnessData witness_data = 3; -} - -// Witness data containing operation, a key/value pair, and Merkle -// inclusion proofs needed for corresponding operation for key/value pair -message WitnessData { - Operation operation = 1; - bytes key = 2; - // only set for "write" operation - bytes value = 3; - repeated tendermint.crypto.ProofOp proofs = 4; -} - -enum Operation { - WRITE = 0 [(gogoproto.enumvalue_customname) = "write"]; - READ = 1 [(gogoproto.enumvalue_customname) = "read"]; - DELETE = 2 [(gogoproto.enumvalue_customname) = "delete"]; -} -``` - -Finally, return this generated state fraud proof back to the Rollkit Full Node: - -```protobuf -message ResponseGenerateFraudProof { - FraudProof fraud_proof = 1; -} -``` - -Note that currently the only underlying store supported by Cosmos SDK is the Merkle IAVL+ tree. As part of generating state witnesses, we added preliminary support for Deep Subtrees to this library [here](https://github.com/rollkit/iavl/tree/deepsubtrees_0.19.x). It enables import and export of partial state and adds tracing to IAVL trees. Note that documentation and exploring optimizations of Deep Subtrees is a work in progress. - -### Gossiping Fraud Proofs - -After a Rollkit Full Node generates a Fraud Proof, it gossips the Fraud Proof over P2P to Rollkit light clients. - -### Verifying Fraud Proofs - -Rollkit light clients should be able to use these gossiped state fraud proofs to verify whether a fraudulent state transition occurred or not by themselves. - -There are four stages of verification that must occur for a Fraud Proof. The first three stages take place in Rollkit and verify that the fraud proof itself was generated correctly. The fourth stage takes place in a Cosmos SDK app to actually execute the fraudulent state transition embedded in the fraud proof. 
- -#### **Stage One** - -Verify that both the `appHash` (ISR) and the fraudulent state transition in the `FraudProof` exist as part of a block published on the DA layer within a specified fraud proof window. This involves verifying that the blob corresponding to the block is posted on the DA layer via a Blob Inclusion Proof and verifying the share(s) containing the fraudulent state transition and `appHash` were part of that blob via Share Inclusion Proof(s). - -#### **Stage Two** - -Go through the `state_witness` list in the `FraudProof` and verify that all the store level merkle inclusion proofs are valid: the corresponding `root_hash` was included in a merkle tree with root `appHash`. - -#### **Stage Three** - -Go through the `WitnessData` in each `StateWitness` and verify that the first substore level merkle inclusion proof is valid: the corresponding `key` was included in a merkle tree with root `root_hash`. Note that we can only verify the first witness in this witnessData with current root hash. Other proofs are verified in the IAVL tree when re-executing operations in the underlying IAVL Deep Subtree. - -#### **Stage Four** - -Spin up a new Cosmos SDK app and initialize its store with Deep Subtrees constructed using witness data in the `FraudProof`. After this initialization, the app hash representing the state of the app should match the `appHash` inside the `FraudProof`. This store should now contain all the key/value pairs and underlying tree structure necessary to execute the fraudulent state transition contained in the `FraudProof`. 
- -We introduce the following ABCI method to enable Fraud Proof Verification in the Cosmos SDK: - -```protobuf -service ABCIApplication { - rpc VerifyFraudProof(RequestVerifyFraudProof) returns (ResponseVerifyFraudProof); -} -``` - -With this new ABCI method, a Rollkit light client can send a request to a newly initialized Cosmos SDK app to verify whether executing a state transition on the app would lead to a particular app hash. In this request, it includes the fraudulent state transition and an expected app hash to match against. - -```protobuf -message RequestVerifyFraudProof { - FraudProof fraud_proof = 1; - - // Note: to be removed. Moved inside state fraud proof - bytes expected_valid_app_hash = 2; -} -``` - -The `VerifyFraudProof` method in the Cosmos SDK app receives this fraudulent state transition and expected app hash, and takes the following steps to complete this verification step: - -- Execute the provided fraudulent state transition -- Get the app hash and compare it against the expected app hash -- Return a boolean representing whether the two app hashes above match or not. - -```protobuf -message ResponseVerifyFraudProof { - bool success = 1; -} -``` - -If a fraud proof is successfully verified, the Rollkit light client can halt and wait for an off-chain social recovery process. Otherwise, it ignores the Fraud Proof and proceeds as usual. - -## Status - -Proposed - -## Consequences - -### Positive - -- Enables trust-minimization between Rollkit Full nodes and Light clients. -- Introduces an honest minority assumption for Rollkit full nodes. - -### Negative - -- Breaks ABCI compatibility and requires maintaining an ABCI version specific to Rollkit. - - - -## Working Branches - -A prototype implementation of the above design is available in the following working branches: - -- [Rollkit](https://github.com/evstack/ev-node/releases/tag/v0.6.0): Contains fraud proof detection and gossiping logic. 
As fraud proofs are currently a work in progress, this logic can be toggled using a flag `--rollkit.experimental_insecure_fraud_proofs`. By default, this flag is set to `false`. -- [Cosmos-SDK](https://github.com/rollkit/cosmos-sdk-old/tree/manav/fraudproof_iavl_prototype): Implements the new ABCI methods described. -- [Tendermint](https://github.com/rollkit/tendermint/tree/abci_fraud_proofs): Contains modifications to the ABCI interface described. -- [IAVL](https://github.com/rollkit/iavl/tree/deepsubtrees_0.19.x): Adds support for Deep Subtrees and tracing. - -## References - -> Are there any relevant PR comments, issues that led up to this, or articles referenced for why we made the given design choice? If so link them here! - -- -- diff --git a/content/docs/adr/adr-010-exec-api.md b/content/docs/adr/adr-010-exec-api.md deleted file mode 100644 index fd2ee81..0000000 --- a/content/docs/adr/adr-010-exec-api.md +++ /dev/null @@ -1,313 +0,0 @@ -# ADR 10: Execution API - -## Changelog - -- 2025.01.13: Initial draft -- 2025.04.24: Various improvements - -## Context - -Introduction of the Execution API makes rollkit very generic and execution-environment agnostic. -It removes all ABCI-centric code for full interoperability with other types of VMs. - -The Execution API serves as a bridge between Rollkit and various execution environments (VMs), -allowing Rollkit to remain agnostic to the specific implementation details of the execution layer. -This separation enables: - -1. Support for multiple VM types (EVM, WASM, etc.) -2. Easier integration with different execution environments -3. Cleaner separation of concerns between consensus and execution -4. More flexible and maintainable architecture - -## Alternative Approaches - -1. Maintain current state: keep ABCI interface and implement other VMs inside ABCI application. - - Pros: No changes required to existing code - - Cons: ABCI-specific code remains, limiting flexibility -2. Migrate to Engine API. 
- - Pros: Standard interface for EVM-based chains - - Cons: Too specific to EVM, not suitable for other VMs - -## Decision - -New generic Execution API is proposed. -It was designed to accommodate ABCI, Engine API, and any other VM / execution environment. - -## Detailed Design - -### API - -Execution API consist of 4 methods defined in [go-execution](https://github.com/rollkit/go-execution) repository. - -```go -// Executor defines a common interface for interacting with the execution client. -type Executor interface { - // InitChain initializes the blockchain with genesis information. - InitChain(ctx context.Context, genesisTime time.Time, initialHeight uint64, chainID string) (stateRoot types.Hash, maxBytes uint64, err error) - - // GetTxs retrieves all available transactions from the execution client's mempool. - GetTxs(ctx context.Context) ([]types.Tx, error) - - // ExecuteTxs executes a set of transactions to produce a new block header. - ExecuteTxs(ctx context.Context, txs []types.Tx, blockHeight uint64, timestamp time.Time, prevStateRoot types.Hash) (updatedStateRoot types.Hash, maxBytes uint64, err error) - - // SetFinal marks a block at the given height as final. - SetFinal(ctx context.Context, blockHeight uint64) error -} -``` - -### API Methods specification - -#### `InitChain` - -##### Description - -Initializes the blockchain's state based on genesis information. This method is invoked at the beginning of the blockchain's lifecycle to prepare the execution environment for subsequent operations. - -##### Inputs - -- `ctx` (`context.Context`): Context for managing request timeouts and cancellations. -- `genesisTime` (`time.Time`): The initial timestamp of the chain. -- `initialHeight` (`uint64`): The starting height of the chain. -- `chainID` (`string`): A unique identifier of the chain network. - -##### Outputs - -- `stateRoot` (`types.Hash`): The resulting state root after initializing the chain. 
-- `maxBytes` (`uint64`): Maximum block size in bytes, as defined by the execution client's genesis configuration. -- `err` (`error`): An error, if the initialization process fails. - -##### Expected Behavior - -- Initialize the chain according to the genesis. -- Generate an initial `stateRoot` representing the genesis state of the chain. -- Return the maximum allowable block size (`maxBytes`). -- Ensure all necessary state is initialized for subsequent operations. - -#### `GetTxs` - -##### Description - -Fetches all pending transactions from the execution client's mempool. -Transactions returned by execution client will be passed by rollkit to sequencer. - -##### Inputs - -- `ctx` (`context.Context`): Context for managing request timeouts and cancellations. - -##### Outputs - -- `txs` (`[]types.Tx`): Slice of transactions retrieved from the execution client's mempool; ordering doesn't matter. -- `err` (`error`): An error, if any, while retrieving transactions. - -##### Expected Behavior - -- Access the mempool and retrieve all available transactions. -- If no transactions are available, return an empty slice without error. -- Do not remove ("reap") transactions from mempool. -- Ensure transactions are valid and properly formatted. - -#### `ExecuteTxs` - -##### Description - -Executes a given set of transactions, updating the blockchain state. - -##### Inputs - -- `ctx` (`context.Context`): Context for managing request timeouts and cancellations. -- `txs` (`[]types.Tx`): A slice of transactions to be executed. -- `blockHeight` (`uint64`): The height of the block these transactions belong to. -- `timestamp` (`time.Time`): The block's timestamp. -- `prevStateRoot` (`types.Hash`): The state root of the chain before applying the transactions. - -##### Outputs - -- `updatedStateRoot` (`types.Hash`): The resulting state root after applying the transactions. -- `maxBytes` (`uint64`): Maximum block size in bytes, as allowed for the block being produced. 
-- `err` (`error`): An error, if any, during the execution process. - -##### Expected Behavior - -- Validate and apply the provided transactions to the current blockchain state. -- Generate an updated `stateRoot` reflecting changes introduced by the transactions. -- Enforce block size and validity limits, returning errors if constraints are violated. -- Respect the ordering of transactions. -- Update the mempool to remove all executed transactions. -- Ensure atomic execution - either all transactions succeed or none do. - -#### `SetFinal` - -##### Description - -Marks a block at the specified height as final, guaranteeing immutability for consensus purposes. - -##### Inputs - -- `ctx` (`context.Context`): Context for managing request timeouts and cancellations. -- `blockHeight` (`uint64`): The height of the block to be finalized. - -##### Outputs - -- `err` (`error`): An error, if any, during the finalization process. - -##### Expected Behavior - -- Update the execution client's internal state to reflect that the specified block is final and immutable. -- Ensure additional guarantees like cleaning up unnecessary resources associated with blocks deemed final. -- Prevent any modifications to finalized blocks. -- Optimize storage for finalized blocks if possible. - -#### General Notes - -1. **Thread-Safety**: All methods are not expected to be thread-safe, concurrent calls are not planned. -2. **Error Handling**: All methods should follow robust error handling practices, ensuring meaningful errors are returned when issues occur. -3. **Context Usage**: Methods should respect context-based deadlines and cancellations for long-running operations. -4. **State Management**: The execution environment is responsible for maintaining its own state and ensuring consistency. -5. **Atomicity**: Operations that modify state should be atomic - either fully succeed or fully fail. - -### Types - -The Execution API was designed to be highly generic. 
As a result, the types introduced in the API are intentionally abstract to ensure compatibility across various virtual machines and execution environments. - -1. `types.Hash` represents a cryptographic hash. To maintain generality, it is implemented as an alias for `[]byte`. -2. `types.Tx` represents a transaction in the most basic form. It is also defined as an alias for `[]byte`. - -This design choice ensures maximum flexibility and allows the API to remain independent of specific implementations or formats. It facilitates interoperability across different execution environments while keeping the API lightweight and adaptable. - -### Block Size Management - -The Execution API includes `maxBytes` as a return value in both `InitChain` and `ExecuteTxs` methods. This parameter plays a crucial role in block size management: - -1. **Initial Configuration**: During `InitChain`, the execution environment returns the maximum block size allowed by its genesis configuration. This value is used by Rollkit to enforce block size limits during block production. - -2. **Dynamic Adjustment**: In `ExecuteTxs`, the execution environment can return an updated `maxBytes` value. This allows for dynamic adjustment of block size limits based on: - - Network conditions - - Resource availability - - Protocol-specific requirements - - Other runtime factors - -3. **Implementation Requirements**: - - The execution environment must ensure that blocks produced do not exceed the returned `maxBytes` value - - If a block would exceed the limit, the execution environment should return an error - - The value should be consistent with the execution environment's capabilities and constraints - - A protocol overhead buffer is subtracted from the DA layer's max blob size to account for block headers and encoding - -4. 
**System-wide Coordination**: - - Rollkit gets the initial `maxBytes` value from the DA layer and passes it to the sequencer - - The sequencer uses this value to limit the size of transaction batches it creates - - The sequencer's `PopUpToMaxBytes` method ensures transactions don't exceed the size limit - - This coordination ensures consistent block size constraints across the entire system - -5. **Usage in Rollkit**: - - Rollkit uses this value to validate block sizes before submission - - Helps prevent oversized blocks from being produced - - Enables dynamic adjustment of block size limits without protocol changes - -### Implementation Guidelines - -1. **State Management**: - - Execution environments must maintain their own state - - State transitions should be atomic - - State should be persisted appropriately - - State should be recoverable after crashes - -2. **Error Handling**: - - Return meaningful error messages - - Handle context cancellations gracefully - - Ensure proper cleanup on errors - - Maintain state consistency even after errors - -3. **Performance Considerations**: - - Optimize for common operations - - Consider caching where appropriate - - Handle large state sizes efficiently - - Minimize unnecessary state transitions - -4. 
**Security**: - - Validate all inputs thoroughly - - Prevent unauthorized state modifications - - Ensure proper access control - - Handle sensitive data appropriately - -### Sequence Diagrams - -#### Block production - -```mermaid -sequenceDiagram - participant D as DA layer - participant S as sequencer - participant R as Rollkit - participant E as exec-env - - R ->> +E: InitChain - E -->> -R: stateRoot, maxBytes - loop Every block time - R ->> +E: GetTxs - E -->> -R: txs - loop For each tx in txs - R ->> S: SubmitTransaction - end - R ->> +S: GetNextBatch - S -->> -R: batch, time - R ->> +E: ExecuteTxs - E -->> -R: stateRoot, maxBytes - R ->> +D: Submit - D -->> -R: IDs - R ->> E: SetFinal - end -``` - -#### Block sync from P2P network or DA - -```mermaid -sequenceDiagram - participant P as P2P network / DA - participant S as sequencer - participant R as Rollkit - participant E as exec-env - - R ->> +E: InitChain - E -->> -R: stateRoot, maxBytes - loop Every block time - P ->> R: next batch - R ->> +S: VerifyBatch - S -->> -R: success, error - R ->> +E: ExecuteTxs - E -->> -R: stateRoot, maxBytes - R ->> E: SetFinal - end -``` - -## Status - -Accepted - -## Consequences - -### Positive - -1. Simplification of rollkit's logic. -2. Better separation of concerns. -3. Removal of ABCI dependencies. -4. Increased flexibility for different execution environments. -5. Cleaner architecture with well-defined boundaries. - -### Negative - -1. More difficult deployment (another binary is needed). -2. Need to reimplement ABCI execution environment. -3. Additional complexity in coordinating between components. -4. Potential performance overhead from additional abstraction layer. - -### Neutral - -1. Need to introduce new API exposed by rollkit. -2. Changes to existing deployment procedures. -3. Updates to documentation and tooling required. 
- -## References - -- [Rollkit EPIC for Execution API](https://github.com/evstack/ev-node/issues/1802) -- [go-execution repository](https://github.com/rollkit/go-execution) diff --git a/content/docs/adr/adr-011-remote-signer.md b/content/docs/adr/adr-011-remote-signer.md deleted file mode 100644 index e7ca4e9..0000000 --- a/content/docs/adr/adr-011-remote-signer.md +++ /dev/null @@ -1,97 +0,0 @@ -# ADR 011: Remote Signing Service for Fast and Secure Digital Signatures - -## Changelog - -- 2025-03-14: Initial ADR creation including remote and local signing implementations, gRPC client/server flow. - 2025-04-23: Renumbered from ADR-010 to ADR-011 to maintain chronological order. - -## Context - -Rollkit with single or multiple signers requires a fast, secure, and flexible digital signing solution. Currently, nodes perform local signing using keys stored on disk, in plaintext. However, this approach distributes private keys across many nodes, increasing the risk of compromise. By centralizing the signing process behind a remote service—ideally backed by an HSM (Hardware Security Module)—we can both secure our keys and achieve high availability. This remote signing service will also allow clients written in different languages to use a unified API through gRPC. - -## Alternative Approaches - -- **Local File-Based Signing Only:** - While simpler to implement and sufficient for development, this approach requires each node to hold a copy of the private key, increasing the risk of key compromise and reducing overall system security. - -- **REST API for Remote Signing:** - A RESTful service could be implemented for signing, but it would likely have higher latency and overhead compared to gRPC. Additionally, REST is less efficient for binary data transfers required for cryptographic operations. - -- **Direct HSM Integration at Each Node:** - Integrating HSMs directly with every node would be secure but expensive and complex to manage at scale. 
- -The chosen approach of a centralized, remote signing service via gRPC offers a balance of security, performance, and ease of multi-language support. - -## Decision - -We will implement a remote signing service that uses gRPC to communicate between the client node and the signing server. The signing server will act as a front for our secure key storage (which could be an HSM or similar secure system) to sign messages on demand. Additionally, a local file-based signing implementation will be provided for development and fallback scenarios. The interface for signing remains consistent across implementations: - -```mermaid -graph TD - A[Client Node] -->|"gRPC Call: Sign()/GetPublic()"| B[HSM / Remote Signing Server] - B -->|Uses| C["Secure Key Storage (HSM)"] - B -->|Returns Signature/Public Key| A -``` - -```go -type Signer interface { - // Cryptographically sign the given bytes. - Sign(message []byte) ([]byte, error) - - // Return a public key paired with this private key. - GetPublic() ([]byte, error) -} -``` - -## Detailed Design - -### GRPC API - -```proto -syntax = "proto3"; -package signer; -import "google/protobuf/empty.proto"; -// The SignRequest holds the bytes we want to sign. -message SignRequest { - bytes message = 1; -} -// The SignResponse returns the signature bytes. -message SignResponse { - bytes signature = 1; -} -// GetPublicRequest requests the public key -message GetPublicRequest {} -// The GetPublicResponse returns the public key. -message GetPublicResponse { - bytes public_key = 1; -} -// The SignerService defines the RPCs to sign and to retrieve the public key. -service SignerService { - rpc Sign(SignRequest) returns (SignResponse); - rpc GetPublic(GetPublicRequest) returns (GetPublicResponse); -} -``` - -Signing operations will typically require very high throughput and minimal latency. Nodes frequently request digital signatures, expecting quick responses. 
Public key retrievals, while less frequent than signatures, still occur regularly for validation purposes. - -## Status - -Proposed - -## Consequences - -### Positive - - • Enhanced Security: Private keys remain secure on a centralized, controlled system. - • High Availability: Multiple instances of the signing service behind a load balancer can provide failover. - • Language Agnosticism: gRPC allows clients in multiple programming languages to use the same service. - -### Negative - - • Increased Complexity: Additional operational overhead for managing a remote service. - • Network Dependency: Nodes depend on network connectivity to access the signing service. - • Potential Latency: Although gRPC is optimized, remote calls add a slight delay compared to local signing. - -### Neutral - - • Fallback Capability: The availability of both local and remote implementations ensures flexibility during development and production. diff --git a/content/docs/adr/adr-012-based-sequencing.md b/content/docs/adr/adr-012-based-sequencing.md deleted file mode 100644 index 2e3456a..0000000 --- a/content/docs/adr/adr-012-based-sequencing.md +++ /dev/null @@ -1,322 +0,0 @@ -# ADR 012: Based Sequencing - -## Changelog - -- 2025-04-09: Initial draft -- 2025-04-09: Added optional UX optimization where full nodes can relay user transactions to base layer -- 2025-04-09: Added rationale for VerifyBatch utility in a based setup -- 2025-04-10: Added Relaying Costs and Fee Compensation via EVM -- 2025-11-27: Updated to reflect actual implementation with epoch-based forced inclusion - -## Context - -Most chains today rely on single sequencers to form batches of user transactions, despite the availability of base layers (like Celestia) that provide data availability and canonical ordering guarantees. A single sequencer introduces liveness and censorship risks, as well as complexity in proposer election, fault tolerance, and bridge security. 
- -Based sequencing eliminates this reliance by having the base layer determine transaction ordering. This ADR describes the **epoch-based forced inclusion** implementation where **every full node acts as its own proposer** by independently: - -- Reading forced inclusion transactions from the base layer at epoch boundaries -- Applying deterministic batching rules -- Executing transactions to compute state updates - -This approach ensures consistency, removes the need for trusted intermediaries, and improves decentralization and resilience. - -## Alternative Approaches - -### Single Sequencer - -- A designated sequencer collects transactions and publishes them to the base layer. -- Simpler for UX and latency control, but introduces centralization and failure points. - -### Leader-Elected Proposer (e.g., BFT committee or rotating proposer) - -- Some nodes are elected to act as proposers for efficiency. -- Still introduces trust assumptions, coordination complexity, and MEV-related risks. - -### Continuous DA Polling - -- Full nodes continuously poll DA and form batches based on size thresholds. -- More complex coordination and can lead to inconsistent batch boundaries across nodes. - -The epoch-based approach provides deterministic batch boundaries while minimizing DA queries and ensuring all honest nodes derive identical blocks. - -## Decision - -We adopt a based sequencing model where every full node in the network acts as its own proposer using an epoch-based forced inclusion mechanism: - -### Core Principles - -1. **Epoch Boundaries**: Transactions are retrieved from DA in epochs defined by `DAEpochForcedInclusion` -2. **Deterministic Batch Formation**: All nodes apply the same rules to form batches from queued transactions -3. **MaxBytes Enforcement**: Individual blocks respect a maximum byte limit (2MB default) -4. **Transaction Smoothing**: Large transaction sets can be smoothed across multiple blocks within an epoch -5. 
**No Trusted Sequencer**: All ordering comes from the base layer - -### Sequencing Model - -The `BasedSequencer` implementation: - -- **Only retrieves transactions from DA** via forced inclusion namespace -- **Ignores transactions submitted via `SubmitBatchTxs`** (no mempool) -- **Fetches at epoch boundaries** to minimize DA queries -- **Queues transactions** and creates batches respecting `MaxBytes` -- **Validates blob sizes** against absolute maximum to prevent oversized submissions - -### Transaction Flow - -```mermaid -sequenceDiagram - participant User - participant DA as Base Layer (Celestia) - participant NodeA as Full Node A - participant NodeB as Full Node B - participant ExecA as Execution Engine A - participant ExecB as Execution Engine B - - Note over User: User posts transaction to DA
forced inclusion namespace - - User->>DA: Submit blob to forced inclusion namespace - - Note over NodeA,NodeB: At epoch start (e.g., DA height 100, 110, 120...) - - NodeA->>DA: RetrieveForcedIncludedTxs(epochStart) - NodeB->>DA: RetrieveForcedIncludedTxs(epochStart) - - DA-->>NodeA: Txs from epoch [100-109] - DA-->>NodeB: Txs from epoch [100-109] - - Note over NodeA,NodeB: Queue transactions and create batches
respecting MaxBytes - - NodeA->>NodeA: createBatchFromQueue(MaxBytes) - NodeB->>NodeB: createBatchFromQueue(MaxBytes) - - NodeA->>ExecA: ExecuteTxs(batch) - NodeB->>ExecB: ExecuteTxs(batch) - - ExecA-->>NodeA: State root - ExecB-->>NodeB: State root - - Note over NodeA,NodeB: Both nodes produce identical blocks -``` - -## Detailed Design - -### Epoch-Based Retrieval - -**Epoch Calculation**: - -- Epoch number: `((daHeight - daStartHeight) / daEpochSize) + 1` -- Epoch boundaries: `[start, end]` where transactions must be included - -**Example with `DAEpochForcedInclusion = 10`**: - -- DA heights 100-109 = Epoch 1 -- DA heights 110-119 = Epoch 2 -- DA heights 120-129 = Epoch 3 - -**Retrieval Logic** (`ForcedInclusionRetriever`): - -1. Check if DA height is at epoch start -2. If not at epoch start, return empty transaction set -3. If at epoch start, fetch all blobs from forced inclusion namespace for entire epoch -4. Return `ForcedInclusionEvent` with transactions and DA height range - -### Batch Formation - -**BasedSequencer Queue Management**: - -```go -// On GetNextBatch: -1. Retrieve forced inclusion transactions for current epoch -2. Validate blob sizes (skip oversized blobs) -3. Add valid transactions to internal queue -4. Create batch from queue respecting MaxBytes -5. Return batch (may be partial if queue exceeds MaxBytes) -``` - -**Batch Creation** (`createBatchFromQueue`): - -- Iterate through queued transactions -- Accumulate until `totalBytes + txSize > MaxBytes` -- Stop at limit and preserve remaining transactions for next block -- Clear queue when all transactions consumed - -### Block Production - -**Executor Flow** (`block/internal/executing/executor.go`): - -1. **Retrieve Batch**: Call `sequencer.GetNextBatch(MaxBytes: 2MB)` -2. **Handle Empty Batch**: Skip block production if no transactions -3. **Create Block**: Form block header and data with batch transactions -4. **Execute**: Apply transactions via execution engine -5. 
**Update State**: Store DA height from sequencer in state -6. **Sign Header**: Based sequencer returns empty signature -7. **Persist**: Save block to store -8. **Broadcast**: Propagate header and data to P2P network - -### Transaction Smoothing - -When forced inclusion transactions exceed `MaxBytes`: - -**Block 1**: - -``` -Epoch [100-109] contains 3MB of transactions -Block at DA height 100: 2MB (partial) -Remaining in queue: 1MB -``` - -**Block 2**: - -``` -Block at DA height 101: 1MB (remainder) + new regular txs -Queue cleared -``` - -This ensures all epoch transactions are eventually included while respecting block size limits. - -### Forced Inclusion Verification - -Full nodes verify that batches include all required forced inclusion transactions via `Syncer.verifyForcedInclusionTxs`: - -1. Retrieve forced inclusion transactions for current DA height -2. Check all forced txs are present in block -3. Allow deferral within epoch boundaries -4. Reject blocks that: - - Censor forced inclusion transactions after epoch end - - Skip forced transactions without valid reason - -### Data Structures - -**ForcedInclusionEvent**: - -```go -type ForcedInclusionEvent struct { - StartDaHeight uint64 // Epoch start DA height - EndDaHeight uint64 // Last processed DA height in epoch - Txs [][]byte // All transactions from epoch -} -``` - -**BasedSequencer State**: - -```go -type BasedSequencer struct { - daHeight atomic.Uint64 // Current DA height - txQueue [][]byte // Queued transactions awaiting inclusion -} -``` - -### Configuration - -**Genesis Configuration**: - -- `DAStartHeight`: Starting DA height for the chain -- `DAEpochForcedInclusion`: Number of DA blocks per epoch (e.g., 10) - -**Constants**: - -- `DefaultMaxBlobSize`: 2MB per batch/block -- Enforced both at submission and retrieval - -### Systems Affected - -- **BasedSequencer**: Implements epoch-based transaction retrieval -- **ForcedInclusionRetriever**: Fetches transactions from DA at epochs -- **Executor**: 
Drives block production using sequencer batches -- **Syncer**: Verifies forced inclusion compliance -- **DA Client**: Must support forced inclusion namespace - -### APIs - -**Sequencer Interface**: - -```go -// Returns empty response - based sequencer ignores submissions -SubmitBatchTxs(ctx, req) (*SubmitBatchTxsResponse, error) - -// Retrieves next batch from forced inclusion queue -GetNextBatch(ctx, req) (*GetNextBatchResponse, error) - -// Always returns true for based sequencer -VerifyBatch(ctx, req) (*VerifyBatchResponse, error) -``` - -**Forced Inclusion Retrieval**: - -```go -// Retrieves forced inclusion txs at DA height (epoch start) -RetrieveForcedIncludedTxs(ctx, daHeight) (*ForcedInclusionEvent, error) -``` - -### Block Time Characteristics - -- **Block time is a function of DA layer block time** -- With `DAEpochForcedInclusion = 10` and Celestia ~12s block time: - - Minimum block time: ~12s (if transactions present) - - Maximum epoch duration: ~120s (10 blocks) -- **Lazy mode has no effect** - based sequencing inherently follows DA timing -- **No headers are published to DA** - only forced inclusion blobs - -### Security Considerations - -**Trust Model**: - -- No trusted sequencer required -- All nodes derive identical state from DA -- Invalid blocks are automatically rejected by execution rules - -**Attack Vectors**: - -- Invalid State: Rejected by execution engine during `ExecuteTxs` -- Blob Spam: Limited by DA namespace fees and size validation -- Incorrect Batch: Each node independently derives batches, inconsistent nodes fall out of sync - -### Efficiency - -- Minimal DA Queries: Only fetch at epoch boundaries -- Bounded Latency: Epoch duration provides upper bound -- Transaction Queuing: Smooth large batches across multiple blocks - -## Status - -Implemented - -## Consequences - -### Positive - -- **Eliminates single sequencer dependency** - fully decentralized ordering -- **Deterministic consensus** - all nodes converge on same state -- 
**Censorship resistance** - forced inclusion verified by all nodes -- **Simplified architecture** - no proposer election or coordination -- **Economic sustainability** - fee recipient mechanism enables relay compensation - -### Negative - -- **Block time tied to DA layer** - cannot be independently configured -- **Minimum latency** - at least one DA block time -- **Epoch-based batching** - cannot include transactions mid-epoch - -### Neutral - -- **No mempool in based sequencer** - transactions only via forced inclusion -- **Queue management required** - full nodes maintain transaction queues -- **DA namespace dependency** - requires forced inclusion namespace support - -## Future Enhancements - -1. **Transaction Relaying**: Implement full node RPC endpoints to accept and relay user transactions to DA -2. **Dynamic Epochs**: Adjust epoch size based on transaction volume or network conditions -3. **Priority Mechanisms**: Support application-specific transaction ordering within epochs -4. **Light Client Integration**: Implement header verification without full re-execution -5. 
**Cross-Chain Inclusion**: Enable forced inclusion from multiple DA layers - -## References - - - -- [EthResearch: Based Rollups](https://ethresear.ch/t/based-rollups-superpowers-from-l1-sequencing/15016) -- [Taiko: Based Sequencing](https://taiko.mirror.xyz/7dfMydX1FqEx9_sOvhRt3V8hJksKSIWjzhCVu7FyMZU) -- [Surge Rollup](https://www.surge.wtf/) -- [Spire](https://www.spire.dev/) -- [Puffer UniFi](https://www.puffer.fi/unifi) diff --git a/content/docs/adr/adr-013-single-sequencer.md b/content/docs/adr/adr-013-single-sequencer.md deleted file mode 100644 index 2038a9b..0000000 --- a/content/docs/adr/adr-013-single-sequencer.md +++ /dev/null @@ -1,225 +0,0 @@ -# ADR 13: Single Sequencer - -## Changelog - -- 2024-10-01: Initial draft - -## Context - -Rollkit supports modular sequencer implementations and a single sequencer is a simple and efficient solution that can serve as a starting point for developers who don't need the complexity of a decentralized sequencing solution. - -The single sequencer needs to implement the Generic Sequencer interface defined in the `core/sequencer` package, provide transaction batching capabilities, and reliably submit these batches to a DA layer. It should also maintain state to track submitted batches and provide verification capabilities. - -## Alternative Approaches - -### Decentralized Sequencer - -A decentralized sequencer would distribute the sequencing responsibility across multiple nodes, providing better censorship resistance and fault tolerance. However, this approach introduces significant complexity in terms of consensus, leader election, and coordination between nodes. It would also require more resources to operate and maintain. - -This approach was not chosen for the initial implementation because: - -1. It adds unnecessary complexity for many use cases -2. It requires more development time and resources -3. 
Many projects start with a single sequencer and gradually move towards decentralization - -### Embedded Sequencer in Nodes - -Another approach would be to embed sequencing functionality directly into nodes. This would simplify the architecture by eliminating a separate sequencer component. - -This approach was not chosen because: - -1. It couples sequencing logic with node logic, reducing modularity -2. It makes it harder to upgrade or replace the sequencing component independently -3. It doesn't allow for a dedicated sequencing service that can be optimized separately - -## Decision - -We implement a standalone single sequencer that: - -1. Implements the Generic Sequencer interface from the `core/sequencer` package -2. Batches transactions and submits them to a DA layer at regular intervals -3. Provides metrics for monitoring and observability - -The single sequencer is a separate repository and can be deployed as a standalone service or as a Docker container. - -## Detailed Design - -### User Requirements - -- Developers need a simple, reliable sequencer that can order transactions and submit them to a DA layer -- The sequencer should be easy to deploy and configure -- The sequencer should provide metrics for monitoring -- The sequencer should be able to recover from crashes and maintain state - -### Systems Affected - -- Nodes that interact with the sequencer -- DA layer where batches are submitted - -### Data Structures - -The single sequencer uses the following key data structures: - -1. **BatchQueue**: A queue to store batches of transactions waiting to be processed - - ```go - type BatchQueue struct { - queue []sequencing.Batch // In-memory queue of batches waiting to be processed - mu sync.Mutex // Mutex to ensure thread-safe access to the queue - } - ``` - -2. 
**Sequencer**: The main sequencer structure that implements the Generic Sequencer interface - - ```go - type Sequencer struct { - dalc *da.DAClient // Client for interacting with the Data Availability layer - batchTime time.Duration // Time interval between batch submissions - ctx context.Context // Context for controlling the sequencer's lifecycle - maxSize uint64 // Maximum size of a batch in bytes - - chainId sequencing.ChainId // Identifier for the chain this sequencer serves - - tq *TransactionQueue // Queue for storing pending transactions - lastBatchHash []byte // Hash of the last processed batch - lastBatchHashMutex sync.RWMutex // Mutex for thread-safe access to lastBatchHash - - seenBatches map[string]struct{} // Map to track batches that have been processed - seenBatchesMutex sync.Mutex // Mutex for thread-safe access to seenBatches - bq *BatchQueue // Queue for storing batches ready for processing - - db *badger.DB // BadgerDB instance for persistent storage - dbMux sync.Mutex // Mutex for safe concurrent DB access - - metrics *Metrics // Structure to hold metrics for monitoring - } - ``` - -3. 
**Metrics**: Structure to hold metrics for monitoring - - ```go - type Metrics struct { - GasPrice metrics.Gauge // Tracks the gas price used for DA submissions - LastBlobSize metrics.Gauge // Tracks the size of the last submitted blob - TransactionStatus metrics.Counter // Counts transaction status outcomes - NumPendingBlocks metrics.Gauge // Tracks the number of blocks waiting to be submitted - IncludedBlockHeight metrics.Gauge // Tracks the height of the last included block in the DA layer - } - ``` - -### APIs - -The single sequencer implements the Generic Sequencer interface from the `core/sequencer` package: - -```go -type Sequencer interface { - SubmitBatchTxs(ctx context.Context, req SubmitBatchTxsRequest) (*SubmitBatchTxsResponse, error) - GetNextBatch(ctx context.Context, req GetNextBatchRequest) (*GetNextBatchResponse, error) - VerifyBatch(ctx context.Context, req VerifyBatchRequest) (*VerifyBatchResponse, error) -} -``` - -1. **SubmitBatchTxs**: - - This method is responsible for accepting a batch of transactions from a client. It takes a context and a request containing the chain ID and the batch of transactions to be submitted. - - The method first validates the chain ID to ensure it matches the expected ID for the sequencer. If the ID is invalid, it returns an error. - - Upon successful validation, the method adds the transactions to the internal transaction queue (`TransactionQueue`) for processing. - - It then triggers the batch submission process, which involves retrieving the next batch of transactions and submitting them to the designated Data Availability (DA) layer. - - Finally, it returns a response indicating the success or failure of the submission. - -2. **GetNextBatch**: - - This method retrieves the next batch of transactions that are ready to be processed by the application. It takes a context and a request containing the chain ID and the last batch hash. - - The method first checks if the chain ID is valid. If not, it returns an error. 
- - It then verifies the last batch hash to ensure that the client is requesting the correct next batch. - - If a valid batch is found, it prepares the batch response, which includes the batch of transactions and a timestamp. - - If no transactions are available, it returns an empty batch response. - - Note that this method is used by the node to get a sequencer soft-confirmed batch that the sequencer promises to publish to the DA layer. - -3. **VerifyBatch**: - - This method is used to verify that a batch received (soft-confirmed) from the sequencer was actually published on the DA layer. It takes a context and a request containing the chain ID and the batch hash. - - Similar to the other methods, it first validates the chain ID. - - It then checks if the provided batch hash exists in the internal data structure that tracks seen batches. - - If the batch hash is found, it returns a response indicating that the batch is valid. If not, it returns a response indicating that the batch is invalid. - - Once this method returns true for a batch, a node can mark the block associated with this batch as `DA included` and mark it as fully confirmed from its view. - -These methods work together to ensure that the single sequencer can effectively manage transaction submissions, retrievals, and verifications, providing a reliable interface for clients to interact with the sequencer. 
- -### Efficiency Considerations - -- The sequencer uses a configurable batch time to balance between latency and efficiency -- Transactions are batched to reduce the number of DA submissions -- The sequencer maintains an in-memory queue for fast access and a persistent database for durability -- Exponential backoff is used for DA submission retries to handle temporary failures - -### Access Patterns - -- Clients will submit transactions to the sequencer at varying rates -- The sequencer will batch transactions and submit them to the DA layer at regular intervals -- Nodes will request the next batch from the sequencer to process transactions - -### Logging, Monitoring, and Observability - -The sequencer provides the following metrics: - -- Gas price of DA submissions -- Size of the last submitted blob -- Transaction status counts -- Number of pending blocks -- Last included block height - -These metrics can be exposed via Prometheus for monitoring. - -### Security Considerations - -- The single sequencer is a single point of failure and control -- Access control is not implemented in the initial version, but can be added in future versions -- The sequencer validates chain IDs to ensure transactions are submitted to the correct application - -### Privacy Considerations - -- The sequencer has access to all transactions before they are submitted to the DA layer -- Transactions are not encrypted, so sensitive data should not be included in transactions - -### Testing - -The single sequencer includes: - -- Unit tests for core functionality -- Integration tests with a mock DA layer -- Test coverage reporting via Codecov - -### Breaking Changes - -This is a new component and does not introduce breaking changes to existing systems. 
- -## Status - -Proposed - -## Consequences - -### Positive - -- Provides a simple, production-ready sequencer for developers -- Implements the Generic Sequencer interface, making it compatible with existing Rollkit components -- Includes metrics for monitoring and observability -- Maintains state to track submitted batches and provide verification -- Can be deployed as a standalone service or as a Docker container - -### Negative - -- Single design introduces a single point of failure -- No built-in access control or authentication in the initial version -- Limited scalability compared to a distributed sequencer - -### Neutral - -- Requires a separate deployment and management of the sequencer service -- Developers need to configure the sequencer to connect to their chosen DA layer - -## References - -- [Generic Sequencer Interface](https://github.com/evstack/ev-node/blob/main/core/sequencer/sequencing.go) -- [Rollkit Repository](https://github.com/evstack/ev-node) -- [Single Sequencer Repository](https://github.com/rollkit/centralized-sequencer) diff --git a/content/docs/adr/adr-014-header-and-data-separation.md b/content/docs/adr/adr-014-header-and-data-separation.md deleted file mode 100644 index b8d68a7..0000000 --- a/content/docs/adr/adr-014-header-and-data-separation.md +++ /dev/null @@ -1,223 +0,0 @@ -# Header and Data Separation ADR - -## Abstract - -The separation of header and data structures in Rollkit unlocks expanding the sequencing scheme beyond single sequencing and unlocks the use of a decentralized sequencer mode. This means that the creation of list of the transactions can be done by another network as well while nodes still produce headers after executing that list of transactions. This overall change is akin to the proposer-builder separation in the Ethereum protocol, where the Rollkit header producer acts as the proposer, and the sequencer, which produces a list of transactions, acts as the builder. 
- -### Before Separation - -```mermaid -flowchart LR - CS[Single Sequencer] -->|Creates| B[Block] - B -->|Contains| SH1[SignedHeader] - B -->|Contains| D1[Data] - - class CS,B,SH1,D1 node -``` - -### After Separation - -```mermaid -flowchart LR - HP[Header Producer] -->|Creates| SH2[SignedHeader] - SEQ[Sequencer] -->|Creates| D2[Data] - SH2 -.->|References via DataCommitment| D2 - - class HP,SEQ,SH2,D2 node -``` - -## Protocol/Component Description - -Before, Rollkit only supported the use of a single sequencer that was responsible for creating a list of transactions by reaping its mempool, executing them to produce a header, and putting them together in a block. Rollkit headers and data were encapsulated within a single block structure. The block struct looked like this: - -```go -// Block defines the structure of Rollkit block. -type Block struct { - SignedHeader SignedHeader - Data Data -} -``` - -The separation of header and data into distinct structures allows them to be processed independently. The `SignedHeader` struct now focuses on the header information, while the `Data` struct handles transaction data separately. This separation is particularly beneficial in unlocking based sequencing, where users submit transactions directly to the Data Availability layer which acts as the entity responsible for creating the list of transactions. - -```mermaid -classDiagram - class Block { - SignedHeader - Data - } - - class SignedHeader { - Header - Signature - } - - class Header { - ParentHash - Height - Timestamp - ChainID - DataCommitment - StateRoot - ExtraData - } - - class Data { - Metadata - Txs - } - - Block *-- SignedHeader - Block *-- Data - SignedHeader *-- Header -``` - -This change also affects how full nodes sync. Previously, full nodes would apply the transactions from the `Block` struct and verify that the `header` in `SignedHeader` matched their locally produced header. 
Now, with the separation, full nodes obtain the transaction data separately (via the DA layer directly in based sequencer mode, or via p2p gossip/DA layer in single sequencer mode) and verify it against the header signed by the header producer once they have both components. If a full node receives the header/data via a p2p gossip layer, they should wait to see the same header/data on the DA layer before marking the corresponding block as finalized in their view. - -This ensures that the data integrity and consistency are maintained across the network. - -```go -// SignedHeader struct consists of the header and a signature -type SignedHeader struct { - Header // Rollkit Header - Signature Signature // Signature of the header producer - ... -} - -// Header struct focusing on header information -type Header struct { - // Hash of the previous block header. - ParentHash Hash - // Height represents the block height (aka block number) of a given header - Height uint64 - // Block creation timestamp - Timestamp uint64 - // The Chain ID - ChainID string - // Pointer to location of associated block data aka transactions in the DA layer - DataCommitment Hash - // Commitment representing the state linked to the header - StateRoot Hash - // Arbitrary field for additional metadata - ExtraData []byte -} - -// Data defines Rollkit block data. -type Data struct { - *Metadata // Defines metadata for Data struct to help with p2p gossiping. - Txs Txs // List of transactions to be executed -} -``` - -The `publishBlock` method in `manager.go` now creates the header and data structures separately. This decoupling allows for the header to be submitted to the DA layer independently of the block data, which can be built by a separate network. This change supports the transition from a single sequencer mode to a decentralized sequencer mode, making the system more modular. 
- -## Message Structure/Communication Format - -### Header Producer - -Before the separation: Only the entire `Block` struct composed of both header and data was submitted to the DA layer. The `Block` and `SignedHeader` were both gossipped over two separate p2p layers: gossiping `Block` to just full nodes and gossiping the `SignedHeader` to full nodes and future light nodes to join that will only sync headers (and proofs). - -After the separation: The `SignedHeader` and `Data` are submitted separately to the DA layer. Note that the `SignedHeader` has a `Header` that is linked to the `Data` via a `DataCommitment` from the DA layer. `SignedHeader` and `Data` are both gossipped over two separate p2p layers: gossiping `Data` to just full nodes and gossiping the `SignedHeader` to full nodes and future light nodes to join that will only sync headers (and proofs). - -In based sequencing mode, the header producer is equivalent to a full node. - -### Before Separation - -```mermaid -flowchart LR - - CS1[Single Sequencer] -->|Submits Block| DA1[DA Layer] - CS1 -->|Gossips Block| FN1[Full Nodes] - CS1 -->|Gossips SignedHeader| LN1[Light Nodes] - - class CS1,DA1,FN1,LN1 node -``` - -### After Separation - Single Sequencer Mode - -```mermaid -flowchart LR - - CS2[Single Sequencer] -->|Submits Data| DA2[DA Layer] - HP2[Header Producer] -->|Submits SignedHeader| DA2 - - CS2 -->|Gossips Data| FN2[Full Nodes] - HP2 -->|Gossips SignedHeader| FN2 - HP2 -->|Gossips SignedHeader| LN2[Light Nodes] - - class CS2,HP2,DA2,FN2,LN2 node -``` - -### After Separation - Based Mode - -```mermaid -flowchart LR - - Users -->|Submit Txs| DA3[DA Layer] - FN3[Full Node/Header Producer] -->|Reads Data| DA3 - - class Users,DA3,FN3,LN3 node -``` - -### Syncing Full Node - -Before the separation: Full Nodes get the entire `Block` struct via p2p or the DA layer. They can choose to apply the block as soon as they get it via p2p OR just wait to see it on the DA layer. 
This depends on whether a full node opts in to the p2p layer or not. Gossiping the `SignedHeader` over p2p is primarily for light nodes to get the header. - -After the separation: Full nodes get the `Data` struct and the `SignedHeader` struct separately over p2p and DA layers. In code, this refers to the `HeaderStore` and the `DataStore` in block manager. A Full node should wait for having both the `Data` struct and the corresponding `SignedHeader` to it before applying the block data to its associated state machine. This is so that the full node can verify that its locally produced header's state commitment after it applies the `Data` associated to a block is consistent with the `Header` inside the `SignedHeader` that is received from the header producer. The `Header` should contain a link to its associated Data via a `DataCommitment` that is a pointer to the location of the `Data` on the DA layer. - -```mermaid -sequenceDiagram - participant FN as Full Node - participant P2P as P2P Network - participant DA as DA Layer - participant SM as State Machine - - Note over FN,DA: After Separation - Sync Process - - P2P->>FN: Receive Data - P2P->>FN: Receive SignedHeader - FN->>DA: Verify Data availability - FN->>DA: Verify SignedHeader availability - FN->>FN: Match Data with SignedHeader via DataCommitment - FN->>SM: Apply Data to state machine - FN->>FN: Verify locally produced header matches received Header - FN->>FN: Mark block as finalized -``` - -In a single sequencer mode, before, a full node marks a block finalized, it should verify that both the `SignedHeader` and `Data` associated to it were made available on the DA layer by checking it directly or verifying DA inclusion proofs. - -In based sequencing mode, blocks can be instantly finalized since the `Data` is directly always derived from the DA layer and already exists there. There's no need for a `SignedHeader` to exist on the DA layer. 
- -```mermaid -sequenceDiagram - participant DA as DA Layer - participant FN as Full Node - participant SM as State Machine - - Note over DA,FN: Based Sequencing Mode - - DA->>FN: Data already available - FN->>FN: Read Data from DA - FN->>FN: Execute transactions - FN->>FN: Produce Header - FN->>SM: Apply state changes - FN->>FN: Finalize Block - Note right of FN: No need to submit SignedHeader to DA -``` - -## Assumptions and Considerations - -- Considerations include ensuring that headers and data are correctly synchronized and validated to prevent inconsistencies. -- Ensure that all components interacting with headers and data are updated to handle them as separate entities. -- Security measures should be in place to prevent unauthorized access or tampering with headers and data during transmission and storage. -- Performance optimizations may be necessary to handle the increased complexity of managing separate header and data structures, especially in high-throughput environments. -- Testing and validation processes should be updated to account for the new structure and ensure that all components function correctly in both single and decentralized sequencer modes. - -## Implementation - -The implementation of this separation can be found in the Rollkit repository, specifically in the changes made to the `manager.go` file. The `publishBlock` method illustrates the creation of separate header and data structures, and the associated logic for handling them independently. 
See [Rollkit PR #1789](https://github.com/evstack/ev-node/pull/1789) - -## References - -- [Rollkit PR #1789](https://github.com/evstack/ev-node/pull/1789) -- [Proposer-Builder Separation](https://www.alchemy.com/overviews/proposer-builder-separation) diff --git a/content/docs/adr/adr-015-rollkit-minimal-header.md b/content/docs/adr/adr-015-rollkit-minimal-header.md deleted file mode 100644 index ac38def..0000000 --- a/content/docs/adr/adr-015-rollkit-minimal-header.md +++ /dev/null @@ -1,180 +0,0 @@ -# Rollkit Minimal Header - -## Abstract - -This document specifies a minimal header format for Rollkit, designed to eliminate the dependency on CometBFT's header format. This new format can then be used to produce an execution layer tailored header if needed. For example, the new ABCI Execution layer can have an ABCI-specific header for IBC compatibility. This allows Rollkit to define its own header structure while maintaining backward compatibility where necessary. - -## Protocol/Component Description - -The Rollkit minimal header is a streamlined version of the traditional header, focusing on essential information required for block processing and state management for nodes. This header format is designed to be lightweight and efficient, facilitating faster processing and reduced overhead. 
- -### Rollkit Minimal Header Structure - -```txt -┌─────────────────────────────────────────────┐ -│ Rollkit Minimal Header │ -├─────────────────────┬───────────────────────┤ -│ ParentHash │ Hash of previous block│ -├─────────────────────┼───────────────────────┤ -│ Height │ Block number │ -├─────────────────────┼───────────────────────┤ -│ Timestamp │ Creation time │ -├─────────────────────┼───────────────────────┤ -│ ChainID │ Chain identifier │ -├─────────────────────┼───────────────────────┤ -│ DataCommitment │ Pointer to block data │ -│ │ on DA layer │ -├─────────────────────┼───────────────────────┤ -│ StateRoot │ State commitment │ -├─────────────────────┼───────────────────────┤ -│ ExtraData │ Additional metadata │ -│ │ (e.g. sequencer info) │ -└─────────────────────┴───────────────────────┘ -``` - -## Message Structure/Communication Format - -The header is defined in GoLang as follows: - -```go -// Header struct focusing on header information -type Header struct { - // Hash of the previous block header. - ParentHash Hash - // Height represents the block height (aka block number) of a given header - Height uint64 - // Block creation timestamp - Timestamp uint64 - // The Chain ID - ChainID string - // Pointer to location of associated block data aka transactions in the DA layer - DataCommitment []byte - // Commitment representing the state linked to the header - StateRoot Hash - // Arbitrary field for additional metadata - ExtraData []byte -} -``` - -In case the chain has a specific designated proposer or a proposer set, that information can be put in the `extraData` field. So in single sequencer mode, the `sequencerAddress` can live in `extraData`. For base sequencer mode, this information is not relevant. - -This minimal Rollkit header can be transformed to be tailored to a specific execution layer as well by inserting additional information typically needed. 
- -### EVM execution client - -- `transactionsRoot`: Merkle root of all transactions in the block. Can be constructed from unpacking the `DataCommitment` in Rollkit Header. -- `receiptsRoot`: Merkle root of all transaction receipts, which store the results of transaction execution. This can be inserted by the EVM execution client. -- `Gas Limit`: Max gas allowed in the block. -- `Gas Used`: Total gas consumed in this block. - -#### Transformation to EVM Header - -```txt -┌─────────────────────────────────────────────┐ -│ Rollkit Minimal Header │ -└───────────────────┬─────────────────────────┘ - │ - ▼ Transform -┌─────────────────────────────────────────────┐ -│ EVM Header │ -├─────────────────────┬───────────────────────┤ -│ ParentHash │ From Rollkit Header │ -├─────────────────────┼───────────────────────┤ -│ Height/Number │ From Rollkit Header │ -├─────────────────────┼───────────────────────┤ -│ Timestamp │ From Rollkit Header │ -├─────────────────────┼───────────────────────┤ -│ ChainID │ From Rollkit Header │ -├─────────────────────┼───────────────────────┤ -│ TransactionsRoot │ Derived from │ -│ │ DataCommitment │ -├─────────────────────┼───────────────────────┤ -│ StateRoot │ From Rollkit Header │ -├─────────────────────┼───────────────────────┤ -│ ReceiptsRoot │ Added by EVM client │ -├─────────────────────┼───────────────────────┤ -│ GasLimit │ Added by EVM client │ -├─────────────────────┼───────────────────────┤ -│ GasUsed │ Added by EVM client │ -├─────────────────────┼───────────────────────┤ -│ ExtraData │ From Rollkit Header │ -└─────────────────────┴───────────────────────┘ -``` - -### ABCI Execution - -This header can be transformed into an ABCI-specific header for IBC compatibility. - -- `Version`: Required by IBC clients to correctly interpret the block's structure and contents. -- `DataHash`: A hash of the block's transaction data, enabling IBC clients to verify that the data has not been tampered with. 
Can be constructed from unpacking the `DataCommitment` in Rollkit header. -- `ValidatorHash`: Current validator set's hash, which IBC clients use to verify that the block was validated by the correct set of validators. This can be the IBC attester set of the chain for backward compatibility with the IBC Tendermint client, if needed. -- `NextValidatorsHash`: The hash of the next validator set, allowing IBC clients to anticipate and verify upcoming validators. -- `AppHash`: Same as the `StateRoot` in the Rollkit Header. -- `ProposerAddress`: The address of the block proposer, allowing IBC clients to track and verify the entities proposing new blocks. Can be constructed from the `extraData` field in the Rollkit Header. - -#### Transformation to ABCI Header - -```txt -┌─────────────────────────────────────────────┐ -│ Rollkit Minimal Header │ -└───────────────────┬─────────────────────────┘ - │ - ▼ Transform -┌─────────────────────────────────────────────┐ -│ ABCI Header │ -├─────────────────────┬───────────────────────┤ -│ Height │ From Rollkit Header │ -├─────────────────────┼───────────────────────┤ -│ Time │ From Rollkit Header │ -├─────────────────────┼───────────────────────┤ -│ ChainID │ From Rollkit Header │ -├─────────────────────┼───────────────────────┤ -│ AppHash │ From StateRoot │ -├─────────────────────┼───────────────────────┤ -│ DataHash │ From DataCommitment │ -├─────────────────────┼───────────────────────┤ -│ Version │ Added for IBC │ -├─────────────────────┼───────────────────────┤ -│ ValidatorHash │ Added for IBC │ -├─────────────────────┼───────────────────────┤ -│ NextValidatorsHash │ Added for IBC │ -├─────────────────────┼───────────────────────┤ -│ ProposerAddress │ From ExtraData │ -└─────────────────────┴───────────────────────┘ -``` - -## Assumptions and Considerations - -- The Rollkit minimal header is designed to be flexible and adaptable, allowing for integration with various execution layers such as EVM and ABCI, without being 
constrained by CometBFT's header format. -- The `extraData` field provides a mechanism for including additional metadata, such as sequencer information, which can be crucial for certain chain configurations. -- The transformation of the Rollkit header into execution layer-specific headers should be done carefully to ensure compatibility and correctness, especially for IBC and any other cross-chain communication protocols. - -### Header Transformation Flow - -```txt -┌─────────────────────────────────────────────┐ -│ Rollkit Minimal Header │ -│ │ -│ A lightweight, flexible header format │ -│ with essential fields for block processing │ -└───────────┬─────────────────┬───────────────┘ - │ │ - ▼ ▼ -┌───────────────────┐ ┌─────────────────────┐ -│ EVM Header │ │ ABCI Header │ -│ │ │ │ -│ For EVM-based │ │ For IBC-compatible │ -│ execution layers │ │ execution layers │ -└───────────────────┘ └─────────────────────┘ -``` - -## Implementation - -Pending implementation. - -## References - -- [Ethereum Developer Documentation](https://ethereum.org/en/developers/docs/): Comprehensive resources for understanding Ethereum's architecture, including block and transaction structures. -- [Tendermint Core Documentation](https://docs.tendermint.com/master/spec/): Detailed documentation on Tendermint, which includes information on ABCI and its header format. -- [ABCI Specification](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md): The official specification for the Application Blockchain Interface (ABCI), which describes how applications can interact with the Tendermint consensus engine. -- [IBC Protocol Specification](https://github.com/cosmos/ibc): Documentation on the Inter-Blockchain Communication (IBC) protocol, which includes details on how headers are used for cross-chain communication. 
diff --git a/content/docs/adr/adr-016-genesis-file.md b/content/docs/adr/adr-016-genesis-file.md deleted file mode 100644 index 4016f91..0000000 --- a/content/docs/adr/adr-016-genesis-file.md +++ /dev/null @@ -1,167 +0,0 @@ -# ADR 016: Genesis File - -## Changelog - -- 2025-03-21: Initial draft - -## Context - -Rollkit currently uses a simple genesis state structure (`RollkitGenesis`) that is embedded within the block manager code. This structure defines the initial state of a Rollkit chain but lacks a formal specification and a standardized file format. - -Currently, the genesis information is passed directly to the `NewManager` function as a parameter, without a clear process for how this information should be serialized, validated, or shared between nodes. This creates challenges for initializing new chains and ensuring all nodes start from the same state. - -The current `RollkitGenesis` structure contains basic fields like `GenesisTime`, `InitialHeight`, `ChainID`, and `ProposerAddress`. This structure is used to initialize the chain state when no existing state is found in the store. - -When a full node is created, a `RollkitGenesis` instance is populated from a CometBFT GenesisDoc, and the genesis file is loaded from disk using a function that expects the CometBFT genesis format. - -However, this approach has several limitations: - -1. It relies on CometBFT's genesis format, which may include fields that are not relevant to Rollkit -2. There's no explicit validation of the genesis file specific to Rollkit's needs -3. The conversion from CometBFT's GenesisDoc to RollkitGenesis is implicit and not well-documented -4. There's no standardized way to create, share, or modify the genesis file -5. The use of `GenesisTime` is problematic for chains that are a function of a DA layer, as the chain's starting point should be defined by a DA block height rather than a timestamp -6. 
The hardcoded `ProposerAddress` field doesn't allow for flexibility in different sequencing mechanisms - -## Alternative Approaches - -### 1. Continue Using CometBFT Genesis Format - -We could continue using the CometBFT genesis format and enhance the conversion logic to extract Rollkit-specific values. This approach has the advantage of compatibility with existing tools but means Rollkit is constrained by the structure of the CometBFT genesis file. - -### 2. Create a Completely Custom Genesis Format (Chosen) - -We will define a completely new genesis file format specific to Rollkit with no dependency on CometBFT's format. This gives us maximum flexibility to define fields that are relevant to chains that rely on a DA layer. - -### 3. Hybrid Approach - -Define a Rollkit-specific genesis file format that: - -1. Contains only the fields needed by Rollkit -2. Supports importing/exporting to/from CometBFT genesis format for compatibility -3. Includes validation specific to Rollkit's requirements - -## Decision - -We will implement a dedicated Rollkit genesis file format that is completely decoupled from CometBFT's format. This will allow us to define fields that are specifically relevant to chains of this type, such as `genesisDAStartHeight` instead of `genesisTime`, and a flexible `extraData` field instead of hardcoding a `proposerAddress`. - -The new genesis format will be defined in its own package (`genesis`) and will include validation and serialization methods. - -## Detailed Design - -### Genesis File Structure - -The new genesis file structure will contain the following key fields: - -1. **GenesisDAStartHeight**: The DA layer height at which the chain starts, replacing the traditional `GenesisTime` field. This provides a more accurate starting point for chains built on a DA layer. - -2. **InitialHeight**: The initial block height of the chain. - -3. **ChainID**: A unique identifier for the chain. - -4. 
**ExtraData**: A flexible field that can contain chain-specific configuration, such as proposer/sequencer information or consensus parameters. This replaces the hardcoded `ProposerAddress` field. - -5. **AppState**: Application-specific genesis state as a JSON object. - -The `ExtraData` field is particularly important as it allows different node types (sequencer, full node) to store type-specific information. For example, a single sequencer setup might include the sequencer's address in this field, while a decentralized setup might include validator set information. - -The `AppState` field should contain the genesis file specific to the execution layer as a JSON Object. It will be passed on the execution layer during initialization. - -### Genesis Validation - -The genesis structure will include validation logic to ensure all required fields are present and valid. This would include checks that: - -- The DA start height is non-zero -- The chain ID is present and properly formatted -- The initial height is at least 1 -- Other format-specific validations - -### ExtraData Structure - -Since `ExtraData` is a flexible field that can contain different types of information, we'll define a common configuration structure that can be serialized into this field. This configuration would include: - -- Proposer address for single sequencer mode -- Validator information for multiple sequencers or validator-based consensus -- Consensus parameters as needed - -The package will provide helper functions for encoding and decoding this information to and from the `ExtraData` field. - -### Genesis File I/O - -The genesis package will provide functions to: - -- Load a genesis file from disk -- Save a genesis file to disk -- Parse a genesis document from JSON -- Serialize a genesis document to JSON -- Validate a genesis document - -### Command-Line Tools - -We will provide CLI commands to initialize, validate, and inspect genesis files. 
These would include: - -- `init`: Initialize a new genesis file with a specified chain ID -- `validate`: Validate an existing genesis file -- `add-proposer`: Set the proposer address in the extra data field - -The commands would use flags to set various genesis parameters like the initial height, DA start height, and proposer address. - -### Integration with Node Types - -#### Sequencer Node - -Sequencer nodes need to interpret the `ExtraData` field to determine if they are authorized to create blocks. In a single sequencer model, this would involve: - -1. Loading the genesis file -2. Decoding the extra data to extract the proposer address -3. Checking if the node's signing key matches the proposer address -4. Initializing the node based on its role (sequencer or non-sequencer) - -#### Full Node - -Full nodes use the genesis file to initialize their state and validate incoming blocks. This process would involve: - -1. Loading the genesis file -2. Decoding the extra data to access consensus parameters and proposer information -3. Initializing the node with the genesis information - -### Initial State Creation - -The block manager's `getInitialState` function will be updated to work with the new genesis format. This would involve: - -1. Loading the state from the store if available -2. If no state is found, initializing a new state based on the genesis information: - - Decoding the extra data to get the proposer address - - Determining the genesis time based on the DA start height (by querying the DA layer) - - Initializing the chain with the appropriate parameters - - Creating and saving the genesis block -3. Performing sanity checks when resuming from an existing state - -## Status - -Proposed - -## Consequences - -### Positive - -1. **DA-Centric Design**: Using `genesisDAStartHeight` instead of `genesisTime` provides a more accurate starting point for chains built on a DA layer. -2. 
**Flexibility**: The `extraData` field allows for different sequencing mechanisms and chain-specific configurations. -3. **Simplicity**: A custom genesis format allows us to include only what's needed for Rollkit chains. -4. **Independence**: No dependency on CometBFT's genesis format allows Rollkit to evolve independently. -5. **Better Semantics**: The structure more accurately reflects how chains initialize and operate. - -### Negative - -1. **Breaking Change**: Existing code that directly uses `RollkitGenesis` will need to be updated. -2. **Tooling Requirements**: New tools will need to be developed for genesis file creation and management. -3. **Compatibility Issue**: No direct compatibility with existing Cosmos ecosystem tools that expect CometBFT genesis format. - -### Neutral - -1. **Documentation Requirements**: The new genesis format will need to be documented for users. -2. **Testing Requirements**: Comprehensive tests will be needed for the new functionality. - -## References - -- [Block Manager Implementation](https://github.com/evstack/ev-node/blob/main/block) diff --git a/content/docs/adr/adr-017-node-pruning.md b/content/docs/adr/adr-017-node-pruning.md deleted file mode 100644 index 5be81d4..0000000 --- a/content/docs/adr/adr-017-node-pruning.md +++ /dev/null @@ -1,947 +0,0 @@ -# ADR 017: Node Pruning - -## Changelog - -- 2025-03-22: Initial draft - -## Context - -Rollkit currently stores all historical blocks, headers, and state data in the node's local storage without any mechanism to prune old data. As chains grow, this leads to increasing storage requirements that can become challenging to manage for node operators, especially for long-running chains. - -The current storage implementation in Rollkit uses a key-value store (BadgerDB) to persist blockchain data. 
The `DefaultStore` implementation in the `store` package maintains blocks, headers, signatures, and state with no built-in mechanism for removing old data that may no longer be needed for node operation. - -It's worth noting that in the header and data sync service, the store is primarily used for P2P gossiping purposes. Since the retrieved headers and data are already cached in `headerCache` and `dataCache` respectively, the contents in the store can be safely deleted once they have been retrieved and processed. - -For chains that have been operational for a significant period or have high transaction throughput, the storage requirements can grow substantially, leading to: - -1. Increased disk space requirements for node operators -2. Longer startup times as the node loads and processes more data -3. Potential performance degradation as the database size grows -4. Higher resource requirements for deploying new nodes - -A pruning mechanism would allow node operators to reduce storage requirements while maintaining the ability to serve recent chain data and participate in consensus. Additionally, providing support for attaching external volumes for specific block height ranges would allow for more flexible storage management over extended periods of time. - -## High-Level Architecture - -```mermaid -graph TB - subgraph Node["Rollkit Node"] - API[API Layer] - Store[Store Interface] - PM[Pruning Manager] - VM[Volume Manager] - Cache[Block Cache] - end - - subgraph Volumes["Storage Volumes"] - MV[Main Volume
Recent Blocks
Hot Storage] - HV[Hot Archive Volume
Frequently Accessed
SSD] - WV[Warm Archive Volume
Occasional Access
HDD] - CV[Cold Archive Volume
Rare Access
Object Storage] - end - - subgraph Config["Configuration"] - PS[Pruning Strategy] - VC[Volume Config] - PC[Performance Config] - end - - API --> Store - Store --> PM - Store --> VM - Store --> Cache - - PM --> VM - VM --> MV - VM --> HV - VM --> WV - VM --> CV - - PS --> PM - VC --> VM - PC --> VM - - classDef configNode fill:#f9f,stroke:#333,stroke-width:2px,color:black - classDef volumeNode fill:#bbf,stroke:#333,stroke-width:2px,color:black - classDef nodeComponent fill:#bfb,stroke:#333,stroke-width:2px,color:black - - class PS,VC,PC configNode - class MV,HV,WV,CV volumeNode - class API,Store,PM,VM,Cache nodeComponent -``` - -## Alternative Approaches - -### 1. Full Historical Node / Archive Node (Status Quo) - -Continue with the current approach where nodes store the entire history of the blockchain. This approach ensures that any historical data can be queried but requires ever-increasing storage capacity. - -### 2. Pruning with Height-Based Retention - -Implement a pruning mechanism that retains blocks and associated data only up to a certain number of blocks from the current height. This is the most common approach in blockchain systems and balances storage efficiency with access to recent history. - -### 3. Pruning with State Snapshots - -Create periodic state snapshots and prune all block data except for the snapshots. This approach dramatically reduces storage requirements but limits the ability to query historical transactions or state transitions. - -### 4. External Volume Storage - -Store historical blocks in separate external volumes that can be mounted and unmounted as needed. This allows for flexible storage management but adds complexity to the implementation and operation. - -### 5. 
Configurable Pruning Strategies with Volume Support (Chosen) - -Implement a flexible pruning system that allows node operators to choose from different pruning strategies based on their needs, with added support for external volume management: - -- **None** (default): No pruning, keep all data (archive mode) -- **Default**: Keep recent blocks (configurable, e.g., last 100 blocks) and periodic snapshots -- **Custom**: Allow specifying exact retention policies for blocks, snapshots, and other data -- **Volume-Based**: Allow specifying block height ranges to be stored in separate volumes - -## Decision - -We will implement a configurable pruning system that allows node operators to choose from different pruning strategies, with added support for external volume storage for specific block height ranges. This approach provides flexibility while addressing the storage concerns for long-running chains and enables efficient long-term data management. - -The pruning system will be implemented as an extension to the existing `DefaultStore` in the `store` package, with configuration options available through the node configuration. - -## Detailed Design - -### Volume Types and Storage Systems - -The pruning system is designed to work with various types of storage volumes and systems: - -1. **Local Storage Volumes**: - - **Linux Logical Volumes (LVM)**: - - Flexible volume management allowing dynamic resizing - - Ability to stripe data across multiple physical devices - - Snapshot capabilities for backup purposes - - **Direct Mount Points**: - - Traditional filesystem mounts (ext4, xfs, etc.) - - Direct attached storage devices - - RAID arrays - - **Bind Mounts**: - - Remapping directories to different locations - - Useful for reorganizing storage without changing physical layout - -2. 
**Network Storage**: - - **NFS (Network File System)**: - - Remote filesystem mounts - - Shared storage across multiple nodes - - Suitable for warm storage tiers - - **SAN (Storage Area Network)**: - - High-performance block storage - - Suitable for hot storage tiers - - iSCSI or Fiber Channel connectivity - -3. **Cloud Storage Systems**: - - **Object Storage**: - - Amazon S3 and compatible systems - - Google Cloud Storage - - Azure Blob Storage - - Suitable for cold storage tiers - - **Archive Storage**: - - Amazon S3 Glacier - - Google Cloud Archive Storage - - Azure Archive Storage - - Lowest cost, highest latency - - **File Storage**: - - Amazon EFS - - Google Filestore - - Azure Files - - Mountable as network filesystems - -4. **Volume Performance Classifications**: - - **Hot Storage** (Lowest Latency): - - Local NVMe SSDs - - High-performance SAN volumes - - Used for recent blocks and frequently accessed data - - **Warm Storage** (Medium Latency): - - Local HDDs - - NFS mounts - - Network block storage - - Used for moderately old or occasionally accessed data - - **Cold Storage** (High Latency): - - Object storage - - Archive storage - - Used for historical data with infrequent access - -5. **Implementation Considerations**: - - **Local Volumes**: - - Direct filesystem access - - Native OS-level caching - - Immediate consistency - - **Network Volumes**: - - Connection management - - Caching strategies - - Network latency handling - - **Cloud Storage**: - - API-based access - - Eventual consistency - - Bandwidth costs - - Lifecycle management - -6. 
**Volume Interface Requirements**: - Each volume type must implement: - - ```go - type VolumeInterface interface { - // Core operations - Read(ctx context.Context, key []byte) ([]byte, error) - Write(ctx context.Context, key []byte, value []byte) error - Delete(ctx context.Context, key []byte) error - - // Performance metrics - GetLatency() time.Duration - GetThroughput() int64 - - // Storage type - GetStorageType() StorageType - GetTier() StorageTier - - // Lifecycle - Mount(ctx context.Context) error - Unmount(ctx context.Context) error - } - ``` - -7. **Storage Type Configuration**: - - ```yaml - volumes: - - path: "/data/rollkit/volumes/volume-hot-1" - type: "local" # local, nfs, san, s3, glacier, etc. - storage_class: "nvme" # nvme, ssd, hdd, etc. - performance: - tier: "nvme" - estimated_latency_ms: 5 - - path: "s3://my-bucket/archive" - type: "s3" - storage_class: "standard" - performance: - tier: "cold" - estimated_latency_ms: 500 - credentials: - profile: "archive-storage" - ``` - -This variety of storage options allows node operators to: - -- Implement cost-effective tiered storage strategies -- Scale storage capacity independently of compute resources -- Optimize for different performance requirements -- Leverage existing storage infrastructure -- Use cloud storage for long-term archival - -### Pruning Configuration - -The pruning configuration will be added to the node's configuration file with the following options: - -```go -type PruningConfig struct { - // Strategy determines the pruning approach - // Options: "none", "default", "custom", "volume-based" - Strategy string `mapstructure:"strategy"` - - // KeepRecent specifies the number of recent blocks to keep (used in "default" and "custom" strategies) - KeepRecent uint64 `mapstructure:"keep_recent"` - - // KeepEvery specifies the periodic blocks to keep for historical reference (used in "custom" strategy) - // For example, if set to 100, every 100th block will be kept - KeepEvery uint64 
`mapstructure:"keep_every"` - - // SnapshotInterval specifies how often to create state snapshots (used in "default" and "custom" strategies) - // For example, if set to 1000, a snapshot will be created every 1000 blocks - SnapshotInterval uint64 `mapstructure:"snapshot_interval"` - - // PruneInterval specifies how often (in blocks) the pruning process should run - PruneInterval uint64 `mapstructure:"prune_interval"` - - // VolumeConfig specifies configuration for volume-based storage - VolumeConfig *VolumeStorageConfig `mapstructure:"volume_config"` -} - -// VolumeStorageConfig defines how block data is distributed across different volumes -type VolumeStorageConfig struct { - // Enabled indicates whether volume-based storage is enabled - Enabled bool `mapstructure:"enabled"` - - // MainVolumePath is the path to the main storage volume (for recent blocks) - MainVolumePath string `mapstructure:"main_volume_path"` - - // Volumes is a list of archive volumes for specific block height ranges - Volumes []ArchiveVolume `mapstructure:"volumes"` - - // TargetMaxVolumeSize is the target maximum size in bytes for a single volume - // Used for determining when to create new volumes and calculating max block heights - TargetMaxVolumeSize int64 `mapstructure:"target_max_volume_size"` - - // SizeMonitoringInterval is how often (in blocks) to recalculate volume statistics - // and adjust max block heights based on current block sizes - SizeMonitoringInterval uint64 `mapstructure:"size_monitoring_interval"` - - // AutoCreateVolumes indicates whether new volumes should be created automatically - // when a volume approaches TargetMaxVolumeSize - AutoCreateVolumes bool `mapstructure:"auto_create_volumes"` - - // VolumesDirectory is the base directory where new volumes will be created - // This directory should be managed by the node operator, with different storage devices - // mounted as subdirectories following a specific pattern (e.g., "volume-*") - // The application will only 
create new volumes in empty subdirectories that match this pattern - VolumesDirectory string `mapstructure:"volumes_directory"` - - // VolumePattern is the pattern used to identify volume directories (e.g., "volume-*") - VolumePattern string `mapstructure:"volume_pattern"` -} - -// ArchiveVolume defines a single archive volume for a specific block height range -type ArchiveVolume struct { - // Path is the filesystem path to the volume - Path string `mapstructure:"path"` - - // StartHeight is the inclusive starting block height for this volume - StartHeight uint64 `mapstructure:"start_height"` - - // EndHeight is the inclusive ending block height for this volume - // If 0, the volume extends indefinitely - EndHeight uint64 `mapstructure:"end_height"` - - // ReadOnly indicates whether this volume is read-only - ReadOnly bool `mapstructure:"read_only"` - - // Performance characteristics of the volume for query optimization - // This helps the system make intelligent decisions about data placement and retrieval - Performance VolumePerformance `mapstructure:"performance"` -} - -// VolumePerformance defines the performance characteristics of a volume -type VolumePerformance struct { - // Tier indicates the storage tier of this volume (e.g., "fast", "standard", "cold") - // Used for optimizing query routing and data placement - Tier string `mapstructure:"tier"` - - // EstimatedLatencyMs is the estimated average latency for read operations in milliseconds - // This is calculated based on historical access patterns and updated periodically - EstimatedLatencyMs int64 `mapstructure:"estimated_latency_ms"` - - // QueryStats tracks query patterns and performance metrics for this volume - // These statistics are built over time and used for optimization - QueryStats *VolumeQueryStats `mapstructure:"query_stats"` -} - -// VolumeQueryStats tracks query patterns and performance for a volume -type VolumeQueryStats struct { - // Average query latency over time - AvgQueryLatencyMs 
int64 `mapstructure:"avg_query_latency_ms"` - - // Query frequency by block height ranges - QueryFrequencyByRange map[string]int64 `mapstructure:"query_frequency_by_range"` - - // Last updated timestamp - LastUpdated int64 `mapstructure:"last_updated"` -} -``` - -### Volume Directory Management - -The volume management system is designed to work in harmony with node operators' disk management practices. Here's how it works: - -1. **Base Volume Directory Structure**: - - Node operators specify a base `VolumesDirectory` in the configuration - - This directory serves as the root for all volume management operations - - Subdirectories must follow a specific pattern (e.g., `volume-*`) for automatic discovery - - A single physical device can host multiple logical volumes - -2. **Multi-Tiered Archive System**: - - Volumes can be flagged with performance characteristics - - Performance tiers (e.g., "fast" for SSD, "standard" for HDD, "cold" for archival storage) - - Query statistics are built over time to optimize data placement and retrieval - - System automatically routes queries to the most appropriate volume based on: - - Query patterns - - Data access frequency - - Volume performance characteristics - - Historical latency measurements - -3. **Node Operator Responsibilities**: - - Create the base volume directory structure - - Mount different storage devices as subdirectories under `VolumesDirectory` - - Ensure subdirectories follow the required naming pattern - - Manage disk mounts and unmounts at the system level - - Configure performance characteristics for volumes based on underlying hardware - -4. **Volume Discovery and Creation**: - - The application scans `VolumesDirectory` for subdirectories matching the pattern - - Only empty subdirectories are considered for new volume creation - - This ensures the application respects the disk management strategy defined by operators - - Multiple volumes can be created on the same physical device - -5. 
**Volume Creation Process**: - When AutoCreateVolumes is enabled: - - System monitors volume utilization - - When a new volume is needed, it searches for empty subdirectories matching the pattern - - Creates new volume only in qualifying empty subdirectories - - Logs warning if no suitable subdirectory is found - - Node operator must ensure appropriate devices are mounted before volumes can be created - - Example directory structure with performance tiers: - - ```bash - /data/rollkit/volumes/ # VolumesDirectory - ├── volume-main/ # Main volume (fast tier - NVMe SSD) - ├── volume-hot-1/ # Hot data volume (fast tier - SSD) - ├── volume-hot-2/ # Hot data volume (fast tier - SSD) - ├── volume-warm-1/ # Warm data volume (standard tier - HDD) - ├── volume-warm-2/ # Warm data volume (standard tier - HDD) - └── volume-cold-1/ # Cold storage volume (cold tier - HDD) - ``` - -6. **Configuration Example with Performance Tiers**: - - ```yaml - pruning: - volume_config: - enabled: true - main_volume_path: "/data/rollkit/volumes/volume-main" - volumes_directory: "/data/rollkit/volumes" - volume_pattern: "volume-*" - auto_create_volumes: true - target_max_volume_size: 1099511627776 # 1 TB - size_monitoring_interval: 1000 - volumes: - - path: "/data/rollkit/volumes/volume-hot-1" - performance: - tier: "fast" - estimated_latency_ms: 5 - - path: "/data/rollkit/volumes/volume-warm-1" - performance: - tier: "standard" - estimated_latency_ms: 20 - - path: "/data/rollkit/volumes/volume-cold-1" - performance: - tier: "cold" - estimated_latency_ms: 100 - ``` - -7. 
**Query Optimization**: - - System maintains statistics about query patterns per volume - - Frequently accessed data can be automatically migrated to faster volumes - - Less frequently accessed data can be moved to slower, higher-capacity volumes - - Query routing takes into account both data location and volume performance - - Multiple volumes on the same physical device share underlying performance characteristics - -This approach ensures: - -- Clear separation of concerns between system-level disk management and application-level volume management -- Node operators maintain control over physical storage allocation -- Application remains storage-agnostic while respecting operator's disk management choices -- Predictable and safe volume creation process -- Optimal query performance through intelligent data placement -- Efficient use of storage tiers based on access patterns -- Support for multiple volumes per physical device -- Automatic performance optimization based on real usage patterns - -### Store Interface Extension - -The `Store` interface in `store/types.go` will be extended to include pruning and volume management functionality: - -```go -type Store interface { - // ... existing methods ... 
- - // PruneBlocks removes block data up to the specified height based on the pruning strategy - PruneBlocks(ctx context.Context, height uint64) error - - // SetPruningStrategy sets the pruning strategy for the store - SetPruningStrategy(strategy PruningStrategy) error - - // GetPruningStrategy returns the current pruning strategy - GetPruningStrategy() PruningStrategy - - // AddVolume adds a new volume for a specific block height range - AddVolume(ctx context.Context, volume ArchiveVolume) error - - // RemoveVolume removes a volume from the store configuration - RemoveVolume(ctx context.Context, volumePath string) error - - // ListVolumes returns a list of all configured volumes - ListVolumes(ctx context.Context) ([]ArchiveVolume, error) - - // MoveBlocksToVolume moves blocks within specified height range to a target volume - MoveBlocksToVolume(ctx context.Context, startHeight, endHeight uint64, targetVolume string) error - - // GetVolumeStats returns statistics for a specific volume - GetVolumeStats(ctx context.Context, volumePath string) (*VolumeStats, error) - - // GetAllVolumeStats returns statistics for all volumes - GetAllVolumeStats(ctx context.Context) (map[string]*VolumeStats, error) - - // MonitorVolumeSize starts monitoring volume size and triggers volume creation when needed - MonitorVolumeSize(ctx context.Context) error - - // GetVolumePerformance returns performance metrics for a specific volume - GetVolumePerformance(ctx context.Context, volumePath string) (*VolumePerformance, error) -} -``` - -### Multi-Volume Store Implementation - -To support external volumes, we will implement a `MultiVolumeStore` that wraps the `DefaultStore`: - -```go -type MultiVolumeStore struct { - // The main store (for recent blocks) - mainStore *DefaultStore - - // Archive stores mapped by path - archiveStores map[string]*DefaultStore - - // Volume configuration - volumeConfig *VolumeStorageConfig - - // Maps block heights to volume paths for quick lookups - 
heightToVolume map[uint64]string - - // Mutex for concurrent access - mu sync.RWMutex -} -``` - -This implementation will: - -1. Route read requests to the appropriate volume based on the requested block height -2. Handle write operations to the main volume -3. Manage opening and closing volumes as needed -4. Implement background processes for moving data between volumes based on the configuration - -### Volume Management - -The volume management system will include: - -1. **Volume Registry**: A component that tracks which block heights are stored in which volumes -2. **Volume Router**: Logic to direct read/write operations to the appropriate volume -3. **Volume Migration**: Tools to move blocks between volumes -4. **Auto-Volume Creation**: Optionally create new volumes when existing ones reach capacity - -### Volume Size Monitoring and Dynamic Block Height Calculation - -The Volume Registry will implement a size-based monitoring system to manage volume capacity: - -1. **Size Monitoring**: - - Track current volume size and block sizes - - Calculate average block size over configurable window - - Monitor growth rate and project volume utilization - -2. **Dynamic Block Height Calculation**: - - ```go - type VolumeStats struct { - CurrentSize int64 - AvgBlockSize int64 - BlockSizeWindow []int64 - ProjectedMaxHeight uint64 - } - ``` - - The registry will periodically: - - Calculate remaining capacity: `remainingBytes = targetMaxVolumeSize - currentVolumeSize` - - Estimate blocks that can fit: `remainingBlocks = remainingBytes / avgBlockSize` - - Set projected max height: `projectedMaxHeight = currentHeight + remainingBlocks` - - Apply safety margin to account for block size variance - -3. **Volume Transition Planning**: - - Trigger new volume creation when projected to reach `targetMaxVolumeSize` within configurable threshold - - Smooth transition by pre-allocating new volumes before current volume is full - - Consider block size trends when planning transitions - -4. 
**Monitoring Metrics**: - - Current volume utilization percentage - - Projected blocks until volume full - - Block size statistics (min, max, average, trend) - - Volume growth rate - -This approach provides several advantages: - -- More accurate capacity planning based on actual data size -- Adaptation to varying block sizes -- Early warning for volume transitions -- Better resource utilization through dynamic adjustment - -#### Volume Mounting and Unmounting - -The store will support dynamically mounting and unmounting volumes: - -```go -// MountVolume mounts a volume for read/write operations -func (s *MultiVolumeStore) MountVolume(ctx context.Context, volume ArchiveVolume) error { - // Implementation details -} - -// UnmountVolume safely unmounts a volume -func (s *MultiVolumeStore) UnmountVolume(ctx context.Context, volumePath string, force bool) error { - // Implementation details -} -``` - -This will allow for efficient resource usage by only keeping actively needed volumes mounted. - -### Block Lookup and Retrieval - -The block lookup process will be updated to check multiple volumes: - -1. First, check if the requested block is in the main volume -2. If not, consult the volume registry to determine which archive volume contains the block -3. If the volume is mounted, retrieve the block from there -4. If the volume is not mounted, automatically mount it, retrieve the block, and optionally unmount it after use - -### Pruning Implementation With Volume Support - -The pruning system will be enhanced to work with the multi-volume setup: - -1. **Volume-Aware Pruning**: The pruning process will be aware of volumes and their configuration -2. **Block Migration**: Instead of deletion, blocks can be migrated to archive volumes -3. **Volume Rotation**: Full volumes can be archived and replaced with new volumes -4. 
**Volume Consolidation**: Multiple sparse volumes can be consolidated into a single volume - -The pruning manager will implement these strategies based on the configuration. - -### Data Organization Within Volumes - -Each volume will have a standardized internal structure: - -```bash -/volume_root - /blocks # Block data organized by height - /headers # Block headers - /signatures # Block signatures - /state # State snapshots - /indexes # Index files for quick lookups - /metadata.json # Volume metadata (height range, creation time, etc.) -``` - -This structure ensures consistency across volumes and simplifies migration operations. - -### Volume-Based Pruning Strategy - -A new "volume-based" pruning strategy will be implemented with the following behavior: - -1. Keep recent blocks (configurable number) in the main volume -2. Automatically migrate older blocks to archive volumes based on the configuration -3. Create new archive volumes when existing ones reach capacity -4. Optionally compress older volumes to save space - -This strategy provides a balance between accessibility and storage efficiency. - -### API Extensions for Volume Management - -The node's API will be extended to include endpoints for volume management: - -1. List all configured volumes and their status -2. Add/remove volumes -3. Mount/unmount volumes -4. Move blocks between volumes -5. Query volume statistics (usage, block range, etc.) - -These endpoints will enable operators to manage storage resources effectively. - -### Performance Considerations - -To maintain performance with the multi-volume setup: - -1. **Caching**: Frequently accessed blocks will be cached in memory regardless of their source volume -2. **Prefetching**: When retrieving blocks from archive volumes, nearby blocks will be prefetched -3. **Background Processing**: Data migration between volumes will happen in the background to minimize impact on node performance -4. 
**Index Optimization**: Each volume will maintain optimized indexes for fast lookups - -### Migration Path - -For existing nodes transitioning to the multi-volume setup: - -1. An upgrade procedure will create the initial volume configuration -2. Existing block data will remain in the main volume -3. A migration tool will be provided to redistribute blocks according to the new configuration -4. The migration can be performed online with minimal disruption - -### Migration CLI Tool Design - -To facilitate the transition of existing nodes to the new pruning system, we will provide a dedicated CLI tool as part of the Rollkit binary. This tool will enable safe and controlled migration while addressing potential challenges. - -#### CLI Structure - -The migration tool will be accessible through the main Rollkit command: - -```bash -rollkit migrate-storage [subcommand] [flags] -``` - -##### Subcommands - -1. **analyze**: Scans existing data and plans migration - - ```bash - rollkit migrate-storage analyze [--data-dir=] - ``` - - - Estimates storage requirements - - Generates migration plan with volume configuration - - Reports potential issues - - Estimates duration and resource needs - -2. **backup**: Creates pre-migration backup - - ```bash - rollkit migrate-storage backup [--data-dir=] [--backup-dir=] - ``` - - - Full backup with checksums - - Backup metadata and manifest generation - -3. **execute**: Performs migration - - ```bash - rollkit migrate-storage execute [--config=] [--data-dir=] [--dry-run] [--parallel=] - ``` - - - Supports resumable operations - - Dry-run mode for testing - - Configurable parallelism - -4. **verify**: Validates migration - - ```bash - rollkit migrate-storage verify [--data-dir=] - ``` - - - Data integrity checks - - Block height continuity validation - - Volume configuration verification - -5. 
**rollback**: Reverts migration - - ```bash - rollkit migrate-storage rollback [--data-dir=] [--backup-dir=] - ``` - - - Restores from backup - - Validates restoration - -#### Migration Configuration - -The tool uses a YAML configuration file: - -```yaml -migration: - source_dir: /path/to/existing/data - - target: - strategy: "volume-based" - volumes: - - path: /path/to/main/volume - type: "main" - target_max_volume_size: 1099511627776 # 1 TB - performance: - tier: "fast" - estimated_latency_ms: 5 - - path: /path/to/archive/volume1 - type: "archive" - start_height: 0 - end_height: 0 # Will be dynamically calculated - target_max_volume_size: 1099511627776 # 1 TB - performance: - tier: "cold" - estimated_latency_ms: 100 - - settings: - parallel_workers: 4 - batch_size: 10000 - checkpoint_interval: 1000 - validate_checksums: true - size_monitoring_interval: 1000 - - backup: - enabled: true - path: /path/to/backup - compress: true - - monitoring: - log_level: "info" - metrics_enabled: true - progress_report_interval: 60 - volume_stats_enabled: true -``` - -```go -type MigrationConfig struct { - SourceDir string `mapstructure:"source_dir"` - Target TargetConfig `mapstructure:"target"` - Settings MigrationSettings `mapstructure:"settings"` - Backup BackupConfig `mapstructure:"backup"` - Monitoring MonitoringConfig `mapstructure:"monitoring"` -} - -type TargetConfig struct { - Strategy string `mapstructure:"strategy"` - Volumes []VolumeConfig `mapstructure:"volumes"` -} - -type VolumeConfig struct { - Path string `mapstructure:"path"` - Type string `mapstructure:"type"` - StartHeight uint64 `mapstructure:"start_height"` - EndHeight uint64 `mapstructure:"end_height"` - TargetMaxVolumeSize int64 `mapstructure:"target_max_volume_size"` - Performance VolumePerformance `mapstructure:"performance"` -} - -type MigrationSettings struct { - ParallelWorkers int `mapstructure:"parallel_workers"` - BatchSize int `mapstructure:"batch_size"` - CheckpointInterval int 
`mapstructure:"checkpoint_interval"` - ValidateChecksums bool `mapstructure:"validate_checksums"` - SizeMonitoringInterval int `mapstructure:"size_monitoring_interval"` -} - -type BackupConfig struct { - Enabled bool `mapstructure:"enabled"` - Path string `mapstructure:"path"` - Compress bool `mapstructure:"compress"` -} - -type MonitoringConfig struct { - LogLevel string `mapstructure:"log_level"` - MetricsEnabled bool `mapstructure:"metrics_enabled"` - ProgressReportInterval int `mapstructure:"progress_report_interval"` - VolumeStatsEnabled bool `mapstructure:"volume_stats_enabled"` -} -``` - -#### Safety Features - -1. **Checkpointing** - - Progress checkpoints at configurable intervals - - Resume capability after interruption - - Transaction logs for all operations - -2. **Validation** - - Data checksums - - Block height continuity checks - - State consistency verification - - Configuration validation - -3. **Performance** - - Parallel data movement - - Batch processing - - Progress tracking - - Resource monitoring - -4. **Monitoring** - - Detailed operation logs - - Progress metrics - - Error diagnostics - - Performance statistics - -#### Migration Process Example - -1. Pre-migration analysis and backup: - - ```bash - # Analyze existing data - rollkit migrate-storage analyze --data-dir=/path/to/node - - # Create backup - rollkit migrate-storage backup --data-dir=/path/to/node --backup-dir=/path/to/backup - ``` - -2. Execute migration and verify: - - ```bash - # Perform migration - rollkit migrate-storage execute --config=migration-config.yaml - - # Verify results - rollkit migrate-storage verify --data-dir=/path/to/node - ``` - -3. 
Rollback if needed: - - ```bash - # Revert changes - rollkit migrate-storage rollback --data-dir=/path/to/node --backup-dir=/path/to/backup - ``` - - This CLI-based approach provides a structured and safe way to migrate existing nodes while addressing key challenges: - - - Data integrity through validation - - Rollback capability via backups - - Resumable operations for large datasets - - Progress monitoring and tracking - - Pre-migration configuration validation - - Parallel processing for performance - -## Status - -Proposed - -## Consequences - -### Positive - -1. **Reduced Storage Requirements**: Nodes will require less disk space, especially for long-running chains -2. **Improved Performance**: Database operations may become faster with a smaller dataset and optimized volume management -3. **Lower Barrier to Entry**: New node operators can join with less storage overhead -4. **Flexible Storage Management**: - - Operators can choose pruning strategies that fit their needs - - Support for multiple storage tiers (hot, warm, cold) - - Dynamic volume management based on access patterns -5. **Cost Optimization**: - - Efficient use of available storage resources - - Less frequently accessed data can be stored on cheaper storage tiers - - Automatic data placement based on access patterns -6. **Historical Data Management**: - - Important historical blocks can be preserved in dedicated volumes - - Configurable retention policies for different data types - - Support for external archival storage - -### Negative - -1. **Limited Historical Queries**: - - Depending on the pruning strategy, some historical data may not be immediately available - - Increased latency for accessing archived data -2. **Implementation Complexity**: - - Added complexity in the storage layer - - More complex failure recovery scenarios - - Need for careful coordination between pruning and volume management -3. 
**Operational Considerations**: - - Additional operational knowledge required for volume management - - More complex monitoring and maintenance requirements - - Need for careful capacity planning -4. **Migration Challenges**: - - Existing nodes may face challenges during migration to the new system - - Potential downtime during initial setup - - Need for careful backup procedures -5. **Resource Overhead**: - - Additional CPU/memory usage for pruning operations - - Network bandwidth for data migration between volumes - - Storage overhead for maintaining multiple volume metadata - -### Neutral - -1. **Configuration Requirements**: - - Node operators need to understand and configure pruning options - - Need to define appropriate storage tiers and volume strategies - - Regular review and adjustment of configurations may be necessary -2. **Network Diversity**: - - Network may consist of both archive nodes and pruned nodes - - Different nodes may implement different pruning strategies - - Varying data availability across the network -3. **Infrastructure Dependencies**: - - Success depends on underlying storage infrastructure capabilities - - May require specific storage hardware for optimal performance - - Network requirements for distributed storage solutions -4. 
**Monitoring and Maintenance**: - - Regular monitoring of pruning operations required - - Need to track volume usage and performance metrics - - Periodic review of pruning effectiveness - -## References - -- [Rollkit Store Implementation](https://github.com/evstack/ev-node/blob/main/pkg/store/store.go) -- [Block components](https://github.com/evstack/ev-node/blob/main/block/components.go) -- [Store Interface](https://github.com/evstack/ev-node/blob/main/pkg/store/types.go) diff --git a/content/docs/adr/adr-018-rpc.md b/content/docs/adr/adr-018-rpc.md deleted file mode 100644 index 5301ace..0000000 --- a/content/docs/adr/adr-018-rpc.md +++ /dev/null @@ -1,109 +0,0 @@ -# ADR 018: Store RPC Layer Implementation using Connect-RPC - -## Changelog - -- 2024-03-25: Initial proposal -- 2025-04-23: Renumbered from ADR-017 to ADR-018 to maintain chronological order. - -## Context - -The Evolve store package provides a critical interface for storing and retrieving blocks, commits, and state data. Currently, this functionality is only available locally through direct Go package imports. To enable remote access to this data and improve the system's scalability and interoperability, we need to implement a remote procedure call (RPC) layer. - -Connect-Go has been chosen as the RPC framework due to its modern features, excellent developer experience, and compatibility with both gRPC and HTTP/1.1 protocols. - -## Alternative Approaches - -### Pure gRPC - -- Pros: Mature ecosystem, wide adoption -- Cons: More complex setup, less flexible protocol support, requires more boilerplate - -### REST API - -- Pros: Familiar, widely supported -- Cons: No built-in streaming, manual schema definition required - -## Decision - -Implement a Connect-Go service layer that exposes the Store interface functionality through a well-defined protocol buffer schema and Connect-Go service definitions. 
- -[Connect-Go](https://connectrpc.com/docs/go/getting-started/) is a lightweight gRPC library that provides REST out of the box, allowing users to switch between implementations. - -## Detailed Design - -### Protocol Buffer Definitions - -```protobuf -syntax = "proto3"; - -package evnode.store.v1; - -message Block { - SignedHeader header = 1; - Data data = 2; - bytes signature = 3; -} - -message GetBlockRequest { - oneof identifier { - uint64 height = 1; - bytes hash = 2; - } -} - -message GetBlockResponse { - Block block = 1; -} - -message GetStateResponse { - State state = 1; -} - -service StoreService { - // Query Methods - rpc GetBlock(GetBlockRequest) returns (GetBlockResponse) {} - rpc GetState(google.protobuf.Empty) returns (GetStateResponse) {} -} -``` - -### Implementation Structure - -```txt -pkg/ - rpc/ - server/ - server.go // Connect-RPC server implementation - client/ - client.go // Connect-RPC client implementation -``` - -## Status - -Proposed - -## Consequences - -### Positive - -- Enables remote access to store data -- Type-safe API interactions -- Protocol flexibility (gRPC and HTTP/1.1) -- Modern developer experience -- Built-in streaming support - -### Negative - -- Additional dependency on Connect-Go -- Need to maintain protocol buffer definitions -- Potential version compatibility challenges - -### Neutral - -- Requires generating and maintaining additional code -- Need for proper API versioning strategy - -## References - -- [Connect-RPC Documentation](https://connectrpc.com/docs/go/getting-started/) -- [Protocol Buffers Documentation](https://protobuf.dev) -- [Store Interface Documentation](../../pkg/store/types.go) diff --git a/content/docs/adr/adr-019-forced-inclusion-mechanism.md b/content/docs/adr/adr-019-forced-inclusion-mechanism.md deleted file mode 100644 index c603b0d..0000000 --- a/content/docs/adr/adr-019-forced-inclusion-mechanism.md +++ /dev/null @@ -1,839 +0,0 @@ -# ADR 019: Forced Inclusion Mechanism - -## Changelog - -- 
2025-03-24: Initial draft -- 2025-04-23: Renumbered from ADR-018 to ADR-019 to maintain chronological order. -- 2025-11-10: Updated to reflect actual implementation -- 2026-02-23: Added sequencer catch-up mode documentation - -## Context - -In a single-sequencer rollup architecture, users depend entirely on the sequencer to include their transactions in blocks. This creates several problems: - -1. **Censorship Risk**: A malicious or coerced sequencer can selectively exclude transactions -2. **Liveness Failure**: If the sequencer goes offline, no new transactions can be processed -3. **Centralization**: Users must trust a single entity to behave honestly -4. **No Recourse**: Users have no alternative path to submit transactions if the sequencer refuses them - -While eventual solutions like decentralized sequencer networks exist, they introduce significant complexity. We need a simpler mechanism that provides censorship resistance and liveness guarantees while maintaining the performance benefits of a single sequencer. - -## Alternative Approaches - -### Decentralized Sequencer - -A fully decentralized sequencer network would eliminate single points of failure but requires: - -- Complex consensus mechanisms -- Increased latency due to coordination -- More infrastructure and operational complexity - -### Automatic Sequencer Failover - -Implementing automatic failover to backup sequencers when the primary goes down requires: - -- Complex monitoring and health checks -- Coordination between sequencers to prevent forks -- Does not solve censorship issues with a malicious sequencer - -## Decision - -We implement a **forced inclusion mechanism** that allows users to submit transactions directly to the Data Availability (DA) layer. This approach provides: - -1. **Censorship Resistance**: Users can always bypass the sequencer by posting to DA -2. **Verifiable Inclusion**: Full nodes verify that sequencers include all forced transactions -3. 
**Based Rollup Option**: A based sequencer mode for fully DA-driven transaction ordering -4. **Simplicity**: No complex timing mechanisms or fallback modes - -### High-Level Architecture - -``` -┌─────────────────────────────────────────────────────────────────┐ -│ User Actions │ -├─────────────────────────────────────────────────────────────────┤ -│ │ -│ Normal Path: Forced Inclusion Path: │ -│ Submit tx to Sequencer ────► Submit tx directly to DA │ -│ (Fast) (Censorship-resistant) │ -│ │ -└──────────┬────────────────────────────────────┬─────────────────┘ - │ │ - ▼ ▼ - ┌─────────────┐ ┌──────────────────┐ - │ Sequencer │ │ DA Layer │ - │ (Mempool) │ │ (Forced Inc. NS) │ - └──────┬──────┘ └─────────┬────────┘ - │ │ - │ 1. Fetch forced inc. txs │ - │◄────────────────────────────────────┘ - │ - │ 2. Prepend forced txs to batch - │ - ▼ - ┌─────────────┐ - │ Block │ - │ Production │ - └──────┬──────┘ - │ - │ 3. Submit block to DA - │ - ▼ - ┌─────────────┐ - │ DA Layer │ - └──────┬──────┘ - │ - │ 4. Full nodes retrieve block - │ - ▼ - ┌─────────────────────┐ - │ Full Nodes │ - │ (Verification) │ - │ │ - │ 5. Verify forced │ - │ inc. txs are │ - │ included │ - └─────────────────────┘ -``` - -### Key Components - -1. **Forced Inclusion Namespace**: A dedicated DA namespace where users can post transactions -2. **DA Retriever**: Fetches forced inclusion transactions from DA using epoch-based scanning -3. **Single Sequencer**: Enhanced to include forced transactions from DA in every batch -4. **Based Sequencer**: Alternative sequencer that ONLY retrieves transactions from DA -5. **Verification**: Full nodes validate that blocks include all forced transactions - -## Detailed Design - -### User Requirements - -Users can submit transactions in two ways: - -1. **Normal Path**: Submit to sequencer's mempool/RPC (fast, low cost) -2. 
**Forced Inclusion Path**: Submit directly to DA forced inclusion namespace (censorship-resistant) - -No additional requirements or monitoring needed from users. - -### Systems Affected - -1. **DA Layer**: New namespace for forced inclusion transactions -2. **Sequencer (Single)**: Fetches and includes forced transactions -3. **Sequencer (Based)**: New sequencer type that only uses DA transactions -4. **DA Retriever**: New component for fetching forced transactions -5. **Syncer**: Verifies forced transaction inclusion in blocks -6. **Configuration**: New fields for forced inclusion settings - -### Data Structures - -#### Forced Inclusion Event - -```go -type ForcedIncludedEvent struct { - Txs [][]byte // Forced inclusion transactions - StartDaHeight uint64 // Start of DA height range - EndDaHeight uint64 // End of DA height range -} -``` - -#### DA Retriever Interface - -```go -type DARetriever interface { - // Retrieve forced inclusion transactions from DA at specified height - RetrieveForcedIncludedTxsFromDA(ctx context.Context, daHeight uint64) (*ForcedIncludedEvent, error) -} -``` - -### APIs and Interfaces - -#### DA Retriever - -The DA Retriever component handles fetching forced inclusion transactions: - -```go -type daRetriever struct { - da coreda.DA - cache cache.CacheManager - genesis genesis.Genesis - logger zerolog.Logger - namespaceForcedInclusionBz []byte - hasForcedInclusionNs bool - daEpochSize uint64 -} - -// RetrieveForcedIncludedTxsFromDA fetches forced inclusion transactions -// Only fetches at epoch boundaries to prevent redundant DA queries -func (r *daRetriever) RetrieveForcedIncludedTxsFromDA( - ctx context.Context, - daHeight uint64, -) (*ForcedIncludedEvent, error) -``` - -#### Single Sequencer Extension - -The single sequencer is enhanced to fetch and include forced transactions: - -```go -type Sequencer struct { - // ... existing fields ... 
- fiRetriever ForcedInclusionRetriever - genesis genesis.Genesis - daHeight atomic.Uint64 - pendingForcedInclusionTxs []pendingForcedInclusionTx - queue *BatchQueue -} - -type pendingForcedInclusionTx struct { - Data []byte - OriginalHeight uint64 -} - -func (s *Sequencer) GetNextBatch(ctx context.Context, req GetNextBatchRequest) (*GetNextBatchResponse, error) { - // 1. Fetch forced inclusion transactions from DA - forcedEvent, err := s.fiRetriever.RetrieveForcedIncludedTxs(ctx, s.daHeight.Load()) - - // 2. Process forced txs with size validation and pending queue - forcedTxs := s.processForcedInclusionTxs(forcedEvent, req.MaxBytes) - - // 3. Get batch from mempool queue - batch, err := s.queue.Next(ctx) - - // 4. Prepend forced txs and trim batch to fit MaxBytes - if len(forcedTxs) > 0 { - forcedTxsSize := calculateSize(forcedTxs) - remainingBytes := req.MaxBytes - forcedTxsSize - - // Trim batch transactions to fit - trimmedBatchTxs := trimToSize(batch.Transactions, remainingBytes) - - // Return excluded txs to front of queue - if len(trimmedBatchTxs) < len(batch.Transactions) { - excludedBatch := batch.Transactions[len(trimmedBatchTxs):] - s.queue.Prepend(ctx, Batch{Transactions: excludedBatch}) - } - - batch.Transactions = append(forcedTxs, trimmedBatchTxs...) 
- } - - return &GetNextBatchResponse{Batch: batch} -} - -// processForcedInclusionTxs validates and queues forced txs -func (s *Sequencer) processForcedInclusionTxs(event *ForcedInclusionEvent, maxBytes uint64) [][]byte { - var validatedTxs [][]byte - var newPendingTxs []pendingForcedInclusionTx - currentSize := 0 - - // Process pending txs from previous epochs first - for _, pendingTx := range s.pendingForcedInclusionTxs { - if !ValidateBlobSize(pendingTx.Data) { - continue // Skip blobs exceeding absolute DA limit - } - if WouldExceedCumulativeSize(currentSize, len(pendingTx.Data), maxBytes) { - newPendingTxs = append(newPendingTxs, pendingTx) - continue - } - validatedTxs = append(validatedTxs, pendingTx.Data) - currentSize += len(pendingTx.Data) - } - - // Process new txs from this epoch - for _, tx := range event.Txs { - if !ValidateBlobSize(tx) { - continue // Skip blobs exceeding absolute DA limit - } - if WouldExceedCumulativeSize(currentSize, len(tx), maxBytes) { - newPendingTxs = append(newPendingTxs, pendingForcedInclusionTx{ - Data: tx, - OriginalHeight: event.StartDaHeight, - }) - continue - } - validatedTxs = append(validatedTxs, tx) - currentSize += len(tx) - } - - s.pendingForcedInclusionTxs = newPendingTxs - return validatedTxs -} -``` - -#### Based Sequencer - -A new sequencer implementation that ONLY retrieves transactions from DA: - -```go -type BasedSequencer struct { - fiRetriever ForcedInclusionRetriever - da coreda.DA - config config.Config - genesis genesis.Genesis - logger zerolog.Logger - mu sync.RWMutex - daHeight uint64 - txQueue [][]byte // Buffer for transactions exceeding batch size -} - -func (s *BasedSequencer) GetNextBatch(ctx context.Context, req GetNextBatchRequest) (*GetNextBatchResponse, error) { - - - // Always fetch forced inclusion transactions from DA - forcedEvent, err := s.fiRetriever.RetrieveForcedIncludedTxs(ctx, s.daHeight) - if err != nil && !errors.Is(err, ErrHeightFromFuture) { - return nil, err - } - - // Validate 
and add transactions to queue - for _, tx := range forcedEvent.Txs { - if ValidateBlobSize(tx) { - s.txQueue = append(s.txQueue, tx) - } - } - - // Create batch from queue respecting MaxBytes - batch := s.createBatchFromQueue(req.MaxBytes) - - return &GetNextBatchResponse{Batch: batch} -} - -// SubmitBatchTxs is a no-op for based sequencer -func (s *BasedSequencer) SubmitBatchTxs(ctx context.Context, req SubmitBatchTxsRequest) (*SubmitBatchTxsResponse, error) { - // Based sequencer ignores submitted transactions - return &SubmitBatchTxsResponse{}, nil -} -``` - -#### Syncer Verification - -Full nodes verify forced inclusion in the sync process with support for transaction smoothing across multiple blocks and a configurable grace period: - -```go -func (s *Syncer) verifyForcedInclusionTxs(currentState State, data *Data) error { - // 1. Retrieve forced inclusion transactions from DA for current epoch - forcedEvent, err := s.daRetriever.RetrieveForcedIncludedTxsFromDA(s.ctx, currentState.DAHeight) - if err != nil { - return err - } - - // 2. Build map of transactions in current block - blockTxMap := make(map[string]struct{}) - for _, tx := range data.Txs { - blockTxMap[hashTx(tx)] = struct{}{} - } - - // 3. Check if any pending forced inclusion txs from previous epochs are included - var stillPending []pendingForcedInclusionTx - s.pendingForcedInclusionTxs.Range(func(key, value any) bool { - pending := value.(pendingForcedInclusionTx) - if _, ok := blockTxMap[pending.TxHash]; ok { - // Transaction was included - remove from pending - s.pendingForcedInclusionTxs.Delete(key) - } else { - stillPending = append(stillPending, pending) - } - return true - }) - - // 4. 
Process new forced inclusion transactions from current epoch - for _, forcedTx := range forcedEvent.Txs { - txHash := hashTx(forcedTx) - if _, ok := blockTxMap[txHash]; !ok { - // Transaction not included yet - add to pending for deferral within epoch - stillPending = append(stillPending, pendingForcedInclusionTx{ - Data: forcedTx, - EpochStart: forcedEvent.StartDaHeight, - EpochEnd: forcedEvent.EndDaHeight, - TxHash: txHash, - }) - } - } - - // 5. Check for malicious behavior: pending txs past their grace boundary - // Grace period provides tolerance for temporary chain congestion - var maliciousTxs, remainingPending []pendingForcedInclusionTx - for _, pending := range stillPending { - // Calculate grace boundary: epoch end + (effective grace periods × epoch size) - effectiveGracePeriod := s.getEffectiveGracePeriod() - graceBoundary := pending.EpochEnd + (effectiveGracePeriod * s.genesis.DAEpochForcedInclusion) - - // If current DA height is past the grace boundary, these txs MUST have been included - if currentState.DAHeight > graceBoundary { - maliciousTxs = append(maliciousTxs, pending) - } else { - remainingPending = append(remainingPending, pending) - } - } - - // 6. Update pending map with only remaining valid pending txs - pendingForcedInclusionTxs = remainingPending - - // 7. Reject block if sequencer censored forced txs past grace boundary - if len(maliciousTxs) > 0 { - return fmt.Errorf("sequencer is malicious: %d forced inclusion transactions past grace boundary not included", len(maliciousTxs)) - } - - return nil -} -``` - -**Key Verification Features**: - -1. **Pending Transaction Tracking**: Maintains a map of forced inclusion transactions that haven't been included yet -2. **Epoch-Based Deferral**: Allows transactions to be deferred (smoothed) across multiple blocks within the same epoch -3. **Strict Epoch Boundary Enforcement**: Once `currentState.DAHeight > pending.EpochEnd`, all pending transactions from that epoch MUST have been included -4. 
**Censorship Detection**: Identifies malicious sequencers that fail to include forced transactions after epoch boundaries - -**Smoothing Example**: - -``` -Epoch [100-109] contains 3MB of forced inclusion transactions - -Block at DA height 100: - - Includes 2MB of forced txs (partial) - - Remaining 1MB added to pending map with EpochEnd=109 - - ✅ Valid - within epoch boundary - -Block at DA height 105: - - Includes remaining 1MB from pending - - Pending map cleared for those txs - - ✅ Valid - within epoch boundary - -Block at DA height 110 (next epoch): - - If any txs from epoch [100-109] still pending - - ❌ MALICIOUS - epoch boundary violated - - Block rejected, sequencer flagged -``` - -### Implementation Details - -#### Epoch-Based Fetching - -To avoid excessive DA queries, the DA Retriever uses epoch-based fetching: - -- **Epoch Size**: Configurable number of DA blocks (e.g., 10) -- **Epoch Boundaries**: Deterministically calculated based on `DAStartHeight` -- **Fetch Timing**: Only fetch at epoch start to prevent duplicate fetches - -```go -// Calculate epoch boundaries -func (r *daRetriever) calculateEpochBoundaries(daHeight uint64) (start, end uint64) { - epochNum := r.calculateEpochNumber(daHeight) - start = r.genesis.DAStartHeight + (epochNum-1)*r.daEpochSize - end = r.genesis.DAStartHeight + epochNum*r.daEpochSize - 1 - return start, end -} - -// Only fetch at epoch start -if daHeight != epochStart { - return &ForcedIncludedEvent{Txs: [][]byte{}} -} - -// Fetch all heights in epoch range -for height := epochStart; height <= epochEnd; height++ { - // Fetch forced inclusion blobs from this DA height -} -``` - -#### Height From Future Handling - -When DA height is not yet available: - -```go -if errors.Is(err, coreda.ErrHeightFromFuture) { - // Keep current DA height, return empty batch - // Retry same height on next call - return &ForcedIncludedEvent{Txs: [][]byte{}}, nil -} -``` - -#### Sequencer Catch-Up Mode - -When a single sequencer comes back online 
after downtime spanning multiple DA epochs, it enters **catch-up mode** to ensure consistency with base sequencing behavior. - -**Problem**: If the sequencer was offline for several DA epochs, it missed mempool transactions that were submitted during that time. However, forced inclusion transactions were still being posted to DA and processed by full nodes running in base sequencing mode. When the sequencer restarts, it must produce blocks that match what base sequencing would have produced during the downtime. - -**Solution**: The sequencer detects if it has fallen more than one epoch behind the DA head and enters catch-up mode: - -1. **Detection**: On the first epoch fetch after startup, query `GetLatestDAHeight()` to determine the gap -2. **Catch-Up Mode**: If more than one epoch behind, enter catch-up mode: - - Only produce blocks with forced inclusion transactions (no mempool) - - Use DA epoch end timestamps for block timestamps (to match base sequencing) -3. **Exit**: When `ErrHeightFromFuture` is encountered (reached DA head), exit catch-up mode and resume normal operation - -**Key Behaviors During Catch-Up**: - -- **No Mempool Transactions**: Only forced inclusion transactions are included in blocks -- **Matching Timestamps**: Block timestamps are derived from DA epoch end times to match base sequencing -- **Checkpoint Persistence**: Progress is tracked via checkpoint to handle crashes during catch-up -- **Single Check**: The `GetLatestDAHeight()` query is performed only once per sequencer lifecycle - -**Example**: - -Sequencer offline during epochs 100-150 (5 epochs of 10 blocks each) -Full nodes (base sequencing) produced blocks with forced txs only - -Sequencer restarts: - -1. Checkpoint DA height: 100 -2. Latest DA height: 150 -3. Missed epochs: 5 (more than 1) -4. Enter catch-up mode - -Catch-up process: - -- Epoch 101-110: Produce blocks with forced txs only, use epoch timestamps -- Epoch 111-120: Continue catch-up... -- ... 
-- Epoch 141-150: Still catching up -- Epoch 151: ErrHeightFromFuture -> exit catch-up mode - -Normal operation resumes: - -- Include both forced txs and mempool txs -- Use current timestamps - -**Benefits**: - -- Ensures sequencer produces identical blocks to what base sequencing would have produced -- Maintains consistency across the network regardless of sequencer downtime -- Automatic detection and recovery without operator intervention -- Safe restart after crashes (checkpoint tracks progress) - -#### Grace Period for Forced Inclusion - -The grace period mechanism provides tolerance for chain congestion while maintaining censorship resistance: - -**Problem**: If the DA layer experiences temporary unavailability or the chain congestion, the sequencer may be unable to fetch forced inclusion transactions from a completed epoch. Without a grace period, full nodes would immediately flag the sequencer as malicious. - -**Solution**: The grace period mechanism allows forced inclusion transactions from epoch N to be included in subsequent epochs before being flagged as malicious. The grace period is dynamically adjusted based on chain fullness. 
- -**Grace Boundary Calculation**: - -```go -graceBoundary := epochEnd + (effectiveGracePeriod * DAEpochForcedInclusion) - -// Example with base grace period = 1 epoch, DAEpochForcedInclusion = 50: -// - Epoch N ends at DA height 100 -// - Grace boundary = 100 + (1 * 50) = 150 (adjusted dynamically by chain fullness) -// - Transaction must be included while currentDAHeight <= graceBoundary -// - If currentDAHeight > graceBoundary without inclusion, sequencer is malicious -``` - -**Configuration Recommendations**: - -- **Production (default)**: Base grace period of 1 epoch - - Automatically adjusted based on chain fullness - - Balances censorship resistance with reliability -- **High Security / Reliable DA**: Minimum grace period - - Stricter enforcement when block space is available - - Requires 99.9%+ DA uptime - - Faster detection of censorship -- **Unreliable DA**: Network adjusts grace period dynamically - - Higher tolerance (up to 3x base period) when chain is congested - - Reduced censorship resistance temporarily to avoid false positives - -**Verification Logic**: - -1. Forced inclusion transactions from epoch N are tracked with their epoch boundaries -2. Transactions not immediately included are added to pending queue -3. Each block, full nodes check if pending transactions are past their grace boundary -4. If `currentDAHeight > graceBoundary`, the sequencer is flagged as malicious (strictly greater than) -5. 
Transactions within the grace period (where `currentDAHeight <= graceBoundary`) remain in pending queue without error - -**Benefits**: - -- Prevents false positives during temporary DA outages -- Maintains censorship resistance (transactions must be included within grace window) -- Configurable trade-off between reliability and security -- Allows networks to adapt to their DA layer's reliability characteristics - -**Examples and Edge Cases**: - -Configuration: `DAEpochForcedInclusion = 50`, Base grace period of 1 epoch (dynamically adjusted) - -_Example 1: Normal Inclusion (Within Same Epoch)_ - -``` -- Forced tx submitted to DA at height 75 (epoch 51-100) -- Sequencer fetches at height 101 (next epoch start) -- Sequencer includes tx in block at DA height 105 -- Result: ✅ Valid - included within same epoch -``` - -_Example 2: Grace Period Usage (Included in Next Epoch)_ - -``` -- Forced tx submitted to DA at height 75 (epoch 51-100) -- Sequencer fetches at height 101 -- DA temporarily unavailable, sequencer cannot fetch -- Sequencer includes tx at DA height 125 (epoch 101-150) -- Grace boundary = 100 + (1 × 50) = 150 -- Result: ✅ Valid - within grace period -``` - -_Example 3: Malicious Sequencer (Past Grace Boundary)_ - -``` -- Forced tx submitted to DA at height 75 (epoch 51-100) -- Sequencer fetches at height 101 -- Sequencer deliberately omits tx -- Block produced at DA height 151 (past grace boundary 150) -- Full node detects: currentDAHeight (151) > graceBoundary (150) -- Result: ❌ Block rejected, sequencer flagged as malicious -``` - -_Example 4: Low Chain Activity (Minimum Grace Period)_ - -``` -- Chain is mostly empty (<20% full) -- Grace period is at minimum (0.5x base period) -- Forced tx submitted at height 75 (epoch 51-100) -- Grace boundary ≈ 100 + (0.5 × 50) = 125 -- Stricter enforcement applied when chain is empty -- Result: Faster censorship detection when block space is available -``` - -_Example 5: Multiple Pending Transactions_ - -``` -- Tx A 
from epoch ending at height 100, grace boundary 150 -- Tx B from epoch ending at height 150, grace boundary 200 -- Current DA height: 155 -- Tx A not included: ❌ Past grace boundary - malicious -- Tx B not included: ✅ Within grace period - still pending -- Result: Block rejected due to Tx A -``` - -_Example 6: High Chain Activity (Extended Grace Period)_ - -``` -- Chain is highly congested (>80% full) -- Grace period is extended (up to 3x base period) -- Forced tx submitted at height 75 (epoch 51-100) -- Grace boundary ≈ 100 + (3 × 50) = 250 -- Higher tolerance during congestion to avoid false positives -- Result: Better operational reliability when block space is scarce -``` - -#### Transaction Queue Management - -The based sequencer uses a simplified queue to handle transactions: - -```go -func (s *BasedSequencer) createBatchFromQueue(maxBytes uint64) *Batch { - var batch [][]byte - var totalBytes uint64 - - for i, tx := range s.txQueue { - txSize := uint64(len(tx)) - // Always respect maxBytes, even for first transaction - if totalBytes+txSize > maxBytes { - // Would exceed max bytes, keep remaining in queue - s.txQueue = s.txQueue[i:] - break - } - - batch = append(batch, tx) - totalBytes += txSize - - // Clear queue if we processed everything - if i == len(s.txQueue)-1 { - s.txQueue = s.txQueue[:0] - } - } - - return &Batch{Transactions: batch} -} -``` - -**Note**: The based sequencer is simpler than the single sequencer - it doesn't need a separate pending queue because `txQueue` naturally handles all transaction buffering. - -### Configuration - -```go -type Genesis struct { - ChainID string - StartTime time.Time - InitialHeight uint64 - ProposerAddress []byte - DAStartHeight uint64 - // Number of DA blocks to scan per forced inclusion fetch - // Higher values reduce DA queries but increase latency - // Lower values increase DA queries but improve responsiveness - DAEpochForcedInclusion uint64 -} - -type DAConfig struct { - // ... existing fields ... 
- - // Namespace for forced inclusion transactions - ForcedInclusionNamespace string -} - -type NodeConfig struct { - // ... existing fields ... - - // Run node with based sequencer (requires aggregator mode) - BasedSequencer bool -} -``` - -### Configuration Examples - -#### Traditional Sequencer with Forced Inclusion - -```yaml -# genesis.json -{ - "chain_id": "my-rollup", - "da_epoch_forced_inclusion": 10 # Scan 10 DA blocks at a time -} - -# config.toml -[da] -forced_inclusion_namespace = "0x0000000000000000000000000000000000000000000000000000666f72636564" - -[node] -aggregator = true -based_sequencer = false # Use traditional sequencer -``` - -#### Based Sequencer (DA-Only) - -```yaml -# genesis.json -{ - "chain_id": "my-rollup", - "da_epoch_forced_inclusion": 5 # Scan 5 DA blocks at a time -} - -# config.toml -[da] -forced_inclusion_namespace = "0x0000000000000000000000000000000000000000000000000000666f72636564" - -[node] -aggregator = true -based_sequencer = true # Use based sequencer -``` - -### Sequencer Operation Flows - -#### Single Sequencer Flow - -1. Timer triggers GetNextBatch -2. Fetch forced inclusion txs from DA (via DA Retriever) - - Only at epoch boundaries - - Scan epoch range for forced transactions -3. Get batch from mempool queue -4. Prepend forced txs to batch -5. Return batch for block production - -#### Based Sequencer Flow - -1. Timer triggers GetNextBatch -2. Check transaction queue for buffered txs -3. If queue empty or epoch boundary: - - Fetch forced inclusion txs from DA - - Add to queue -4. Create batch from queue (respecting MaxBytes) -5. Return batch for block production - -### Full Node Verification Flow - -1. Receive block from DA -2. Before applying block: - a. Fetch forced inclusion txs from DA at block's DA height (epoch-based) - b. Build map of transactions in block - c. Check if pending forced txs from previous epochs are included - d. Add any new forced txs not yet included to pending queue - e. 
Calculate grace boundary for each pending tx (dynamically adjusted by chain fullness): - graceBoundary = epochEnd + (effectiveGracePeriod × DAEpochForcedInclusion) - f. Check if any pending txs are past their grace boundary - g. If txs past grace boundary are not included: reject block, flag malicious proposer - h. If txs within grace period: keep in pending queue, allow block -3. Apply block if verification passes - -NOTE: P2P nodes do not perform forced inclusion verification. This is because DA inclusion happens after block production, and DA hints are added later to broadcasted blocks. - -**Grace Period Example** (with base grace period = 1 epoch, `DAEpochForcedInclusion = 50`): - -- Forced tx appears in epoch ending at DA height 100 -- Grace boundary = 100 + (1 × 50) = 150 -- Transaction can be included at any DA height from 101 to 150 -- When currentDAHeight > 150 without inclusion, sequencer is flagged as malicious - -### Efficiency Considerations - -1. **Epoch-Based Fetching**: Reduces DA queries by batching multiple DA heights -2. **Deterministic Epochs**: All nodes calculate same epoch boundaries -3. **Fetch at Epoch Start**: Prevents duplicate fetches as DA height progresses -4. **Transaction Queue**: Buffers excess transactions across multiple blocks -5. **Conditional Fetching**: Only when forced inclusion namespace is configured -6. **Size Pre-validation**: Invalid blobs rejected early, before batch construction -7. 
**Efficient Queue Operations**: - - Single sequencer: `Prepend()` reuses space before head position - - Based sequencer: Simple slice operations for queue management - -**DA Query Frequency**: - -Every `DAEpochForcedInclusion` DA blocks - -**Attack Vectors**: - -### Security Considerations - -- **Censorship**: Mitigated by forced inclusion verification with grace period - - Transactions must be included within grace window (epoch + grace period) - - Full nodes detect and reject blocks from malicious sequencers - - Grace period = 0 provides immediate detection but requires high DA reliability - - Grace period = 1+ balances censorship resistance with operational tolerance -- **DA Spam**: Limited by DA layer's native spam protection and two-tier blob size limits -- **Block Withholding**: Full nodes can fetch and verify from DA independently -- **Oversized Batches**: Prevented by strict size validation at multiple levels -- **Grace Period Attacks**: - - Malicious sequencer cannot indefinitely delay forced transactions - - Grace boundary is deterministic and enforced by all full nodes - - Longer grace periods extend time to detect censorship (trade-off) - -## Status - -Accepted and Implemented - -## Consequences - -### Positive - -1. **Censorship Resistance**: Users have guaranteed path to include transactions -2. **Verifiable**: Full nodes enforce forced inclusion, detecting malicious sequencers -3. **Simple Design**: No complex timing mechanisms or fallback modes -4. **Based Rollup Option**: Fully DA-driven transaction ordering available (simplified implementation) -5. **Optional**: Forced inclusion can be disabled for permissioned deployments -6. **Efficient**: Epoch-based fetching minimizes DA queries -7. **Flexible**: Configurable epoch size and grace period allow tuning latency vs reliability -8. **Robust Size Handling**: Two-tier size validation prevents DoS and DA rejections -9. 
**Transaction Preservation**: All valid transactions are preserved in queues, nothing is lost -10. **Strict MaxBytes Compliance**: Batches never exceed limits, preventing DA submission failures -11. **DA Fault Tolerance**: Grace period prevents false positives during temporary chain congestion -12. **Automatic Recovery**: Sequencer catch-up mode ensures consistency after downtime without operator intervention - -### Negative - -1. **Increased Latency**: Forced transactions subject to epoch boundaries -2. **DA Dependency**: Requires DA layer to be enabled on nodes for verification -3. **Higher DA Costs**: Users pay DA posting fees for forced inclusion -4. **Epoch Configuration**: Requires setting `DAEpochForcedInclusion` in genesis (consensus parameter) - -### Neutral - -1. **Two Sequencer Types**: Choice between single (hybrid) and based (DA-only) -2. **Privacy Model Unchanged**: Forced inclusion has same privacy as normal path -3. **Monitoring**: Operators should monitor forced inclusion namespace usage and grace period metrics -4. **Documentation**: Users need guidance on when to use forced inclusion and grace period implications -5. 
**Genesis Parameters**: `DAEpochForcedInclusion` is a consensus parameter fixed at genesis; grace period adjustment is dynamic - -## References - -- [Evolve Single Sequencer ADR-013](https://github.com/evstack/ev-node/blob/main/docs/adr/adr-013-single-sequencer.md) -- [Evolve Minimal Header ADR-015](https://github.com/evstack/ev-node/blob/main/docs/adr/adr-015-rollkit-minimal-header.md) -- [L2 Beat Stages Framework](https://forum.l2beat.com/t/the-stages-framework/291#p-516-stage-1-requirements-3) -- [GitHub Issue #1914: Add Forced Inclusion Mechanism from the DA layer](https://github.com/evstack/ev-node/issues/1914) diff --git a/content/docs/adr/adr-020-validation.md b/content/docs/adr/adr-020-validation.md deleted file mode 100644 index 5d7ad56..0000000 --- a/content/docs/adr/adr-020-validation.md +++ /dev/null @@ -1,124 +0,0 @@ -# ADR 020: Validation Flow for Single Sequencer - -## Changelog - -- 2025-04-08: Initial draft -- 2025-04-23: Renumbered from ADR-013 to ADR-020 to maintain chronological order. - -## Context - -In the single sequencer model, a single designated sequencer proposes blocks. To ensure correctness and prevent censorship or fraud, full nodes independently verify proposed headers and associated batches. This ADR describes the validation process involved, both in terms of header/batch verification and forkchoice-safe propagation over the p2p network. - -This model enables fast confirmation by gossiping proposed headers and batches, while allowing full nodes to maintain the security guarantees by performing their own validation and verification of data availability. - -## Alternative Approaches - -Other models, such as based sequencing or leaderless batching, avoid reliance on a single sequencer but introduce potential latency and sometimes complexity. This ADR focuses on a performant solution for a single sequencer setup. - -## Decision - -We adopt a validation design that ensures safety through two layers of verification: - -1. 
**Forkchoice verification** for p2p gossip safety. -2. **Header-batch consistency and signature validation** by full nodes. - -## Detailed Design - -### Entities Involved - -- **Proposer (Sequencer):** - - Collects user transactions - - Builds a batch and submits it to the DA layer - - Constructs batchData ([]ID), where each ID = DAHeight + Commitment - - Applies batch to compute state update and forms a header, signs the header - - Publishes header and batch via p2p gossip - - Stores header on DA layer - -- **Full Node:** - - Receives the batch and header via gossip - - Validates that the gossip update respects forkchoice - - Checks the header signature - - Verifies the batch against the header (DA proofs and commitments) - - Applies the batch to recompute state root and match the state root in the proposed header - -### Verification Pipeline - -The full node performs the following validations: - -#### 1. Forkchoice Safe Verification (for P2P Gossip) - -P2P gossip propagation requires `Verify(untrusted, trusted)`: - -- Verify that `untrusted.LastHash == trusted.Hash()` -- This check ensures linearity and prevents equivocation in gossip -- This forkchoice check is performed **before** header and batch validation - -If this verification fails, the gossip update is dropped. - -#### 2. 
Header vs Batch Validation - -Once the gossip message passes forkchoice check: - -- Signature Validation: - - Check the signature of the sequencer (proposer) on the header -- Use `VerifyBatch(batchData)`: - - Extract the `[]ID` from header - - Each ID encodes DA height + commitment - - Query DA layer to fetch proofs using `GetProofs(batch data, batch namespace)` - - Calls `Validate(proofs, batch data, batch namespace)` - - Reconstruct commitment locally and match it with commitment in ID to ensure that batch actually corresponds to the batch data in header -- State Root Verification: - - Re-execute batch using local execution layer - - Recompute state root and match with header's declared root - -Only if all of the above pass, the header is marked as valid and added to forkchoice. - -### Diagram - -```mermaid -sequenceDiagram - participant U as User - participant P as Proposer - participant FN as Full Node - - U->>P: Submit Tx - P->>DA: Publish Batch to DA - P->>P: Compute BatchData (DAHeight || Commitment) - P->>P: Execute batch and compute new state root - P->>FN: Gossip Header + Batch - - Note over FN: On Gossip Receive: - FN->>FN: Verify(untrusted, trusted) - alt forkchoice passes - FN->>DA: Fetch Blobs for BatchData - FN->>FN: Compute and match Commitments - FN->>FN: Verify Sequencer Signature - FN->>FN: Re-execute Batch - FN->>FN: Confirm State Root - else forkchoice fails - FN-->>P: Drop gossip message - end -``` - -## Status - -Proposed - -## Consequences - -### Positive - -- Ensures full nodes validate every state transition -- Forkchoice check ensures linear chain progression and safe gossip -- Protects against malicious proposers submitting invalid batches or headers - -### Negative - -- Still relies on a trusted sequencer for liveness -- Full validation increases complexity on full nodes - -### Neutral - -- This ADR aligns with current implementation direction for single-sequencer-based applications - -## References diff --git 
a/content/docs/adr/adr-021-lazy-aggregation.md b/content/docs/adr/adr-021-lazy-aggregation.md deleted file mode 100644 index 37ece2c..0000000 --- a/content/docs/adr/adr-021-lazy-aggregation.md +++ /dev/null @@ -1,209 +0,0 @@ -# ADR 021: Lazy Aggregation with DA Layer Consistency - -## Changelog - -- 2024-01-24: Initial draft -- 2024-01-24: Revised to use existing empty batch mechanism -- 2024-01-25: Updated with implementation details from aggregation.go - -## Context - -Evolve's lazy aggregation mechanism currently produces blocks at set intervals when no transactions are present, and immediately when transactions are available. However, this approach creates inconsistency with the DA layer (Celestia) as empty blocks are not posted to the DA layer. This breaks the expected 1:1 mapping between DA layer blocks and execution layer blocks in EVM environments. - -## Decision - -Leverage the existing empty batch mechanism and `dataHashForEmptyTxs` to maintain block height consistency. - -## Detailed Design - -### Implementation Details - -1. **Modified Batch Retrieval**: - - The batch retrieval mechanism has been modified to handle empty batches differently. Instead of discarding empty batches, we now return them with the ErrNoBatch error, allowing the caller to create empty blocks with proper timestamps. This ensures that block timing remains consistent even during periods of inactivity. 
- - ```go - func (m *Manager) retrieveBatch(ctx context.Context) (*BatchData, error) { - res, err := m.sequencer.GetNextBatch(ctx, req) - if err != nil { - return nil, err - } - - if res != nil && res.Batch != nil { - m.logger.Debug("Retrieved batch", - "txCount", len(res.Batch.Transactions), - "timestamp", res.Timestamp) - - var errRetrieveBatch error - // Even if there are no transactions, return the batch with timestamp - // This allows empty blocks to maintain proper timing - if len(res.Batch.Transactions) == 0 { - errRetrieveBatch = ErrNoBatch - } - // Even if there are no transactions, update lastBatchData so we don't - // repeatedly emit the same empty batch, and persist it to metadata. - if err := m.store.SetMetadata(ctx, LastBatchDataKey, convertBatchDataToBytes(res.BatchData)); err != nil { - m.logger.Error("error while setting last batch hash", "error", err) - } - m.lastBatchData = res.BatchData - return &BatchData{Batch: res.Batch, Time: res.Timestamp, Data: res.BatchData}, errRetrieveBatch - } - return nil, ErrNoBatch - } - ``` - -2. **Empty Block Creation**: - - The block publishing logic has been enhanced to create empty blocks when a batch with no transactions is received. This uses the special `dataHashForEmptyTxs` value to indicate an empty batch, maintaining the block height consistency with the DA layer while minimizing overhead. 
- - ```go - // In publishBlock method - batchData, err := m.retrieveBatch(ctx) - if err != nil { - if errors.Is(err, ErrNoBatch) { - if batchData == nil { - m.logger.Info("No batch retrieved from sequencer, skipping block production") - return nil - } - m.logger.Info("Creating empty block, height: ", newHeight) - } else { - return fmt.Errorf("failed to get transactions from batch: %w", err) - } - } else { - if batchData.Before(lastHeaderTime) { - return fmt.Errorf("timestamp is not monotonically increasing: %s < %s", batchData.Time, m.getLastBlockTime()) - } - m.logger.Info("Creating and publishing block, height: ", newHeight) - m.logger.Debug("block info", "num_tx", len(batchData.Batch.Transactions)) - } - - header, data, err = m.createBlock(ctx, newHeight, lastSignature, lastHeaderHash, batchData) - if err != nil { - return err - } - - if err = m.store.SaveBlockData(ctx, header, data, &signature); err != nil { - return SaveBlockError{err} - } - ``` - -3. **Lazy Aggregation Loop**: - - A dedicated lazy aggregation loop has been implemented with dual timer mechanisms. The `lazyTimer` ensures blocks are produced at regular intervals even during network inactivity, while the `blockTimer` handles normal block production when transactions are available. Transaction notifications from the `Reaper` to the `Manager` are now handled via the `txNotifyCh` channel: when the `Reaper` detects new transactions, it calls `Manager.NotifyNewTransactions()`, which performs a non-blocking signal on this channel. See the tests in `block/lazy_aggregation_test.go` for verification of this behavior. 
- - ```go - // In Reaper.SubmitTxs - if r.manager != nil && len(newTxs) > 0 { - r.logger.Debug("Notifying manager of new transactions") - r.manager.NotifyNewTransactions() // Signals txNotifyCh - } - - // In Manager.NotifyNewTransactions - func (m *Manager) NotifyNewTransactions() { - select { - case m.txNotifyCh <- struct{}{}: - // Successfully sent notification - default: - // Channel buffer is full, notification already pending - } - } - // Modified lazyAggregationLoop - func (m *Manager) lazyAggregationLoop(ctx context.Context, blockTimer *time.Timer) { - // lazyTimer triggers block publication even during inactivity - lazyTimer := time.NewTimer(0) - defer lazyTimer.Stop() - - for { - select { - case <-ctx.Done(): - return - - case <-lazyTimer.C: - m.logger.Debug("Lazy timer triggered block production") - m.produceBlock(ctx, "lazy_timer", lazyTimer, blockTimer) - - case <-blockTimer.C: - if m.txsAvailable { - m.produceBlock(ctx, "block_timer", lazyTimer, blockTimer) - m.txsAvailable = false - } else { - // Ensure we keep ticking even when there are no txs - blockTimer.Reset(m.config.Node.BlockTime.Duration) - } - case <-m.txNotifyCh: - m.txsAvailable = true - } - } - } - ``` - -4. **Block Production**: - - The block production function centralizes the logic for publishing blocks and resetting timers. It records the start time, attempts to publish a block, and then intelligently resets both timers based on the elapsed time. This ensures that block production remains on schedule even if the block creation process takes significant time. 
- - ```go - func (m *Manager) produceBlock(ctx context.Context, trigger string, lazyTimer, blockTimer *time.Timer) { - // Record the start time - start := time.Now() - - // Attempt to publish the block - if err := m.publishBlock(ctx); err != nil && ctx.Err() == nil { - m.logger.Error("error while publishing block", "trigger", trigger, "error", err) - } else { - m.logger.Debug("Successfully published block", "trigger", trigger) - } - - // Reset both timers for the next aggregation window - lazyTimer.Reset(getRemainingSleep(start, m.config.Node.LazyBlockInterval.Duration)) - blockTimer.Reset(getRemainingSleep(start, m.config.Node.BlockTime.Duration)) - } - ``` - -### Key Changes - -1. Return batch with timestamp even when empty, allowing proper block timing -2. Use existing `dataHashForEmptyTxs` for empty block indication -3. Leverage current sync mechanisms that already handle empty blocks -4. Implement a dedicated lazy aggregation loop with two timers: - - `blockTimer`: Triggers block production at regular intervals when transactions are available - - `lazyTimer`: Ensures blocks are produced even during periods of inactivity -5. 
Maintain transaction availability tracking via the `txsAvailable` flag and notification channel - -### Efficiency Considerations - -- Minimal DA layer overhead for empty blocks -- Reuses existing empty block detection mechanism -- Maintains proper block timing using batch timestamps -- Intelligent timer management to account for block production time -- Non-blocking transaction notification channel to prevent backpressure - -## Status - -Implemented - -## Consequences - -### Positive - -- Maintains consistent block heights between DA and execution layers -- Leverages existing empty block mechanisms -- Simpler implementation than sentinel-based approach -- Preserves proper block timing -- Provides deterministic block production even during network inactivity -- Reduces latency for transaction inclusion during active periods - -### Negative - -- Small DA layer overhead for empty blocks -- Additional complexity in timer management - -### Neutral - -- Requires careful handling of batch timestamps -- Maintains backward compatibility with existing Evolve deployments - -## References - -- [Block Manager Implementation](../../block) -- [Block Aggregation Implementation](../../block/internal/executing/executor.go) -- [Lazy Aggregation Tests](../../block/internal/executing/executor.go) diff --git a/content/docs/adr/adr-022-validator-network.md b/content/docs/adr/adr-022-validator-network.md deleted file mode 100644 index ee62914..0000000 --- a/content/docs/adr/adr-022-validator-network.md +++ /dev/null @@ -1,150 +0,0 @@ -# 022 Validator Network - -Date: 2025-05-25 -Status: Draft - -## Context - -When a single sequencer is used there is a limited design space for the token and a limited set of security guarantees. The validator network offers an alternative to using a full consensus protocol, but offers security guarantees with more than one participant verifying the execution and ordering. 
- -The validator network acts as an extra security layer and soft confirmation enabling the rollup to move faster than the underlying DA layer with added security. Secondly a validator network introduces the opportunity to do more with the token of the chain. - -The original design and implementation was centered around IBC and adding an extra layer of security for counter party chains, so that the user is not solely trusting the sequencer to act correctly - -## Decision - -Evolve will introduce a validator network in which there will be a set of validators verifying execution and construction. - -Validators sign **one Attestation per epoch** that covers every block proposed inside that -epoch. The Attestation must be broadcast as a transaction within a configurable -**SubmissionWindow** (measured in blocks and always ≤ `EpochLength`). -Missing the window does **not** incur slashing but the validator forfeits rewards for that -epoch. -If a validator fails to submit an Attestation for **NonParticipationEpochs** consecutive -epochs it is automatically removed from the active validator set (stake remains bonded -unless separate evidence triggers slashing). - -The design is centered around the proposer producing blocks as fast as possible and asking -for signatures **after the fact, once per epoch**. This maximises throughput while still -obtaining soft-finality from a multi-party validator set. - -### High-level workflow - - 1. Block broadcast — For every height h the sequencer broadcasts the canonical BlockBundle(h) (header, transactions, state root) to all active attesters over gRPC/WebSocket. - 2. Local verification — Each attester independently: - • validates every block header in the epoch and the resulting state transition; - • (optionally) re-executes the blocks using a connected full node; - • after processing the last block of the epoch, what is signs is up to the execution environment. - 3. 
Attestation submission — The attester sends the epoch signature as a transaction - **within SubmissionWindow**. - 4. Aggregation & quorum — The attester module / contract collects epoch signatures until - ≥ ⅔ of current bonded voting power have signed, providing a soft confirmation of the - whole epoch. - - If quorum is not met by the epoch boundary, the network pauses new proposals until - quorum is reached or **EmergencyMode** governance override is enabled. - 5. Final block commit — After the block is included in the DA layer it will be considered to have a hard confirmation. - -### Signing schemes - -Different signature schemes can be used in conjunction with the validator network. To start we will support ED25519 and later one we plan on adding other signature schemes based on how user demand requires. - -Some potential future additions could be BLS12-381 aggregate and/or a BLS threshold signature. - -### Validator set & staking integration - -The attester layer can plug into different validator‑set providers. Below we outline the existing Cosmos‑SDK flow and an alternative Reth / EVM flow patterned after UniChain’s staking design. Both share the same quorum rule (≥ ⅔ voting power) and slashing philosophy. - -#### Cosmos‑SDK - -Introduce a dedicated x/network module that completely owns the CommitHash and ValidatorHash that appear in every block‑header. Evolve remains untouched; the logic lives entirely in the ABCI application. - -Hashes produced in‑app During EndBlock, x/network gathers the attestation bitmap for height h, computes and returns them in ResponseEndBlock. - -When a relayer queries /block or /header, the application serves the canonical valset hash and commit hash from its KV‑store, ensuring external clients see the attested header even though Evolve itself never verified the signatures. - -Validatorset updates from the staking module (x/staking) remains the single source of truth for bonded power. Every block it emits a ValidatorSetUpdate event. 
x/network subscribes and mirrors -the active validator bitmap. On a set‑change (say at height 100) the EndBlock hook updates x/network's bitmap before computing the hashes for the next height. - -##### Flow - -```mermaid -sequenceDiagram - participant Val as Validator - participant App as x/network - participant R as Evolve - Val->>App: MsgAttest{h, sig} - loop within epoch - App->>App: store sig, update bitmap - App->>App:EndBlock{ValidatorHash, CommitHash} - end - R->> App: Request for hashes -``` - -Missing participation at the epoch boundary x/network evaluates participation: - -- if validator power‑weighted participation < Quorum (default 2/3) ⇒ return ErrAttestationTimeout and halt new block production; -- validators whose participation < MinParticipation for the entire epoch are auto‑ejected from the attester set via an EditValidator emitted by x/network (their stake remains bonded but they cease to sign until they re‑declare). - -#### Reth/EVM Rollup - -- Stake manager contract holds the validator stake/weight and maps an address to a key. It will emit `StakeSnapshot(epoch)` events that will be consumed by the consensus client. -- Stake mirror listens for staking snapshot events in order to rebuild the validator set. The proposer will always be the same, we do not support rotation at this time. Once the validator set is rebuilt any changes that are witnessed will be applied to the validator network. -- The EVM will work in the non blocking way. The validators will be able to join and leave as they please with the requirement that they submit attestations of execution in order to provide a soft confirmation within an epoch if they would like a reward for their work. 
- -Solidity Contract - -```txt -contract StakeManager { - struct Validator { uint96 power; bytes32 edKey; bytes blsKey; } - mapping(address => Validator) public validators; - - function stake(uint96 amount, bytes32 edKey, bytes calldata blsKey) external; - function unstake(uint96 amount) external; - function slash(address val, uint96 amt) external /* onlyEvidence */; - function snapshot() external returns (bytes32 root); // called by sequencer each epoch -} -``` - -### Quorum and liveness - - • Quorum rule (per-epoch): `signedVotingPower ≥ 2/3 · totalVotingPower` - • Timers - – `SubmissionWindow` (blocks): max delay after epoch end to include an attestation. - – `AggregationTimeout` (seconds): after window closes; sequencer can advance only if - **EmergencyMode** is enabled, otherwise production halts. - • Safety vs. liveness — Because verification is local and deterministic, equivocation is impossible: the worst failure mode is not reaching quorum (→ halt) which staking incentives should discourage. - -## Architecture & Interfaces - -```mermaid -graph TD - SQ[Sequencer] -- p2p --> A1[Attester 1] - SQ -- p2p --> A2[Attester 2] - SQ -- p2p --> A3[Attester N] - A1 -- SubmitSignature Tx --> SQ - A2 -- SubmitSignature Tx --> SQ - A3 -- SubmitSignature Tx --> SQ -``` - -### Attester service - - • Conn manager — maintains persistent stream to /broadcastBlock and unary client to /SubmitSignature. - • Verifier pipeline: - - 1. basic header checks; - 2. produce signature; - 3. async submit transaction with signatures - -## Security considerations - - • Double-sign protection — Deterministic bytesToSign makes replay impossible; Ed25519 prevents malleability. - • Slashing — Existing Cosmos evidence (MsgEvidence) for missed or duplicate signatures applies unchanged. - • Sybil resistance — validator power is staked; no separate token. 
- -## Consequences - -- Increased code complexity, more to maintain - -## Future work - -- Multi-sequencer fail-over — once fast-leader-election is required we can revisit consensus purely for sequencer rotation. diff --git a/content/docs/adr/adr-template.md b/content/docs/adr/adr-template.md deleted file mode 100644 index f987c93..0000000 --- a/content/docs/adr/adr-template.md +++ /dev/null @@ -1,72 +0,0 @@ -# ADR {ADR-NUMBER}: {TITLE} - -## Changelog - -- {date}: {changelog} - -## Context - -> This section contains all the context one needs to understand the current state, and why there is a problem. It should be as succinct as possible and introduce the high level idea behind the solution. - -## Alternative Approaches - -> This section contains information around alternative options that are considered before making a decision. It should contain an explanation on why the alternative approach(es) were not chosen. - -## Decision - -> This section records the decision that was made. -> It is best to record as much info as possible from the discussion that happened. This aids in not having to go back to the Pull Request to get the needed information. - -## Detailed Design - -> This section does not need to be filled in at the start of the ADR, but must be completed prior to the merging of the implementation. -> -> Here are some common questions that get answered as part of the detailed design: -> -> - What are the user requirements? -> -> - What systems will be affected? -> -> - What new data structures are needed, what data structures will be changed? -> -> - What new APIs will be needed, what APIs will be changed? -> -> - What are the efficiency considerations (time/space)? -> -> - What are the expected access patterns (load/throughput)? -> -> - Are there any logging, monitoring or observability needs? -> -> - Are there any security considerations? -> -> - Are there any privacy considerations? -> -> - How will the changes be tested? 
-> -> - If the change is large, how will the changes be broken up for ease of review? -> -> - Will these changes require a breaking (major) release? -> -> - Does this change require coordination with the LazyLedger fork of the SDK or lazyledger-app? - -## Status - -> A decision may be "proposed" if it hasn't been agreed upon yet, or "accepted" once it is agreed upon. Once the ADR has been implemented mark the ADR as "implemented". If a later ADR changes or reverses a decision, it may be marked as "deprecated" or "superseded" with a reference to its replacement. - -{Deprecated|Proposed|Accepted|Declined} - -## Consequences - -> This section describes the consequences, after applying the decision. All consequences should be summarized here, not just the "positive" ones. - -### Positive - -### Negative - -### Neutral - -## References - -> Are there any relevant PR comments, issues that led up to this, or articles referenced for why we made the given design choice? If so link them here! - -- {reference link} diff --git a/content/docs/adr/figures/header_shares_commit.jpg b/content/docs/adr/figures/header_shares_commit.jpg deleted file mode 100644 index 3c155dc..0000000 Binary files a/content/docs/adr/figures/header_shares_commit.jpg and /dev/null differ diff --git a/content/docs/adr/meta.json b/content/docs/adr/meta.json deleted file mode 100644 index 045c84a..0000000 --- a/content/docs/adr/meta.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "title": "ADR", - "icon": "FileText", - "pages": ["..."] -} diff --git a/content/docs/api/index.md b/content/docs/api/index.md deleted file mode 100644 index ca4bfd7..0000000 --- a/content/docs/api/index.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: API Introduction -description: Evolve Node RPC API reference -full: true -toc: false ---- - -# API Documentation - -The Evolve node exposes a gRPC-Web compatible API over HTTP/1.1 with JSON encoding at `http://localhost:7331` by default. 
- -## Available Services - -- **Signer Service** — Transaction signing operations -- **Store Service** — Block and state store access -- **P2P Service** — Peer-to-peer networking -- **Config Service** — Node configuration -- **Health Service** — Health checks and status - -Browse the API operations in the sidebar to see detailed request/response schemas for each endpoint. diff --git a/content/docs/api/meta.json b/content/docs/api/meta.json deleted file mode 100644 index c52d617..0000000 --- a/content/docs/api/meta.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "title": "API Documentation", - "description": "RPC API reference", - "icon": "Unplug", - "root": true, - "pages": ["index", "..."] -} diff --git a/content/docs/audit/binary-builders.pdf b/content/docs/audit/binary-builders.pdf deleted file mode 100644 index 70fc69f..0000000 Binary files a/content/docs/audit/binary-builders.pdf and /dev/null differ diff --git a/content/docs/audit/informal-systems.pdf b/content/docs/audit/informal-systems.pdf deleted file mode 100644 index 9b0ee80..0000000 Binary files a/content/docs/audit/informal-systems.pdf and /dev/null differ diff --git a/content/docs/concepts/block-lifecycle.md b/content/docs/concepts/block-lifecycle.md deleted file mode 100644 index a344bff..0000000 --- a/content/docs/concepts/block-lifecycle.md +++ /dev/null @@ -1,705 +0,0 @@ -# Block Components - -## Abstract - -The block package provides a modular component-based architecture for handling block-related operations in full nodes. Instead of a single monolithic manager, the system is divided into specialized components that work together, each responsible for specific aspects of block processing. This architecture enables better separation of concerns, easier testing, and more flexible node configurations. 
- -The main components are: - -- **Executor**: Handles block production and state transitions (aggregator nodes only) -- **Reaper**: Periodically retrieves transactions and submits them to the sequencer (aggregator nodes only) -- **Submitter**: Manages submission of headers and data to the DA network (aggregator nodes only) -- **Syncer**: Handles synchronization from both DA and P2P sources (all full nodes) -- **Cache Manager**: Coordinates caching and tracking of blocks across all components - -A full node coordinates these components based on its role: - -- **Aggregator nodes**: Use all components for block production, submission, and synchronization -- **Non-aggregator full nodes**: Use only Syncer and Cache for block synchronization - -```mermaid -sequenceDiagram - title Overview of Block Manager - - participant User - participant Sequencer - participant Full Node 1 - participant Full Node 2 - participant DA Layer - - User->>Sequencer: Send Tx - Sequencer->>Sequencer: Generate Block - Sequencer->>DA Layer: Publish Block - - Sequencer->>Full Node 1: Gossip Block - Sequencer->>Full Node 2: Gossip Block - Full Node 1->>Full Node 1: Verify Block - Full Node 1->>Full Node 2: Gossip Block - Full Node 1->>Full Node 1: Mark Block Soft Confirmed - - Full Node 2->>Full Node 2: Verify Block - Full Node 2->>Full Node 2: Mark Block Soft Confirmed - - DA Layer->>Full Node 1: Retrieve Block - Full Node 1->>Full Node 1: Mark Block DA Included - - DA Layer->>Full Node 2: Retrieve Block - Full Node 2->>Full Node 2: Mark Block DA Included -``` - -### Component Architecture Overview - -```mermaid -flowchart TB - subgraph Block Components [Modular Block Components] - EXE[Executor
Block Production] - REA[Reaper
Tx Collection] - SUB[Submitter
DA Submission] - SYN[Syncer
Block Sync] - CAC[Cache Manager
State Tracking] - end - - subgraph External Components - CEXE[Core Executor] - SEQ[Sequencer] - DA[DA Layer] - HS[Header Store/P2P] - DS[Data Store/P2P] - ST[Local Store] - end - - REA -->|GetTxs| CEXE - REA -->|SubmitBatch| SEQ - REA -->|Notify| EXE - - EXE -->|CreateBlock| CEXE - EXE -->|ApplyBlock| CEXE - EXE -->|Save| ST - EXE -->|Track| CAC - - EXE -->|Headers| SUB - EXE -->|Data| SUB - SUB -->|Submit| DA - SUB -->|Track| CAC - - DA -->|Retrieve| SYN - HS -->|Headers| SYN - DS -->|Data| SYN - - SYN -->|ApplyBlock| CEXE - SYN -->|Save| ST - SYN -->|Track| CAC - SYN -->|SetFinal| CEXE - - CAC -->|Coordinate| EXE - CAC -->|Coordinate| SUB - CAC -->|Coordinate| SYN -``` - -## Protocol/Component Description - -The block components are initialized based on the node type: - -### Aggregator Components - -Aggregator nodes create all components for full block production and synchronization capabilities: - -```go -components := block.NewAggregatorComponents( - config, // Node configuration - genesis, // Genesis state - store, // Local datastore - executor, // Core executor for state transitions - sequencer, // Sequencer client - da, // DA client - signer, // Block signing key - // P2P stores and options... -) -``` - -### Non-Aggregator Components - -Non-aggregator full nodes create only synchronization components: - -```go -components := block.NewSyncComponents( - config, // Node configuration - genesis, // Genesis state - store, // Local datastore - executor, // Core executor for state transitions - da, // DA client - // P2P stores and options... 
(no signer or sequencer needed) -) -``` - -### Component Initialization Parameters - -| **Name** | **Type** | **Description** | -| --------------------------- | ----------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| signing key | crypto.PrivKey | used for signing blocks and data after creation | -| config | config.BlockManagerConfig | block manager configurations (see config options below) | -| genesis | \*cmtypes.GenesisDoc | initialize the block manager with genesis state (genesis configuration defined in `config/genesis.json` file under the app directory) | -| store | store.Store | local datastore for storing chain blocks and states (default local store path is `$db_dir/evolve` and `db_dir` specified in the `config.yaml` file under the app directory) | -| mempool, proxyapp, eventbus | mempool.Mempool, proxy.AppConnConsensus, \*cmtypes.EventBus | for initializing the executor (state transition function). 
mempool is also used in the manager to check for availability of transactions for lazy block production | -| dalc | da.DAClient | the data availability light client used to submit and retrieve blocks to DA network | -| headerStore | *goheaderstore.Store[*types.SignedHeader] | to store and retrieve block headers gossiped over the P2P network | -| dataStore | *goheaderstore.Store[*types.SignedData] | to store and retrieve block data gossiped over the P2P network | -| signaturePayloadProvider | types.SignaturePayloadProvider | optional custom provider for header signature payloads | -| sequencer | core.Sequencer | used to retrieve batches of transactions from the sequencing layer | -| reaper | \*Reaper | component that periodically retrieves transactions from the executor and submits them to the sequencer | - -### Configuration Options - -The block components share a common configuration: - -| Name | Type | Description | -| ------------------------ | ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- | -| BlockTime | time.Duration | time interval used for block production and block retrieval from block store ([`defaultBlockTime`][defaultBlockTime]) | -| DABlockTime | time.Duration | time interval used for both block publication to DA network and block retrieval from DA network ([`defaultDABlockTime`][defaultDABlockTime]) | -| DAStartHeight | uint64 | block retrieval from DA network starts from this height | -| LazyBlockInterval | time.Duration | time interval used for block production in lazy aggregator mode even when there are no transactions ([`defaultLazyBlockTime`][defaultLazyBlockTime]) | -| LazyMode | bool | when set to true, enables lazy aggregation mode which produces blocks only when transactions are available or at LazyBlockInterval intervals | -| MaxPendingHeadersAndData | uint64 | maximum number of pending headers and data blocks before pausing 
block production (default: 100) | -| MaxSubmitAttempts | int | maximum number of retry attempts for DA submissions (default: 30) | -| MempoolTTL | int | number of blocks to wait when transaction is stuck in DA mempool (default: 25) | -| GasPrice | float64 | gas price for DA submissions (-1 for automatic/default) | -| GasMultiplier | float64 | multiplier for gas price on DA submission retries (default: 1.3) | -| Namespace | da.Namespace | DA namespace ID for block submissions (deprecated, use HeaderNamespace and DataNamespace instead) | -| HeaderNamespace | string | namespace ID for submitting headers to DA layer (automatically encoded by the node) | -| DataNamespace | string | namespace ID for submitting data to DA layer (automatically encoded by the node) | -| RequestTimeout | duration | per-request timeout for DA `GetIDs`/`Get` calls; higher values tolerate slow DA nodes, lower values fail faster (default: 30s) | - -### Block Production (Executor Component) - -When the full node is operating as an aggregator, the **Executor component** handles block production. There are two modes of block production, which can be specified in the block manager configurations: `normal` and `lazy`. - -In `normal` mode, the block manager runs a timer, which is set to the `BlockTime` configuration parameter, and continuously produces blocks at `BlockTime` intervals. - -In `lazy` mode, the block manager implements a dual timer mechanism: - -```mermaid -flowchart LR - subgraph Lazy Aggregation Mode - R[Reaper] -->|GetTxs| CE[Core Executor] - CE -->|Txs Available| R - R -->|Submit to Sequencer| S[Sequencer] - R -->|NotifyNewTransactions| N[txNotifyCh] - - N --> E{Executor Logic} - BT[blockTimer] --> E - LT[lazyTimer] --> E - - E -->|Txs Available| P1[Produce Block with Txs] - E -->|No Txs & LazyTimer| P2[Produce Empty Block] - - P1 --> B[Block Creation] - P2 --> B - end -``` - -1. A `blockTimer` that triggers block production at regular intervals when transactions are available -2. 
A `lazyTimer` that ensures blocks are produced at `LazyBlockInterval` intervals even during periods of inactivity - -The block manager starts building a block when any transaction becomes available in the mempool via a notification channel (`txNotifyCh`). When the `Reaper` detects new transactions, it calls `Manager.NotifyNewTransactions()`, which performs a non-blocking signal on this channel. The block manager also produces empty blocks at regular intervals to maintain consistency with the DA layer, ensuring a 1:1 mapping between DA layer blocks and execution layer blocks. - -The Reaper component periodically retrieves transactions from the core executor and submits them to the sequencer. It runs independently and notifies the Executor component when new transactions are available, enabling responsive block production in lazy mode. - -#### Building the Block - -The Executor component of aggregator nodes performs the following steps to produce a block: - -```mermaid -flowchart TD - A[Timer Trigger / Transaction Notification] --> B[Retrieve Batch] - B --> C{Transactions Available?} - C -->|Yes| D[Create Block with Txs] - C -->|No| E[Create Empty Block] - D --> F[Generate Header & Data] - E --> F - F --> G[Sign Header → SignedHeader] - F --> H[Sign Data → SignedData] - G --> I[Apply Block] - H --> I - I --> J[Update State] - J --> K[Save to Store] - K --> L[Add to pendingHeaders] - K --> M[Add to pendingData] - L --> N[Broadcast Header to P2P] - M --> O[Broadcast Data to P2P] -``` - -- Retrieve a batch of transactions using `retrieveBatch()` which interfaces with the sequencer -- Call `CreateBlock` using executor with the retrieved transactions -- Create separate header and data structures from the block -- Sign the header using `signing key` to generate `SignedHeader` -- Sign the data using `signing key` to generate `SignedData` (if transactions exist) -- Call `ApplyBlock` using executor to generate an updated state -- Save the block, validators, and updated state 
to local store -- Add the newly generated header to `pendingHeaders` queue -- Add the newly generated data to `pendingData` queue (if not empty) -- Publish the newly generated header and data to channels to notify other components of the sequencer node (such as block and header gossip) - -Note: When no transactions are available, the block manager creates blocks with empty data using a special `dataHashForEmptyTxs` marker. The header and data separation architecture allows headers and data to be submitted and retrieved independently from the DA layer. - -### Block Publication to DA Network (Submitter Component) - -The **Submitter component** of aggregator nodes implements separate submission loops for headers and data, both operating at `DABlockTime` intervals. Headers and data are submitted to different namespaces to improve scalability and allow for more flexible data availability strategies: - -```mermaid -flowchart LR - subgraph Header Submission - H1[pendingHeaders Queue] --> H2[Header Submission Loop] - H2 --> H3[Marshal to Protobuf] - H3 --> H4[Submit to DA] - H4 -->|Success| H5[Remove from Queue] - H4 -->|Failure| H6[Keep in Queue & Retry] - end - - subgraph Data Submission - D1[pendingData Queue] --> D2[Data Submission Loop] - D2 --> D3[Marshal to Protobuf] - D3 --> D4[Submit to DA] - D4 -->|Success| D5[Remove from Queue] - D4 -->|Failure| D6[Keep in Queue & Retry] - end - - H2 -.->|DABlockTime| H2 - D2 -.->|DABlockTime| D2 -``` - -#### Header Submission Loop - -The `HeaderSubmissionLoop` manages the submission of signed headers to the DA network: - -- Retrieves pending headers from the `pendingHeaders` queue -- Marshals headers to protobuf format -- Submits to DA using the generic `submitToDA` helper with the configured `HeaderNamespace` -- On success, removes submitted headers from the pending queue -- On failure, headers remain in the queue for retry - -#### Data Submission Loop - -The `DataSubmissionLoop` manages the submission of signed data to the DA 
network: - -- Retrieves pending data from the `pendingData` queue -- Marshals data to protobuf format -- Submits to DA using the generic `submitToDA` helper with the configured `DataNamespace` -- On success, removes submitted data from the pending queue -- On failure, data remains in the queue for retry - -#### Generic Submission Logic - -Both loops use a shared `submitToDA` function that provides: - -- Namespace-specific submission based on header or data type -- Retry logic with configurable maximum attempts via `MaxSubmitAttempts` configuration -- Exponential backoff starting at `initialBackoff` (100ms), doubling each attempt, capped at `DABlockTime` -- Gas price management with `GasMultiplier` applied on retries using a centralized `retryStrategy` -- Recursive batch splitting for handling "too big" DA submissions that exceed blob size limits -- Comprehensive error handling for different DA submission failure types (mempool issues, context cancellation, blob size limits) -- Comprehensive metrics tracking for attempts, successes, and failures -- Context-aware cancellation support - -#### Retry Strategy and Error Handling - -The DA submission system implements sophisticated retry logic using a centralized `retryStrategy` struct to handle various failure scenarios: - -```mermaid -flowchart TD - A[Submit to DA] --> B{Submission Result} - B -->|Success| C[Reset Backoff & Adjust Gas Price Down] - B -->|Too Big| D{Batch Size > 1?} - B -->|Mempool/Not Included| E[Mempool Backoff Strategy] - B -->|Context Canceled| F[Stop Submission] - B -->|Other Error| G[Exponential Backoff] - - D -->|Yes| H[Recursive Batch Splitting] - D -->|No| I[Skip Single Item - Cannot Split] - - E --> J[Set Backoff = MempoolTTL * BlockTime] - E --> K[Multiply Gas Price by GasMultiplier] - - G --> L[Double Backoff Time] - G --> M[Cap at MaxBackoff - DABlockTime] - - H --> N[Split into Two Halves] - N --> O[Submit First Half] - O --> P[Submit Second Half] - P --> Q{Both Halves Processed?} - Q 
-->|Yes| R[Combine Results] - Q -->|No| S[Handle Partial Success] - - C --> T[Update Pending Queues] - T --> U[Post-Submit Actions] -``` - -##### Retry Strategy Features - -- **Centralized State Management**: The `retryStrategy` struct manages attempt counts, backoff timing, and gas price adjustments -- **Multiple Backoff Types**: - - Exponential backoff for general failures (doubles each attempt, capped at `DABlockTime`) - - Mempool-specific backoff (waits `MempoolTTL * BlockTime` for stuck transactions) - - Success-based backoff reset with gas price reduction -- **Intelligent Batch Splitting**: - - Recursively splits batches that exceed DA blob size limits - - Handles partial submissions within split batches - - Prevents infinite recursion with proper base cases -- **Comprehensive Error Classification**: - - `StatusSuccess`: Full or partial successful submission - - `StatusTooBig`: Triggers batch splitting logic - - `StatusNotIncludedInBlock`/`StatusAlreadyInMempool`: Mempool-specific handling - - `StatusContextCanceled`: Graceful shutdown support - - Other errors: Standard exponential backoff - -The manager enforces a limit on pending headers and data through `MaxPendingHeadersAndData` configuration. When this limit is reached, block production pauses to prevent unbounded growth of the pending queues. - -### Block Retrieval from DA Network (Syncer Component) - -The **Syncer component** implements a `RetrieveLoop` through its DARetriever that regularly pulls headers and data from the DA network. 
The retrieval process supports both legacy single-namespace mode (for backward compatibility) and the new separate namespace mode: - -```mermaid -flowchart TD - A[Start RetrieveLoop] --> B[Get DA Height] - B --> C{DABlockTime Timer} - C --> D[GetHeightPair from DA] - D --> E{Result?} - E -->|Success| F[Validate Signatures] - E -->|NotFound| G[Increment Height] - E -->|Error| H[Retry Logic] - - F --> I[Check Sequencer Info] - I --> J[Mark DA Included] - J --> K[Send to Sync] - K --> L[Increment Height] - L --> M[Immediate Next Retrieval] - - G --> C - H --> N{Retries < 10?} - N -->|Yes| O[Wait 100ms] - N -->|No| P[Log Error & Stall] - O --> D - M --> D -``` - -#### Retrieval Process - -1. **Height Management**: Starts from the latest of: - - DA height from the last state in local store - - `DAStartHeight` configuration parameter - - Maintains and increments `daHeight` counter after successful retrievals - -2. **Retrieval Mechanism**: - - Executes at `DABlockTime` intervals - - Implements namespace migration support: - - First attempts legacy namespace retrieval if migration not completed - - Falls back to separate header and data namespace retrieval - - Tracks migration status to optimize future retrievals - - Retrieves from separate namespaces: - - Headers from `HeaderNamespace` - - Data from `DataNamespace` - - Combines results from both namespaces - - Handles three possible outcomes: - - `Success`: Process retrieved header and/or data - - `NotFound`: No chain block at this DA height (normal case) - - `Error`: Retry with backoff - -3. **Error Handling**: - - Implements retry logic with 100ms delay between attempts - - After 10 retries, logs error and stalls retrieval - - Does not increment `daHeight` on persistent errors - -4. 
**Processing Retrieved Blocks**: - - Validates header and data signatures - - Checks sequencer information - - Marks blocks as DA included in caches - - Sends to sync goroutine for state update - - Successful processing triggers immediate next retrieval without waiting for timer - - Updates namespace migration status when appropriate: - - Marks migration complete when data is found in new namespaces - - Persists migration state to avoid future legacy checks - -#### Header and Data Caching - -The retrieval system uses persistent caches for both headers and data: - -- Prevents duplicate processing -- Tracks DA inclusion status -- Supports out-of-order block arrival -- Enables efficient sync from P2P and DA sources -- Maintains namespace migration state for optimized retrieval - -For more details on DA integration, see the [Data Availability specification](./data-availability.md). - -#### Out-of-Order Chain Blocks on DA - -Evolve should support blocks arriving out-of-order on DA, like so: -![out-of-order blocks](../reference/specs/out-of-order-blocks.png) - -### Block Sync Service (Syncer Component) - -The **Syncer component** manages the synchronization of headers and data through its P2PHandler and coordination with the Cache Manager: - -#### Architecture - -- **Header Store**: Uses `goheader.Store[*types.SignedHeader]` for header management -- **Data Store**: Uses `goheader.Store[*types.SignedData]` for data management -- **Separation of Concerns**: Headers and data are handled independently, supporting the header/data separation architecture - -#### Synchronization Flow - -1. **Header Sync**: Headers created by the sequencer are sent to the header store for P2P gossip -2. **Data Sync**: Data blocks are sent to the data store for P2P gossip -3. **Cache Integration**: Both header and data caches track seen items to prevent duplicates -4. 
**DA Inclusion Tracking**: Separate tracking for header and data DA inclusion status - -### Block Publication to P2P network (Executor Component) - -The **Executor component** of aggregator nodes publishes headers and data separately to the P2P network: - -#### Header Publication - -- Headers are sent through the header broadcast channel -- Written to the header store for P2P gossip -- Broadcast to network peers via header sync service - -#### Data Publication - -- Data blocks are sent through the data broadcast channel -- Written to the data store for P2P gossip -- Broadcast to network peers via data sync service - -Non-sequencer full nodes receive headers and data through the P2P sync service and do not publish blocks themselves. - -### Block Retrieval from P2P network (Syncer Component) - -The **Syncer component** retrieves headers and data separately from P2P stores through its P2PHandler: - -#### Header Store Retrieval Loop - -The `HeaderStoreRetrieveLoop`: - -- Operates at `BlockTime` intervals via `headerStoreCh` signals -- Tracks `headerStoreHeight` for the last retrieved header -- Retrieves all headers between last height and current store height -- Validates sequencer information using `assertUsingExpectedSingleSequencer` -- Marks headers as "seen" in the header cache -- Sends headers to sync goroutine via `headerInCh` - -#### Data Store Retrieval Loop - -The `DataStoreRetrieveLoop`: - -- Operates at `BlockTime` intervals via `dataStoreCh` signals -- Tracks `dataStoreHeight` for the last retrieved data -- Retrieves all data blocks between last height and current store height -- Validates data signatures using `assertValidSignedData` -- Marks data as "seen" in the data cache -- Sends data to sync goroutine via `dataInCh` - -#### Soft Confirmations - -Headers and data retrieved from P2P are marked as soft confirmed until both: - -1. The corresponding header is seen on the DA layer -2. 
The corresponding data is seen on the DA layer - -Once both conditions are met, the block is marked as DA-included. - -#### About Soft Confirmations and DA Inclusions - -The block manager retrieves blocks from both the P2P network and the underlying DA network because the blocks are available in the P2P network faster and DA retrieval is slower (e.g., 1 second vs 6 seconds). -The blocks retrieved from the P2P network are only marked as soft confirmed until the DA retrieval succeeds on those blocks and they are marked DA-included. -DA-included blocks are considered to have a higher level of finality. - -**DAIncluderLoop**: -The `DAIncluderLoop` is responsible for advancing the `DAIncludedHeight` by: - -- Checking if blocks after the current height have both header and data marked as DA-included in caches -- Stopping advancement if either header or data is missing for a height -- Calling `SetFinal` on the executor when a block becomes DA-included -- Storing the Evolve height to DA height mapping for tracking -- Ensuring only blocks with both header and data present are considered DA-included - -### State Update after Block Retrieval (Syncer Component) - -The **Syncer component** uses a `SyncLoop` to coordinate state updates from blocks retrieved via P2P or DA networks: - -```mermaid -flowchart TD - subgraph Sources - P1[P2P Header Store] --> H[headerInCh] - P2[P2P Data Store] --> D[dataInCh] - DA1[DA Header Retrieval] --> H - DA2[DA Data Retrieval] --> D - end - - subgraph SyncLoop - H --> S[Sync Goroutine] - D --> S - S --> C{Header & Data for Same Height?} - C -->|Yes| R[Reconstruct Block] - C -->|No| W[Wait for Matching Pair] - R --> V[Validate Signatures] - V --> A[ApplyBlock] - A --> CM[Commit] - CM --> ST[Store Block & State] - ST --> F{DA Included?} - F -->|Yes| FN[SetFinal] - F -->|No| E[End] - FN --> U[Update DA Height] - end -``` - -#### Sync Loop Architecture - -The `SyncLoop` processes headers and data from multiple sources: - -- Headers from `headerInCh` 
(P2P and DA sources) -- Data from `dataInCh` (P2P and DA sources) -- Maintains caches to track processed items -- Ensures ordered processing by height - -#### State Update Process - -When both header and data are available for a height: - -1. **Block Reconstruction**: Combines header and data into a complete block -2. **Validation**: Verifies header and data signatures match expectations -3. **ApplyBlock**: - - Validates the block against current state - - Executes transactions - - Captures validator updates - - Returns updated state -4. **Commit**: - - Persists execution results - - Updates mempool by removing included transactions - - Publishes block events -5. **Storage**: - - Stores the block, validators, and updated state - - Updates last state in manager -6. **Finalization**: - - When block is DA-included, calls `SetFinal` on executor - - Updates DA included height - -## Message Structure/Communication Format - -### Component Communication - -The components communicate through well-defined interfaces: - -#### Executor ↔ Core Executor - -- `InitChain`: initializes the chain state with the given genesis time, initial height, and chain ID using `InitChainSync` on the executor to obtain initial `appHash` and initialize the state. -- `CreateBlock`: prepares a block with transactions from the provided batch data. -- `ApplyBlock`: validates the block, executes the block (apply transactions), captures validator updates, and returns updated state. -- `SetFinal`: marks the block as final when both its header and data are confirmed on the DA layer. -- `GetTxs`: retrieves transactions from the application (used by Reaper component). - -#### Reaper ↔ Sequencer - -- `GetNextBatch`: retrieves the next batch of transactions to include in a block. -- `VerifyBatch`: validates that a batch came from the expected sequencer. - -#### Submitter/Syncer ↔ DA Layer - -- `Submit`: submits headers or data blobs to the DA network. 
-- `Get`: retrieves headers or data blobs from the DA network. -- `GetHeightPair`: retrieves both header and data at a specific DA height. - -## Assumptions and Considerations - -### Component Architecture - -- The block package uses a modular component architecture instead of a monolithic manager -- Components are created based on node type: aggregator nodes get all components, non-aggregator nodes only get synchronization components -- Each component has a specific responsibility and communicates through well-defined interfaces -- Components share a common Cache Manager for coordination and state tracking - -### Initialization and State Management - -- Components load the initial state from the local store and use genesis if not found in the local store, when the node (re)starts -- During startup the Syncer invokes the execution Replayer to re-execute any blocks the local execution layer is missing; the replayer enforces strict app-hash matching so a mismatch aborts initialization instead of silently drifting out of sync -- The default mode for aggregator nodes is normal (not lazy) -- Components coordinate through channels and shared cache structures - -### Block Production (Executor Component) - -- The Executor can produce empty blocks -- In lazy aggregation mode, the Executor maintains consistency with the DA layer by producing empty blocks at regular intervals, ensuring a 1:1 mapping between DA layer blocks and execution layer blocks -- The lazy aggregation mechanism uses a dual timer approach: - - A `blockTimer` that triggers block production when transactions are available - - A `lazyTimer` that ensures blocks are produced even during periods of inactivity -- Empty batches are handled differently in lazy mode - instead of discarding them, they are returned with the `ErrNoBatch` error, allowing the caller to create empty blocks with proper timestamps -- Transaction notifications from the `Reaper` to the `Executor` are handled via a non-blocking notification 
channel (`txNotifyCh`) to prevent backpressure - -### DA Submission (Submitter Component) - -- The Submitter enforces `MaxPendingHeadersAndData` limit to prevent unbounded growth of pending queues during DA submission issues -- Headers and data are submitted separately to the DA layer using different namespaces, supporting the header/data separation architecture -- The Cache Manager uses persistent caches for headers and data to track seen items and DA inclusion status -- Namespace migration is handled transparently by the Syncer, with automatic detection and state persistence to optimize future operations -- The system supports backward compatibility with legacy single-namespace deployments while transitioning to separate namespaces -- Gas price management in the Submitter includes automatic adjustment with `GasMultiplier` on DA submission retries - -### Storage and Persistence - -- Components use persistent storage (disk) when the `root_dir` and `db_path` configuration parameters are specified in `config.yaml` file under the app directory. If these configuration parameters are not specified, the in-memory storage is used, which will not be persistent if the node stops -- The Syncer does not re-apply blocks when they transition from soft confirmed to DA included status. 
The block is only marked DA included in the caches -- Header and data stores use separate prefixes for isolation in the underlying database -- The genesis `ChainID` is used to create separate `PubSubTopID`s for headers and data in go-header - -### P2P and Synchronization - -- Block sync over the P2P network works only when a full node is connected to the P2P network by specifying peers via the `P2PConfig.Peers` configuration parameter when starting the full node -- Node's context is passed down to all components to support graceful shutdown and cancellation - -## Metrics - -The block components expose Prometheus metrics for monitoring block production, DA submission/retrieval, sync progress, and errors. See the [Metrics guide](/guides/metrics) for configuration and available metric names. - -## Implementation - -The modular block components are implemented in the following packages: - -- [Executor]: Block production and state transitions (`block/internal/executing/`) -- [Reaper]: Transaction collection and submission (`block/internal/reaping/`) -- [Submitter]: DA submission logic (`block/internal/submitting/`) -- [Syncer]: Block synchronization from DA and P2P (`block/internal/syncing/`) -- [Cache Manager]: Coordination and state tracking (`block/internal/cache/`) -- [Components]: Main components orchestration (`block/components.go`) - -See [tutorial] for running a multi-node network with both aggregator and non-aggregator full nodes. 
- -## References - -[1] [Go Header][go-header] - -[2] [Block Sync][block-sync] - -[3] [Full Node][full-node] - -[4] [Block Components][Components] - -[5] [Tutorial][tutorial] - -[6] [Header and Data Separation ADR](../../adr/adr-014-header-and-data-separation.md) - -[7] [Evolve Minimal Header](../../adr/adr-015-rollkit-minimal-header.md) - -[8] [Data Availability](./data-availability.md) - -[9] [Lazy Aggregation with DA Layer Consistency ADR](../../adr/adr-021-lazy-aggregation.md) - -[defaultBlockTime]: https://github.com/evstack/ev-node/blob/main/pkg/config/defaults.go#L50 -[defaultDABlockTime]: https://github.com/evstack/ev-node/blob/main/pkg/config/defaults.go#L59 -[defaultLazyBlockTime]: https://github.com/evstack/ev-node/blob/main/pkg/config/defaults.go#L52 -[go-header]: https://github.com/celestiaorg/go-header -[block-sync]: https://github.com/evstack/ev-node/blob/main/pkg/sync/sync_service.go -[full-node]: https://github.com/evstack/ev-node/blob/main/node/full.go -[Executor]: https://github.com/evstack/ev-node/blob/main/block/internal/executing/executor.go -[Reaper]: https://github.com/evstack/ev-node/blob/main/block/internal/reaping/reaper.go -[Submitter]: https://github.com/evstack/ev-node/blob/main/block/internal/submitting/submitter.go -[Syncer]: https://github.com/evstack/ev-node/blob/main/block/internal/syncing/syncer.go -[Cache Manager]: https://github.com/evstack/ev-node/blob/main/block/internal/cache/manager.go -[Components]: https://github.com/evstack/ev-node/blob/main/block/components.go -[tutorial]: https://ev.xyz/guides/full-node diff --git a/content/docs/concepts/data-availability.md b/content/docs/concepts/data-availability.md deleted file mode 100644 index e581a5c..0000000 --- a/content/docs/concepts/data-availability.md +++ /dev/null @@ -1,71 +0,0 @@ -# Data Availability - -Data availability (DA) ensures that all transaction data required to verify the chain's state is accessible to anyone. 
- -## Why DA Matters - -Without data availability guarantees: - -- Nodes can't verify state transitions -- Users can't prove their balances -- The chain's security model breaks down - -Evolve uses external DA layers to provide these guarantees, rather than storing all data on L1. - -## How Evolve Handles Data Availability - -Evolve currently supports two DA modes: - -### Local DA - -- **Use case**: Development and testing -- **Guarantee**: None (operator can withhold data) -- **Latency**: Instant - -### Celestia - -- **Use case**: Production deployments -- **Guarantee**: Data availability sampling (DAS) -- **Latency**: ~12 seconds to finality - -## DA Flow - -```text -Block Produced - │ - ▼ -┌─────────────────┐ -│ Submitter │ Queues block for DA -└────────┬────────┘ - │ - ▼ -┌─────────────────┐ -│ DA Layer │ Stores and orders data -└────────┬────────┘ - │ - ▼ -┌─────────────────┐ -│ Full Nodes │ Retrieve and verify -└─────────────────┘ -``` - -## Namespaces - -Evolve uses DA namespaces to organize data: - -| Namespace | Purpose | -|-----------|---------| -| Header | Block headers | -| Data | Transaction data | - -## Best Practices - -- **Development**: Use Local DA for fast iteration -- **Testnet**: Use Celestia testnet (Mocha or Arabica) -- **Production**: Use Celestia mainnet or equivalent - -## Learn More - -- [Local DA Guide](/guides/da-layers/local-da) -- [Celestia Guide](/guides/da-layers/celestia) -- [DA Interface Reference](/reference/interfaces/da) diff --git a/content/docs/concepts/fee-systems.md b/content/docs/concepts/fee-systems.md deleted file mode 100644 index 16568ff..0000000 --- a/content/docs/concepts/fee-systems.md +++ /dev/null @@ -1,157 +0,0 @@ -# Fee Systems - -Evolve chains have two layers of fees: execution fees (paid to process transactions) and DA fees (paid to post data). 
- -## Execution Fees - -### EVM (ev-reth) - -Uses EIP-1559 fee model: - -```text -Transaction Fee = (Base Fee + Priority Fee) × Gas Used -``` - -| Component | Destination | Purpose | -|-----------|-------------|---------| -| Base Fee | Burned (or redirected) | Congestion pricing | -| Priority Fee | Sequencer | Incentive for inclusion | - -#### Base Fee Redirect - -By default, base fees are burned. ev-reth can redirect them to a treasury: - -```json -{ - "config": { - "evolve": { - "baseFeeSink": "0xTREASURY", - "baseFeeRedirectActivationHeight": 0 - } - } -} -``` - -See [Base Fee Redirect](/ev-reth/features/base-fee-redirect) for details. - -### Cosmos SDK (ev-abci) - -Uses standard Cosmos SDK fee model: - -```text -Transaction Fee = Gas Price × Gas Used -``` - -Configure minimum gas prices: - -```toml -# app.toml -minimum-gas-prices = "0.025stake" -``` - -Fees go to the fee collector module and can be distributed via standard Cosmos mechanisms. - -## DA Fees - -Both execution environments incur DA fees when blocks are posted to the DA layer. - -### Cost Factors - -| Factor | Impact | -|--------|--------| -| Block size | Linear cost increase | -| DA gas price | Market-driven, varies | -| Batching | Amortizes overhead | -| Compression | Reduces data size | - -### Who Pays? - -The sequencer pays DA fees from their own funds. They recover costs through: - -- Priority fees from users -- Base fee redirect (if configured) -- External subsidy - -### Optimization Strategies - -#### Lazy Aggregation - -Only produce blocks when there are transactions: - -```yaml -node: - lazy-aggregator: true - lazy-block-time: 1s # Max wait time -``` - -Reduces empty blocks and DA costs. 
- -#### Batching - -ev-node batches multiple blocks into single DA submissions: - -```yaml -da: - batch-size-threshold: 100000 # bytes - batch-max-delay: 5s -``` - -#### Compression - -Enable blob compression: - -```yaml -da: - compression: true -``` - -## Fee Flow Diagram - -```text -User Transaction - │ - │ Pays: Gas Price × Gas - ▼ -┌─────────────────┐ -│ Sequencer │ -│ │ -│ Receives: │ -│ - Priority fees │ -│ - Base fees* │ -└────────┬────────┘ - │ - │ Pays: DA fees - ▼ -┌─────────────────┐ -│ DA Layer │ -│ (Celestia) │ -└─────────────────┘ - -* If base fee redirect is enabled -``` - -## Estimating Costs - -### Execution Costs - -EVM: - -```bash -cast estimate --rpc-url http://localhost:8545 "transfer(address,uint256)" -``` - -Cosmos: - -```bash -appd tx bank send 1000stake --gas auto --gas-adjustment 1.3 -``` - -### DA Costs - -Depends on: - -- DA layer pricing (e.g., Celestia gas price) -- Data size per block -- Submission frequency - -Use the [Celestia Gas Calculator](/guides/tools/celestia-gas-calculator) for estimates. diff --git a/content/docs/concepts/finality.md b/content/docs/concepts/finality.md deleted file mode 100644 index 3462ec9..0000000 --- a/content/docs/concepts/finality.md +++ /dev/null @@ -1,55 +0,0 @@ -# Finality - -Finality determines when a transaction is irreversible. Evolve has a multi-stage finality model. 
- -## Finality Stages - -```text -Transaction Submitted - │ - ▼ -┌───────────────────┐ -│ Soft Confirmed │ ← Block produced, gossiped via P2P -└─────────┬─────────┘ - │ - ▼ -┌───────────────────┐ -│ DA Finalized │ ← DA layer confirms inclusion -└───────────────────┘ -``` - -### Soft Confirmation - -When a block is produced and gossiped via P2P: - -- **Latency**: Milliseconds (block time) -- **Guarantee**: Sequencer has committed to this ordering -- **Risk**: Sequencer could equivocate (produce conflicting blocks) - -### DA Finalized - -When the DA layer confirms the block is included: - -- **Latency**: ~6 seconds (Celestia) -- **Guarantee**: Block data is permanently available and ordered -- **Risk**: None (assuming DA layer security) - -## Choosing Finality Thresholds - -| Use Case | Recommended Finality | -|----------|---------------------| -| Display balance | Soft confirmation | -| Accept payment | Soft confirmation | -| Process withdrawal | DA finalized | -| Bridge transfer | DA finalized | - -## Configuration - -Block time affects soft confirmation latency: - -```yaml -node: - block-time: 100ms -``` - -DA finality depends on the DA layer. Celestia provides ~6 second finality. diff --git a/content/docs/concepts/meta.json b/content/docs/concepts/meta.json deleted file mode 100644 index 521f756..0000000 --- a/content/docs/concepts/meta.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "title": "Concepts", - "icon": "Lightbulb", - "pages": ["..."] -} diff --git a/content/docs/concepts/p2p-networking.md b/content/docs/concepts/p2p-networking.md deleted file mode 100644 index c35ab10..0000000 --- a/content/docs/concepts/p2p-networking.md +++ /dev/null @@ -1,60 +0,0 @@ -# P2P - -Every node (both full and light) runs a P2P client using [go-libp2p][go-libp2p] P2P networking stack for gossiping transactions in the chain's P2P network. The same P2P client is also used by the header and block sync services for gossiping headers and blocks. 
- -Following parameters are required for creating a new instance of a P2P client: - -* P2PConfig (described below) -* [go-libp2p][go-libp2p] private key used to create a libp2p connection and join the p2p network. -* chainID: identifier used as namespace within the p2p network for peer discovery. The namespace acts as a sub network in the p2p network, where peer connections are limited to the same namespace. -* datastore: an instance of [go-datastore][go-datastore] used for creating a connection gator and stores blocked and allowed peers. -* logger - -```go -// P2PConfig stores configuration related to peer-to-peer networking. -type P2PConfig struct { - ListenAddress string // Address to listen for incoming connections - Seeds string // Comma separated list of seed nodes to connect to - BlockedPeers string // Comma separated list of nodes to ignore - AllowedPeers string // Comma separated list of nodes to whitelist -} -``` - -A P2P client also instantiates a [connection gator][conngater] to block and allow peers specified in the `P2PConfig`. - -It also sets up a gossiper using the gossip topic `+` (`txTopicSuffix` is defined in [p2p/client.go][client.go]), a Distributed Hash Table (DHT) using the `Seeds` defined in the `P2PConfig` and peer discovery using go-libp2p's `discovery.RoutingDiscovery`. - -A P2P client provides an interface `SetTxValidator(p2p.GossipValidator)` for specifying a gossip validator which can define how to handle the incoming `GossipMessage` in the P2P network. The `GossipMessage` represents message gossiped via P2P network (e.g. transaction, Block etc). - -```go -// GossipValidator is a callback function type. -type GossipValidator func(*GossipMessage) bool -``` - -The full nodes define a transaction validator (shown below) as gossip validator for processing the gossiped transactions to add to the mempool, whereas light nodes simply pass a dummy validator as light nodes do not process gossiped transactions. 
- -```go -// newTxValidator creates a pubsub validator that uses the node's mempool to check the -// transaction. If the transaction is valid, then it is added to the mempool -func (n *FullNode) newTxValidator() p2p.GossipValidator { -``` - -```go -// Dummy validator that always returns a callback function with boolean `false` -func (ln *LightNode) falseValidator() p2p.GossipValidator { -``` - -## References - -[1] [client.go][client.go] - -[2] [go-datastore][go-datastore] - -[3] [go-libp2p][go-libp2p] - -[4] [conngater][conngater] - -[client.go]: https://github.com/evstack/ev-node/blob/main/pkg/p2p/client.go -[go-datastore]: https://github.com/ipfs/go-datastore -[go-libp2p]: https://github.com/libp2p/go-libp2p -[conngater]: https://github.com/libp2p/go-libp2p/tree/master/p2p/net/conngater diff --git a/content/docs/concepts/sequencing.md b/content/docs/concepts/sequencing.md deleted file mode 100644 index 1d5fb7d..0000000 --- a/content/docs/concepts/sequencing.md +++ /dev/null @@ -1,120 +0,0 @@ -# Sequencing - -Sequencing is the process of determining the order of transactions in a blockchain. In rollups, the sequencer is the entity responsible for collecting transactions from users, ordering them, and producing blocks that are eventually posted to the data availability (DA) layer. - -Transaction ordering matters because it determines execution outcomes. Two transactions that touch the same state can produce different results depending on which executes first. The sequencer's ordering decisions directly impact users, particularly in DeFi where transaction order can mean the difference between a successful trade and a failed one. - -## The Role of the Sequencer - -A sequencer performs three core functions: - -1. **Transaction collection** — Accepting transactions from users and holding them in a mempool -2. **Ordering** — Deciding which transactions to include and in what order -3. 
**Block production** — Bundling ordered transactions into blocks and publishing them - -In traditional L1 blockchains, these functions are distributed across validators through consensus. In rollups, sequencing can be handled differently depending on the design goals. - -## Single Sequencer - -The simplest approach is a single sequencer: one designated node that orders all transactions. - -```text -User → Sequencer → Block → DA Layer -``` - -**Advantages:** - -- **Low latency** — No consensus required means block times can be very fast (sub-second) -- **Simple operation** — One node, one source of truth for ordering -- **Predictable performance** — No coordination overhead - -**Disadvantages:** - -- **Centralization** — Single point of control over transaction ordering -- **Censorship risk** — The sequencer can refuse to include specific transactions -- **Liveness dependency** — If the sequencer goes down, the chain halts -- **MEV extraction** — The sequencer has full visibility and can reorder for profit - -Most production rollups today use single sequencers because the performance benefits are significant and the trust assumptions are often acceptable for their use cases. - -## Based Sequencing - -Based sequencing (also called "based rollups") delegates transaction ordering to the underlying DA layer. Instead of a dedicated sequencer, users submit transactions directly to the DA layer, and all rollup nodes independently derive the same ordering from DA blocks. 
- -```text -User → DA Layer → All Nodes Derive Same Order -``` - -**Advantages:** - -- **Decentralization** — No privileged sequencer role -- **Censorship resistance** — Inherits the censorship resistance of the DA layer -- **Liveness** — Chain stays live as long as the DA layer is live -- **Shared security** — Ordering is secured by the DA layer's consensus - -**Disadvantages:** - -- **Higher latency** — Block times are bounded by DA layer block times (e.g., ~12s for Ethereum, ~6s for Celestia) -- **MEV leakage** — MEV flows to DA layer validators rather than the rollup -- **Complexity** — Requires deterministic derivation rules that all nodes must follow - -Based sequencing is compelling for applications that prioritize decentralization over speed. - -## Hybrid Approaches - -### Forced Inclusion - -Forced inclusion is a mechanism that combines the performance of single sequencing with censorship resistance guarantees. It works as follows: - -1. Users normally submit transactions to the sequencer for fast inclusion -2. If censored, users can submit transactions directly to the DA layer -3. The sequencer must include DA-submitted transactions within a defined time window -4. Failure to include triggers penalties or allows the chain to transition to based mode - -This gives users an escape hatch while maintaining the benefits of centralized sequencing for the common case. - -### Shared Sequencing - -Multiple rollups can share a sequencer or sequencer network. This enables: - -- **Atomic cross-rollup transactions** — Transactions that span multiple rollups can be ordered atomically -- **Shared MEV** — Revenue from cross-rollup MEV can be distributed -- **Reduced costs** — Infrastructure costs are amortized across chains - -Shared sequencing is an active area of research and development. - -## MEV Considerations - -Maximal Extractable Value (MEV) is the profit a sequencer can extract by reordering, inserting, or censoring transactions. 
Common MEV strategies include: - -- **Frontrunning** — Inserting a transaction before a target transaction -- **Backrunning** — Inserting a transaction immediately after a target -- **Sandwich attacks** — Combining frontrunning and backrunning around a target - -The sequencing design determines who captures MEV: - -| Design | MEV Captured By | -|-------------------|--------------------------| -| Single sequencer | Sequencer operator | -| Based sequencing | DA layer validators | -| Shared sequencing | Shared sequencer network | - -Some rollups implement MEV mitigation through encrypted mempools, fair ordering protocols, or MEV redistribution to users. - -## Choosing a Sequencing Model - -| Factor | Single Sequencer | Based Sequencer | -|------------------------|---------------------------|---------------------| -| Block time | Sub-second possible | DA layer block time | -| Censorship resistance | Requires forced inclusion | Native | -| Liveness | Sequencer must be online | DA layer liveness | -| MEV control | Sequencer controlled | DA layer controlled | -| Operational complexity | Lower | Higher | - -The right choice depends on your application's priorities. High-frequency trading applications might prefer single sequencing for speed. Applications handling high-value, censorship-sensitive transactions might prefer based sequencing for its guarantees. 
- -## Learn More - -- [Forced Inclusion](/guides/advanced/forced-inclusion) — Implementing censorship resistance with single sequencing -- [Based Sequencing](/guides/advanced/based-sequencing) — Running a based rollup -- [Sequencer Interface](/reference/interfaces/sequencer) — Implementation reference diff --git a/content/docs/concepts/transaction-flow.md b/content/docs/concepts/transaction-flow.md deleted file mode 100644 index 8d05532..0000000 --- a/content/docs/concepts/transaction-flow.md +++ /dev/null @@ -1,53 +0,0 @@ -# Transaction flow - -Chain users use a light node to communicate with the chain P2P network for two primary reasons: - -- submitting transactions -- gossiping headers and fraud proofs - -Here's what the typical transaction flow looks like: - -## Transaction submission - -```mermaid -sequenceDiagram - participant User - participant LightNode - participant FullNode - - User->>LightNode: Submit Transaction - LightNode->>FullNode: Gossip Transaction - FullNode-->>User: Refuse (if invalid) -``` - -## Transaction validation and processing - -```mermaid -sequenceDiagram - participant FullNode - participant Sequencer - - FullNode->>FullNode: Check Validity - FullNode->>FullNode: Add to Mempool (if valid) - FullNode-->>User: Transaction Processed (if valid) - FullNode->>Sequencer: Inform about Valid Transaction - Sequencer->>DALayer: Add to Chain Block -``` - -## Block processing - -```mermaid -sequenceDiagram - participant DALayer - participant FullNode - participant Chain - - DALayer->>Chain: Update State - DALayer->>FullNode: Download & Validate Block -``` - -To transact, users submit a transaction to their light node, which gossips the transaction to a full node. Before adding the transaction to their mempool, the full node checks its validity. Valid transactions are included in the mempool, while invalid ones are refused, and the user's transaction will not be processed. 
- -If the transaction is valid and has been included in the mempool, the sequencer can add it to a chain block, which is then submitted to the data availability (DA) layer. This results in a successful transaction flow for the user, and the state of the chain is updated accordingly. - -After the block is submitted to the DA layer, the full nodes download and validate the block. diff --git a/content/docs/ev-abci/integration-guide.md b/content/docs/ev-abci/integration-guide.md deleted file mode 100644 index a7d5b46..0000000 --- a/content/docs/ev-abci/integration-guide.md +++ /dev/null @@ -1,131 +0,0 @@ -# Integration Guide - -Integrate ev-abci into a Cosmos SDK application. - -## Overview - -ev-abci replaces CometBFT as the consensus layer. Your ABCI application logic remains unchanged—only the node startup code changes. - -## Prerequisites - -- Cosmos SDK v0.50+ application -- Go 1.22+ - -## Step 1: Add Dependency - -```bash -go get github.com/evstack/ev-abci@latest -``` - -## Step 2: Modify Start Command - -Locate your app's entrypoint (typically `cmd//root.go` or `main.go`). - -### Before (CometBFT) - -```go -import ( - "github.com/cosmos/cosmos-sdk/server" -) - -// In your root command setup: -server.AddCommands(rootCmd, app.DefaultNodeHome, newApp, appExport) -``` - -### After (ev-abci) - -```go -import ( - "github.com/cosmos/cosmos-sdk/server" - evabci "github.com/evstack/ev-abci/server" -) - -// Keep existing commands for init, genesis, keys, etc. 
-server.AddCommands(rootCmd, app.DefaultNodeHome, newApp, appExport) - -// Replace the start command -startCmd := &cobra.Command{ - Use: "start", - Short: "Run the node", - RunE: func(cmd *cobra.Command, _ []string) error { - return evabci.StartHandler(cmd, newApp) - }, -} -evabci.AddFlags(startCmd) -rootCmd.AddCommand(startCmd) -``` - -## Step 3: Build - -```bash -go build -o appd ./cmd/appd -``` - -## Step 4: Verify - -Check for ev-abci flags: - -```bash -./appd start --help -``` - -Expected flags: - -```text ---evnode.node.aggregator Run as block producer ---evnode.da.address DA layer address ---evnode.signer.passphrase Signer passphrase ---evnode.node.block_time Block production interval -``` - -## Step 5: Initialize - -Standard Cosmos SDK initialization: - -```bash -./appd init mynode --chain-id mychain-1 -./appd keys add mykey --keyring-backend test -./appd genesis add-genesis-account mykey 1000000000stake --keyring-backend test -./appd genesis gentx mykey 1000000stake --chain-id mychain-1 --keyring-backend test -./appd genesis collect-gentxs -``` - -## Step 6: Start - -```bash -./appd start \ - --evnode.node.aggregator \ - --evnode.da.address http://localhost:7980 \ - --evnode.signer.passphrase secret -``` - -## Configuration - -### ev-node Flags - -| Flag | Description | Default | -|------|-------------|---------| -| `--evnode.node.aggregator` | Run as sequencer | `false` | -| `--evnode.node.block_time` | Block interval | `1s` | -| `--evnode.da.address` | DA layer URL | required | -| `--evnode.signer.passphrase` | Signer passphrase | required | -| `--evnode.p2p.peers` | P2P peer addresses | none | - -### Full Node (Non-Sequencer) - -```bash -./appd start \ - --evnode.da.address http://localhost:7980 \ - --evnode.p2p.peers @:26659 -``` - -## RPC Compatibility - -ev-abci provides CometBFT-compatible RPC endpoints. Existing clients work without modification. - -See [RPC Compatibility](/ev-abci/rpc-compatibility) for details. 
- -## Next Steps - -- [Migration from CometBFT](/ev-abci/migration-from-cometbft) — Migrate existing chain -- [RPC Compatibility](/ev-abci/rpc-compatibility) — Endpoint compatibility diff --git a/content/docs/ev-abci/meta.json b/content/docs/ev-abci/meta.json deleted file mode 100644 index 0e1b7fa..0000000 --- a/content/docs/ev-abci/meta.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "title": "EV-ABCI", - "icon": "Blocks", - "pages": ["...", "modules"] -} diff --git a/content/docs/ev-abci/migration-from-cometbft.md b/content/docs/ev-abci/migration-from-cometbft.md deleted file mode 100644 index 375873e..0000000 --- a/content/docs/ev-abci/migration-from-cometbft.md +++ /dev/null @@ -1,271 +0,0 @@ -# Migrating an Existing Chain to ev-abci - -This guide is for developers of existing Cosmos SDK chains who want to replace their node's default CometBFT consensus engine with the `ev-abci` implementation. By following these steps, you will migrate your chain to run as an `ev-abci` node while preserving chain state. - -## Overview of Migration Process - -The migration process involves the following key phases: - -1. **Code Preparation:** Add migration module, staking wrapper, and upgrade handler to your existing chain -2. **Governance Proposal:** Create and pass a governance proposal to initiate the migration -3. **State Export:** Export the current chain state at the designated upgrade height -4. **Node Reconfiguration:** Wire the `ev-abci` start handler into your node's entrypoint -5. **Migration Execution:** Run `appd evolve-migrate` to transform the exported state -6. **Chain Restart:** Start the new `ev-abci` node with the migrated state - -This document will guide you through each phase. - ---- - -## Phase 1: Code Preparation - Add Migration Module and Staking Wrapper - -The first step prepares your existing chain for migration by integrating the necessary modules. - -### Step 1: Add Migration Manager Module - -Add the `migrationmngr` module to your application. 
This module manages the transition from a PoS validator set to a sequencer-based model. - -*Note: For detailed information about the migration manager, please refer to the [migration manager documentation](https://github.com/evstack/ev-abci/tree/main/modules/migrationmngr).* - -In your `app.go` file: - -1. Import the migration manager module: - -```go -import ( - // ... - migrationmngr "github.com/evstack/ev-abci/modules/migrationmngr" - migrationmngrkeeper "github.com/evstack/ev-abci/modules/migrationmngr/keeper" - migrationmngrtypes "github.com/evstack/ev-abci/modules/migrationmngr/types" - // ... -) -``` - -1. Add the migration manager keeper to your app struct -2. Register the module in your module manager -3. Configure the migration manager in your app initialization - -### Step 2: Replace Staking Module with Wrapper - -**Goal:** Ensure the `migrationmngr` module is the *sole* source of validator set updates during migration. - -Replace the standard Cosmos SDK `x/staking` module with the **staking wrapper module** provided in `ev-abci`. The wrapper's `EndBlock` method prevents validator updates from the staking module, delegating that responsibility to the `migrationmngr` module during migration. - -In your `app.go` file (and any other files that import the staking module): - -**Replace this:** - -```go -import ( - // ... - "github.com/cosmos/cosmos-sdk/x/staking" - stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" - stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" - // ... -) -``` - -**With this:** - -```go -import ( - // ... - "github.com/evstack/ev-abci/modules/staking" // The wrapper module - stakingkeeper "github.com/evstack/ev-abci/modules/staking/keeper" - stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" // Staking types remain the same - // ... -) -``` - -By changing the import path, your application will automatically use the wrapper module. No other changes to your `EndBlocker` method are needed. 
- ---- - -## Phase 2: Create Upgrade Handler - -Create an upgrade handler in your `app.go` that will be triggered when the governance proposal is executed. - -```go -func (app *App) setupUpgradeHandlers() { - app.UpgradeKeeper.SetUpgradeHandler( - "v2-migrate-to-evolve", // Upgrade name must match governance proposal - func(ctx sdk.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) { - // The upgrade handler can initialize state for the migration manager if needed - // The actual migration will happen during the evolve-migrate step - return app.mm.RunMigrations(ctx, app.configurator, fromVM) - }, - ) -} -``` - -Call this function in your app initialization code in `app.go`. - ---- - -## Phase 3: Create Governance Proposal for Migration - -Create and submit a software upgrade governance proposal to initiate the migration at a specific block height. - -```bash -# Create the governance proposal - tx gov submit-proposal software-upgrade v2-migrate-to-evolve \ - --title "Migrate to Evolve" \ - --description "Upgrade chain to use ev-abci consensus" \ - --upgrade-height \ - --from \ - --chain-id - -# Vote on the proposal (repeat for validators to reach quorum) - tx gov vote yes --from -``` - -Wait for the proposal to pass and for the chain to reach the upgrade height. The chain will halt at the specified height, waiting for the upgrade to be applied. - -### Trigger Migration to Evolve - -After the upgrade proposal has passed, submit the `MsgMigrateToEvolve` message to initiate the actual migration process. This can be done through a governance proposal or directly if your chain's authority allows it. 
- -```bash -# Submit MsgMigrateToEvolve governance proposal (if using governance) - tx gov submit-proposal migrate-to-evolve \ - --title "Trigger Migration to Evolve" \ - --description "Execute migration to ev-abci consensus" \ - --from \ - --chain-id - -# Or submit directly if authority allows (authority address depends on your chain configuration) - tx migrationmngr migrate-to-evolve \ - --from \ - --chain-id -``` - -Once this message is processed, the migration manager module will handle the transition from the PoS validator set to the sequencer-based model. - ---- - -## Phase 4: Wire ev-abci Start Handler in root.go - -**⚠️ Important:** Complete this phase BEFORE the chain halts at the upgrade height. Do NOT start your node yet - you will start it in Phase 6 after running the migration command. - -Modify your node's entrypoint to use the `ev-abci` server commands. - -### Locate Your Application's Entrypoint - -Open the main entrypoint file for your chain's binary, usually found at `cmd//main.go` or `root.go`. - -### Modify the Start Command - -Add the `ev-abci` start handler to your root command. This is similar to the [Ignite Apps evolve template](https://github.com/ignite/apps/blob/main/evolve/template/init.go#L48-L60). - -```go -// cmd//main.go (or root.go) -package main - -import ( - "os" - - "github.com/cosmos/cosmos-sdk/server" - "github.com/spf13/cobra" - - // Import the ev-abci server package - evabci_server "github.com/evstack/ev-abci/server" - - "/app" -) - -func main() { - rootCmd := &cobra.Command{ - Use: "", - Short: "Your App Daemon (ev-abci enabled)", - } - - // Keep existing commands (keys, export, etc.) 
- server.AddCommands(rootCmd, app.DefaultNodeHome, app.New, app.MakeEncodingConfig(), tx.DefaultSignModes) - - // --- Wire ev-abci start handler --- - startCmd := &cobra.Command{ - Use: "start", - Short: "Run the full node with ev-abci", - RunE: func(cmd *cobra.Command, _ []string) error { - return evabci_server.StartHandler(cmd, app.New) - }, - } - - evabci_server.AddFlags(startCmd) - rootCmd.AddCommand(startCmd) - // --- End of ev-abci changes --- - - if err := rootCmd.Execute(); err != nil { - server.HandleError(err) - os.Exit(1) - } -} -``` - -### Build Your Application - -Re-build your application's binary with the updated code: - -```sh -go build -o ./cmd/ -``` - -**⚠️ Important:** Do NOT start the node yet. Proceed directly to Phase 5 to run the migration command. - ---- - -## Phase 5: Run evolve-migrate - -After the chain halts at the upgrade height, run the migration command to transform the CometBFT data to Evolve format. - -**⚠️ Critical:** The node must NOT be running when you execute this command. Ensure all node processes are stopped before proceeding. - -```bash -# Run the migration command - evolve-migrate - -# Optional: specify the DA height for the Evolve state (defaults to 1) - evolve-migrate --da-height -``` - -The `evolve-migrate` command performs the following operations: - -1. **Migrates all blocks** from the CometBFT blockstore to the Evolve store -2. **Converts the CometBFT state** to Evolve state format -3. **Creates `ev_genesis.json`** - a minimal genesis file that the node will automatically detect and use on subsequent startups -4. **Saves state** to the ABCI execution store for compatibility -5. **Seeds sync stores** with the latest migrated header and data -6. **Cleans up migration state** from the application database - -**Important Notes:** - -- The migration processes blocks in reverse order (from latest to earliest) -- If blocks are missing (e.g., due to pruning), they will be skipped. 
Migration stops if more than the configured maximum number of blocks are missing -- Vote extensions are not supported in Evolve - if they were enabled in your chain, they will have no effect after migration -- The command operates on the data in your node's home directory (e.g., `~/.appd/data/`) -- After successful migration, the `ev_genesis.json` file will be used automatically on node restart - ---- - -## Phase 6: Start New ev-abci Node - -Start your node with the migrated state: - -```bash - start -``` - -Verify that the node starts successfully: - -```sh -# Check that ev-abci flags are available - start --help - -# You should see flags like: -# --ev-node.attester-mode -# --ev-node.aggregator -# --ev-node.sequencer-url -# etc. -``` - -Your node is now running with `ev-abci` instead of CometBFT. The chain continues from the same state but with the new consensus engine. diff --git a/content/docs/ev-abci/modules/meta.json b/content/docs/ev-abci/modules/meta.json deleted file mode 100644 index f73c9c0..0000000 --- a/content/docs/ev-abci/modules/meta.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "title": "Modules", - "pages": ["..."] -} diff --git a/content/docs/ev-abci/modules/migration-manager.md b/content/docs/ev-abci/modules/migration-manager.md deleted file mode 100644 index 203cd10..0000000 --- a/content/docs/ev-abci/modules/migration-manager.md +++ /dev/null @@ -1,143 +0,0 @@ -# Migration Manager Module - -Coordinates the transition from CometBFT multi-validator consensus to Evolve single-sequencer mode. 
- -## Purpose - -The migration manager: - -- Stores the designated sequencer address -- Tracks migration height -- Coordinates with the staking wrapper to freeze validators -- Provides the `MsgMigrateToEvolve` message for triggering migration - -## Installation - -### Add to app.go - -```go -import ( - migrationmngr "github.com/evstack/ev-abci/modules/migrationmngr" - migrationmngrkeeper "github.com/evstack/ev-abci/modules/migrationmngr/keeper" - migrationmngrtypes "github.com/evstack/ev-abci/modules/migrationmngr/types" -) - -// Add store key -keys := sdk.NewKVStoreKeys( - // ... other keys - migrationmngrtypes.StoreKey, -) - -// Create keeper -app.MigrationManagerKeeper = migrationmngrkeeper.NewKeeper( - appCodec, - keys[migrationmngrtypes.StoreKey], - app.StakingKeeper, - app.BankKeeper, - authtypes.NewModuleAddress(govtypes.ModuleName).String(), -) - -// Add to module manager -app.ModuleManager = module.NewManager( - // ... other modules - migrationmngr.NewAppModule(appCodec, app.MigrationManagerKeeper), -) -``` - -### Genesis Configuration - -```json -{ - "app_state": { - "migrationmngr": { - "params": { - "sequencer_address": "", - "migration_height": "0" - } - } - } -} -``` - -## Migration Flow - -### 1. Governance Proposal - -Submit a proposal to set migration parameters: - -```bash -appd tx gov submit-proposal set-sequencer \ - --sequencer-address cosmos1... \ - --migration-height 5000001 \ - --from -``` - -### 2. Vote and Pass - -Standard governance voting process. - -### 3. Chain Halts - -At migration height, the chain halts automatically. - -### 4. Run Migration - -```bash -appd evolve-migrate -``` - -### 5. 
Restart with ev-abci - -```bash -appd start \ - --evnode.node.aggregator \ - --evnode.da.address \ - --evnode.signer.passphrase -``` - -## Messages - -### MsgSetMigrationParams - -Set migration parameters (governance-gated): - -```protobuf -message MsgSetMigrationParams { - string authority = 1; - string sequencer_address = 2; - int64 migration_height = 3; -} -``` - -### MsgMigrateToEvolve - -Trigger the migration (called internally): - -```protobuf -message MsgMigrateToEvolve { - string authority = 1; -} -``` - -## Queries - -```bash -# Get migration params -appd query migrationmngr params - -# Get previous validators (post-migration) -appd query migrationmngr previous-validators -``` - -## State - -| Key | Description | -|-----|-------------| -| `params` | Sequencer address and migration height | -| `previous_validators` | Validator set before migration (for reference) | -| `migration_complete` | Boolean flag | - -## Next Steps - -- [Staking Wrapper](/ev-abci/modules/staking-wrapper) — Freeze validator set -- [Migration from CometBFT](/ev-abci/migration-from-cometbft) — Full migration guide diff --git a/content/docs/ev-abci/modules/staking-wrapper.md b/content/docs/ev-abci/modules/staking-wrapper.md deleted file mode 100644 index e9e7160..0000000 --- a/content/docs/ev-abci/modules/staking-wrapper.md +++ /dev/null @@ -1,96 +0,0 @@ -# Staking Wrapper Module - -A wrapper around the Cosmos SDK staking module that prevents validator set changes during migration. - -## Purpose - -When migrating from CometBFT to Evolve, the validator set must be frozen to allow a clean transition to single-sequencer mode. 
The staking wrapper: - -- Prevents new delegations and undelegations from affecting the validator set -- Blocks validator creation and updates -- Allows the migration manager to perform the final transition - -## Installation - -Replace your staking module import: - -```go -// Before -import "github.com/cosmos/cosmos-sdk/x/staking" - -// After -import "github.com/evstack/ev-abci/modules/staking" -``` - -The wrapper is API-compatible with the standard staking module. - -## Behavior - -### Normal Operation - -Before migration is triggered, the wrapper behaves identically to the standard staking module: - -- Delegations work normally -- Validator operations work normally -- Rewards distribution works normally - -### During Migration - -Once the migration manager signals migration mode: - -- `EndBlock` returns an empty validator update set -- Delegation changes are recorded but don't affect validators -- Validator creation/modification is blocked - -### After Migration - -Post-migration, the staking module becomes read-only for validator operations. The single sequencer is now the only block producer. - -## Integration - -### app.go - -```go -import ( - stakingkeeper "github.com/evstack/ev-abci/modules/staking/keeper" - stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" -) - -// In your NewApp function: -app.StakingKeeper = stakingkeeper.NewKeeper( - appCodec, - keys[stakingtypes.StoreKey], - app.AccountKeeper, - app.BankKeeper, - authtypes.NewModuleAddress(govtypes.ModuleName).String(), -) -``` - -### Module Manager - -```go -import ( - staking "github.com/evstack/ev-abci/modules/staking" -) - -// In your module manager: -app.ModuleManager = module.NewManager( - // ... other modules - staking.NewAppModule(appCodec, app.StakingKeeper, app.AccountKeeper, app.BankKeeper), -) -``` - -## Queries - -All standard staking queries remain available: - -```bash -appd query staking validators -appd query staking delegations
-appd query staking pool -``` - -## Next Steps - -- [Migration Manager](/ev-abci/modules/migration-manager) — Coordinate the migration -- [Migration from CometBFT](/ev-abci/migration-from-cometbft) — Full migration guide diff --git a/content/docs/ev-abci/overview.md b/content/docs/ev-abci/overview.md deleted file mode 100644 index fc95639..0000000 --- a/content/docs/ev-abci/overview.md +++ /dev/null @@ -1,76 +0,0 @@ -# ev-abci Overview - -ev-abci is an ABCI adapter that allows Cosmos SDK applications to run on Evolve instead of CometBFT. - -## What is ev-abci? - -ev-abci provides: - -- **Drop-in replacement** — Swap CometBFT for Evolve with minimal code changes -- **ABCI compatibility** — Your existing Cosmos SDK modules work unchanged -- **CometBFT RPC compatibility** — Existing clients and tooling continue to work -- **Migration tooling** — Migrate existing chains from CometBFT to Evolve - -## Architecture - -```text -┌─────────────────────────────────────────┐ -│ Your Cosmos App │ -│ ┌─────────────────────────────────┐ │ -│ │ Cosmos SDK Modules │ │ -│ │ (bank, staking, gov, etc.) │ │ -│ └─────────────────────────────────┘ │ -│ │ ABCI │ -│ ┌───────────────▼─────────────────┐ │ -│ │ ev-abci │ │ -│ │ (ABCI adapter + RPC server) │ │ -│ └───────────────┬─────────────────┘ │ -└──────────────────┼──────────────────────┘ - │ Executor Interface -┌──────────────────▼──────────────────────┐ -│ ev-node │ -│ (consensus + DA + P2P) │ -└─────────────────────────────────────────┘ -``` - -ev-abci implements the Executor interface, translating ev-node's calls into ABCI calls to your application. 
- -## Key Differences from CometBFT - -| Aspect | CometBFT | ev-abci | -|-----------------|----------------------------------|---------------------------| -| Validators | Multiple validators with staking | Single sequencer | -| Consensus | BFT consensus rounds | Sequencer produces blocks | -| Finality | Instant (BFT) | Soft (P2P) → Hard (DA) | -| Block time | ~6s typical | Configurable (100ms+) | -| Vote extensions | Supported | Not supported | - -## Benefits - -- **No validator coordination** — Single sequencer eliminates consensus overhead -- **Faster blocks** — No BFT round-trips, blocks as fast as 100ms -- **DA-secured** — Security from data availability, not validator set -- **Simpler operations** — No validator management, slashing, or jailing - -## Trade-offs - -- **Single sequencer** — One node produces blocks (with forced inclusion for censorship resistance) -- **Different finality model** — Soft confirmation before DA finality -- **No vote extensions** — ABCI++ vote extensions not available - -## Modules - -ev-abci includes helper modules for migration: - -- [Staking Wrapper](/ev-abci/modules/staking-wrapper) — Prevents validator updates during migration -- [Migration Manager](/ev-abci/modules/migration-manager) — Handles validator set transition - -## Repository - -- GitHub: [github.com/evstack/ev-abci](https://github.com/evstack/ev-abci) - -## Next Steps - -- [Cosmos SDK Quickstart](/getting-started/cosmos/quickstart) — Get started -- [Integration Guide](/ev-abci/integration-guide) — Manual integration -- [Migration from CometBFT](/ev-abci/migration-from-cometbft) — Migrate existing chain diff --git a/content/docs/ev-abci/rpc-compatibility.md b/content/docs/ev-abci/rpc-compatibility.md deleted file mode 100644 index 99dffca..0000000 --- a/content/docs/ev-abci/rpc-compatibility.md +++ /dev/null @@ -1,136 +0,0 @@ -# RPC Compatibility - -ev-abci provides CometBFT-compatible RPC endpoints for client compatibility. 
- -## Overview - -Existing Cosmos SDK clients expect CometBFT RPC endpoints. ev-abci implements these endpoints so tools like: - -- Cosmos SDK CLI -- Keplr wallet -- CosmJS -- Block explorers - -continue to work without modification. - -## Supported Endpoints - -### Query Methods - -| Endpoint | Status | Notes | -|----------|--------|-------| -| `/abci_query` | ✓ | Full support | -| `/block` | ✓ | Full support | -| `/block_by_hash` | ✓ | Full support | -| `/block_results` | ✓ | Full support | -| `/blockchain` | ✓ | Full support | -| `/commit` | ✓ | Full support | -| `/consensus_params` | ✓ | Full support | -| `/genesis` | ✓ | Full support | -| `/health` | ✓ | Full support | -| `/status` | ✓ | Full support | -| `/tx` | ✓ | Full support | -| `/tx_search` | ✓ | Full support | -| `/validators` | ✓ | Returns sequencer | - -### Transaction Methods - -| Endpoint | Status | Notes | -|----------|--------|-------| -| `/broadcast_tx_async` | ✓ | Full support | -| `/broadcast_tx_sync` | ✓ | Full support | -| `/broadcast_tx_commit` | ✓ | Waits for inclusion | -| `/check_tx` | ✓ | Full support | - -### Subscription Methods - -| Endpoint | Status | Notes | -|----------|--------|-------| -| `/subscribe` | ✓ | WebSocket events | -| `/unsubscribe` | ✓ | Full support | -| `/unsubscribe_all` | ✓ | Full support | - -## Unsupported Endpoints - -| Endpoint | Reason | -|----------|--------| -| `/consensus_state` | No BFT consensus | -| `/dump_consensus_state` | No BFT consensus | -| `/net_info` | Different P2P model | -| `/num_unconfirmed_txs` | Different mempool | -| `/unconfirmed_txs` | Different mempool | - -## Behavioral Differences - -### Validators - -`/validators` returns the single sequencer rather than a validator set: - -```json -{ - "validators": [ - { - "address": "...", - "voting_power": "1", - "proposer_priority": "0" - } - ], - "count": "1", - "total": "1" -} -``` - -### Commit - -`/commit` returns a simplified commit structure since there's no BFT voting: - -```json -{ - 
"signed_header": { - "header": { ... }, - "commit": { - "height": "100", - "signatures": [ - { - "validator_address": "...", - "signature": "..." - } - ] - } - } -} -``` - -### Block Time - -Block timestamps reflect actual production time, which may be faster than CometBFT's typical 6s blocks. - -## Port Configuration - -Default ports match CometBFT: - -| Port | Purpose | -|------|---------| -| 26657 | RPC | -| 26656 | P2P | - -Configure via flags: - -```bash ---evnode.rpc.address tcp://0.0.0.0:26657 ---evnode.p2p.listen /ip4/0.0.0.0/tcp/26656 -``` - -## Client Configuration - -No client changes needed. Point clients at the same RPC URL: - -```javascript -// CosmJS -const client = await StargateClient.connect("http://localhost:26657"); -``` - -```bash -# CLI -appd config node tcp://localhost:26657 -``` diff --git a/content/docs/ev-reth/configuration.md b/content/docs/ev-reth/configuration.md deleted file mode 100644 index 5ef7821..0000000 --- a/content/docs/ev-reth/configuration.md +++ /dev/null @@ -1,128 +0,0 @@ -# ev-reth Configuration - -Configure ev-reth through chainspec (genesis.json) and command-line flags. - -## Chainspec - -The chainspec defines chain parameters. ev-reth uses standard Ethereum genesis format with Evolve extensions. 
- -### Basic Structure - -```json -{ - "config": { - "chainId": 1337, - "homesteadBlock": 0, - "eip150Block": 0, - "eip155Block": 0, - "eip158Block": 0, - "byzantiumBlock": 0, - "constantinopleBlock": 0, - "petersburgBlock": 0, - "istanbulBlock": 0, - "berlinBlock": 0, - "londonBlock": 0, - "shanghaiTime": 0, - "cancunTime": 0 - }, - "alloc": {}, - "coinbase": "0x0000000000000000000000000000000000000000", - "difficulty": "0x0", - "gasLimit": "0x1c9c380", - "nonce": "0x0", - "timestamp": "0x0" -} -``` - -### Evolve Extensions - -Add under `config.evolve`: - -```json -{ - "config": { - "chainId": 1337, - "evolve": { - "baseFeeSink": "0x...", - "baseFeeRedirectActivationHeight": 0, - "deployAllowlist": { - "admin": "0x...", - "enabled": ["0x..."] - }, - "contractSizeLimit": 49152, - "mintPrecompile": { - "admin": "0x...", - "address": "0x0000000000000000000000000000000000000100" - } - } - } -} -``` - -See [Features](/ev-reth/features/base-fee-redirect) for detailed configuration of each extension. 
- -## Command-Line Flags - -### RPC - -```bash ---http # Enable HTTP JSON-RPC ---http.addr 0.0.0.0 # Listen address ---http.port 8545 # Listen port ---http.api eth,net,web3 # Enabled APIs -``` - -### Engine API - -```bash ---authrpc.addr 0.0.0.0 # Engine API address ---authrpc.port 8551 # Engine API port ---authrpc.jwtsecret jwt.hex # JWT secret file -``` - -### Data - -```bash ---datadir /data # Data directory ---chain genesis.json # Chainspec file -``` - -## Docker - -Default `docker-compose.yml`: - -```yaml -services: - reth: - image: ghcr.io/evstack/ev-reth:latest - ports: - - "8545:8545" - - "8551:8551" - volumes: - - ./data:/data - - ./genesis.json:/genesis.json - - ./jwt.hex:/jwt.hex - command: - - node - - --chain=/genesis.json - - --http - - --http.addr=0.0.0.0 - - --http.api=eth,net,web3,txpool - - --authrpc.addr=0.0.0.0 - - --authrpc.jwtsecret=/jwt.hex -``` - -## JWT Secret - -Generate for Engine API authentication: - -```bash -openssl rand -hex 32 > jwt.hex -``` - -Both ev-reth and ev-node must use the same secret. - -## Next Steps - -- [Engine API](/ev-reth/engine-api) — Communication protocol -- [Chainspec Reference](/reference/configuration/ev-reth-chainspec) — Full field reference diff --git a/content/docs/ev-reth/engine-api.md b/content/docs/ev-reth/engine-api.md deleted file mode 100644 index 2f8f078..0000000 --- a/content/docs/ev-reth/engine-api.md +++ /dev/null @@ -1,177 +0,0 @@ -# Engine API - -ev-node communicates with ev-reth through the Ethereum Engine API, the same protocol used by Ethereum consensus clients. - -## Overview - -The Engine API is a JSON-RPC interface authenticated with JWT. ev-node acts as the consensus client, driving ev-reth (execution client) to build and finalize blocks. 
- -## Authentication - -All Engine API calls require JWT authentication: - -```bash -# Generate shared secret -openssl rand -hex 32 > jwt.hex -``` - -Configure both sides: - -- ev-reth: `--authrpc.jwtsecret jwt.hex` -- ev-node: `--evm.jwt-secret jwt.hex` - -## Block Production Flow - -```text -ev-node ev-reth - │ │ - │ 1. engine_forkchoiceUpdatedV3 │ - │ (headBlockHash, payloadAttributes) │ - │─────────────────────────────────────────►│ - │ │ - │ 2. {payloadId} │ - │◄─────────────────────────────────────────│ - │ │ - │ 3. engine_getPayloadV3(payloadId) │ - │─────────────────────────────────────────►│ - │ │ - │ 4. {executionPayload, blockValue} │ - │◄─────────────────────────────────────────│ - │ │ - │ [ev-node broadcasts to P2P, submits DA] │ - │ │ - │ 5. engine_newPayloadV3(executionPayload)│ - │─────────────────────────────────────────►│ - │ │ - │ 6. {status: VALID} │ - │◄─────────────────────────────────────────│ - │ │ - │ 7. engine_forkchoiceUpdatedV3 │ - │ (newHeadBlockHash) │ - │─────────────────────────────────────────►│ - │ │ -``` - -## Methods - -### engine_forkchoiceUpdatedV3 - -Update the fork choice and optionally start building a new block. - -**Request:** - -```json -{ - "method": "engine_forkchoiceUpdatedV3", - "params": [ - { - "headBlockHash": "0x...", - "safeBlockHash": "0x...", - "finalizedBlockHash": "0x..." - }, - { - "timestamp": "0x...", - "prevRandao": "0x...", - "suggestedFeeRecipient": "0x...", - "withdrawals": [], - "parentBeaconBlockRoot": "0x..." - } - ] -} -``` - -**Response:** - -```json -{ - "payloadStatus": { - "status": "VALID", - "latestValidHash": "0x..." - }, - "payloadId": "0x..." -} -``` - -### engine_getPayloadV3 - -Retrieve a built payload. 
- -**Request:** - -```json -{ - "method": "engine_getPayloadV3", - "params": ["0x...payloadId"] -} -``` - -**Response:** - -```json -{ - "executionPayload": { - "parentHash": "0x...", - "feeRecipient": "0x...", - "stateRoot": "0x...", - "receiptsRoot": "0x...", - "logsBloom": "0x...", - "prevRandao": "0x...", - "blockNumber": "0x1", - "gasLimit": "0x...", - "gasUsed": "0x...", - "timestamp": "0x...", - "extraData": "0x", - "baseFeePerGas": "0x...", - "blockHash": "0x...", - "transactions": ["0x..."] - }, - "blockValue": "0x..." -} -``` - -### engine_newPayloadV3 - -Validate and execute a payload. - -**Request:** - -```json -{ - "method": "engine_newPayloadV3", - "params": [ - { "executionPayload": "..." }, - ["0x...versionedHashes"], - "0x...parentBeaconBlockRoot" - ] -} -``` - -**Response:** - -```json -{ - "status": "VALID", - "latestValidHash": "0x..." -} -``` - -## Status Codes - -| Status | Meaning | -|--------|---------| -| `VALID` | Payload is valid | -| `INVALID` | Payload is invalid | -| `SYNCING` | Node is syncing | -| `ACCEPTED` | Payload accepted but not yet validated | - -## Ports - -| Port | Purpose | -|------|---------| -| 8545 | JSON-RPC (public) | -| 8551 | Engine API (authenticated) | - -## Next Steps - -- [Engine API Reference](/reference/api/engine-api) — Full method reference -- [Configuration](/ev-reth/configuration) — ev-reth settings diff --git a/content/docs/ev-reth/features/base-fee-redirect.md b/content/docs/ev-reth/features/base-fee-redirect.md deleted file mode 100644 index e0f665b..0000000 --- a/content/docs/ev-reth/features/base-fee-redirect.md +++ /dev/null @@ -1,86 +0,0 @@ -# Base Fee Redirect - -Redirect EIP-1559 base fees to a treasury address instead of burning them. - -## Overview - -In standard Ethereum, base fees are burned. 
ev-reth allows redirecting these fees to a specified address, enabling: - -- Protocol revenue collection -- Treasury funding -- DAO-controlled fee distribution - -## Configuration - -In your chainspec (`genesis.json`): - -```json -{ - "config": { - "evolve": { - "baseFeeSink": "0xYOUR_TREASURY_ADDRESS", - "baseFeeRedirectActivationHeight": 0 - } - } -} -``` - -| Field | Description | -|-------|-------------| -| `baseFeeSink` | Address to receive base fees | -| `baseFeeRedirectActivationHeight` | Block height to activate (0 = genesis) | - -## How It Works - -```text -Transaction Fee = Base Fee + Priority Fee - -Standard Ethereum: -├── Base Fee → Burned -└── Priority Fee → Block producer - -With Base Fee Redirect: -├── Base Fee → baseFeeSink address -└── Priority Fee → Block producer (fee recipient) -``` - -## Example - -Treasury at `0x1234...`: - -```json -{ - "config": { - "chainId": 1337, - "evolve": { - "baseFeeSink": "0x1234567890123456789012345678901234567890", - "baseFeeRedirectActivationHeight": 0 - } - } -} -``` - -All base fees from block 0 onward go to the treasury. - -## Activation at Later Height - -To activate after chain launch: - -```json -{ - "config": { - "evolve": { - "baseFeeSink": "0x...", - "baseFeeRedirectActivationHeight": 1000000 - } - } -} -``` - -Fees are burned until block 1,000,000, then redirected. - -## Use Cases - -- **Protocol treasury** — Fund development, grants, or operations -- **Staking rewards** — Distribute to token holders -- **Burn address** — Set to `0x0` to explicitly burn (default behavior) diff --git a/content/docs/ev-reth/features/contract-size-limits.md b/content/docs/ev-reth/features/contract-size-limits.md deleted file mode 100644 index ee90d24..0000000 --- a/content/docs/ev-reth/features/contract-size-limits.md +++ /dev/null @@ -1,73 +0,0 @@ -# Contract Size Limits - -Increase the maximum contract bytecode size beyond Ethereum's 24KB limit. 
- -## Overview - -Ethereum limits contract size to 24,576 bytes (24KB) via [EIP-170](https://eips.ethereum.org/EIPS/eip-170). ev-reth allows increasing this limit for use cases requiring larger contracts: - -- Complex DeFi protocols -- On-chain game logic -- ZK verification contracts - -## Configuration - -In your chainspec (`genesis.json`): - -```json -{ - "config": { - "evolve": { - "contractSizeLimit": 49152 - } - } -} -``` - -| Field | Description | Default | -|-------|-------------|---------| -| `contractSizeLimit` | Max bytecode size in bytes | 24576 (24KB) | - -## Common Values - -| Size | Bytes | Use Case | -|------|-------|----------| -| 24KB | 24576 | Ethereum default | -| 48KB | 49152 | 2x limit | -| 64KB | 65536 | 2.67x limit | -| 128KB | 131072 | Large contracts | - -## Trade-offs - -**Pros:** - -- Deploy larger, more complex contracts -- Avoid splitting logic across multiple contracts -- Simpler contract architecture - -**Cons:** - -- Higher deployment gas costs -- Longer deployment times -- May impact block gas limits - -## Example - -Allow contracts up to 64KB: - -```json -{ - "config": { - "chainId": 1337, - "evolve": { - "contractSizeLimit": 65536 - } - } -} -``` - -## Considerations - -- This is a chain-wide setting—affects all deployments -- Existing tooling may warn about large contracts -- Consider gas costs for deployment and interaction diff --git a/content/docs/ev-reth/features/deploy-allowlist.md b/content/docs/ev-reth/features/deploy-allowlist.md deleted file mode 100644 index 7b44b59..0000000 --- a/content/docs/ev-reth/features/deploy-allowlist.md +++ /dev/null @@ -1,77 +0,0 @@ -# Deploy Allowlist - -Restrict contract deployment to a set of approved addresses. - -## Overview - -By default, any address can deploy contracts. 
The deploy allowlist restricts deployment to explicitly approved addresses, useful for: - -- Permissioned chains -- Controlled rollouts -- Compliance requirements - -## Configuration - -In your chainspec (`genesis.json`): - -```json -{ - "config": { - "evolve": { - "deployAllowlist": { - "admin": "0xADMIN_ADDRESS", - "enabled": [ - "0xDEPLOYER_1", - "0xDEPLOYER_2" - ] - } - } - } -} -``` - -| Field | Description | -|-------|-------------| -| `admin` | Address that can modify the allowlist | -| `enabled` | Addresses allowed to deploy contracts | - -## How It Works - -1. User attempts `CREATE` or `CREATE2` opcode -2. ev-reth checks if sender is in `enabled` list -3. If not allowed, transaction reverts - -## Admin Operations - -The admin can modify the allowlist via precompile calls: - -```solidity -interface IDeployAllowlist { - function addDeployer(address deployer) external; - function removeDeployer(address deployer) external; - function isAllowed(address deployer) external view returns (bool); -} -``` - -Precompile address: `0x0000000000000000000000000000000000000101` - -## Disabling - -To allow unrestricted deployment, omit the `deployAllowlist` config entirely or set an empty `enabled` list with no admin. - -## Example: Single Deployer - -```json -{ - "config": { - "evolve": { - "deployAllowlist": { - "admin": "0xAdminAddress", - "enabled": ["0xAdminAddress"] - } - } - } -} -``` - -Only the admin can deploy contracts initially. They can add more deployers later. 
diff --git a/content/docs/ev-reth/features/meta.json b/content/docs/ev-reth/features/meta.json deleted file mode 100644 index d7e5484..0000000 --- a/content/docs/ev-reth/features/meta.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "title": "Features", - "pages": ["..."] -} diff --git a/content/docs/ev-reth/features/mint-precompile.md b/content/docs/ev-reth/features/mint-precompile.md deleted file mode 100644 index d876c7b..0000000 --- a/content/docs/ev-reth/features/mint-precompile.md +++ /dev/null @@ -1,87 +0,0 @@ -# Mint Precompile - -A custom precompile for minting native tokens. - -## Overview - -The mint precompile allows authorized addresses to mint native tokens (ETH equivalent) directly. This enables: - -- Bridge minting (mint when assets are bridged in) -- Inflation schedules -- Programmatic rewards -- Airdrops - -## Configuration - -In your chainspec (`genesis.json`): - -```json -{ - "config": { - "evolve": { - "mintPrecompile": { - "admin": "0xMINT_ADMIN_ADDRESS", - "address": "0x0000000000000000000000000000000000000100" - } - } - } -} -``` - -| Field | Description | -|-------|-------------| -| `admin` | Address authorized to call mint | -| `address` | Precompile address (conventionally `0x100`) | - -## Interface - -```solidity -interface IMintPrecompile { - // Mint native tokens to recipient - function mint(address recipient, uint256 amount) external; -} -``` - -## Usage - -From an authorized contract: - -```solidity -contract Bridge { - IMintPrecompile constant MINT = IMintPrecompile(0x0000000000000000000000000000000000000100); - - function bridgeIn(address recipient, uint256 amount) external { - // Verify bridge proof... - - // Mint native tokens - MINT.mint(recipient, amount); - } -} -``` - -## Security - -- Only the `admin` address can call `mint()` -- Calls from other addresses revert -- The admin is typically a bridge contract or multisig - -## Changing Admin - -The admin cannot be changed after genesis. 
To update, you would need a chain upgrade with a new chainspec. - -## Example: Bridge Setup - -```json -{ - "config": { - "evolve": { - "mintPrecompile": { - "admin": "0xBridgeContractAddress", - "address": "0x0000000000000000000000000000000000000100" - } - } - } -} -``` - -The bridge contract can mint tokens when users bridge assets from another chain. diff --git a/content/docs/ev-reth/meta.json b/content/docs/ev-reth/meta.json deleted file mode 100644 index d3d6e95..0000000 --- a/content/docs/ev-reth/meta.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "title": "EV-Reth", - "icon": "Cpu", - "pages": ["...", "features"] -} diff --git a/content/docs/ev-reth/overview.md b/content/docs/ev-reth/overview.md deleted file mode 100644 index afd142f..0000000 --- a/content/docs/ev-reth/overview.md +++ /dev/null @@ -1,69 +0,0 @@ -# ev-reth Overview - -ev-reth is a modified [reth](https://github.com/paradigmxyz/reth) Ethereum execution client optimized for Evolve rollups. - -## What is ev-reth? - -ev-reth extends reth with: - -- **Engine API integration** — Driven by ev-node for block production -- **Rollup-specific features** — Base fee redirect, deploy allowlist, custom precompiles -- **Configurable chain parameters** — Contract size limits, custom gas settings - -## Architecture - -```text -┌─────────────────────────────────────────┐ -│ ev-node │ -│ (consensus + DA + P2P) │ -└─────────────────┬───────────────────────┘ - │ Engine API - │ (JWT authenticated) -┌─────────────────▼───────────────────────┐ -│ ev-reth │ -│ (EVM execution) │ -│ ┌───────────┐ ┌───────────────────┐ │ -│ │ State DB │ │ Transaction Pool │ │ -│ └───────────┘ └───────────────────┘ │ -│ ┌───────────────────────────────────┐ │ -│ │ EVM + Precompiles │ │ -│ └───────────────────────────────────┘ │ -└─────────────────────────────────────────┘ -``` - -ev-node drives ev-reth through the Engine API: - -1. ev-node calls `engine_forkchoiceUpdated` with payload attributes -2. 
ev-reth builds a block from pending transactions -3. ev-node calls `engine_getPayload` to retrieve the block -4. ev-node broadcasts and submits to DA -5. ev-node calls `engine_newPayload` to finalize - -## Features - -| Feature | Description | -|---------|-------------| -| [Base Fee Redirect](/ev-reth/features/base-fee-redirect) | Send base fees to treasury instead of burning | -| [Deploy Allowlist](/ev-reth/features/deploy-allowlist) | Restrict who can deploy contracts | -| [Contract Size Limits](/ev-reth/features/contract-size-limits) | Increase max contract size beyond 24KB | -| [Mint Precompile](/ev-reth/features/mint-precompile) | Native token minting for bridges | - -## When to Use ev-reth - -Use ev-reth when you want: - -- Full EVM compatibility -- Ethereum tooling (Foundry, Hardhat, etc.) -- Standard wallet support (MetaMask, etc.) -- High-performance Rust execution - -## Repository - -- GitHub: [github.com/evstack/ev-reth](https://github.com/evstack/ev-reth) -- Based on: [paradigmxyz/reth](https://github.com/paradigmxyz/reth) - -## Next Steps - -- [EVM Quickstart](/getting-started/evm/quickstart) — Get started -- [Configuration](/ev-reth/configuration) — Chainspec and settings -- [Engine API](/ev-reth/engine-api) — How ev-node communicates with ev-reth diff --git a/content/docs/getting-started/cosmos/meta.json b/content/docs/getting-started/cosmos/meta.json deleted file mode 100644 index 57b6fa9..0000000 --- a/content/docs/getting-started/cosmos/meta.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "title": "Cosmos SDK", - "pages": ["quickstart"] -} diff --git a/content/docs/getting-started/cosmos/quickstart.md b/content/docs/getting-started/cosmos/quickstart.md deleted file mode 100644 index 2e65d74..0000000 --- a/content/docs/getting-started/cosmos/quickstart.md +++ /dev/null @@ -1,86 +0,0 @@ -# Cosmos SDK Quickstart - -Get a Cosmos SDK chain running on Evolve using Ignite CLI. 
- -## Prerequisites - -- Go 1.22+ -- [Ignite CLI](https://docs.ignite.com/welcome/install) - -## 1. Start Local DA - -```bash -go install github.com/evstack/ev-node/tools/local-da@latest -local-da -``` - -Keep this running in a separate terminal. - -## 2. Create a New Chain - -```bash -ignite scaffold chain mychain --address-prefix mychain -cd mychain -``` - -## 3. Add Evolve - -Install the Evolve plugin for Ignite: - -```bash -ignite app install -g github.com/ignite/apps/evolve -``` - -Add Evolve to your chain: - -```bash -ignite evolve add -``` - -This modifies your chain to use ev-abci instead of CometBFT. - -## 4. Build and Initialize - -```bash -make install - -mychaind init mynode --chain-id mychain-1 -mychaind keys add mykey --keyring-backend test -mychaind genesis add-genesis-account mykey 1000000000stake --keyring-backend test -mychaind genesis gentx mykey 1000000stake --chain-id mychain-1 --keyring-backend test -mychaind genesis collect-gentxs -``` - -## 5. Start the Chain - -```bash -mychaind start \ - --evnode.node.aggregator \ - --evnode.da.address http://localhost:7980 \ - --evnode.signer.passphrase secret -``` - -You should see blocks being produced: - -```text -INF block marked as DA included blockHeight=1 -INF block marked as DA included blockHeight=2 -``` - -## 6. Interact - -In another terminal: - -```bash -# Check balance -mychaind query bank balances $(mychaind keys show mykey -a --keyring-backend test) - -# Send tokens -mychaind tx bank send mykey mychain1... 
1000stake --keyring-backend test --chain-id mychain-1 -y -``` - -## Next Steps - -- [Integrate ev-abci](/ev-abci/integration-guide) — Manual integration without Ignite -- [Migrate from CometBFT](/ev-abci/migration-from-cometbft) — Migrate an existing chain with state -- [Connect to Celestia](/guides/da-layers/celestia) — Production DA layer diff --git a/content/docs/getting-started/custom/implement-executor.md b/content/docs/getting-started/custom/implement-executor.md deleted file mode 100644 index 7a1d518..0000000 --- a/content/docs/getting-started/custom/implement-executor.md +++ /dev/null @@ -1,224 +0,0 @@ -# Executor Interface - -The Executor interface is the boundary between ev-node and your execution layer. ev-node calls these methods during block production and synchronization. This page documents each method, its contract, and example implementations. - -## Interface Overview - -```go -type Executor interface { - InitChain(ctx context.Context, genesis Genesis) ([]byte, error) - GetTxs(ctx context.Context) ([][]byte, error) - ExecuteTxs(ctx context.Context, txs [][]byte, height uint64, timestamp time.Time) (*ExecutionResult, error) - SetFinal(ctx context.Context, height uint64) error -} -``` - -## InitChain - -Called once when the chain starts for the first time. 
- -```go -func (e *MyExecutor) InitChain(ctx context.Context, genesis Genesis) ([]byte, error) -``` - -**Parameters:** - -- `genesis` — Contains initial state, chain ID, and configuration - -**Returns:** - -- Initial state root (hash of genesis state) -- Error if initialization fails - -**Responsibilities:** - -- Parse genesis data -- Initialize state storage -- Set up initial accounts/balances -- Return deterministic state root - -**Example:** - -```go -func (e *MyExecutor) InitChain(ctx context.Context, genesis Genesis) ([]byte, error) { - // Parse genesis - var state GenesisState - if err := json.Unmarshal(genesis.AppState, &state); err != nil { - return nil, err - } - - // Initialize state - for addr, balance := range state.Balances { - e.db.Set([]byte(addr), []byte(balance)) - } - - // Compute and return state root - return e.db.Hash(), nil -} -``` - -## GetTxs - -Called by the sequencer to get pending transactions for the next block. - -```go -func (e *MyExecutor) GetTxs(ctx context.Context) ([][]byte, error) -``` - -**Returns:** - -- Slice of transaction bytes from your mempool -- Error if retrieval fails - -**Responsibilities:** - -- Return transactions ready for inclusion -- Optionally prioritize by fee, nonce, etc. -- Remove invalid transactions - -**Example:** - -```go -func (e *MyExecutor) GetTxs(ctx context.Context) ([][]byte, error) { - txs := e.mempool.GetPending(100) // Get up to 100 txs - return txs, nil -} -``` - -## ExecuteTxs - -The core execution method. Called for every block. 
- -```go -func (e *MyExecutor) ExecuteTxs( - ctx context.Context, - txs [][]byte, - height uint64, - timestamp time.Time, -) (*ExecutionResult, error) -``` - -**Parameters:** - -- `txs` — Ordered transactions to execute -- `height` — Block height -- `timestamp` — Block timestamp - -**Returns:** - -- `ExecutionResult` containing new state root and gas used -- Error only for system failures (not tx failures) - -**Responsibilities:** - -- Execute each transaction in order -- Update state -- Track gas usage -- Handle transaction failures gracefully -- Return new state root - -**Example:** - -```go -func (e *MyExecutor) ExecuteTxs( - ctx context.Context, - txs [][]byte, - height uint64, - timestamp time.Time, -) (*ExecutionResult, error) { - var totalGas uint64 - - for _, txBytes := range txs { - tx, err := DecodeTx(txBytes) - if err != nil { - continue // Skip invalid tx - } - - gas, err := e.executeTx(tx) - if err != nil { - // Log but continue - tx failure != block failure - continue - } - - totalGas += gas - } - - // Commit state changes - stateRoot := e.db.Commit() - - return &ExecutionResult{ - StateRoot: stateRoot, - GasUsed: totalGas, - }, nil -} -``` - -## SetFinal - -Called when a block is confirmed on the DA layer. - -```go -func (e *MyExecutor) SetFinal(ctx context.Context, height uint64) error -``` - -**Parameters:** - -- `height` — The block height that is now DA-finalized - -**Responsibilities:** - -- Mark state as finalized -- Prune old state if desired -- Trigger any finality-dependent logic - -**Example:** - -```go -func (e *MyExecutor) SetFinal(ctx context.Context, height uint64) error { - // Mark height as final - e.finalHeight = height - - // Optionally prune old state - if height > 100 { - e.db.Prune(height - 100) - } - - return nil -} -``` - -## State Management Tips - -1. **Determinism** — ExecuteTxs must be deterministic. Same inputs must produce same state root. - -2. **Atomicity** — Either all state changes for a block commit, or none do. 
- -3. **Crash recovery** — State should be recoverable after crash. ev-node will replay blocks if needed. - -4. **Gas metering** — Track computational cost to prevent DoS. - -## Testing - -Test your executor in isolation: - -```go -func TestExecuteTxs(t *testing.T) { - exec := NewMyExecutor() - - // Initialize - _, err := exec.InitChain(ctx, genesis) - require.NoError(t, err) - - // Execute - result, err := exec.ExecuteTxs(ctx, txs, 1, time.Now()) - require.NoError(t, err) - require.NotEmpty(t, result.StateRoot) -} -``` - -## Next Steps - -- [Executor Interface Reference](/reference/interfaces/executor) — Full type definitions -- [Testapp Source](https://github.com/evstack/ev-node/tree/main/apps/testapp) — Reference implementation -- [EVM Quickstart](/getting-started/evm/quickstart) — Using the EVM executor (ev-reth) -- [Cosmos SDK Quickstart](/getting-started/cosmos/quickstart) — Using the Cosmos SDK executor (ev-abci) diff --git a/content/docs/getting-started/custom/meta.json b/content/docs/getting-started/custom/meta.json deleted file mode 100644 index fe72ded..0000000 --- a/content/docs/getting-started/custom/meta.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "title": "Custom Executor", - "pages": ["implement-executor"] -} diff --git a/content/docs/getting-started/evm/deploy-contracts.md b/content/docs/getting-started/evm/deploy-contracts.md deleted file mode 100644 index 09e18ca..0000000 --- a/content/docs/getting-started/evm/deploy-contracts.md +++ /dev/null @@ -1,144 +0,0 @@ -# Deploy Contracts - -Deploy smart contracts to your Evolve EVM chain using Foundry or Hardhat. 
- -## Network Configuration - -| Setting | Local | Testnet (example) | -|---------|-------|-------------------| -| RPC URL | | | -| Chain ID | 1337 | Your chain ID | -| Currency | ETH | Your native token | - -## Foundry - -### Install - -```bash -curl -L https://foundry.paradigm.xyz | bash -foundryup -``` - -### Configure - -Create or update `foundry.toml`: - -```toml -[profile.default] -src = "src" -out = "out" -libs = ["lib"] - -[rpc_endpoints] -local = "http://localhost:8545" -``` - -### Deploy - -```bash -# Deploy a contract -forge create src/MyContract.sol:MyContract \ - --rpc-url local \ - --private-key $PRIVATE_KEY - -# Deploy with constructor args -forge create src/Token.sol:Token \ - --rpc-url local \ - --private-key $PRIVATE_KEY \ - --constructor-args "MyToken" "MTK" 18 - -# Deploy and verify (if explorer supports it) -forge create src/MyContract.sol:MyContract \ - --rpc-url local \ - --private-key $PRIVATE_KEY \ - --verify -``` - -### Interact - -```bash -# Call a read function -cast call $CONTRACT_ADDRESS "balanceOf(address)" $WALLET_ADDRESS --rpc-url local - -# Send a transaction -cast send $CONTRACT_ADDRESS "transfer(address,uint256)" $TO_ADDRESS 1000 \ - --rpc-url local \ - --private-key $PRIVATE_KEY -``` - -## Hardhat - -### Install - -```bash -npm init -y -npm install --save-dev hardhat @nomicfoundation/hardhat-toolbox -npx hardhat init -``` - -### Configure - -Update `hardhat.config.js`: - -```javascript -require("@nomicfoundation/hardhat-toolbox"); - -module.exports = { - solidity: "0.8.24", - networks: { - local: { - url: "http://localhost:8545", - accounts: [process.env.PRIVATE_KEY], - }, - }, -}; -``` - -### Deploy - -Create `scripts/deploy.js`: - -```javascript -const hre = require("hardhat"); - -async function main() { - const Contract = await hre.ethers.getContractFactory("MyContract"); - const contract = await Contract.deploy(); - await contract.waitForDeployment(); - - console.log("Deployed to:", await contract.getAddress()); -} - 
-main().catch((error) => { - console.error(error); - process.exit(1); -}); -``` - -Run: - -```bash -npx hardhat run scripts/deploy.js --network local -``` - -## Prefunded Accounts - -The default chainspec includes prefunded accounts for testing. Check your `genesis.json` `alloc` section for available addresses. - -To add your own: - -```json -{ - "alloc": { - "0xYourAddress": { - "balance": "0x200000000000000000000000000000000000000000000000000000000000000" - } - } -} -``` - -## Next Steps - -- [Configure ev-reth](/getting-started/evm/setup-ev-reth) — Chainspec customization -- [Base Fee Redirect](/ev-reth/features/base-fee-redirect) — Send fees to treasury -- [Deploy Allowlist](/ev-reth/features/deploy-allowlist) — Restrict contract deployment diff --git a/content/docs/getting-started/evm/meta.json b/content/docs/getting-started/evm/meta.json deleted file mode 100644 index 7b94163..0000000 --- a/content/docs/getting-started/evm/meta.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "title": "EVM", - "pages": ["quickstart", "setup-ev-reth", "deploy-contracts"] -} diff --git a/content/docs/getting-started/evm/quickstart.md b/content/docs/getting-started/evm/quickstart.md deleted file mode 100644 index d0a192e..0000000 --- a/content/docs/getting-started/evm/quickstart.md +++ /dev/null @@ -1,91 +0,0 @@ -# EVM Quickstart - -Get an EVM rollup running locally in under 5 minutes. - -## Prerequisites - -- Go 1.22+ -- Docker -- Git - -## 1. Start Local DA - -```bash -go install github.com/evstack/ev-node/tools/local-da@latest -local-da -``` - -You should see: - -```text -INF Listening on host=localhost port=7980 -``` - -Keep this running in a separate terminal. - -## 2. Start ev-reth - -```bash -git clone https://github.com/evstack/ev-reth.git -cd ev-reth -docker compose up -d -``` - -This starts reth with Evolve's Engine API configuration. The default ports: - -- `8545` — JSON-RPC -- `8551` — Engine API - -## 3. 
Start ev-node

In a new terminal:

```bash
git clone https://github.com/evstack/ev-node.git
cd ev-node
make build-evm
```

Initialize and start:

```bash
./build/evm init --evnode.node.aggregator --evnode.signer.passphrase secret

./build/evm start \
  --evnode.node.aggregator \
  --evnode.signer.passphrase secret \
  --evnode.node.block_time 1s
```

You should see blocks being produced:

```text
INF block marked as DA included blockHeight=1
INF block marked as DA included blockHeight=2
```

## 4. Connect a Wallet

Add the network to MetaMask:

| Setting | Value |
|---------|-------|
| Network Name | Evolve Local |
| RPC URL | http://localhost:8545 |
| Chain ID | 1337 |
| Currency | ETH |

## 5. Deploy a Contract

With Foundry:

```bash
forge create src/Counter.sol:Counter --rpc-url http://localhost:8545 --private-key $PRIVATE_KEY
```

## Next Steps

- [Configure ev-reth](/getting-started/evm/setup-ev-reth) — Customize chainspec, features
- [Deploy Contracts](/getting-started/evm/deploy-contracts) — Foundry and Hardhat setup
- [Connect to Celestia](/guides/da-layers/celestia) — Production DA layer
- [Run a Full Node](/guides/running-nodes/full-node) — Non-sequencer node setup diff --git a/content/docs/getting-started/evm/setup-ev-reth.md b/content/docs/getting-started/evm/setup-ev-reth.md deleted file mode 100644 index 8475312..0000000 --- a/content/docs/getting-started/evm/setup-ev-reth.md +++ /dev/null @@ -1,134 +0,0 @@ -# Configure ev-reth -ev-reth is a modified [reth](https://github.com/paradigmxyz/reth) client with Evolve-specific features. This guide covers configuration options. - -## Chainspec - -The chainspec (`genesis.json`) defines your chain's parameters. ev-reth extends the standard Ethereum genesis format with Evolve-specific fields. 
- -### Minimal Chainspec - -```json -{ - "config": { - "chainId": 1337, - "homesteadBlock": 0, - "eip150Block": 0, - "eip155Block": 0, - "eip158Block": 0, - "byzantiumBlock": 0, - "constantinopleBlock": 0, - "petersburgBlock": 0, - "istanbulBlock": 0, - "berlinBlock": 0, - "londonBlock": 0, - "shanghaiTime": 0, - "cancunTime": 0 - }, - "alloc": { - "0xYOUR_ADDRESS": { - "balance": "0x200000000000000000000000000000000000000000000000000000000000000" - } - }, - "coinbase": "0x0000000000000000000000000000000000000000", - "difficulty": "0x0", - "gasLimit": "0x1c9c380", - "nonce": "0x0", - "timestamp": "0x0" -} -``` - -### Evolve Extensions - -Add these under `config.evolve`: - -```json -{ - "config": { - "chainId": 1337, - "evolve": { - "baseFeeSink": "0xTREASURY_ADDRESS", - "baseFeeRedirectActivationHeight": 0, - "deployAllowlist": { - "admin": "0xADMIN_ADDRESS", - "enabled": ["0xDEPLOYER1", "0xDEPLOYER2"] - }, - "contractSizeLimit": 49152, - "mintPrecompile": { - "admin": "0xMINT_ADMIN", - "address": "0x0000000000000000000000000000000000000100" - } - } - } -} -``` - -| Field | Description | -|-------|-------------| -| `baseFeeSink` | Address to receive base fees instead of burning | -| `deployAllowlist` | Restrict contract deployment to allowlisted addresses | -| `contractSizeLimit` | Override default 24KB contract size limit | -| `mintPrecompile` | Enable native token minting precompile | - -## Docker Configuration - -The default `docker-compose.yml` in ev-reth: - -```yaml -services: - reth: - image: ghcr.io/evstack/ev-reth:latest - ports: - - "8545:8545" # JSON-RPC - - "8551:8551" # Engine API - volumes: - - ./data:/data - - ./genesis.json:/genesis.json - - ./jwt.hex:/jwt.hex - command: - - node - - --chain=/genesis.json - - --http - - --http.addr=0.0.0.0 - - --http.api=eth,net,web3,txpool - - --authrpc.addr=0.0.0.0 - - --authrpc.jwtsecret=/jwt.hex -``` - -### JWT Secret - -Generate a JWT secret for Engine API authentication: - -```bash -openssl rand -hex 32 > 
jwt.hex -``` - -Both ev-reth and ev-node must use the same JWT secret. - -## Environment Variables - -| Variable | Description | Default | -|----------|-------------|---------| -| `RUST_LOG` | Log level | `info` | -| `RETH_DATA_DIR` | Data directory | `/data` | - -## Command Line Flags - -Common flags when running ev-reth directly: - -```bash -ev-reth node \ - --chain genesis.json \ - --http \ - --http.addr 0.0.0.0 \ - --http.port 8545 \ - --http.api eth,net,web3,txpool,debug,trace \ - --authrpc.addr 0.0.0.0 \ - --authrpc.port 8551 \ - --authrpc.jwtsecret jwt.hex -``` - -## Next Steps - -- [ev-reth Features](/ev-reth/features/base-fee-redirect) — Detailed feature documentation -- [ev-reth Chainspec Reference](/reference/configuration/ev-reth-chainspec) — Full configuration reference diff --git a/content/docs/getting-started/meta.json b/content/docs/getting-started/meta.json deleted file mode 100644 index 5050de6..0000000 --- a/content/docs/getting-started/meta.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "title": "Getting Started", - "icon": "Rocket", - "pages": ["cosmos", "evm", "custom"] -} diff --git a/content/docs/guides/advanced/based-sequencing.md b/content/docs/guides/advanced/based-sequencing.md deleted file mode 100644 index bf1f235..0000000 --- a/content/docs/guides/advanced/based-sequencing.md +++ /dev/null @@ -1,76 +0,0 @@ -# Based Sequencing - -Based sequencing is a sequencing model where transaction ordering is determined by the base layer (Celestia). Even in based mode, the chain still uses a single sequencer node, but it deterministically derives the next batch of transactions from the base layer rather than choosing its own ordering. This removes the sequencer's ability to censor or reorder transactions. - -## How Based Sequencing Works - -### Transaction Submission - -Users submit transactions to the base layer's forced inclusion namespace. 
These transactions are posted as blobs to the DA layer, where they become part of the canonical transaction ordering. - -```text -User → Base Layer (DA) → Full Nodes retrieve and execute -``` - -### Deterministic Batch Construction - -All full nodes independently construct identical batches by: - -1. **Retrieving forced inclusion transactions** from the base layer at epoch boundaries -2. **Applying forkchoice rules** to determine batch composition: - - `MaxBytes`: Maximum byte size per batch (respects block size limits) - - DA epoch boundaries -3. **Smoothing large transactions** across multiple blocks when necessary - -### Epoch-Based Processing - -Forced inclusion transactions are retrieved in epochs defined by `DAEpochForcedInclusion`. For example, with an epoch size of 10: - -- DA heights 100-109 form one epoch -- DA heights 110-119 form the next epoch -- Transactions from each epoch must be included before the epoch ends - -Epochs durations determine the block time in based sequencing. -Additionally, because no headers are published, the lazy mode has no effect. The block time is a factor of the DA layer's block time. - -## Block Smoothing - -When forced inclusion transactions exceed the `MaxBytes` limit for a single block, they can be "smoothed" across multiple blocks within the same epoch. 
This ensures that: - -- Large transactions don't block the chain -- All transactions are eventually included -- The system remains censorship-resistant - -### Example - -```text -Epoch [100, 104]: - - Block 1: Includes 1.5 MB of forced inclusion txs (partial) - - Block 2: Includes remaining 0.5 MB + new regular txs - - All epoch transactions included before DA height 105 -``` - -## Trust Assumptions - -Based sequencing minimizes trust assumptions: - -- **No trusted sequencer** - ordering comes from the base layer -- **No proposer selection** - every full node derives blocks independently -- **Deterministic consensus** - all honest nodes converge on the same chain -- **Base layer security** - inherits the security guarantees of the DA layer -- **No malicious actor concern** - invalid blocks are automatically rejected by validation rules - -## Comparison with Single Sequencer - -| Feature | Based Sequencing | Single Sequencer | -| --------------------- | ----------------------------- | ----------------------------- | -| Decentralization | ✅ Fully decentralized | ❌ Single point of control | -| Censorship Resistance | ✅ Guaranteed by base layer | ⚠️ Guaranteed by base layer | -| Latency | ⚠️ Depends on DA layer (~12s) | ✅ Low latency (configurable) | -| Block Time Control | ❌ Factor of DA block time | ✅ Configurable by sequencer | -| Trust Assumptions | ✅ Minimal (only DA layer) | ❌ Trust the sequencer | - -## Further Reading - -- [Data Availability](../data-availability.md) - Understanding the DA layer -- [Transaction Flow](../transaction-flow.md) - How transactions move through the system diff --git a/content/docs/guides/advanced/custom-precompiles.md b/content/docs/guides/advanced/custom-precompiles.md deleted file mode 100644 index 94eaa69..0000000 --- a/content/docs/guides/advanced/custom-precompiles.md +++ /dev/null @@ -1,279 +0,0 @@ -# Custom Precompiles - -ev-reth supports custom EVM precompiled contracts for chain-specific functionality. 
This guide covers the built-in precompiles and how to add custom ones. - -## What Are Precompiles? - -Precompiles are special contracts at predefined addresses that execute native code instead of EVM bytecode. They're used for: - -- Computationally expensive operations (cryptography, hashing) -- Chain-specific functionality (minting, governance) -- Operations impossible or inefficient in Solidity - -## Built-in ev-reth Precompiles - -### Mint Precompile - -Allows an authorized address to mint native tokens. Useful for bridging scenarios. - -**Address:** `0x0000000000000000000000000000000000000100` - -**Configuration (chainspec):** - -```json -{ - "config": { - "evolve": { - "mintPrecompile": { - "admin": "0xBridgeContract", - "address": "0x0000000000000000000000000000000000000100" - } - } - } -} -``` - -**Interface:** - -```solidity -interface IMint { - /// @notice Mint native tokens to a recipient - /// @param recipient Address to receive tokens - /// @param amount Amount to mint (in wei) - function mint(address recipient, uint256 amount) external; -} -``` - -**Usage:** - -```solidity -// Only callable by admin address -IMint(0x0000000000000000000000000000000000000100).mint( - 0xRecipient, - 1 ether -); -``` - -See [Mint Precompile Reference](/ev-reth/features/mint-precompile) for details. - -## Creating Custom Precompiles - -Custom precompiles require modifying ev-reth source code. 
- -### Step 1: Define the Precompile - -Create a new precompile in `crates/precompiles/src/`: - -```rust -// my_precompile.rs -use revm::precompile::{Precompile, PrecompileOutput, PrecompileResult}; -use revm::primitives::{Bytes, U256}; - -pub const MY_PRECOMPILE_ADDRESS: Address = address!("0000000000000000000000000000000000000200"); - -pub fn my_precompile(input: &Bytes, gas_limit: u64) -> PrecompileResult { - // Check gas - let gas_used = 1000; // Base gas cost - if gas_used > gas_limit { - return Err(PrecompileError::OutOfGas); - } - - // Parse input - // input[0..4] = function selector - // input[4..] = encoded arguments - - // Execute logic - let result = process_input(input)?; - - Ok(PrecompileOutput { - gas_used, - bytes: result, - }) -} - -fn process_input(input: &Bytes) -> Result { - // Your custom logic here - Ok(Bytes::new()) -} -``` - -### Step 2: Register the Precompile - -Add the precompile to the precompile set: - -```rust -// In precompiles/src/lib.rs -pub fn evolve_precompiles(chain_spec: &ChainSpec) -> PrecompileSet { - let mut precompiles = standard_precompiles(); - - // Add mint precompile if configured - if let Some(mint_config) = &chain_spec.evolve.mint_precompile { - precompiles.insert(mint_config.address, mint_precompile); - } - - // Add your custom precompile - if chain_spec.evolve.my_feature_enabled { - precompiles.insert(MY_PRECOMPILE_ADDRESS, my_precompile); - } - - precompiles -} -``` - -### Step 3: Add Chainspec Configuration - -Define configuration structure: - -```rust -// In chainspec types -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct MyPrecompileConfig { - pub address: Address, - pub admin: Option
, - pub some_parameter: u64, -} -``` - -Update chainspec parsing to include new config. - -### Step 4: Build and Test - -```bash -# Build ev-reth -cargo build --release - -# Run tests -cargo test --package ev-reth-precompiles -``` - -## Precompile Best Practices - -### Gas Metering - -Charge gas proportional to computation: - -```rust -fn my_precompile(input: &Bytes, gas_limit: u64) -> PrecompileResult { - // Base cost - let mut gas_used = 100; - - // Per-byte cost for input processing - gas_used += input.len() as u64 * 3; - - // Additional cost for expensive operations - if requires_crypto_operation(input) { - gas_used += 10000; - } - - if gas_used > gas_limit { - return Err(PrecompileError::OutOfGas); - } - - // Process... -} -``` - -### Access Control - -For privileged operations, check caller: - -```rust -fn admin_only_precompile( - input: &Bytes, - context: &PrecompileContext, - config: &MyConfig, -) -> PrecompileResult { - // Verify caller is admin - if context.caller != config.admin { - return Err(PrecompileError::Custom("unauthorized".into())); - } - - // Process... -} -``` - -### Input Validation - -Always validate input thoroughly: - -```rust -fn my_precompile(input: &Bytes) -> PrecompileResult { - // Check minimum length - if input.len() < 36 { // 4 byte selector + 32 byte arg - return Err(PrecompileError::InvalidInput); - } - - // Validate selector - let selector = &input[0..4]; - if selector != MY_FUNCTION_SELECTOR { - return Err(PrecompileError::InvalidInput); - } - - // Parse and validate arguments - let amount = U256::from_be_slice(&input[4..36]); - if amount.is_zero() { - return Err(PrecompileError::InvalidInput); - } - - // Process... 
-} -``` - -### Determinism - -Precompiles must be deterministic: - -- No random number generation -- No external network calls -- No time-dependent logic -- Same input always produces same output - -## Testing Precompiles - -### Unit Tests - -```rust -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_my_precompile_success() { - let input = encode_input(/* args */); - let result = my_precompile(&input, 100000).unwrap(); - assert_eq!(result.bytes, expected_output()); - } - - #[test] - fn test_my_precompile_out_of_gas() { - let input = encode_input(/* args */); - let result = my_precompile(&input, 10); // Too little gas - assert!(matches!(result, Err(PrecompileError::OutOfGas))); - } -} -``` - -### Integration Tests - -Test precompile calls from Solidity: - -```solidity -// test/MyPrecompile.t.sol -contract MyPrecompileTest is Test { - address constant PRECOMPILE = 0x0000000000000000000000000000000000000200; - - function testPrecompileCall() public { - (bool success, bytes memory result) = PRECOMPILE.call( - abi.encodeWithSignature("myFunction(uint256)", 100) - ); - assertTrue(success); - // Assert result... - } -} -``` - -## See Also - -- [Mint Precompile](/ev-reth/features/mint-precompile) - Built-in minting -- [ev-reth Configuration](/ev-reth/configuration) - Chainspec setup -- [ev-reth Overview](/ev-reth/overview) - Architecture diff --git a/content/docs/guides/advanced/forced-inclusion.md b/content/docs/guides/advanced/forced-inclusion.md deleted file mode 100644 index b7c5199..0000000 --- a/content/docs/guides/advanced/forced-inclusion.md +++ /dev/null @@ -1,88 +0,0 @@ -# Forced Inclusion - -Forced inclusion is a censorship-resistance mechanism that allows users to submit transactions directly to the DA layer when the sequencer refuses to include them. This ensures users always have an escape hatch in a single sequencer model. - -## How the Single Sequencer Model Works - -1. 
**Transaction Submission:** - - Users submit transactions to the execution environment via RPC or other interfaces. -2. **Transaction Collection and Ordering:** - - The execution environment collects incoming transactions. - - The sequencer requests a batch of transactions from the execution environment to be included in the next block. -3. **Block Production:** - - **Without lazy mode:** the sequencer produces new blocks at fixed intervals. - - **With lazy mode:** the sequencer produces a block once either - - enough transactions are collected - - the lazy-mode block interval elapses - More info in the [lazy mode configuration guide](../config.md#lazy-mode-lazy-aggregator). - - Each block contains a batch of ordered transactions and metadata. - -4. **Data Availability Posting:** - - The sequencer posts the block data to the configured DA layer (e.g., Celestia). - - This ensures that anyone can access the data needed to reconstruct the chain state. - -5. **State Update:** - - The sequencer updates the chain state based on the new block and makes the updated state available to light clients and full nodes. - -## Transaction Flow Diagram - -```mermaid -sequenceDiagram - participant User - participant ExecutionEnv as Execution Environment - participant Sequencer - participant DA as Data Availability Layer - - User->>ExecutionEnv: Submit transaction - Sequencer->>ExecutionEnv: Request batch for block - ExecutionEnv->>Sequencer: Provide batch of transactions - Sequencer->>DA: Post block data - Sequencer->>ExecutionEnv: Update state - ExecutionEnv->>User: State/query response -``` - -## Forced Inclusion - -While the single sequencer controls transaction ordering, the system provides a censorship-resistance mechanism called **forced inclusion**. This ensures users can always include their transactions even if the sequencer refuses to process them. - -### How Forced Inclusion Works - -1. 
**Direct DA Submission:** - - Users can submit transactions directly to the DA layer's forced inclusion namespace - - These transactions bypass the sequencer entirely - -2. **Epoch-Based Retrieval:** - - The sequencer retrieves forced inclusion transactions from the DA layer at epoch boundaries - - Epochs are defined by `DAEpochForcedInclusion` in the genesis configuration - -3. **Mandatory Inclusion:** - - The sequencer MUST include all forced inclusion transactions from an epoch before the epoch ends - - Full nodes verify that forced inclusion transactions are properly included - -4. **Smoothing:** - - If forced inclusion transactions exceed block size limits (`MaxBytes`), they can be spread across multiple blocks within the same epoch - - All transactions must be included before moving to the next epoch - -### Example - -```text -Epoch [100, 109] (epoch size = 10): - - User submits tx directly to DA at height 102 - - Sequencer retrieves forced txs at epoch start (height 100) - - Sequencer includes forced tx in blocks before height 110 -``` - -See [Based Sequencing](./based-sequencing.md) for a fully decentralized alternative that relies entirely on forced inclusion. - -## Detecting Malicious Sequencer Behavior - -Full nodes continuously monitor the sequencer to ensure it follows consensus rules, particularly around forced inclusion: - -### Censorship Detection - -If a sequencer fails to include forced inclusion transactions past their epoch boundary, full nodes will: - -1. **Detect the violation** - missing transactions from past epochs -2. **Reject invalid blocks** - do not build on top of censoring blocks -3. **Log the violation** with transaction hashes and epoch details -4. 
**Halt consensus** - the chain cannot progress with a malicious sequencer diff --git a/content/docs/guides/advanced/meta.json b/content/docs/guides/advanced/meta.json deleted file mode 100644 index baff7a7..0000000 --- a/content/docs/guides/advanced/meta.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "title": "Advanced", - "pages": ["..."] -} diff --git a/content/docs/guides/ai-docs.md b/content/docs/guides/ai-docs.md deleted file mode 100644 index ae43d79..0000000 --- a/content/docs/guides/ai-docs.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -title: AI-Ready Documentation -description: Use Evolve docs with AI coding tools like Claude Code, Cursor, ChatGPT, and more ---- - -# AI-Ready Documentation - -Evolve documentation is optimized for AI-assisted development. Every page is available as clean, structured markdown — ready to feed into your favorite AI coding tool. - -## Available Endpoints - -| Endpoint | Description | -|----------|-------------| -| [`/llms.txt`](/llms.txt) | Curated index of all documentation pages with titles, URLs, and descriptions | -| [`/llms-full.txt`](/llms-full.txt) | Complete documentation content in a single markdown file | -| `/api/md/{page-path}` | Individual page as processed markdown (e.g., `/api/md/overview/architecture`) | - -## Using with AI Tools - -### Claude Code - -Point Claude Code at the full documentation: - -```bash -claude "Read https://ev.xyz/llms-full.txt, I want to ask questions about Evolve" -``` - -Or reference specific pages: - -```bash -claude "Read https://ev.xyz/api/md/guides/quick-start and help me set up my first rollup" -``` - -### Cursor - -In Cursor, add the docs URL to your project context: - -1. Open **Settings** → **Features** → **Docs** -2. Add `https://ev.xyz/llms.txt` as a documentation source -3. 
Cursor will index the documentation for use in chat and completions - -### ChatGPT - -Paste the llms.txt URL directly into a conversation: - -``` -Read https://ev.xyz/llms-full.txt and help me understand how to deploy an EVM rollup with Evolve. -``` - -### Other AI Tools - -Any AI tool that supports the [llms.txt standard](https://llmstxt.org/) can automatically discover and consume Evolve documentation. The `/llms.txt` endpoint follows the community standard used by Vercel, Cloudflare, Supabase, and other developer platforms. - -## Copy from the UI - -Every documentation page includes **Copy Markdown** and **Open** buttons (visible in the table of contents on desktop, or above the title on mobile). Use these to quickly copy page content into any AI tool. - -## How It Works - -The LLM-optimized endpoints are automatically generated from the same markdown source files that power this documentation site. When a new page is added, it's instantly available through all endpoints — no extra configuration needed. - -- **`/llms.txt`** follows the [llms.txt specification](https://llmstxt.org/), similar to `robots.txt` but for AI agents -- **`/llms-full.txt`** concatenates all pages into a single file, ideal for loading into an AI's context window -- **Per-page markdown** strips frontmatter, imports, and JSX — delivering clean, readable content optimized for language models diff --git a/content/docs/guides/celestia-gas-calculator.mdx b/content/docs/guides/celestia-gas-calculator.mdx deleted file mode 100644 index daef852..0000000 --- a/content/docs/guides/celestia-gas-calculator.mdx +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: Celestia Gas Calculator -description: Interactive estimator that mirrors Celestia MsgPayForBlobs gas logic with a focus on header sizing. ---- - -# Celestia Gas Calculator - -Interactive calculator to estimate Celestia DA costs based on your rollup's block production rate and transaction throughput. 
All calculations mirror Celestia's `DefaultEstimateGas` logic, with fees reported in `TIA` based on your specified gas price in `uTIA / gas`. - -> **Important**: These are estimates only. Actual costs may vary based on network conditions, gas price fluctuations, and blob size optimizations. Use these projections as a planning guide, not exact values. - -## How it works - -The calculator is organized into five sections: - -### 1. Block production - -Configure your rollup's block production rate. Set your block time (e.g., 250ms, 1s) to establish the base cadence of block production. - -### 2. Batching strategy - -Select how blocks are batched before submission to the DA layer. Four strategies are available: - -- **Immediate**: Submits as soon as any blocks are available. Best for low-latency requirements where cost is not a concern. -- **Size-based**: Waits until the batch reaches a size threshold (fraction of max blob size). Best for maximizing blob utilization and minimizing costs when latency is flexible. -- **Time-based**: Waits for a time interval before submitting. Provides predictable submission timing aligned with DA block times. -- **Adaptive** (Recommended): Balances between size and time constraints—submits when either the size threshold is reached OR the max delay expires. - -Configure strategy parameters: - -- **DA block time**: The block time of the DA chain (default: 6s for Celestia) -- **Batch size threshold**: For size/adaptive strategies, the fraction of max blob size to fill before submitting (default: 80%) -- **Batch max delay**: For time/adaptive strategies, the maximum wait time before submitting (default: DA block time) -- **Batch minimum items**: Minimum number of blocks to accumulate before submission - -### 3. Data workload - -Model your transaction throughput and calldata usage: - -- **EVM mode**: Customize your transaction mix across common ERC-20, ERC-721, ERC-1155, and native transfers. 
The visual donut chart shows the weighted distribution of transaction types and calculates the average calldata bytes per transaction. Use "Randomize configuration" for quick testing or manually adjust weights in the customization panel. -- **Cosmos SDK mode**: Coming soon - -The calculator translates your transaction rate and calldata into Celestia blob gas requirements, projecting costs per submission, per second, and annually. - -For EVM workloads, data submissions are chunked into 500 KiB blobs (mirroring the batching logic in `da_submitter.go`). If a cadence produces more than 500 KiB of calldata in a window, the tool automatically simulates multiple blobs—and therefore multiple PayForBlobs transactions—so base gas and data gas scale accordingly. - -### 4. Gas parameters - -Review the Celestia mainnet gas parameters used for calculations: - -- **Fixed cost**: 65,000 gas per submission -- **Gas per blob byte**: 8 gas per byte -- **Share size**: 480 bytes -- **Per-blob static gas**: 0 gas - -Set your expected gas price and optionally account for the one-time 10,000 gas surcharge if this is the first transaction for the account. - -> **Note**: Gas parameters are currently locked to Celestia mainnet defaults. Live parameter fetching and manual overrides will be added in a future update. - -### 5. Estimation - -View comprehensive cost breakdowns including: - -- Total gas per submission and corresponding fees -- Detailed breakdown of header costs, data costs, and baseline gas -- Annual cost projections -- Throughput metrics (transactions per second, month, and year) - - diff --git a/content/docs/guides/cometbft-to-evolve.md b/content/docs/guides/cometbft-to-evolve.md deleted file mode 100644 index 155dc30..0000000 --- a/content/docs/guides/cometbft-to-evolve.md +++ /dev/null @@ -1,50 +0,0 @@ -# How to Turn Your CometBFT App into an Evolve App - -This guide will walk you through the process of turning your existing CometBFT app into an Evolve app. 
By integrating Evolve into your CometBFT-based blockchain, you can leverage enhanced modularity and data availability features. - -This guide assumes you have a CometBFT app set up and [Ignite CLI](https://docs.ignite.com) installed. - -:::warning -This tutorial is currently being updated to reflect the latest changes using the evolve ignite app. -Please check back later for the updated version. -::: - -## Install Evolve {#install-evolve} - -You need to install Evolve in your CometBFT app. Open a terminal in the directory where your app is located and run the following command: - -```bash -ignite app install github.com/ignite/apps/evolve@%evolveIgniteAppVersion% -``` - -## Add Evolve Features to Your CometBFT App {#add-evolve-features} - -Now that Evolve is installed, you can add Evolve features to your existing blockchain app. Run the following command to integrate Evolve: - -```bash -ignite evolve add -``` - -## Initialize Evolve {#initialize-evolve} - -To prepare your app for Evolve, you'll need to initialize it. - -Run the following command to initialize Evolve: - -```bash -ignite evolve init -``` - -## Start Your Evolve App {#start-evolve-app} - -Once everything is configured, you can start your Evolve-enabled CometBFT app or (simply evolve app). Use the following command to start your blockchain: - -```bash - start --evnode.aggregator -``` - -## Summary - -By following this guide, you've successfully converted your CometBFT app into an Evolve app. - -To learn more about how to config your DA, Sequencing, and Execution, please check out those tutorial sections. diff --git a/content/docs/guides/create-genesis.md b/content/docs/guides/create-genesis.md deleted file mode 100644 index 5886325..0000000 --- a/content/docs/guides/create-genesis.md +++ /dev/null @@ -1,130 +0,0 @@ -# How to create a genesis for your chain - -This guide will walk you through the process of setting up a genesis for your chain. 
Follow the steps below to initialize your chain, add a genesis account, and start the chain. - -## Pre-requisities - -For this guide you need to have a chain directory where you have created and built your chain. - -If you don't have a chain directory yet, you can initialize a simple ignite chain by following [this tutorial](./gm-world.md) - -:::tip -This guide will use the simple ignite chain created in linked guide. Make sure to update any relevant variables to match your chain. -::: - -## 1. Setting variables - -First, set the necessary variables for your chain in the terminal, here is an example: - -```sh -VALIDATOR_NAME=validator1 -CHAIN_ID=gm -KEY_NAME=chain-key -CHAINFLAG="--chain-id ${CHAIN_ID}" -TOKEN_AMOUNT="10000000000000000000000000stake" -STAKING_AMOUNT="1000000000stake" -``` - -## Rebuild your chain - -Ensure that `.gm` folder is present at `/Users/you/.gm` (if not, follow a [Guide](./gm-world.md) to set it up) and run the following command to (re)generate an entrypoint binary out of the code: - -```sh -just install -``` - -Once completed, run the following command to ensure that the `/Users/you/.gm` directory is present: - -```sh -ignite evolve init -``` - -This (re)creates an `gmd` binary that will be used for the rest of the tutorials to run all the operations on the chain. 
- -## Resetting existing genesis/chain data - -Reset any existing chain data: - -```sh -gmd comet unsafe-reset-all -``` - -Reset any existing genesis data: - -```sh -rm -rf $HOME/.$CHAIN_ID/config/gentx -rm $HOME/.$CHAIN_ID/config/genesis.json -``` - -## Initializing the validator - -Initialize the validator with the chain ID you set: - -```sh -gmd init $VALIDATOR_NAME --chain-id $CHAIN_ID -``` - -## Adding a key to keyring backend - -Add a key to the keyring-backend: - -```sh -gmd keys add $KEY_NAME --keyring-backend test -``` - -## Adding a genesis account - -Add a genesis account with the specified token amount: - -```sh -gmd genesis add-genesis-account $KEY_NAME $TOKEN_AMOUNT --keyring-backend test -``` - -## Setting the staking amount in the genesis transaction - -Set the staking amount in the genesis transaction: - -```sh -gmd genesis gentx $KEY_NAME $STAKING_AMOUNT --chain-id $CHAIN_ID --keyring-backend test -``` - -## Collecting genesis transactions - -Collect the genesis transactions: - -```sh -gmd genesis collect-gentxs -``` - -## Configuring the genesis file - -Copy the centralized sequencer address into `genesis.json`: - -```sh -ADDRESS=$(jq -r '.address' ~/.$CHAIN_ID/config/priv_validator_key.json) -PUB_KEY=$(jq -r '.pub_key' ~/.$CHAIN_ID/config/priv_validator_key.json) -jq --argjson pubKey "$PUB_KEY" '.consensus["validators"]=[{"address": "'$ADDRESS'", "pub_key": $pubKey, "power": "1000", "name": "Evolve Sequencer"}]' ~/.$CHAIN_ID/config/genesis.json > temp.json && mv temp.json ~/.$CHAIN_ID/config/genesis.json -``` - -## Starting the chain - -Finally, start the chain with your start command. - -For example, start the simple ignite chain with the following command: - -```sh -gmd start --evnode.node.aggregator -``` - -## Share the genesis file - -Once the sequencer is running, share the genesis file with your peers. You can find the genesis file at `~/.${CHAIN_ID}/config/genesis.json`. 
-Before doing so, add a `da_start_height` field to the genesis file, that corresponds to the height at which the first height was included on the DA layer. This height can be fetched directly from the [sequencer RPC](https://github.com/evstack/ev-node/blob/v1.0.0-beta.5/proto/evnode/v1/state_rpc.proto). - -```sh -jq '.da_start_height = 1' ~/.$CHAIN_ID/config/genesis.json > temp.json && mv temp.json ~/.$CHAIN_ID/config/genesis.json -``` - -## Summary - -By following these steps, you will set up the genesis for your chain, initialize the validator, add a genesis account, and start the chain. This guide provides a basic framework for configuring and starting your chain using the gm-world binary. Make sure you initialized your chain correctly, and use the `gmd` command for all operations. diff --git a/content/docs/guides/da-layers/celestia.md b/content/docs/guides/da-layers/celestia.md deleted file mode 100644 index 307dde6..0000000 --- a/content/docs/guides/da-layers/celestia.md +++ /dev/null @@ -1,198 +0,0 @@ -# Celestia - -This guide covers connecting your Evolve chain to Celestia for production data availability. - -## Prerequisites - -- Completed an Evolve quickstart tutorial -- Familiarity with running a Celestia light node - -## Running a Celestia Light Node - -Before starting your Evolve chain, you need a Celestia light node running and synced. - -Follow the [Celestia light node documentation](https://docs.celestia.org/how-to-guides/light-node) to install, initialize, and start a light node for your target network (Arabica, Mocha, or Mainnet). 
- -## Configuring Evolve for Celestia - -### Required Configuration - -The following flags are required to connect to Celestia: - -| Flag | Description | -|------|-------------| -| `--evnode.da.address` | Celestia node RPC endpoint | -| `--evnode.da.auth_token` | JWT authentication token | -| `--evnode.da.header_namespace` | Namespace for block headers | -| `--evnode.da.data_namespace` | Namespace for transaction data | - -### Get DA Block Height - -Query the current DA height to set as your starting point: - -```bash -DA_BLOCK_HEIGHT=$(celestia header network-head | jq -r '.result.header.height') -echo "Your DA_BLOCK_HEIGHT is $DA_BLOCK_HEIGHT" -``` - -### Get Authentication Token - -Generate a write token for your light node: - -**Arabica:** - -```bash -AUTH_TOKEN=$(celestia light auth write --p2p.network arabica) -``` - -**Mocha:** - -```bash -AUTH_TOKEN=$(celestia light auth write --p2p.network mocha) -``` - -**Mainnet:** - -```bash -AUTH_TOKEN=$(celestia light auth write) -``` - -### Set Namespaces - -Choose unique namespaces for your chain's headers and data: - -```bash -DA_HEADER_NAMESPACE="my_chain_headers" -DA_DATA_NAMESPACE="my_chain_data" -``` - -The namespace values are automatically encoded by ev-node for Celestia compatibility. - -You can use the same namespace for both headers and data, or separate them for optimized syncing (light clients can sync headers only). - -### Set DA Address - -Default Celestia light node port is 26658. 
**Note:** Connection to a celestia-node DA requires a websocket connection: - -```bash -DA_ADDRESS=ws://localhost:26658 -``` - -## Running Your Chain - -Start your chain with Celestia configuration: - -```bash -evnode start \ - --evnode.node.aggregator \ - --evnode.da.auth_token $AUTH_TOKEN \ - --evnode.da.header_namespace $DA_HEADER_NAMESPACE \ - --evnode.da.data_namespace $DA_DATA_NAMESPACE \ - --evnode.da.address $DA_ADDRESS -``` - -For Cosmos SDK chains: - -```bash -appd start \ - --evnode.node.aggregator \ - --evnode.da.auth_token $AUTH_TOKEN \ - --evnode.da.header_namespace $DA_HEADER_NAMESPACE \ - --evnode.da.data_namespace $DA_DATA_NAMESPACE \ - --evnode.da.address $DA_ADDRESS -``` - -## Viewing Your Chain Data - -Once running, you can view your chain's data on Celestia block explorers: - -- [Celenium (Arabica)](https://arabica.celenium.io/) -- [Celenium (Mocha)](https://mocha.celenium.io/) -- [Celenium (Mainnet)](https://celenium.io/) - -Search by your namespace or account address to see submitted blobs. - -## Configuration Options - -### Gas Price - -By default, ev-node uses automatic gas price detection. Keep the default unless you have an operational reason to override it: - -```bash ---evnode.da.gas_price 0.01 -``` - -Higher gas prices result in faster inclusion during congestion. Omit this flag to use the automatic default. - -### Block Time - -Set the expected DA block time (affects retry timing): - -```bash ---evnode.da.block_time 6s -``` - -Celestia's block time is approximately 6 seconds. - -### Multiple Signing Addresses - -For high-throughput chains, use multiple signing addresses to avoid nonce conflicts: - -```bash ---evnode.da.signing_addresses celestia1abc...,celestia1def...,celestia1ghi... -``` - -All addresses must be funded and loaded in the Celestia node's keyring. 
- -## Funding Your Account - -### Testnet (Mocha/Arabica) - -Get testnet TIA from faucets: - -- [Mocha Faucet](https://faucet.celestia-mocha.com/) -- [Arabica Faucet](https://faucet.celestia-arabica.com/) - -### Mainnet - -Purchase TIA and transfer to your Celestia light node address. - -Check your address: - -```bash -celestia state account-address -``` - -## Troubleshooting - -### Out of Funds - -If you see `Code: 19` errors, your account is out of TIA: - -1. Fund your account -2. Increase gas price to unstick pending transactions -3. Restart your chain - -See [Troubleshooting Guide](/guides/operations/troubleshooting) for details. - -### Connection Refused - -Verify your Celestia node is running: - -```bash -curl http://localhost:26658/header/sync_state -``` - -### Token Expired - -Regenerate your auth token: - -```bash -celestia light auth write --p2p.network -``` - -## See Also - -- [Local DA Guide](/guides/da-layers/local-da) - Development setup -- [Troubleshooting](/guides/operations/troubleshooting) - Common issues -- [Configuration Reference](/reference/configuration/ev-node-config) - All DA options diff --git a/content/docs/guides/da-layers/local-da.md b/content/docs/guides/da-layers/local-da.md deleted file mode 100644 index 280fcf9..0000000 --- a/content/docs/guides/da-layers/local-da.md +++ /dev/null @@ -1,188 +0,0 @@ -# Local DA - -Local DA is a development-only data availability layer for testing Evolve chains without connecting to a real DA network. - -## Overview - -Local DA provides: - -- Fast, local blob storage -- No authentication required -- No gas fees -- Instant "finality" - -**Warning:** Local DA is for development only. It provides no actual data availability guarantees. - -## Installation - -Install the local-da binary: - -```bash -go install github.com/evstack/ev-node/tools/local-da@latest -``` - -Or build from source: - -```bash -cd ev-node/tools/local-da -go build -o local-da . 
-``` - -## Running Local DA - -Start the local DA server: - -```bash -local-da -``` - -Default output: - -```text -INF NewLocalDA: initialized LocalDA module=local-da -INF Listening on host=localhost maxBlobSize=1974272 module=da port=7980 -INF server started listening on=localhost:7980 module=da -``` - -### Configuration - -| Flag | Default | Description | -|------|---------|-------------| -| `--host` | `localhost` | Listen address | -| `--port` | `7980` | Listen port | - -Example with custom port: - -```bash -local-da --port 8080 -``` - -## Connecting Your Chain - -Start your Evolve chain with the local DA address: - -```bash -evnode start \ - --evnode.node.aggregator \ - --evnode.da.address http://localhost:7980 -``` - -For Cosmos SDK chains: - -```bash -appd start \ - --evnode.node.aggregator \ - --evnode.da.address http://localhost:7980 -``` - -## Features - -### No Authentication - -Unlike Celestia, local DA requires no auth token: - -```bash -# Celestia requires ---evnode.da.auth_token - -# Local DA does not ---evnode.da.address http://localhost:7980 -``` - -### No Namespace Required - -Namespace is optional with local DA: - -```bash -# Optional ---evnode.da.namespace my_namespace -``` - -### Instant Submission - -Blobs are stored immediately with no block time delay. - -## Use Cases - -### Local Development - -Test your chain logic without DA layer complexity: - -```bash -# Terminal 1: Start local DA -local-da - -# Terminal 2: Start your chain -evnode start --evnode.da.address http://localhost:7980 -``` - -### CI/CD Testing - -Use local DA in automated tests: - -```bash -# Start local DA in background -local-da & -LOCAL_DA_PID=$! - -# Run tests -go test ./... 
- -# Cleanup -kill $LOCAL_DA_PID -``` - -### Integration Testing - -Test multi-node setups locally: - -```bash -# Start local DA -local-da --port 7980 - -# Start sequencer -evnode start \ - --evnode.node.aggregator \ - --evnode.da.address http://localhost:7980 \ - --evnode.p2p.listen /ip4/0.0.0.0/tcp/7676 - -# Start full node (separate terminal) -evnode start \ - --evnode.da.address http://localhost:7980 \ - --evnode.p2p.peers /ip4/127.0.0.1/tcp/7676/p2p/ -``` - -## Limitations - -Local DA is **not suitable for**: - -- Production deployments -- Security testing -- Performance benchmarking (no real network latency) -- Testing DA-specific features (proofs, commitments) - -## Transitioning to Celestia - -When ready for production, switch to Celestia: - -1. Set up a Celestia light node -2. Update your start command: - -```bash -# From local DA ---evnode.da.address http://localhost:7980 - -# To Celestia ---evnode.da.address http://localhost:26658 ---evnode.da.auth_token $AUTH_TOKEN ---evnode.da.header_namespace $HEADER_NAMESPACE ---evnode.da.data_namespace $DATA_NAMESPACE -``` - -See [Celestia Guide](/guides/da-layers/celestia) for full instructions. 
- -## See Also - -- [Celestia Guide](/guides/da-layers/celestia) - Production DA setup -- [EVM Quickstart](/getting-started/evm/quickstart) - Getting started with EVM -- [Cosmos Quickstart](/getting-started/cosmos/quickstart) - Getting started with Cosmos SDK diff --git a/content/docs/guides/da-layers/meta.json b/content/docs/guides/da-layers/meta.json deleted file mode 100644 index 44195e6..0000000 --- a/content/docs/guides/da-layers/meta.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "title": "DA Layers", - "pages": ["..."] -} diff --git a/content/docs/guides/da/blob-decoder.md b/content/docs/guides/da/blob-decoder.md deleted file mode 100644 index 8879f39..0000000 --- a/content/docs/guides/da/blob-decoder.md +++ /dev/null @@ -1,158 +0,0 @@ -# Blob Decoder Tool - -The blob decoder is a utility tool for decoding and inspecting blobs from Celestia (DA) layers. It provides both a web interface and API for decoding blob data into human-readable format. - -## Overview - -The blob decoder helps developers and operators inspect the contents of blobs submitted to DA layers. It can decode: - -- Raw blob data (hex or base64 encoded) -- Block data structures -- Transaction payloads -- Protobuf-encoded messages - -## Usage - -### Starting the Server - -```bash -# Run with default port (8080) -go run tools/blob-decoder/main.go -``` - -The server will start and display: - -- Web interface URL: `http://localhost:8080` -- API endpoint: `http://localhost:8080/api/decode` - -### Web Interface - -1. Open your browser to `http://localhost:8080` -2. Paste your blob data in the input field -3. Select the encoding format (hex or base64) -4. 
Click "Decode" to see the parsed output - -### API Usage - -The decoder provides a REST API for programmatic access: - -```bash -# Decode hex-encoded blob -curl -X POST http://localhost:8080/api/decode \ - -H "Content-Type: application/json" \ - -d '{ - "data": "0x1234abcd...", - "encoding": "hex" - }' - -# Decode base64-encoded blob -curl -X POST http://localhost:8080/api/decode \ - -H "Content-Type: application/json" \ - -d '{ - "data": "SGVsbG8gV29ybGQ=", - "encoding": "base64" - }' -``` - -#### API Request Format - -```json -{ - "data": "string", // The encoded blob data - "encoding": "string" // Either "hex" or "base64" -} -``` - -#### API Response Format - -```json -{ - "success": true, - "decoded": { - // Decoded data structure - }, - "error": "string" // Only present if success is false -} -``` - -## Supported Data Types - -### Block Data - -The decoder can parse ev-node block structures: - -- Block height -- Timestamp -- Parent hash -- Transaction list -- Validator information -- Data commitments - -### Transaction Data - -Decodes individual transactions including: - -- Transaction type -- Sender/receiver addresses -- Value/amount -- Gas parameters -- Payload data - -### Protobuf Messages - -Automatically detects and decodes protobuf-encoded messages used in ev-node: - -- Block headers -- Transaction batches -- State updates -- DA commitments - -## Examples - -### Decoding a Block Blob - -```bash -# Example block blob (hex encoded) -curl -X POST http://localhost:8080/api/decode \ - -H "Content-Type: application/json" \ - -d '{ - "data": "0a2408011220...", - "encoding": "hex" - }' -``` - -Response: - -```json -{ - "success": true, - "decoded": { - "height": 100, - "timestamp": "2024-01-15T10:30:00Z", - "parentHash": "0xabc123...", - "transactions": [ - { - "type": "transfer", - "from": "0x123...", - "to": "0x456...", - "value": "1000000000000000000" - } - ] - } -} -``` - -### Decoding DA Commitment - -```bash -curl -X POST http://localhost:8080/api/decode \ 
- -H "Content-Type: application/json" \ - -d '{ - "data": "eyJjb21taXRtZW50IjogIi4uLiJ9", - "encoding": "base64" - }' -``` - -### Celestia - -For Celestia blobs, you can decode namespace data and payment information from [celenium](https://celenium.io/namespaces). diff --git a/content/docs/guides/da/celestia-da.md b/content/docs/guides/da/celestia-da.md deleted file mode 100644 index b74b847..0000000 --- a/content/docs/guides/da/celestia-da.md +++ /dev/null @@ -1,147 +0,0 @@ -# Using Celestia as DA - -## 🌞 Introduction {#introduction} - -This tutorial serves as a comprehensive guide for deploying your chain on Celestia's data availability (DA) network. From the Evolve perspective, there's no difference in posting blocks to Celestia's testnets or Mainnet Beta. - -Before proceeding, ensure that you have completed the [gm-world](../gm-world.md) tutorial, which covers installing the Testapp CLI and running a chain against a local DA network. - -## 🪶 Running a Celestia light node - -Before you can start your chain node, you need to initiate, sync, and fund a light node on one of Celestia's networks on a compatible version: - -Find more information on how to run a light node in the [Celestia documentation](https://celestia.org/run-a-light-node/#start-up-a-node). 
- -:::code-group - -```sh [Arabica] -Evolve Version: %celestiaNodeArabicaEvolveTag% -Celestia Node Version: %celestiaNodeArabicaTag% -``` - -```sh [Mocha] -Evolve Version: %celestiaNodeMochaEvolveTag% -Celestia Node Version: %celestiaNodeMochaTag% -``` - -```sh [Mainnet] -Evolve Version: %celestiaNodeMainnetEvolveTag% -Celestia Node Version: %celestiaNodeMainnetTag% -``` - -::: - -- [Arabica Devnet](https://docs.celestia.org/how-to-guides/arabica-devnet) -- [Mocha Testnet](https://docs.celestia.org/how-to-guides/mocha-testnet) -- [Mainnet Beta](https://docs.celestia.org/how-to-guides/mainnet) - -The main difference lies in how you fund your wallet address: using testnet TIA or [TIA](https://docs.celestia.org/learn/tia#overview-of-tia) for Mainnet Beta. - -After successfully starting a light node, it's time to start posting the batches of blocks of data that your chain generates to Celestia. - -## 🏗️ Prerequisites {#prerequisites} - -- `gmd` CLI installed from the [gm-world](../gm-world.md) tutorial. - -## 🛠️ Configuring flags for DA - -Now that we are posting to the Celestia DA instead of the local DA, the `evolve start` command requires three DA configuration flags: - -- `--evnode.da.start_height` -- `--evnode.da.auth_token` -- `--evnode.da.namespace` - -:::tip -Optionally, you could also set the `--evnode.da.block_time` flag. This should be set to the finality time of the DA layer, not its actual block time, as Evolve does not handle reorganization logic. The default value is 15 seconds. -::: - -Let's determine which values to provide for each of them. - -First, let's query the DA layer start height using our light node. 
- -```bash -DA_BLOCK_HEIGHT=$(celestia header network-head | jq -r '.result.header.height') -echo -e "\n Your DA_BLOCK_HEIGHT is $DA_BLOCK_HEIGHT \n" -``` - -The output of the command above will look similar to this: - -```bash - Your DA_BLOCK_HEIGHT is 2127672 -``` - -Now, let's obtain the authentication token of your light node using the following command: - -:::code-group - -```bash [Arabica Devnet] -AUTH_TOKEN=$(celestia light auth write --p2p.network arabica) -echo -e "\n Your DA AUTH_TOKEN is $AUTH_TOKEN \n" -``` - -```bash [Mocha Testnet] -AUTH_TOKEN=$(celestia light auth write --p2p.network mocha) -echo -e "\n Your DA AUTH_TOKEN is $AUTH_TOKEN \n" -``` - -```bash [Mainnet Beta] -AUTH_TOKEN=$(celestia light auth write) -echo -e "\n Your DA AUTH_TOKEN is $AUTH_TOKEN \n" -``` - -::: - -The output of the command above will look similar to this: - -```bash - Your DA AUTH_TOKEN is eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJwdWJsaWMiLCJyZWFkIiwid3JpdGUiXX0.cSrJjpfUdTNFtzGho69V0D_8kyECn9Mzv8ghJSpKRDE -``` - -Next, let's set up the namespace to be used for posting data on Celestia. Evolve supports separate namespaces for headers and data, but for simplicity, we'll use a single namespace for both: - -```bash -DA_NAMESPACE="fancy_namespace" -``` - -**Advanced Configuration:** For production deployments, you can use separate namespaces for headers and data to optimize syncing: - -- `--evnode.da.header_namespace` for block headers -- `--evnode.da.data_namespace` for transaction data - -The namespace values are automatically encoded by the node to ensure compatibility with Celestia. - -[Learn more about namespaces](https://docs.celestia.org/tutorials/node-tutorial#namespaces). 
- -Lastly, set your DA address for your light node, which by default runs at -port 26658: - -```bash -DA_ADDRESS=http://localhost:26658 -``` - -## 🔥 Running your chain connected to Celestia light node - -Finally, let's initiate the chain node with all the flags: - -```bash -gmd start \ - --evnode.node.aggregator \ - --evnode.da.auth_token $AUTH_TOKEN \ - --evnode.da.header_namespace $DA_NAMESPACE \ - --evnode.da.data_namespace $DA_NAMESPACE \ - --evnode.da.address $DA_ADDRESS -``` - -Now, the chain is running and posting blocks (aggregated in batches) to Celestia. You can view your chain by using your namespace or account on one of Celestia's block explorers. - -For example, [here on Celenium for Arabica](https://arabica.celenium.io/). - -Other explorers: - -- [Arabica testnet](https://docs.celestia.org/how-to-guides/arabica-devnet) -- [Mocha testnet](https://docs.celestia.org/how-to-guides/mocha-testnet) -- [Mainnet Beta](https://docs.celestia.org/how-to-guides/mainnet) - -## 🎉 Next steps - -Congratulations! You've built a local chain that posts data to Celestia's DA layer. Well done! Now, go forth and build something great! Good luck! diff --git a/content/docs/guides/da/local-da.md b/content/docs/guides/da/local-da.md deleted file mode 100644 index 2775400..0000000 --- a/content/docs/guides/da/local-da.md +++ /dev/null @@ -1,51 +0,0 @@ -# Using Local DA - -## Introduction {#introduction} - -This tutorial serves as a comprehensive guide for using the [local-da](../../../tools/local-da) with your chain. - -Before proceeding, ensure that you have completed the [build a chain](../gm-world.md) tutorial, which covers setting-up, building and running your chain. 
- -## Setting Up a Local DA Network - -To set up a local DA network node on your machine, run the following command to install the local DA node: - -```bash -go install github.com/evstack/ev-node/tools/local-da@latest -``` - -Once installed, start the node by running `local-da`; it will then listen on port `7980`. - -## Configuring your chain to connect to the local DA network - -To connect your chain to the local DA network, you need to pass the `--evnode.da.address` flag with the local DA node address. - -## Run your chain - -Start your chain node with the following command, ensuring to include the DA address flag: - -:::code-group - -```sh [Quick Start] -testapp start --evnode.da.address http://localhost:7980 -``` - -```sh [gm-world Chain] -testapp start \ - --evnode.node.aggregator \ - --evnode.da.address http://localhost:7980 -``` - -::: - -You should see the following log message indicating that your chain is connected to the local DA network: - -```shell -11:07AM INF NewLocalDA: initialized LocalDA module=local-da -11:07AM INF Listening on host=localhost maxBlobSize=1974272 module=da port=7980 -11:07AM INF server started listening on=localhost:7980 module=da -``` - -## Summary - -By following these steps, you will set up a local DA network node and configure your chain to post data to it. This setup is useful for testing and development in a controlled environment. 
You can find more information in the [local-da README](../../../tools/local-da/README.md) diff --git a/content/docs/guides/da/meta.json b/content/docs/guides/da/meta.json deleted file mode 100644 index 32edbc2..0000000 --- a/content/docs/guides/da/meta.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "title": "DA", - "pages": ["local-da", "visualizer", "celestia-da"] -} diff --git a/content/docs/guides/da/visualizer.md b/content/docs/guides/da/visualizer.md deleted file mode 100644 index 55ebc99..0000000 --- a/content/docs/guides/da/visualizer.md +++ /dev/null @@ -1,240 +0,0 @@ -# DA Visualizer - -The Data Availability (DA) Visualizer is a built-in monitoring tool in Evolve that provides real-time insights into blob submissions to the DA layer. It offers a web-based interface for tracking submission statistics, monitoring DA layer health, and analyzing blob details. - -**Note**: Only aggregator nodes submit data to the DA layer. Non-aggregator nodes will not display submission data. - -## Overview - -The DA Visualizer provides: - -- Real-time monitoring of blob submissions (last 100 submissions) -- Success/failure statistics and trends -- Gas price tracking and cost analysis -- DA layer health monitoring -- Detailed blob inspection capabilities -- Recent submission history - -## Enabling the DA Visualizer - -The DA Visualizer is disabled by default. To enable it, use the following configuration: - -### Via Command-line Flag - -```bash -testapp start --rollkit.rpc.enable_da_visualization -``` - -### Via Configuration File - -Add the following to your `evnode.yml` configuration file: - -```yaml -rpc: - enable_da_visualization: true -``` - -## Accessing the DA Visualizer - -Once enabled, the DA Visualizer is accessible through your node's RPC server. 
By default, this is: - -``` -http://localhost:7331/da -``` - -The visualizer provides several API endpoints and a web interface: - -### Web Interface - -Navigate to `http://localhost:7331/da` in your web browser to access the interactive dashboard. - -### API Endpoints - -The following REST API endpoints are available for programmatic access: - -#### Get Recent Submissions - -```bash -GET /da/submissions -``` - -Returns the most recent blob submissions (up to 100 kept in memory). - -#### Get Blob Details - -```bash -GET /da/blob?id={blob_id} -``` - -Returns detailed information about a specific blob submission. - -#### Get DA Statistics - -```bash -GET /da/stats -``` - -Returns aggregated statistics including: - -- Total submissions count -- Success/failure rates -- Average gas price -- Total gas spent -- Average blob size -- Submission trends - -#### Get DA Health Status - -```bash -GET /da/health -``` - -Returns the current health status of the DA layer including: - -- Connection status -- Recent error rates -- Performance metrics -- Last successful submission timestamp - -## Features - -### Real-time Monitoring - -The dashboard automatically updates every 30 seconds, displaying: - -- Recent submission feed with status indicators (last 100 submissions) -- Success rate percentage -- Current gas price trends -- Submission history - -### Submission Details - -Each submission entry shows: - -- Timestamp -- Blob ID with link to detailed view -- Number of blobs in the batch -- Submission status (success/failure) -- Gas price used -- Error messages (if any) - -### Statistics Dashboard - -The statistics section provides: - -- **Performance Metrics**: Success rate, average submission time -- **Cost Analysis**: Total gas spent, average gas price over time -- **Volume Metrics**: Total blobs submitted, average blob size -- **Trend Analysis**: Hourly and daily submission patterns - -### Health Monitoring - -The health status indicator shows: - -- 🟢 **Healthy**: DA layer 
responding normally -- 🟡 **Warning**: Some failures but overall functional -- 🔴 **Critical**: High failure rate or connection issues - -## Use Cases - -### For Node Operators - -- Monitor the reliability of DA submissions -- Track gas costs and optimize gas price settings -- Identify patterns in submission failures -- Ensure DA layer connectivity - -### For Developers - -- Debug DA submission issues -- Analyze blob data structure -- Monitor application-specific submission patterns -- Test DA layer integration - -### For Network Monitoring - -- Track overall network DA usage -- Identify congestion periods -- Monitor gas price fluctuations -- Analyze submission patterns across the network - -## Configuration Options - -When enabling the DA Visualizer, you may want to adjust related RPC settings: - -```yaml -rpc: - address: "0.0.0.0:7331" # Bind to all interfaces for remote access - enable_da_visualization: true -``` - -**Security Note**: If binding to all interfaces (`0.0.0.0`), ensure proper firewall rules are in place to restrict access to trusted sources only. - -## Troubleshooting - -### Visualizer Not Accessible - -1. Verify the DA Visualizer is enabled: - - Check your configuration file or ensure the flag is set - - Look for log entries confirming "DA visualization endpoints registered" - -2. Check the RPC server is running: - - Verify the RPC address in logs - - Ensure no port conflicts - -3. For remote access: - - Ensure the RPC server is bound to an accessible interface - - Check firewall settings - -### No Data Displayed - -1. Verify your node is in aggregator mode (only aggregators submit to DA) -2. Check DA layer connectivity in the node logs -3. Ensure transactions are being processed -4. 
Note that the visualizer only keeps the last 100 submissions in memory - -### API Errors - -- **404 Not Found**: DA Visualizer not enabled -- **500 Internal Server Error**: Check node logs for DA connection issues -- **Empty responses**: No submissions have been made yet - -## Example Usage - -### Using curl to access the API - -```bash -# Get recent submissions (returns up to 100) -curl http://localhost:7331/da/submissions - -# Get specific blob details -curl http://localhost:7331/da/blob?id=abc123... - -# Get statistics -curl http://localhost:7331/da/stats - -# Check DA health -curl http://localhost:7331/da/health -``` - -### Monitoring with scripts - -```bash -#!/bin/bash -# Simple monitoring script - -while true; do - health=$(curl -s http://localhost:7331/da/health | jq -r '.status') - if [ "$health" != "healthy" ]; then - echo "DA layer issue detected: $health" - # Send alert... - fi - sleep 30 -done -``` - -## Related Configuration - -For complete DA layer configuration options, see the [Config Reference](../../learn/config.md#data-availability-configuration-da). - -For metrics and monitoring setup, see the [Metrics Guide](../metrics.md). diff --git a/content/docs/guides/deploy-overview.md b/content/docs/guides/deploy-overview.md deleted file mode 100644 index 2207a0c..0000000 --- a/content/docs/guides/deploy-overview.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -description: This page provides an overview of some common ways to deploy chains. ---- - -# Deploying Your Chain - -One of the benefits of building chains with Evolve is the flexibility you have as a developer to choose things like the DA layer, the settlement scheme, and the execution environment. - -The challenge that comes with this flexibility is that there are more services that now need to be deployed and managed while running your chain. - -In the tutorials so far, you've seen various helper scripts used to make things easier. 
While great for tutorials, there are better ways to deploy and manage chains than using various bash scripts. - -In this section, you'll see a few examples of how you can deploy your chain environment with all your services running in a more production-ready way. - -:::warning[Disclaimer] -These examples are for educational purposes only. Before deploying your chain for production use you should fully understand the services you are deploying and your choice in deployment method. -::: diff --git a/content/docs/guides/deploy/mainnet.md b/content/docs/guides/deploy/mainnet.md deleted file mode 100644 index ba2c505..0000000 --- a/content/docs/guides/deploy/mainnet.md +++ /dev/null @@ -1,228 +0,0 @@ ---- -description: Checklist and guide for launching an EVM mainnet using ev-node and ev-reth. ---- - -# EVM Mainnet Checklist - -This guide covers launching a mainnet using **ev-reth** and **ev-node**. - -## Ev-node - -Ev-node is the sequencer that creates blocks, propagates them to other peers, and submits them to the DA layer. - -### Chain ID - -- Pick a unique EVM chain ID for your network -- Verify it does not collide with existing chains at [chainlist.org](https://chainlist.org) - -### Block Time - -- Pick a block time for your chain -- **Optional:** Decide if you would like lazy blocks - -### Data Availability (DA) - -| Configuration | Description | -|---|---| -| Header Namespace | Required. Namespace for block headers. | -| Data Namespace | Required. Namespace for block data. Two namespaces are recommended, but one can be used. | -| Forced Inclusion Namespace | Optional. For censorship resistance. | -| DA Block Time | Used for syncing with the DA layer. Set this to the block time of the underlying DA chain. | - -#### Batching Strategy - -Blob submission to the DA layer is controlled by the batching strategy, not the DA block time. Choose a strategy based on your latency vs. 
cost trade-off: - -| Strategy | Behavior | -|---|---| -| `immediate` | Submits as soon as any items are available. Lowest latency, highest cost. | -| `size` | Waits until the batch reaches a size threshold (fraction of max blob size). | -| `time` | Waits for a time interval (`batch_max_delay`) before submitting. Default strategy. | -| `adaptive` | Submits when either the size threshold or the max delay is reached, whichever comes first. | - -Related configuration flags: - -- `da.batching_strategy` -- Strategy name (default: `time`) -- `da.batch_size_threshold` -- Fraction of max blob size before submitting, 0.0-1.0 (default: 0.8). Applies to `size` and `adaptive`. -- `da.batch_max_delay` -- Maximum wait time before submitting regardless of size (default: DA block time). Applies to `time` and `adaptive`. -- `da.batch_min_items` -- Minimum items to accumulate before considering submission (default: 1). - -#### DA Account Funding - -The DA account needs tokens to submit blobs to the DA layer. If the account runs dry, blob submission stops and your chain halts. - -- Fund the DA account with sufficient tokens before launch -- Set up balance monitoring with alerts at a threshold that gives you enough runway to top up (e.g. alert when balance drops below 48 hours of estimated submission costs) -- Establish a process for topping up the DA account - -### Sequencer Key Management - -The sequencer signing key is the most security-critical component of your chain. A compromised key allows an attacker to produce arbitrary blocks. 
- -- Use an HSM or remote signer for the sequencer key in production -- do not store plaintext keys on disk -- Restrict access to the sequencer machine to a minimal set of operators -- Have a key rotation plan ready before launch - -### P2P - -- Configure p2p peers for a stable network -- The sequencer should be connected to **at least two full nodes you control** -- Third-party full nodes should connect to your full nodes, **not** directly to the sequencer - -### Network Security - -- Place the sequencer behind a firewall; only allow p2p connections from your own full nodes -- Apply rate limiting on public RPC endpoints to prevent abuse -- Consider DDoS mitigation (e.g. Cloudflare, HAProxy) in front of public-facing full nodes -- Restrict SSH and management ports to a VPN or bastion host - -### Metrics and Monitoring - -- Set up a metric gathering system (Prometheus + Grafana recommended) -- ev-metrics was created to help with basic metrics and alerting - -Key metrics to monitor: - -| Metric | Why | -|---|---| -| Block production rate | Detect if the sequencer has stalled | -| DA submission lag / failures | Catch blob submission issues before they become critical | -| Peer count | Ensure network connectivity is healthy | -| Mempool depth | Detect congestion or spam | -| Disk usage | Prevent nodes from running out of storage | -| Sequencer balance | Ensure the sequencer can pay for DA submissions | -| DA account balance | Chain halts if this runs dry | -| RPC latency / error rate | Catch degraded user experience | - -### RPC - -- Use the full nodes connected to the sequencer for public/application RPCs -- **Do not expose the sequencer directly** - -## Ev-reth - -Ev-reth is the execution engine. It uses reth as a library to make custom configurations for the Evolve use case. Changes are documented in the readme. 
- -### Precompiles - -Ev-reth comes with a set of optional precompiles: - -| Precompile | Description | -|---|---| -| Basefee Redirect | Redirects the basefee (burned under EIP-1559) to a specified address | -| Native Mint & Burn | Allows minting and burning the native token. Can be used with a bridge like Hyperlane. | - -### Checklist - -- Decide which precompiles are needed -- Set admin accounts for precompiles / basefee redirect -- Decide if a proxy contract is needed (provided proxy contract) -- Decide on EIP-1559 configurations -- Configure basefee -- Optional: Feevault contract - -### Backup and Recovery - -Establish a backup and recovery strategy before launch. See the [Reth State Backup](../evm/reth-backup.md) guide for detailed instructions. - -- Take periodic state snapshots (frequency depends on your RTO requirements) -- Test the recovery procedure on a staging environment before mainnet launch -- Keep at least one full node with archival state as a fallback -- Document the recovery runbook so any operator can execute it - -### Upgrade Strategy - -Plan how you will ship new versions of ev-node and ev-reth to mainnet. - -- **Rolling restart order:** Upgrade full nodes first, then the sequencer. This ensures full nodes can handle the new version before the sequencer starts producing blocks with it. -- **Hard fork coordination:** If a release includes consensus-breaking changes, coordinate an activation height with all node operators in advance. -- **Rollback plan:** Know how to revert to the previous binary and state if an upgrade causes issues. Test this on a staging network. -- **Communication:** Establish a channel (Telegram, Discord, etc.) to notify node operators of upcoming upgrades and activation heights. - -## Chain Startup Flow - -### Genesis - -Configuring the genesis is the first step to starting the chain. - -If using the proxy admin contract alongside both native mint/burn and basefee redirect, set the admin of those to the proxy contract. 
This allows the chain to modify the admin to a multisig later. - -#### Genesis Token Distribution - -Define the initial token supply and allocation before generating the genesis file. - -- Decide the total initial supply and how it is split (team, treasury, partners, bridge reserves, etc.) -- Configure genesis balances in the `alloc` section of the genesis file -- Ensure the sequencer EOA has enough balance to submit transactions -- If using a bridge (e.g. Hyperlane), reserve sufficient supply for the bridge contract - -:::info -All flows below assume usage of the proxy admin contract. -::: - -### Flow 1: Full Setup - -**Basefee redirect + feevault + native mint/burn + bridge (Hyperlane)** - -#### Genesis Setup - -Embed the proxy contract with an EOA address as admin. The EOA must have at least one token to submit transactions. The proxy contract will have a predictable address, which is added to the `EvolveConfig` in the Chain Config as admin for feevault and native mint/burn. - -**Steps:** - -1. Pick an EOA as admin of the proxy contract -2. Set EOA and create alloc of proxy contract for the genesis file -3. Set the admin proxy contract as admin in Evolve config: - -```json -{ - "evolve": { - "baseFeeSink": "", - "baseFeeRedirectActivationHeight": 0, - "mintAdmin": "", - "mintPrecompileActivationHeight": 0, - "contractSizeLimit": 131072, - "contractSizeLimitActivationHeight": 0 - } -} -``` - -4. Pick a max contract size (24kb default, 128kb is a safe upgrade) -5. 
Pick EIP-1559 config: - -```json -{ - "baseFeeMaxChangeDenominator": 8, - "baseFeeElasticityMultiplier": 2, - "initialBaseFeePerGas": 1000000000 -} -``` - -#### Post Genesis - -- Deploy Hyperlane native mint contract (if using Hyperlane) -- Provide funds to partner wallets deploying on your chain -- Connect full nodes via reth p2p on top of the Evolve system (consult reth documentation for key discovery and connection) - -### Flow 2: Minimal Setup - -**Basefee redirect only, with an EOA receiving all funds.** - -#### Genesis Setup - -1. Set EOA address as the sink in the Evolve config -2. Pick a max contract size (24kb default, 128kb is a safe upgrade) -3. Pick EIP-1559 config: - -```json -{ - "baseFeeMaxChangeDenominator": 8, - "baseFeeElasticityMultiplier": 2, - "initialBaseFeePerGas": 1000000000 -} -``` - -#### Post Genesis - -- Provide funds to partner wallets deploying on your chain -- Connect full nodes via reth p2p on top of the Evolve system (consult reth documentation for key discovery and connection) diff --git a/content/docs/guides/deploy/meta.json b/content/docs/guides/deploy/meta.json deleted file mode 100644 index 0a5dbce..0000000 --- a/content/docs/guides/deploy/meta.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "title": "Deploy your chain", - "pages": ["overview", "testnet", "mainnet"] -} diff --git a/content/docs/guides/deploy/overview.md b/content/docs/guides/deploy/overview.md deleted file mode 100644 index 0c1b1e1..0000000 --- a/content/docs/guides/deploy/overview.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -description: This page provides an overview of some common ways to deploy chains. ---- - -# 🚀 Deploying Your Chain - -One of the benefits of building chains with Evolve is the flexibility you have as a developer to choose things like the DA layer, the settlement scheme, and the execution environment. - -You can learn more about Evolve architecture [here](../../learn/specs/overview.md). 
- -The challenge that comes with this flexibility is that there are more services that now need to be deployed and managed while running your chain. - -In the tutorials so far, you've seen various helper scripts used to make things easier. While great for tutorials, there are better ways to deploy and manage chains than using various bash scripts. - -## 🏗️ Deployment Scales - -Depending on your needs and the stage of your chain development, there are different deployment approaches you can take: - -### 🏠 Local Development - -For development and testing purposes, you can deploy your chain locally using containerized environments. This approach provides: - -- Quick iteration and testing -- No external dependencies -- Full control over the environment -- Cost-effective development - -### 🌐 Testnet Deployment - -When you're ready to test with real network conditions, you can deploy to testnet environments. This includes: - -- Integration with testnet DA networks -- Real network latency and conditions -- Multi-node testing scenarios -- Pre-production validation - -## 📚 Available Deployment Guides - -Choose the deployment approach that matches your current needs: - -- [🌐 Testnet Deployment](./testnet.md) - Deploy on testnet with external DA networks - -:::warning[Disclaimer] -These examples are for educational purposes only. Before deploying your chain for production use you should fully understand the services you are deploying and your choice in deployment method. -::: - -## 🎉 Next Steps - -For production mainnet deployments, consider additional requirements such as monitoring, security audits, infrastructure hardening, and operational procedures that go beyond the scope of these tutorials. 
diff --git a/content/docs/guides/deploy/testnet.md b/content/docs/guides/deploy/testnet.md deleted file mode 100644 index f2fa50c..0000000 --- a/content/docs/guides/deploy/testnet.md +++ /dev/null @@ -1,298 +0,0 @@ -# 🚀 Evolve EVM Deployment Guide - -This tutorial is going to show you how to deploy an Evolve testnet, focusing on the architecture choices and components that make up a complete EVM-based chain deployment. - -You can learn more about Evolve EVM architecture [here](../../learn/execution.md). - -:::tip -This tutorial explores Evolve, currently in Alpha. If you encounter bugs, please report them via a GitHub issue ticket or reach out in our Telegram group. -::: - -## 🏗️ Architecture Overview - -The following diagram illustrates the complete deployment architecture with component interactions: - -```mermaid -graph TB - subgraph "Sequencer Stack" - SEQ_RETH[RETH Service] - SEQ_EVOLVE[EVOLVE Service
--aggregator=true] - SEQ_RETH <--> SEQ_EVOLVE - end - - subgraph "Full Node Stack 1" - FN1_RETH[RETH Service] - FN1_EVOLVE[EVOLVE Service
--aggregator=false] - FN1_RETH <--> FN1_EVOLVE - end - - subgraph "Full Node Stack 2" - FN2_RETH[RETH Service] - FN2_EVOLVE[EVOLVE Service
--aggregator=false] - FN2_RETH <--> FN2_EVOLVE - end - - subgraph "Full Node Stack 3" - FN3_RETH[RETH Service] - FN3_EVOLVE[EVOLVE Service
--aggregator=false] - FN3_RETH <--> FN3_EVOLVE - end - - subgraph "Celestia DA Stack" - CELESTIA_APP[Celestia-App
Consensus Layer] - CELESTIA_NODE[Celestia-Node
DA Sampling & API] - CELESTIA_APP <--> CELESTIA_NODE - end - - %% P2P connections between Evolve nodes - SEQ_EVOLVE <--> FN1_EVOLVE - SEQ_EVOLVE <--> FN2_EVOLVE - SEQ_EVOLVE <--> FN3_EVOLVE - FN1_EVOLVE <--> FN2_EVOLVE - FN2_EVOLVE <--> FN3_EVOLVE - FN1_EVOLVE <--> FN3_EVOLVE - - %% DA connections - SEQ_EVOLVE -->|Post Blobs
Auth Token| CELESTIA_NODE - FN1_EVOLVE -->|Retrieve Blobs
Auth Token| CELESTIA_NODE - FN2_EVOLVE -->|Retrieve Blobs
Auth Token| CELESTIA_NODE - FN3_EVOLVE -->|Retrieve Blobs
Auth Token| CELESTIA_NODE - - %% User interactions - USERS[Users/Applications] --> FN1_RETH - USERS --> FN2_RETH - USERS --> FN3_RETH - - classDef sequencer fill:#e1f5fe - classDef fullnode fill:#f3e5f5 - classDef celestia fill:#fff3e0 - classDef user fill:#e8f5e8 - - class SEQ_RETH,SEQ_EVOLVE sequencer - class FN1_RETH,FN1_EVOLVE,FN2_RETH,FN2_EVOLVE,FN3_RETH,FN3_EVOLVE fullnode - class CELESTIA_APP,CELESTIA_NODE celestia - class USERS user -``` - -**Key Interactions:** - -- **Engine API**: RETH ↔ EVOLVE communication within each stack -- **P2P Network**: EVOLVE nodes sync blocks and share chain state -- **Data Availability**: Sequencer posts blobs, full nodes retrieve blobs from Celestia -- **User Access**: Applications connect to full node RETH services for JSON-RPC access - -## 💻 Pre-requisites {#prerequisites} - -Make sure you understand the sequencing topology you want to use by reading the [Sequencing Overview](../..//learn/sequencing/overview.md). - -## 🛠️ Dependencies {#dependencies} - -### 🔄 Choosing Your Sequencing Topology {#choosing-sequencing-topology} - -First, you need to choose a sequencing topology for your Evolve EVM chain. The sequencing topology determines how transactions are ordered and blocks are produced in your chain. - -Currently, Evolve supports one sequencing implementation: - -### 🔄 Single Sequencer - -- **Description**: The simplest sequencing architecture where one node is responsible for ordering transactions and producing blocks -- **Use Cases**: Development, testing, and production deployments requiring simplicity and low latency -- **Advantages**: Easy setup, fast block production, independence from DA block time -- **Requirements**: One sequencer node, multiple optional full nodes - -For detailed information about sequencing topologies, see the [Sequencing Overview](../../learn/sequencing/overview.md) and [Single Sequencer](../../learn/sequencing/single.md) documentation. 
- -## 🏗️ Deployment Architecture {#deployment-architecture} - -### 🔄 Single Sequencer Topology - -In a single sequencer deployment, you will run: - -1. **One Sequencer Node** (Required) - - Handles transaction ordering and block production - - Posts data to the Data Availability layer - - Serves as the primary source of truth for the chain - -2. **Multiple Full Nodes** (Optional, but recommended) - - Sync blocks from the sequencer - - Provide redundancy and decentralization - - Can serve user queries and transactions - - Scale horizontally based on demand - -## 🛠️ Setting up your environment {#setting-up-your-environment} - -In addition to choosing your sequencing topology, we need to understand the components that make up your deployment. - -We will use a combination of RETH and EVOLVE services for this tutorial and run them together to create your EVM chain. - -Each node in your Evolve EVM deployment (whether sequencer or full node) consists of two primary services working together: - -### ⚡ RETH Service - -- **Purpose**: Provides the Ethereum Virtual Machine (EVM) execution environment -- **Technology**: Rust-based Ethereum client (Reth) that handles transaction execution -- **Responsibilities**: - - Processing EVM transactions - - Maintaining the EVM state - - Providing Ethereum JSON-RPC API endpoints - - Managing the execution layer consensus - -### 🔗 EVOLVE Service - -- **Purpose**: Handles chain-specific functionality and consensus -- **Technology**: Evolve node implementation -- **Responsibilities**: - - Block production and validation - - Data availability integration - - P2P networking between chain nodes - - Chain consensus mechanisms - - Communication with the execution layer (RETH) - -### 🔄 Service Interaction - -The two services work together through well-defined interfaces: - -1. **Engine API**: Evolve communicates with RETH using the Engine API (typically on port 8551) -2. 
**JWT Authentication**: Secure communication between services using shared JWT secrets -3. **Block Coordination**: Evolve orchestrates block production while RETH executes transactions - -## ⚙️ Node Configurations {#node-configurations} - -### 🎯 Sequencer Node Configuration - -The single sequencer node runs both RETH and EVOLVE services with specific settings: - -- **RETH**: Configured to accept blocks from the Evolve sequencer -- **EVOLVE**: Configured with `--evnode.node.aggregator=true` to enable block production -- **Role**: Produces blocks, orders transactions, posts to DA layer - -### 📡 Full Node Configuration - -Each full node also runs both RETH and EVOLVE services but in sync mode: - -- **RETH**: Configured to process blocks received from the network -- **EVOLVE**: Configured with `--evnode.node.aggregator=false` to sync from the sequencer -- **Role**: Syncs blocks, serves queries, provides redundancy - -### 🔑 Key Integration Points - -All nodes require: - -- Shared JWT secret for Engine API authentication -- Matching genesis configuration between EVOLVE nodes -- Proper network configuration for service communication -- Coordinated startup sequence (typically RETH first, then EVOLVE) - -### ⏰ Block Time Configuration - -You can customize timing parameters for your chain. 
While there are many configuration arguments available for the Evolve binary, two important timing-related flags are: - -#### 🎯 Sequencer Block Time - -- **Flag**: `--evnode.node.block_time` -- **Default**: 1s (1 block per second) -- **Purpose**: Controls how frequently the sequencer produces new blocks -- **Customization**: Can be adjusted based on throughput requirements and latency preferences - -#### 📊 Data Availability Block Time - -- **Flag**: `--evnode.da.block_time` -- **Default**: 6s -- **Purpose**: Controls how frequently blobs are posted to the Celestia chain -- **Function**: Each 6 seconds (by default), batched block data is submitted to Celestia for data availability - -## 🌌 Data Availability Layer: Celestia {#celestia-da} - -Your Evolve EVM chain connects to Celestia as the Data Availability (DA) layer. The Evolve EVM Celestia DA stack consists of two key services: - -### 🏛️ Celestia-App Service - -- **Purpose**: Provides the consensus layer for the Celestia network -- **Responsibilities**: - - Processing and ordering transactions on the Celestia network - - Maintaining the canonical state of the DA layer - - Participating in Tendermint consensus - -### 🌐 Celestia-Node Service - -- **Purpose**: Provides data availability sampling and networking -- **Responsibilities**: - - Data availability sampling (DAS) to verify data availability - - P2P networking for block and data propagation - - Light client functionality for resource-constrained environments - - API endpoints for chains to submit and retrieve data - -### 🔗 Celestia Integration - -Both sequencer and full node Evolve services need to communicate with the celestia-node service, but for different purposes: - -#### 📤 Sequencer Node Communication - -- **Purpose**: Batch posting of block data (blobs) to Celestia -- **Operation**: The sequencer Evolve service submits batched block data to Celestia via the celestia-node API -- **Frequency**: Occurs regularly as new blocks are produced and need to be 
made available - -#### 📥 Full Node Communication - -- **Purpose**: Retrieving block data (blobs) from Celestia -- **Operation**: Full node Evolve services query and download historical block data via the celestia-node API -- **Frequency**: Occurs during initial sync and ongoing block validation - -#### 🔑 Common Integration Points - -1. **Authentication**: Evolve requires an auth token generated by the celestia-node so that Evolve can send transactions on its behalf. Both sequencer and full node types use these JWT tokens for secure communication with celestia-node -2. **Namespace Isolation**: Data is organized using Celestia namespaces (automatically encoded by the node for proper formatting) -3. **API Endpoints**: Both sequencer and full nodes use the same celestia-node API interface -4. **Network Configuration**: All nodes must be configured to connect to the same Celestia network - -### 🛠️ Deployment Considerations - -When deploying with Celestia DA: - -- **Light Node**: Most chains run a celestia-node in light mode for cost efficiency -- **Network Selection**: Choose between Arabica (devnet), Mocha (testnet), or Mainnet Beta -- **Funding**: Ensure your celestia-node wallet has sufficient TIA tokens for data submission - -We now have all we need to understand the components for deploying an Evolve EVM chain. - -### 🚀 Run your Evolve EVM chain {#run-evolve-evm-chain} - -A complete Evolve EVM chain deployment consists of: - -1. **One Sequencer Node**: RETH + EVOLVE (aggregator mode) -2. **N Full Nodes**: RETH + EVOLVE (sync mode) - scale as needed -3. **Celestia Connection**: celestia-node service for data availability - -You can deploy your chain by running the sequencer and full nodes with the proper configuration. - -Congratulations! You have successfully understood how to deploy an Evolve EVM chain. 
- -## 🐳 Simplified Deployment with Docker Compose {#docker-compose-deployment} - -The deployment of sequencer and full nodes requires running multiple processes and providing specific variables so they can effectively interact with each other. Managing these configurations manually can be complex and error-prone, especially when coordinating JWT secrets, genesis configurations, network settings, and service dependencies across multiple node stacks. - -To save time, we can use ready-to-use Docker Compose stacks that can be customized based on specific needs. These pre-configured stacks handle the complexity of service orchestration, environment variable management, and inter-service communication automatically. - -To make this deployment process easy and painless for node operators, you can use the example implementation available at: [https://github.com/evstack/ev-toolbox/tree/main/ev-stacks](https://github.com/evstack/ev-toolbox/tree/main/ev-stacks/) - -This solution provides: - -- Pre-configured Docker Compose files for sequencer and full node deployments -- Automated handling of JWT secrets and genesis file distribution -- Simplified configuration through environment variables -- Easy scaling of full node instances -- Integrated Celestia node configuration - -:::warning -This deployment approach is suitable for testnets and development environments, but is not suitable for production-grade mainnet deployments, which require additional security considerations, monitoring, backup strategies, and infrastructure hardening. -::: - -## 🎉 Next steps - -Congratulations again! You now know how to deploy Evolve EVM chains and understand the architecture and components needed. 
- -For detailed setup instructions, see: - -- [Single Sequencer Setup Guide](../evm/single.md) - Step-by-step deployment instructions -- [RETH Backup Guide](../evm/reth-backup.md) - Data protection and backup procedures -- [Celestia DA Guide](../da/celestia-da.md) - Connecting to Celestia networks diff --git a/content/docs/guides/evm/meta.json b/content/docs/guides/evm/meta.json deleted file mode 100644 index 6de5c39..0000000 --- a/content/docs/guides/evm/meta.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "title": "EVM", - "pages": ["single", "reth-backup"] -} diff --git a/content/docs/guides/evm/reth-backup.md b/content/docs/guides/evm/reth-backup.md deleted file mode 100644 index a0ec14b..0000000 --- a/content/docs/guides/evm/reth-backup.md +++ /dev/null @@ -1,243 +0,0 @@ -# Evolve EVM reth State Backup Guide - -## Introduction - -This guide covers how to backup the reth state of a Evolve EVM based blockchain. This implementation provides a production-ready approach to data protection. - -## Prerequisites - -Before starting, ensure you have: - -- A running Evolve full node - Follow the [Evolve Full Node Setup Guide](https://ev.xyz/guides/full-node) to set up your node -- Zstandard (zstd) compression tool installed -- jq JSON processor installed -- Administrative access to the Docker host -- Sufficient disk space for backups (at least 2x the current volume size) -- Access to remote backup storage (optional but recommended) -- Basic understanding of Docker volumes - -## Installing Dependencies - -For Ubuntu/Debian-based Linux distributions, install the required dependencies: - -```bash -# Update package list -sudo apt update - -# Install required tools -sudo apt install -y zstd jq - -# Verify installations -zstd --version -jq --version -``` - -## Key Component to Backup - -Reth datadir : contains the entire EVM state and node data. - -## Performing manual backup - -### 1. 
Verify Node Synchronization - -```bash -# Check Evolve node status -curl -sX POST \ - -H "Content-Type: application/json" \ - -H "Connect-Protocol-Version: 1" \ - -d "{}" \ - http://:/evolve.v1.HealthService/Livez - -# Verify reth sync status -curl -sX POST \ - -H "Content-Type: application/json" \ - -d '{"jsonrpc":"2.0","method":"eth_syncing","params":[],"id":1}' \ - http://: - -# Expected response for a fully synced node: -# {"jsonrpc":"2.0","id":1,"result":false} -``` - -### 2. Stop Services Gracefully - -You will need to stop both evolve and reth-evolve on the fullnode stack, according to your setup. - -Example for docker-compose based setup: - -```bash -# Stop services in correct order -docker compose stop fullnode -docker compose stop reth-evolve - -# Verify all containers are stopped -docker compose ps -``` - -### 3. Create Backup - -```bash -# Create backup directory -# Create backup directory -# IMPORTANT: Set your backup base directory and reth-evolve data directory paths -BACKUP_BASE_DIR="/path/to/backups" -RETH_EVOLVE_DATADIR="/path/to/reth-evolve/datadir" -mkdir -p "${BACKUP_BASE_DIR}" - -# Set backup timestamp -BACKUP_DATE=$(date +%Y%m%d_%H%M%S) - -# Backup reth-evolve datadir using zstandard compression -tar cf - -C "${RETH_EVOLVE_DATADIR}" . | zstd -3 > "${BACKUP_BASE_DIR}/reth_state_${BACKUP_DATE}.tar.zst" - -# Generate checksum -sha256sum "${BACKUP_BASE_DIR}/reth_state_${BACKUP_DATE}.tar.zst" > "${BACKUP_BASE_DIR}/reth_state_${BACKUP_DATE}.tar.zst.sha256" -``` - -### 4. Restart services - -You will need to restart both evolve and reth-evolve on the fullnode stack, according to your setup. - -Example for docker-compose based setup: - -```bash -# Start services -docker compose up -d - -# Monitor startup -docker compose logs -f -``` - -## Automated backup - -### 1. 
Create the Backup Script - -```bash -sudo nano /usr/local/bin/evolve-backup.sh -``` - -Add the following content - -```bash -#!/bin/bash -# Reth-Evolve Backup Script with Zstandard Compression - -set -euo pipefail - -# Configuration -RETH_EVOLVE_DATADIR="" # IMPORTANT: Set this to your reth-evolve data directory path -BACKUP_BASE_DIR="${BACKUP_BASE_DIR:-/backup/evolve}" -REMOTE_BACKUP="${REMOTE_BACKUP:-backup-server:/backups/evolve}" -RETENTION_DAYS="${RETENTION_DAYS:-7}" -COMPOSE_FILE="${COMPOSE_FILE:-docker-compose.yml}" -ZSTD_LEVEL="${ZSTD_LEVEL:-3}" -ZSTD_THREADS="${ZSTD_THREADS:-0}" # 0 = auto-detect -FULLNODE_IP="${FULLNODE_IP:-localhost}" -FULLNODE_RPC_PORT="${FULLNODE_RPC_PORT:-7331}" -FULLNODE_RETH_IP="${FULLNODE_RETH_IP:-localhost}" -FULLNODE_RETH_PORT="${FULLNODE_RETH_PORT:-8545}" - -# Functions -log() { - echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" -} - -check_sync_status() { - # Check Evolve node health - curl -fsX POST \ - -H "Content-Type: application/json" \ - -H "Connect-Protocol-Version: 1" \ - -d "{}" \ - "http://${FULLNODE_IP}:${FULLNODE_RPC_PORT}/evolve.v1.HealthService/Livez" > /dev/null - - # Check reth sync status - local sync_status=$(curl -sX POST \ - -H "Content-Type: application/json" \ - -d '{"jsonrpc":"2.0","method":"eth_syncing","params":[],"id":1}' \ - http://${FULLNODE_RETH_IP}:${FULLNODE_RETH_PORT} | jq -r '.result') - - if [ "$sync_status" != "false" ]; then - log "WARNING: Node is still syncing. Backup may be incomplete." 
- fi -} - -# Main backup process -main() { - log "Starting Evolve backup process" - - # Setup - BACKUP_DATE=$(date +%Y%m%d_%H%M%S) - BACKUP_DIR="${BACKUP_BASE_DIR}" - - # Create backup directory - mkdir -p "${BACKUP_DIR}" - - # Check sync status - check_sync_status - - # Stop services - log "Stopping Evolve services" - docker compose -f ${COMPOSE_FILE} stop fullnode - docker compose -f ${COMPOSE_FILE} stop reth-evolve - - # Backup reth state using zstandard compression - log "Backing up reth state with zstandard compression" - tar cf - -C ${RETH_EVOLVE_DATADIR} . | zstd -${ZSTD_LEVEL} -T${ZSTD_THREADS} > "${BACKUP_DIR}/reth_state_${BACKUP_DATE}.tar.zst" - - # Generate checksum - sha256sum "${BACKUP_DIR}/reth_state_${BACKUP_DATE}.tar.zst" > "${BACKUP_DIR}/reth_state_${BACKUP_DATE}.tar.zst.sha256" - - # Transfer to remote storage - if [ -n "${REMOTE_BACKUP:-}" ]; then - log "Transferring backup to remote storage" - rsync -avz "${BACKUP_DIR}/reth_state_${BACKUP_DATE}.tar.zst*" "${REMOTE_BACKUP}/" || log "WARNING: Remote transfer failed" - fi - - # Restart services - log "Restarting services" - docker compose -f ${COMPOSE_FILE} up -d - - # Cleanup old backups - log "Cleaning up old backups" - find "${BACKUP_BASE_DIR}" -name "reth_state_*.tar.zst" -mtime +${RETENTION_DAYS} -delete - find "${BACKUP_BASE_DIR}" -name "reth_state_*.tar.zst.sha256" -mtime +${RETENTION_DAYS} -delete - - log "Backup completed successfully" -} - -# Run backup -main "$@" -``` - -### 2. Make Script Executable' - -```bash -sudo chmod +x /usr/local/bin/evolve-backup.sh -``` - -### 3. Schedule Automated Backups - -```bash -# Edit crontab -sudo crontab -e - -# Add daily backup at 2 AM -0 2 * * * /usr/local/bin/evolve-backup.sh >> /var/log/evolve-backup.log 2>&1 -``` - -## Best practices - -### Backup Strategy - -1. Schedule regular backups - Daily backups during low-activity periods -2. Implement retention policies - Keep x days of local backups, y days remote -3. 
Test restoration procedures - Monthly restoration drills in test environment -4. Monitor backup jobs - Set up alerts for failed backups -5. Use appropriate compression levels - Balance between compression ratio and speed - -### Zstandard Compression Levels - -| Level | Speed | Compression Ratio | Use Case | -|-------|---------|-------------------|---------------------| -| 3 | Default | Balanced | Standard backups | -| 9 | Slower | Better | Long-term archives | -| 19 | Slowest | Best | Maximum compression | diff --git a/content/docs/guides/evm/single.md b/content/docs/guides/evm/single.md deleted file mode 100644 index 759cfd1..0000000 --- a/content/docs/guides/evm/single.md +++ /dev/null @@ -1,161 +0,0 @@ -# Evolve EVM Single Sequencer Setup Guide - -## Introduction - -This guide covers how to set up and run the Single Sequencer implementation of Evolve EVM chains. This implementation provides a centralized approach to transaction sequencing while using EVM as the execution layer. - -## Prerequisites - -Before starting, ensure you have: - -- Go 1.20 or later -- Docker and Docker Compose -- Access to the ev-node and ev-reth repositories -- Git - -## Setting Up the Environment - -### 1. Clone the Evolve Repository - -```bash -git clone --depth 1 --branch %evolveLatestTag% https://github.com/evstack/ev-node.git -cd ev-node -``` - -### 2. Build the Evolve EVM Single Sequencer Implementation - -```bash -just build-evm -just build-da -``` - -This will create the following binaries in the `build` directory: - -- `evm` - Sequencer implementation -- `local-da` - Local data availability node for testing - -## Setting Up the Data Availability (DA) Layer - -### Start the Local DA Node - -```bash -cd build -./local-da start -``` - -This will start a local DA node on the default port (26658). - -## Setting Up the EVM Layer - -### 1. Clone the ev-reth Repository - -```bash -git clone --depth 1 https://github.com/evstack/ev-reth.git -cd ev-reth
``` - -### 2. 
Start the EVM Layer Using Docker Compose - -```bash -docker compose up -d -``` - -This will start Reth (Rust Ethereum client) with the appropriate configuration for Evolve. - -### 3. Note the JWT Secret Path - -The JWT secret is typically located at `ev-node/execution/evm/docker/jwttoken/jwt.hex`. You'll need this path for the sequencer configuration. - -## Running the Single Sequencer Implementation - -### 1. Initialize the Sequencer - -```bash -cd build -./evm init --evnode.node.aggregator=true --evnode.signer.passphrase secret -``` - -### 2. Start the Sequencer - -```bash -./evm start \ - --evm.jwt-secret $(cat /path/to/ev-node/execution/evm/docker/jwttoken/jwt.hex) \ - --evm.genesis-hash 0x0a962a0d163416829894c89cb604ae422323bcdf02d7ea08b94d68d3e026a380 \ - --evnode.node.block_time 1s \ - --evnode.node.aggregator=true \ - --evnode.signer.passphrase secret -``` - -Replace `/path/to/` with the actual path to your ev-node repository. - -## Setting Up a Full Node - -To run a full node alongside your sequencer, follow these steps: - -### 1. Initialize a New Node Directory - -```bash -./evm init --home ~/.evolve/evm-fullnode -``` - -### 2. Copy the Genesis File - -Copy the genesis file from the sequencer node to the full node: - -```bash -cp ~/.evolve/evm/config/genesis.json ~/.evolve/evm-fullnode/config/ -``` - -### 3. Get the Sequencer's P2P Address - -Find the sequencer's P2P address in its logs. It will look similar to: - -```bash -INF listening on address=/ip4/127.0.0.1/tcp/26659/p2p/12D3KooWXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -``` - -### 4. 
Start the Full Node - -```bash -./evm start \ - --home ~/.evolve/evm-fullnode \ - --evm.jwt-secret $(cat /path/to/ev-node/execution/evm/docker/jwttoken/jwt.hex) \ - --evm.genesis-hash 0x0a962a0d163416829894c89cb604ae422323bcdf02d7ea08b94d68d3e026a380 \ - --evnode.node.block_time 1s \ - --evnode.node.aggregator=false \ - --evnode.p2p.peers @127.0.0.1:26659 -``` - -Replace `` with the actual P2P ID from your sequencer's logs. - -## Verifying Node Operation - -After starting your nodes, you should see logs indicating successful block processing: - -```bash -INF block marked as DA included blockHash=XXXX blockHeight=XX module=BlockManager -``` - -## Configuration Reference - -### Common Flags - -| Flag | Description | -| ---------------------------- | --------------------------------------------------- | -| `--evnode.node.aggregator` | Set to true for sequencer mode, false for full node | -| `--evnode.signer.passphrase` | Passphrase for the signer | -| `--evnode.node.block_time` | Block time for the Evolve node | - -### EVM Flags - -| Flag | Description | -| --------------------- | ------------------------------------------------------- | -| `--evm.eth-url` | Ethereum JSON-RPC URL (default `http://localhost:8545`) | -| `--evm.engine-url` | Engine API URL (default `http://localhost:8551`) | -| `--evm.jwt-secret` | JWT secret file path for the Engine API | -| `--evm.genesis-hash` | Genesis block hash of the chain | -| `--evm.fee-recipient` | Address to receive priority fees | - -## Conclusion - -You've now set up and configured the Single Sequencer implementation of Evolve EVM chains. This implementation provides a centralized approach to transaction sequencing while using EVM as the execution layer. 
diff --git a/content/docs/guides/full-node.md b/content/docs/guides/full-node.md deleted file mode 100644 index 7539850..0000000 --- a/content/docs/guides/full-node.md +++ /dev/null @@ -1,104 +0,0 @@ -# Chain Full Node Setup Guide - -## Introduction - -This guide covers how to set up a full node to run alongside a sequencer node in a Evolve-based blockchain network. A full node maintains a complete copy of the blockchain and helps validate transactions, improving the network's decentralization and security. - -> **Note: The guide on how to run an evolve EVM full node can be found [in the evm section](./evm/single.md#setting-up-a-full-node).** - -## Prerequisites - -Before proceeding, ensure that you have completed the [build a chain](./gm-world.md) tutorial, which covers setting-up, building and running your chain. - -Ensure that you have: - -- A local Data Availability (DA) network node running on port `7980`. -- A Evolve sequencer node running and posting blocks to the DA network. - -## Setting Up Your Full Node - -### Initialize Chain Config and Copy Genesis File - -Let's set a terminal variable for the chain ID. - -```bash -CHAIN_ID=gm -``` - -Initialize the chain config for the full node, lets call it `FullNode` and set the chain ID to your chain ID: - -```bash -gmd init FullNode --chain-id $CHAIN_ID --home $HOME/.${CHAIN_ID}_fn -``` - -Copy the genesis file from the sequencer node: - -```bash -cp $HOME/.$CHAIN_ID/config/genesis.json $HOME/.${CHAIN_ID}_fn/config/genesis.json -``` - -### Set Up P2P Connection to Sequencer Node - -Identify the sequencer node's P2P address from its logs. It will look similar to: - -```text -1:55PM INF listening on address=/ip4/127.0.0.1/tcp/7676/p2p/12D3KooWJbD9TQoMSSSUyfhHMmgVY3LqCjxYFz8wQ92Qa6DAqtmh module=p2p -``` - -Create an environment variable with the P2P address: - -```bash -export P2P_ID="12D3KooWJbD9TQoMSSSUyfhHMmgVY3LqCjxYFz8wQ92Qa6DAqtmh" -``` - -### Start the Full Node - -We are now ready to run our full node. 
If we are running the full node on the same machine as the sequencer, we need to make sure we update the ports to avoid conflicts. - -Make sure to include these flags with your start command: - -```sh - --rpc.laddr tcp://127.0.0.1:46657 \ - --grpc.address 127.0.0.1:9390 \ - --p2p.laddr "0.0.0.0:46656" \ - --api.address tcp://localhost:1318 -``` - -Run your full node with the following command: - -```bash -gmd start \ - --evnode.da.address http://127.0.0.1:7980 \ - --p2p.seeds $P2P_ID@127.0.0.1:7676 \ - --minimum-gas-prices 0stake \ - --rpc.laddr tcp://127.0.0.1:46657 \ - --grpc.address 127.0.0.1:9390 \ - --p2p.laddr "0.0.0.0:46656" \ - --api.address tcp://localhost:1318 \ - --home $HOME/.${CHAIN_ID}_fn -``` - -Key points about this command: - -- `chain_id` is generally the `$CHAIN_ID`, which is `gm` in this case. -- The ports and addresses are different from the sequencer node to avoid conflicts. Not everything may be necessary for your setup. -- We use the `P2P_ID` environment variable to set the seed node. - -## Verifying Full Node Operation - -After starting your full node, you should see output similar to: - -``` bash -2:33PM DBG indexed transactions height=1 module=txindex num_txs=0 -2:33PM INF block marked as DA included blockHash=7897885B959F52BF0D772E35F8DA638CF8BBC361C819C3FD3E61DCEF5034D1CC blockHeight=5532 module=BlockManager -``` - -This output indicates that your full node is successfully connecting to the network and processing blocks. - -:::tip -If your chain uses EVM as an execution layer and you see an error like `datadir already used by another process`, it means you have to remove all the state from chain data directory (`/root/.yourchain_fn/data/`) and specify a different data directory for the EVM client. -::: - -## Conclusion - -You've now set up a full node running alongside your Evolve sequencer. 
diff --git a/content/docs/guides/gm-world.md b/content/docs/guides/gm-world.md deleted file mode 100644 index 5b0e6ef..0000000 --- a/content/docs/guides/gm-world.md +++ /dev/null @@ -1,288 +0,0 @@ ---- -title: GM World tutorial -description: Learn how to build and deploy a CosmWasm-based "gm" (good morning) application using Evolve. ---- - -# GM world chain - -## 🌞 Introduction {#introduction} - -This tutorial will guide you through building a evolve `gm-world` chain (`gm` stands for "good morning") using Evolve. Unlike the [quick start guide](./quick-start.md), this tutorial provides a more practical approach to understanding evolve chain development. - -We will cover: - -- Building and configuring a Cosmos-SDK application-specific chain. -- Posting chain data to a Data Availability (DA) network. -- Executing transactions (the end goal). - -No prior understanding of the build process is required, just that it utilizes the [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) for blockchain applications. - -:::tip -This tutorial explores Evolve, currently in Alpha. If you encounter bugs, please report them via a GitHub issue ticket or reach out in our Telegram group. -::: - -## 🛠️ Dependencies {#dependencies} - -As we move into more advanced use cases, we use [ignite](https://docs.ignite.com/welcome) to help with managing all the services we need to run. You can [install ignite here](https://docs.ignite.com/welcome/install). 
- -Once installed, you can verify the installation by running: - -```bash -ignite version -``` - -```bash -Ignite CLI version: v29.3.1-dev -Ignite CLI build date: undefined -Ignite CLI source hash: undefined -Ignite CLI config version: v1 -Cosmos SDK version: v0.53.3 -Buf.build version: undefined -Your OS: darwin -Your arch: arm64 -Your go version: go version go1.25.0 darwin/arm64 -``` - -## Generate your App {#generate-your-app} - -```bash -ignite scaffold chain gm --address-prefix gm -cd gm -``` - -Install the Evolve app for Ignite: - -```bash -ignite app install -g github.com/ignite/apps/evolve -``` - -Install your app locally: - -```bash -just install -``` - -## Add Evolve Features {#add-evolve-features} - -Enhance your blockchain by adding Evolve features. Use the following command: - -```bash -ignite evolve add -``` - -## Build your chain {#build-your-chain} - -Build your chain using the following command: - -```bash -ignite chain build -``` - -This will create a `~/.gm` folder with all the necessary files to run a chain. - -## Initialize Your Blockchain {#initialize-your-blockchain} - -Before starting your blockchain, you need to initialize it with Evolve support. 
Initialize the blockchain as follows: - -```bash -ignite evolve init -``` - -It will also initialize 2 accounts `alice` and `bob`: - -## 🚀 Starting your chain {#start-your-chain} - -Now that we have our gm app generated and installed, we can launch our GM chain along with the local DA by running the following command: - -First lets start the local DA network: - -```bash -cd gm -go run github.com/evstack/ev-node/tools/local-da -``` - -you should see logs like: - -```bash -4:58PM INF NewLocalDA: initialized LocalDA module=local-da -4:58PM INF Listening on host=localhost maxBlobSize=1974272 module=da port=7980 -4:58PM INF server started listening on=localhost:7980 module=da -``` - -After which we can start the app: - -```bash -gmd start --evnode.node.aggregator -``` - -You should see an output like this: - -```bash -2:50PM INF creating new client module=evolve namespace= -2:50PM INF No state found in store, initializing new state module=BlockManager -2:50PM INF Initializing chain chainID=gm genesisTime=2025-06-26T12:50:11Z initialHeight=1 module=evolve -2:50PM INF InitChain chainID=gm initialHeight=1 module=baseapp -2:50PM INF initializing blockchain state from genesis.json module=baseapp -2:50PM INF chain initialized successfully appHash=E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855 module=evolve -2:50PM INF using default mempool ttl MempoolTTL=25 module=BlockManager -2:50PM INF service start impl=EventBus module=events msg="Starting EventBus service" -2:50PM INF service start impl=PubSub module=pubsub msg="Starting PubSub service" -2:50PM INF service start impl=IndexerService module=txindex msg="Starting IndexerService service" -2:50PM INF evolve node run loop launched in background goroutine module=server -2:50PM INF serving HTTP listen address=[::]:26657 module=evolve -2:50PM INF starting P2P client module=evolve -2:50PM INF started RPC server addr=127.0.0.1:7331 module=evolve -2:50PM INF listening on 
address=/ip4/127.0.0.1/tcp/7676/p2p/12D3KooWPN1jqkgZcuF8UMZEa7nSjoF7zPmGHRrCDVrXrpfTLpfJ module=p2p -2:50PM INF listening on address=/ip4/192.168.0.54/tcp/7676/p2p/12D3KooWPN1jqkgZcuF8UMZEa7nSjoF7zPmGHRrCDVrXrpfTLpfJ module=p2p -2:50PM INF no peers - only listening for connections module=p2p -2:50PM INF working in aggregator mode block time=1s module=evolve -2:50PM INF Reaper started interval=1000 module=Reaper -2:50PM INF using pending block height=1 module=BlockManager -2:50PM INF Executing block height=1 module=evolve num_txs=0 timestamp=2025-06-26T14:50:11+02:00 -2:50PM INF block executed successfully appHash=678DE6BBA6E23B000DC5AC86B60245E6EAC503C5C7085495F3B71B22A762EB19 height=1 module=evolve -2:50PM INF indexed block events height=1 module=txindex -2:50PM INF attempting to start executor (Adapter.Start) module=server -2:50PM INF executor started successfully module=server -2:50PM INF creating empty block height=2 module=BlockManager -2:50PM INF Executing block height=2 module=evolve num_txs=0 timestamp=2025-06-26T14:50:30+02:00 -2:50PM INF starting API server... address=tcp://0.0.0.0:1317 module=api-server -2:50PM INF serve module=api-server msg="Starting RPC HTTP server on [::]:1317" -2:50PM INF starting gRPC server... address=localhost:9090 module=grpc-server -2:50PM INF block executed successfully appHash=0B3973A50C42D0184FB86409FC427BD528A790FA45BA2C9E20FDF14A3628CEC8 height=2 module=evolve -``` - -Ignite has successfully launched the GM chain and the local DA network. The GM chain is running on port `7331` and the local DA network is running on port `7980`. - -Good work so far, we have a Chain node, DA network node, now we can start submitting transactions. 
- -## 💸 Transactions {#transactions} - -First, list your keys: - -```bash -gmd keys list --keyring-backend test -``` - -You should see an output like the following - -```bash -- address: gm17rpwv7lnk96ka00v93rphhvcqqztpn896q0dxx - name: alice - pubkey: '{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"A5WPM5WzfNIPrGyha/TlHt0okdlzS1O4Gb1d1kU+xuG+"}' - type: local -- address: gm1r2udsh4za7r7sxvzy496qfazvjp04j4zgytve3 - name: bob - pubkey: '{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"A+jOX/CWInFer2IkqgXGo0da9j7Ubq+e1LJWzTMDjwdt"}' - type: local -``` - -For convenience we export two of our keys like this: - -```bash -export ALICE=gm17rpwv7lnk96ka00v93rphhvcqqztpn896q0dxx -export BOB=gm1r2udsh4za7r7sxvzy496qfazvjp04j4zgytve3 -``` - -Now let's submit a transaction that sends coins from one account to another (don't worry about all the flags, for now, we just want to submit transaction from a high-level perspective): - -```bash -gmd tx bank send $BOB $ALICE 42069stake --keyring-backend test --chain-id gm --fees 5000stake -``` - -You'll be prompted to accept the transaction: - -```bash -auth_info: - fee: - amount: [] - gas_limit: "200000" - granter: "" - payer: "" - signer_infos: [] - tip: null -body: - extension_options: [] - memo: "" - messages: - - '@type': /cosmos.bank.v1beta1.MsgSend - amount: - - amount: "42069" - denom: stake - from_address: gm1r2udsh4za7r7sxvzy496qfazvjp04j4zgytve3 - to_address: gm17rpwv7lnk96ka00v93rphhvcqqztpn896q0dxx - non_critical_extension_options: [] - timeout_height: "0" -signatures: [] -confirm transaction before signing and broadcasting [y/N]:``` - -Confirm and sign the transaction as prompted. now you see the transaction hash at the output: - -```bash -//... - -txhash: 677CAF6C80B85ACEF6F9EC7906FB3CB021322AAC78B015FA07D5112F2F824BFF -``` - -## ⚖️ Checking Balances {#balances} - -Query balances after the transaction: - -```bash -gmd query bank balances $ALICE -``` - -The receiver’s balance should show an increase. 
- -```bash -balances:- amount: "42069" denom: stake -pagination: - next_key: null - total: "0" -``` - -For the sender’s balance: - -```bash -gmd query bank balances $BOB -``` - -Output: - -```bash -balances:- amount: "99957931" denom: stake -pagination: - next_key: null - total: "0" -``` - -## 📦 GM world UI app - -Now that you have an idea of how to interact with the chain with the CLI, let's look at the user interface (UI) application aspect of connecting a wallet to a chain. - -```bash -ignite generate ts-client --yes -ignite s vue -ignite generate composables --yes -cd vue -pnpm install -``` - -Connecting your wallet to your chain is as straightforward as connecting to any other blockchain. It assumes you have the [Keplr](https://www.keplr.app/) wallet extension installed in your browser. - -## 🔗 Connecting your wallet - - -Simply open your browser and go to [`http://localhost:3000`](http://localhost:3000) and use the Ignite UI to interact with your chain. - - -Click the "Connect Wallet" button on the page, and approve the connection request in the Keplr prompt. - -Once authorized, your wallet address will be displayed, confirming that your wallet is successfully connected. - -:::tip -If you run into any issues, make sure your Keplr wallet is updated and set to connect to your local environment. -::: - -## 🎉 Next steps - -Congratulations! You've experienced connecting to a chain from the user side — simple and straightforward. Now, you might consider exploring how to add more application logic to your chain using the Cosmos SDK, as demonstrated in our Wordle App tutorial. 
diff --git a/content/docs/guides/meta.json b/content/docs/guides/meta.json deleted file mode 100644 index 3a3e6ac..0000000 --- a/content/docs/guides/meta.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "title": "How To Guides", - "description": "Step-by-step tutorials and guides", - "icon": "Wrench", - "root": true, - "pages": [ - "ai-docs", - "quick-start", - "gm-world", - "da", - "deploy", - "evm", - "full-node", - "restart-chain", - "reset-state", - "cometbft-to-evolve", - "migrating-to-ev-abci", - "create-genesis", - "metrics", - "use-tia-for-gas", - "celestia-gas-calculator", - "---More---", - "..." - ] -} diff --git a/content/docs/guides/metrics.md b/content/docs/guides/metrics.md deleted file mode 100644 index 6e47357..0000000 --- a/content/docs/guides/metrics.md +++ /dev/null @@ -1,79 +0,0 @@ -# Evolve Metrics Guide - -## How to configure metrics - -Evolve can report and serve Prometheus metrics, which can be consumed by Prometheus collector(s). - -This functionality is disabled by default. - -To enable Prometheus metrics, set `instrumentation.prometheus=true` in your Evolve node's configuration file. - -Metrics will be served under `/metrics` on port 26660 by default. The listening address can be changed using the `instrumentation.prometheus_listen_addr` configuration option. - -## List of available metrics - -You can find the full list of available metrics in the [Technical Specifications](../learn/specs/block-manager.md#metrics). - -## Viewing Metrics - -Once your Evolve node is running with metrics enabled, you can view the metrics by: - -1. Accessing the metrics endpoint directly: - - ```bash - curl http://localhost:26660/metrics - ``` - -2. Configuring Prometheus to scrape these metrics by adding the following to your `prometheus.yml`: - - ```yaml - scrape_configs: - - job_name: evolve - static_configs: - - targets: ['localhost:26660'] - ``` - -3. Using Grafana with Prometheus as a data source to visualize the metrics. 
- -## Example Prometheus Configuration - -Here's a basic Prometheus configuration to scrape metrics from a Evolve node: - -```yaml -global: - scrape_interval: 15s - evaluation_interval: 15s - -scrape_configs: - - job_name: evolve - static_configs: - - targets: ['localhost:26660'] -``` - -## Troubleshooting - -If you're not seeing metrics: - -1. Ensure metrics are enabled in your configuration with `instrumentation.prometheus=true` -2. Verify the metrics endpoint is accessible: `curl http://localhost:26660/metrics` -3. Check your Prometheus configuration is correctly pointing to your Evolve node -4. Examine the Evolve node logs for any errors related to the metrics server - -## Advanced Configuration - -For more advanced metrics configuration, you can adjust the following settings in your configuration file: - -```yaml -instrumentation: - prometheus: true - prometheus_listen_addr: ":26660" - max_open_connections: 3 - namespace: "evolve" -``` - -These settings allow you to: - -- Enable/disable Prometheus metrics -- Change the listening address for the metrics server -- Limit the maximum number of open connections to the metrics server -- Set a custom namespace for all metrics diff --git a/content/docs/guides/migrating-to-ev-abci.md b/content/docs/guides/migrating-to-ev-abci.md deleted file mode 100644 index eb6abcd..0000000 --- a/content/docs/guides/migrating-to-ev-abci.md +++ /dev/null @@ -1,286 +0,0 @@ -# Migrating an Existing Chain to ev-abci - -This guide is for developers of existing Cosmos SDK chains who want to replace their node's default CometBFT consensus engine with the `ev-abci` implementation. By following these steps, you will migrate your chain to run as an `ev-abci` node while preserving chain state. - -## Overview of Migration Process - -The migration process involves the following key phases: - -1. **Code Preparation:** Add migration module, staking wrapper, and upgrade handler to your existing chain -2. 
**Governance Proposal:** Create and pass a governance proposal to initiate the migration -3. **State Export:** Export the current chain state at the designated upgrade height -4. **Node Reconfiguration:** Wire the `ev-abci` start handler into your node's entrypoint -5. **Migration Execution:** Run `appd evolve-migrate` to transform the exported state -6. **Chain Restart:** Start the new `ev-abci` node with the migrated state - -This document will guide you through each phase. - ---- - -## Phase 1: Code Preparation - Add Migration Module and Staking Wrapper - -The first step prepares your existing chain for migration by integrating the necessary modules. - -### Step 1: Add Migration Manager Module - -Add the `migrationmngr` module to your application. This module manages the transition from a PoS validator set to a sequencer-based model. - -*Note: For detailed information about the migration manager, please refer to the [migration manager documentation](https://github.com/evstack/ev-abci/tree/main/modules/migrationmngr).* - -In your `app.go` file: - -1. Import the migration manager module: - -```go -import ( - // ... - migrationmngr "github.com/evstack/ev-abci/modules/migrationmngr" - migrationmngrkeeper "github.com/evstack/ev-abci/modules/migrationmngr/keeper" - migrationmngrtypes "github.com/evstack/ev-abci/modules/migrationmngr/types" - // ... -) -``` - -1. Add the migration manager keeper to your app struct -2. Register the module in your module manager -3. Configure the migration manager in your app initialization - -### Step 2: Replace Staking Module with Wrapper - -**Goal:** Ensure the `migrationmngr` module is the *sole* source of validator set updates during migration. - -Replace the standard Cosmos SDK `x/staking` module with the **staking wrapper module** provided in `ev-abci`. The wrapper's `EndBlock` method prevents validator updates from the staking module, delegating that responsibility to the `migrationmngr` module during migration. 
- -In your `app.go` file (and any other files that import the staking module): - -**Replace this:** - -```go -import ( - // ... - "github.com/cosmos/cosmos-sdk/x/staking" - stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" - stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" - // ... -) -``` - -**With this:** - -```go -import ( - // ... - "github.com/evstack/ev-abci/modules/staking" // The wrapper module - stakingkeeper "github.com/evstack/ev-abci/modules/staking/keeper" - stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" // Staking types remain the same - // ... -) -``` - -By changing the import path, your application will automatically use the wrapper module. No other changes to your `EndBlocker` method are needed. - ---- - -## Phase 2: Create Upgrade Handler - -Create an upgrade handler in your `app.go` that will be triggered when the governance proposal is executed. - -```go -func (app *App) setupUpgradeHandlers() { - app.UpgradeKeeper.SetUpgradeHandler( - "v2-migrate-to-evolve", // Upgrade name must match governance proposal - func(ctx sdk.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) { - // The upgrade handler can initialize state for the migration manager if needed - // The actual migration will happen during the evolve-migrate step - return app.mm.RunMigrations(ctx, app.configurator, fromVM) - }, - ) -} -``` - -Call this function in your app initialization code in `app.go`. - ---- - -## Phase 3: Create Governance Proposal for Migration - -Create and submit a software upgrade governance proposal to initiate the migration at a specific block height. 
- -```bash -# Create the governance proposal - tx gov submit-proposal software-upgrade v2-migrate-to-evolve \ - --title "Migrate to Evolve" \ - --description "Upgrade chain to use ev-abci consensus" \ - --upgrade-height \ - --from \ - --chain-id - -# Vote on the proposal (repeat for validators to reach quorum) - tx gov vote yes --from -``` - -Wait for the proposal to pass and for the chain to reach the upgrade height. The chain will halt at the specified height, waiting for the upgrade to be applied. - -### Trigger Migration to Evolve - -After the upgrade proposal has passed, submit the `MsgMigrateToEvolve` message to initiate the actual migration process. This can be done through a governance proposal or directly if your chain's authority allows it. - -```bash -# Submit MsgMigrateToEvolve governance proposal (if using governance) - tx gov submit-proposal migrate-to-evolve \ - --title "Trigger Migration to Evolve" \ - --description "Execute migration to ev-abci consensus" \ - --from \ - --chain-id - -# Or submit directly if authority allows (authority address depends on your chain configuration) - tx migrationmngr migrate-to-evolve \ - --from \ - --chain-id -``` - -Once this message is processed, the migration manager module will handle the transition from the PoS validator set to the sequencer-based model. - ---- - -## Phase 4: Wire ev-abci Start Handler in root.go - -**⚠️ Important:** Complete this phase BEFORE the chain halts at the upgrade height. Do NOT start your node yet - you will start it in Phase 6 after running the migration command. - -Modify your node's entrypoint to use the `ev-abci` server commands. - -### Locate Your Application's Entrypoint - -Open the main entrypoint file for your chain's binary, usually found at `cmd//main.go` or `root.go`. - -### Modify the Start Command - -Add the `ev-abci` start handler to your root command. 
This is similar to the [Ignite Apps evolve template](https://github.com/ignite/apps/blob/main/evolve/template/init.go#L48-L60). - -```go -// cmd//main.go (or root.go) -package main - -import ( - "os" - - "github.com/cosmos/cosmos-sdk/server" - "github.com/spf13/cobra" - - // Import the ev-abci server package - evabci_server "github.com/evstack/ev-abci/server" - - "/app" -) - -func main() { - rootCmd := &cobra.Command{ - Use: "", - Short: "Your App Daemon (ev-abci enabled)", - } - - // Keep existing commands (keys, export, etc.) - server.AddCommands(rootCmd, app.DefaultNodeHome, app.New, app.MakeEncodingConfig(), tx.DefaultSignModes) - - // --- Wire ev-abci start handler --- - startCmd := &cobra.Command{ - Use: "start", - Short: "Run the full node with ev-abci", - RunE: func(cmd *cobra.Command, _ []string) error { - return server.Start(cmd, evabci_server.StartHandler()) - }, - } - - evabci_server.AddFlags(startCmd) - rootCmd.AddCommand(startCmd) - // --- End of ev-abci changes --- - - if err := rootCmd.Execute(); err != nil { - server.HandleError(err) - os.Exit(1) - } -} -``` - -### Build Your Application - -Re-build your application's binary with the updated code: - -```sh -go build -o ./cmd/ -``` - -**⚠️ Important:** Do NOT start the node yet. Proceed directly to Phase 5 to run the migration command. - ---- - -## Phase 5: Run evolve-migrate - -After the chain halts at the upgrade height, run the migration command to transform the CometBFT data to Evolve format. - -**⚠️ Critical:** The node must NOT be running when you execute this command. Ensure all node processes are stopped before proceeding. - -```bash -# Run the migration command - evolve-migrate - -# Optional: specify the DA height for the Evolve state (defaults to 1) - evolve-migrate --da-height -``` - -The `evolve-migrate` command performs the following operations: - -1. **Migrates all blocks** from the CometBFT blockstore to the Evolve store -2. **Converts the CometBFT state** to Evolve state format -3. 
**Creates `ev_genesis.json`** - a minimal genesis file that the node will automatically detect and use on subsequent startups -4. **Saves state** to the ABCI execution store for compatibility -5. **Seeds sync stores** with the latest migrated header and data -6. **Cleans up migration state** from the application database - -**Important Notes:** - -- The migration processes blocks in reverse order (from latest to earliest) -- If blocks are missing (e.g., due to pruning), they will be skipped. Migration stops if more than the configured maximum number of blocks are missing -- Vote extensions are not supported in Evolve - if they were enabled in your chain, they will have no effect after migration -- The command operates on the data in your node's home directory (e.g., `~/.appd/data/`) -- After successful migration, the `ev_genesis.json` file will be used automatically on node restart - ---- - -## Phase 6: Start New ev-abci Node - -Start your node with the migrated state: - -```bash - start -``` - -Verify that the node starts successfully: - -```sh -# Check that ev-abci flags are available - start --help - -# You should see flags like: -# --ev-node.attester-mode -# --ev-node.aggregator -# --ev-node.sequencer-url -# etc. -``` - -Your node is now running with `ev-abci` instead of CometBFT. The chain continues from the same state but with the new consensus engine. - ---- - -## Summary - -The migration process follows these key phases: - -1. **Code Preparation:** Modify your chain code to add the migration manager module and staking wrapper -2. **Create Upgrade Handler:** Define the upgrade logic that will be triggered by governance -3. **Governance Proposal:** Submit and pass a software upgrade proposal -4. **Wire Start Handler:** Update your node's entrypoint to use the `ev-abci` start command -5. **Execute Migration:** Run `appd evolve-migrate` to transform the exported state -6. 
**Restart Chain:** Start the new `ev-abci` node with the migrated state - -This approach ensures a smooth migration with minimal downtime and preserves all chain state and history. diff --git a/content/docs/guides/operations/meta.json b/content/docs/guides/operations/meta.json deleted file mode 100644 index 447c593..0000000 --- a/content/docs/guides/operations/meta.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "title": "Operations", - "pages": ["..."] -} diff --git a/content/docs/guides/operations/monitoring.md b/content/docs/guides/operations/monitoring.md deleted file mode 100644 index 6e47357..0000000 --- a/content/docs/guides/operations/monitoring.md +++ /dev/null @@ -1,79 +0,0 @@ -# Evolve Metrics Guide - -## How to configure metrics - -Evolve can report and serve Prometheus metrics, which can be consumed by Prometheus collector(s). - -This functionality is disabled by default. - -To enable Prometheus metrics, set `instrumentation.prometheus=true` in your Evolve node's configuration file. - -Metrics will be served under `/metrics` on port 26660 by default. The listening address can be changed using the `instrumentation.prometheus_listen_addr` configuration option. - -## List of available metrics - -You can find the full list of available metrics in the [Technical Specifications](../learn/specs/block-manager.md#metrics). - -## Viewing Metrics - -Once your Evolve node is running with metrics enabled, you can view the metrics by: - -1. Accessing the metrics endpoint directly: - - ```bash - curl http://localhost:26660/metrics - ``` - -2. Configuring Prometheus to scrape these metrics by adding the following to your `prometheus.yml`: - - ```yaml - scrape_configs: - - job_name: evolve - static_configs: - - targets: ['localhost:26660'] - ``` - -3. Using Grafana with Prometheus as a data source to visualize the metrics. 
- -## Example Prometheus Configuration - -Here's a basic Prometheus configuration to scrape metrics from a Evolve node: - -```yaml -global: - scrape_interval: 15s - evaluation_interval: 15s - -scrape_configs: - - job_name: evolve - static_configs: - - targets: ['localhost:26660'] -``` - -## Troubleshooting - -If you're not seeing metrics: - -1. Ensure metrics are enabled in your configuration with `instrumentation.prometheus=true` -2. Verify the metrics endpoint is accessible: `curl http://localhost:26660/metrics` -3. Check your Prometheus configuration is correctly pointing to your Evolve node -4. Examine the Evolve node logs for any errors related to the metrics server - -## Advanced Configuration - -For more advanced metrics configuration, you can adjust the following settings in your configuration file: - -```yaml -instrumentation: - prometheus: true - prometheus_listen_addr: ":26660" - max_open_connections: 3 - namespace: "evolve" -``` - -These settings allow you to: - -- Enable/disable Prometheus metrics -- Change the listening address for the metrics server -- Limit the maximum number of open connections to the metrics server -- Set a custom namespace for all metrics diff --git a/content/docs/guides/operations/troubleshooting.md b/content/docs/guides/operations/troubleshooting.md deleted file mode 100644 index e90c8a3..0000000 --- a/content/docs/guides/operations/troubleshooting.md +++ /dev/null @@ -1,283 +0,0 @@ -# Troubleshooting - -Common issues and solutions when running Evolve nodes. - -## Diagnostic Commands - -### Check Node Status - -```bash -# Health check -curl http://localhost:7331/health/live -curl http://localhost:7331/health/ready - -# Node status -curl http://localhost:26657/status -``` - -### View Logs - -If running in the foreground, logs are printed to stderr by default. If running as a background service, use the appropriate command: - -```bash -# foreground (default in dev) -./evnode start ... 
2>&1 | tee evnode.log - -# systemd service -journalctl -u evnode -f - -# docker container -docker logs -f -``` - -## Common Issues - -### Node Won't Start - -**Symptom:** Node exits immediately after starting. - -**Solutions:** - -1. Check for port conflicts: - -```bash -lsof -i :26657 -lsof -i :7676 -``` - -1. Verify configuration file syntax: - -```bash -cat ~/.evnode/config/evnode.yml -``` - -1. Check data directory permissions: - -```bash -ls -la ~/.evnode/data -``` - -### DA Connection Failures - -**Symptom:** Logs show `DA layer submission failed` errors. - -**Error example:** - -```text -ERR DA layer submission failed error="connection refused" -``` - -**Solutions:** - -1. Verify DA endpoint is reachable: - -```bash -curl http://localhost:26658/health -``` - -1. Check authentication token (Celestia): - -```bash -celestia light auth write --p2p.network mocha -``` - -1. Verify DA node is fully synced: - -```bash -celestia header sync-state -``` - -### Out of DA Funds - -**Symptom:** `Code: 19` errors in logs. - -**Error example:** - -```text -ERR DA layer submission failed error="Codespace: 'sdk', Code: 19, Message: " -``` - -**Solutions:** - -1. Check DA account balance -2. Fund the account with more tokens -3. Increase gas price to unstick pending transactions: - -```bash ---evnode.da.gas_price 0.05 -``` - -See [Restart Chain Guide](/guides/restart-chain) for detailed steps. - -### P2P Connection Issues - -**Symptom:** Node not syncing, no peers connected. - -**Solutions:** - -1. Verify peer address format: - -```bash -# Correct format -/ip4/1.2.3.4/tcp/7676/p2p/12D3KooWABC... - -# NOT just the peer ID -12D3KooWABC... -``` - -1. Check firewall allows P2P port: - -```bash -sudo ufw status -# Ensure port 7676 (or your P2P port) is allowed -``` - -1. 
Try DA-only sync mode (no P2P): - -```bash -evnode start --evnode.da.address http://localhost:26658 -# Leave --evnode.p2p.peers empty -``` - -### Node Falling Behind - -**Symptom:** `catching_up: true` in status, height increasing slowly. - -**Solutions:** - -1. Check system resources: - -```bash -htop -df -h -``` - -1. Increase DA request timeout: - -```bash ---evnode.da.request_timeout 60s -``` - -1. Verify DA layer is responding quickly: - -```bash -time curl http://localhost:26658/header/sync_state -``` - -### Execution Layer Desync - -**Symptom:** State root mismatches, execution errors. - -**EVM (ev-reth):** - -```bash -# Check ev-reth logs for errors -journalctl -u ev-reth -f - -# Verify Engine API connectivity -curl -X POST -H "Content-Type: application/json" \ - -H "Authorization: Bearer $(cat jwt.hex)" \ - --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \ - http://localhost:8551 -``` - -**Cosmos SDK (ev-abci):** - -```bash -# Check app hash consistency -curl http://localhost:26657/status | jq '.sync_info' -``` - -## Reset Procedures - -### Soft Reset (Keep Genesis) - -Reset state while keeping configuration: - -```bash -# Stop the node -systemctl stop evnode - -# Clear data directory -rm -rf ~/.evnode/data/* - -# Restart -systemctl start evnode -``` - -### Hard Reset (Full Reinitialize) - -Complete reset including configuration: - -```bash -# Stop the node -systemctl stop evnode - -# Remove everything -rm -rf ~/.evnode - -# Reinitialize -evnode init -``` - -### Reset EVM State (ev-reth) - -```bash -# Stop both nodes -systemctl stop evnode ev-reth - -# Clear ev-reth data -rm -rf ~/.ev-reth/db - -# Clear ev-node cache -rm -rf ~/.evnode/data/cache - -# Restart -systemctl start ev-reth evnode -``` - -## Performance Issues - -### High Memory Usage - -1. Reduce cache size in configuration -2. Enable lazy aggregation mode -3. Limit max pending blocks: - -```bash ---evnode.node.max_pending_blocks 50 -``` - -### High CPU Usage - -1. 
Increase block time: - -```bash ---evnode.node.block_time 2s -``` - -1. Check for transaction spam -2. Monitor execution layer performance - -### Disk Space - -1. Check disk usage: - -```bash -du -sh ~/.evnode/data/* -``` - -1. Prune old data (if supported by execution layer) -2. Consider moving data to larger disk - -## Getting Help - -1. Check logs for specific error messages -2. Search [GitHub Issues](https://github.com/evstack/ev-node/issues) -3. Join the community Discord for support - -## See Also - -- [Reset State Guide](/guides/reset-state) - Detailed reset procedures -- [Restart Chain Guide](/guides/restart-chain) - Restarting after issues -- [Monitoring Guide](/guides/operations/monitoring) - Proactive monitoring diff --git a/content/docs/guides/operations/upgrades.md b/content/docs/guides/operations/upgrades.md deleted file mode 100644 index 0027f13..0000000 --- a/content/docs/guides/operations/upgrades.md +++ /dev/null @@ -1,272 +0,0 @@ -# Upgrades - -Guide for upgrading Evolve nodes and handling version migrations. - -## Upgrade Types - -### Minor Upgrades (Patch/Minor Version) - -Non-breaking changes, bug fixes, and minor improvements. - -**Process:** - -1. Stop the node -2. Replace binary -3. Restart - -```bash -# Stop -systemctl stop evnode - -# Upgrade (example with go install) -go install github.com/evstack/ev-node@v1.2.3 - -# Restart -systemctl start evnode -``` - -### Major Upgrades (Breaking Changes) - -May require state migration or coordinated network upgrade. - -**Process:** - -1. Review changelog for breaking changes -2. Coordinate upgrade height with network -3. Stop at designated height -4. Upgrade binary -5. Run any migration scripts -6. 
Restart - -## ev-node Upgrades - -### Check Current Version - -```bash -evnode version -``` - -### Upgrade Binary - -**Using Go:** - -```bash -go install github.com/evstack/ev-node@latest -``` - -**Using Docker:** - -```bash -docker pull evstack/evnode:latest -``` - -**From Source:** - -```bash -cd ev-node -git fetch --tags -git checkout v1.2.3 -make build -``` - -### Configuration Changes - -After upgrading, check for new or changed configuration options: - -1. Review the [changelog](https://github.com/evstack/ev-node/releases) -2. Compare your config with the new defaults -3. Update configuration as needed - -## ev-reth Upgrades - -### Version Compatibility - -ev-reth versions must be compatible with ev-node. Check the compatibility matrix: - -| ev-node | ev-reth | -|---------|---------| -| v1.x | v0.x | - -### Upgrade Process - -```bash -# Stop both nodes -systemctl stop evnode ev-reth - -# Upgrade ev-reth -cd ev-reth -git fetch --tags -git checkout v0.2.0 -cargo build --release - -# Verify chainspec compatibility -# (check for new required fields) - -# Restart -systemctl start ev-reth evnode -``` - -### Database Migrations - -Some ev-reth upgrades require database migration: - -```bash -# Check if migration needed -ev-reth db version - -# Run migration if needed -ev-reth db migrate -``` - -## ev-abci Upgrades - -### Cosmos SDK Compatibility - -ev-abci tracks Cosmos SDK versions. Ensure your app's SDK version is compatible: - -| ev-abci | Cosmos SDK | -|---------|------------| -| v1.x | v0.50.x | - -### Module Upgrades - -For Cosmos SDK apps with custom modules: - -1. Update module dependencies in `go.mod` -2. Run any module migration handlers -3. 
Update genesis if needed - -```go -// In app.go upgrade handler -app.UpgradeKeeper.SetUpgradeHandler( - "v2", - func(ctx sdk.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) { - // Migration logic - return app.ModuleManager.RunMigrations(ctx, app.Configurator(), fromVM) - }, -) -``` - -## Coordinated Network Upgrades - -For networks with multiple node operators: - -### Planning - -1. Announce upgrade timeline (minimum 1 week notice) -2. Agree on upgrade block height -3. Share upgrade binary/instructions - -### Execution - -1. All nodes stop at designated height -2. Operators upgrade binaries -3. Coordinators verify readiness -4. Network restarts - -### Handling Stragglers - -If some nodes don't upgrade: - -- They will reject new blocks (if consensus rules changed) -- They can sync from upgraded nodes after upgrading - -## Rollback Procedures - -If an upgrade causes issues: - -### ev-node Rollback - -```bash -# Stop -systemctl stop evnode - -# Restore previous binary -cp /backup/evnode-v1.1.0 /usr/local/bin/evnode - -# Optionally restore data -# (only if upgrade corrupted state) -rm -rf ~/.evnode/data -cp -r /backup/evnode-data ~/.evnode/data - -# Restart -systemctl start evnode -``` - -### ev-reth Rollback - -```bash -# Stop -systemctl stop ev-reth evnode - -# Restore binary -cp /backup/ev-reth-v0.1.0 /usr/local/bin/ev-reth - -# Restore database if needed -rm -rf ~/.ev-reth/db -cp -r /backup/ev-reth-db ~/.ev-reth/db - -# Restart -systemctl start ev-reth evnode -``` - -## State Migration - -### Export State - -Before major upgrades, export state: - -```bash -# ev-node -evnode export > state-export.json - -# Cosmos SDK -appd export --height > genesis-export.json -``` - -### Migrate State - -If state format changes: - -```bash -# Run migration tool -evnode migrate state-export.json --to-version v2 > state-migrated.json -``` - -### Import State - -```bash -# Initialize with migrated state -evnode init --genesis state-migrated.json 
-``` - -## Best Practices - -### Pre-Upgrade Checklist - -- [ ] Review changelog for breaking changes -- [ ] Test upgrade on testnet first -- [ ] Backup current state -- [ ] Backup configuration files -- [ ] Notify dependent services -- [ ] Schedule maintenance window - -### Post-Upgrade Verification - -- [ ] Node starts successfully -- [ ] Blocks are being produced/synced -- [ ] RPC endpoints responding -- [ ] Metrics reporting correctly -- [ ] P2P connections established - -### Automation - -Consider automating upgrades with tools like: - -- Ansible playbooks -- Kubernetes operators -- systemd timers for scheduled upgrades - -## See Also - -- [Troubleshooting Guide](/guides/operations/troubleshooting) - Handling upgrade issues diff --git a/content/docs/guides/quick-start.md b/content/docs/guides/quick-start.md deleted file mode 100644 index e79ca91..0000000 --- a/content/docs/guides/quick-start.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -description: Quickly start a chain node using the Testapp CLI. ---- - -# Quick start guide - -Welcome to Evolve, a chain framework! The easiest way to launch your network node is by using the Testapp CLI. - -## 📦 Install Testapp (CLI) - -To install Evolve, run the following command in your terminal: - -```bash -# Clone the repository -git clone --depth 1 https://github.com/evstack/ev-node.git -cd ev-node - -# Build the testapp binary -just build - -# Optional: Install to your Go bin directory for system-wide access -just install -``` - -Verify the installation by checking the Evolve version: - -```bash -# If you ran 'just install' -testapp version - -# Or if you only ran 'just build' -./build/testapp version -``` - -A successful installation will display the version number and its associated git commit hash. 
- -```bash -evolve version: v1.0.0-beta.4 -``` - -## 🗂️ Initialize a evolve network node - -To initialize a evolve network node, execute the following command: - -```bash -testapp init --evnode.node.aggregator --evnode.signer.passphrase secret -``` - -## 🚀 Run your evolve network node - -Now that we have our testapp generated and installed, we can launch our chain along with the local DA by running the following command: - -First lets start the local DA network: - -```bash -go install github.com/evstack/ev-node/tools/local-da@latest -local-da -``` - -You should see logs like: - -```bash -9:22AM INF NewLocalDA: initialized LocalDA component=da -9:22AM INF Listening on component=da host=localhost maxBlobSize=1974272 port=7980 -9:22AM INF server started component=da listening_on=localhost:7980 -``` - -To start a basic evolve network node, execute: - -```bash -testapp start --evnode.signer.passphrase secret -``` - -Upon execution, the CLI will output log entries that provide insights into the node's initialization and operation: - -```bash -9:23AM INF creating new client component=main namespace= -KV Executor HTTP server starting on 127.0.0.1:9090 -9:23AM INF KV executor HTTP server started component=main endpoint=127.0.0.1:9090 -9:23AM INF No state found in store, initializing new state component=BlockManager -9:23AM INF using default mempool ttl MempoolTTL=25 component=BlockManager -9:23AM INF starting P2P client component=main -9:23AM INF started RPC server addr=127.0.0.1:7331 component=main -9:23AM INF listening on address address=/ip4/127.0.0.1/tcp/7676/p2p/12D3KooWRzvJuFoQKhQNfaCZWvJFDY4vrCTocdL6H1GCMzywugnV component=main -9:23AM INF listening on address address=/ip4/172.20.10.14/tcp/7676/p2p/12D3KooWRzvJuFoQKhQNfaCZWvJFDY4vrCTocdL6H1GCMzywugnV component=main -9:23AM INF no peers - only listening for connections component=main -9:23AM INF working in aggregator mode block_time=1000 component=main -9:23AM INF using pending block component=BlockManager height=1 
-9:23AM INF Reaper started component=Reaper interval=1000 -``` - -## 🎉 Conclusion - -That's it! Your evolve network node is now up and running. It's incredibly simple to start a blockchain (which is essentially what a chain is) these days using Evolve. Explore further and discover how you can build useful applications on Evolve. Good luck! diff --git a/content/docs/guides/raft_production.md b/content/docs/guides/raft_production.md deleted file mode 100644 index f2bb817..0000000 --- a/content/docs/guides/raft_production.md +++ /dev/null @@ -1,98 +0,0 @@ -# Raft Implementation & Production Configuration - -This guide details the Raft consensus implementation in `ev-node`, used for High Availability (HA) of the Sequencer/Aggregator. It is targeted at experienced DevOps and developers configuring production environments. - -## Overview - -`ev-node` uses the [HashiCorp Raft](https://github.com/hashicorp/raft) implementation to manage leader election and state replication when running in **Aggregator Mode**. - -* **Role**: Ensures only one active Aggregator (Leader) produces blocks at a time. -* **Failover**: Automatically elects a new leader if the current leader fails. -* **Safety**: Synchronizes the block production state to prevent double-signing or fork divergence. - -### Architecture - -* **Transport**: TCP-based transport for inter-node communication. -* **Storage**: [BoltDB](https://github.com/etcd-io/bbolt) is used for both the Raft Log (`raft-log.db`) and Stable Store (`raft-stable.db`). Snapshots are stored as files. -* **FSM (Finite State Machine)**: The State Machine applies `RaftBlockState` (Protobuf) containing the latest block height, hash, and timestamp. -* **Safety Checks**: - * **Startup**: Nodes check for divergence between local block store and Raft state. - * **Leadership Transfer**: Before becoming leader, a node waits for its FSM to catch up (`waitForMsgsLanded`) to prevent proposing blocks from a stale state. 
- * **Shutdown**: The leader attempts to transfer leadership gracefully before shutting down to minimize downtime. - -## Configuration - -Raft is configured via CLI flags or the `config.toml` file under the `[raft]` (or `[rollkit.raft]`) section. - -### Essential Flags - -| Flag | Config Key | Description | Production Value | -|------|------------|-------------|------------------| -| `--evnode.raft.enable` | `raft.enable` | Enable Raft consensus. | `true` | -| `--evnode.raft.node_id` | `raft.node_id` | **Unique** identifier for the node. | e.g., `node-01` | -| `--evnode.raft.raft_addr` | `raft.raft_addr` | TCP address for Raft transport. | `0.0.0.0:5001` (Bind to private IP) | -| `--evnode.raft.raft_dir` | `raft.raft_dir` | Directory for Raft data. | `/data/raft` (Must be persistent) | -| `--evnode.raft.peers` | `raft.peers` | Comma-separated list of peer addresses in format `nodeID@host:port`. | `node-1@10.0.0.1:5001,node-2@10.0.0.2:5001,node-3@10.0.0.3:5001` | -| `--evnode.raft.bootstrap` | `raft.bootstrap` | Bootstrap the cluster. **Required** for initial setup. | `true` (See Limitations) | - -### Timeout Tuning - -Raft timeouts should be tuned relative to your **Block Time** (`--evnode.node.block_time`) to utilize the fast failover capabilities without causing instability. - -| Flag | Default | Recommended Tuning | -|------|---------|--------------------| -| `--evnode.raft.heartbeat_timeout` | `1s` | **10-30% of Leader Lease**. For sub-second block times, lower to `50ms-100ms`. | -| `--evnode.raft.leader_lease_timeout` | `500ms` | **Must be < Election Timeout**. Use `500ms` for 1s block times. For slower chains (e.g., 10s blocks), increase to `1s-2s` to tolerate network jitter. | -| `--evnode.raft.send_timeout` | `1s` | Should be `> 2x RTT`. | - -**Relation to Block Time**: -Ideally, a failover should complete within `2 * BlockTime` to minimize user impact. -* **Fast Chain (BlockTime < 1s)**: Tighten timeouts. Heartbeat `50ms`, Lease `250ms`. 
-* **Standard Chain (BlockTime = 1s)**: Heartbeat `100ms`, Lease `500ms`. -* **Slow Chain (BlockTime > 5s)**: Defaults are usually sufficient (`1s` heartbeat). - -> **Warning**: Setting timeouts too low (< RTT + Jitter) will cause leadership flapping and halted block production. - -## Production Deployment Principles - -### 1. Static Peering & Bootstrap -Current implementation requires **Bootstrap Mode** (`--evnode.raft.bootstrap=true`) for all nodes participating in the cluster initialization. -* **All nodes** should list the full set of peers in `--evnode.raft.peers`. -* The `peers` list format is strict: `NodeID@Host:Port`. -* **Limitation**: Dynamic addition of peers (Run-time Membership Changes) via RPC/CLI is not currently exposed. The cluster membership is static based on the initial bootstrap configuration. - -### 2. Infrastructure Requirements -* **Encrypted Network (CRITICAL)**: Raft traffic is **unencrypted** (plain TCP). You **MUST** run the cluster inside a private network, VPN, or encrypted mesh (e.g., WireGuard, Tailscale). **Never expose Raft ports to the public internet**; doing so allows attackers to hijack the cluster consensus. -* **Cluster Size**: Run an **odd number** of nodes (3 or 5) to tolerate failures (3 nodes tolerate 1 failure; 5 nodes tolerate 2). -* **Storage**: The `--evnode.raft.raft_dir` **MUST** be mounted on persistent storage. Loss of this directory will cause the node to lose its identity and commit history, effectively removing it from the cluster. -* **Network**: Raft requires low-latency, reliable connectivity. Ensure firewall rules allow TCP traffic on `raft_addr`. - -### 3. P2P Interaction & Catch-Up -Raft and P2P work in parallel to ensure reliability: -* **Hot Replication (Raft)**: New blocks produced by the leader are replicated via the Raft transport (Header + Data) to all followers. This ensures low-latency propagation of the chain tip. 
-* **Catch-Up (P2P)**: If a node falls behind (e.g., disconnected for longer than the Raft log retention), it will receive a **Raft Snapshot** to update its consensus state to the latest head. However, the *historical blocks* between its local state and the new head are fetched via the **P2P Network** (or DA). - * **Implication**: You must ensure P2P connectivity (`--p2p.listen_address` and `--p2p.peers`) is configured even for Raft nodes, to allow them to backfill missing data from peers. - -### 4. Lifecycle Management -* **Rolling Restarts**: You can restart nodes one by one. The `ev-node` implementation handles graceful shutdown (leadership transfer) to minimize impact. -* **State Divergence**: If a node falls too far behind or its local store conflicts with Raft (e.g., due to catastrophic disk failure), it may panic on startup to protect safety. In such cases, a manual extensive recovery (wiping state and re-syncing) may be required. - -### 4. Monitoring -Monitor the following metrics (propagated via Prometheus if enabled): -* **Leadership Changes**: Frequent changes indicate network instability or overloaded nodes. -* **Applied Index vs Commit Index**: A growing lag indicates the FSM cannot keep up. - -## Example Command - -```bash -./ev-node start \ - --node.aggregator \ - --raft.enable \ - --raft.node_id="node-1" \ - --raft.raft_addr="0.0.0.0:5001" \ - --raft.raft_dir="/var/lib/ev-node/raft" \ - --raft.bootstrap=true \ - --raft.peers="node-1@10.0.1.1:5001,node-2@10.0.1.2:5001,node-3@10.0.1.3:5001" \ - --p2p.listen_address="/ip4/0.0.0.0/tcp/26656" \ - ...other flags -``` diff --git a/content/docs/guides/reset-state.md b/content/docs/guides/reset-state.md deleted file mode 100644 index d39de23..0000000 --- a/content/docs/guides/reset-state.md +++ /dev/null @@ -1,147 +0,0 @@ -# How to reset the state of your chain - -This guide will walk you through how you reset the state of your chain. 
- -:::warning[Disclaimer] -By definition, resetting the state is deleting your chain's data. Make sure you understand the implications of this prior to completing this guide. -::: - -Some reason you might need to reset the state of your chain are: - -- During testing and development -- During upgrades with breaking changes -- Hardforks - -## Prerequisites - -In order to complete this guide, you will need to have completed either the [quick start tutorial](./quick-start.md) or the [build our chain tutorial](./gm-world.md). - -## Quick Start - -When you run your chain with `testapp start` you will create a `.testapp` directory in your root directory. - -This directory will look like the following. - -```bash -tree $HOME/.testapp - -├── config -│   ├── genesis.json -│   ├── node_key.json -│   ├── evnode.yml -│   └── signer.json -└── data - ├── cache - │   ├── data - │   │   ├── da_included.gob - │   │   ├── hashes.gob - │   │   ├── items_by_hash.gob - │   │   └── items_by_height.gob - │   └── header - │   ├── da_included.gob - │   ├── hashes.gob - │   ├── items_by_hash.gob - │   └── items_by_height.gob - ├── executor - │   ├── 000001.sst - │   ├── 000002.vlog - │   ├── 000003.vlog - │   ├── 00003.mem - │   ├── DISCARD - │   ├── KEYREGISTRY - │   ├── LOCK - │   └── MANIFEST - └── testapp - ├── 000001.sst - ├── 000002.sst - ├── 000002.vlog - ├── 000003.sst - ├── 000003.vlog - ├── DISCARD - ├── KEYREGISTRY - └── MANIFEST -``` - -To reset the state of the chain, delete the content of the `data` directory. - -Alternatively, you can use this command. - -```bash -testapp unsafe-clean -``` - -When you launch your chain again with `testapp start` your `data` directory will be re-populated and you will see your chain starting at block height 1 again. - -## gm-world - -When you ran your gm-world chain in the [build your chain tutorial](./gm-world.md), it created a `.gm` directory in your `$HOME` directory. 
- -This directory will look like the following: - -```bash -tree $HOME/.gm - -├── config -│   ├── app.toml -│   ├── client.toml -│   ├── config.toml -│   ├── genesis.json -│   ├── gentx -│   │   └── gentx-418077c64f0cf5824c24487c9cce38241de677cd.json -│   ├── node_key.json -│   ├── priv_validator_key.json -│   └── evnode.yml -├── data -│   ├── application.db -│   │   ├── 000001.log -│   │   ├── CURRENT -│   │   ├── LOCK -│   │   ├── LOG -│   │   └── MANIFEST-000000 -│   ├── cache -│   │   ├── data -│   │   │   ├── da_included.gob -│   │   │   ├── hashes.gob -│   │   │   ├── items_by_hash.gob -│   │   │   └── items_by_height.gob -│   │   └── header -│   │   ├── da_included.gob -│   │   ├── hashes.gob -│   │   ├── items_by_hash.gob -│   │   └── items_by_height.gob -│   ├── priv_validator_state.json -│   ├── evolve -│   │   ├── 000001.sst -│   │   ├── 000001.vlog -│   │   ├── DISCARD -│   │   ├── KEYREGISTRY -│   │   └── MANIFEST -│   ├── snapshots -│   │   └── metadata.db -│   │   ├── 000001.log -│   │   ├── CURRENT -│   │   ├── LOCK -│   │   ├── LOG -│   │   └── MANIFEST-000000 -│   └── tx_index.db -│   ├── 000001.log -│   ├── CURRENT -│   ├── LOCK -│   ├── LOG -│   └── MANIFEST-000000 -└── keyring-test - ├── 87af99a184613860ee9563be57a9fb4e7b25acb8.address - ├── alice.info - ├── bob.info - └── e24d9eeca2d24193bdd98ed9116ff70f8a2e2b5e.address -``` - -The directories you need to delete to reset your state are in the `data` directory. - -Alternatively, you can run the following command to delete the data directories: - -```bash -gmd comet unsafe-reset-all -``` - -When you launch your chain again with your `gmd start ` command, these data directories will be re-created and you will see your chain starting at block height 1 again. 
diff --git a/content/docs/guides/restart-chain.md b/content/docs/guides/restart-chain.md deleted file mode 100644 index 52babbd..0000000 --- a/content/docs/guides/restart-chain.md +++ /dev/null @@ -1,101 +0,0 @@ -# How to restart your chain - -This guide will teach you how to restart your Evolve chain. - -## Restart chain - -This section covers the case where you need to restart your chain. - -In order to restart your chain, you simply need to run the `d start [...args]` -command for your chain. - -For example, if you ran the [quick start](./quick-start.md) tutorial, you started your chain with: - -```bash -testapp start -``` - -You would have seen output similar to: - -```bash -I[2024-10-17|14:52:12.845] Creating and publishing block module=BlockManager height=7 -I[2024-10-17|14:52:12.845] finalized block module=BlockManager height=7 num_txs_res=0 num_val_updates=0 block_app_hash= -I[2024-10-17|14:52:12.845] executed block module=BlockManager height=7 app_hash= -I[2024-10-17|14:52:12.846] indexed block events module=txindex height=7 -``` - -If you need to restart your chain, you can run the same command again: - -```bash -testapp start -``` - -You will see that the block height will continue from where it left off: - -```bash -I[2024-10-17|14:52:13.845] Creating and publishing block module=BlockManager height=8 -I[2024-10-17|14:52:13.845] finalized block module=BlockManager height=8 num_txs_res=0 num_val_updates=0 block_app_hash= -I[2024-10-17|14:52:13.845] executed block module=BlockManager height=8 app_hash= -I[2024-10-17|14:52:13.845] indexed block events module=txindex height=8 -``` - -It is important to include any additional flags that you used when you first started your chain. For example, if you used the `--evnode.da.namespace` flag, you will need to include that flag when restarting your chain to ensure your chain continues to publish blobs to the same namespace. 
- -## Restart chain after running out of funds - -This section covers the case that the node that -you are using to post blocks to your DA and consensus layer runs out of funds (tokens), -and you need to restart your chain. - -In this example, we're using Celestia's [Mocha testnet](https://docs.celestia.org/how-to-guides/mocha-testnet/) -and running the [quick start](./quick-start.md). In this example, our Celestia DA light node -ran out of Mocha testnet TIA and we are unable to post new blocks to Celestia due to a -[`Code: 19`](https://github.com/cosmos/cosmos-sdk/blob/main/types/errors/errors.go#L95) -error. This error is defined by Cosmos SDK as: - -```go -// ErrTxInMempoolCache defines an ABCI typed error where a tx already exists in the mempool. -ErrTxInMempoolCache = Register(RootCodespace, 19, "tx already in mempool") -``` - -In order to get around this error, and the same error on other Evolve chains, you will need to re-fund your Celestia account and increase the gas fee. This will override the transaction that is stuck in the mempool. - -If you top up the balance of your node and don't increase the gas fee, you will still encounter the `Code: 19` error because there is a transaction (posting block to DA) that is duplicate to one that already exists. In order to get around this, you'll need to increase the gas fee and restart the chain. 
- -### 🟠 Errors in this example {#errors} - -This is what the errors will look like if your DA node runs out of funding or you restart the chain without changing the gas fee: - -```bash -4:51PM INF submitting block to DA layer height=28126 module=BlockManager -4:51PM ERR DA layer submission failed error="Codespace: 'sdk', Code: 19, Message: " attempt=1 module=BlockManager -4:51PM ERR DA layer submission failed Error="Codespace: 'sdk', Code: 19, Message: " attempt=2 module=BlockManager -4:51PM ERR DA layer submission failed error="Codespace: 'sdk', Code: 19, Message: " attempt=3 module=BlockManager -``` - -### 💰 Re-fund your account {#refund-your-account} - -First, you'll need to send more tokens to the account running your Celestia node. If you didn't keep track of your key, you can run the following to get your address: - -```bash -cd $HOME && cd celestia-node -./cel-key list --keyring-backend test --node.type light --p2p.network -``` - -### 🛑 Stopping your chain {#stopping-your-chain} - -You can stop your chain by using `Control + C` in your terminal where the node is running. - -### ⛽ Increase the gas fee {#increase-gas-fee} - -To reiterate, before restarting the chain, you will need to increase the gas fee in order to avoid a `Code: 19` error. See the [How to configure gas price](../learn/config.md#da-gas-price) guide for more information. - -### 🔁 Restarting your chain {#restarting-your-chain} - -Follow the [restart chain](#restart-chain) section above. - -### 🛢️ Reduce gas fee & restart again {#reduce-gas-fee-restart-again} - -In order to save your TIA, we also recommend stopping the chain with `Control + C`, changing the gas fee back to the default (in our case, 8000 utia) and restarting the chain: - -🎊 Congrats! You've successfully restarted your Evolve chain after running out of TIA. 
diff --git a/content/docs/guides/running-nodes/aggregator.md b/content/docs/guides/running-nodes/aggregator.md deleted file mode 100644 index 49f0776..0000000 --- a/content/docs/guides/running-nodes/aggregator.md +++ /dev/null @@ -1,194 +0,0 @@ -# Aggregator Node - -An aggregator (also called sequencer) is the node responsible for producing blocks in an Evolve chain. It collects transactions, orders them, creates blocks, and submits data to the DA layer. - -## Prerequisites - -- ev-node installed -- Access to a DA layer (Celestia or local-da) -- Signer key for block signing - -## Configuration - -Enable aggregator mode with the `--evnode.node.aggregator` flag: - -```bash -evnode start --evnode.node.aggregator -``` - -### Required Flags - -| Flag | Description | -|------------------------------|-------------------------| -| `--evnode.node.aggregator` | Enable block production | -| `--evnode.da.address` | DA layer endpoint | -| `--evnode.signer.passphrase` | Signer key passphrase | - -### Block Time Configuration - -Control how often blocks are produced: - -```bash -# Produce blocks every 500ms -evnode start \ - --evnode.node.aggregator \ - --evnode.node.block_time 500ms -``` - -Default block time is 1 second. 
- -## Lazy Aggregation Mode - -Lazy mode only produces blocks when there are transactions, reducing DA costs during low activity periods: - -```bash -evnode start \ - --evnode.node.aggregator \ - --evnode.node.lazy_aggregator \ - --evnode.node.lazy_block_time 30s -``` - -| Flag | Description | -|---------------------------------|--------------------------------------| -| `--evnode.node.lazy_aggregator` | Enable lazy mode | -| `--evnode.node.lazy_block_time` | Max wait between blocks in lazy mode | - -In lazy mode: - -- Blocks are produced immediately when transactions arrive -- If no transactions, wait up to `lazy_block_time` before producing an empty block -- Reduces DA submission costs during idle periods - -## DA Submission Settings - -Configure how blocks are batched and submitted to DA: - -```bash -evnode start \ - --evnode.node.aggregator \ - --evnode.da.address http://localhost:26658 \ - --evnode.da.namespace "my_namespace" \ - --evnode.da.gas_price 0.01 \ - --evnode.da.batching_strategy adaptive -``` - -### Batching Strategies - -| Strategy | Description | -|-------------|---------------------------------------------| -| `immediate` | Submit as soon as blocks are ready | -| `time` | Wait for time interval before submitting | -| `size` | Wait until batch reaches size threshold | -| `adaptive` | Balance between size and time (recommended) | - -### Max Pending Blocks - -Limit how many blocks can be waiting for DA submission: - -```bash ---evnode.node.max_pending_blocks 100 -``` - -When this limit is reached, block production pauses until some blocks are confirmed on DA. 
- -## Signer Configuration - -The aggregator needs a signer key to sign blocks: - -```bash -# Using file-based signer -evnode start \ - --evnode.node.aggregator \ - --evnode.signer.signer_type file \ - --evnode.signer.signer_path /path/to/keys \ - --evnode.signer.passphrase "your-passphrase" -``` - -## Complete Example - -### EVM Chain (ev-reth) - -```bash -evnode start \ - --evnode.node.aggregator \ - --evnode.node.block_time 1s \ - --evnode.da.address http://localhost:26658 \ - --evnode.da.namespace "my_evm_chain" \ - --evnode.da.gas_price 0.01 \ - --evnode.signer.passphrase "secret" \ - --evnode.rpc.address tcp://0.0.0.0:26657 -``` - -### Cosmos SDK Chain (ev-abci) - -```bash -appd start \ - --evnode.node.aggregator \ - --evnode.node.block_time 1s \ - --evnode.da.address http://localhost:26658 \ - --evnode.da.namespace "my_cosmos_chain" \ - --evnode.signer.passphrase "secret" -``` - -## Monitoring - -Enable metrics to monitor aggregator performance: - -```bash -evnode start \ - --evnode.node.aggregator \ - --evnode.instrumentation.prometheus \ - --evnode.instrumentation.prometheus_listen_addr :2112 -``` - -Key metrics to watch: - -- `evolve_block_height` - Current block height -- `evolve_da_submission_total` - DA submissions count -- `evolve_da_submission_failures` - Failed DA submissions - -Enable the DA visualizer for detailed submission monitoring: - -```bash ---evnode.rpc.enable_da_visualization -``` - -Then access `http://localhost:7331/da` in your browser. - -## Health Checks - -The aggregator exposes health endpoints: - -```bash -# Liveness check -curl http://localhost:7331/health/live - -# Readiness check (includes block production rate) -curl http://localhost:7331/health/ready -``` - -## Troubleshooting - -### Blocks Not Being Produced - -1. Verify aggregator mode is enabled in logs -2. Check DA layer connectivity -3. Ensure signer key is accessible - -### DA Submission Failures - -1. Check DA layer endpoint is reachable -2. 
Verify DA account has sufficient funds -3. Increase gas price if transactions are being outbid - -### High Pending Block Count - -1. Reduce block time or enable lazy mode -2. Increase DA gas price for faster inclusion -3. Check DA layer congestion - -## See Also - -- [Full Node Guide](/guides/running-nodes/full-node) - Running a non-producing node -- [DA Visualization](/guides/tools/visualizer) - Monitor DA submissions -- [Monitoring Guide](/guides/operations/monitoring) - Prometheus metrics diff --git a/content/docs/guides/running-nodes/attester.md b/content/docs/guides/running-nodes/attester.md deleted file mode 100644 index 66b7e54..0000000 --- a/content/docs/guides/running-nodes/attester.md +++ /dev/null @@ -1,67 +0,0 @@ -# Attester Node - -Attester nodes participate in the validator network to provide faster soft finality through attestations. This is an advanced feature for chains requiring sub-DA-finality confirmation times. - -## Overview - -Attesters: - -- Validate blocks produced by the aggregator -- Sign attestations confirming block validity -- Participate in a soft consensus protocol -- Enable faster finality than DA-only confirmation - -## Status - -The attester network feature is under active development. This documentation will be updated as the feature matures. - -For technical details on the validator network design, see [ADR-022: Validator Network](https://github.com/evstack/ev-node/blob/main/specs/src/adr/adr-022-validator-network.md). - -## How It Works - -### Soft Finality - -Without attesters, finality depends on DA confirmation (~6-12 seconds for Celestia). With an attester network: - -1. Aggregator produces block -2. Attesters validate and sign attestations -3. When threshold of attestations collected, block has soft finality -4. 
DA finality provides hard finality later - -### Trust Model - -- Soft finality requires trusting the attester set (configurable threshold) -- Hard finality (DA) remains trustless -- Applications can choose which finality level to wait for - -## Configuration (Preview) - -```bash -# Run as attester (preview configuration) -evnode start \ - --evnode.node.attester \ - --evnode.da.address http://localhost:26658 \ - --evnode.p2p.peers /ip4/sequencer.example.com/tcp/7676/p2p/12D3KooW... -``` - -## Use Cases - -### Low-Latency Applications - -Applications requiring confirmation faster than DA finality: - -- Trading platforms -- Gaming -- Real-time settlement - -### Enhanced Security - -Additional validation layer before DA confirmation: - -- Multi-party validation -- Early fraud detection - -## See Also - -- [Finality Concepts](/concepts/finality) - Understanding finality in Evolve -- [Full Node Guide](/guides/running-nodes/full-node) - Running a full node diff --git a/content/docs/guides/running-nodes/full-node.md b/content/docs/guides/running-nodes/full-node.md deleted file mode 100644 index 9b11acc..0000000 --- a/content/docs/guides/running-nodes/full-node.md +++ /dev/null @@ -1,105 +0,0 @@ -# Chain Full Node Setup Guide - -## Introduction - -This guide covers how to set up a full node to run alongside a sequencer node in a Evolve-based blockchain network. A full node maintains a complete copy of the blockchain and helps validate transactions, improving the network's decentralization and security. - -> **Note: The guide on how to run an Evolve EVM full node can be found [in the evm section](../../guides/evm/single.md#setting-up-a-full-node).** - -## Prerequisites - -Before proceeding, ensure that you have completed the [build a chain](../../guides/gm-world.md) tutorial, which covers setting-up, building and running your chain. - -Ensure that you have: - -- A local Data Availability (DA) network node running on port `7980`. 
-- A Evolve sequencer node running and posting blocks to the DA network. - -## Setting Up Your Full Node - -### Initialize Chain Config and Copy Genesis File - -Let's set a terminal variable for the chain ID. - -```bash -CHAIN_ID=gm -``` - -Initialize the chain config for the full node, lets call it `FullNode` and set the chain ID to your chain ID: - -```bash -gmd init FullNode --chain-id $CHAIN_ID --home $HOME/.${CHAIN_ID}_fn -``` - -Copy the genesis file from the sequencer node: - -```bash -cp $HOME/.$CHAIN_ID/config/genesis.json $HOME/.${CHAIN_ID}_fn/config/genesis.json -``` - -### Set Up P2P Connection to Sequencer Node - -Identify the sequencer node's P2P address from its logs. It will look similar to: - -```text -1:55PM INF listening on address=/ip4/127.0.0.1/tcp/7676/p2p/12D3KooWJbD9TQoMSSSUyfhHMmgVY3LqCjxYFz8wQ92Qa6DAqtmh module=p2p -``` - -Create an environment variable with the P2P address: - -```bash -export P2P_ID="12D3KooWJbD9TQoMSSSUyfhHMmgVY3LqCjxYFz8wQ92Qa6DAqtmh" -``` - -### Start the Full Node - -We are now ready to run our full node. If we are running the full node on the same machine as the sequencer, we need to make sure we update the ports to avoid conflicts. - -Make sure to include these flags with your start command: - -```sh - --rpc.laddr tcp://127.0.0.1:46657 \ - --grpc.address 127.0.0.1:9390 \ - --p2p.laddr "0.0.0.0:46656" \ - --api.address tcp://localhost:1318 -``` - -Run your full node with the following command: - -```bash -gmd start \ - --evnode.da.address http://127.0.0.1:7980 \ - --p2p.seeds $P2P_ID@127.0.0.1:7676 \ - --minimum-gas-prices 0stake \ - --rpc.laddr tcp://127.0.0.1:46657 \ - --grpc.address 127.0.0.1:9390 \ - --p2p.laddr "0.0.0.0:46656" \ - --api.address tcp://localhost:1318 \ - --home $HOME/.${CHAIN_ID}_fn -``` - -Key points about this command: - -- `chain_id` is generally the `$CHAIN_ID`, which is `gm` in this case. -- The ports and addresses are different from the sequencer node to avoid conflicts. 
Not everything may be necessary for your setup. -- We use the `P2P_ID` environment variable to set the seed node. -- You can set the same values in your app config file instead of passing every option via CLI flags. - -## Verifying Full Node Operation - -After starting your full node, you should see output similar to: - -``` bash -2:33PM DBG indexed transactions height=1 module=txindex num_txs=0 -2:33PM INF block marked as DA included blockHash=7897885B959F52BF0D772E35F8DA638CF8BBC361C819C3FD3E61DCEF5034D1CC blockHeight=5532 module=BlockManager -``` - -This output indicates that your full node is successfully connecting to the network and processing blocks. - -:::tip -If your chain uses EVM as an execution layer and you see an error like `datadir already used by another process`, it means you have to remove all the state from chain data directory (`/root/.yourchain_fn/data/`) and specify a different data directory for the EVM client. -::: - -## Conclusion - -You've now set up a full node running alongside your Evolve sequencer. diff --git a/content/docs/guides/running-nodes/meta.json b/content/docs/guides/running-nodes/meta.json deleted file mode 100644 index 54db198..0000000 --- a/content/docs/guides/running-nodes/meta.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "title": "Running Nodes", - "pages": ["..."] -} diff --git a/content/docs/guides/tools/blob-decoder.md b/content/docs/guides/tools/blob-decoder.md deleted file mode 100644 index 8879f39..0000000 --- a/content/docs/guides/tools/blob-decoder.md +++ /dev/null @@ -1,158 +0,0 @@ -# Blob Decoder Tool - -The blob decoder is a utility tool for decoding and inspecting blobs from Celestia (DA) layers. It provides both a web interface and API for decoding blob data into human-readable format. - -## Overview - -The blob decoder helps developers and operators inspect the contents of blobs submitted to DA layers. 
It can decode: - -- Raw blob data (hex or base64 encoded) -- Block data structures -- Transaction payloads -- Protobuf-encoded messages - -## Usage - -### Starting the Server - -```bash -# Run with default port (8080) -go run tools/blob-decoder/main.go -``` - -The server will start and display: - -- Web interface URL: `http://localhost:8080` -- API endpoint: `http://localhost:8080/api/decode` - -### Web Interface - -1. Open your browser to `http://localhost:8080` -2. Paste your blob data in the input field -3. Select the encoding format (hex or base64) -4. Click "Decode" to see the parsed output - -### API Usage - -The decoder provides a REST API for programmatic access: - -```bash -# Decode hex-encoded blob -curl -X POST http://localhost:8080/api/decode \ - -H "Content-Type: application/json" \ - -d '{ - "data": "0x1234abcd...", - "encoding": "hex" - }' - -# Decode base64-encoded blob -curl -X POST http://localhost:8080/api/decode \ - -H "Content-Type: application/json" \ - -d '{ - "data": "SGVsbG8gV29ybGQ=", - "encoding": "base64" - }' -``` - -#### API Request Format - -```json -{ - "data": "string", // The encoded blob data - "encoding": "string" // Either "hex" or "base64" -} -``` - -#### API Response Format - -```json -{ - "success": true, - "decoded": { - // Decoded data structure - }, - "error": "string" // Only present if success is false -} -``` - -## Supported Data Types - -### Block Data - -The decoder can parse ev-node block structures: - -- Block height -- Timestamp -- Parent hash -- Transaction list -- Validator information -- Data commitments - -### Transaction Data - -Decodes individual transactions including: - -- Transaction type -- Sender/receiver addresses -- Value/amount -- Gas parameters -- Payload data - -### Protobuf Messages - -Automatically detects and decodes protobuf-encoded messages used in ev-node: - -- Block headers -- Transaction batches -- State updates -- DA commitments - -## Examples - -### Decoding a Block Blob - -```bash -# 
Example block blob (hex encoded) -curl -X POST http://localhost:8080/api/decode \ - -H "Content-Type: application/json" \ - -d '{ - "data": "0a2408011220...", - "encoding": "hex" - }' -``` - -Response: - -```json -{ - "success": true, - "decoded": { - "height": 100, - "timestamp": "2024-01-15T10:30:00Z", - "parentHash": "0xabc123...", - "transactions": [ - { - "type": "transfer", - "from": "0x123...", - "to": "0x456...", - "value": "1000000000000000000" - } - ] - } -} -``` - -### Decoding DA Commitment - -```bash -curl -X POST http://localhost:8080/api/decode \ - -H "Content-Type: application/json" \ - -d '{ - "data": "eyJjb21taXRtZW50IjogIi4uLiJ9", - "encoding": "base64" - }' -``` - -### Celestia - -For Celestia blobs, you can decode namespace data and payment information from [celenium](https://celenium.io/namespaces). diff --git a/content/docs/guides/tools/meta.json b/content/docs/guides/tools/meta.json deleted file mode 100644 index aaff4e2..0000000 --- a/content/docs/guides/tools/meta.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "title": "Tools", - "pages": ["..."] -} diff --git a/content/docs/guides/tools/visualizer.md b/content/docs/guides/tools/visualizer.md deleted file mode 100644 index 52565f2..0000000 --- a/content/docs/guides/tools/visualizer.md +++ /dev/null @@ -1,240 +0,0 @@ -# DA Visualizer - -The Data Availability (DA) Visualizer is a built-in monitoring tool in Evolve that provides real-time insights into blob submissions to the DA layer. It offers a web-based interface for tracking submission statistics, monitoring DA layer health, and analyzing blob details. - -**Note**: Only aggregator nodes submit data to the DA layer. Non-aggregator nodes will not display submission data. 
- -## Overview - -The DA Visualizer provides: - -- Real-time monitoring of blob submissions (last 100 submissions) -- Success/failure statistics and trends -- Gas price tracking and cost analysis -- DA layer health monitoring -- Detailed blob inspection capabilities -- Recent submission history - -## Enabling the DA Visualizer - -The DA Visualizer is disabled by default. To enable it, use the following configuration: - -### Via Command-line Flag - -```bash -testapp start --evnode.rpc.enable_da_visualization -``` - -### Via Configuration File - -Add the following to your `evnode.yml` configuration file: - -```yaml -rpc: - enable_da_visualization: true -``` - -## Accessing the DA Visualizer - -Once enabled, the DA Visualizer is accessible through your node's RPC server. By default, this is: - -```text -http://localhost:7331/da -``` - -The visualizer provides several API endpoints and a web interface: - -### Web Interface - -Navigate to `http://localhost:7331/da` in your web browser to access the interactive dashboard. - -### API Endpoints - -The following REST API endpoints are available for programmatic access: - -#### Get Recent Submissions - -```bash -GET /da/submissions -``` - -Returns the most recent blob submissions (up to 100 kept in memory). - -#### Get Blob Details - -```bash -GET /da/blob?id={blob_id} -``` - -Returns detailed information about a specific blob submission. 
- -#### Get DA Statistics - -```bash -GET /da/stats -``` - -Returns aggregated statistics including: - -- Total submissions count -- Success/failure rates -- Average gas price -- Total gas spent -- Average blob size -- Submission trends - -#### Get DA Health Status - -```bash -GET /da/health -``` - -Returns the current health status of the DA layer including: - -- Connection status -- Recent error rates -- Performance metrics -- Last successful submission timestamp - -## Features - -### Real-time Monitoring - -The dashboard automatically updates every 30 seconds, displaying: - -- Recent submission feed with status indicators (last 100 submissions) -- Success rate percentage -- Current gas price trends -- Submission history - -### Submission Details - -Each submission entry shows: - -- Timestamp -- Blob ID with link to detailed view -- Number of blobs in the batch -- Submission status (success/failure) -- Gas price used -- Error messages (if any) - -### Statistics Dashboard - -The statistics section provides: - -- **Performance Metrics**: Success rate, average submission time -- **Cost Analysis**: Total gas spent, average gas price over time -- **Volume Metrics**: Total blobs submitted, average blob size -- **Trend Analysis**: Hourly and daily submission patterns - -### Health Monitoring - -The health status indicator shows: - -- 🟢 **Healthy**: DA layer responding normally -- 🟡 **Warning**: Some failures but overall functional -- 🔴 **Critical**: High failure rate or connection issues - -## Use Cases - -### For Node Operators - -- Monitor the reliability of DA submissions -- Track gas costs and optimize gas price settings -- Identify patterns in submission failures -- Ensure DA layer connectivity - -### For Developers - -- Debug DA submission issues -- Analyze blob data structure -- Monitor application-specific submission patterns -- Test DA layer integration - -### For Network Monitoring - -- Track overall network DA usage -- Identify congestion periods -- Monitor 
gas price fluctuations -- Analyze submission patterns across the network - -## Configuration Options - -When enabling the DA Visualizer, you may want to adjust related RPC settings: - -```yaml -rpc: - address: "0.0.0.0:7331" # Bind to all interfaces for remote access - enable_da_visualization: true -``` - -**Security Note**: If binding to all interfaces (`0.0.0.0`), ensure proper firewall rules are in place to restrict access to trusted sources only. - -## Troubleshooting - -### Visualizer Not Accessible - -1. Verify the DA Visualizer is enabled: - - Check your configuration file or ensure the flag is set - - Look for log entries confirming "DA visualization endpoints registered" - -2. Check the RPC server is running: - - Verify the RPC address in logs - - Ensure no port conflicts - -3. For remote access: - - Ensure the RPC server is bound to an accessible interface - - Check firewall settings - -### No Data Displayed - -1. Verify your node is in aggregator mode (only aggregators submit to DA) -2. Check DA layer connectivity in the node logs -3. Ensure transactions are being processed -4. Note that the visualizer only keeps the last 100 submissions in memory - -### API Errors - -- **404 Not Found**: DA Visualizer not enabled -- **500 Internal Server Error**: Check node logs for DA connection issues -- **Empty responses**: No submissions have been made yet - -## Example Usage - -### Using curl to access the API - -```bash -# Get recent submissions (returns up to 100) -curl http://localhost:7331/da/submissions - -# Get specific blob details -curl http://localhost:7331/da/blob?id=abc123... 
- -# Get statistics -curl http://localhost:7331/da/stats - -# Check DA health -curl http://localhost:7331/da/health -``` - -### Monitoring with scripts - -```bash -#!/bin/bash -# Simple monitoring script - -while true; do - health=$(curl -s http://localhost:7331/da/health | jq -r '.status') - if [ "$health" != "healthy" ]; then - echo "DA layer issue detected: $health" - # Send alert... - fi - sleep 30 -done -``` - -## Related Configuration - -For complete DA layer configuration options, see the [Config Reference](../../learn/config.md#data-availability-configuration-da). - -For metrics and monitoring setup, see the [Metrics Guide](../metrics.md). diff --git a/content/docs/guides/use-tia-for-gas.md b/content/docs/guides/use-tia-for-gas.md deleted file mode 100644 index 0ef7f75..0000000 --- a/content/docs/guides/use-tia-for-gas.md +++ /dev/null @@ -1,253 +0,0 @@ -# How to use IBC token (TIA) as gas token in your chain - -## 🌞 Introduction {#introduction} - -This tutorial will guide you through building a sovereign `gm-world` chain using Evolve, with TIA as the gas token. Unlike the [quick start guide](./quick-start.md), which uses a native chain token for gas, this tutorial demonstrates how to integrate an IBC-enabled token, TIA, as the gas token within the chain, providing a deeper exploration of sovereign chain development. - -No prior understanding of the build process is required, just that it utilizes the [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) for blockchain applications. - -## Requirements {#requirements} - -Before proceeding, ensure that you have completed the [build a chain](./gm-world.md) tutorial, which covers setting-up, building and running your chain. - -:::tip -This tutorial explores Evolve, currently in Alpha. If you encounter bugs, please report them via a GitHub issue ticket or reach out in our Telegram group. 
-::: - -## Setup your local DA network {#setup-local-da} - -Your local DA network is already running if you followed the [quick start guide](./quick-start.md) or the [build a chain](./gm-world.md). If not, you can start it with the following command: - -```bash -go install github.com/evstack/ev-node/tools/local-da@latest -local-da -``` - -## 🚀 Starting your chain {#start-your-chain} - -Start the chain, posting to the local DA network: - -```bash -gmd start --evnode.node.aggregator --evnode.da.address http://localhost:7980 --minimum-gas-prices="0.02ibc/C3E53D20BC7A4CC993B17C7971F8ECD06A433C10B6A96F4C4C3714F0624C56DA,0.025stake" -``` - -Note that we specified the gas token to be IBC TIA. We still haven't made an IBC connection to Celestia's Mocha testnet, however, if we assume our first channel will be an ICS-20 transfer channel to Celestia, we can already calculate the token denom using this formula: - -```js -"ibc/" + toHex(sha256(toUtf8("transfer/channel-0/utia"))).toUpperCase(); -``` - -Now you should see the logs of the running node: - -```bash -12:21PM INF starting node with ABCI CometBFT in-process module=server -12:21PM INF starting node with Evolve in-process module=server -12:21PM INF service start impl=multiAppConn module=proxy msg="Starting multiAppConn service" -12:21PM INF service start connection=query impl=localClient module=abci-client msg="Starting localClient service" -12:21PM INF service start connection=snapshot impl=localClient module=abci-client msg="Starting localClient service" -12:21PM INF service start connection=mempool impl=localClient module=abci-client msg="Starting localClient service" -12:21PM INF service start connection=consensus impl=localClient module=abci-client msg="Starting localClient service" -12:21PM INF service start impl=EventBus module=events msg="Starting EventBus service" -12:21PM INF service start impl=PubSub module=pubsub msg="Starting PubSub service" -12:21PM INF Using default mempool ttl MempoolTTL=25 
module=BlockManager -12:21PM INF service start impl=IndexerService module=txindex msg="Starting IndexerService service" -12:21PM INF service start impl=RPC module=server msg="Starting RPC service" -12:21PM INF service start impl=Node module=server msg="Starting Node service" -12:21PM INF starting P2P client module=server -12:21PM INF serving HTTP listen address=127.0.0.1:26657 module=server -12:21PM INF listening on address=/ip4/127.0.0.1/tcp/26656/p2p/12D3KooWSicdPmMTLf9fJbSSHZc9UVP1CbNqKPpbYVbgxHvbhAUY module=p2p -12:21PM INF listening on address=/ip4/163.172.162.109/tcp/26656/p2p/12D3KooWSicdPmMTLf9fJbSSHZc9UVP1CbNqKPpbYVbgxHvbhAUY module=p2p -12:21PM INF no seed nodes - only listening for connections module=p2p -12:21PM INF working in aggregator mode block time=1000 module=server -12:21PM INF Creating and publishing block height=22 module=BlockManager -12:21PM INF starting gRPC server... address=127.0.0.1:9290 module=grpc-server -12:21PM INF finalized block block_app_hash=235D3710D61F347DBBBDD6FD63AA7687842D1EF9CB475C712856D7DA32F82F09 height=22 module=BlockManager num_txs_res=0 num_val_updates=0 -12:21PM INF executed block app_hash=235D3710D61F347DBBBDD6FD63AA7687842D1EF9CB475C712856D7DA32F82F09 height=22 module=BlockManager -12:21PM INF indexed block events height=22 module=txindex -... -``` - -## ✨ Connecting to Celestia Mocha testnet using IBC {#ibc-to-celestia} - -Next, we will establish an IBC connection with the Celestia Mocha testnet to enable TIA transfers for gas usage on our chain. 
- -Install the IBC relayer: - -```bash -git clone --depth 1 --branch v2.5.2 https://github.com/cosmos/relayer.git /tmp/relayer -cd /tmp/relayer -make install -``` - -Configure the relayer: - -```bash -rly config init - -mkdir -p "$HOME/.relayer/keys/{gm,mocha-4}" - -echo "global: - api-listen-addr: :5183 - timeout: 10s - memo: '' - light-cache-size: 20 - log-level: info - ics20-memo-limit: 0 - max-receiver-size: 150 -chains: - gm_chain: - type: cosmos - value: - key-directory: '$HOME/.relayer/keys/gm' - key: a - chain-id: gm - rpc-addr: http://localhost:26657 - account-prefix: gm - keyring-backend: test - gas-adjustment: 1.5 - gas-prices: 0.025stake - min-gas-amount: 0 - max-gas-amount: 0 - debug: false - timeout: 20s - block-timeout: '' - output-format: json - sign-mode: direct - extra-codecs: [] - coin-type: 118 - signing-algorithm: '' - broadcast-mode: batch - min-loop-duration: 0s - extension-options: [] - feegrants: null - mocha: - type: cosmos - value: - key-directory: '$HOME/.relayer/keys/mocha-4' - key: a - chain-id: mocha-4 - rpc-addr: https://celestia-testnet-rpc.publicnode.com:443 - account-prefix: celestia - keyring-backend: test - gas-adjustment: 1.5 - gas-prices: 0.15utia - min-gas-amount: 0 - max-gas-amount: 0 - debug: false - timeout: 20s - block-timeout: '' - output-format: json - sign-mode: direct - extra-codecs: [] - coin-type: 118 - signing-algorithm: '' - broadcast-mode: batch - min-loop-duration: 0s - extension-options: [] - feegrants: null -paths: - gm_mocha-4: - src: - chain-id: gm - dst: - chain-id: mocha-4 - src-channel-filter: - rule: '' - channel-list: [] -" > "$HOME/.relayer/config/config.yaml" - -rly keys restore gm_chain a "regret resist either bid upon yellow leaf early symbol win market vital" -rly keys restore mocha a "regret resist either bid upon yellow leaf early symbol win market vital" -``` - -Get the relayer accounts: - -```bash -rly address gm_chain a # => gm1jqevcsld0dqpjp3csfg7alkv3lehvn8uswknrc -rly address mocha a # => 
celestia1jqevcsld0dqpjp3csfg7alkv3lehvn8u04ymsu -``` - -Note: These accounts should always be the same because of the hardcoded mnemonics that we've loaded in the `rly keys restore` step. - -Fund the relayer on our chain: - -```bash -gmd tx bank send gm-key-2 gm1jqevcsld0dqpjp3csfg7alkv3lehvn8uswknrc 10000000stake --keyring-backend test --chain-id gm --fees 5000stake -y -``` - -Fund the relayer on the Celestia Mocha testnet: - -[Mocha Testnet Faucet Instructions](https://docs.celestia.org/how-to-guides/mocha-testnet#mocha-testnet-faucet). - -Verify the relayer is funded: - -```bash -rly q balance mocha a # => address {celestia1jqevcsld0dqpjp3csfg7alkv3lehvn8u04ymsu} balance {10000000utia} -rly q balance gm_chain a # => address {gm1jqevcsld0dqpjp3csfg7alkv3lehvn8uswknrc} balance {10000000stake} -``` - -Create IBC clients: - -```bash -rly tx client gm_chain mocha gm_mocha-4 --override -rly tx client mocha gm_chain gm_mocha-4 --override -``` - -Create IBC connection: - -```bash -rly tx connection gm_mocha-4 -``` - -Create IBC channel: - -```bash -rly tx channel gm_mocha-4 --src-port transfer --dst-port transfer --version ics20-1 -``` - -Start the relayer: - -```bash -rly start gm_mocha-4 -``` - -Transfer TIA from Mocha to our chain: - -```bash -ACCOUNT_ON_CHAIN="$(evolve keys show -a --keyring-backend test gm-key-2)" -CHANNEL_ID_ON_MOCHA="$(rly q channels mocha gm_chain | jq -r .channel_id | tail -1)" - -rly tx transfer mocha gm_chain 1000000utia "$ACCOUNT_ON_CHAIN" "$CHANNEL_ID_ON_MOCHA" --path gm_mocha-4 -``` - -Verify the account on our chain is funded with IBC TIA: - -```bash -gmd query bank balances "$(evolve keys show -a --keyring-backend test gm-key-2)" -# => -# balances: -# - amount: "1000000" -# denom: ibc/C3E53D20BC7A4CC993B17C7971F8ECD06A433C10B6A96F4C4C3714F0624C56DA -# - amount: "9999999999999999989995000" -# denom: stake -# pagination: -# total: "2" -``` - -## 💸 Transactions {#transactions} - -Finally, send a transaction on our chain using IBC TIA as the 
gas token: - -```bash -ACCOUNT_ON_CHAIN="$(evolve keys show -a --keyring-backend test gm-key-2)" - -# Send the transaction -TX_HASH=$(evolve tx bank send "$ACCOUNT_ON_CHAIN" "$ACCOUNT_ON_CHAIN" 1stake --keyring-backend test --chain-id gm --gas-prices 0.02ibc/C3E53D20BC7A4CC993B17C7971F8ECD06A433C10B6A96F4C4C3714F0624C56DA -y --output json | jq -r .txhash) - -# Verify success -gmd q tx "$TX_HASH" --output json | jq .code # => 0 -``` - -## 🎉 Next steps - -Congratulations! You've built a local chain that posts to a local DA network and uses TIA as the gas token! diff --git a/content/docs/index.mdx b/content/docs/index.mdx deleted file mode 100644 index 3f9769f..0000000 --- a/content/docs/index.mdx +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Evolve Documentation -description: Evolve documentation - build sovereign rollups on any DA layer ---- - -import { Card, Cards } from 'fumadocs-ui/components/card'; -import { BookOpen, Rocket, Wrench, Unplug, Library, Blocks, Cpu, Database, Layers, Play, Upload, Monitor, ArrowRightLeft, Settings, Bot, LayoutDashboard } from 'lucide-react'; - -export const Icon = ({ children }) => ( - {children} -); - -# Evolve Documentation - -Evolve is a sovereign rollup framework that enables developers to build rollups on any Data Availability (DA) layer. 
- -## Get Started - - - } title="Quick Start" description="Build your first rollup in minutes" href="/docs/guides/quick-start" /> - } title="About Evolve" description="Learn the fundamentals and core concepts" href="/docs/learn/about" /> - } title="Architecture" description="Understand how Evolve components work together" href="/docs/overview/architecture" /> - } title="AI-Ready Docs" description="Use with Claude Code, Cursor, ChatGPT, and more" href="/docs/guides/ai-docs" /> - - -## Learn - - - } title="Data Availability" description="How DA works in Evolve" href="/docs/learn/data-availability" /> - } title="Sequencing" description="Transaction ordering" href="/docs/learn/sequencing" /> - } title="Execution" description="State transitions" href="/docs/learn/execution" /> - } title="Transaction Flow" description="End-to-end transaction lifecycle" href="/docs/learn/transaction-flow" /> - - -## Guides - - - } title="Build a Chain" description="Step-by-step chain tutorial" href="/docs/guides/gm-world" /> - } title="Deploy" description="Deploy your rollup to production" href="/docs/guides/deploy/overview" /> - } title="EVM Rollups" description="Build EVM-compatible rollups" href="/docs/guides/evm/single" /> - } title="Full Node" description="Run a full node" href="/docs/guides/full-node" /> - - -## Reference - - - } title="API Documentation" description="RPC API reference" href="/docs/api" /> - } title="Configuration" description="Node configuration reference" href="/docs/reference/configuration/ev-node-config" /> - } title="EV-ABCI" description="Application blockchain interface" href="/docs/ev-abci/introduction" /> - } title="EV-Reth" description="Reth execution engine integration" href="/docs/ev-reth/introduction" /> - diff --git a/content/docs/learn/about.md b/content/docs/learn/about.md deleted file mode 100644 index 1f49b1d..0000000 --- a/content/docs/learn/about.md +++ /dev/null @@ -1,95 +0,0 @@ -# Introduction - -Evolve is the fastest way to launch your own 
modular network — without validator overhead or token lock-in. - -Built on Celestia, Evolve offers L1-level control with L2-level performance. - -This isn't a toolkit. It's a launch stack. - -No fees. No middlemen. No revenue share. - -## What is Evolve - -Evolve is a launch stack for L1s. It gives you full control over execution — without CometBFT, validator ops, or lock-in. - -It's [open-source](https://github.com/evstack/ev-node), production-ready, and fully composable. - -At its core is \`ev-node\`, a modular node that exposes an [Execution interface](https://github.com/evstack/ev-node/blob/main/core/execution/execution.go), — letting you bring any VM or execution logic, including Cosmos SDK or custom-built runtimes. - -Evolving from Cosmos SDK? - -Migrate without rewriting your stack. Bring your logic and state to Evolve and shed validator overhead — all while gaining performance and execution freedom. - -Evolve is how you launch your network. Modular. Production-ready. Yours. - -With Evolve, you get: - -- Full control over execution \- use any VM -- Low-cost launch — no emissions, no validator inflation -- Speed to traction — from local devnet to testnet in minutes -- Keep sequencer revenue — monetize directly -- Optional L1 validator network for fast finality and staking - -Powered by Celestia — toward 1GB blocks, multi-VM freedom, and execution without compromising flexibility or cost. - -## What problems is Evolve solving - -### 1\. Scalability and customizability - -Deploying your decentralized application as a smart contract on a shared blockchain has many limitations. Your smart contract has to share computational resources with every other application, so scalability is limited. - -Plus, you're restricted to the execution environment that the shared blockchain uses, so developer flexibility is limited as well. - -### 2\. Security and time to market - -Deploying a new chain might sound like the perfect solution for the problems listed above. 
While it's somewhat true, deploying a new layer 1 chain presents a complex set of challenges and trade-offs for developers looking to build blockchain products. - -Deploying a legacy layer 1 has huge barriers to entry: time, capital, token emissions and expertise. - -In order to secure the network, developers must bootstrap a sufficiently secure set of validators, incurring the overhead of managing a full consensus network. This requires paying validators with inflationary tokens, putting the network's business sustainability at risk. Network effects are also critical for success, but can be challenging to achieve as the network must gain widespread adoption to be secure and valuable. - -In a potential future with millions of chains, it's unlikely all of those chains will be able to sustainably attract a sufficiently secure and decentralized validator set. - -## Why Evolve - -Evolve solves the challenges encountered during the deployment of a smart contract or a new layer 1, by minimizing these tradeoffs through the implementation of evolve chains. - -With Evolve, developers can benefit from: - -- **Shared security**: Chains inherit security from a data availability layer, by posting blocks to it. Chains reduce the trust assumptions placed on chain sequencers by allowing full nodes to download and verify the transactions in the blocks posted by the sequencer. For optimistic or zk-chains, in case of fraudulent blocks, full nodes can generate fraud or zk-proofs, which they can share with the rest of the network, including light nodes. Our roadmap includes the ability for light clients to receive and verify proofs, so that everyday users can enjoy high security guarantees. - -- **Scalability:** Evolve chains are deployed on specialized data availability layers like Celestia, which directly leverages the scalability of the DA layer. Additionally, chain transactions are executed off-chain rather than on the data availability layer. 
This means chains have their own dedicated computational resources, rather than sharing computational resources with other applications. - -- **Customizability:** Evolve is built as an open source modular framework, to make it easier for developers to reuse the four main components and customize their chains. These components are data availability layers, execution environments, proof systems, and sequencer schemes. - -- **Faster time-to-market:** Evolve eliminates the need to bootstrap a validator set, manage a consensus network, incur high economic costs, and face other trade-offs that come with deploying a legacy layer 1\. Evolve's goal is to make deploying a chain as easy as it is to deploy a smart contract, cutting the time it takes to bring blockchain products to market from months (or even years) to just minutes. - -- **Sovereignty**: Evolve also enables developers to deploy chains for cases where communities require sovereignty. - -## How can you use Evolve - -As briefly mentioned above, Evolve could be used in many different ways. From chains, to settlement layers, and in the future even to L3s. - -### Chain with any VM - -Evolve gives developers the flexibility to use pre-existing ABCI-compatible state machines or create a custom state machine tailored to their chain needs. Evolve does not restrict the use of any specific virtual machine, allowing developers to experiment and bring innovative applications to life. - -### Cosmos SDK - -Similarly to how developers utilize the Cosmos SDK to build a layer 1 chain, the Cosmos SDK could be utilized to create a Evolve-compatible chain. Cosmos-SDK has great [documentation](https://docs.cosmos.network/main) and tooling that developers can leverage to learn. - -Another possibility is taking an existing layer 1 built with the Cosmos SDK and deploying it as a Evolve chain. Evolve gives your network a forward path. 
Migrate seamlessly, keep your logic, and evolve into a modular, high-performance system with no CometBFT bottlenecks and zero validator overhead. - -### Build a settlement layer - -[Settlement layers](https://celestia.org/learn/modular-settlement-layers/settlement-in-the-modular-stack/) are ideal for developers who want to avoid deploying chains. They provide a platform for chains to verify proofs and resolve disputes. Additionally, they act as a hub for chains to facilitate trust-minimized token transfers and liquidity sharing between chains that share the same settlement layer. Think of settlement layers as a special type of execution layer. - -## When can you use Evolve - -As of today, Evolve provides a single sequencer, an execution interface (Engine API or ABCI), and a connection to Celestia. - -We're currently working on implementing many new and exciting features such as light nodes and state fraud proofs. - -Head down to the next section to learn more about what's coming for Evolve. If you're ready to start building, you can skip to the [Guides](../guides/quick-start.md) section. - -Spoiler alert, whichever you choose, it's going to be a great rabbit hole\! diff --git a/content/docs/learn/config.md b/content/docs/learn/config.md deleted file mode 100644 index fed1135..0000000 --- a/content/docs/learn/config.md +++ /dev/null @@ -1,1067 +0,0 @@ -# Config - -This document provides a comprehensive reference for all configuration options available in Evolve. Understanding these configurations will help you tailor Evolve's behavior to your specific needs, whether you're running an aggregator, a full node, or a light client. 
- -## Table of Contents - -- [DA-Only Sync Mode](#da-only-sync-mode) -- [Introduction to Configurations](#configs) -- [Base Configuration](#base-configuration) - - [Root Directory](#root-directory) - - [Database Path](#database-path) - - [Chain ID](#chain-id) -- [Node Configuration (`node`)](#node-configuration-node) - - [Aggregator Mode](#aggregator-mode) - - [Light Client Mode](#light-client-mode) - - [Block Time](#block-time) - - [Maximum Pending Blocks](#maximum-pending-blocks) - - [Lazy Mode (Lazy Aggregator)](#lazy-mode-lazy-aggregator) - - [Lazy Block Interval](#lazy-block-interval) -- [Pruning Configuration (`pruning`)](#pruning-configuration-pruning) -- [Data Availability Configuration (`da`)](#data-availability-configuration-da) - - [DA Service Address](#da-service-address) - - [DA Authentication Token](#da-authentication-token) - - [DA Gas Price](#da-gas-price) - - [DA Gas Multiplier](#da-gas-multiplier) - - [DA Submit Options](#da-submit-options) - - [DA Signing Addresses](#da-signing-addresses) - - [DA Namespace](#da-namespace) - - [DA Header Namespace](#da-namespace) - - [DA Data Namespace](#da-data-namespace) - - [DA Block Time](#da-block-time) - - [DA Mempool TTL](#da-mempool-ttl) - - [DA Request Timeout](#da-request-timeout) - - [DA Batching Strategy](#da-batching-strategy) - - [DA Batch Size Threshold](#da-batch-size-threshold) - - [DA Batch Max Delay](#da-batch-max-delay) - - [DA Batch Min Items](#da-batch-min-items) -- [P2P Configuration (`p2p`)](#p2p-configuration-p2p) - - [P2P Listen Address](#p2p-listen-address) - - [P2P Peers](#p2p-peers) - - [P2P Blocked Peers](#p2p-blocked-peers) - - [P2P Allowed Peers](#p2p-allowed-peers) -- [RPC Configuration (`rpc`)](#rpc-configuration-rpc) - - [RPC Server Address](#rpc-server-address) - - [Enable DA Visualization](#enable-da-visualization) - - [Health Endpoints](#health-endpoints) -- [Instrumentation Configuration (`instrumentation`)](#instrumentation-configuration-instrumentation) - - [Enable 
Prometheus Metrics](#enable-prometheus-metrics) - - [Prometheus Listen Address](#prometheus-listen-address) - - [Maximum Open Connections](#maximum-open-connections) - - [Enable Pprof Profiling](#enable-pprof-profiling) - - [Pprof Listen Address](#pprof-listen-address) -- [Logging Configuration (`log`)](#logging-configuration-log) - - [Log Level](#log-level) - - [Log Format](#log-format) - - [Log Trace (Stack Traces)](#log-trace-stack-traces) -- [Signer Configuration (`signer`)](#signer-configuration-signer) - - [Signer Type](#signer-type) - - [Signer Path](#signer-path) - - [Signer Passphrase](#signer-passphrase) - -## DA-Only Sync Mode - -Evolve supports running nodes that sync exclusively from the Data Availability (DA) layer without participating in P2P networking. This mode is useful for: - -- **Pure DA followers**: Nodes that only need the canonical chain data from DA -- **Resource optimization**: Reducing network overhead by avoiding P2P gossip -- **Simplified deployment**: No need to configure or maintain P2P peer connections -- **Isolated environments**: Nodes that should not participate in P2P communication - -**To enable DA-only sync mode:** - -1. **Leave P2P peers empty** (default behavior): - - ```yaml - p2p: - peers: "" # Empty or omit this field entirely - ``` - -2. **Configure DA connection** (required): - - ```yaml - da: - address: "your-da-service:port" - namespace: "your-namespace" - # ... other DA configuration - ``` - -3. **Optional**: You can still configure P2P listen address for potential future connections, but without peers, no P2P networking will occur. 
 - -When running in DA-only mode, the node will: - -- ✅ Sync blocks and headers from the DA layer -- ✅ Validate transactions and maintain state -- ✅ Serve RPC requests -- ❌ Not participate in P2P gossip or peer discovery -- ❌ Not share blocks with other nodes via P2P -- ❌ Not receive transactions via P2P (only from direct RPC submission) - -## Configs - -Evolve configurations can be managed through a YAML file (typically `evnode.yml` located in `~/.evolve/config/` or `/config/`) and command-line flags. The system prioritizes configurations in the following order (highest priority first): - -1. **Command-line flags:** Override all other settings. -2. **YAML configuration file:** Values specified in the `evnode.yml` file. -3. **Default values:** Predefined defaults within Evolve. - -Environment variables can also be used, typically prefixed with your executable's name (e.g., `YOURAPP_CHAIN_ID="my-chain"`). - -## Base Configuration - -These are fundamental settings for your Evolve node. - -### Root Directory - -**Description:** -The root directory where Evolve stores its data, including the database and configuration files. This is a foundational setting that dictates where all other file paths are resolved from. - -**YAML:** -This option is not set within the YAML configuration file itself, as it specifies the location _of_ the configuration file and other application data. - -**Command-line Flag:** -`--home ` -_Example:_ `--home /mnt/data/evolve_node` -_Default:_ `~/.evolve` (or a directory derived from the application name if `defaultHome` is customized). -_Constant:_ `FlagRootDir` - -### Database Path - -**Description:** -The path, relative to the Root Directory, where the Evolve database will be stored. This database contains blockchain state, blocks, and other critical node data. 
- -**YAML:** -Set this in your configuration file at the top level: - -```yaml -db_path: "data" -``` - -**Command-line Flag:** -`--rollkit.db_path ` -_Example:_ `--rollkit.db_path "node_db"` -_Default:_ `"data"` -_Constant:_ `FlagDBPath` - -### Chain ID - -**Description:** -The unique identifier for your chain. This ID is used to differentiate your network from others and is crucial for network communication and transaction validation. - -**YAML:** -Set this in your configuration file at the top level: - -```yaml -chain_id: "my-evolve-chain" -``` - -**Command-line Flag:** -`--chain_id ` -_Example:_ `--chain_id "super_rollup_testnet_v1"` -_Default:_ `"evolve"` -_Constant:_ `FlagChainID` - -## Node Configuration (`node`) - -Settings related to the core behavior of the Evolve node, including its mode of operation and block production parameters. - -**YAML Section:** - -```yaml -node: - # ... node configurations ... -``` - -### Aggregator Mode - -**Description:** -If true, the node runs in aggregator mode. Aggregators are responsible for producing blocks by collecting transactions, ordering them, and proposing them to the network. - -**YAML:** - -```yaml -node: - aggregator: true -``` - -**Command-line Flag:** -`--rollkit.node.aggregator` (boolean, presence enables it) -_Example:_ `--rollkit.node.aggregator` -_Default:_ `false` -_Constant:_ `FlagAggregator` - -### Light Client Mode - -**Description:** -If true, the node runs in light client mode. Light clients rely on full nodes for block headers and state information, offering a lightweight way to interact with the chain without storing all data. - -**YAML:** - -```yaml -node: - light: true -``` - -**Command-line Flag:** -`--rollkit.node.light` (boolean, presence enables it) -_Example:_ `--rollkit.node.light` -_Default:_ `false` -_Constant:_ `FlagLight` - -### Block Time - -**Description:** -The target time interval between consecutive blocks produced by an aggregator. 
This duration (e.g., "500ms", "1s", "5s") dictates the pace of block production. - -**YAML:** - -```yaml -node: - block_time: "1s" -``` - -**Command-line Flag:** -`--rollkit.node.block_time ` -_Example:_ `--rollkit.node.block_time 2s` -_Default:_ `"1s"` -_Constant:_ `FlagBlockTime` - -### Maximum Pending Blocks - -**Description:** -The maximum number of blocks that can be pending Data Availability (DA) submission. When this limit is reached, the aggregator pauses block production until some blocks are confirmed on the DA layer. Use 0 for no limit. This helps manage resource usage and DA layer capacity. - -**YAML:** - -```yaml -node: - max_pending_blocks: 100 -``` - -**Command-line Flag:** -`--rollkit.node.max_pending_blocks ` -_Example:_ `--rollkit.node.max_pending_blocks 50` -_Default:_ `0` (no limit) -_Constant:_ `FlagMaxPendingBlocks` - -### Lazy Mode (Lazy Aggregator) - -**Description:** -Enables lazy aggregation mode. In this mode, blocks are produced only when new transactions are available in the mempool or after the `lazy_block_interval` has passed. This optimizes resource usage by avoiding the creation of empty blocks during periods of inactivity. - -**YAML:** - -```yaml -node: - lazy_mode: true -``` - -**Command-line Flag:** -`--rollkit.node.lazy_mode` (boolean, presence enables it) -_Example:_ `--rollkit.node.lazy_mode` -_Default:_ `false` -_Constant:_ `FlagLazyAggregator` - -### Lazy Block Interval - -**Description:** -The maximum time interval between blocks when running in lazy aggregation mode (`lazy_mode`). This ensures that blocks are produced periodically even if there are no new transactions, keeping the chain active. This value is generally larger than `block_time`. 
- -**YAML:** - -```yaml -node: - lazy_block_interval: "30s" -``` - -**Command-line Flag:** -`--rollkit.node.lazy_block_interval ` -_Example:_ `--rollkit.node.lazy_block_interval 1m` -_Default:_ `"30s"` -_Constant:_ `FlagLazyBlockTime` - -### Pruning Configuration (`pruning`) - -**Description:** -Controls automatic pruning of stored block data and metadata from the local store. Pruning helps manage disk space by periodically removing old blocks and their associated state, while keeping a recent window of history for validation and queries. - -**Pruning Modes:** - -- **`disabled`** (default): Archive mode - keeps all blocks and metadata indefinitely -- **`metadata`**: Prunes only state metadata (execution state snapshots), keeps all blocks -- **`all`**: Prunes both blocks (headers, data, signatures) and metadata - -**How Pruning Works:** - -When pruning is enabled, the pruner runs at the configured interval and removes data beyond the retention window (`pruning_keep_recent`). The system uses intelligent batching to avoid overwhelming the node: - -- **Batch sizes are automatically calculated** based on your `pruning_interval` and `block_time` -- **Catch-up mode**: When first enabling pruning on an existing node, smaller batches (2× blocks per interval) are used to gradually catch up without impacting performance -- **Normal mode**: Once caught up, larger batches (4× blocks per interval) are used for efficient maintenance -- **Progress tracking**: Pruning progress is saved after each batch, so restarts don't lose progress - -**Batch Size Examples:** - -With default settings (15 minute interval, 1 second blocks): -- Catch-up: ~1,800 blocks per run -- Normal: ~3,600 blocks per run - -With high-throughput chain (15 minute interval, 100ms blocks): -- Catch-up: ~18,000 blocks per run -- Normal: ~36,000 blocks per run - -**YAML:** - -```yaml -pruning: - pruning_mode: "all" - pruning_keep_recent: 100000 - pruning_interval: "15m" -``` - -**Command-line Flags:** - -- 
`--evnode.pruning.pruning_mode ` - - _Description:_ Pruning mode: 'disabled' (keep all), 'metadata' (prune state only), or 'all' (prune blocks and state) - - _Example:_ `--evnode.pruning.pruning_mode all` - - _Default:_ `"disabled"` - -- `--evnode.pruning.pruning_keep_recent ` - - _Description:_ Number of most recent blocks/metadata to retain when pruning is enabled. Must be > 0 when pruning is enabled. - - _Example:_ `--evnode.pruning.pruning_keep_recent 100000` - - _Default:_ `0` - -- `--evnode.pruning.pruning_interval ` - - _Description:_ How often to run the pruning process. Must be >= block_time when pruning is enabled. Larger intervals allow larger batch sizes. - - _Example:_ `--evnode.pruning.pruning_interval 15m` - - _Default:_ `0` (disabled) - -_Constants:_ `FlagPruningMode`, `FlagPruningKeepRecent`, `FlagPruningInterval` - -**Important Notes:** - -- When DA is enabled (DA address is configured), pruning only removes blocks that have been confirmed on the DA layer (for mode `all`) to ensure data safety -- When DA is not enabled (no DA address configured), pruning proceeds based solely on store height, allowing nodes without DA to manage disk space -- The first pruning run after enabling may take several cycles to catch up, processing data in smaller batches -- Pruning cannot be undone - ensure your retention window is sufficient for your use case -- For production deployments, consider keeping at least 100,000 recent blocks -- The pruning interval should be balanced with your disk space growth rate - -## Data Availability Configuration (`da`) - -Parameters for connecting and interacting with the Data Availability (DA) layer, which Evolve uses to publish block data. - -**YAML Section:** - -```yaml -da: - # ... DA configurations ... -``` - -### DA Service Address - -**Description:** -The network address (host:port) of the Data Availability layer service. Evolve connects to this endpoint to submit and retrieve block data. 
- -**YAML:** - -```yaml -da: - address: "localhost:26659" -``` - -**Command-line Flag:** -`--rollkit.da.address ` -_Example:_ `--rollkit.da.address 192.168.1.100:26659` -_Default:_ `""` (empty, must be configured if DA is used) -_Constant:_ `FlagDAAddress` - -### DA Authentication Token - -**Description:** -The authentication token required to interact with the DA layer service, if the service mandates authentication. - -**YAML:** - -```yaml -da: - auth_token: "YOUR_DA_AUTH_TOKEN" -``` - -**Command-line Flag:** -`--rollkit.da.auth_token ` -_Example:_ `--rollkit.da.auth_token mysecrettoken` -_Default:_ `""` (empty) -_Constant:_ `FlagDAAuthToken` - -### DA Gas Price - -**Description:** -The gas price to use for transactions submitted to the DA layer. A value of -1 indicates automatic gas price determination (if supported by the DA layer). Higher values may lead to faster inclusion of data. - -**YAML:** - -```yaml -da: - gas_price: 0.025 -``` - -**Command-line Flag:** -`--rollkit.da.gas_price ` -_Example:_ `--rollkit.da.gas_price 0.05` -_Default:_ `-1` (automatic) -_Constant:_ `FlagDAGasPrice` - -### DA Gas Multiplier - -**Description:** -A multiplier applied to the gas price when retrying failed DA submissions. Values greater than 1 increase the gas price on retries, potentially improving the chances of successful inclusion. - -**YAML:** - -```yaml -da: - gas_multiplier: 1.1 -``` - -**Command-line Flag:** -`--rollkit.da.gas_multiplier ` -_Example:_ `--rollkit.da.gas_multiplier 1.5` -_Default:_ `1.0` (no multiplication) -_Constant:_ `FlagDAGasMultiplier` - -### DA Submit Options - -**Description:** -Additional options passed to the DA layer when submitting data. The format and meaning of these options depend on the specific DA implementation being used. For example, with Celestia, this can include custom gas settings or other submission parameters in JSON format. 
- -**Note:** If you configure multiple signing addresses (see [DA Signing Addresses](#da-signing-addresses)), the selected signing address will be automatically merged into these options as a JSON field `signer_address` (matching Celestia's TxConfig schema). If the base options are already valid JSON, the signing address is added to the existing object; otherwise, a new JSON object is created. - -**YAML:** - -```yaml -da: - submit_options: '{"key":"value"}' # Example, format depends on DA layer -``` - -**Command-line Flag:** -`--rollkit.da.submit_options ` -_Example:_ `--rollkit.da.submit_options '{"custom_param":true}'` -_Default:_ `""` (empty) -_Constant:_ `FlagDASubmitOptions` - -### DA Signing Addresses - -**Description:** -A comma-separated list of signing addresses to use for DA blob submissions. When multiple addresses are provided, they will be used in round-robin fashion to prevent sequence mismatches that can occur with high-throughput Cosmos SDK-based DA layers. This is particularly useful for Celestia when submitting many transactions concurrently. - -Each submission will select the next address in the list, and that address will be automatically added to the `submit_options` as `signer_address`. This ensures that the DA layer (e.g., celestia-node) uses the specified account for signing that particular blob submission. - -**Setup Requirements:** - -- All addresses must be loaded into the DA node's keyring and have sufficient funds for transaction fees -- For Celestia, see the guide on setting up multiple accounts in the DA node documentation - -**YAML:** - -```yaml -da: - signing_addresses: - - "celestia1abc123..." - - "celestia1def456..." - - "celestia1ghi789..." 
-``` - -**Command-line Flag:** -`--evnode.da.signing_addresses ` -_Example:_ `--evnode.da.signing_addresses celestia1abc...,celestia1def...,celestia1ghi...` -_Default:_ `[]` (empty, uses default DA node behavior) -_Constant:_ `FlagDASigningAddresses` - -**Behavior:** - -- If no signing addresses are configured, submissions use the DA layer's default signing behavior -- If one address is configured, all submissions use that address -- If multiple addresses are configured, they are used in round-robin order to distribute the load and prevent nonce/sequence conflicts -- The address selection is thread-safe for concurrent submissions - -### DA Namespace - -**Description:** -The namespace ID used when submitting blobs (block data) to the DA layer. This helps segregate data from different chains or applications on a shared DA layer. - -**Note:** If only `namespace` is provided, it will be used for both headers and data, otherwise the `data_namespace` will be used for data. Providing a separate `data_namespace` allows light clients to sync faster. - -**YAML:** - -```yaml -da: - namespace: "MY_UNIQUE_NAMESPACE_ID" -``` - -**Command-line Flag:** -`--rollkit.da.namespace ` -_Example:_ `--rollkit.da.namespace 0x1234567890abcdef` -_Default:_ `""` (empty) -_Constant:_ `FlagDANamespace` - -### DA Data Namespace - -**Description:** -The namespace ID specifically for submitting transaction data to the DA layer. Transaction data is submitted separately from headers, enabling nodes to sync only the data they need. The namespace value is encoded by the node to ensure proper formatting and compatibility with the DA layer. 
- -**YAML:** - -```yaml -da: - data_namespace: "DATA_NAMESPACE_ID" -``` - -**Command-line Flag:** -`--rollkit.da.data_namespace ` -_Example:_ `--rollkit.da.data_namespace my_data_namespace` -_Default:_ Falls back to `namespace` if not set -_Constant:_ `FlagDADataNamespace` - -### DA Block Time - -**Description:** -The average block time of the Data Availability chain (specified as a duration string, e.g., "15s", "1m"). This value influences: - -- The frequency of DA layer syncing. -- The maximum backoff time for retrying DA submissions. -- Calculation of transaction expiration when multiplied by `mempool_ttl`. - -**YAML:** - -```yaml -da: - block_time: "6s" -``` - -**Command-line Flag:** -`--rollkit.da.block_time ` -_Example:_ `--rollkit.da.block_time 12s` -_Default:_ `"6s"` -_Constant:_ `FlagDABlockTime` - -### DA Mempool TTL - -**Description:** -The number of DA blocks after which a transaction submitted to the DA layer is considered expired and potentially dropped from the DA layer's mempool. This also controls the retry backoff timing for DA submissions. - -**YAML:** - -```yaml -da: - mempool_ttl: 20 -``` - -**Command-line Flag:** -`--rollkit.da.mempool_ttl ` -_Example:_ `--rollkit.da.mempool_ttl 30` -_Default:_ `20` -_Constant:_ `FlagDAMempoolTTL` - -### DA Request Timeout - -**Description:** -Per-request timeout applied to DA `GetIDs` and `Get` RPC calls while retrieving blobs. Increase this value if your DA endpoint has high latency to avoid premature failures; decrease it to make the syncer fail fast and free resources sooner when the DA node becomes unresponsive. - -**YAML:** - -```yaml -da: - request_timeout: "30s" -``` - -**Command-line Flag:** -`--rollkit.da.request_timeout ` -_Example:_ `--rollkit.da.request_timeout 45s` -_Default:_ `"30s"` -_Constant:_ `FlagDARequestTimeout` - -### DA Batching Strategy - -**Description:** -Controls how blocks are batched before submission to the DA layer. 
Different strategies offer trade-offs between latency, cost efficiency, and throughput. All strategies pass through the DA submitter which performs additional size checks and may further split batches that exceed the DA layer's blob size limit. - -Available strategies: - -- **`immediate`**: Submits as soon as any items are available. Best for low-latency requirements where cost is not a concern. -- **`size`**: Waits until the batch reaches a size threshold (fraction of max blob size). Best for maximizing blob utilization and minimizing costs when latency is flexible. -- **`time`**: Waits for a time interval before submitting. Provides predictable submission timing aligned with DA block times. -- **`adaptive`**: Balances between size and time constraints—submits when either the size threshold is reached OR the max delay expires. Recommended for most production deployments as it optimizes both cost and latency. - -**YAML:** - -```yaml -da: - batching_strategy: "time" -``` - -**Command-line Flag:** -`--rollkit.da.batching_strategy ` -_Example:_ `--rollkit.da.batching_strategy adaptive` -_Default:_ `"time"` -_Constant:_ `FlagDABatchingStrategy` - -### DA Batch Size Threshold - -**Description:** -The minimum blob size threshold (as a fraction of the maximum blob size, between 0.0 and 1.0) before submitting a batch. Only applies to the `size` and `adaptive` strategies. For example, a value of 0.8 means the batch will be submitted when it reaches 80% of the maximum blob size. - -Higher values maximize blob utilization and reduce costs but may increase latency. Lower values reduce latency but may result in less efficient blob usage. 
- -**YAML:** - -```yaml -da: - batch_size_threshold: 0.8 -``` - -**Command-line Flag:** -`--rollkit.da.batch_size_threshold ` -_Example:_ `--rollkit.da.batch_size_threshold 0.9` -_Default:_ `0.8` (80% of max blob size) -_Constant:_ `FlagDABatchSizeThreshold` - -### DA Batch Max Delay - -**Description:** -The maximum time to wait before submitting a batch regardless of size. Applies to the `time` and `adaptive` strategies. Lower values reduce latency but may increase costs due to smaller batches. This value is typically aligned with the DA chain's block time to ensure submissions land in consecutive blocks. - -When set to 0, defaults to the DA BlockTime value. - -**YAML:** - -```yaml -da: - batch_max_delay: "6s" -``` - -**Command-line Flag:** -`--rollkit.da.batch_max_delay ` -_Example:_ `--rollkit.da.batch_max_delay 12s` -_Default:_ `0` (uses DA BlockTime) -_Constant:_ `FlagDABatchMaxDelay` - -### DA Batch Min Items - -**Description:** -The minimum number of items (headers or data) to accumulate before considering submission. This helps avoid submitting single items when more are expected soon, improving batching efficiency. All strategies respect this minimum. - -**YAML:** - -```yaml -da: - batch_min_items: 1 -``` - -**Command-line Flag:** -`--rollkit.da.batch_min_items ` -_Example:_ `--rollkit.da.batch_min_items 5` -_Default:_ `1` -_Constant:_ `FlagDABatchMinItems` - -## P2P Configuration (`p2p`) - -Settings for peer-to-peer networking, enabling nodes to discover each other, exchange blocks, and share transactions. - -**YAML Section:** - -```yaml -p2p: - # ... P2P configurations ... -``` - -### P2P Listen Address - -**Description:** -The network address (host:port) on which the Evolve node will listen for incoming P2P connections from other nodes. 
- -**YAML:** - -```yaml -p2p: - listen_address: "0.0.0.0:7676" -``` - -**Command-line Flag:** -`--rollkit.p2p.listen_address ` -_Example:_ `--rollkit.p2p.listen_address /ip4/127.0.0.1/tcp/26656` -_Default:_ `"/ip4/0.0.0.0/tcp/7676"` -_Constant:_ `FlagP2PListenAddress` - -### P2P Peers - -**Description:** -A comma-separated list of peer addresses (e.g., multiaddresses) that the node will attempt to connect to for bootstrapping its P2P connections. These are often referred to as seed nodes. - -**For DA-only sync mode:** Leave this field empty (default) to disable P2P networking entirely. When no peers are configured, the node will sync exclusively from the Data Availability layer without participating in P2P gossip, peer discovery, or block sharing. This is useful for nodes that only need to follow the canonical chain data from DA. - -**YAML:** - -```yaml -p2p: - peers: "/ip4/some_peer_ip/tcp/7676/p2p/PEER_ID1,/ip4/another_peer_ip/tcp/7676/p2p/PEER_ID2" - # For DA-only sync, leave peers empty: - # peers: "" -``` - -**Command-line Flag:** -`--rollkit.p2p.peers ` -_Example:_ `--rollkit.p2p.peers /dns4/seed.example.com/tcp/26656/p2p/12D3KooW...` -_Default:_ `""` (empty - enables DA-only sync mode) -_Constant:_ `FlagP2PPeers` - -### P2P Blocked Peers - -**Description:** -A comma-separated list of peer IDs that the node should block from connecting. This can be used to prevent connections from known malicious or problematic peers. - -**YAML:** - -```yaml -p2p: - blocked_peers: "PEER_ID_TO_BLOCK1,PEER_ID_TO_BLOCK2" -``` - -**Command-line Flag:** -`--rollkit.p2p.blocked_peers ` -_Example:_ `--rollkit.p2p.blocked_peers 12D3KooW...,12D3KooX...` -_Default:_ `""` (empty) -_Constant:_ `FlagP2PBlockedPeers` - -### P2P Allowed Peers - -**Description:** -A comma-separated list of peer IDs that the node should exclusively allow connections from. If this list is non-empty, only peers in this list will be able to connect. 
- -**YAML:** - -```yaml -p2p: - allowed_peers: "PEER_ID_TO_ALLOW1,PEER_ID_TO_ALLOW2" -``` - -**Command-line Flag:** -`--rollkit.p2p.allowed_peers ` -_Example:_ `--rollkit.p2p.allowed_peers 12D3KooY...,12D3KooZ...` -_Default:_ `""` (empty, allow all unless blocked) -_Constant:_ `FlagP2PAllowedPeers` - -## RPC Configuration (`rpc`) - -Settings for the Remote Procedure Call (RPC) server, which allows clients and applications to interact with the Evolve node. - -**YAML Section:** - -```yaml -rpc: - # ... RPC configurations ... -``` - -### RPC Server Address - -**Description:** -The network address (host:port) to which the RPC server will bind and listen for incoming requests. - -**YAML:** - -```yaml -rpc: - address: "127.0.0.1:7331" -``` - -**Command-line Flag:** -`--rollkit.rpc.address ` -_Example:_ `--rollkit.rpc.address 0.0.0.0:26657` -_Default:_ `"127.0.0.1:7331"` -_Constant:_ `FlagRPCAddress` - -### Enable DA Visualization - -**Description:** -If true, enables the Data Availability (DA) visualization endpoints that provide real-time monitoring of blob submissions to the DA layer. This includes a web-based dashboard and REST API endpoints for tracking submission statistics, monitoring DA health, and analyzing blob details. Only aggregator nodes submit data to the DA layer, so this feature is most useful when running in aggregator mode. - -**YAML:** - -```yaml -rpc: - enable_da_visualization: true -``` - -**Command-line Flag:** -`--rollkit.rpc.enable_da_visualization` (boolean, presence enables it) -_Example:_ `--rollkit.rpc.enable_da_visualization` -_Default:_ `false` -_Constant:_ `FlagRPCEnableDAVisualization` - -See the [DA Visualizer Guide](../guides/da/visualizer.md) for detailed information on using this feature. - -### Health Endpoints - -#### `/health/live` - -Returns `200 OK` if the process is alive and can access the store. 
- -```bash -curl http://localhost:7331/health/live -``` - -#### `/health/ready` - -Returns `200 OK` if the node can serve correct data. Checks: - -- P2P is listening (if enabled) -- Has synced blocks -- Not too far behind network -- Non-aggregators: has peers -- Aggregators: producing blocks at expected rate - -```bash -curl http://localhost:7331/health/ready -``` - -Configure max blocks behind: - -```yaml -node: - readiness_max_blocks_behind: 15 -``` - -## Instrumentation Configuration (`instrumentation`) - -Settings for enabling and configuring metrics and profiling endpoints, useful for monitoring node performance and debugging. - -**YAML Section:** - -```yaml -instrumentation: - # ... instrumentation configurations ... -``` - -### Enable Prometheus Metrics - -**Description:** -If true, enables the Prometheus metrics endpoint, allowing Prometheus to scrape operational data from the Evolve node. - -**YAML:** - -```yaml -instrumentation: - prometheus: true -``` - -**Command-line Flag:** -`--rollkit.instrumentation.prometheus` (boolean, presence enables it) -_Example:_ `--rollkit.instrumentation.prometheus` -_Default:_ `false` -_Constant:_ `FlagPrometheus` - -### Prometheus Listen Address - -**Description:** -The network address (host:port) where the Prometheus metrics server will listen for scraping requests. - -See [Metrics](../guides/metrics.md) for more details on what metrics are exposed. - -**YAML:** - -```yaml -instrumentation: - prometheus_listen_addr: ":2112" -``` - -**Command-line Flag:** -`--rollkit.instrumentation.prometheus_listen_addr ` -_Example:_ `--rollkit.instrumentation.prometheus_listen_addr 0.0.0.0:9090` -_Default:_ `":2112"` -_Constant:_ `FlagPrometheusListenAddr` - -### Maximum Open Connections - -**Description:** -The maximum number of simultaneous connections allowed for the metrics server (e.g., Prometheus endpoint). 
- -**YAML:** - -```yaml -instrumentation: - max_open_connections: 100 -``` - -**Command-line Flag:** -`--rollkit.instrumentation.max_open_connections ` -_Example:_ `--rollkit.instrumentation.max_open_connections 50` -_Default:_ (Refer to `DefaultInstrumentationConfig()` in code, typically a reasonable number like 100) -_Constant:_ `FlagMaxOpenConnections` - -### Enable Pprof Profiling - -**Description:** -If true, enables the pprof HTTP endpoint, which provides runtime profiling data for debugging performance issues. Accessing these endpoints can help diagnose CPU and memory usage. - -**YAML:** - -```yaml -instrumentation: - pprof: true -``` - -**Command-line Flag:** -`--rollkit.instrumentation.pprof` (boolean, presence enables it) -_Example:_ `--rollkit.instrumentation.pprof` -_Default:_ `false` -_Constant:_ `FlagPprof` - -### Pprof Listen Address - -**Description:** -The network address (host:port) where the pprof HTTP server will listen for profiling requests. - -**YAML:** - -```yaml -instrumentation: - pprof_listen_addr: "localhost:6060" -``` - -**Command-line Flag:** -`--rollkit.instrumentation.pprof_listen_addr ` -_Example:_ `--rollkit.instrumentation.pprof_listen_addr 0.0.0.0:6061` -_Default:_ `"localhost:6060"` -_Constant:_ `FlagPprofListenAddr` - -## Logging Configuration (`log`) - -Settings that control the verbosity and format of log output from the Evolve node. These are typically set via global flags. - -**YAML Section:** - -```yaml -log: - # ... logging configurations ... -``` - -### Log Level - -**Description:** -Sets the minimum severity level for log messages to be displayed. Common levels include `debug`, `info`, `warn`, `error`. 
- -**YAML:** - -```yaml -log: - level: "info" -``` - -**Command-line Flag:** -`--log.level ` (Note: some applications might use a different flag name like `--log_level`) -_Example:_ `--log.level debug` -_Default:_ `"info"` -_Constant:_ `FlagLogLevel` (value: "evolve.log.level", but often overridden by global app flags) - -### Log Format - -**Description:** -Sets the format for log output. Common formats include `text` (human-readable) and `json` (structured, machine-readable). - -**YAML:** - -```yaml -log: - format: "text" -``` - -**Command-line Flag:** -`--log.format ` (Note: some applications might use a different flag name like `--log_format`) -_Example:_ `--log.format json` -_Default:_ `"text"` -_Constant:_ `FlagLogFormat` (value: "evolve.log.format", but often overridden by global app flags) - -### Log Trace (Stack Traces) - -**Description:** -If true, enables the inclusion of stack traces in error logs. This can be very helpful for debugging issues by showing the call stack at the point of an error. - -**YAML:** - -```yaml -log: - trace: false -``` - -**Command-line Flag:** -`--log.trace` (boolean, presence enables it; Note: some applications might use a different flag name like `--log_trace`) -_Example:_ `--log.trace` -_Default:_ `false` -_Constant:_ `FlagLogTrace` (value: "evolve.log.trace", but often overridden by global app flags) - -## Signer Configuration (`signer`) - -Settings related to the signing mechanism used by the node, particularly for aggregators that need to sign blocks. - -**YAML Section:** - -```yaml -signer: - # ... signer configurations ... -``` - -### Signer Type - -**Description:** -Specifies the type of remote signer to use. Common options might include `file` (for key files) or `grpc` (for connecting to a remote signing service). 
- -**YAML:** - -```yaml -signer: - signer_type: "file" -``` - -**Command-line Flag:** -`--rollkit.signer.signer_type ` -_Example:_ `--rollkit.signer.signer_type grpc` -_Default:_ (Depends on application, often "file" or none if not an aggregator) -_Constant:_ `FlagSignerType` - -### Signer Path - -**Description:** -The path to the signer file (if `signer_type` is `file`) or the address of the remote signer service (if `signer_type` is `grpc` or similar). - -**YAML:** - -```yaml -signer: - signer_path: "/path/to/priv_validator_key.json" # For file signer - # signer_path: "localhost:9000" # For gRPC signer -``` - -**Command-line Flag:** -`--rollkit.signer.signer_path ` -_Example:_ `--rollkit.signer.signer_path ./config` -_Default:_ (Depends on application) -_Constant:_ `FlagSignerPath` - -### Signer Passphrase - -**Description:** -The passphrase required to decrypt or access the signer key, particularly if using a `file` signer and the key is encrypted, or if the aggregator mode is enabled and requires it. This flag is not directly a field in the `SignerConfig` struct but is used in conjunction with it. - -**YAML:** -This is typically not stored in the YAML file for security reasons but provided via flag or environment variable. - -**Command-line Flag:** -`--rollkit.signer.passphrase ` -_Example:_ `--rollkit.signer.passphrase "mysecretpassphrase"` -_Default:_ `""` (empty) -_Constant:_ `FlagSignerPassphrase` -_Note:_ Be cautious with providing passphrases directly on the command line in shared environments due to history logging. Environment variables or secure input methods are often preferred. - ---- - -This reference should help you configure your Evolve node effectively. Always refer to the specific version of Evolve you are using, as options and defaults may change over time. 
diff --git a/content/docs/learn/data-availability.md b/content/docs/learn/data-availability.md deleted file mode 100644 index 93f1483..0000000 --- a/content/docs/learn/data-availability.md +++ /dev/null @@ -1,33 +0,0 @@ -# Data Availability in Evolve - -Data availability (DA) is a core part of Evolve's design. Evolve's use of data availability ensures that all transaction data and block information required to verify the chain's state is accessible to anyone running a node or light client. - -Learn more about data availability: - -- [What is DA](https://celestia.org/what-is-da/) -- [The importance of DA for Rollups](https://medium.com/zeeve/exploring-data-availability-layer-and-its-importance-in-rollups-0a4fbf2e0ffc) - -## How Evolve Handles Data Availability - -Evolve is designed to be DA-agnostic, meaning it can integrate with different data availability layers depending on your needs. The main options are: - -- **Local Data Availability (Local DA):** - - Used for development, testing, and local deployments. - - Not secure for production, as data can be withheld by the node operator. - -- **External Data Availability Layer (DA Interface):** - - Used for production and secure deployments. - - Evolve can post block data to any external DA layer that implements the Evolve [DA client interface](https://github.com/evstack/ev-node/blob/main/block/public.go) (e.g., Celestia). - - Anyone can verify that the data is available and reconstruct the chain state, depending on the guarantees of the chosen DA layer.
- -## Best Practices - -- **Use Local DA only for development and testing locally.** -- **Alternatively, you can use [Celestia testnets](https://docs.celestia.org/how-to-guides/participate).** -- **For production, always use a decentralized DA layer that implements the Evolve DA interface.** - -## Learn More - -- [Set up a local DA](../guides/da/local-da.md) -- [Set up Celestia DA](../guides/da/celestia-da.md) -- [Celestia Docs](https://docs.celestia.org/) diff --git a/content/docs/learn/execution.md b/content/docs/learn/execution.md deleted file mode 100644 index c2c7650..0000000 --- a/content/docs/learn/execution.md +++ /dev/null @@ -1,31 +0,0 @@ -# Execution Layers in Evolve - -Evolve is designed to be modular and flexible, allowing different execution layers to be plugged in. Evolve defines a general-purpose execution interface ([see execution.go](https://github.com/evstack/ev-node/blob/main/core/execution/execution.go)) that enables developers to integrate any compatible application as the chain's execution layer. - -This means you can use a variety of Cosmos SDK or Reth compatible applications as the execution environment for your chain: choose the execution environment that best fits your use case. - -## Supported Execution Layers - -### Cosmos SDK Execution Layer - -Evolve natively supports Cosmos SDK-based applications as the execution layer for a chain via the ABCI (Application Blockchain Interface) protocol. The Cosmos SDK provides a rich set of modules for staking, governance, IBC, and more, and is widely used in the Cosmos ecosystem. This integration allows developers to leverage the full power and flexibility of the Cosmos SDK when building their chain applications. 
- -- [Cosmos SDK Documentation](https://docs.cosmos.network/) -- [Cosmos SDK ABCI Documentation](https://docs.cosmos.network/main/build/abci/introduction) -- [Evolve ABCI Adapter](https://github.com/evstack/ev-abci) - -### Reth - -Reth is a high-performance Ethereum execution client written in Rust. Evolve can integrate Reth as an execution layer, enabling Ethereum-compatible chains to process EVM transactions and maintain Ethereum-like state. This allows developers to build chains that leverage the Ethereum ecosystem, tooling, and smart contracts, while benefiting from Evolve's modular consensus and data availability. - -For more information about Reth, see the official documentation: - -- [Reth GitHub Repository](https://github.com/paradigmxyz/reth) -- [Evolve Reth Integration](https://github.com/evstack/ev-reth) - -## How It Works - -- Evolve acts as the consensus and uses Celestia as its data availability layer. -- The execution layer (Cosmos SDK app or Reth) processes transactions and maintains application state. - -For more details on integrating an execution layer with Evolve, see the respective documentation links above. diff --git a/content/docs/learn/meta.json b/content/docs/learn/meta.json deleted file mode 100644 index 8f0a9b9..0000000 --- a/content/docs/learn/meta.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "title": "Learn", - "description": "Core concepts and architecture", - "icon": "BookOpen", - "root": true, - "pages": [ - "about", - "data-availability", - "sequencing", - "execution", - "specs", - "transaction-flow", - "config" - ] -} diff --git a/content/docs/learn/sequencing/based.md b/content/docs/learn/sequencing/based.md deleted file mode 100644 index c99bf27..0000000 --- a/content/docs/learn/sequencing/based.md +++ /dev/null @@ -1,76 +0,0 @@ -# Based Sequencing - -Based sequencing is a decentralized sequencing model where transaction ordering is determined by the base layer (Celestia) rather than a centralized sequencer. 
In this model, **every full node acts as its own proposer** by independently and deterministically deriving the next batch of transactions directly from the base layer. - -## How Based Sequencing Works - -### Transaction Submission - -Users submit transactions to the base layer's forced inclusion namespace. These transactions are posted as blobs to the DA layer, where they become part of the canonical transaction ordering. - -```text -User → Base Layer (DA) → Full Nodes retrieve and execute -``` - -### Deterministic Batch Construction - -All full nodes independently construct identical batches by: - -1. **Retrieving forced inclusion transactions** from the base layer at epoch boundaries -2. **Applying forkchoice rules** to determine batch composition: - - `MaxBytes`: Maximum byte size per batch (respects block size limits) - - DA epoch boundaries -3. **Smoothing large transactions** across multiple blocks when necessary - -### Epoch-Based Processing - -Forced inclusion transactions are retrieved in epochs defined by `DAEpochForcedInclusion`. For example, with an epoch size of 10: - -- DA heights 100-109 form one epoch -- DA heights 110-119 form the next epoch - -Transactions from each epoch must be included before the epoch ends - -Epoch durations determine the block time in based sequencing. -Additionally, because no headers are published, the lazy mode has no effect. The block time is a factor of the DA layer's block time. - -## Block Smoothing - -When forced inclusion transactions exceed the `MaxBytes` limit for a single block, they can be "smoothed" across multiple blocks within the same epoch.
This ensures that: - -- Large transactions don't block the chain -- All transactions are eventually included -- The system remains censorship-resistant - -### Example - -```text -Epoch [100, 104]: - - Block 1: Includes 1.5 MB of forced inclusion txs (partial) - - Block 2: Includes remaining 0.5 MB + new regular txs - - All epoch transactions included before DA height 105 -``` - -## Trust Assumptions - -Based sequencing minimizes trust assumptions: - -- **No trusted sequencer** - ordering comes from the base layer -- **No proposer selection** - every full node derives blocks independently -- **Deterministic consensus** - all honest nodes converge on the same chain -- **Base layer security** - inherits the security guarantees of the DA layer -- **No malicious actor concern** - invalid blocks are automatically rejected by validation rules - -## Comparison with Single Sequencer - -| Feature | Based Sequencing | Single Sequencer | -| --------------------- | ----------------------------- | ----------------------------- | -| Decentralization | ✅ Fully decentralized | ❌ Single point of control | -| Censorship Resistance | ✅ Guaranteed by base layer | ⚠️ Guaranteed by base layer | -| Latency | ⚠️ Depends on DA layer (~12s) | ✅ Low latency (configurable) | -| Block Time Control | ❌ Factor of DA block time | ✅ Configurable by sequencer | -| Trust Assumptions | ✅ Minimal (only DA layer) | ❌ Trust the sequencer | - -## Further Reading - -- [Data Availability](../data-availability.md) - Understanding the DA layer -- [Transaction Flow](../transaction-flow.md) - How transactions move through the system diff --git a/content/docs/learn/sequencing/meta.json b/content/docs/learn/sequencing/meta.json deleted file mode 100644 index eeb809c..0000000 --- a/content/docs/learn/sequencing/meta.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "title": "Sequencing", - "pages": ["overview", "single", "based"] -} diff --git a/content/docs/learn/sequencing/overview.md 
b/content/docs/learn/sequencing/overview.md deleted file mode 100644 index 4e22e09..0000000 --- a/content/docs/learn/sequencing/overview.md +++ /dev/null @@ -1,45 +0,0 @@ -# Sequencing - -Sequencing is the essential first step for handling your transactions. Think of it as an organizer that takes all incoming transactions, puts them in a clear order, and then groups them into batches. This process is vital for keeping everything consistent and making the chain run. Evolve uses a "Sequencing Interface" with key functions like submitting, retrieving, and verifying these transaction batches, ensuring smooth communication between the chain and the sequencing mechanism, which often acts as a bridge to the underlying network. - -## Sequencing Interface {#sequencing-interface} - -[Sequencing Interface](https://github.com/evstack/ev-node/blob/main/core/sequencer/sequencing.go#L11) defines a sequencing interface for communicating between any sequencing network and Evolve. The key functions of the interface are defined as shown below. 
- -```go -// Sequencer is a generic interface for a sequencer -type Sequencer interface { - // SubmitBatchTxs submits a batch of transactions to sequencer - // Id is the unique identifier for the target chain - // Batch is the batch of transactions to submit - // returns an error if any from the sequencer - SubmitBatchTxs(ctx context.Context, req SubmitBatchTxsRequest) (*SubmitBatchTxsResponse, error) - - // GetNextBatch returns the next batch of transactions from the sequencer - // Id is the unique identifier for the target chain - // LastBatchHash is the cryptographic hash of the last batch received by the chain - // MaxBytes is the maximum number of bytes to return in the batch - // returns the next batch of transactions and an error if any from the sequencer - GetNextBatch(ctx context.Context, req GetNextBatchRequest) (*GetNextBatchResponse, error) - - // VerifyBatch verifies a batch of transactions received from the sequencer - // Id is the unique identifier for the target chain - // BatchHash is the cryptographic hash of the batch to verify - // returns a boolean indicating if the batch is valid and an error if any from the sequencer - VerifyBatch(ctx context.Context, req VerifyBatchRequest) (*VerifyBatchResponse, error) -} -``` - -It mainly consists of: - -- `SubmitBatchTxs` relays the chain transactions from Evolve chain to the sequencing network -- `GetNextBatch` returns the next batch of transactions along with a deterministic timestamp -- `VerifyBatch` validates the sequenced batch - -## Sequencing Implementations {#sequencing-implementations} - -An implementation of the sequencing interface mainly acts as a middleware that connects Evolve chain and the sequencing layer. It implements the sequencing interface functions described above.
-There are several implementations of the sequencer available in Evolve: - -- [single-sequencer](./single.md) - The simplest and most widely used sequencing model, where a single node (the sequencer) is responsible for ordering transactions and producing blocks. -- [based-sequencer](./based.md) - A decentralized sequencing model where transaction ordering is determined by the base layer, and every full node acts as its own proposer. diff --git a/content/docs/learn/sequencing/single.md b/content/docs/learn/sequencing/single.md deleted file mode 100644 index 38494af..0000000 --- a/content/docs/learn/sequencing/single.md +++ /dev/null @@ -1,128 +0,0 @@ -# Single Sequencer - -A single sequencer is the simplest sequencing architecture for an Evolve-based chain. In this model, one node (the sequencer) is responsible for ordering transactions, producing blocks, and submitting data to the data availability (DA) layer. - -## How the Single Sequencer Model Works - -1. **Transaction Submission:** - - Users submit transactions to the execution environment via RPC or other interfaces. -2. **Transaction Collection and Ordering:** - - The execution environment collects incoming transactions. - - The sequencer requests a batch of transactions from the execution environment to be included in the next block. -3. **Block Production:** - - **Without lazy mode:** the sequencer produces new blocks at fixed intervals. - - **With lazy mode:** the sequencer produces a block once either - - enough transactions are collected - - the lazy-mode block interval elapses - More info in the [lazy mode configuration guide](../config.md#lazy-mode-lazy-aggregator). - - Each block contains a batch of ordered transactions and metadata. - -4. **Data Availability Posting:** - - The sequencer posts the block data to the configured DA layer (e.g., Celestia). - - This ensures that anyone can access the data needed to reconstruct the chain state. - -5. 
**State Update:** - - The sequencer updates the chain state based on the new block and makes the updated state available to light clients and full nodes. - -## Transaction Flow Diagram - -```mermaid -sequenceDiagram - participant User - participant ExecutionEnv as Execution Environment - participant Sequencer - participant DA as Data Availability Layer - - User->>ExecutionEnv: Submit transaction - Sequencer->>ExecutionEnv: Request batch for block - ExecutionEnv->>Sequencer: Provide batch of transactions - Sequencer->>DA: Post block data - Sequencer->>ExecutionEnv: Update state - ExecutionEnv->>User: State/query response -``` - -## Forced Inclusion - -While the single sequencer controls transaction ordering, the system provides a censorship-resistance mechanism called **forced inclusion**. This ensures users can always include their transactions even if the sequencer refuses to process them. - -### How Forced Inclusion Works - -1. **Direct DA Submission:** - - Users can submit transactions directly to the DA layer's forced inclusion namespace - - These transactions bypass the sequencer entirely - -2. **Epoch-Based Retrieval:** - - The sequencer retrieves forced inclusion transactions from the DA layer at epoch boundaries - - Epochs are defined by `DAEpochForcedInclusion` in the genesis configuration - -3. **Mandatory Inclusion:** - - The sequencer MUST include all forced inclusion transactions from an epoch before the epoch ends - - Full nodes verify that forced inclusion transactions are properly included - -4. 
**Smoothing:** - - If forced inclusion transactions exceed block size limits (`MaxBytes`), they can be spread across multiple blocks within the same epoch - - All transactions must be included before moving to the next epoch - -### Example - -```text -Epoch [100, 109] (epoch size = 10): - - User submits tx directly to DA at height 102 - - Sequencer retrieves forced txs at epoch start (height 100) - - Sequencer includes forced tx in blocks before height 110 -``` - -See [Based Sequencing](./based.md) for a fully decentralized alternative that relies entirely on forced inclusion. - -## Detecting Malicious Sequencer Behavior - -Full nodes continuously monitor the sequencer to ensure it follows consensus rules, particularly around forced inclusion: - -### Censorship Detection - -If a sequencer fails to include forced inclusion transactions past their epoch boundary, full nodes will: - -1. **Detect the violation** - missing transactions from past epochs -2. **Reject invalid blocks** - do not build on top of censoring blocks -3. **Log the violation** with transaction hashes and epoch details -4. 
**Halt consensus** - the chain cannot progress with a malicious sequencer - -### Recovery from Malicious Sequencer - -When a malicious sequencer is detected (censoring forced inclusion transactions): - -**All nodes must restart the chain in based sequencing mode:** - -```bash -# Restart with based sequencing enabled -./evnode start --node.aggregator --node.based_sequencer -``` - -**In based sequencing mode:** - -- No single sequencer controls transaction ordering -- Every full node derives blocks independently from the DA layer -- Forced inclusion becomes the primary (and only) transaction submission method -- Censorship becomes impossible as ordering comes from the DA layer - -**Important considerations:** - -- All full nodes should coordinate the switch to based mode -- The chain continues from the last valid state -- Users submit transactions directly to the DA layer going forward -- This is a one-way transition - moving back to single sequencer requires social consensus - -See [Based Sequencing documentation](./based.md) for details on operating in this mode. - -## Advantages - -- **Simplicity:** Easy to set up and operate, making it ideal for development, testing, and small-scale deployments compared to other more complex sequencers. -- **Low Latency:** Fast block production and transaction inclusion, since there is no consensus overhead among multiple sequencers. -- **Independence from DA block time:** The sequencer can produce blocks on its own schedule, without being tied to the block time of the DA layer, enabling more flexible transaction processing than DA-timed sequencers. -- **Forced inclusion fallback:** Users can always submit transactions via the DA layer if the sequencer is unresponsive or censoring. - -## Disadvantages - -- **Single point of failure:** If the sequencer goes offline, block production stops (though the chain can transition to based mode). 
-- **Trust requirement:** Users must trust the sequencer to include their transactions in a timely manner (mitigated by forced inclusion). -- **Censorship risk:** A malicious sequencer can temporarily censor transactions until forced inclusion activates or the chain transitions to based mode. diff --git a/content/docs/learn/specs/block-manager.md b/content/docs/learn/specs/block-manager.md deleted file mode 100644 index 0801bd5..0000000 --- a/content/docs/learn/specs/block-manager.md +++ /dev/null @@ -1,758 +0,0 @@ -# Block Components - -## Abstract - -The block package provides a modular component-based architecture for handling block-related operations in full nodes. Instead of a single monolithic manager, the system is divided into specialized components that work together, each responsible for specific aspects of block processing. This architecture enables better separation of concerns, easier testing, and more flexible node configurations. - -The main components are: - -- **Executor**: Handles block production and state transitions (aggregator nodes only) -- **Reaper**: Periodically retrieves transactions and submits them to the sequencer (aggregator nodes only) -- **Submitter**: Manages submission of headers and data to the DA network (aggregator nodes only) -- **Syncer**: Handles synchronization from both DA and P2P sources (all full nodes) -- **Cache Manager**: Coordinates caching and tracking of blocks across all components - -A full node coordinates these components based on its role: - -- **Aggregator nodes**: Use all components for block production, submission, and synchronization -- **Non-aggregator full nodes**: Use only Syncer and Cache for block synchronization - -```mermaid -sequenceDiagram - title Overview of Block Manager - - participant User - participant Sequencer - participant Full Node 1 - participant Full Node 2 - participant DA Layer - - User->>Sequencer: Send Tx - Sequencer->>Sequencer: Generate Block - Sequencer->>DA Layer: Publish Block 
- - Sequencer->>Full Node 1: Gossip Block - Sequencer->>Full Node 2: Gossip Block - Full Node 1->>Full Node 1: Verify Block - Full Node 1->>Full Node 2: Gossip Block - Full Node 1->>Full Node 1: Mark Block Soft Confirmed - - Full Node 2->>Full Node 2: Verify Block - Full Node 2->>Full Node 2: Mark Block Soft Confirmed - - DA Layer->>Full Node 1: Retrieve Block - Full Node 1->>Full Node 1: Mark Block DA Included - - DA Layer->>Full Node 2: Retrieve Block - Full Node 2->>Full Node 2: Mark Block DA Included -``` - -### Component Architecture Overview - -```mermaid -flowchart TB - subgraph Block Components [Modular Block Components] - EXE[Executor
Block Production] - REA[Reaper
Tx Collection] - SUB[Submitter
DA Submission] - SYN[Syncer
Block Sync] - CAC[Cache Manager
State Tracking] - end - - subgraph External Components - CEXE[Core Executor] - SEQ[Sequencer] - DA[DA Layer] - HS[Header Store/P2P] - DS[Data Store/P2P] - ST[Local Store] - end - - REA -->|GetTxs| CEXE - REA -->|SubmitBatch| SEQ - REA -->|Notify| EXE - - EXE -->|CreateBlock| CEXE - EXE -->|ApplyBlock| CEXE - EXE -->|Save| ST - EXE -->|Track| CAC - - EXE -->|Headers| SUB - EXE -->|Data| SUB - SUB -->|Submit| DA - SUB -->|Track| CAC - - DA -->|Retrieve| SYN - HS -->|Headers| SYN - DS -->|Data| SYN - - SYN -->|ApplyBlock| CEXE - SYN -->|Save| ST - SYN -->|Track| CAC - SYN -->|SetFinal| CEXE - - CAC -->|Coordinate| EXE - CAC -->|Coordinate| SUB - CAC -->|Coordinate| SYN -``` - -## Protocol/Component Description - -The block components are initialized based on the node type: - -### Aggregator Components - -Aggregator nodes create all components for full block production and synchronization capabilities: - -```go -components := block.NewAggregatorComponents( - config, // Node configuration - genesis, // Genesis state - store, // Local datastore - executor, // Core executor for state transitions - sequencer, // Sequencer client - da, // DA client - signer, // Block signing key - // P2P stores and options... -) -``` - -### Non-Aggregator Components - -Non-aggregator full nodes create only synchronization components: - -```go -components := block.NewSyncComponents( - config, // Node configuration - genesis, // Genesis state - store, // Local datastore - executor, // Core executor for state transitions - da, // DA client - // P2P stores and options... 
(no signer or sequencer needed) -) -``` - -### Component Initialization Parameters - -| **Name** | **Type** | **Description** | -| --------------------------- | ----------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| signing key | crypto.PrivKey | used for signing blocks and data after creation | -| config | config.BlockManagerConfig | block manager configurations (see config options below) | -| genesis | \*cmtypes.GenesisDoc | initialize the block manager with genesis state (genesis configuration defined in `config/genesis.json` file under the app directory) | -| store | store.Store | local datastore for storing chain blocks and states (default local store path is `$db_dir/evolve` and `db_dir` specified in the `config.yaml` file under the app directory) | -| mempool, proxyapp, eventbus | mempool.Mempool, proxy.AppConnConsensus, \*cmtypes.EventBus | for initializing the executor (state transition function). 
mempool is also used in the manager to check for availability of transactions for lazy block production | -| dalc | da.DAClient | the data availability light client used to submit and retrieve blocks to DA network | -| headerStore | *goheaderstore.Store[*types.SignedHeader] | to store and retrieve block headers gossiped over the P2P network | -| dataStore | *goheaderstore.Store[*types.SignedData] | to store and retrieve block data gossiped over the P2P network | -| signaturePayloadProvider | types.SignaturePayloadProvider | optional custom provider for header signature payloads | -| sequencer | core.Sequencer | used to retrieve batches of transactions from the sequencing layer | -| reaper | \*Reaper | component that periodically retrieves transactions from the executor and submits them to the sequencer | - -### Configuration Options - -The block components share a common configuration: - -| Name | Type | Description | -| ------------------------ | ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- | -| BlockTime | time.Duration | time interval used for block production and block retrieval from block store ([`defaultBlockTime`][defaultBlockTime]) | -| DABlockTime | time.Duration | time interval used for both block publication to DA network and block retrieval from DA network ([`defaultDABlockTime`][defaultDABlockTime]) | -| DAStartHeight | uint64 | block retrieval from DA network starts from this height | -| LazyBlockInterval | time.Duration | time interval used for block production in lazy aggregator mode even when there are no transactions ([`defaultLazyBlockTime`][defaultLazyBlockTime]) | -| LazyMode | bool | when set to true, enables lazy aggregation mode which produces blocks only when transactions are available or at LazyBlockInterval intervals | -| MaxPendingHeadersAndData | uint64 | maximum number of pending headers and data blocks before pausing 
block production (default: 100) | -| MaxSubmitAttempts | int | maximum number of retry attempts for DA submissions (default: 30) | -| MempoolTTL | int | number of blocks to wait when transaction is stuck in DA mempool (default: 25) | -| GasPrice | float64 | gas price for DA submissions (-1 for automatic/default) | -| GasMultiplier | float64 | multiplier for gas price on DA submission retries (default: 1.3) | -| Namespace | da.Namespace | DA namespace ID for block submissions (deprecated, use HeaderNamespace and DataNamespace instead) | -| HeaderNamespace | string | namespace ID for submitting headers to DA layer (automatically encoded by the node) | -| DataNamespace | string | namespace ID for submitting data to DA layer (automatically encoded by the node) | -| RequestTimeout | duration | per-request timeout for DA `GetIDs`/`Get` calls; higher values tolerate slow DA nodes, lower values fail faster (default: 30s) | - -### Block Production (Executor Component) - -When the full node is operating as an aggregator, the **Executor component** handles block production. There are two modes of block production, which can be specified in the block manager configurations: `normal` and `lazy`. - -In `normal` mode, the block manager runs a timer, which is set to the `BlockTime` configuration parameter, and continuously produces blocks at `BlockTime` intervals. - -In `lazy` mode, the block manager implements a dual timer mechanism: - -```mermaid -flowchart LR - subgraph Lazy Aggregation Mode - R[Reaper] -->|GetTxs| CE[Core Executor] - CE -->|Txs Available| R - R -->|Submit to Sequencer| S[Sequencer] - R -->|NotifyNewTransactions| N[txNotifyCh] - - N --> E{Executor Logic} - BT[blockTimer] --> E - LT[lazyTimer] --> E - - E -->|Txs Available| P1[Produce Block with Txs] - E -->|No Txs & LazyTimer| P2[Produce Empty Block] - - P1 --> B[Block Creation] - P2 --> B - end -``` - -1. A `blockTimer` that triggers block production at regular intervals when transactions are available -2. 
A `lazyTimer` that ensures blocks are produced at `LazyBlockInterval` intervals even during periods of inactivity - -The block manager starts building a block when any transaction becomes available in the mempool via a notification channel (`txNotifyCh`). When the `Reaper` detects new transactions, it calls `Manager.NotifyNewTransactions()`, which performs a non-blocking signal on this channel. The block manager also produces empty blocks at regular intervals to maintain consistency with the DA layer, ensuring a 1:1 mapping between DA layer blocks and execution layer blocks. - -The Reaper component periodically retrieves transactions from the core executor and submits them to the sequencer. It runs independently and notifies the Executor component when new transactions are available, enabling responsive block production in lazy mode. - -#### Building the Block - -The Executor component of aggregator nodes performs the following steps to produce a block: - -```mermaid -flowchart TD - A[Timer Trigger / Transaction Notification] --> B[Retrieve Batch] - B --> C{Transactions Available?} - C -->|Yes| D[Create Block with Txs] - C -->|No| E[Create Empty Block] - D --> F[Generate Header & Data] - E --> F - F --> G[Sign Header → SignedHeader] - F --> H[Sign Data → SignedData] - G --> I[Apply Block] - H --> I - I --> J[Update State] - J --> K[Save to Store] - K --> L[Add to pendingHeaders] - K --> M[Add to pendingData] - L --> N[Broadcast Header to P2P] - M --> O[Broadcast Data to P2P] -``` - -- Retrieve a batch of transactions using `retrieveBatch()` which interfaces with the sequencer -- Call `CreateBlock` using executor with the retrieved transactions -- Create separate header and data structures from the block -- Sign the header using `signing key` to generate `SignedHeader` -- Sign the data using `signing key` to generate `SignedData` (if transactions exist) -- Call `ApplyBlock` using executor to generate an updated state -- Save the block, validators, and updated state 
to local store -- Add the newly generated header to `pendingHeaders` queue -- Add the newly generated data to `pendingData` queue (if not empty) -- Publish the newly generated header and data to channels to notify other components of the sequencer node (such as block and header gossip) - -Note: When no transactions are available, the block manager creates blocks with empty data using a special `dataHashForEmptyTxs` marker. The header and data separation architecture allows headers and data to be submitted and retrieved independently from the DA layer. - -### Block Publication to DA Network (Submitter Component) - -The **Submitter component** of aggregator nodes implements separate submission loops for headers and data, both operating at `DABlockTime` intervals. Headers and data are submitted to different namespaces to improve scalability and allow for more flexible data availability strategies: - -```mermaid -flowchart LR - subgraph Header Submission - H1[pendingHeaders Queue] --> H2[Header Submission Loop] - H2 --> H3[Marshal to Protobuf] - H3 --> H4[Submit to DA] - H4 -->|Success| H5[Remove from Queue] - H4 -->|Failure| H6[Keep in Queue & Retry] - end - - subgraph Data Submission - D1[pendingData Queue] --> D2[Data Submission Loop] - D2 --> D3[Marshal to Protobuf] - D3 --> D4[Submit to DA] - D4 -->|Success| D5[Remove from Queue] - D4 -->|Failure| D6[Keep in Queue & Retry] - end - - H2 -.->|DABlockTime| H2 - D2 -.->|DABlockTime| D2 -``` - -#### Header Submission Loop - -The `HeaderSubmissionLoop` manages the submission of signed headers to the DA network: - -- Retrieves pending headers from the `pendingHeaders` queue -- Marshals headers to protobuf format -- Submits to DA using the generic `submitToDA` helper with the configured `HeaderNamespace` -- On success, removes submitted headers from the pending queue -- On failure, headers remain in the queue for retry - -#### Data Submission Loop - -The `DataSubmissionLoop` manages the submission of signed data to the DA 
network: - -- Retrieves pending data from the `pendingData` queue -- Marshals data to protobuf format -- Submits to DA using the generic `submitToDA` helper with the configured `DataNamespace` -- On success, removes submitted data from the pending queue -- On failure, data remains in the queue for retry - -#### Generic Submission Logic - -Both loops use a shared `submitToDA` function that provides: - -- Namespace-specific submission based on header or data type -- Retry logic with configurable maximum attempts via `MaxSubmitAttempts` configuration -- Exponential backoff starting at `initialBackoff` (100ms), doubling each attempt, capped at `DABlockTime` -- Gas price management with `GasMultiplier` applied on retries using a centralized `retryStrategy` -- Recursive batch splitting for handling "too big" DA submissions that exceed blob size limits -- Comprehensive error handling for different DA submission failure types (mempool issues, context cancellation, blob size limits) -- Comprehensive metrics tracking for attempts, successes, and failures -- Context-aware cancellation support - -#### Retry Strategy and Error Handling - -The DA submission system implements sophisticated retry logic using a centralized `retryStrategy` struct to handle various failure scenarios: - -```mermaid -flowchart TD - A[Submit to DA] --> B{Submission Result} - B -->|Success| C[Reset Backoff & Adjust Gas Price Down] - B -->|Too Big| D{Batch Size > 1?} - B -->|Mempool/Not Included| E[Mempool Backoff Strategy] - B -->|Context Canceled| F[Stop Submission] - B -->|Other Error| G[Exponential Backoff] - - D -->|Yes| H[Recursive Batch Splitting] - D -->|No| I[Skip Single Item - Cannot Split] - - E --> J[Set Backoff = MempoolTTL * BlockTime] - E --> K[Multiply Gas Price by GasMultiplier] - - G --> L[Double Backoff Time] - G --> M[Cap at MaxBackoff - BlockTime] - - H --> N[Split into Two Halves] - N --> O[Submit First Half] - O --> P[Submit Second Half] - P --> Q{Both Halves Processed?} - Q 
-->|Yes| R[Combine Results] - Q -->|No| S[Handle Partial Success] - - C --> T[Update Pending Queues] - T --> U[Post-Submit Actions] -``` - -##### Retry Strategy Features - -- **Centralized State Management**: The `retryStrategy` struct manages attempt counts, backoff timing, and gas price adjustments -- **Multiple Backoff Types**: - - Exponential backoff for general failures (doubles each attempt, capped at `BlockTime`) - - Mempool-specific backoff (waits `MempoolTTL * BlockTime` for stuck transactions) - - Success-based backoff reset with gas price reduction -- **Gas Price Management**: - - Increases gas price by `GasMultiplier` on mempool failures - - Decreases gas price after successful submissions (bounded by initial price) - - Supports automatic gas price detection (`-1` value) -- **Intelligent Batch Splitting**: - - Recursively splits batches that exceed DA blob size limits - - Handles partial submissions within split batches - - Prevents infinite recursion with proper base cases -- **Comprehensive Error Classification**: - - `StatusSuccess`: Full or partial successful submission - - `StatusTooBig`: Triggers batch splitting logic - - `StatusNotIncludedInBlock`/`StatusAlreadyInMempool`: Mempool-specific handling - - `StatusContextCanceled`: Graceful shutdown support - - Other errors: Standard exponential backoff - -The manager enforces a limit on pending headers and data through `MaxPendingHeadersAndData` configuration. When this limit is reached, block production pauses to prevent unbounded growth of the pending queues. - -### Block Retrieval from DA Network (Syncer Component) - -The **Syncer component** implements a `RetrieveLoop` through its DARetriever that regularly pulls headers and data from the DA network. 
The retrieval process supports both legacy single-namespace mode (for backward compatibility) and the new separate namespace mode: - -```mermaid -flowchart TD - A[Start RetrieveLoop] --> B[Get DA Height] - B --> C{DABlockTime Timer} - C --> D[GetHeightPair from DA] - D --> E{Result?} - E -->|Success| F[Validate Signatures] - E -->|NotFound| G[Increment Height] - E -->|Error| H[Retry Logic] - - F --> I[Check Sequencer Info] - I --> J[Mark DA Included] - J --> K[Send to Sync] - K --> L[Increment Height] - L --> M[Immediate Next Retrieval] - - G --> C - H --> N{Retries < 10?} - N -->|Yes| O[Wait 100ms] - N -->|No| P[Log Error & Stall] - O --> D - M --> D -``` - -#### Retrieval Process - -1. **Height Management**: Starts from the latest of: - - DA height from the last state in local store - - `DAStartHeight` configuration parameter - - Maintains and increments `daHeight` counter after successful retrievals - -2. **Retrieval Mechanism**: - - Executes at `DABlockTime` intervals - - Implements namespace migration support: - - First attempts legacy namespace retrieval if migration not completed - - Falls back to separate header and data namespace retrieval - - Tracks migration status to optimize future retrievals - - Retrieves from separate namespaces: - - Headers from `HeaderNamespace` - - Data from `DataNamespace` - - Combines results from both namespaces - - Handles three possible outcomes: - - `Success`: Process retrieved header and/or data - - `NotFound`: No chain block at this DA height (normal case) - - `Error`: Retry with backoff - -3. **Error Handling**: - - Implements retry logic with 100ms delay between attempts - - After 10 retries, logs error and stalls retrieval - - Does not increment `daHeight` on persistent errors - -4. 
**Processing Retrieved Blocks**: - - Validates header and data signatures - - Checks sequencer information - - Marks blocks as DA included in caches - - Sends to sync goroutine for state update - - Successful processing triggers immediate next retrieval without waiting for timer - - Updates namespace migration status when appropriate: - - Marks migration complete when data is found in new namespaces - - Persists migration state to avoid future legacy checks - -#### Header and Data Caching - -The retrieval system uses persistent caches for both headers and data: - -- Prevents duplicate processing -- Tracks DA inclusion status -- Supports out-of-order block arrival -- Enables efficient sync from P2P and DA sources -- Maintains namespace migration state for optimized retrieval - -For more details on DA integration, see the [Data Availability specification](./da.md). - -#### Out-of-Order Chain Blocks on DA - -Evolve should support blocks arriving out-of-order on DA, like so: -![out-of-order blocks](./out-of-order-blocks.png) - -#### Termination Condition - -If the sequencer double-signs two blocks at the same height, evidence of the fault should be posted to DA. Evolve full nodes should process the longest valid chain up to the height of the fault evidence, and terminate. See diagram: -![termination condition](./termination.png) - -### Block Sync Service (Syncer Component) - -The **Syncer component** manages the synchronization of headers and data through its P2PHandler and coordination with the Cache Manager: - -#### Architecture - -- **Header Store**: Uses `goheader.Store[*types.SignedHeader]` for header management -- **Data Store**: Uses `goheader.Store[*types.SignedData]` for data management -- **Separation of Concerns**: Headers and data are handled independently, supporting the header/data separation architecture - -#### Synchronization Flow - -1. **Header Sync**: Headers created by the sequencer are sent to the header store for P2P gossip -2. 
**Data Sync**: Data blocks are sent to the data store for P2P gossip -3. **Cache Integration**: Both header and data caches track seen items to prevent duplicates -4. **DA Inclusion Tracking**: Separate tracking for header and data DA inclusion status - -### Block Publication to P2P network (Executor Component) - -The **Executor component** of aggregator nodes publishes headers and data separately to the P2P network: - -#### Header Publication - -- Headers are sent through the header broadcast channel -- Written to the header store for P2P gossip -- Broadcast to network peers via header sync service - -#### Data Publication - -- Data blocks are sent through the data broadcast channel -- Written to the data store for P2P gossip -- Broadcast to network peers via data sync service - -Non-sequencer full nodes receive headers and data through the P2P sync service and do not publish blocks themselves. - -### Block Retrieval from P2P network (Syncer Component) - -The **Syncer component** retrieves headers and data separately from P2P stores through its P2PHandler: - -#### Header Store Retrieval Loop - -The `HeaderStoreRetrieveLoop`: - -- Operates at `BlockTime` intervals via `headerStoreCh` signals -- Tracks `headerStoreHeight` for the last retrieved header -- Retrieves all headers between last height and current store height -- Validates sequencer information using `assertUsingExpectedSingleSequencer` -- Marks headers as "seen" in the header cache -- Sends headers to sync goroutine via `headerInCh` - -#### Data Store Retrieval Loop - -The `DataStoreRetrieveLoop`: - -- Operates at `BlockTime` intervals via `dataStoreCh` signals -- Tracks `dataStoreHeight` for the last retrieved data -- Retrieves all data blocks between last height and current store height -- Validates data signatures using `assertValidSignedData` -- Marks data as "seen" in the data cache -- Sends data to sync goroutine via `dataInCh` - -#### Soft Confirmations - -Headers and data retrieved from P2P are 
marked as soft confirmed until both: - -1. The corresponding header is seen on the DA layer -2. The corresponding data is seen on the DA layer - -Once both conditions are met, the block is marked as DA-included. - -#### About Soft Confirmations and DA Inclusions - -The block manager retrieves blocks from both the P2P network and the underlying DA network because the blocks are available in the P2P network faster and DA retrieval is slower (e.g., 1 second vs 6 seconds). -The blocks retrieved from the P2P network are only marked as soft confirmed until the DA retrieval succeeds on those blocks and they are marked DA-included. -DA-included blocks are considered to have a higher level of finality. - -**DAIncluderLoop**: -The `DAIncluderLoop` is responsible for advancing the `DAIncludedHeight` by: - -- Checking if blocks after the current height have both header and data marked as DA-included in caches -- Stopping advancement if either header or data is missing for a height -- Calling `SetFinal` on the executor when a block becomes DA-included -- Storing the Evolve height to DA height mapping for tracking -- Ensuring only blocks with both header and data present are considered DA-included - -### State Update after Block Retrieval (Syncer Component) - -The **Syncer component** uses a `SyncLoop` to coordinate state updates from blocks retrieved via P2P or DA networks: - -```mermaid -flowchart TD - subgraph Sources - P1[P2P Header Store] --> H[headerInCh] - P2[P2P Data Store] --> D[dataInCh] - DA1[DA Header Retrieval] --> H - DA2[DA Data Retrieval] --> D - end - - subgraph SyncLoop - H --> S[Sync Goroutine] - D --> S - S --> C{Header & Data for Same Height?} - C -->|Yes| R[Reconstruct Block] - C -->|No| W[Wait for Matching Pair] - R --> V[Validate Signatures] - V --> A[ApplyBlock] - A --> CM[Commit] - CM --> ST[Store Block & State] - ST --> F{DA Included?} - F -->|Yes| FN[SetFinal] - F -->|No| E[End] - FN --> U[Update DA Height] - end -``` - -#### Sync Loop Architecture - 
-The `SyncLoop` processes headers and data from multiple sources: - -- Headers from `headerInCh` (P2P and DA sources) -- Data from `dataInCh` (P2P and DA sources) -- Maintains caches to track processed items -- Ensures ordered processing by height - -#### State Update Process - -When both header and data are available for a height: - -1. **Block Reconstruction**: Combines header and data into a complete block -2. **Validation**: Verifies header and data signatures match expectations -3. **ApplyBlock**: - - Validates the block against current state - - Executes transactions - - Captures validator updates - - Returns updated state -4. **Commit**: - - Persists execution results - - Updates mempool by removing included transactions - - Publishes block events -5. **Storage**: - - Stores the block, validators, and updated state - - Updates last state in manager -6. **Finalization**: - - When block is DA-included, calls `SetFinal` on executor - - Updates DA included height - -## Message Structure/Communication Format - -### Component Communication - -The components communicate through well-defined interfaces: - -#### Executor ↔ Core Executor - -- `InitChain`: initializes the chain state with the given genesis time, initial height, and chain ID using `InitChainSync` on the executor to obtain initial `appHash` and initialize the state. -- `CreateBlock`: prepares a block with transactions from the provided batch data. -- `ApplyBlock`: validates the block, executes the block (apply transactions), captures validator updates, and returns updated state. -- `SetFinal`: marks the block as final when both its header and data are confirmed on the DA layer. -- `GetTxs`: retrieves transactions from the application (used by Reaper component). - -#### Reaper ↔ Sequencer - -- `GetNextBatch`: retrieves the next batch of transactions to include in a block. -- `VerifyBatch`: validates that a batch came from the expected sequencer. 
- -#### Submitter/Syncer ↔ DA Layer - -- `Submit`: submits headers or data blobs to the DA network. -- `Get`: retrieves headers or data blobs from the DA network. -- `GetHeightPair`: retrieves both header and data at a specific DA height. - -## Assumptions and Considerations - -### Component Architecture - -- The block package uses a modular component architecture instead of a monolithic manager -- Components are created based on node type: aggregator nodes get all components, non-aggregator nodes only get synchronization components -- Each component has a specific responsibility and communicates through well-defined interfaces -- Components share a common Cache Manager for coordination and state tracking - -### Initialization and State Management - -- Components load the initial state from the local store and use genesis if not found in the local store, when the node (re)starts -- During startup the Syncer invokes the execution Replayer to re-execute any blocks the local execution layer is missing; the replayer enforces strict app-hash matching so a mismatch aborts initialization instead of silently drifting out of sync -- The default mode for aggregator nodes is normal (not lazy) -- Components coordinate through channels and shared cache structures - -### Block Production (Executor Component) - -- The Executor can produce empty blocks -- In lazy aggregation mode, the Executor maintains consistency with the DA layer by producing empty blocks at regular intervals, ensuring a 1:1 mapping between DA layer blocks and execution layer blocks -- The lazy aggregation mechanism uses a dual timer approach: - - A `blockTimer` that triggers block production when transactions are available - - A `lazyTimer` that ensures blocks are produced even during periods of inactivity -- Empty batches are handled differently in lazy mode - instead of discarding them, they are returned with the `ErrNoBatch` error, allowing the caller to create empty blocks with proper timestamps -- 
Transaction notifications from the `Reaper` to the `Executor` are handled via a non-blocking notification channel (`txNotifyCh`) to prevent backpressure - -### DA Submission (Submitter Component) - -- The Submitter enforces `MaxPendingHeadersAndData` limit to prevent unbounded growth of pending queues during DA submission issues -- Headers and data are submitted separately to the DA layer using different namespaces, supporting the header/data separation architecture -- The Cache Manager uses persistent caches for headers and data to track seen items and DA inclusion status -- Namespace migration is handled transparently by the Syncer, with automatic detection and state persistence to optimize future operations -- The system supports backward compatibility with legacy single-namespace deployments while transitioning to separate namespaces -- Gas price management in the Submitter includes automatic adjustment with `GasMultiplier` on DA submission retries - -### Storage and Persistence - -- Components use persistent storage (disk) when the `root_dir` and `db_path` configuration parameters are specified in `config.yaml` file under the app directory. If these configuration parameters are not specified, the in-memory storage is used, which will not be persistent if the node stops -- The Syncer does not re-apply blocks when they transition from soft confirmed to DA included status. 
The block is only marked DA included in the caches -- Header and data stores use separate prefixes for isolation in the underlying database -- The genesis `ChainID` is used to create separate `PubSubTopID`s for headers and data in go-header - -### P2P and Synchronization - -- Block sync over the P2P network works only when a full node is connected to the P2P network by specifying the initial seeds to connect to via `P2PConfig.Seeds` configuration parameter when starting the full node -- Node's context is passed down to all components to support graceful shutdown and cancellation - -### Architecture Design Decisions - -- The Executor supports custom signature payload providers for headers, enabling flexible signing schemes -- The component architecture supports the separation of header and data structures in Evolve. This allows for expanding the sequencing scheme beyond single sequencing and enables the use of a decentralized sequencer mode. For detailed information on this architecture, see the [Header and Data Separation ADR](../../adr/adr-014-header-and-data-separation.md) -- Components process blocks with a minimal header format, which is designed to eliminate dependency on CometBFT's header format and can be used to produce an execution layer tailored header if needed. 
For details on this header structure, see the [Evolve Minimal Header](../../adr/adr-015-rollkit-minimal-header.md) specification - -## Metrics - -The block components expose comprehensive metrics for monitoring through the shared Metrics instance: - -### Block Production Metrics (Executor Component) - -- `last_block_produced_height`: Height of the last produced block -- `last_block_produced_time`: Timestamp of the last produced block -- `aggregation_type`: Current aggregation mode (normal/lazy) -- `block_size_bytes`: Size distribution of produced blocks -- `produced_empty_blocks_total`: Count of empty blocks produced - -### DA Metrics (Submitter and Syncer Components) - -- `da_submitter_pending_blobs`: Total of Header/Data pending blobs -- `da_submission_attempts_total`: Total DA submission attempts -- `da_submission_success_total`: Successful DA submissions -- `da_submission_failure_total`: Failed DA submissions -- `da_retrieval_attempts_total`: Total DA retrieval attempts -- `da_retrieval_success_total`: Successful DA retrievals -- `da_retrieval_failure_total`: Failed DA retrievals -- `da_height`: Current DA retrieval height - -### Sync Metrics (Syncer Component) - -- `sync_height`: Current sync height -- `da_included_height`: Height of last DA-included block -- `soft_confirmed_height`: Height of last soft confirmed block -- `header_store_height`: Current header store height -- `data_store_height`: Current data store height - -### Performance Metrics (All Components) - -- `block_production_time`: Time to produce a block -- `da_submission_time`: Time to submit to DA -- `state_update_time`: Time to apply block and update state -- `channel_buffer_usage`: Usage of internal channels - -### Error Metrics (All Components) - -- `errors_total`: Total errors by type and operation - -## Implementation - -The modular block components are implemented in the following packages: - -- [Executor]: Block production and state transitions (`block/internal/executing/`) -- [Reaper]: 
Transaction collection and submission (`block/internal/reaping/`) -- [Submitter]: DA submission logic (`block/internal/submitting/`) -- [Syncer]: Block synchronization from DA and P2P (`block/internal/syncing/`) -- [Cache Manager]: Coordination and state tracking (`block/internal/cache/`) -- [Components]: Main components orchestration (`block/components.go`) - -See [tutorial] for running a multi-node network with both aggregator and non-aggregator full nodes. - -## References - -[1] [Go Header][go-header] - -[2] [Block Sync][block-sync] - -[3] [Full Node][full-node] - -[4] [Block Components][Components] - -[5] [Tutorial][tutorial] - -[6] [Header and Data Separation ADR](../../adr/adr-014-header-and-data-separation.md) - -[7] [Evolve Minimal Header](../../adr/adr-015-rollkit-minimal-header.md) - -[8] [Data Availability](./da.md) - -[9] [Lazy Aggregation with DA Layer Consistency ADR](../../adr/adr-021-lazy-aggregation.md) - -[defaultBlockTime]: https://github.com/evstack/ev-node/blob/main/pkg/config/defaults.go#L50 -[defaultDABlockTime]: https://github.com/evstack/ev-node/blob/main/pkg/config/defaults.go#L59 -[defaultLazyBlockTime]: https://github.com/evstack/ev-node/blob/main/pkg/config/defaults.go#L52 -[go-header]: https://github.com/celestiaorg/go-header -[block-sync]: https://github.com/evstack/ev-node/blob/main/pkg/sync/sync_service.go -[full-node]: https://github.com/evstack/ev-node/blob/main/node/full.go -[Executor]: https://github.com/evstack/ev-node/blob/main/block/internal/executing/executor.go -[Reaper]: https://github.com/evstack/ev-node/blob/main/block/internal/reaping/reaper.go -[Submitter]: https://github.com/evstack/ev-node/blob/main/block/internal/submitting/submitter.go -[Syncer]: https://github.com/evstack/ev-node/blob/main/block/internal/syncing/syncer.go -[Cache Manager]: https://github.com/evstack/ev-node/blob/main/block/internal/cache/manager.go -[Components]: https://github.com/evstack/ev-node/blob/main/block/components.go -[tutorial]: 
https://ev.xyz/guides/full-node diff --git a/content/docs/learn/specs/block-validity.md b/content/docs/learn/specs/block-validity.md deleted file mode 100644 index 8eee587..0000000 --- a/content/docs/learn/specs/block-validity.md +++ /dev/null @@ -1,133 +0,0 @@ -# Block and Header Validity - -## Abstract - -Like all blockchains, chains are defined as the chain of **valid** blocks from the genesis, to the head. Thus, the block and header validity rules define the chain. - -Verifying a block/header is done in 3 parts: - -1. Verify correct serialization according to the protobuf spec - -2. Perform basic validation of the types - -3. Perform verification of the new block against the previously accepted block - -Evolve uses a header/data separation architecture where headers and data can be validated independently. The system has moved from a multi-validator model to a single signer model for simplified sequencer management. - -## Basic Validation - -Each type contains a `.ValidateBasic()` method, which verifies that certain basic invariants hold. The `ValidateBasic()` calls are nested for each structure. 
- -### SignedHeader Validation - -```go -SignedHeader.ValidateBasic() - // Make sure the SignedHeader's Header passes basic validation - Header.ValidateBasic() - verify ProposerAddress not nil - // Make sure the SignedHeader's signature passes basic validation - Signature.ValidateBasic() - // Ensure that someone signed the header - verify len(c.Signatures) not 0 - // For based chains (sh.Signer.IsEmpty()), pass validation - if !sh.Signer.IsEmpty(): - // Verify the signer matches the proposer address - verify sh.Signer.Address == sh.ProposerAddress - // Verify signature using custom verifier if set, otherwise use default - if sh.verifier != nil: - verify sh.verifier(sh) == nil - else: - verify sh.Signature.Verify(sh.Signer.PubKey, sh.Header.MarshalBinary()) -``` - -### SignedData Validation - -```go -SignedData.ValidateBasic() - // Always passes basic validation for the Data itself - Data.ValidateBasic() // always passes - // Make sure the signature is valid - Signature.ValidateBasic() - verify len(c.Signatures) not 0 - // Verify the signer - If !sd.Signer.IsEmpty(): - verify sd.Signature.Verify(sd.Signer.PubKey, sd.Data.MarshalBinary()) -``` - -### Block Validation - -Blocks are composed of SignedHeader and Data: - -```go -// Block validation happens by validating header and data separately -// then ensuring data hash matches -verify SignedHeader.ValidateBasic() == nil -verify Data.Hash() == SignedHeader.DataHash -``` - -## Verification Against Previous Block - -```go -SignedHeader.Verify(untrustedHeader *SignedHeader) - // Basic validation is handled by go-header before this - Header.Verify(untrustedHeader) - // Verify height sequence - if untrustedHeader.Height != h.Height + 1: - if untrustedHeader.Height > h.Height + 1: - return soft verification failure - return error "headers are not adjacent" - // Verify the link to previous header - verify untrustedHeader.LastHeaderHash == h.Header.Hash() - // Note: ValidatorHash field exists for compatibility but is not 
validated -``` - -## Data - -[Source: `data.go`](https://github.com/evstack/ev-node/blob/main/types/data.go) - -| **Field Name** | **Valid State** | **Validation** | -|----------------|-----------------------------------------|------------------------------------| -| Txs | Transaction data of the block | Data.Hash() == SignedHeader.DataHash | -| Metadata | Optional p2p gossiping metadata | Not validated | - -## SignedHeader - -[Source: `signed_header.go`](https://github.com/evstack/ev-node/blob/main/types/signed_header.go) - -| **Field Name** | **Valid State** | **Validation** | -|----------------|--------------------------------------------------------------------------|---------------------------------------------------------------------------------------------| -| Header | Valid header for the block | `Header` passes `ValidateBasic()` and `Verify()` | -| Signature | Valid signature from the single sequencer | `Signature` passes `ValidateBasic()`, verified against signer | -| Signer | Information about who signed the header | Must match ProposerAddress if not empty (based chain case) | -| verifier | Optional custom signature verification function | Used instead of default verification if set | - -## Header - -[Source: `header.go`](https://github.com/evstack/ev-node/blob/main/types/header.go) - -***Note***: Evolve has moved to a single signer model. The multi-validator architecture has been replaced with a simpler single sequencer approach. - -| **Field Name** | **Valid State** | **Validation** | -|---------------------|--------------------------------------------------------------------------------------------|---------------------------------------| -| **BaseHeader** | | | -| Height | Height of the previous accepted header, plus 1. | checked in the `Verify()` step | -| Time | Timestamp of the block | Not validated in Evolve | -| ChainID | The hard-coded ChainID of the chain | Should be checked as soon as the header is received | -| **Header** 
| | | -| Version | unused | | -| LastHeaderHash | The hash of the previous accepted block | checked in the `Verify()` step | -| DataHash | Correct hash of the block's Data field | checked in the `ValidateBasic()` step | -| AppHash | The correct state root after executing the block's transactions against the accepted state | checked during block execution | -| ProposerAddress | Address of the expected proposer | Must match Signer.Address in SignedHeader | -| ValidatorHash | Compatibility field for Tendermint light client | Not validated | - -## Signer - -[Source: `signed_header.go`](https://github.com/evstack/ev-node/blob/main/types/signed_header.go) - -The Signer type replaces the previous ValidatorSet for single sequencer operation: - -| **Field Name** | **Valid State** | **Validation** | -|----------------|-----------------------------------------------------------------|-----------------------------| -| PubKey | Public key of the signer | Must not be nil if Signer is not empty | -| Address | Address derived from the public key | Must match ProposerAddress | diff --git a/content/docs/learn/specs/da.md b/content/docs/learn/specs/da.md deleted file mode 100644 index 481a433..0000000 --- a/content/docs/learn/specs/da.md +++ /dev/null @@ -1,63 +0,0 @@ -# DA - -Evolve provides a generic [data availability interface][da-interface] for modular blockchains. Any DA that implements this interface can be used with Evolve. - -## Details - -`Client` can connect via JSON-RPC transports using Evolve's [jsonrpc][jsonrpc] implementations. 
The connection can be configured using the following cli flags: - -* `--rollkit.da.address`: url address of the DA service (default: "grpc://localhost:26650") -* `--rollkit.da.auth_token`: authentication token of the DA service -* `--rollkit.da.namespace`: namespace to use when submitting blobs to the DA service (deprecated) -* `--rollkit.da.header_namespace`: namespace to use when submitting headers to the DA service -* `--rollkit.da.data_namespace`: namespace to use when submitting data to the DA service - -The Submitter component now submits headers and data separately to the DA layer using different namespaces: - -* **Headers**: Submitted to the namespace specified by `--rollkit.da.header_namespace` (or falls back to `--rollkit.da.namespace` if not set) -* **Data**: Submitted to the namespace specified by `--rollkit.da.data_namespace` (or falls back to `--rollkit.da.namespace` if not set) - -Each submission first encodes the headers or data using protobuf (the encoded data are called blobs) and invokes the `Submit` method on the underlying DA implementation with the appropriate namespace. On successful submission (`StatusSuccess`), the DA block height which included the blobs is returned. - -To make sure that the serialised blocks don't exceed the underlying DA's blob limits, it fetches the blob size limit by calling `Config` which returns the limit as `uint64` bytes, then includes serialised blocks until the limit is reached. If the limit is reached, it submits the partial set and returns the count of successfully submitted blocks as `SubmittedCount`. The caller should retry with the remaining blocks until all the blocks are submitted. If the first block itself is over the limit, it throws an error. 
- -The `Submit` call may result in an error (`StatusError`) based on the underlying DA implementations on following scenarios: - -* the total blobs size exceeds the underlying DA's limits (includes empty blobs) -* the implementation specific failures, e.g., for [celestia-da-json-rpc][jsonrpc], invalid namespace, unable to create the commitment or proof, setting low gas price, etc, could return error. - -The retrieval process now supports both legacy single-namespace mode and separate namespace mode: - -1. **Legacy Mode Support**: For backward compatibility, the system first attempts to retrieve from the legacy namespace if migration has not been completed. - -2. **Separate Namespace Retrieval**: The system retrieves headers and data separately: - * Headers are retrieved from the `HeaderNamespace` - * Data is retrieved from the `DataNamespace` - * Results from both namespaces are combined - -3. **Namespace Migration**: The system automatically detects and tracks namespace migration: - * When data is found in new namespaces, migration is marked as complete - * Migration state is persisted to optimize future retrievals - * Once migration is complete, legacy namespace checks are skipped - -If there are no blocks available for a given DA height in any namespace, `StatusNotFound` is returned (which is not an error case). The retrieved blobs are converted back to headers and data, then combined into complete blocks for processing. - -Both header/data submission and retrieval operations may be unsuccessful if the DA node and the DA blockchain that the DA implementation is using have failures. For example, failures such as, DA mempool is full, DA submit transaction is nonce clashing with other transaction from the DA submitter account, DA node is not synced, etc. 
- -## Namespace Separation Benefits - -The separation of headers and data into different namespaces provides several advantages: - -* **Improved Scalability**: Headers and data can be processed independently, allowing for more efficient resource utilization -* **Flexible Data Availability**: Different availability guarantees can be applied to headers vs data -* **Optimized Retrieval**: Clients can retrieve only the data they need (e.g., light clients may only need headers) -* **Backward Compatibility**: The system maintains support for legacy single-namespace deployments while enabling gradual migration - -## References - -[1] [da-interface][da-interface] - -[2] [jsonrpc][jsonrpc] - -[da-interface]: https://github.com/evstack/ev-node/blob/main/block/public.go -[jsonrpc]: https://github.com/evstack/ev-node/tree/main/pkg/da/jsonrpc diff --git a/content/docs/learn/specs/full_node.md b/content/docs/learn/specs/full_node.md deleted file mode 100644 index f909536..0000000 --- a/content/docs/learn/specs/full_node.md +++ /dev/null @@ -1,107 +0,0 @@ -# Full Node - -## Abstract - -A Full Node is a top-level service that encapsulates different components of Evolve and initializes/manages them. - -## Details - -### Full Node Details - -A Full Node is initialized inside the Cosmos SDK start script along with the node configuration, a private key to use in the P2P client, a private key for signing blocks as a block proposer, a client creator, a genesis document, and a logger. It uses them to initialize the components described above. The components TxIndexer, BlockIndexer, and IndexerService exist to ensure cometBFT compatibility since they are needed for most of the RPC calls from the `SignClient` interface from cometBFT. - -Note that unlike a light node which only syncs and stores block headers seen on the P2P layer, the full node also syncs and stores full blocks seen on both the P2P network and the DA layer. 
Full blocks contain all the transactions published as part of the block. - -The Full Node mainly encapsulates and initializes/manages the following components: - -### genesisDoc - -The [genesis] document contains information about the initial state of the chain, in particular its validator set. - -### conf - -The [node configuration] contains all the necessary settings for the node to be initialized and function properly. - -### P2P - -The [peer-to-peer client] is used to gossip transactions between full nodes in the network. - -### Store - -The [Store] is initialized with `DefaultStore`, an implementation of the [store interface] which is used for storing and retrieving blocks, commits, and state. | - -### blockComponents - -The [Block Components] provide a modular architecture for managing block-related operations. Instead of a single monolithic manager, the system uses specialized components: - -**For Aggregator Nodes:** - -- **Executor**: Block production (normal and lazy modes) and state transitions -- **Reaper**: Transaction collection and submission to sequencer -- **Submitter**: Header and data submission to DA layer -- **Syncer**: Block retrieval and synchronization from DA and P2P -- **Cache Manager**: Coordination and tracking across all components - -**For Non-Aggregator Nodes:** - -- **Syncer**: Block retrieval and synchronization from DA and P2P -- **Cache Manager**: Tracking and caching of synchronized blocks - -This modular architecture implements header/data separation where headers and transaction data are handled independently by different components. - -### dalc - -The [Data Availability Layer Client][dalc] is used to interact with the data availability layer. It is initialized with the DA Layer and DA Config specified in the node configuration. - -### hSyncService - -The [Header Sync Service] is used for syncing signed headers between nodes over P2P. It operates independently from data sync to support light clients. 
- -### dSyncService - -The [Data Sync Service] is used for syncing transaction data between nodes over P2P. This service is only used by full nodes, not light nodes. - -## Message Structure/Communication Format - -The Full Node communicates with other nodes in the network using the P2P client. It also communicates with the application using the ABCI proxy connections. The communication format is based on the P2P and ABCI protocols. - -## Assumptions and Considerations - -The Full Node assumes that the configuration, private keys, client creator, genesis document, and logger are correctly passed in by the Cosmos SDK. It also assumes that the P2P client, data availability layer client, block components, and other services can be started and stopped without errors. - -## Implementation - -See [full node] - -## References - -[1] [Full Node][full node] - -[2] [Genesis Document][genesis] - -[3] [Node Configuration][node configuration] - -[4] [Peer to Peer Client][peer-to-peer client] - -[5] [Store][Store] - -[6] [Store Interface][store interface] - -[7] [Block Components][block components] - -[8] [Data Availability Layer Client][dalc] - -[9] [Header Sync Service][Header Sync Service] - -[10] [Data Sync Service][Data Sync Service] - -[full node]: https://github.com/evstack/ev-node/blob/main/node/full.go -[genesis]: https://github.com/cometbft/cometbft/blob/main/spec/core/genesis.md -[node configuration]: https://github.com/evstack/ev-node/blob/main/pkg/config/config.go -[peer-to-peer client]: https://github.com/evstack/ev-node/blob/main/pkg/p2p/client.go -[Store]: https://github.com/evstack/ev-node/blob/main/pkg/store/store.go -[store interface]: https://github.com/evstack/ev-node/blob/main/pkg/store/types.go -[Block Components]: https://github.com/evstack/ev-node/blob/main/block/components.go -[dalc]: https://github.com/evstack/ev-node/blob/main/block/public.go -[Header Sync Service]: https://github.com/evstack/ev-node/blob/main/pkg/sync/sync_service.go -[Data Sync 
Service]: https://github.com/evstack/ev-node/blob/main/pkg/sync/sync_service.go diff --git a/content/docs/learn/specs/header-sync.md b/content/docs/learn/specs/header-sync.md deleted file mode 100644 index ae237f9..0000000 --- a/content/docs/learn/specs/header-sync.md +++ /dev/null @@ -1,172 +0,0 @@ -# Header and Data Sync - -## Abstract - -The nodes in the P2P network sync headers and data using separate sync services that implement the [go-header][go-header] interface. Evolve uses a header/data separation architecture where headers and transaction data are synchronized independently through parallel services. Each sync service consists of several components as listed below. - -| Component | Description | -| ---------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| store | a prefixed [datastore][datastore] where synced items are stored (`headerSync` prefix for headers, `dataSync` prefix for data) | -| subscriber | a [libp2p][libp2p] node pubsub subscriber for the specific data type | -| P2P server | a server for handling requests between peers in the P2P network | -| exchange | a client that enables sending in/out-bound requests from/to the P2P network | -| syncer | a service for efficient synchronization. When a P2P node falls behind and wants to catch up to the latest network head via P2P network, it can use the syncer. 
| - -## Details - -Evolve implements two separate sync services: - -### Header Sync Service - -- Synchronizes `SignedHeader` structures containing block headers with signatures -- Used by all node types (sequencer, full, and light) -- Essential for maintaining the canonical view of the chain - -### Data Sync Service - -- Synchronizes `Data` structures containing transaction data -- Used only by full nodes and sequencers -- Light nodes do not run this service as they only need headers - -Both services: - -- Utilize the generic `SyncService[H header.Header[H]]` implementation -- Inherit the `ConnectionGater` from the node's P2P client for peer management -- Use `NodeConfig.BlockTime` to determine outdated items during sync -- Operate independently on separate P2P topics and datastores - -### Consumption of Sync Services - -#### Header Sync - -- Sequencer nodes publish signed headers to the P2P network after block creation -- Full and light nodes receive and store headers for chain validation -- Headers contain commitments (DataHash) that link to the corresponding data - -#### Data Sync - -- Sequencer nodes publish transaction data separately from headers -- Only full nodes receive and store data (light nodes skip this) -- Data is linked to headers through the DataHash commitment - -#### Parallel Broadcasting - -The Executor component (in aggregator nodes) broadcasts headers and data in parallel when publishing blocks: - -- Headers are sent through `headerBroadcaster` -- Data is sent through `dataBroadcaster` -- This enables efficient network propagation of both components - -## Assumptions - -- Separate datastores are created with different prefixes: - - Headers: `headerSync` prefix on the main datastore - - Data: `dataSync` prefix on the main datastore -- Network IDs are suffixed to distinguish services: - - Header sync: `{network}-headerSync` - - Data sync: `{network}-dataSync` -- Chain IDs for pubsub topics are also separated: - - Headers: `{chainID}-headerSync` 
creates topic like `/gm-headerSync/header-sub/v0.0.1` - - Data: `{chainID}-dataSync` creates topic like `/gm-dataSync/header-sub/v0.0.1` -- Both stores must contain at least one item before the syncer starts: - - On first boot, the services fetch the configured genesis height from peers - - On restart, each store reuses its latest item to derive the initial height requested from peers -- Sync services work only when connected to P2P network via `P2PConfig.Seeds` -- Node context is passed to all components for graceful shutdown -- Headers and data are linked through DataHash but synced independently - -## Implementation - -The sync service implementation can be found in [pkg/sync/sync_service.go][sync-service]. The generic `SyncService[H header.Header[H]]` is instantiated as: - -- `HeaderSyncService` for syncing `*types.SignedHeader` -- `DataSyncService` for syncing `*types.Data` - -Full nodes create and start both services, while light nodes only start the header sync service. The services are created in [full][fullnode] and [light][lightnode] node implementations. - -The block components integrate with both services through: - -- The Syncer component's P2PHandler retrieves headers and data from P2P -- The Executor component publishes headers and data through broadcast channels -- Separate stores and channels manage header and data synchronization - -## DA Height Hints - -DA Height Hints (DAHint) provide an optimization for P2P synchronization by indicating which DA layer height contains a block's header or data. This allows syncing nodes to fetch missing DA data directly instead of performing sequential DA scanning. 
- -### Naming Considerations - -The naming convention follows this pattern: - -| Name | Usage | -| ----------------- | ---------------------------------------------------------- | -| `DAHeightHint` | Internal struct field storing the hint value | -| `DAHint()` | Getter method returning the DA height hint | -| `SetDAHint()` | Setter method for the DA height hint | -| `P2PSignedHeader` | Wrapper around `SignedHeader` that includes `DAHeightHint` | -| `P2PData` | Wrapper around `Data` that includes `DAHeightHint` | - -The term "hint" is used deliberately because: - -1. **It's advisory, not authoritative**: The hint suggests where to find data on the DA layer, but the authoritative source is always the DA layer itself -2. **It may be absent**: Hints are only populated during certain sync scenarios (see below) -3. **It optimizes but doesn't replace**: Nodes can still function without hints by scanning the DA layer sequentially - -### When DAHints Are Populated - -DAHints are **only populated when a node catches up from P2P** and is not yet synced to the head. When a node is already synced to the head: - -- The executor broadcasts headers/data immediately after block creation -- At this point, DA submission has not occurred yet (it happens later in the flow) -- Therefore, the broadcasted P2P messages do not contain DA hints - -This means: - -- **Syncing nodes** (catching up): Receive headers/data with DA hints populated -- **Synced nodes** (at head): Receive headers/data without DA hints - -The DA hints are set by the DA submitter after successful inclusion on the DA layer and stored for later P2P propagation to syncing peers. 
- -### Implementation Details - -The P2P wrapper types (`P2PSignedHeader` and `P2PData`) extend the base types with an optional `DAHeightHint` field: - -- Uses protobuf optional fields (`optional uint64 da_height_hint`) for backward compatibility -- Old nodes can still unmarshal new messages (the hint field is simply ignored) -- New nodes can unmarshal old messages (the hint field defaults to zero/absent) - -The hint flow: - -1. **Set by the DA Submitter** when headers/data are successfully included on the DA layer -2. **Stored in the P2P store** alongside the header/data -3. **Propagated via P2P** when syncing nodes request blocks -4. **Queued as priority** by the Syncer's DA retriever when received via P2P -5. **Fetched before sequential heights** - priority heights take precedence over normal DA scanning - -### Priority Queue Mechanism - -When a P2P event arrives with a DA height hint, the hint is queued as a priority height in the DA retriever. The `fetchDAUntilCaughtUp` loop checks for priority heights first: - -1. If priority heights are queued, pop and fetch the lowest one first -2. If no priority heights, continue sequential DA fetching (from the last known DA height) -3. Priority heights are sorted ascending to process lower heights first -4. Already-processed priority heights are tracked to avoid duplicate fetches - -This ensures that when syncing from P2P, the node can immediately fetch the DA data for blocks it receives, rather than waiting for sequential scanning to reach that height. 
- -## References - -[1] [Header Sync][sync-service] - -[2] [Full Node][fullnode] - -[3] [Light Node][lightnode] - -[4] [go-header][go-header] - -[sync-service]: https://github.com/evstack/ev-node/blob/main/pkg/sync/sync_service.go -[fullnode]: https://github.com/evstack/ev-node/blob/main/node/full.go -[lightnode]: https://github.com/evstack/ev-node/blob/main/node/light.go -[go-header]: https://github.com/celestiaorg/go-header -[libp2p]: https://github.com/libp2p/go-libp2p -[datastore]: https://github.com/ipfs/go-datastore diff --git a/content/docs/learn/specs/meta.json b/content/docs/learn/specs/meta.json deleted file mode 100644 index 8552b89..0000000 --- a/content/docs/learn/specs/meta.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "title": "Technical Specifications", - "pages": [ - "overview", - "block-manager", - "block-validity", - "da", - "full_node", - "header-sync", - "p2p", - "store" - ] -} diff --git a/content/docs/learn/specs/out-of-order-blocks.png b/content/docs/learn/specs/out-of-order-blocks.png deleted file mode 100644 index fa7a955..0000000 Binary files a/content/docs/learn/specs/out-of-order-blocks.png and /dev/null differ diff --git a/content/docs/learn/specs/overview.md b/content/docs/learn/specs/overview.md deleted file mode 100644 index 0621ad0..0000000 --- a/content/docs/learn/specs/overview.md +++ /dev/null @@ -1,17 +0,0 @@ -# Specs Overview - -Welcome to the Evolve Technical Specifications. - -This is comprehensive documentation on the inner components of Evolve, including data storage, transaction processing, and more. It’s an essential resource for developers looking to understand, contribute to, and leverage the full capabilities of Evolve. - -Each file in this folder covers a specific aspect of the system, from block management to data availability and networking. Use this page as a starting point to explore the technical details and architecture of Evolve. 
- -## Table of Contents - -- [Block Components](./block-manager.md): Explains the modular component architecture for block processing in Evolve. -- [Block Validity](./block-validity.md): Details the rules and checks for block validity within the protocol. -- [Data Availability (DA)](./da.md): Describes how Evolve ensures data availability and integrates with DA layers. -- [Full Node](./full_node.md): Outlines the architecture and operation of a full node in Evolve. -- [Header Sync](./header-sync.md): Covers the process and protocol for synchronizing block headers. -- [P2P](./p2p.md): Documents the peer-to-peer networking layer and its protocols. -- [Store](./store.md): Provides information about the storage subsystem and data management. diff --git a/content/docs/learn/specs/p2p.md b/content/docs/learn/specs/p2p.md deleted file mode 100644 index 14309d9..0000000 --- a/content/docs/learn/specs/p2p.md +++ /dev/null @@ -1,60 +0,0 @@ -# P2P - -Every node (both full and light) runs a P2P client using [go-libp2p][go-libp2p] P2P networking stack for gossiping transactions in the chain's P2P network. The same P2P client is also used by the header and block sync services for gossiping headers and blocks. - -Following parameters are required for creating a new instance of a P2P client: - -* P2PConfig (described below) -* [go-libp2p][go-libp2p] private key used to create a libp2p connection and join the p2p network. -* chainID: identifier used as namespace within the p2p network for peer discovery. The namespace acts as a sub network in the p2p network, where peer connections are limited to the same namespace. -* datastore: an instance of [go-datastore][go-datastore] used for creating a connection gator and stores blocked and allowed peers. -* logger - -```go -// P2PConfig stores configuration related to peer-to-peer networking. 
-type P2PConfig struct { - ListenAddress string // Address to listen for incoming connections - Seeds string // Comma separated list of seed nodes to connect to - BlockedPeers string // Comma separated list of nodes to ignore - AllowedPeers string // Comma separated list of nodes to whitelist -} -``` - -A P2P client also instantiates a [connection gator][conngater] to block and allow peers specified in the `P2PConfig`. - -It also sets up a gossiper using the gossip topic `chainID+txTopicSuffix` (`txTopicSuffix` is defined in [p2p/client.go][client.go]), a Distributed Hash Table (DHT) using the `Seeds` defined in the `P2PConfig` and peer discovery using go-libp2p's `discovery.RoutingDiscovery`. - -A P2P client provides an interface `SetTxValidator(p2p.GossipValidator)` for specifying a gossip validator which can define how to handle the incoming `GossipMessage` in the P2P network. The `GossipMessage` represents message gossiped via P2P network (e.g. transaction, Block etc). - -```go -// GossipValidator is a callback function type. 
If the transaction is valid, then it is added to the mempool -func (n *FullNode) newTxValidator() p2p.GossipValidator { -``` - -```go -// Dummy validator that always returns a callback function with boolean `false` -func (ln *LightNode) falseValidator() p2p.GossipValidator { -``` - -## References - -[1] [client.go][client.go] - -[2] [go-datastore][go-datastore] - -[3] [go-libp2p][go-libp2p] - -[4] [conngater][conngater] - -[client.go]: https://github.com/evstack/ev-node/blob/main/pkg/p2p/client.go -[go-datastore]: https://github.com/ipfs/go-datastore -[go-libp2p]: https://github.com/libp2p/go-libp2p -[conngater]: https://github.com/libp2p/go-libp2p/tree/master/p2p/net/conngater diff --git a/content/docs/learn/specs/store.md b/content/docs/learn/specs/store.md deleted file mode 100644 index 8432902..0000000 --- a/content/docs/learn/specs/store.md +++ /dev/null @@ -1,92 +0,0 @@ -# Store - -## Abstract - -The Store interface defines methods for storing and retrieving blocks, commits, and the state of the blockchain. - -## Protocol/Component Description - -The Store interface defines the following methods: - -- `Height`: Returns the height of the highest block in the store. -- `SetHeight`: Sets given height in the store if it's higher than the existing height in the store. -- `SaveBlock`: Saves a block (containing both header and data) along with its seen signature. -- `GetBlock`: Returns a block at a given height. -- `GetBlockByHash`: Returns a block with a given block header hash. - -Note: While blocks are stored as complete units in the store, the block components handle headers and data separately during synchronization and DA layer interaction. - -- `SaveBlockResponses`: Saves block responses in the Store. -- `GetBlockResponses`: Returns block results at a given height. -- `GetSignature`: Returns a signature for a block at a given height. -- `GetSignatureByHash`: Returns a signature for a block with a given block header hash. 
-- `UpdateState`: Updates the state saved in the Store. Only one State is stored. -- `GetState`: Returns the last state saved with UpdateState. -- `SaveValidators`: Saves the validator set at a given height. -- `GetValidators`: Returns the validator set at a given height. - -The `TxnDatastore` interface inside [go-datastore] is used for constructing different key-value stores for the underlying storage of a full node. There are two different implementations of `TxnDatastore` in [kv.go]: - -- `NewTestInMemoryKVStore`: Builds a key-value store that uses the [BadgerDB] library and operates in-memory, without accessing the disk. Used only across unit tests and integration tests. - -- `NewDefaultKVStore`: Builds a key-value store that uses the [BadgerDB] library and stores the data on disk at the specified path. - -An Evolve full node is [initialized][full_node_store_initialization] using `NewDefaultKVStore` as the base key-value store for underlying storage. To store various types of data in this base key-value store, different prefixes are used: `mainPrefix`, `dalcPrefix`, and `indexerPrefix`. The `mainPrefix` equal to `0` is used for the main node data, `dalcPrefix` equal to `1` is used for Data Availability Layer Client (DALC) data, and `indexerPrefix` equal to `2` is used for indexing related data. - -For the main node data, `DefaultStore` struct, an implementation of the Store interface, is used with the following prefixes for various types of data within it: - -- `blockPrefix` with value "b": Used to store complete blocks in the key-value store. -- `indexPrefix` with value "i": Used to index the blocks stored in the key-value store. -- `commitPrefix` with value "c": Used to store commits related to the blocks. -- `statePrefix` with value "s": Used to store the state of the blockchain. -- `responsesPrefix` with value "r": Used to store responses related to the blocks. -- `validatorsPrefix` with value "v": Used to store validator sets at a given height. 
- -Additional prefixes used by sync services: - -- `headerSyncPrefix` with value "hs": Used by the header sync service for P2P synced headers. -- `dataSyncPrefix` with value "ds": Used by the data sync service for P2P synced transaction data. - For example, in a call to `GetBlockByHash` for some block hash `<hash>`, the key used in the full node's base key-value store will be `/0/b/<hash>` where `0` is the main store prefix and `b` is the block prefix. Similarly, in a call to `GetValidators` for some height `<height>`, the key used in the full node's base key-value store will be `/0/v/<height>` where `0` is the main store prefix and `v` is the validator set prefix. - -Inside the key-value store, the value of these various types of data like `Block` is stored as a byte array which is encoded and decoded using the corresponding Protobuf [marshal and unmarshal methods][serialization]. - -The store is most widely used inside the [block components] to perform their functions correctly. Within the block components, since they have multiple go-routines, access is protected by mutex locks to synchronize read/write access and prevent race conditions. - -## Message Structure/Communication Format - -The Store does not communicate over the network, so there is no message structure or communication format. - -## Assumptions and Considerations - -The Store assumes that the underlying datastore is reliable and provides atomicity for transactions. It also assumes that the data passed to it for storage is valid and correctly formatted. - -## Implementation - -See [Store Interface][store_interface] and [Default Store][default_store] for its implementation. 
- -## References - -[1] [Store Interface][store_interface] - -[2] [Default Store][default_store] - -[3] [Full Node Store Initialization][full_node_store_initialization] - -[4] [Block Components][block components] - -[5] [Badger DB][BadgerDB] - -[6] [Go Datastore][go-datastore] - -[7] [Key Value Store][kv.go] - -[8] [Serialization][serialization] - -[store_interface]: https://github.com/evstack/ev-node/blob/main/pkg/store/types.go#L11 -[default_store]: https://github.com/evstack/ev-node/blob/main/pkg/store/store.go -[full_node_store_initialization]: https://github.com/evstack/ev-node/blob/main/node/full.go#L96 -[block components]: https://github.com/evstack/ev-node/blob/main/block/components.go -[BadgerDB]: https://github.com/dgraph-io/badger -[go-datastore]: https://github.com/ipfs/go-datastore -[kv.go]: https://github.com/evstack/ev-node/blob/main/pkg/store/kv.go -[serialization]: https://github.com/evstack/ev-node/blob/main/types/serialization.go diff --git a/content/docs/learn/specs/template.md b/content/docs/learn/specs/template.md deleted file mode 100644 index 32f5544..0000000 --- a/content/docs/learn/specs/template.md +++ /dev/null @@ -1,103 +0,0 @@ -# Protocol/Component Name - -## Abstract - -Provide a concise description of the purpose of the component for which the -specification is written, along with its contribution to the evolve or -other relevant parts of the system. Make sure to include proper references to -the relevant sections. - -## Protocol/Component Description - -Offer a comprehensive explanation of the protocol, covering aspects such as data -flow, communication mechanisms, and any other details necessary for -understanding the inner workings of this component. - -## Message Structure/Communication Format - -If this particular component is expected to communicate over the network, -outline the structure of the message protocol, including details such as field -interpretation, message format, and any other relevant information. 
- -## Assumptions and Considerations - -If there are any assumptions required for the component's correct operation, -performance, security, or other expected features, outline them here. -Additionally, provide any relevant considerations related to security or other -concerns. - -## Implementation - -Include a link to the location where the implementation of this protocol can be -found. Note that specific implementation details should be documented in the -evolve repository rather than in the specification document. - -## References - -List any references used or cited in the document. - -## General Tips - -### How to use a mermaid diagram that you can display in a markdown - -```mermaid - -sequenceDiagram - title Example - participant A - participant B - A->>B: Example - B->>A: Example - - ``` - - ```mermaid - -graph LR - A[Example] --> B[Example] - B --> C[Example] - C --> A - - ``` - - ```mermaid - -gantt - title Example - dateFormat YYYY-MM-DD - section Example - A :done, des1, 2014-01-06,2014-01-08 - B :done, des2, 2014-01-06,2014-01-08 - C :done, des3, 2014-01-06,2014-01-08 - - ``` - -### Grammar and spelling check - -The recommendation is to use your favorite spellchecker extension in your IDE like [grammarly], to make sure that the document is free of spelling and grammar errors. - -### Use of links - -If you want to use links use proper syntax. 
This goes for both internal and external links like [documentation] or [external links] - -At the bottom of the document in the [References](#references) section, you can add the following footnotes that will be visible in the markdown document: - -[1] [Grammarly][grammarly] - -[2] [Documentation][documentation] - -[3] [external links][external links] - -Then at the bottom add the actual links that will not be visible in the markdown document: - -[grammarly]: https://www.grammarly.com/ -[documentation]: ../../README.md -[external links]: https://github.com/celestiaorg/go-header - -### Use of tables - -If you are describing variables, components or other things in a structured list that can be described in a table use the following syntax: - -| Name | Type | Description | -| ---- | ---- | ----------- | -| `name` | `type` | Description | diff --git a/content/docs/learn/specs/termination.png b/content/docs/learn/specs/termination.png deleted file mode 100644 index 0b61c8f..0000000 Binary files a/content/docs/learn/specs/termination.png and /dev/null differ diff --git a/content/docs/learn/transaction-flow.md b/content/docs/learn/transaction-flow.md deleted file mode 100644 index 8d05532..0000000 --- a/content/docs/learn/transaction-flow.md +++ /dev/null @@ -1,53 +0,0 @@ -# Transaction flow - -Chain users use a light node to communicate with the chain P2P network for two primary reasons: - -- submitting transactions -- gossiping headers and fraud proofs - -Here's what the typical transaction flow looks like: - -## Transaction submission - -```mermaid -sequenceDiagram - participant User - participant LightNode - participant FullNode - - User->>LightNode: Submit Transaction - LightNode->>FullNode: Gossip Transaction - FullNode-->>User: Refuse (if invalid) -``` - -## Transaction validation and processing - -```mermaid -sequenceDiagram - participant FullNode - participant Sequencer - - FullNode->>FullNode: Check Validity - FullNode->>FullNode: Add to Mempool (if valid) - 
FullNode-->>User: Transaction Processed (if valid) - FullNode->>Sequencer: Inform about Valid Transaction - Sequencer->>DALayer: Add to Chain Block -``` - -## Block processing - -```mermaid -sequenceDiagram - participant DALayer - participant FullNode - participant Chain - - DALayer->>Chain: Update State - DALayer->>FullNode: Download & Validate Block -``` - -To transact, users submit a transaction to their light node, which gossips the transaction to a full node. Before adding the transaction to their mempool, the full node checks its validity. Valid transactions are included in the mempool, while invalid ones are refused, and the user's transaction will not be processed. - -If the transaction is valid and has been included in the mempool, the sequencer can add it to a chain block, which is then submitted to the data availability (DA) layer. This results in a successful transaction flow for the user, and the state of the chain is updated accordingly. - -After the block is submitted to the DA layer, the full nodes download and validate the block. diff --git a/content/docs/meta.json b/content/docs/meta.json deleted file mode 100644 index a97ba33..0000000 --- a/content/docs/meta.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "root": true, - "pages": [ - "learn", - "getting-started", - "guides", - "api", - "reference", - "concepts", - "overview", - "ev-abci", - "ev-reth", - "adr" - ] -} diff --git a/content/docs/overview/architecture.md b/content/docs/overview/architecture.md deleted file mode 100644 index d0ba6ec..0000000 --- a/content/docs/overview/architecture.md +++ /dev/null @@ -1,185 +0,0 @@ -# Architecture - -Evolve uses a modular architecture where each component has a well-defined interface and can be swapped independently. This document provides an overview of how the pieces fit together. 
- -## System Overview - -```text -┌─────────────────────────────────────────────────────────────────┐ -│ Client Apps │ -│ (wallets, dapps, indexers) │ -└─────────────────────────────┬───────────────────────────────────┘ - │ JSON-RPC / gRPC -┌─────────────────────────────▼───────────────────────────────────┐ -│ ev-node │ -│ ┌───────────┐ ┌───────────┐ ┌───────────┐ ┌──────────────┐ │ -│ │ Block │ │ Sequencer │ │ P2P │ │ Sync │ │ -│ │ Components│ │ │ │ Network │ │ Services │ │ -│ └─────┬─────┘ └─────┬─────┘ └─────┬─────┘ └───────┬──────┘ │ -└────────┼──────────────┼──────────────┼────────────────┼─────────┘ - │ │ │ │ - │ Executor │ Sequencer │ libp2p │ DA Client - ▼ ▼ ▼ ▼ -┌────────────────┐ ┌──────────┐ ┌─────────────────────────────────┐ -│ Executor │ │Sequencer │ │ DA Layer │ -│ (ev-reth or │ │(single, │ │ (Celestia) │ -│ ev-abci) │ │ based) │ │ │ -└────────────────┘ └──────────┘ └─────────────────────────────────┘ -``` - -## Core Design Principles - -1. **Zero-dependency core** — The `core/` package contains only interfaces with no external dependencies. This keeps the API stable and allows any implementation. - -2. **Modular components** — Executor, Sequencer, and DA layer are all pluggable. Swap them without changing ev-node. - -3. **Separation of concerns** — Block production, syncing, and DA submission run as independent components that communicate through well-defined channels. - -4. **Two operating modes** — Nodes run as either an Aggregator (produces blocks) or Sync-only (follows chain). - -## Block Components - -The block package is the heart of ev-node. 
It's organized into specialized components: - -| Component | Responsibility | Runs On | -|-----------|---------------|---------| -| **Executor** | Produces blocks by getting batches from sequencer and executing via execution layer | Aggregator only | -| **Reaper** | Scrapes transactions from execution layer mempool and submits to sequencer | Aggregator only | -| **Syncer** | Coordinates block sync from DA layer and P2P network | All nodes | -| **Submitter** | Submits blocks to DA layer and tracks inclusion | Aggregator only | -| **Cache** | Manages in-memory state for headers, data, and pending submissions | All nodes | - -### Component Interaction - -```text - ┌─────────────┐ - │ Reaper │ - │ (tx scrape)│ - └──────┬──────┘ - │ Submit batch - ▼ -┌─────────────┐ ┌─────────────┐ ┌─────────────┐ -│ Sequencer │◄───│ Executor │───►│ Broadcaster │ -│ │ │(block prod) │ │ (P2P) │ -└─────────────┘ └──────┬──────┘ └─────────────┘ - │ - │ Queue for submission - ▼ - ┌─────────────┐ - │ Submitter │───► DA Layer - │ │ - └──────┬──────┘ - │ - │ Track inclusion - ▼ - ┌─────────────┐ - │ Cache │ - └─────────────┘ -``` - -## Node Types - -Evolve supports several node configurations: - -| Type | Block Production | Full Validation | DA Submission | Use Case | -|------|-----------------|-----------------|---------------|----------| -| **Aggregator** | Yes | Yes | Yes | Block producer (sequencer) | -| **Full Node** | No | Yes | No | RPC provider, validator | -| **Light Node** | No | Headers only | No | Mobile, embedded clients | -| **Attester** | No | Yes | No | Soft consensus participant | - -### Aggregator - -The aggregator (also called sequencer node) produces blocks: - -1. Reaper collects transactions from execution layer -2. Executor gets ordered batch from sequencer -3. Executor calls execution layer to process transactions -4. Executor creates and signs block (header + data) -5. Broadcaster gossips block to P2P network -6. 
Submitter queues block for DA submission - -### Full Node - -Full nodes sync and validate without producing blocks: - -1. Syncer receives blocks from DA layer and/or P2P -2. Validates header signatures and data hashes -3. Executes transactions via execution layer -4. Verifies resulting state root matches header -5. Persists validated blocks to local store - -## Data Flow - -### Block Production (Aggregator) - -```text -User Tx → Execution Layer Mempool - │ - ▼ - Reaper scrapes txs - │ - ▼ - Sequencer orders batch - │ - ▼ - Executor.ExecuteTxs() - │ - ├──► SignedHeader + Data - │ - ├──► P2P Broadcast (soft confirmation) - │ - └──► Submitter Queue - │ - ▼ - DA Layer (hard confirmation) -``` - -### Block Sync (Non-Aggregator) - -```text -┌────────────────────────────────────────┐ -│ Syncer │ -├────────────┬────────────┬──────────────┤ -│ DA Worker │ P2P Worker │Forced Incl. │ -│ │ │ Worker │ -└─────┬──────┴─────┬──────┴───────┬──────┘ - │ │ │ - └────────────┴──────────────┘ - │ - ▼ - processHeightEvent() - │ - ▼ - Validate → Execute → Persist -``` - -## P2P Network - -Built on libp2p with: - -- **GossipSub** for transaction and block propagation -- **Kademlia DHT** for peer discovery -- **Topics**: `{chainID}-tx`, `{chainID}-header`, `{chainID}-data` - -Nodes discover peers through: - -1. Bootstrap/seed nodes -2. DHT peer exchange -3. 
PEX (peer exchange protocol) - -## Storage - -ev-node uses a key-value store (badger) for: - -- **Headers** — Indexed by height and hash -- **Data** — Transaction lists indexed by height -- **State** — Last committed height, app hash, DA height -- **Pending** — Blocks awaiting DA inclusion - -## Further Reading - -- [Block Lifecycle](/concepts/block-lifecycle) — Detailed block processing flow -- [Sequencing](/concepts/sequencing) — How transaction ordering works -- [Data Availability](/concepts/data-availability) — DA layer integration -- [Executor Interface](/reference/interfaces/executor) — Full interface reference diff --git a/content/docs/overview/execution-environments.md b/content/docs/overview/execution-environments.md deleted file mode 100644 index c2c7650..0000000 --- a/content/docs/overview/execution-environments.md +++ /dev/null @@ -1,31 +0,0 @@ -# Execution Layers in Evolve - -Evolve is designed to be modular and flexible, allowing different execution layers to be plugged in. Evolve defines a general-purpose execution interface ([see execution.go](https://github.com/evstack/ev-node/blob/main/core/execution/execution.go)) that enables developers to integrate any compatible application as the chain's execution layer. - -This means you can use a variety of Cosmos SDK or Reth compatible applications as the execution environment for your chain: choose the execution environment that best fits your use case. - -## Supported Execution Layers - -### Cosmos SDK Execution Layer - -Evolve natively supports Cosmos SDK-based applications as the execution layer for a chain via the ABCI (Application Blockchain Interface) protocol. The Cosmos SDK provides a rich set of modules for staking, governance, IBC, and more, and is widely used in the Cosmos ecosystem. This integration allows developers to leverage the full power and flexibility of the Cosmos SDK when building their chain applications. 
- -- [Cosmos SDK Documentation](https://docs.cosmos.network/) -- [Cosmos SDK ABCI Documentation](https://docs.cosmos.network/main/build/abci/introduction) -- [Evolve ABCI Adapter](https://github.com/evstack/ev-abci) - -### Reth - -Reth is a high-performance Ethereum execution client written in Rust. Evolve can integrate Reth as an execution layer, enabling Ethereum-compatible chains to process EVM transactions and maintain Ethereum-like state. This allows developers to build chains that leverage the Ethereum ecosystem, tooling, and smart contracts, while benefiting from Evolve's modular consensus and data availability. - -For more information about Reth, see the official documentation: - -- [Reth GitHub Repository](https://github.com/paradigmxyz/reth) -- [Evolve Reth Integration](https://github.com/evstack/ev-reth) - -## How It Works - -- Evolve acts as the consensus and uses Celestia as its data availability layer. -- The execution layer (Cosmos SDK app or Reth) processes transactions and maintains application state. - -For more details on integrating an execution layer with Evolve, see the respective documentation links above. diff --git a/content/docs/overview/meta.json b/content/docs/overview/meta.json deleted file mode 100644 index 2f66705..0000000 --- a/content/docs/overview/meta.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "title": "Overview", - "icon": "LayoutDashboard", - "pages": ["..."] -} diff --git a/content/docs/overview/what-is-evolve.md b/content/docs/overview/what-is-evolve.md deleted file mode 100644 index 59dc8cd..0000000 --- a/content/docs/overview/what-is-evolve.md +++ /dev/null @@ -1,95 +0,0 @@ -# Introduction - -Evolve is the fastest way to launch your own modular network — without validator overhead or token lock-in. - -Built on Celestia, Evolve offers L1-level control with L2-level performance. - -This isn't a toolkit. It's a launch stack. - -No fees, no middlemen, no revenue share. - -## What is Evolve - -Evolve is a launch stack for L1s. 
It gives you full control over execution — without CometBFT, validator ops, or lock-in. - -Evolve is [open-source](https://github.com/evstack/ev-node), production-ready, and fully composable. - -At its core is `ev-node`, a modular node that exposes an [Execution interface](https://github.com/evstack/ev-node/blob/main/core/execution/execution.go) — letting you bring any VM or execution logic, including Cosmos SDK or custom-built runtimes. - -Evolving from Cosmos SDK? - -Migrate without rewriting your stack. Bring your logic and state to Evolve and shed validator overhead — all while gaining performance and execution freedom. - -Evolve is how you launch your network. Modular. Production-ready. Yours. - -With Evolve, you get: - -- Full control over execution \- use any VM -- Low-cost launch — no emissions, no validator inflation -- Speed to traction — from local devnet to testnet in minutes -- Keep sequencer revenue — monetize directly -- Optional L1 validator network for fast finality and staking - -Powered by Celestia — toward 1GB blocks, multi-VM freedom, and execution without compromising flexibility or cost. - -## What problems is Evolve solving - -### 1\. Scalability and customizability - -Deploying your decentralized application as a smart contract on a shared blockchain has many limitations. Your smart contract has to share computational resources with every other application, so scalability is limited. - -Plus, you're restricted to the execution environment that the shared blockchain uses, so developer flexibility is limited as well. - -### 2\. Security and time to market - -Deploying a new chain might sound like the perfect solution for the problems listed above. While it's somewhat true, deploying a new layer 1 chain presents a complex set of challenges and trade-offs for developers looking to build blockchain products. - -Deploying a legacy layer 1 has huge barriers to entry: time, capital, token emissions and expertise. 
- -In order to secure the network, developers must bootstrap a sufficiently secure set of validators, incurring the overhead of managing a full consensus network. This requires paying validators with inflationary tokens, putting the network's business sustainability at risk. Network effects are also critical for success, but can be challenging to achieve as the network must gain widespread adoption to be secure and valuable. - -In a potential future with millions of chains, it's unlikely all of those chains will be able to sustainably attract a sufficiently secure and decentralized validator set. - -## Why Evolve - -Evolve solves the challenges encountered during the deployment of a smart contract or a new layer 1, by minimizing these tradeoffs through the implementation of evolve chains. - -With Evolve, developers can benefit from: - -- **Shared security**: Chains inherit security from a data availability layer, by posting blocks to it. Chains reduce the trust assumptions placed on chain sequencers by allowing full nodes to download and verify the transactions in the blocks posted by the sequencer. For optimistic or zk-chains, in case of fraudulent blocks, full nodes can generate fraud or zk-proofs, which they can share with the rest of the network, including light nodes. Our roadmap includes the ability for light clients to receive and verify proofs, so that everyday users can enjoy high security guarantees. - -- **Scalability:** Evolve chains are deployed on specialized data availability layers like Celestia, which directly leverages the scalability of the DA layer. Additionally, chain transactions are executed off-chain rather than on the data availability layer. This means chains have their own dedicated computational resources, rather than sharing computational resources with other applications. 
- -- **Customizability:** Evolve is built as an open-source modular framework, to make it easier for developers to reuse the four main components and customize their chains. These components are data availability layers, execution environments, proof systems, and sequencer schemes. - -- **Faster time-to-market:** Evolve eliminates the need to bootstrap a validator set, manage a consensus network, incur high economic costs, and face other trade-offs that come with deploying a legacy layer 1\. Evolve's goal is to make deploying a chain as easy as it is to deploy a smart contract, cutting the time it takes to bring blockchain products to market from months (or even years) to just minutes. - -- **Sovereignty**: Evolve also enables developers to deploy chains for cases where communities require sovereignty. - -## How can you use Evolve - -As briefly mentioned above, Evolve could be used in many different ways. From chains, to settlement layers, and in the future even to L3s. - -### Chain with any VM - -Evolve gives developers the flexibility to use pre-existing ABCI-compatible state machines or create a custom state machine tailored to their chain needs. Evolve does not restrict the use of any specific virtual machine, allowing developers to experiment and bring innovative applications to life. - -### Cosmos SDK - -Similarly to how developers utilize the Cosmos SDK to build a layer 1 chain, the Cosmos SDK could be utilized to create a Evolve-compatible chain. Cosmos-SDK has great [documentation](https://docs.cosmos.network/main) and tooling that developers can leverage to learn. - -Another possibility is taking an existing layer 1 built with the Cosmos SDK and deploying it as a Evolve chain. Evolve gives your network a forward path. Migrate seamlessly, keep your logic, and evolve into a modular, high-performance system without CometBFT bottlenecks and zero validator overhead. 
- -### Build a settlement layer - -[Settlement layers](https://celestia.org/learn/modular-settlement-layers/settlement-in-the-modular-stack/) are ideal for developers who want to avoid deploying chains. They provide a platform for chains to verify proofs and resolve disputes. Additionally, they act as a hub for chains to facilitate trust-minimized token transfers and liquidity sharing between chains that share the same settlement layer. Think of settlement layers as a special type of execution layer. - -## When can you use Evolve - -As of today, Evolve provides a single sequencer, an execution interface (Engine API or ABCI), and a connection to Celestia. - -We're currently working on implementing many new and exciting features such as light nodes and state fraud proofs. - -Head down to the next section to learn more about what's coming for Evolve. If you're ready to start building, you can skip to the [Guides](../guides/quick-start.md) section. - -Spoiler alert, whichever you choose, it's going to be a great rabbit hole\! diff --git a/content/docs/reference/api/abci-rpc.md b/content/docs/reference/api/abci-rpc.md deleted file mode 100644 index ffca6a1..0000000 --- a/content/docs/reference/api/abci-rpc.md +++ /dev/null @@ -1,196 +0,0 @@ -# ABCI RPC Reference - -CometBFT-compatible RPC endpoints provided by ev-abci. - -## Query Methods - -### /abci_query - -Query application state. - -**Request:** - -```bash -curl 'http://localhost:26657/abci_query?path="/store/bank/key"&data=0x...' -``` - -**Response:** - -```json -{ - "jsonrpc": "2.0", - "result": { - "response": { - "code": 0, - "value": "base64encodedvalue", - "height": "1000" - } - }, - "id": 1 -} -``` - -### /block - -Get block at height. - -**Request:** - -```bash -curl 'http://localhost:26657/block?height=100' -``` - -### /block_results - -Get block results (tx results, events). 
- -**Request:** - -```bash -curl 'http://localhost:26657/block_results?height=100' -``` - -### /commit - -Get commit (signatures) at height. - -**Request:** - -```bash -curl 'http://localhost:26657/commit?height=100' -``` - -### /validators - -Get validator set (returns sequencer in Evolve). - -**Request:** - -```bash -curl 'http://localhost:26657/validators?height=100' -``` - -### /status - -Get node status. - -**Request:** - -```bash -curl 'http://localhost:26657/status' -``` - -### /genesis - -Get genesis document. - -**Request:** - -```bash -curl 'http://localhost:26657/genesis' -``` - -### /health - -Health check. - -**Request:** - -```bash -curl 'http://localhost:26657/health' -``` - -## Transaction Methods - -### /broadcast_tx_async - -Broadcast transaction, return immediately. - -**Request:** - -```bash -curl 'http://localhost:26657/broadcast_tx_async?tx=0x...' -``` - -### /broadcast_tx_sync - -Broadcast transaction, wait for CheckTx. - -**Request:** - -```bash -curl 'http://localhost:26657/broadcast_tx_sync?tx=0x...' -``` - -### /broadcast_tx_commit - -Broadcast transaction, wait for inclusion. - -**Request:** - -```bash -curl 'http://localhost:26657/broadcast_tx_commit?tx=0x...' -``` - -### /tx - -Get transaction by hash. - -**Request:** - -```bash -curl 'http://localhost:26657/tx?hash=0x...' -``` - -### /tx_search - -Search transactions. - -**Request:** - -```bash -curl 'http://localhost:26657/tx_search?query="tx.height=100"' -``` - -## WebSocket - -### /subscribe - -Subscribe to events. 
- -```json -{ - "jsonrpc": "2.0", - "method": "subscribe", - "params": {"query": "tm.event='NewBlock'"}, - "id": 1 -} -``` - -Event types: - -- `NewBlock` — New block committed -- `Tx` — Transaction included -- `NewBlockHeader` — New block header - -## Unsupported Methods - -These CometBFT methods are not supported in ev-abci: - -| Method | Reason | -|--------|--------| -| `/consensus_state` | No BFT consensus | -| `/dump_consensus_state` | No BFT consensus | -| `/net_info` | Different P2P model | -| `/unconfirmed_txs` | Different mempool | -| `/num_unconfirmed_txs` | Different mempool | - -## Port - -Default: `26657` - -Configure: - -```bash ---evnode.rpc.address tcp://0.0.0.0:26657 -``` diff --git a/content/docs/reference/api/engine-api.md b/content/docs/reference/api/engine-api.md deleted file mode 100644 index 9e8c4e9..0000000 --- a/content/docs/reference/api/engine-api.md +++ /dev/null @@ -1,183 +0,0 @@ -# Engine API Reference - -Engine API methods used by ev-node to communicate with ev-reth. - -## Authentication - -All requests require JWT authentication via the `Authorization` header: - -```text -Authorization: Bearer -``` - -Generate JWT from shared secret: - -```bash -openssl rand -hex 32 > jwt.hex -``` - -## Methods - -### engine_forkchoiceUpdatedV3 - -Update fork choice and optionally build a new block. - -**Request:** - -```json -{ - "jsonrpc": "2.0", - "method": "engine_forkchoiceUpdatedV3", - "params": [ - { - "headBlockHash": "0x...", - "safeBlockHash": "0x...", - "finalizedBlockHash": "0x..." - }, - { - "timestamp": "0x...", - "prevRandao": "0x...", - "suggestedFeeRecipient": "0x...", - "withdrawals": [], - "parentBeaconBlockRoot": "0x..." - } - ], - "id": 1 -} -``` - -**Response:** - -```json -{ - "jsonrpc": "2.0", - "result": { - "payloadStatus": { - "status": "VALID", - "latestValidHash": "0x..." - }, - "payloadId": "0x..." - }, - "id": 1 -} -``` - -### engine_getPayloadV3 - -Get a built payload. 
- -**Request:** - -```json -{ - "jsonrpc": "2.0", - "method": "engine_getPayloadV3", - "params": ["0x...payloadId"], - "id": 1 -} -``` - -**Response:** - -```json -{ - "jsonrpc": "2.0", - "result": { - "executionPayload": { - "parentHash": "0x...", - "feeRecipient": "0x...", - "stateRoot": "0x...", - "receiptsRoot": "0x...", - "logsBloom": "0x...", - "prevRandao": "0x...", - "blockNumber": "0x1", - "gasLimit": "0x...", - "gasUsed": "0x...", - "timestamp": "0x...", - "extraData": "0x", - "baseFeePerGas": "0x...", - "blockHash": "0x...", - "transactions": ["0x..."], - "withdrawals": [], - "blobGasUsed": "0x0", - "excessBlobGas": "0x0" - }, - "blockValue": "0x...", - "blobsBundle": { - "commitments": [], - "proofs": [], - "blobs": [] - }, - "shouldOverrideBuilder": false - }, - "id": 1 -} -``` - -### engine_newPayloadV3 - -Validate and execute a payload. - -**Request:** - -```json -{ - "jsonrpc": "2.0", - "method": "engine_newPayloadV3", - "params": [ - { - "parentHash": "0x...", - "feeRecipient": "0x...", - "stateRoot": "0x...", - "receiptsRoot": "0x...", - "logsBloom": "0x...", - "prevRandao": "0x...", - "blockNumber": "0x1", - "gasLimit": "0x...", - "gasUsed": "0x...", - "timestamp": "0x...", - "extraData": "0x", - "baseFeePerGas": "0x...", - "blockHash": "0x...", - "transactions": ["0x..."], - "withdrawals": [], - "blobGasUsed": "0x0", - "excessBlobGas": "0x0" - }, - ["0x...expectedBlobVersionedHashes"], - "0x...parentBeaconBlockRoot" - ], - "id": 1 -} -``` - -**Response:** - -```json -{ - "jsonrpc": "2.0", - "result": { - "status": "VALID", - "latestValidHash": "0x...", - "validationError": null - }, - "id": 1 -} -``` - -## Payload Status - -| Status | Description | -|--------|-------------| -| `VALID` | Payload is valid | -| `INVALID` | Payload failed validation | -| `SYNCING` | Node is syncing, cannot validate | -| `ACCEPTED` | Payload accepted, validation pending | -| `INVALID_BLOCK_HASH` | Block hash mismatch | - -## Ports - -| Port | Purpose | 
-|------|---------| -| 8551 | Engine API (authenticated) | -| 8545 | JSON-RPC (public) | diff --git a/content/docs/reference/api/meta.json b/content/docs/reference/api/meta.json deleted file mode 100644 index 51c85eb..0000000 --- a/content/docs/reference/api/meta.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "title": "API Reference", - "pages": ["..."] -} diff --git a/content/docs/reference/api/rpc-endpoints.md b/content/docs/reference/api/rpc-endpoints.md deleted file mode 100644 index a8e41a7..0000000 --- a/content/docs/reference/api/rpc-endpoints.md +++ /dev/null @@ -1,176 +0,0 @@ -# RPC Endpoints Reference - -ev-node JSON-RPC endpoints. - -## Health - -### GET /health - -Check node health. - -**Response:** - -```json -{ - "status": "ok" -} -``` - -## Block Queries - -### POST /block - -Get block by height. - -**Request:** - -```json -{ - "jsonrpc": "2.0", - "method": "block", - "params": { "height": "100" }, - "id": 1 -} -``` - -**Response:** - -```json -{ - "jsonrpc": "2.0", - "result": { - "block": { - "header": { - "height": "100", - "time": "2024-01-01T00:00:00Z", - "last_header_hash": "0x...", - "data_hash": "0x...", - "app_hash": "0x...", - "proposer_address": "0x..." - }, - "data": { - "txs": ["0x..."] - } - } - }, - "id": 1 -} -``` - -### POST /header - -Get header by height. - -**Request:** - -```json -{ - "jsonrpc": "2.0", - "method": "header", - "params": { "height": "100" }, - "id": 1 -} -``` - -### POST /block_by_hash - -Get block by hash. - -**Request:** - -```json -{ - "jsonrpc": "2.0", - "method": "block_by_hash", - "params": { "hash": "0x..." }, - "id": 1 -} -``` - -## Transaction Queries - -### POST /tx - -Get transaction by hash. - -**Request:** - -```json -{ - "jsonrpc": "2.0", - "method": "tx", - "params": { "hash": "0x..." }, - "id": 1 -} -``` - -## Status - -### POST /status - -Get node status. 
- -**Response:** - -```json -{ - "jsonrpc": "2.0", - "result": { - "node_info": { - "network": "chain-id", - "version": "1.0.0" - }, - "sync_info": { - "latest_block_height": "1000", - "latest_block_time": "2024-01-01T00:00:00Z", - "catching_up": false - } - }, - "id": 1 -} -``` - -## DA Status - -### POST /da_status - -Get DA layer status. - -**Response:** - -```json -{ - "jsonrpc": "2.0", - "result": { - "da_height": "5000", - "last_submitted_height": "999", - "pending_blocks": 1 - }, - "id": 1 -} -``` - -## Configuration - -Default port: `26657` - -Configure via flag: - -```bash ---evnode.rpc.address tcp://0.0.0.0:26657 -``` - -## WebSocket - -Subscribe to events via WebSocket at `ws://localhost:26657/websocket`. - -### Subscribe to new blocks - -```json -{ - "jsonrpc": "2.0", - "method": "subscribe", - "params": { "query": "tm.event='NewBlock'" }, - "id": 1 -} -``` diff --git a/content/docs/reference/configuration/ev-abci-flags.md b/content/docs/reference/configuration/ev-abci-flags.md deleted file mode 100644 index 2733a8a..0000000 --- a/content/docs/reference/configuration/ev-abci-flags.md +++ /dev/null @@ -1,99 +0,0 @@ -# ev-abci Flags Reference - -Command-line flags for Cosmos SDK applications using ev-abci. - -## ev-node Flags - -These flags configure the underlying ev-node instance. 
- -### Node Configuration - -| Flag | Type | Default | Description | -|---------------------------------|----------|---------|------------------------------| -| `--evnode.node.aggregator` | bool | `false` | Run as block producer | -| `--evnode.node.block_time` | duration | `1s` | Block production interval | -| `--evnode.node.lazy_aggregator` | bool | `false` | Only produce blocks with txs | -| `--evnode.node.lazy_block_time` | duration | `1s` | Max wait in lazy mode | - -### DA Configuration - -| Flag | Type | Default | Description | -|--------------------------|--------|----------|-------------------------| -| `--evnode.da.address` | string | required | DA layer URL | -| `--evnode.da.auth_token` | string | `""` | DA authentication token | -| `--evnode.da.namespace` | string | `""` | DA namespace (hex) | -| `--evnode.da.gas_price` | float | `0.01` | DA gas price | - -### P2P Configuration - -| Flag | Type | Default | Description | -|------------------------|--------|--------------------------|--------------------------------| -| `--evnode.p2p.listen` | string | `/ip4/0.0.0.0/tcp/26656` | P2P listen address | -| `--evnode.p2p.peers` | string | `""` | Comma-separated peer addresses | -| `--evnode.p2p.blocked` | string | `""` | Blocked peer IDs | - -### Signer Configuration - -| Flag | Type | Default | Description | -|------------------------------|--------|----------|-----------------------| -| `--evnode.signer.passphrase` | string | required | Signer key passphrase | - -### RPC Configuration - -| Flag | Type | Default | Description | -|------------------------|--------|-----------------------|--------------------| -| `--evnode.rpc.address` | string | `tcp://0.0.0.0:26657` | RPC listen address | - -## Cosmos SDK Flags - -Standard Cosmos SDK flags remain available: - -| Flag | Description | -|----------------|--------------------------------------| -| `--home` | Application home directory | -| `--log_level` | Log level (debug, info, warn, error) | -| `--log_format` | 
Log format (plain, json) | -| `--trace` | Enable full stack traces | - -## Environment Variables - -Flags can be set via environment variables: - -```bash -EVNODE_NODE_AGGREGATOR=true -EVNODE_DA_ADDRESS=http://localhost:7980 -EVNODE_SIGNER_PASSPHRASE=secret -``` - -Pattern: `EVNODE_
_` (uppercase, underscores) - -## Examples - -### Sequencer Node - -```bash -appd start \ - --evnode.node.aggregator \ - --evnode.node.block_time 500ms \ - --evnode.da.address http://localhost:7980 \ - --evnode.signer.passphrase secret -``` - -### Full Node - -```bash -appd start \ - --evnode.da.address http://localhost:7980 \ - --evnode.p2p.peers 12D3KooW...@sequencer.example.com:26656 -``` - -### Lazy Aggregator - -```bash -appd start \ - --evnode.node.aggregator \ - --evnode.node.lazy_aggregator \ - --evnode.node.lazy_block_time 5s \ - --evnode.da.address http://localhost:7980 \ - --evnode.signer.passphrase secret -``` diff --git a/content/docs/reference/configuration/ev-node-config.md b/content/docs/reference/configuration/ev-node-config.md deleted file mode 100644 index 56eaade..0000000 --- a/content/docs/reference/configuration/ev-node-config.md +++ /dev/null @@ -1,999 +0,0 @@ -# Config - -This document provides a comprehensive reference for all configuration options available in Evolve. Understanding these configurations will help you tailor Evolve's behavior to your specific needs, whether you're running an aggregator, a full node, or a light client. 
- -## Table of Contents - -- [DA-Only Sync Mode](#da-only-sync-mode) -- [Introduction to Configurations](#configs) -- [Base Configuration](#base-configuration) - - [Root Directory](#root-directory) - - [Database Path](#database-path) - - [Chain ID](#chain-id) -- [Node Configuration (`node`)](#node-configuration-node) - - [Aggregator Mode](#aggregator-mode) - - [Light Client Mode](#light-client-mode) - - [Block Time](#block-time) - - [Maximum Pending Blocks](#maximum-pending-blocks) - - [Lazy Mode (Lazy Aggregator)](#lazy-mode-lazy-aggregator) - - [Lazy Block Interval](#lazy-block-interval) -- [Data Availability Configuration (`da`)](#data-availability-configuration-da) - - [DA Service Address](#da-service-address) - - [DA Authentication Token](#da-authentication-token) - - [DA Gas Price](#da-gas-price) - - [DA Gas Multiplier](#da-gas-multiplier) - - [DA Submit Options](#da-submit-options) - - [DA Signing Addresses](#da-signing-addresses) - - [DA Namespace](#da-namespace) - - [DA Header Namespace](#da-namespace) - - [DA Data Namespace](#da-data-namespace) - - [DA Block Time](#da-block-time) - - [DA Mempool TTL](#da-mempool-ttl) - - [DA Request Timeout](#da-request-timeout) - - [DA Batching Strategy](#da-batching-strategy) - - [DA Batch Size Threshold](#da-batch-size-threshold) - - [DA Batch Max Delay](#da-batch-max-delay) - - [DA Batch Min Items](#da-batch-min-items) -- [P2P Configuration (`p2p`)](#p2p-configuration-p2p) - - [P2P Listen Address](#p2p-listen-address) - - [P2P Peers](#p2p-peers) - - [P2P Blocked Peers](#p2p-blocked-peers) - - [P2P Allowed Peers](#p2p-allowed-peers) -- [RPC Configuration (`rpc`)](#rpc-configuration-rpc) - - [RPC Server Address](#rpc-server-address) - - [Enable DA Visualization](#enable-da-visualization) - - [Health Endpoints](#health-endpoints) -- [Instrumentation Configuration (`instrumentation`)](#instrumentation-configuration-instrumentation) - - [Enable Prometheus Metrics](#enable-prometheus-metrics) - - [Prometheus Listen 
Address](#prometheus-listen-address) - - [Maximum Open Connections](#maximum-open-connections) - - [Enable Pprof Profiling](#enable-pprof-profiling) - - [Pprof Listen Address](#pprof-listen-address) -- [Logging Configuration (`log`)](#logging-configuration-log) - - [Log Level](#log-level) - - [Log Format](#log-format) - - [Log Trace (Stack Traces)](#log-trace-stack-traces) -- [Signer Configuration (`signer`)](#signer-configuration-signer) - - [Signer Type](#signer-type) - - [Signer Path](#signer-path) - - [Signer Passphrase](#signer-passphrase) - -## DA-Only Sync Mode - -Evolve supports running nodes that sync exclusively from the Data Availability (DA) layer without participating in P2P networking. This mode is useful for: - -- **Pure DA followers**: Nodes that only need the canonical chain data from DA -- **Resource optimization**: Reducing network overhead by avoiding P2P gossip -- **Simplified deployment**: No need to configure or maintain P2P peer connections -- **Isolated environments**: Nodes that should not participate in P2P communication - -**To enable DA-only sync mode:** - -1. **Leave P2P peers empty** (default behavior): - - ```yaml - p2p: - peers: "" # Empty or omit this field entirely - ``` - -2. **Configure DA connection** (required): - - ```yaml - da: - address: "your-da-service:port" - namespace: "your-namespace" - # ... other DA configuration - ``` - -3. **Optional**: You can still configure P2P listen address for potential future connections, but without peers, no P2P networking will occur. 
- -When running in DA-only mode, the node will: - -- ✅ Sync blocks and headers from the DA layer -- ✅ Validate transactions and maintain state -- ✅ Serve RPC requests -- ❌ Not participate in P2P gossip or peer discovery -- ❌ Not share blocks with other nodes via P2P -- ❌ Not receive transactions via P2P (only from direct RPC submission) - -## Configs - -Evolve configurations can be managed through a YAML file (typically `evnode.yml` located in `~/.evolve/config/` or `/config/`) and command-line flags. The system prioritizes configurations in the following order (highest priority first): - -1. **Command-line flags:** Override all other settings. -2. **YAML configuration file:** Values specified in the `evnode.yml` file. -3. **Default values:** Predefined defaults within Evolve. - -Environment variables can also be used, typically prefixed with your executable's name (e.g., `YOURAPP_CHAIN_ID="my-chain"`). - -## Base Configuration - -These are fundamental settings for your Evolve node. - -### Root Directory - -**Description:** -The root directory where Evolve stores its data, including the database and configuration files. This is a foundational setting that dictates where all other file paths are resolved from. - -**YAML:** -This option is not set within the YAML configuration file itself, as it specifies the location _of_ the configuration file and other application data. - -**Command-line Flag:** -`--home ` -_Example:_ `--home /mnt/data/evolve_node` -_Default:_ `~/.evolve` (or a directory derived from the application name if `defaultHome` is customized). -_Constant:_ `FlagRootDir` - -### Database Path - -**Description:** -The path, relative to the Root Directory, where the Evolve database will be stored. This database contains blockchain state, blocks, and other critical node data. 
- -**YAML:** -Set this in your configuration file at the top level: - -```yaml -db_path: "data" -``` - -**Command-line Flag:** -`--evnode.db_path ` -_Example:_ `--evnode.db_path "node_db"` -_Default:_ `"data"` -_Constant:_ `FlagDBPath` - -### Chain ID - -**Description:** -The unique identifier for your chain. This ID is used to differentiate your network from others and is crucial for network communication and transaction validation. - -**YAML:** -Set this in your configuration file at the top level: - -```yaml -chain_id: "my-evolve-chain" -``` - -**Command-line Flag:** -`--chain_id ` -_Example:_ `--chain_id "super_rollup_testnet_v1"` -_Default:_ `"evolve"` -_Constant:_ `FlagChainID` - -## Node Configuration (`node`) - -Settings related to the core behavior of the Evolve node, including its mode of operation and block production parameters. - -**YAML Section:** - -```yaml -node: - # ... node configurations ... -``` - -### Aggregator Mode - -**Description:** -If true, the node runs in aggregator mode. Aggregators are responsible for producing blocks by collecting transactions, ordering them, and proposing them to the network. - -**YAML:** - -```yaml -node: - aggregator: true -``` - -**Command-line Flag:** -`--evnode.node.aggregator` (boolean, presence enables it) -_Example:_ `--evnode.node.aggregator` -_Default:_ `false` -_Constant:_ `FlagAggregator` - -### Light Client Mode - -**Description:** -If true, the node runs in light client mode. Light clients rely on full nodes for block headers and state information, offering a lightweight way to interact with the chain without storing all data. - -**YAML:** - -```yaml -node: - light: true -``` - -**Command-line Flag:** -`--evnode.node.light` (boolean, presence enables it) -_Example:_ `--evnode.node.light` -_Default:_ `false` -_Constant:_ `FlagLight` - -### Block Time - -**Description:** -The target time interval between consecutive blocks produced by an aggregator. 
This duration (e.g., "500ms", "1s", "5s") dictates the pace of block production. - -**YAML:** - -```yaml -node: - block_time: "1s" -``` - -**Command-line Flag:** -`--evnode.node.block_time ` -_Example:_ `--evnode.node.block_time 2s` -_Default:_ `"1s"` -_Constant:_ `FlagBlockTime` - -### Maximum Pending Blocks - -**Description:** -The maximum number of blocks that can be pending Data Availability (DA) submission. When this limit is reached, the aggregator pauses block production until some blocks are confirmed on the DA layer. Use 0 for no limit. This helps manage resource usage and DA layer capacity. - -**YAML:** - -```yaml -node: - max_pending_blocks: 100 -``` - -**Command-line Flag:** -`--evnode.node.max_pending_blocks ` -_Example:_ `--evnode.node.max_pending_blocks 50` -_Default:_ `0` (no limit) -_Constant:_ `FlagMaxPendingBlocks` - -### Lazy Mode (Lazy Aggregator) - -**Description:** -Enables lazy aggregation mode. In this mode, blocks are produced only when new transactions are available in the mempool or after the `lazy_block_interval` has passed. This optimizes resource usage by avoiding the creation of empty blocks during periods of inactivity. - -**YAML:** - -```yaml -node: - lazy_mode: true -``` - -**Command-line Flag:** -`--evnode.node.lazy_mode` (boolean, presence enables it) -_Example:_ `--evnode.node.lazy_mode` -_Default:_ `false` -_Constant:_ `FlagLazyAggregator` - -### Lazy Block Interval - -**Description:** -The maximum time interval between blocks when running in lazy aggregation mode (`lazy_mode`). This ensures that blocks are produced periodically even if there are no new transactions, keeping the chain active. This value is generally larger than `block_time`. 
- -**YAML:** - -```yaml -node: - lazy_block_interval: "30s" -``` - -**Command-line Flag:** -`--evnode.node.lazy_block_interval ` -_Example:_ `--evnode.node.lazy_block_interval 1m` -_Default:_ `"30s"` -_Constant:_ `FlagLazyBlockTime` - -## Data Availability Configuration (`da`) - -Parameters for connecting and interacting with the Data Availability (DA) layer, which Evolve uses to publish block data. - -**YAML Section:** - -```yaml -da: - # ... DA configurations ... -``` - -### DA Service Address - -**Description:** -The network address (host:port) of the Data Availability layer service. Evolve connects to this endpoint to submit and retrieve block data. - -**YAML:** - -```yaml -da: - address: "localhost:26659" -``` - -**Command-line Flag:** -`--evnode.da.address ` -_Example:_ `--evnode.da.address 192.168.1.100:26659` -_Default:_ `""` (empty, must be configured if DA is used) -_Constant:_ `FlagDAAddress` - -### DA Authentication Token - -**Description:** -The authentication token required to interact with the DA layer service, if the service mandates authentication. - -**YAML:** - -```yaml -da: - auth_token: "YOUR_DA_AUTH_TOKEN" -``` - -**Command-line Flag:** -`--evnode.da.auth_token ` -_Example:_ `--evnode.da.auth_token mysecrettoken` -_Default:_ `""` (empty) -_Constant:_ `FlagDAAuthToken` - -### DA Gas Price - -**Description:** -The gas price to use for transactions submitted to the DA layer. A value of -1 indicates automatic gas price determination (if supported by the DA layer). Higher values may lead to faster inclusion of data. - -**YAML:** - -```yaml -da: - gas_price: 0.025 -``` - -**Command-line Flag:** -`--evnode.da.gas_price ` -_Example:_ `--evnode.da.gas_price 0.05` -_Default:_ `-1` (automatic) -_Constant:_ `FlagDAGasPrice` - -### DA Gas Multiplier - -**Description:** -A multiplier applied to the gas price when retrying failed DA submissions. 
Values greater than 1 increase the gas price on retries, potentially improving the chances of successful inclusion. - -**YAML:** - -```yaml -da: - gas_multiplier: 1.1 -``` - -**Command-line Flag:** -`--evnode.da.gas_multiplier ` -_Example:_ `--evnode.da.gas_multiplier 1.5` -_Default:_ `1.0` (no multiplication) -_Constant:_ `FlagDAGasMultiplier` - -### DA Submit Options - -**Description:** -Additional options passed to the DA layer when submitting data. The format and meaning of these options depend on the specific DA implementation being used. For example, with Celestia, this can include custom gas settings or other submission parameters in JSON format. - -**Note:** If you configure multiple signing addresses (see [DA Signing Addresses](#da-signing-addresses)), the selected signing address will be automatically merged into these options as a JSON field `signer_address` (matching Celestia's TxConfig schema). If the base options are already valid JSON, the signing address is added to the existing object; otherwise, a new JSON object is created. - -**YAML:** - -```yaml -da: - submit_options: '{"key":"value"}' # Example, format depends on DA layer -``` - -**Command-line Flag:** -`--evnode.da.submit_options ` -_Example:_ `--evnode.da.submit_options '{"custom_param":true}'` -_Default:_ `""` (empty) -_Constant:_ `FlagDASubmitOptions` - -### DA Signing Addresses - -**Description:** -A comma-separated list of signing addresses to use for DA blob submissions. When multiple addresses are provided, they will be used in round-robin fashion to prevent sequence mismatches that can occur with high-throughput Cosmos SDK-based DA layers. This is particularly useful for Celestia when submitting many transactions concurrently. - -Each submission will select the next address in the list, and that address will be automatically added to the `submit_options` as `signer_address`. 
This ensures that the DA layer (e.g., celestia-node) uses the specified account for signing that particular blob submission. - -**Setup Requirements:** - -- All addresses must be loaded into the DA node's keyring and have sufficient funds for transaction fees -- For Celestia, see the guide on setting up multiple accounts in the DA node documentation - -**YAML:** - -```yaml -da: - signing_addresses: - - "celestia1abc123..." - - "celestia1def456..." - - "celestia1ghi789..." -``` - -**Command-line Flag:** -`--evnode.da.signing_addresses ` -_Example:_ `--evnode.da.signing_addresses celestia1abc...,celestia1def...,celestia1ghi...` -_Default:_ `[]` (empty, uses default DA node behavior) -_Constant:_ `FlagDASigningAddresses` - -**Behavior:** - -- If no signing addresses are configured, submissions use the DA layer's default signing behavior -- If one address is configured, all submissions use that address -- If multiple addresses are configured, they are used in round-robin order to distribute the load and prevent nonce/sequence conflicts -- The address selection is thread-safe for concurrent submissions - -### DA Namespace - -**Description:** -The namespace ID used when submitting blobs (block data) to the DA layer. This helps segregate data from different chains or applications on a shared DA layer. - -**Note:** If only `namespace` is provided, it will be used for both headers and data, otherwise the `data_namespace` will be used for data. Doing so allows speeding up light clients. - -**YAML:** - -```yaml -da: - namespace: "MY_UNIQUE_NAMESPACE_ID" -``` - -**Command-line Flag:** -`--evnode.da.namespace ` -_Example:_ `--evnode.da.namespace 0x1234567890abcdef` -_Default:_ `""` (empty) -_Constant:_ `FlagDANamespace` - -### DA Data Namespace - -**Description:** -The namespace ID specifically for submitting transaction data to the DA layer. Transaction data is submitted separately from headers, enabling nodes to sync only the data they need. 
The namespace value is encoded by the node to ensure proper formatting and compatibility with the DA layer. - -**YAML:** - -```yaml -da: - data_namespace: "DATA_NAMESPACE_ID" -``` - -**Command-line Flag:** -`--evnode.da.data_namespace ` -_Example:_ `--evnode.da.data_namespace my_data_namespace` -_Default:_ Falls back to `namespace` if not set -_Constant:_ `FlagDADataNamespace` - -### DA Block Time - -**Description:** -The average block time of the Data Availability chain (specified as a duration string, e.g., "15s", "1m"). This value influences: - -- The frequency of DA layer syncing. -- The maximum backoff time for retrying DA submissions. -- Calculation of transaction expiration when multiplied by `mempool_ttl`. - -**YAML:** - -```yaml -da: - block_time: "6s" -``` - -**Command-line Flag:** -`--evnode.da.block_time ` -_Example:_ `--evnode.da.block_time 12s` -_Default:_ `"6s"` -_Constant:_ `FlagDABlockTime` - -### DA Mempool TTL - -**Description:** -The number of DA blocks after which a transaction submitted to the DA layer is considered expired and potentially dropped from the DA layer's mempool. This also controls the retry backoff timing for DA submissions. - -**YAML:** - -```yaml -da: - mempool_ttl: 20 -``` - -**Command-line Flag:** -`--evnode.da.mempool_ttl ` -_Example:_ `--evnode.da.mempool_ttl 30` -_Default:_ `20` -_Constant:_ `FlagDAMempoolTTL` - -### DA Request Timeout - -**Description:** -Per-request timeout applied to DA `GetIDs` and `Get` RPC calls while retrieving blobs. Increase this value if your DA endpoint has high latency to avoid premature failures; decrease it to make the syncer fail fast and free resources sooner when the DA node becomes unresponsive. 
- -**YAML:** - -```yaml -da: - request_timeout: "30s" -``` - -**Command-line Flag:** -`--evnode.da.request_timeout ` -_Example:_ `--evnode.da.request_timeout 45s` -_Default:_ `"30s"` -_Constant:_ `FlagDARequestTimeout` - -### DA Batching Strategy - -**Description:** -Controls how blocks are batched before submission to the DA layer. Different strategies offer trade-offs between latency, cost efficiency, and throughput. All strategies pass through the DA submitter which performs additional size checks and may further split batches that exceed the DA layer's blob size limit. - -Available strategies: - -- **`immediate`**: Submits as soon as any items are available. Best for low-latency requirements where cost is not a concern. -- **`size`**: Waits until the batch reaches a size threshold (fraction of max blob size). Best for maximizing blob utilization and minimizing costs when latency is flexible. -- **`time`**: Waits for a time interval before submitting. Provides predictable submission timing aligned with DA block times. -- **`adaptive`**: Balances between size and time constraints—submits when either the size threshold is reached OR the max delay expires. Recommended for most production deployments as it optimizes both cost and latency. - -**YAML:** - -```yaml -da: - batching_strategy: "time" -``` - -**Command-line Flag:** -`--evnode.da.batching_strategy ` -_Example:_ `--evnode.da.batching_strategy adaptive` -_Default:_ `"time"` -_Constant:_ `FlagDABatchingStrategy` - -### DA Batch Size Threshold - -**Description:** -The minimum blob size threshold (as a fraction of the maximum blob size, between 0.0 and 1.0) before submitting a batch. Only applies to the `size` and `adaptive` strategies. For example, a value of 0.8 means the batch will be submitted when it reaches 80% of the maximum blob size. - -Higher values maximize blob utilization and reduce costs but may increase latency. Lower values reduce latency but may result in less efficient blob usage. 
- -**YAML:** - -```yaml -da: - batch_size_threshold: 0.8 -``` - -**Command-line Flag:** -`--evnode.da.batch_size_threshold ` -_Example:_ `--evnode.da.batch_size_threshold 0.9` -_Default:_ `0.8` (80% of max blob size) -_Constant:_ `FlagDABatchSizeThreshold` - -### DA Batch Max Delay - -**Description:** -The maximum time to wait before submitting a batch regardless of size. Applies to the `time` and `adaptive` strategies. Lower values reduce latency but may increase costs due to smaller batches. This value is typically aligned with the DA chain's block time to ensure submissions land in consecutive blocks. - -When set to 0, defaults to the DA BlockTime value. - -**YAML:** - -```yaml -da: - batch_max_delay: "6s" -``` - -**Command-line Flag:** -`--evnode.da.batch_max_delay ` -_Example:_ `--evnode.da.batch_max_delay 12s` -_Default:_ `0` (uses DA BlockTime) -_Constant:_ `FlagDABatchMaxDelay` - -### DA Batch Min Items - -**Description:** -The minimum number of items (headers or data) to accumulate before considering submission. This helps avoid submitting single items when more are expected soon, improving batching efficiency. All strategies respect this minimum. - -**YAML:** - -```yaml -da: - batch_min_items: 1 -``` - -**Command-line Flag:** -`--evnode.da.batch_min_items ` -_Example:_ `--evnode.da.batch_min_items 5` -_Default:_ `1` -_Constant:_ `FlagDABatchMinItems` - -## P2P Configuration (`p2p`) - -Settings for peer-to-peer networking, enabling nodes to discover each other, exchange blocks, and share transactions. - -**YAML Section:** - -```yaml -p2p: - # ... P2P configurations ... -``` - -### P2P Listen Address - -**Description:** -The network address (host:port) on which the Evolve node will listen for incoming P2P connections from other nodes. 
- -**YAML:** - -```yaml -p2p: - listen_address: "0.0.0.0:7676" -``` - -**Command-line Flag:** -`--evnode.p2p.listen_address ` -_Example:_ `--evnode.p2p.listen_address /ip4/127.0.0.1/tcp/26656` -_Default:_ `"/ip4/0.0.0.0/tcp/7676"` -_Constant:_ `FlagP2PListenAddress` - -### P2P Peers - -**Description:** -A comma-separated list of peer addresses (e.g., multiaddresses) that the node will attempt to connect to for bootstrapping its P2P connections. These are often referred to as seed nodes. - -**For DA-only sync mode:** Leave this field empty (default) to disable P2P networking entirely. When no peers are configured, the node will sync exclusively from the Data Availability layer without participating in P2P gossip, peer discovery, or block sharing. This is useful for nodes that only need to follow the canonical chain data from DA. - -**YAML:** - -```yaml -p2p: - peers: "/ip4/some_peer_ip/tcp/7676/p2p/PEER_ID1,/ip4/another_peer_ip/tcp/7676/p2p/PEER_ID2" - # For DA-only sync, leave peers empty: - # peers: "" -``` - -**Command-line Flag:** -`--evnode.p2p.peers ` -_Example:_ `--evnode.p2p.peers /dns4/seed.example.com/tcp/26656/p2p/12D3KooW...` -_Default:_ `""` (empty - enables DA-only sync mode) -_Constant:_ `FlagP2PPeers` - -### P2P Blocked Peers - -**Description:** -A comma-separated list of peer IDs that the node should block from connecting. This can be used to prevent connections from known malicious or problematic peers. - -**YAML:** - -```yaml -p2p: - blocked_peers: "PEER_ID_TO_BLOCK1,PEER_ID_TO_BLOCK2" -``` - -**Command-line Flag:** -`--evnode.p2p.blocked_peers ` -_Example:_ `--evnode.p2p.blocked_peers 12D3KooW...,12D3KooX...` -_Default:_ `""` (empty) -_Constant:_ `FlagP2PBlockedPeers` - -### P2P Allowed Peers - -**Description:** -A comma-separated list of peer IDs that the node should exclusively allow connections from. If this list is non-empty, only peers in this list will be able to connect. 
- -**YAML:** - -```yaml -p2p: - allowed_peers: "PEER_ID_TO_ALLOW1,PEER_ID_TO_ALLOW2" -``` - -**Command-line Flag:** -`--evnode.p2p.allowed_peers ` -_Example:_ `--evnode.p2p.allowed_peers 12D3KooY...,12D3KooZ...` -_Default:_ `""` (empty, allow all unless blocked) -_Constant:_ `FlagP2PAllowedPeers` - -## RPC Configuration (`rpc`) - -Settings for the Remote Procedure Call (RPC) server, which allows clients and applications to interact with the Evolve node. - -**YAML Section:** - -```yaml -rpc: - # ... RPC configurations ... -``` - -### RPC Server Address - -**Description:** -The network address (host:port) to which the RPC server will bind and listen for incoming requests. - -**YAML:** - -```yaml -rpc: - address: "127.0.0.1:7331" -``` - -**Command-line Flag:** -`--evnode.rpc.address ` -_Example:_ `--evnode.rpc.address 0.0.0.0:26657` -_Default:_ `"127.0.0.1:7331"` -_Constant:_ `FlagRPCAddress` - -### Enable DA Visualization - -**Description:** -If true, enables the Data Availability (DA) visualization endpoints that provide real-time monitoring of blob submissions to the DA layer. This includes a web-based dashboard and REST API endpoints for tracking submission statistics, monitoring DA health, and analyzing blob details. Only aggregator nodes submit data to the DA layer, so this feature is most useful when running in aggregator mode. - -**YAML:** - -```yaml -rpc: - enable_da_visualization: true -``` - -**Command-line Flag:** -`--evnode.rpc.enable_da_visualization` (boolean, presence enables it) -_Example:_ `--evnode.rpc.enable_da_visualization` -_Default:_ `false` -_Constant:_ `FlagRPCEnableDAVisualization` - -See the [DA Visualizer Guide](../guides/da/visualizer.md) for detailed information on using this feature. - -### Health Endpoints - -#### `/health/live` - -Returns `200 OK` if the process is alive and can access the store. - -```bash -curl http://localhost:7331/health/live -``` - -#### `/health/ready` - -Returns `200 OK` if the node can serve correct data. 
Checks: - -- P2P is listening (if enabled) -- Has synced blocks -- Not too far behind network -- Non-aggregators: has peers -- Aggregators: producing blocks at expected rate - -```bash -curl http://localhost:7331/health/ready -``` - -Configure max blocks behind: - -```yaml -node: - readiness_max_blocks_behind: 15 -``` - -## Instrumentation Configuration (`instrumentation`) - -Settings for enabling and configuring metrics and profiling endpoints, useful for monitoring node performance and debugging. - -**YAML Section:** - -```yaml -instrumentation: - # ... instrumentation configurations ... -``` - -### Enable Prometheus Metrics - -**Description:** -If true, enables the Prometheus metrics endpoint, allowing Prometheus to scrape operational data from the Evolve node. - -**YAML:** - -```yaml -instrumentation: - prometheus: true -``` - -**Command-line Flag:** -`--evnode.instrumentation.prometheus` (boolean, presence enables it) -_Example:_ `--evnode.instrumentation.prometheus` -_Default:_ `false` -_Constant:_ `FlagPrometheus` - -### Prometheus Listen Address - -**Description:** -The network address (host:port) where the Prometheus metrics server will listen for scraping requests. - -See [Metrics](../guides/metrics.md) for more details on what metrics are exposed. - -**YAML:** - -```yaml -instrumentation: - prometheus_listen_addr: ":2112" -``` - -**Command-line Flag:** -`--evnode.instrumentation.prometheus_listen_addr ` -_Example:_ `--evnode.instrumentation.prometheus_listen_addr 0.0.0.0:9090` -_Default:_ `":2112"` -_Constant:_ `FlagPrometheusListenAddr` - -### Maximum Open Connections - -**Description:** -The maximum number of simultaneous connections allowed for the metrics server (e.g., Prometheus endpoint). 
- -**YAML:** - -```yaml -instrumentation: - max_open_connections: 100 -``` - -**Command-line Flag:** -`--evnode.instrumentation.max_open_connections ` -_Example:_ `--evnode.instrumentation.max_open_connections 50` -_Default:_ (Refer to `DefaultInstrumentationConfig()` in code, typically a reasonable number like 100) -_Constant:_ `FlagMaxOpenConnections` - -### Enable Pprof Profiling - -**Description:** -If true, enables the pprof HTTP endpoint, which provides runtime profiling data for debugging performance issues. Accessing these endpoints can help diagnose CPU and memory usage. - -**YAML:** - -```yaml -instrumentation: - pprof: true -``` - -**Command-line Flag:** -`--evnode.instrumentation.pprof` (boolean, presence enables it) -_Example:_ `--evnode.instrumentation.pprof` -_Default:_ `false` -_Constant:_ `FlagPprof` - -### Pprof Listen Address - -**Description:** -The network address (host:port) where the pprof HTTP server will listen for profiling requests. - -**YAML:** - -```yaml -instrumentation: - pprof_listen_addr: "localhost:6060" -``` - -**Command-line Flag:** -`--evnode.instrumentation.pprof_listen_addr ` -_Example:_ `--evnode.instrumentation.pprof_listen_addr 0.0.0.0:6061` -_Default:_ `"localhost:6060"` -_Constant:_ `FlagPprofListenAddr` - -## Logging Configuration (`log`) - -Settings that control the verbosity and format of log output from the Evolve node. These are typically set via global flags. - -**YAML Section:** - -```yaml -log: - # ... logging configurations ... -``` - -### Log Level - -**Description:** -Sets the minimum severity level for log messages to be displayed. Common levels include `debug`, `info`, `warn`, `error`. 
- -**YAML:** - -```yaml -log: - level: "info" -``` - -**Command-line Flag:** -`--log.level ` (Note: some applications might use a different flag name like `--log_level`) -_Example:_ `--log.level debug` -_Default:_ `"info"` -_Constant:_ `FlagLogLevel` (value: "evolve.log.level", but often overridden by global app flags) - -### Log Format - -**Description:** -Sets the format for log output. Common formats include `text` (human-readable) and `json` (structured, machine-readable). - -**YAML:** - -```yaml -log: - format: "text" -``` - -**Command-line Flag:** -`--log.format ` (Note: some applications might use a different flag name like `--log_format`) -_Example:_ `--log.format json` -_Default:_ `"text"` -_Constant:_ `FlagLogFormat` (value: "evolve.log.format", but often overridden by global app flags) - -### Log Trace (Stack Traces) - -**Description:** -If true, enables the inclusion of stack traces in error logs. This can be very helpful for debugging issues by showing the call stack at the point of an error. - -**YAML:** - -```yaml -log: - trace: false -``` - -**Command-line Flag:** -`--log.trace` (boolean, presence enables it; Note: some applications might use a different flag name like `--log_trace`) -_Example:_ `--log.trace` -_Default:_ `false` -_Constant:_ `FlagLogTrace` (value: "evolve.log.trace", but often overridden by global app flags) - -## Signer Configuration (`signer`) - -Settings related to the signing mechanism used by the node, particularly for aggregators that need to sign blocks. - -**YAML Section:** - -```yaml -signer: - # ... signer configurations ... -``` - -### Signer Type - -**Description:** -Specifies the type of remote signer to use. Common options might include `file` (for key files) or `grpc` (for connecting to a remote signing service). 
- -**YAML:** - -```yaml -signer: - signer_type: "file" -``` - -**Command-line Flag:** -`--evnode.signer.signer_type ` -_Example:_ `--evnode.signer.signer_type grpc` -_Default:_ (Depends on application, often "file" or none if not an aggregator) -_Constant:_ `FlagSignerType` - -### Signer Path - -**Description:** -The path to the signer file (if `signer_type` is `file`) or the address of the remote signer service (if `signer_type` is `grpc` or similar). - -**YAML:** - -```yaml -signer: - signer_path: "/path/to/priv_validator_key.json" # For file signer - # signer_path: "localhost:9000" # For gRPC signer -``` - -**Command-line Flag:** -`--evnode.signer.signer_path ` -_Example:_ `--evnode.signer.signer_path ./config` -_Default:_ (Depends on application) -_Constant:_ `FlagSignerPath` - -### Signer Passphrase - -**Description:** -The passphrase required to decrypt or access the signer key, particularly if using a `file` signer and the key is encrypted, or if the aggregator mode is enabled and requires it. This flag is not directly a field in the `SignerConfig` struct but is used in conjunction with it. - -**YAML:** -This is typically not stored in the YAML file for security reasons but provided via flag or environment variable. - -**Command-line Flag:** -`--evnode.signer.passphrase ` -_Example:_ `--evnode.signer.passphrase "mysecretpassphrase"` -_Default:_ `""` (empty) -_Constant:_ `FlagSignerPassphrase` -_Note:_ Be cautious with providing passphrases directly on the command line in shared environments due to history logging. Environment variables or secure input methods are often preferred. - ---- - -This reference should help you configure your Evolve node effectively. Always refer to the specific version of Evolve you are using, as options and defaults may change over time. 
diff --git a/content/docs/reference/configuration/ev-reth-chainspec.md b/content/docs/reference/configuration/ev-reth-chainspec.md deleted file mode 100644 index 9a6585e..0000000 --- a/content/docs/reference/configuration/ev-reth-chainspec.md +++ /dev/null @@ -1,160 +0,0 @@ -# ev-reth Chainspec Reference - -Complete reference for ev-reth chainspec (genesis.json) configuration. - -## Structure - -```json -{ - "config": { }, - "alloc": { }, - "coinbase": "0x...", - "difficulty": "0x0", - "gasLimit": "0x...", - "nonce": "0x0", - "timestamp": "0x0" -} -``` - -## config - -Chain configuration parameters. - -### Standard Ethereum Fields - -| Field | Type | Description | -|-----------------------|--------|-----------------------------------| -| `chainId` | number | Unique chain identifier | -| `homesteadBlock` | number | Homestead fork block (use 0) | -| `eip150Block` | number | EIP-150 fork block (use 0) | -| `eip155Block` | number | EIP-155 fork block (use 0) | -| `eip158Block` | number | EIP-158 fork block (use 0) | -| `byzantiumBlock` | number | Byzantium fork block (use 0) | -| `constantinopleBlock` | number | Constantinople fork block (use 0) | -| `petersburgBlock` | number | Petersburg fork block (use 0) | -| `istanbulBlock` | number | Istanbul fork block (use 0) | -| `berlinBlock` | number | Berlin fork block (use 0) | -| `londonBlock` | number | London fork block (use 0) | -| `shanghaiTime` | number | Shanghai fork timestamp (use 0) | -| `cancunTime` | number | Cancun fork timestamp (use 0) | - -### config.evolve - -Evolve-specific extensions. 
- -| Field | Type | Description | -|-----------------------------------|---------|------------------------------------| -| `baseFeeSink` | address | Redirect base fees to this address | -| `baseFeeRedirectActivationHeight` | number | Block height to activate redirect | -| `deployAllowlist` | object | Contract deployment restrictions | -| `contractSizeLimit` | number | Max contract bytecode size (bytes) | -| `mintPrecompile` | object | Native token minting precompile | - -#### deployAllowlist - -```json -{ - "admin": "0x...", - "enabled": ["0x...", "0x..."] -} -``` - -| Field | Type | Description | -|-----------|-----------|-----------------------------| -| `admin` | address | Can modify the allowlist | -| `enabled` | address[] | Addresses allowed to deploy | - -#### mintPrecompile - -```json -{ - "admin": "0x...", - "address": "0x0000000000000000000000000000000000000100" -} -``` - -| Field | Type | Description | -|-----------|---------|--------------------| -| `admin` | address | Can call mint() | -| `address` | address | Precompile address | - -## alloc - -Pre-funded accounts and contract deployments. - -```json -{ - "alloc": { - "0xAddress1": { - "balance": "0x..." - }, - "0xAddress2": { - "balance": "0x...", - "code": "0x...", - "storage": { - "0x0": "0x..." 
- } - } - } -} -``` - -| Field | Type | Description | -|-----------|------------|------------------------------| -| `balance` | hex string | Wei balance | -| `code` | hex string | Contract bytecode (optional) | -| `storage` | object | Storage slots (optional) | -| `nonce` | hex string | Account nonce (optional) | - -## Top-Level Fields - -| Field | Type | Description | -|--------------|------------|--------------------------------| -| `coinbase` | address | Default fee recipient | -| `difficulty` | hex string | Initial difficulty (use "0x0") | -| `gasLimit` | hex string | Block gas limit | -| `nonce` | hex string | Genesis nonce (use "0x0") | -| `timestamp` | hex string | Genesis timestamp | -| `extraData` | hex string | Extra data (optional) | -| `mixHash` | hex string | Mix hash (optional) | - -## Example - -```json -{ - "config": { - "chainId": 1337, - "homesteadBlock": 0, - "eip150Block": 0, - "eip155Block": 0, - "eip158Block": 0, - "byzantiumBlock": 0, - "constantinopleBlock": 0, - "petersburgBlock": 0, - "istanbulBlock": 0, - "berlinBlock": 0, - "londonBlock": 0, - "shanghaiTime": 0, - "cancunTime": 0, - "evolve": { - "baseFeeSink": "0x1234567890123456789012345678901234567890", - "baseFeeRedirectActivationHeight": 0, - "contractSizeLimit": 49152, - "mintPrecompile": { - "admin": "0xBridgeContract", - "address": "0x0000000000000000000000000000000000000100" - } - } - }, - "alloc": { - "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266": { - "balance": "0x200000000000000000000000000000000000000000000000000000000000000" - } - }, - "coinbase": "0x0000000000000000000000000000000000000000", - "difficulty": "0x0", - "gasLimit": "0x1c9c380", - "nonce": "0x0", - "timestamp": "0x0" -} -``` diff --git a/content/docs/reference/configuration/meta.json b/content/docs/reference/configuration/meta.json deleted file mode 100644 index 2760d92..0000000 --- a/content/docs/reference/configuration/meta.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "title": "Configuration", - "pages": ["..."] -} 
diff --git a/content/docs/reference/interfaces/da.md b/content/docs/reference/interfaces/da.md deleted file mode 100644 index f746e52..0000000 --- a/content/docs/reference/interfaces/da.md +++ /dev/null @@ -1,195 +0,0 @@ -# DA Interface - -The DA (Data Availability) interface defines how ev-node submits and retrieves data from the DA layer. - -This page is an overview. The source of truth for exact interfaces is the current DA interface implementation in this repository. - -## Client Interface - -```go -type Client interface { - Submit(ctx context.Context, data [][]byte, gasPrice float64, namespace []byte, options []byte) ResultSubmit - Retrieve(ctx context.Context, height uint64, namespace []byte) ResultRetrieve - Get(ctx context.Context, ids []ID, namespace []byte) ([]Blob, error) - GetHeaderNamespace() []byte - GetDataNamespace() []byte - GetForcedInclusionNamespace() []byte - HasForcedInclusionNamespace() bool -} -``` - -## Methods - -### Submit - -Submits blobs to the DA layer. - -```go -Submit(ctx context.Context, data [][]byte, gasPrice float64, namespace []byte, options []byte) ResultSubmit -``` - -**Parameters:** - -- `data` - Blobs to submit -- `gasPrice` - DA layer gas price -- `namespace` - Target namespace -- `options` - DA-specific options (JSON encoded) - -**Returns:** - -```go -type ResultSubmit struct { - BaseResult -} -``` - -### Retrieve - -Retrieves all blobs at a DA height and namespace. - -```go -Retrieve(ctx context.Context, height uint64, namespace []byte) ResultRetrieve -``` - -**Returns:** - -```go -type ResultRetrieve struct { - BaseResult - Data [][]byte // Retrieved blobs -} -``` - -### Get - -Retrieves specific blobs by their IDs. 
- -```go -Get(ctx context.Context, ids []ID, namespace []byte) ([]Blob, error) -``` - -### Namespace Accessors - -```go -GetHeaderNamespace() []byte // Namespace for block headers -GetDataNamespace() []byte // Namespace for block data -GetForcedInclusionNamespace() []byte // Namespace for forced inclusion txs -HasForcedInclusionNamespace() bool // Whether forced inclusion is enabled -``` - -## Verifier Interface - -For sequencers that need to verify batch inclusion: - -```go -type Verifier interface { - GetProofs(ctx context.Context, ids []ID, namespace []byte) ([]Proof, error) - Validate(ctx context.Context, ids []ID, proofs []Proof, namespace []byte) ([]bool, error) -} -``` - -## FullClient Interface - -Combines Client and Verifier: - -```go -type FullClient interface { - Client - Verifier -} -``` - -## Types - -### Core Types - -```go -type Blob = []byte // Raw data -type ID = []byte // Blob identifier (height + commitment) -type Commitment = []byte // Cryptographic commitment -type Proof = []byte // Inclusion proof -``` - -### BaseResult - -Common fields for DA operations: - -```go -type BaseResult struct { - Code StatusCode - Message string - Height uint64 - SubmittedCount uint64 - BlobSize uint64 - IDs [][]byte - Timestamp time.Time -} -``` - -### Status Codes - -```go -const ( - StatusUnknown StatusCode = iota - StatusSuccess - StatusNotFound - StatusNotIncludedInBlock - StatusAlreadyInMempool - StatusTooBig - StatusContextDeadline - StatusError - StatusIncorrectAccountSequence - StatusContextCanceled - StatusHeightFromFuture -) -``` - -## ID Format - -IDs encode both height and commitment: - -```go -// ID = height (8 bytes, little-endian) + commitment -func SplitID(id []byte) (height uint64, commitment []byte, error) -``` - -## Namespaces - -DA uses 29-byte namespaces (Celestia format): - -- 1 byte version -- 28 bytes identifier - -Three namespaces are used: - -| Namespace | Purpose | -|------------------|-----------------------------------------| -| Header 
| Block headers | -| Data | Transaction data | -| Forced Inclusion | User-submitted censorship-resistant txs | - -## Implementations - -| Implementation | Package | Description | -|----------------|-------------------|---------------------| -| Celestia | `pkg/da/celestia` | Production DA layer | -| Local DA | `pkg/da/local` | Development/testing | - -## Configuration - -```bash -# Celestia ---evnode.da.address http://localhost:26658 ---evnode.da.auth_token ---evnode.da.namespace ---evnode.da.gas_price 0.01 - -# Local DA ---evnode.da.address http://localhost:7980 -``` - -## See Also - -- [Data Availability Concepts](/concepts/data-availability) -- [Celestia Guide](/guides/da-layers/celestia) -- [Local DA Guide](/guides/da-layers/local-da) diff --git a/content/docs/reference/interfaces/executor.md b/content/docs/reference/interfaces/executor.md deleted file mode 100644 index 5cb0e9f..0000000 --- a/content/docs/reference/interfaces/executor.md +++ /dev/null @@ -1,185 +0,0 @@ -# Executor Interface - -The Executor interface defines how ev-node communicates with execution layers. Implement this interface to run custom execution environments on Evolve. - -## Interface Definition - -```go -type Executor interface { - InitChain(ctx context.Context, genesisTime time.Time, initialHeight uint64, chainID string) (stateRoot []byte, err error) - GetTxs(ctx context.Context) ([][]byte, error) - ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (updatedStateRoot []byte, err error) - SetFinal(ctx context.Context, blockHeight uint64) error - GetExecutionInfo(ctx context.Context) (ExecutionInfo, error) - FilterTxs(ctx context.Context, txs [][]byte, maxBytes, maxGas uint64, hasForceIncludedTransaction bool) ([]FilterStatus, error) -} -``` - -## Methods - -### InitChain - -Initializes the blockchain with genesis parameters. 
- -```go -InitChain(ctx context.Context, genesisTime time.Time, initialHeight uint64, chainID string) (stateRoot []byte, err error) -``` - -**Parameters:** - -- `genesisTime` - Chain start timestamp (UTC) -- `initialHeight` - First block height (must be > 0) -- `chainID` - Unique chain identifier - -**Returns:** - -- `stateRoot` - Hash representing initial state - -**Requirements:** - -- Must be idempotent (repeated calls return same result) -- Must validate genesis parameters -- Must generate deterministic initial state root - -### GetTxs - -Fetches transactions from the execution layer's mempool. - -```go -GetTxs(ctx context.Context) ([][]byte, error) -``` - -**Returns:** - -- Slice of valid transactions - -**Requirements:** - -- Return only currently valid transactions -- Do not remove transactions from mempool -- May remove invalid transactions - -### ExecuteTxs - -Processes transactions to produce a new block state. - -```go -ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (updatedStateRoot []byte, err error) -``` - -**Parameters:** - -- `txs` - Ordered list of transactions -- `blockHeight` - Height of block being created -- `timestamp` - Block timestamp (UTC) -- `prevStateRoot` - Previous block's state root - -**Returns:** - -- `updatedStateRoot` - New state root after execution - -**Requirements:** - -- Must be deterministic -- Must handle empty transaction lists -- Must handle malformed transactions gracefully -- Must validate against previous state root - -### SetFinal - -Marks a block as finalized. - -```go -SetFinal(ctx context.Context, blockHeight uint64) error -``` - -**Parameters:** - -- `blockHeight` - Height to finalize - -**Requirements:** - -- Must be idempotent -- Must verify block exists -- Finalized blocks cannot be reverted - -### GetExecutionInfo - -Returns current execution layer parameters. 
- -```go -GetExecutionInfo(ctx context.Context) (ExecutionInfo, error) -``` - -**Returns:** - -```go -type ExecutionInfo struct { - MaxGas uint64 // Maximum gas per block (0 = no gas-based limiting) -} -``` - -### FilterTxs - -Validates and filters transactions for block inclusion. - -```go -FilterTxs(ctx context.Context, txs [][]byte, maxBytes, maxGas uint64, hasForceIncludedTransaction bool) ([]FilterStatus, error) -``` - -**Parameters:** - -- `txs` - All transactions (force-included + mempool) -- `maxBytes` - Maximum cumulative size (0 = no limit) -- `maxGas` - Maximum cumulative gas (0 = no limit) -- `hasForceIncludedTransaction` - Whether force-included txs are present - -**Returns:** - -```go -type FilterStatus int - -const ( - FilterOK FilterStatus = iota // Include in batch - FilterRemove // Invalid, remove - FilterPostpone // Valid but exceeds limits, postpone -) -``` - -## Optional Interfaces - -### HeightProvider - -Enables height synchronization checks between ev-node and the execution layer. - -```go -type HeightProvider interface { - GetLatestHeight(ctx context.Context) (uint64, error) -} -``` - -Useful for detecting desynchronization after crashes or restarts. - -### Rollbackable - -Enables automatic rollback when execution layer is ahead of consensus. - -```go -type Rollbackable interface { - Rollback(ctx context.Context, targetHeight uint64) error -} -``` - -Only implement if your execution layer supports in-flight rollback. - -## Implementations - -| Implementation | Package | Description | -|----------------|---------|-------------| -| ev-reth | `execution/evm` | EVM execution via Engine API | -| ev-abci | `execution/abci` | Cosmos SDK via ABCI | -| testapp | `apps/testapp` | Simple key-value store | - -## Implementation Guide - -See [Implement Custom Executor](/getting-started/custom/implement-executor) for a step-by-step guide. 
diff --git a/content/docs/reference/interfaces/meta.json b/content/docs/reference/interfaces/meta.json deleted file mode 100644 index c2eac68..0000000 --- a/content/docs/reference/interfaces/meta.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "title": "Interfaces", - "pages": ["..."] -} diff --git a/content/docs/reference/interfaces/sequencer.md b/content/docs/reference/interfaces/sequencer.md deleted file mode 100644 index ead2eb9..0000000 --- a/content/docs/reference/interfaces/sequencer.md +++ /dev/null @@ -1,159 +0,0 @@ -# Sequencer Interface - -The Sequencer interface defines how ev-node orders transactions for block production. Two implementations are provided: single sequencer and based sequencer. - -## Interface Definition - -```go -type Sequencer interface { - SubmitBatchTxs(ctx context.Context, req SubmitBatchTxsRequest) (*SubmitBatchTxsResponse, error) - GetNextBatch(ctx context.Context, req GetNextBatchRequest) (*GetNextBatchResponse, error) - VerifyBatch(ctx context.Context, req VerifyBatchRequest) (*VerifyBatchResponse, error) - SetDAHeight(height uint64) - GetDAHeight() uint64 -} -``` - -## Methods - -### SubmitBatchTxs - -Submits a batch of transactions from the executor to the sequencer. - -```go -SubmitBatchTxs(ctx context.Context, req SubmitBatchTxsRequest) (*SubmitBatchTxsResponse, error) -``` - -**Request:** - -```go -type SubmitBatchTxsRequest struct { - Id []byte // Chain identifier - Batch *Batch // Transactions to submit -} - -type Batch struct { - Transactions [][]byte -} -``` - -### GetNextBatch - -Returns the next batch of transactions for block production. 
- -```go -GetNextBatch(ctx context.Context, req GetNextBatchRequest) (*GetNextBatchResponse, error) -``` - -**Request:** - -```go -type GetNextBatchRequest struct { - Id []byte // Chain identifier - LastBatchData [][]byte // Previous batch data - MaxBytes uint64 // Maximum batch size -} -``` - -**Response:** - -```go -type GetNextBatchResponse struct { - Batch *Batch // Transactions to include - Timestamp time.Time // Block timestamp - BatchData [][]byte // Data for verification -} -``` - -### VerifyBatch - -Verifies a batch received from another node during sync. - -```go -VerifyBatch(ctx context.Context, req VerifyBatchRequest) (*VerifyBatchResponse, error) -``` - -**Request:** - -```go -type VerifyBatchRequest struct { - Id []byte // Chain identifier - BatchData [][]byte // Batch data to verify -} -``` - -**Response:** - -```go -type VerifyBatchResponse struct { - Status bool // true if valid -} -``` - -### SetDAHeight / GetDAHeight - -Track the current DA height for forced inclusion retrieval. - -```go -SetDAHeight(height uint64) -GetDAHeight() uint64 -``` - -## Batch Type - -```go -type Batch struct { - Transactions [][]byte -} - -// Hash returns SHA256 hash of the batch -func (batch *Batch) Hash() ([]byte, error) -``` - -The hash is computed deterministically: - -1. Write transaction count as uint64 (big-endian) -2. For each transaction: write length as uint64, then bytes - -## Implementations - -### Single Sequencer - -Located in `pkg/sequencers/single/`. - -- Maintains local mempool -- Supports forced inclusion from DA -- Default for most deployments - -### Based Sequencer - -Located in `pkg/sequencers/based/`. 
- -- No local mempool -- All transactions come from DA layer -- Maximum censorship resistance - -## Configuration - -Select sequencer mode via configuration: - -```yaml -# Single sequencer (default) -sequencer: - type: single - -# Based sequencer -sequencer: - type: based -``` - -## Forced Inclusion - -Both sequencer implementations support forced inclusion, but with different behaviors: - -| Sequencer | Forced Inclusion Source | Mempool | -|-----------|------------------------|---------| -| Single | DA namespace + local mempool | Yes | -| Based | DA namespace only | No | - -The sequencer tracks DA height via `SetDAHeight()` to know which forced inclusion transactions to include. diff --git a/content/docs/reference/meta.json b/content/docs/reference/meta.json deleted file mode 100644 index 1f94571..0000000 --- a/content/docs/reference/meta.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "title": "Reference", - "icon": "Library", - "pages": ["configuration", "api", "interfaces", "specs"] -} diff --git a/content/docs/reference/specs/block-manager.md b/content/docs/reference/specs/block-manager.md deleted file mode 100644 index c97171f..0000000 --- a/content/docs/reference/specs/block-manager.md +++ /dev/null @@ -1,759 +0,0 @@ -# Block Components - -## Abstract - -The block package provides a modular component-based architecture for handling block-related operations in full nodes. Instead of a single monolithic manager, the system is divided into specialized components that work together, each responsible for specific aspects of block processing. This architecture enables better separation of concerns, easier testing, and more flexible node configurations. 
- -The main components are: - -- **Executor**: Handles block production and state transitions (aggregator nodes only) -- **Reaper**: Periodically retrieves transactions and submits them to the sequencer (aggregator nodes only) -- **Submitter**: Manages submission of headers and data to the DA network (aggregator nodes only) -- **Syncer**: Handles synchronization from both DA and P2P sources (all full nodes) -- **Cache Manager**: Coordinates caching and tracking of blocks across all components - -A full node coordinates these components based on its role: - -- **Aggregator nodes**: Use all components for block production, submission, and synchronization -- **Non-aggregator full nodes**: Use only Syncer and Cache for block synchronization - -```mermaid -sequenceDiagram - title Overview of Block Manager - - participant User - participant Sequencer - participant Full Node 1 - participant Full Node 2 - participant DA Layer - - User->>Sequencer: Send Tx - Sequencer->>Sequencer: Generate Block - Sequencer->>DA Layer: Publish Block - - Sequencer->>Full Node 1: Gossip Block - Sequencer->>Full Node 2: Gossip Block - Full Node 1->>Full Node 1: Verify Block - Full Node 1->>Full Node 2: Gossip Block - Full Node 1->>Full Node 1: Mark Block Soft Confirmed - - Full Node 2->>Full Node 2: Verify Block - Full Node 2->>Full Node 2: Mark Block Soft Confirmed - - DA Layer->>Full Node 1: Retrieve Block - Full Node 1->>Full Node 1: Mark Block DA Included - - DA Layer->>Full Node 2: Retrieve Block - Full Node 2->>Full Node 2: Mark Block DA Included -``` - -### Component Architecture Overview - -```mermaid -flowchart TB - subgraph Block Components [Modular Block Components] - EXE[Executor
Block Production] - REA[Reaper
Tx Collection] - SUB[Submitter
DA Submission] - SYN[Syncer
Block Sync] - CAC[Cache Manager
State Tracking] - end - - subgraph External Components - CEXE[Core Executor] - SEQ[Sequencer] - DA[DA Layer] - HS[Header Store/P2P] - DS[Data Store/P2P] - ST[Local Store] - end - - REA -->|GetTxs| CEXE - REA -->|SubmitBatch| SEQ - REA -->|Notify| EXE - - EXE -->|CreateBlock| CEXE - EXE -->|ApplyBlock| CEXE - EXE -->|Save| ST - EXE -->|Track| CAC - - EXE -->|Headers| SUB - EXE -->|Data| SUB - SUB -->|Submit| DA - SUB -->|Track| CAC - - DA -->|Retrieve| SYN - HS -->|Headers| SYN - DS -->|Data| SYN - - SYN -->|ApplyBlock| CEXE - SYN -->|Save| ST - SYN -->|Track| CAC - SYN -->|SetFinal| CEXE - - CAC -->|Coordinate| EXE - CAC -->|Coordinate| SUB - CAC -->|Coordinate| SYN -``` - -## Protocol/Component Description - -The block components are initialized based on the node type: - -### Aggregator Components - -Aggregator nodes create all components for full block production and synchronization capabilities: - -```go -components := block.NewAggregatorComponents( - config, // Node configuration - genesis, // Genesis state - store, // Local datastore - executor, // Core executor for state transitions - sequencer, // Sequencer client - da, // DA client - signer, // Block signing key - // P2P stores and options... -) -``` - -### Non-Aggregator Components - -Non-aggregator full nodes create only synchronization components: - -```go -components := block.NewSyncComponents( - config, // Node configuration - genesis, // Genesis state - store, // Local datastore - executor, // Core executor for state transitions - da, // DA client - // P2P stores and options... 
(no signer or sequencer needed) -) -``` - -### Component Initialization Parameters - -| **Name** | **Type** | **Description** | -| --------------------------- | ----------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| signing key | crypto.PrivKey | used for signing blocks and data after creation | -| config | config.BlockManagerConfig | block manager configurations (see config options below) | -| genesis | \*cmtypes.GenesisDoc | initialize the block manager with genesis state (genesis configuration defined in `config/genesis.json` file under the app directory) | -| store | store.Store | local datastore for storing chain blocks and states (default local store path is `$db_dir/evolve` and `db_dir` specified in the `config.yaml` file under the app directory) | -| mempool, proxyapp, eventbus | mempool.Mempool, proxy.AppConnConsensus, \*cmtypes.EventBus | for initializing the executor (state transition function). 
mempool is also used in the manager to check for availability of transactions for lazy block production | -| dalc | da.DAClient | the data availability light client used to submit and retrieve blocks to DA network | -| headerStore | *goheaderstore.Store[*types.SignedHeader] | to store and retrieve block headers gossiped over the P2P network | -| dataStore | *goheaderstore.Store[*types.SignedData] | to store and retrieve block data gossiped over the P2P network | -| signaturePayloadProvider | types.SignaturePayloadProvider | optional custom provider for header signature payloads | -| sequencer | core.Sequencer | used to retrieve batches of transactions from the sequencing layer | -| reaper | \*Reaper | component that periodically retrieves transactions from the executor and submits them to the sequencer | - -### Configuration Options - -The block components share a common configuration: - -| Name | Type | Description | -| ------------------------ | ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- | -| BlockTime | time.Duration | time interval used for block production and block retrieval from block store ([`defaultBlockTime`][defaultBlockTime]) | -| DABlockTime | time.Duration | time interval used for both block publication to DA network and block retrieval from DA network ([`defaultDABlockTime`][defaultDABlockTime]) | -| DAStartHeight | uint64 | block retrieval from DA network starts from this height | -| LazyBlockInterval | time.Duration | time interval used for block production in lazy aggregator mode even when there are no transactions ([`defaultLazyBlockTime`][defaultLazyBlockTime]) | -| LazyMode | bool | when set to true, enables lazy aggregation mode which produces blocks only when transactions are available or at LazyBlockInterval intervals | -| MaxPendingHeadersAndData | uint64 | maximum number of pending headers and data blocks before pausing 
block production (default: 100) | -| MaxSubmitAttempts | int | maximum number of retry attempts for DA submissions (default: 30) | -| MempoolTTL | int | number of blocks to wait when transaction is stuck in DA mempool (default: 25) | -| GasPrice | float64 | gas price for DA submissions (-1 for automatic/default) | -| GasMultiplier | float64 | multiplier for gas price on DA submission retries (default: 1.3) | -| Namespace | da.Namespace | DA namespace ID for block submissions (deprecated, use HeaderNamespace and DataNamespace instead) | -| HeaderNamespace | string | namespace ID for submitting headers to DA layer (automatically encoded by the node) | -| DataNamespace | string | namespace ID for submitting data to DA layer (automatically encoded by the node) | -| RequestTimeout | duration | per-request timeout for DA `GetIDs`/`Get` calls; higher values tolerate slow DA nodes, lower values fail faster (default: 30s) | - -### Block Production (Executor Component) - -When the full node is operating as an aggregator, the **Executor component** handles block production. There are two modes of block production, which can be specified in the block manager configurations: `normal` and `lazy`. - -In `normal` mode, the block manager runs a timer, which is set to the `BlockTime` configuration parameter, and continuously produces blocks at `BlockTime` intervals. - -In `lazy` mode, the block manager implements a dual timer mechanism: - -```mermaid -flowchart LR - subgraph Lazy Aggregation Mode - R[Reaper] -->|GetTxs| CE[Core Executor] - CE -->|Txs Available| R - R -->|Submit to Sequencer| S[Sequencer] - R -->|NotifyNewTransactions| N[txNotifyCh] - - N --> E{Executor Logic} - BT[blockTimer] --> E - LT[lazyTimer] --> E - - E -->|Txs Available| P1[Produce Block with Txs] - E -->|No Txs & LazyTimer| P2[Produce Empty Block] - - P1 --> B[Block Creation] - P2 --> B - end -``` - -1. A `blockTimer` that triggers block production at regular intervals when transactions are available -2. 
A `lazyTimer` that ensures blocks are produced at `LazyBlockInterval` intervals even during periods of inactivity - -The block manager starts building a block when any transaction becomes available in the mempool via a notification channel (`txNotifyCh`). When the `Reaper` detects new transactions, it calls `Manager.NotifyNewTransactions()`, which performs a non-blocking signal on this channel. The block manager also produces empty blocks at regular intervals to maintain consistency with the DA layer, ensuring a 1:1 mapping between DA layer blocks and execution layer blocks. - -The Reaper component periodically retrieves transactions from the core executor and submits them to the sequencer. It runs independently and notifies the Executor component when new transactions are available, enabling responsive block production in lazy mode. - -#### Building the Block - -The Executor component of aggregator nodes performs the following steps to produce a block: - -```mermaid -flowchart TD - A[Timer Trigger / Transaction Notification] --> B[Retrieve Batch] - B --> C{Transactions Available?} - C -->|Yes| D[Create Block with Txs] - C -->|No| E[Create Empty Block] - D --> F[Generate Header & Data] - E --> F - F --> G[Sign Header → SignedHeader] - F --> H[Sign Data → SignedData] - G --> I[Apply Block] - H --> I - I --> J[Update State] - J --> K[Save to Store] - K --> L[Add to pendingHeaders] - K --> M[Add to pendingData] - L --> N[Broadcast Header to P2P] - M --> O[Broadcast Data to P2P] -``` - -- Retrieve a batch of transactions using `retrieveBatch()` which interfaces with the sequencer -- Call `CreateBlock` using executor with the retrieved transactions -- Create separate header and data structures from the block -- Sign the header using `signing key` to generate `SignedHeader` -- Sign the data using `signing key` to generate `SignedData` (if transactions exist) -- Call `ApplyBlock` using executor to generate an updated state -- Save the block, validators, and updated state 
to local store -- Add the newly generated header to `pendingHeaders` queue -- Add the newly generated data to `pendingData` queue (if not empty) -- Publish the newly generated header and data to channels to notify other components of the sequencer node (such as block and header gossip) - -Note: When no transactions are available, the block manager creates blocks with empty data using a special `dataHashForEmptyTxs` marker. The header and data separation architecture allows headers and data to be submitted and retrieved independently from the DA layer. - -### Block Publication to DA Network (Submitter Component) - -The **Submitter component** of aggregator nodes implements separate submission loops for headers and data, both operating at `DABlockTime` intervals. Headers and data are submitted to different namespaces to improve scalability and allow for more flexible data availability strategies: - -```mermaid -flowchart LR - subgraph Header Submission - H1[pendingHeaders Queue] --> H2[Header Submission Loop] - H2 --> H3[Marshal to Protobuf] - H3 --> H4[Submit to DA] - H4 -->|Success| H5[Remove from Queue] - H4 -->|Failure| H6[Keep in Queue & Retry] - end - - subgraph Data Submission - D1[pendingData Queue] --> D2[Data Submission Loop] - D2 --> D3[Marshal to Protobuf] - D3 --> D4[Submit to DA] - D4 -->|Success| D5[Remove from Queue] - D4 -->|Failure| D6[Keep in Queue & Retry] - end - - H2 -.->|DABlockTime| H2 - D2 -.->|DABlockTime| D2 -``` - -#### Header Submission Loop - -The `HeaderSubmissionLoop` manages the submission of signed headers to the DA network: - -- Retrieves pending headers from the `pendingHeaders` queue -- Marshals headers to protobuf format -- Submits to DA using the generic `submitToDA` helper with the configured `HeaderNamespace` -- On success, removes submitted headers from the pending queue -- On failure, headers remain in the queue for retry - -#### Data Submission Loop - -The `DataSubmissionLoop` manages the submission of signed data to the DA 
network: - -- Retrieves pending data from the `pendingData` queue -- Marshals data to protobuf format -- Submits to DA using the generic `submitToDA` helper with the configured `DataNamespace` -- On success, removes submitted data from the pending queue -- On failure, data remains in the queue for retry - -#### Generic Submission Logic - -Both loops use a shared `submitToDA` function that provides: - -- Namespace-specific submission based on header or data type -- Retry logic with configurable maximum attempts via `MaxSubmitAttempts` configuration -- Exponential backoff starting at `initialBackoff` (100ms), doubling each attempt, capped at `DABlockTime` -- Gas price management with `GasMultiplier` applied on retries using a centralized `retryStrategy` -- Recursive batch splitting for handling "too big" DA submissions that exceed blob size limits -- Comprehensive error handling for different DA submission failure types (mempool issues, context cancellation, blob size limits) -- Comprehensive metrics tracking for attempts, successes, and failures -- Context-aware cancellation support - -#### Retry Strategy and Error Handling - -The DA submission system implements sophisticated retry logic using a centralized `retryStrategy` struct to handle various failure scenarios: - -```mermaid -flowchart TD - A[Submit to DA] --> B{Submission Result} - B -->|Success| C[Reset Backoff & Adjust Gas Price Down] - B -->|Too Big| D{Batch Size > 1?} - B -->|Mempool/Not Included| E[Mempool Backoff Strategy] - B -->|Context Canceled| F[Stop Submission] - B -->|Other Error| G[Exponential Backoff] - - D -->|Yes| H[Recursive Batch Splitting] - D -->|No| I[Skip Single Item - Cannot Split] - - E --> J[Set Backoff = MempoolTTL * BlockTime] - E --> K[Multiply Gas Price by GasMultiplier] - - G --> L[Double Backoff Time] - G --> M[Cap at MaxBackoff - BlockTime] - - H --> N[Split into Two Halves] - N --> O[Submit First Half] - O --> P[Submit Second Half] - P --> Q{Both Halves Processed?} - Q 
-->|Yes| R[Combine Results] - Q -->|No| S[Handle Partial Success] - - C --> T[Update Pending Queues] - T --> U[Post-Submit Actions] -``` - -##### Retry Strategy Features - -- **Centralized State Management**: The `retryStrategy` struct manages attempt counts, backoff timing, and gas price adjustments -- **Multiple Backoff Types**: - - Exponential backoff for general failures (doubles each attempt, capped at `BlockTime`) - - Mempool-specific backoff (waits `MempoolTTL * BlockTime` for stuck transactions) - - Success-based backoff reset with gas price reduction -- **Gas Price Management**: - - Increases gas price by `GasMultiplier` on mempool failures - - Decreases gas price after successful submissions (bounded by initial price) - - Supports automatic gas price detection (`-1` value) -- **Intelligent Batch Splitting**: - - Recursively splits batches that exceed DA blob size limits - - Handles partial submissions within split batches - - Prevents infinite recursion with proper base cases -- **Comprehensive Error Classification**: - - `StatusSuccess`: Full or partial successful submission - - `StatusTooBig`: Triggers batch splitting logic - - `StatusNotIncludedInBlock`/`StatusAlreadyInMempool`: Mempool-specific handling - - `StatusContextCanceled`: Graceful shutdown support - - Other errors: Standard exponential backoff - -The manager enforces a limit on pending headers and data through `MaxPendingHeadersAndData` configuration. When this limit is reached, block production pauses to prevent unbounded growth of the pending queues. - -### Block Retrieval from DA Network (Syncer Component) - -The **Syncer component** implements a `RetrieveLoop` through its DARetriever that regularly pulls headers and data from the DA network. 
The retrieval process supports both legacy single-namespace mode (for backward compatibility) and the new separate namespace mode: - -```mermaid -flowchart TD - A[Start RetrieveLoop] --> B[Get DA Height] - B --> C{DABlockTime Timer} - C --> D[GetHeightPair from DA] - D --> E{Result?} - E -->|Success| F[Validate Signatures] - E -->|NotFound| G[Increment Height] - E -->|Error| H[Retry Logic] - - F --> I[Check Sequencer Info] - I --> J[Mark DA Included] - J --> K[Send to Sync] - K --> L[Increment Height] - L --> M[Immediate Next Retrieval] - - G --> C - H --> N{Retries < 10?} - N -->|Yes| O[Wait 100ms] - N -->|No| P[Log Error & Stall] - O --> D - M --> D -``` - -#### Retrieval Process - -1. **Height Management**: Starts from the latest of: - - DA height from the last state in local store - - `DAStartHeight` configuration parameter - - Maintains and increments `daHeight` counter after successful retrievals - -2. **Retrieval Mechanism**: - - Executes at `DABlockTime` intervals - - Implements namespace migration support: - - First attempts legacy namespace retrieval if migration not completed - - Falls back to separate header and data namespace retrieval - - Tracks migration status to optimize future retrievals - - Retrieves from separate namespaces: - - Headers from `HeaderNamespace` - - Data from `DataNamespace` - - Combines results from both namespaces - - Handles three possible outcomes: - - `Success`: Process retrieved header and/or data - - `NotFound`: No chain block at this DA height (normal case) - - `Error`: Retry with backoff - -3. **Error Handling**: - - Implements retry logic with 100ms delay between attempts - - After 10 retries, logs error and stalls retrieval - - Does not increment `daHeight` on persistent errors - -4. 
**Processing Retrieved Blocks**: - - Validates header and data signatures - - Checks sequencer information - - Marks blocks as DA included in caches - - Sends to sync goroutine for state update - - Successful processing triggers immediate next retrieval without waiting for timer - - Updates namespace migration status when appropriate: - - Marks migration complete when data is found in new namespaces - - Persists migration state to avoid future legacy checks - -#### Header and Data Caching - -The retrieval system uses persistent caches for both headers and data: - -- Prevents duplicate processing -- Tracks DA inclusion status -- Supports out-of-order block arrival -- Enables efficient sync from P2P and DA sources -- Maintains namespace migration state for optimized retrieval - -For more details on DA integration, see the [Data Availability specification](./da.md). - -#### Out-of-Order Chain Blocks on DA - -Evolve should support blocks arriving out-of-order on DA, like so: -![out-of-order blocks](./out-of-order-blocks.png) - -#### Termination Condition - -If the sequencer double-signs two blocks at the same height, evidence of the fault should be posted to DA. Evolve full nodes should process the longest valid chain up to the height of the fault evidence, and terminate. See diagram: -![termination condition](./termination.png) - -### Block Sync Service (Syncer Component) - -The **Syncer component** manages the synchronization of headers and data through its P2PHandler and coordination with the Cache Manager: - -#### Architecture - -- **Header Store**: Uses `goheader.Store[*types.SignedHeader]` for header management -- **Data Store**: Uses `goheader.Store[*types.SignedData]` for data management -- **Separation of Concerns**: Headers and data are handled independently, supporting the header/data separation architecture - -#### Synchronization Flow - -1. **Header Sync**: Headers created by the sequencer are sent to the header store for P2P gossip -2. 
**Data Sync**: Data blocks are sent to the data store for P2P gossip -3. **Cache Integration**: Both header and data caches track seen items to prevent duplicates -4. **DA Inclusion Tracking**: Separate tracking for header and data DA inclusion status - -### Block Publication to P2P network (Executor Component) - -The **Executor component** of aggregator nodes publishes headers and data separately to the P2P network: - -#### Header Publication - -- Headers are sent through the header broadcast channel -- Written to the header store for P2P gossip -- Broadcast to network peers via header sync service - -#### Data Publication - -- Data blocks are sent through the data broadcast channel -- Written to the data store for P2P gossip -- Broadcast to network peers via data sync service - -Non-sequencer full nodes receive headers and data through the P2P sync service and do not publish blocks themselves. - -### Block Retrieval from P2P network (Syncer Component) - -The **Syncer component** retrieves headers and data separately from P2P stores through its P2PHandler: - -#### Header Store Retrieval Loop - -The `HeaderStoreRetrieveLoop`: - -- Operates at `BlockTime` intervals via `headerStoreCh` signals -- Tracks `headerStoreHeight` for the last retrieved header -- Retrieves all headers between last height and current store height -- Validates sequencer information using `assertUsingExpectedSingleSequencer` -- Marks headers as "seen" in the header cache -- Sends headers to sync goroutine via `headerInCh` - -#### Data Store Retrieval Loop - -The `DataStoreRetrieveLoop`: - -- Operates at `BlockTime` intervals via `dataStoreCh` signals -- Tracks `dataStoreHeight` for the last retrieved data -- Retrieves all data blocks between last height and current store height -- Validates data signatures using `assertValidSignedData` -- Marks data as "seen" in the data cache -- Sends data to sync goroutine via `dataInCh` - -#### Soft Confirmations - -Headers and data retrieved from P2P are 
marked as soft confirmed until both: - -1. The corresponding header is seen on the DA layer -2. The corresponding data is seen on the DA layer - -Once both conditions are met, the block is marked as DA-included. - -#### About Soft Confirmations and DA Inclusions - -The block manager retrieves blocks from both the P2P network and the underlying DA network because the blocks are available in the P2P network faster and DA retrieval is slower (e.g., 1 second vs 6 seconds). -The blocks retrieved from the P2P network are only marked as soft confirmed until the DA retrieval succeeds on those blocks and they are marked DA-included. -DA-included blocks are considered to have a higher level of finality. - -**DAIncluderLoop**: -The `DAIncluderLoop` is responsible for advancing the `DAIncludedHeight` by: - -- Checking if blocks after the current height have both header and data marked as DA-included in caches -- Stopping advancement if either header or data is missing for a height -- Calling `SetFinal` on the executor when a block becomes DA-included -- Storing the Evolve height to DA height mapping for tracking -- Ensuring only blocks with both header and data present are considered DA-included - -### State Update after Block Retrieval (Syncer Component) - -The **Syncer component** uses a `SyncLoop` to coordinate state updates from blocks retrieved via P2P or DA networks: - -```mermaid -flowchart TD - subgraph Sources - P1[P2P Header Store] --> H[headerInCh] - P2[P2P Data Store] --> D[dataInCh] - DA1[DA Header Retrieval] --> H - DA2[DA Data Retrieval] --> D - end - - subgraph SyncLoop - H --> S[Sync Goroutine] - D --> S - S --> C{Header & Data for Same Height?} - C -->|Yes| R[Reconstruct Block] - C -->|No| W[Wait for Matching Pair] - R --> V[Validate Signatures] - V --> A[ApplyBlock] - A --> CM[Commit] - CM --> ST[Store Block & State] - ST --> F{DA Included?} - F -->|Yes| FN[SetFinal] - F -->|No| E[End] - FN --> U[Update DA Height] - end -``` - -#### Sync Loop Architecture - 
-The `SyncLoop` processes headers and data from multiple sources: - -- Headers from `headerInCh` (P2P and DA sources) -- Data from `dataInCh` (P2P and DA sources) -- Maintains caches to track processed items -- Ensures ordered processing by height - -#### State Update Process - -When both header and data are available for a height: - -1. **Block Reconstruction**: Combines header and data into a complete block -2. **Validation**: Verifies header and data signatures match expectations -3. **ApplyBlock**: - - Validates the block against current state - - Executes transactions - - Captures validator updates - - Returns updated state -4. **Commit**: - - Persists execution results - - Updates mempool by removing included transactions - - Publishes block events -5. **Storage**: - - Stores the block, validators, and updated state - - Updates last state in manager -6. **Finalization**: - - When block is DA-included, calls `SetFinal` on executor - - Updates DA included height - -## Message Structure/Communication Format - -### Component Communication - -The components communicate through well-defined interfaces: - -#### Executor ↔ Core Executor - -- `InitChain`: initializes the chain state with the given genesis time, initial height, and chain ID using `InitChainSync` on the executor to obtain initial `appHash` and initialize the state. -- `CreateBlock`: prepares a block with transactions from the provided batch data. -- `ApplyBlock`: validates the block, executes the block (apply transactions), captures validator updates, and returns updated state. -- `SetFinal`: marks the block as final when both its header and data are confirmed on the DA layer. -- `GetTxs`: retrieves transactions from the application (used by Reaper component). - -#### Reaper ↔ Sequencer - -- `GetNextBatch`: retrieves the next batch of transactions to include in a block. -- `VerifyBatch`: validates that a batch came from the expected sequencer. 
- -#### Submitter/Syncer ↔ DA Layer - -- `Submit`: submits headers or data blobs to the DA network. -- `Get`: retrieves headers or data blobs from the DA network. -- `GetHeightPair`: retrieves both header and data at a specific DA height. - -## Assumptions and Considerations - -### Component Architecture - -- The block package uses a modular component architecture instead of a monolithic manager -- Components are created based on node type: aggregator nodes get all components, non-aggregator nodes only get synchronization components -- Each component has a specific responsibility and communicates through well-defined interfaces -- Components share a common Cache Manager for coordination and state tracking - -### Initialization and State Management - -- Components load the initial state from the local store and use genesis if not found in the local store, when the node (re)starts -- During startup the Syncer invokes the execution Replayer to re-execute any blocks the local execution layer is missing; the replayer enforces strict app-hash matching so a mismatch aborts initialization instead of silently drifting out of sync -- The default mode for aggregator nodes is normal (not lazy) -- Components coordinate through channels and shared cache structures - -### Block Production (Executor Component) - -- The Executor can produce empty blocks -- In lazy aggregation mode, the Executor maintains consistency with the DA layer by producing empty blocks at regular intervals, ensuring a 1:1 mapping between DA layer blocks and execution layer blocks -- The lazy aggregation mechanism uses a dual timer approach: - - A `blockTimer` that triggers block production when transactions are available - - A `lazyTimer` that ensures blocks are produced even during periods of inactivity -- Empty batches are handled differently in lazy mode - instead of discarding them, they are returned with the `ErrNoBatch` error, allowing the caller to create empty blocks with proper timestamps -- 
Transaction notifications from the `Reaper` to the `Executor` are handled via a non-blocking notification channel (`txNotifyCh`) to prevent backpressure - -### DA Submission (Submitter Component) - -- The Submitter enforces `MaxPendingHeadersAndData` limit to prevent unbounded growth of pending queues during DA submission issues -- Headers and data are submitted separately to the DA layer using different namespaces, supporting the header/data separation architecture -- The Cache Manager uses persistent caches for headers and data to track seen items and DA inclusion status -- Namespace migration is handled transparently by the Syncer, with automatic detection and state persistence to optimize future operations -- The system supports backward compatibility with legacy single-namespace deployments while transitioning to separate namespaces -- Gas price management in the Submitter includes automatic adjustment with `GasMultiplier` on DA submission retries - -### Storage and Persistence - -- Components use persistent storage (disk) when the `root_dir` and `db_path` configuration parameters are specified in `config.yaml` file under the app directory. If these configuration parameters are not specified, the in-memory storage is used, which will not be persistent if the node stops -- The Syncer does not re-apply blocks when they transition from soft confirmed to DA included status. 
The block is only marked DA included in the caches -- Header and data stores use separate prefixes for isolation in the underlying database -- The genesis `ChainID` is used to create separate `PubSubTopID`s for headers and data in go-header - -### P2P and Synchronization - -- Block sync over the P2P network works only when a full node is connected to the P2P network by specifying the initial seeds to connect to via `P2PConfig.Seeds` configuration parameter when starting the full node -- Node's context is passed down to all components to support graceful shutdown and cancellation - -### Architecture Design Decisions - -- The Executor supports custom signature payload providers for headers, enabling flexible signing schemes -- The component architecture supports the separation of header and data structures in Evolve. This allows for expanding the sequencing scheme beyond single sequencing and enables the use of a decentralized sequencer mode. For detailed information on this architecture, see the [Header and Data Separation ADR](../../adr/adr-014-header-and-data-separation.md) -- Components process blocks with a minimal header format, which is designed to eliminate dependency on CometBFT's header format and can be used to produce an execution layer tailored header if needed. 
For details on this header structure, see the [Evolve Minimal Header](../../adr/adr-015-rollkit-minimal-header.md) specification - -## Metrics - -The block components expose comprehensive metrics for monitoring through the shared Metrics instance: - -### Block Production Metrics (Executor Component) - -- `last_block_produced_height`: Height of the last produced block -- `last_block_produced_time`: Timestamp of the last produced block -- `aggregation_type`: Current aggregation mode (normal/lazy) -- `block_size_bytes`: Size distribution of produced blocks -- `produced_empty_blocks_total`: Count of empty blocks produced - -### DA Metrics (Submitter and Syncer Components) - -- `da_submission_attempts_total`: Total DA submission attempts -- `da_submission_success_total`: Successful DA submissions -- `da_submission_failure_total`: Failed DA submissions -- `da_retrieval_attempts_total`: Total DA retrieval attempts -- `da_retrieval_success_total`: Successful DA retrievals -- `da_retrieval_failure_total`: Failed DA retrievals -- `da_height`: Current DA retrieval height -- `pending_headers_count`: Number of headers pending DA submission -- `pending_data_count`: Number of data blocks pending DA submission - -### Sync Metrics (Syncer Component) - -- `sync_height`: Current sync height -- `da_included_height`: Height of last DA-included block -- `soft_confirmed_height`: Height of last soft confirmed block -- `header_store_height`: Current header store height -- `data_store_height`: Current data store height - -### Performance Metrics (All Components) - -- `block_production_time`: Time to produce a block -- `da_submission_time`: Time to submit to DA -- `state_update_time`: Time to apply block and update state -- `channel_buffer_usage`: Usage of internal channels - -### Error Metrics (All Components) - -- `errors_total`: Total errors by type and operation - -## Implementation - -The modular block components are implemented in the following packages: - -- [Executor]: Block 
production and state transitions (`block/internal/executing/`) -- [Reaper]: Transaction collection and submission (`block/internal/reaping/`) -- [Submitter]: DA submission logic (`block/internal/submitting/`) -- [Syncer]: Block synchronization from DA and P2P (`block/internal/syncing/`) -- [Cache Manager]: Coordination and state tracking (`block/internal/cache/`) -- [Components]: Main components orchestration (`block/components.go`) - -See [tutorial] for running a multi-node network with both aggregator and non-aggregator full nodes. - -## References - -[1] [Go Header][go-header] - -[2] [Block Sync][block-sync] - -[3] [Full Node][full-node] - -[4] [Block Components][Components] - -[5] [Tutorial][tutorial] - -[6] [Header and Data Separation ADR](../../adr/adr-014-header-and-data-separation.md) - -[7] [Evolve Minimal Header](../../adr/adr-015-rollkit-minimal-header.md) - -[8] [Data Availability](./da.md) - -[9] [Lazy Aggregation with DA Layer Consistency ADR](../../adr/adr-021-lazy-aggregation.md) - -[defaultBlockTime]: https://github.com/evstack/ev-node/blob/main/pkg/config/defaults.go#L50 -[defaultDABlockTime]: https://github.com/evstack/ev-node/blob/main/pkg/config/defaults.go#L59 -[defaultLazyBlockTime]: https://github.com/evstack/ev-node/blob/main/pkg/config/defaults.go#L52 -[go-header]: https://github.com/celestiaorg/go-header -[block-sync]: https://github.com/evstack/ev-node/blob/main/pkg/sync/sync_service.go -[full-node]: https://github.com/evstack/ev-node/blob/main/node/full.go -[Executor]: https://github.com/evstack/ev-node/blob/main/block/internal/executing/executor.go -[Reaper]: https://github.com/evstack/ev-node/blob/main/block/internal/reaping/reaper.go -[Submitter]: https://github.com/evstack/ev-node/blob/main/block/internal/submitting/submitter.go -[Syncer]: https://github.com/evstack/ev-node/blob/main/block/internal/syncing/syncer.go -[Cache Manager]: https://github.com/evstack/ev-node/blob/main/block/internal/cache/manager.go -[Components]: 
https://github.com/evstack/ev-node/blob/main/block/components.go -[tutorial]: https://ev.xyz/guides/full-node diff --git a/content/docs/reference/specs/block-validity.md b/content/docs/reference/specs/block-validity.md deleted file mode 100644 index 8eee587..0000000 --- a/content/docs/reference/specs/block-validity.md +++ /dev/null @@ -1,133 +0,0 @@ -# Block and Header Validity - -## Abstract - -Like all blockchains, chains are defined as the chain of **valid** blocks from the genesis, to the head. Thus, the block and header validity rules define the chain. - -Verifying a block/header is done in 3 parts: - -1. Verify correct serialization according to the protobuf spec - -2. Perform basic validation of the types - -3. Perform verification of the new block against the previously accepted block - -Evolve uses a header/data separation architecture where headers and data can be validated independently. The system has moved from a multi-validator model to a single signer model for simplified sequencer management. - -## Basic Validation - -Each type contains a `.ValidateBasic()` method, which verifies that certain basic invariants hold. The `ValidateBasic()` calls are nested for each structure. 
- -### SignedHeader Validation - -```go -SignedHeader.ValidateBasic() - // Make sure the SignedHeader's Header passes basic validation - Header.ValidateBasic() - verify ProposerAddress not nil - // Make sure the SignedHeader's signature passes basic validation - Signature.ValidateBasic() - // Ensure that someone signed the header - verify len(c.Signatures) not 0 - // For based chains (sh.Signer.IsEmpty()), pass validation - if !sh.Signer.IsEmpty(): - // Verify the signer matches the proposer address - verify sh.Signer.Address == sh.ProposerAddress - // Verify signature using custom verifier if set, otherwise use default - if sh.verifier != nil: - verify sh.verifier(sh) == nil - else: - verify sh.Signature.Verify(sh.Signer.PubKey, sh.Header.MarshalBinary()) -``` - -### SignedData Validation - -```go -SignedData.ValidateBasic() - // Always passes basic validation for the Data itself - Data.ValidateBasic() // always passes - // Make sure the signature is valid - Signature.ValidateBasic() - verify len(c.Signatures) not 0 - // Verify the signer - If !sd.Signer.IsEmpty(): - verify sd.Signature.Verify(sd.Signer.PubKey, sd.Data.MarshalBinary()) -``` - -### Block Validation - -Blocks are composed of SignedHeader and Data: - -```go -// Block validation happens by validating header and data separately -// then ensuring data hash matches -verify SignedHeader.ValidateBasic() == nil -verify Data.Hash() == SignedHeader.DataHash -``` - -## Verification Against Previous Block - -```go -SignedHeader.Verify(untrustedHeader *SignedHeader) - // Basic validation is handled by go-header before this - Header.Verify(untrustedHeader) - // Verify height sequence - if untrustedHeader.Height != h.Height + 1: - if untrustedHeader.Height > h.Height + 1: - return soft verification failure - return error "headers are not adjacent" - // Verify the link to previous header - verify untrustedHeader.LastHeaderHash == h.Header.Hash() - // Note: ValidatorHash field exists for compatibility but is not 
validated -``` - -## Data - -[Source: `data.go`](https://github.com/evstack/ev-node/blob/main/types/data.go) - -| **Field Name** | **Valid State** | **Validation** | -|----------------|-----------------------------------------|------------------------------------| -| Txs | Transaction data of the block | Data.Hash() == SignedHeader.DataHash | -| Metadata | Optional p2p gossiping metadata | Not validated | - -## SignedHeader - -[Source: `signed_header.go`](https://github.com/evstack/ev-node/blob/main/types/signed_header.go) - -| **Field Name** | **Valid State** | **Validation** | -|----------------|--------------------------------------------------------------------------|---------------------------------------------------------------------------------------------| -| Header | Valid header for the block | `Header` passes `ValidateBasic()` and `Verify()` | -| Signature | Valid signature from the single sequencer | `Signature` passes `ValidateBasic()`, verified against signer | -| Signer | Information about who signed the header | Must match ProposerAddress if not empty (based chain case) | -| verifier | Optional custom signature verification function | Used instead of default verification if set | - -## Header - -[Source: `header.go`](https://github.com/evstack/ev-node/blob/main/types/header.go) - -***Note***: Evolve has moved to a single signer model. The multi-validator architecture has been replaced with a simpler single sequencer approach. - -| **Field Name** | **Valid State** | **Validation** | -|---------------------|--------------------------------------------------------------------------------------------|---------------------------------------| -| **BaseHeader** | | | -| Height | Height of the previous accepted header, plus 1. | checked in the `Verify()`` step | -| Time | Timestamp of the block | Not validated in Evolve | -| ChainID | The hard-coded ChainID of the chain | Should be checked as soon as the header is received | -| **Header** . 
| | | -| Version | unused | | -| LastHeaderHash | The hash of the previous accepted block | checked in the `Verify()`` step | -| DataHash | Correct hash of the block's Data field | checked in the `ValidateBasic()`` step | -| AppHash | The correct state root after executing the block's transactions against the accepted state | checked during block execution | -| ProposerAddress | Address of the expected proposer | Must match Signer.Address in SignedHeader | -| ValidatorHash | Compatibility field for Tendermint light client | Not validated | - -## Signer - -[Source: `signed_header.go`](https://github.com/evstack/ev-node/blob/main/types/signed_header.go) - -The Signer type replaces the previous ValidatorSet for single sequencer operation: - -| **Field Name** | **Valid State** | **Validation** | -|----------------|-----------------------------------------------------------------|-----------------------------| -| PubKey | Public key of the signer | Must not be nil if Signer is not empty | -| Address | Address derived from the public key | Must match ProposerAddress | diff --git a/content/docs/reference/specs/da.md b/content/docs/reference/specs/da.md deleted file mode 100644 index 481a433..0000000 --- a/content/docs/reference/specs/da.md +++ /dev/null @@ -1,63 +0,0 @@ -# DA - -Evolve provides a generic [data availability interface][da-interface] for modular blockchains. Any DA that implements this interface can be used with Evolve. - -## Details - -`Client` can connect via JSON-RPC transports using Evolve's [jsonrpc][jsonrpc] implementations. 
The connection can be configured using the following cli flags: - -* `--rollkit.da.address`: url address of the DA service (default: "grpc://localhost:26650") -* `--rollkit.da.auth_token`: authentication token of the DA service -* `--rollkit.da.namespace`: namespace to use when submitting blobs to the DA service (deprecated) -* `--rollkit.da.header_namespace`: namespace to use when submitting headers to the DA service -* `--rollkit.da.data_namespace`: namespace to use when submitting data to the DA service - -The Submitter component now submits headers and data separately to the DA layer using different namespaces: - -* **Headers**: Submitted to the namespace specified by `--rollkit.da.header_namespace` (or falls back to `--rollkit.da.namespace` if not set) -* **Data**: Submitted to the namespace specified by `--rollkit.da.data_namespace` (or falls back to `--rollkit.da.namespace` if not set) - -Each submission first encodes the headers or data using protobuf (the encoded data are called blobs) and invokes the `Submit` method on the underlying DA implementation with the appropriate namespace. On successful submission (`StatusSuccess`), the DA block height which included the blobs is returned. - -To make sure that the serialised blocks don't exceed the underlying DA's blob limits, it fetches the blob size limit by calling `Config` which returns the limit as `uint64` bytes, then includes serialised blocks until the limit is reached. If the limit is reached, it submits the partial set and returns the count of successfully submitted blocks as `SubmittedCount`. The caller should retry with the remaining blocks until all the blocks are submitted. If the first block itself is over the limit, it throws an error. 
- -The `Submit` call may result in an error (`StatusError`) based on the underlying DA implementations on following scenarios: - -* the total blobs size exceeds the underlying DA's limits (includes empty blobs) -* the implementation specific failures, e.g., for [celestia-da-json-rpc][jsonrpc], invalid namespace, unable to create the commitment or proof, setting low gas price, etc, could return error. - -The retrieval process now supports both legacy single-namespace mode and separate namespace mode: - -1. **Legacy Mode Support**: For backward compatibility, the system first attempts to retrieve from the legacy namespace if migration has not been completed. - -2. **Separate Namespace Retrieval**: The system retrieves headers and data separately: - * Headers are retrieved from the `HeaderNamespace` - * Data is retrieved from the `DataNamespace` - * Results from both namespaces are combined - -3. **Namespace Migration**: The system automatically detects and tracks namespace migration: - * When data is found in new namespaces, migration is marked as complete - * Migration state is persisted to optimize future retrievals - * Once migration is complete, legacy namespace checks are skipped - -If there are no blocks available for a given DA height in any namespace, `StatusNotFound` is returned (which is not an error case). The retrieved blobs are converted back to headers and data, then combined into complete blocks for processing. - -Both header/data submission and retrieval operations may be unsuccessful if the DA node and the DA blockchain that the DA implementation is using have failures. For example, failures such as, DA mempool is full, DA submit transaction is nonce clashing with other transaction from the DA submitter account, DA node is not synced, etc. 
- -## Namespace Separation Benefits - -The separation of headers and data into different namespaces provides several advantages: - -* **Improved Scalability**: Headers and data can be processed independently, allowing for more efficient resource utilization -* **Flexible Data Availability**: Different availability guarantees can be applied to headers vs data -* **Optimized Retrieval**: Clients can retrieve only the data they need (e.g., light clients may only need headers) -* **Backward Compatibility**: The system maintains support for legacy single-namespace deployments while enabling gradual migration - -## References - -[1] [da-interface][da-interface] - -[2] [jsonrpc][jsonrpc] - -[da-interface]: https://github.com/evstack/ev-node/blob/main/block/public.go -[jsonrpc]: https://github.com/evstack/ev-node/tree/main/pkg/da/jsonrpc diff --git a/content/docs/reference/specs/full-node.md b/content/docs/reference/specs/full-node.md deleted file mode 100644 index 426c5df..0000000 --- a/content/docs/reference/specs/full-node.md +++ /dev/null @@ -1,107 +0,0 @@ -# Full Node - -## Abstract - -A Full Node is a top-level service that encapsulates different components of Evolve and initializes/manages them. - -## Details - -### Full Node Details - -A Full Node is initialized inside the Cosmos SDK start script along with the node configuration, a private key to use in the P2P client, a private key for signing blocks as a block proposer, a client creator, a genesis document, and a logger. It uses them to initialize the components described above. The components TxIndexer, BlockIndexer, and IndexerService exist to ensure cometBFT compatibility since they are needed for most of the RPC calls from the `SignClient` interface from cometBFT. - -Note that unlike a light node which only syncs and stores block headers seen on the P2P layer, the full node also syncs and stores full blocks seen on both the P2P network and the DA layer. 
Full blocks contain all the transactions published as part of the block. - -The Full Node mainly encapsulates and initializes/manages the following components: - -### genesisDoc - -The [genesis] document contains information about the initial state of the chain, in particular its validator set. - -### conf - -The [node configuration] contains all the necessary settings for the node to be initialized and function properly. - -### P2P - -The [peer-to-peer client] is used to gossip transactions between full nodes in the network. - -### Store - -The [Store] is initialized with `DefaultStore`, an implementation of the [store interface] which is used for storing and retrieving blocks, commits, and state. - -### blockComponents - -The [Block Components] provide a modular architecture for managing block-related operations. Instead of a single monolithic manager, the system uses specialized components: - -**For Aggregator Nodes:** - -- **Executor**: Block production (normal and lazy modes) and state transitions -- **Reaper**: Transaction collection and submission to sequencer -- **Submitter**: Header and data submission to DA layer -- **Syncer**: Block retrieval and synchronization from DA and P2P -- **Cache Manager**: Coordination and tracking across all components - -**For Non-Aggregator Nodes:** - -- **Syncer**: Block retrieval and synchronization from DA and P2P -- **Cache Manager**: Tracking and caching of synchronized blocks - -This modular architecture implements header/data separation where headers and transaction data are handled independently by different components. - -### dalc - -The [Data Availability Layer Client][dalc] is used to interact with the data availability layer. It is initialized with the DA Layer and DA Config specified in the node configuration. - -### hSyncService - -The [Header Sync Service] is used for syncing signed headers between nodes over P2P. It operates independently from data sync to support light clients. 
- -### dSyncService - -The [Data Sync Service] is used for syncing transaction data between nodes over P2P. This service is only used by full nodes, not light nodes. - -## Message Structure/Communication Format - -The Full Node communicates with other nodes in the network using the P2P client. It also communicates with the application using the ABCI proxy connections. The communication format is based on the P2P and ABCI protocols. - -## Assumptions and Considerations - -The Full Node assumes that the configuration, private keys, client creator, genesis document, and logger are correctly passed in by the Cosmos SDK. It also assumes that the P2P client, data availability layer client, block components, and other services can be started and stopped without errors. - -## Implementation - -See [full node] - -## References - -[1] [Full Node][full node] - -[2] [Genesis Document][genesis] - -[3] [Node Configuration][node configuration] - -[4] [Peer to Peer Client][peer-to-peer client] - -[5] [Store][Store] - -[6] [Store Interface][store interface] - -[7] [Block Components][block components] - -[8] [Data Availability Layer Client][dalc] - -[9] [Header Sync Service][Header Sync Service] - -[10] [Data Sync Service][Data Sync Service] - -[full node]: https://github.com/evstack/ev-node/blob/main/node/full.go -[genesis]: https://github.com/cometbft/cometbft/blob/main/spec/core/genesis.md -[node configuration]: https://github.com/evstack/ev-node/blob/main/pkg/config/config.go -[peer-to-peer client]: https://github.com/evstack/ev-node/blob/main/pkg/p2p/client.go -[Store]: https://github.com/evstack/ev-node/blob/main/pkg/store/store.go -[store interface]: https://github.com/evstack/ev-node/blob/main/pkg/store/types.go -[Block Components]: https://github.com/evstack/ev-node/blob/main/block/components.go -[dalc]: https://github.com/evstack/ev-node/blob/main/block/public.go -[Header Sync Service]: https://github.com/evstack/ev-node/blob/main/pkg/sync/sync_service.go -[Data Sync 
Service]: https://github.com/evstack/ev-node/blob/main/pkg/sync/sync_service.go diff --git a/content/docs/reference/specs/header-sync.md b/content/docs/reference/specs/header-sync.md deleted file mode 100644 index 7f87f5e..0000000 --- a/content/docs/reference/specs/header-sync.md +++ /dev/null @@ -1,108 +0,0 @@ -# Header and Data Sync - -## Abstract - -The nodes in the P2P network sync headers and data using separate sync services that implement the [go-header][go-header] interface. Evolve uses a header/data separation architecture where headers and transaction data are synchronized independently through parallel services. Each sync service consists of several components as listed below. - -|Component|Description| -|---|---| -|store| a prefixed [datastore][datastore] where synced items are stored (`headerSync` prefix for headers, `dataSync` prefix for data)| -|subscriber| a [libp2p][libp2p] node pubsub subscriber for the specific data type| -|P2P server| a server for handling requests between peers in the P2P network| -|exchange| a client that enables sending in/out-bound requests from/to the P2P network| -|syncer| a service for efficient synchronization. 
When a P2P node falls behind and wants to catch up to the latest network head via P2P network, it can use the syncer.| - -## Details - -Evolve implements two separate sync services: - -### Header Sync Service - -- Synchronizes `SignedHeader` structures containing block headers with signatures -- Used by all node types (sequencer, full, and light) -- Essential for maintaining the canonical view of the chain - -### Data Sync Service - -- Synchronizes `Data` structures containing transaction data -- Used only by full nodes and sequencers -- Light nodes do not run this service as they only need headers - -Both services: - -- Utilize the generic `SyncService[H header.Header[H]]` implementation -- Inherit the `ConnectionGater` from the node's P2P client for peer management -- Use `NodeConfig.BlockTime` to determine outdated items during sync -- Operate independently on separate P2P topics and datastores - -### Consumption of Sync Services - -#### Header Sync - -- Sequencer nodes publish signed headers to the P2P network after block creation -- Full and light nodes receive and store headers for chain validation -- Headers contain commitments (DataHash) that link to the corresponding data - -#### Data Sync - -- Sequencer nodes publish transaction data separately from headers -- Only full nodes receive and store data (light nodes skip this) -- Data is linked to headers through the DataHash commitment - -#### Parallel Broadcasting - -The Executor component (in aggregator nodes) broadcasts headers and data in parallel when publishing blocks: - -- Headers are sent through `headerBroadcaster` -- Data is sent through `dataBroadcaster` -- This enables efficient network propagation of both components - -## Assumptions - -- Separate datastores are created with different prefixes: - - Headers: `headerSync` prefix on the main datastore - - Data: `dataSync` prefix on the main datastore -- Network IDs are suffixed to distinguish services: - - Header sync: `{network}-headerSync` - - 
Data sync: `{network}-dataSync` -- Chain IDs for pubsub topics are also separated: - - Headers: `{chainID}-headerSync` creates topic like `/gm-headerSync/header-sub/v0.0.1` - - Data: `{chainID}-dataSync` creates topic like `/gm-dataSync/header-sub/v0.0.1` -- Both stores must contain at least one item before the syncer starts: - - On first boot, the services fetch the configured genesis height from peers - - On restart, each store reuses its latest item to derive the initial height requested from peers -- Sync services work only when connected to P2P network via `P2PConfig.Seeds` -- Node context is passed to all components for graceful shutdown -- Headers and data are linked through DataHash but synced independently - -## Implementation - -The sync service implementation can be found in [pkg/sync/sync_service.go][sync-service]. The generic `SyncService[H header.Header[H]]` is instantiated as: - -- `HeaderSyncService` for syncing `*types.SignedHeader` -- `DataSyncService` for syncing `*types.Data` - -Full nodes create and start both services, while light nodes only start the header sync service. The services are created in [full][fullnode] and [light][lightnode] node implementations. 
- -The block components integrate with both services through: - -- The Syncer component's P2PHandler retrieves headers and data from P2P -- The Executor component publishes headers and data through broadcast channels -- Separate stores and channels manage header and data synchronization - -## References - -[1] [Header Sync][sync-service] - -[2] [Full Node][fullnode] - -[3] [Light Node][lightnode] - -[4] [go-header][go-header] - -[sync-service]: https://github.com/evstack/ev-node/blob/main/pkg/sync/sync_service.go -[fullnode]: https://github.com/evstack/ev-node/blob/main/node/full.go -[lightnode]: https://github.com/evstack/ev-node/blob/main/node/light.go -[go-header]: https://github.com/celestiaorg/go-header -[libp2p]: https://github.com/libp2p/go-libp2p -[datastore]: https://github.com/ipfs/go-datastore diff --git a/content/docs/reference/specs/meta.json b/content/docs/reference/specs/meta.json deleted file mode 100644 index 227bee5..0000000 --- a/content/docs/reference/specs/meta.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "title": "Specifications", - "pages": ["..."] -} diff --git a/content/docs/reference/specs/out-of-order-blocks.png b/content/docs/reference/specs/out-of-order-blocks.png deleted file mode 100644 index fa7a955..0000000 Binary files a/content/docs/reference/specs/out-of-order-blocks.png and /dev/null differ diff --git a/content/docs/reference/specs/overview.md b/content/docs/reference/specs/overview.md deleted file mode 100644 index b471ed0..0000000 --- a/content/docs/reference/specs/overview.md +++ /dev/null @@ -1,17 +0,0 @@ -# Specs Overview - -Welcome to the Evolve Technical Specifications. - -This is comprehensive documentation on the inner components of Evolve, including data storage, transaction processing, and more. It’s an essential resource for developers looking to understand, contribute to, and leverage the full capabilities of Evolve. 
- -Each file in this folder covers a specific aspect of the system, from block management to data availability and networking. Use this page as a starting point to explore the technical details and architecture of Evolve. - -## Table of Contents - -- [Block Components](./block-manager.md): Explains the modular component architecture for block processing in Evolve. -- [Block Validity](./block-validity.md): Details the rules and checks for block validity within the protocol. -- [Data Availability (DA)](./da.md): Describes how Evolve ensures data availability and integrates with DA layers. -- [Full Node](./full-node.md): Outlines the architecture and operation of a full node in Evolve. -- [Header Sync](./header-sync.md): Covers the process and protocol for synchronizing block headers. -- [P2P](../../learn/specs/p2p.md): Documents the peer-to-peer networking layer and its protocols. -- [Store](./store.md): Provides information about the storage subsystem and data management. diff --git a/content/docs/reference/specs/store.md b/content/docs/reference/specs/store.md deleted file mode 100644 index 8432902..0000000 --- a/content/docs/reference/specs/store.md +++ /dev/null @@ -1,92 +0,0 @@ -# Store - -## Abstract - -The Store interface defines methods for storing and retrieving blocks, commits, and the state of the blockchain. - -## Protocol/Component Description - -The Store interface defines the following methods: - -- `Height`: Returns the height of the highest block in the store. -- `SetHeight`: Sets given height in the store if it's higher than the existing height in the store. -- `SaveBlock`: Saves a block (containing both header and data) along with its seen signature. -- `GetBlock`: Returns a block at a given height. -- `GetBlockByHash`: Returns a block with a given block header hash. - -Note: While blocks are stored as complete units in the store, the block components handle headers and data separately during synchronization and DA layer interaction. 
- -- `SaveBlockResponses`: Saves block responses in the Store. -- `GetBlockResponses`: Returns block results at a given height. -- `GetSignature`: Returns a signature for a block at a given height. -- `GetSignatureByHash`: Returns a signature for a block with a given block header hash. -- `UpdateState`: Updates the state saved in the Store. Only one State is stored. -- `GetState`: Returns the last state saved with UpdateState. -- `SaveValidators`: Saves the validator set at a given height. -- `GetValidators`: Returns the validator set at a given height. - -The `TxnDatastore` interface inside [go-datastore] is used for constructing different key-value stores for the underlying storage of a full node. There are two different implementations of `TxnDatastore` in [kv.go]: - -- `NewTestInMemoryKVStore`: Builds a key-value store that uses the [BadgerDB] library and operates in-memory, without accessing the disk. Used only across unit tests and integration tests. - -- `NewDefaultKVStore`: Builds a key-value store that uses the [BadgerDB] library and stores the data on disk at the specified path. - -A Evolve full node is [initialized][full_node_store_initialization] using `NewDefaultKVStore` as the base key-value store for underlying storage. To store various types of data in this base key-value store, different prefixes are used: `mainPrefix`, `dalcPrefix`, and `indexerPrefix`. The `mainPrefix` equal to `0` is used for the main node data, `dalcPrefix` equal to `1` is used for Data Availability Layer Client (DALC) data, and `indexerPrefix` equal to `2` is used for indexing related data. - -For the main node data, `DefaultStore` struct, an implementation of the Store interface, is used with the following prefixes for various types of data within it: - -- `blockPrefix` with value "b": Used to store complete blocks in the key-value store. -- `indexPrefix` with value "i": Used to index the blocks stored in the key-value store. 
-- `commitPrefix` with value "c": Used to store commits related to the blocks. -- `statePrefix` with value "s": Used to store the state of the blockchain. -- `responsesPrefix` with value "r": Used to store responses related to the blocks. -- `validatorsPrefix` with value "v": Used to store validator sets at a given height. - -Additional prefixes used by sync services: - -- `headerSyncPrefix` with value "hs": Used by the header sync service for P2P synced headers. -- `dataSyncPrefix` with value "ds": Used by the data sync service for P2P synced transaction data. - For example, in a call to `GetBlockByHash` for some block hash ``, the key used in the full node's base key-value store will be `/0/b/` where `0` is the main store prefix and `b` is the block prefix. Similarly, in a call to `GetValidators` for some height ``, the key used in the full node's base key-value store will be `/0/v/` where `0` is the main store prefix and `v` is the validator set prefix. - -Inside the key-value store, the value of these various types of data like `Block` is stored as a byte array which is encoded and decoded using the corresponding Protobuf [marshal and unmarshal methods][serialization]. - -The store is most widely used inside the [block components] to perform their functions correctly. Within the block components, since they have multiple go-routines, access is protected by mutex locks to synchronize read/write access and prevent race conditions. - -## Message Structure/Communication Format - -The Store does not communicate over the network, so there is no message structure or communication format. - -## Assumptions and Considerations - -The Store assumes that the underlying datastore is reliable and provides atomicity for transactions. It also assumes that the data passed to it for storage is valid and correctly formatted. - -## Implementation - -See [Store Interface][store_interface] and [Default Store][default_store] for its implementation. 
- -## References - -[1] [Store Interface][store_interface] - -[2] [Default Store][default_store] - -[3] [Full Node Store Initialization][full_node_store_initialization] - -[4] [Block Components][block components] - -[5] [Badger DB][BadgerDB] - -[6] [Go Datastore][go-datastore] - -[7] [Key Value Store][kv.go] - -[8] [Serialization][serialization] - -[store_interface]: https://github.com/evstack/ev-node/blob/main/pkg/store/types.go#L11 -[default_store]: https://github.com/evstack/ev-node/blob/main/pkg/store/store.go -[full_node_store_initialization]: https://github.com/evstack/ev-node/blob/main/node/full.go#L96 -[block components]: https://github.com/evstack/ev-node/blob/main/block/components.go -[BadgerDB]: https://github.com/dgraph-io/badger -[go-datastore]: https://github.com/ipfs/go-datastore -[kv.go]: https://github.com/evstack/ev-node/blob/main/pkg/store/kv.go -[serialization]: https://github.com/evstack/ev-node/blob/main/types/serialization.go diff --git a/content/docs/reference/specs/termination.png b/content/docs/reference/specs/termination.png deleted file mode 100644 index 0b61c8f..0000000 Binary files a/content/docs/reference/specs/termination.png and /dev/null differ diff --git a/eslint.config.mjs b/eslint.config.mjs index 48d88b4..a9eb938 100644 --- a/eslint.config.mjs +++ b/eslint.config.mjs @@ -1,11 +1,27 @@ -import nextConfig from "eslint-config-next"; +import tsParser from "@typescript-eslint/parser"; +import tsPlugin from "@typescript-eslint/eslint-plugin"; +import prettier from "eslint-config-prettier"; /** @type {import("eslint").Linter.FlatConfig[]} */ export default [ - ...nextConfig, { + ignores: ["dist/**", ".astro/**", "node_modules/**", "public/**", "**/*.astro"], + }, + { + files: ["src/**/*.{ts,tsx,js,jsx}"], + languageOptions: { + parser: tsParser, + parserOptions: { + ecmaFeatures: { jsx: true }, + sourceType: "module", + }, + }, + plugins: { + "@typescript-eslint": tsPlugin, + }, rules: { - "no-unused-vars": [ + "no-unused-vars": 
"off", + "@typescript-eslint/no-unused-vars": [ "error", { args: "after-used", @@ -16,4 +32,5 @@ export default [ ], }, }, + prettier, ]; diff --git a/next.config.mjs b/next.config.mjs deleted file mode 100644 index 64065bf..0000000 --- a/next.config.mjs +++ /dev/null @@ -1,47 +0,0 @@ -import { createMDX } from 'fumadocs-mdx/next'; - -// Doc sections that previously lived at the root in VitePress -const docSections = [ - 'adr', - // 'api' excluded — conflicts with /api/ routes (md, search) - 'concepts', - 'ev-abci', - 'ev-reth', - 'getting-started', - 'guides', - 'learn', - 'overview', - 'reference' -] - -/** @type {import('next').NextConfig} */ -const nextConfig = { - images: { - remotePatterns: [ - { - protocol: 'https', - hostname: - '*.gstatic.com', - pathname: '**' - } - ] - }, - async redirects() { - return docSections.flatMap((section) => [ - { - source: `/${section}`, - destination: `/docs/${section}`, - permanent: true - }, - { - source: `/${section}/:path*`, - destination: `/docs/${section}/:path*`, - permanent: true - } - ]) - } -} - -const withMDX = createMDX(); - -export default withMDX(nextConfig); diff --git a/package.json b/package.json index c59682d..48afba1 100644 --- a/package.json +++ b/package.json @@ -1,32 +1,25 @@ { "name": "site", - "version": "0.1.0", + "version": "0.2.0", "private": true, "scripts": { - "dev": "next dev", - "build": "next build", - "start": "next start", - "lint": "next lint", + "dev": "astro dev", + "build": "astro build", + "preview": "astro preview", + "lint": "eslint src/", "prepare": "husky install", "commit": "git-cz" }, "dependencies": { + "@astrojs/react": "^5.0.2", + "@astrojs/sitemap": "^3.7.2", "@rive-app/react-webgl2": "^4.27.0", - "@types/mdx": "^2.0.13", - "fumadocs-core": "^16.7.4", - "fumadocs-mdx": "^14.2.11", - "fumadocs-openapi": "^10.4.1", - "fumadocs-ui": "^16.7.4", - "mermaid": "^11.13.0", + "astro": "^6.1.1", "motion": "^12.35.0", - "next": "16.1.1", "react": "^19.2.3", "react-dom": "^19.2.3", 
"react-fast-marquee": "^1.6.5", "react-markdown": "^10.1.0", - "remark-directive": "^4.0.0", - "sass": "^1.97.1", - "svg-pan-zoom": "^3.6.2", "tailwind-merge": "^3.5.0" }, "devDependencies": { @@ -35,26 +28,30 @@ "@playwright/test": "^1.58.2", "@tailwindcss/postcss": "^4.1.18", "@types/node": "^25", - "@types/react": "^19", - "@types/react-dom": "^19", + "@types/react": "^19.2.14", + "@types/react-dom": "^19.2.3", + "@typescript-eslint/eslint-plugin": "^8.57.2", + "@typescript-eslint/parser": "^8.57.2", "autoprefixer": "^10.4.23", "commitizen": "^4.3.1", "cz-conventional-changelog": "^3.3.0", "eslint": "^9.39.2", - "eslint-config-next": "16.1.1", "eslint-config-prettier": "^10.1.8", "eslint-plugin-prettier": "^5.5.4", "husky": "^9.1.7", "lint-staged": "^16.2.7", "postcss": "^8", "prettier": "^3.7.4", + "prettier-plugin-astro": "^0.14.1", "tailwindcss": "^4.1.18", "typescript": "^5" }, "lint-staged": { - "*.{js,ts,jsx,tsx}": [ + "src/**/*.{js,ts,jsx,tsx}": [ "eslint --fix", - "eslint", + "prettier --write" + ], + "src/**/*.astro": [ "prettier --write" ], "*.json": [ diff --git a/public/_redirects b/public/_redirects new file mode 100644 index 0000000..4fdad81 --- /dev/null +++ b/public/_redirects @@ -0,0 +1,11 @@ +# Redirect docs paths to docs site +/docs/* https://docs.ev.xyz/:splat 301 +/learn/* https://docs.ev.xyz/learn/:splat 301 +/getting-started/* https://docs.ev.xyz/getting-started/:splat 301 +/guides/* https://docs.ev.xyz/guides/:splat 301 +/concepts/* https://docs.ev.xyz/concepts/:splat 301 +/overview/* https://docs.ev.xyz/overview/:splat 301 +/reference/* https://docs.ev.xyz/reference/:splat 301 +/ev-abci/* https://docs.ev.xyz/ev-abci/:splat 301 +/ev-reth/* https://docs.ev.xyz/ev-reth/:splat 301 +/adr/* https://docs.ev.xyz/adr/:splat 301 diff --git a/public/docs/openapi-rpc.json b/public/docs/openapi-rpc.json deleted file mode 100644 index deb727b..0000000 --- a/public/docs/openapi-rpc.json +++ /dev/null @@ -1,1050 +0,0 @@ -{ - "openapi": "3.1.0", - 
"info": { - "title": "Evolve API", - "description": "This API provides access to Signer, Store, P2P, Config, and Health services.\n\n## Services\n\n* **Signer Service** - Sign messages and retrieve public keys\n* **Store Service** - Access blocks, state, and metadata\n* **P2P Service** - Network and peer information\n* **Config Service** - Network configuration and namespace information\n* **Health Service** - Node health checks and simple HTTP endpoints\n\n## Protocols\n\n### gRPC-Web Protocol\n\nMost endpoints use gRPC-Web protocol over HTTP/1.1 with JSON encoding. Requests are made via POST with `Content-Type: application/json`.\n\n### Simple HTTP Endpoints\n\nSome endpoints (like `/health/live`) are simple HTTP GET requests that return plain text responses for basic monitoring and health checks.", - "version": "1.0.0", - "contact": { - "name": "Evolve Team", - "url": "https://ev.xyz" - } - }, - "servers": [ - { - "url": "http://localhost:7331", - "description": "A local Evolve instance configured to provide a remote procedure call (RPC) endpoint, actively listening for connections on TCP port 7331." 
- } - ], - "tags": [ - { - "name": "Signer Service", - "description": "Sign messages and retrieve public keys" - }, - { - "name": "Store Service", - "description": "Access blocks, state, and metadata from the chain store" - }, - { - "name": "P2P Service", - "description": "Network and peer information" - }, - { - "name": "Config Service", - "description": "Network configuration and namespace information" - }, - { - "name": "Health Service", - "description": "Node health and liveness checks" - } - ], - "paths": { - "/evnode.v1.SignerService/Sign": { - "post": { - "tags": ["Signer Service"], - "summary": "Sign a message", - "description": "Sign the given message bytes and return the signature.", - "operationId": "sign", - "requestBody": { - "description": "Message to sign", - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SignRequest" - }, - "examples": { - "default": { - "summary": "Sign a message", - "value": { - "message": "SGVsbG8gV29ybGQ=" - } - } - } - } - } - }, - "responses": { - "200": { - "description": "Message signed successfully", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SignResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest" - }, - "500": { - "$ref": "#/components/responses/InternalError" - } - } - } - }, - "/evnode.v1.SignerService/GetPublic": { - "post": { - "tags": ["Signer Service"], - "summary": "Get public key", - "description": "Retrieve the public key of the signer.", - "operationId": "getPublic", - "requestBody": { - "description": "Get public key request (empty)", - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetPublicRequest" - }, - "examples": { - "default": { - "summary": "Get public key", - "value": {} - } - } - } - } - }, - "responses": { - "200": { - "description": "Public key retrieved successfully", - "content": { - "application/json": { - 
"schema": { - "$ref": "#/components/schemas/GetPublicResponse" - } - } - } - }, - "500": { - "$ref": "#/components/responses/InternalError" - } - } - } - }, - "/evnode.v1.StoreService/GetBlock": { - "post": { - "tags": ["Store Service"], - "summary": "Get a block", - "description": "Retrieve a block by height or hash from the chain store.", - "operationId": "getBlock", - "requestBody": { - "description": "Block request parameters", - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetBlockRequest" - }, - "examples": { - "by_height": { - "summary": "Get block by height", - "value": { - "height": 1 - } - }, - "by_hash": { - "summary": "Get block by hash", - "value": { - "hash": "SGVsbG8gV29ybGQ=" - } - } - } - } - } - }, - "responses": { - "200": { - "description": "Block retrieved successfully", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetBlockResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest" - }, - "404": { - "$ref": "#/components/responses/NotFound" - }, - "500": { - "$ref": "#/components/responses/InternalError" - } - } - } - }, - "/evnode.v1.StoreService/GetState": { - "post": { - "tags": ["Store Service"], - "summary": "Get current state", - "description": "Retrieve the current state of the chain.", - "operationId": "getState", - "requestBody": { - "description": "State request (empty)", - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetStateRequest" - }, - "examples": { - "default": { - "summary": "Get current state", - "value": {} - } - } - } - } - }, - "responses": { - "200": { - "description": "State retrieved successfully", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetStateResponse" - } - } - } - }, - "500": { - "$ref": "#/components/responses/InternalError" - } - } - } - }, - "/evnode.v1.StoreService/GetMetadata": { - "post": 
{ - "tags": ["Store Service"], - "summary": "Get metadata", - "description": "Retrieve metadata by key from the chain store.", - "operationId": "getMetadata", - "requestBody": { - "description": "Metadata request with key", - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetMetadataRequest" - }, - "examples": { - "default": { - "summary": "Get metadata by key", - "value": { - "key": "example_key" - } - } - } - } - } - }, - "responses": { - "200": { - "description": "Metadata retrieved successfully", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetMetadataResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest" - }, - "404": { - "$ref": "#/components/responses/NotFound" - }, - "500": { - "$ref": "#/components/responses/InternalError" - } - } - } - }, - "/evnode.v1.P2PService/GetPeerInfo": { - "post": { - "tags": ["P2P Service"], - "summary": "Get peer information", - "description": "Retrieve information about connected peers.", - "operationId": "getPeerInfo", - "requestBody": { - "description": "Peer info request (empty)", - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetPeerInfoRequest" - }, - "examples": { - "default": { - "summary": "Get peer information", - "value": {} - } - } - } - } - }, - "responses": { - "200": { - "description": "Peer information retrieved successfully", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetPeerInfoResponse" - } - } - } - }, - "500": { - "$ref": "#/components/responses/InternalError" - } - } - } - }, - "/evnode.v1.P2PService/GetNetInfo": { - "post": { - "tags": ["P2P Service"], - "summary": "Get network information", - "description": "Retrieve network information and statistics.", - "operationId": "getNetInfo", - "requestBody": { - "description": "Network info request (empty)", - "required": true, - "content": 
{ - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetNetInfoRequest" - }, - "examples": { - "default": { - "summary": "Get network information", - "value": {} - } - } - } - } - }, - "responses": { - "200": { - "description": "Network information retrieved successfully", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetNetInfoResponse" - } - } - } - }, - "500": { - "$ref": "#/components/responses/InternalError" - } - } - } - }, - "/evnode.v1.ConfigService/GetNamespace": { - "post": { - "tags": ["Config Service"], - "summary": "Get namespace configuration", - "description": "Retrieve the header and data namespace configuration for this network.", - "operationId": "getNamespace", - "requestBody": { - "description": "Get namespace request (empty)", - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetNamespaceRequest" - }, - "examples": { - "default": { - "summary": "Get namespace configuration", - "value": {} - } - } - } - } - }, - "responses": { - "200": { - "description": "Namespace configuration retrieved successfully", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetNamespaceResponse" - } - } - } - }, - "500": { - "$ref": "#/components/responses/InternalError" - } - } - } - }, - "/evnode.v1.HealthService/Livez": { - "post": { - "tags": ["Health Service"], - "summary": "Check node health", - "description": "Check if the node is alive and healthy.", - "operationId": "livez", - "requestBody": { - "description": "Health check request (empty)", - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/LivezRequest" - }, - "examples": { - "default": { - "summary": "Check node health", - "value": {} - } - } - } - } - }, - "responses": { - "200": { - "description": "Node is healthy", - "content": { - "application/json": { - "schema": { - "$ref": 
"#/components/schemas/GetHealthResponse" - } - } - } - }, - "503": { - "description": "Node is unhealthy", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Error" - } - } - } - } - } - } - }, - "/health/live": { - "get": { - "tags": ["Health Service"], - "summary": "Simple liveness check", - "description": "Simple HTTP endpoint to check if the node is alive. Returns plain text 'OK' response.", - "operationId": "healthLive", - "responses": { - "200": { - "description": "Node is alive", - "content": { - "text/plain": { - "schema": { - "type": "string", - "example": "OK" - } - } - } - } - } - } - }, - "/evnode.v1.StoreService/GetP2PStoreInfo": { - "post": { - "tags": ["Store Service"], - "summary": "Inspect go-header stores", - "description": "Returns head/tail information for the header and data go-header stores used by P2P sync.", - "operationId": "getP2PStoreInfo", - "requestBody": { - "description": "Empty request", - "required": true, - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Empty" - }, - "examples": { - "default": { - "summary": "Get go-header store snapshots", - "value": {} - } - } - } - } - }, - "responses": { - "200": { - "description": "Snapshots returned successfully", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetP2PStoreInfoResponse" - } - } - } - }, - "500": { - "$ref": "#/components/responses/InternalError" - } - } - } - } - }, - "components": { - "responses": { - "BadRequest": { - "description": "Bad Request", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Error" - } - } - } - }, - "NotFound": { - "description": "Not Found", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Error" - } - } - } - }, - "InternalError": { - "description": "Internal Server Error", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Error" - } - } - } 
- } - }, - "schemas": { - "SignRequest": { - "type": "object", - "description": "Request to sign a message", - "required": ["message"], - "properties": { - "message": { - "type": "string", - "format": "byte", - "description": "The bytes we want to sign (base64-encoded)", - "example": "SGVsbG8gV29ybGQ=" - } - } - }, - "SignResponse": { - "type": "object", - "description": "Response containing signature", - "required": ["signature"], - "properties": { - "signature": { - "type": "string", - "format": "byte", - "description": "The signature bytes (base64-encoded)", - "example": "c2lnbmF0dXJl" - } - } - }, - "GetPublicRequest": { - "type": "object", - "description": "Request to get public key (empty)", - "properties": {} - }, - "GetPublicResponse": { - "type": "object", - "description": "Response containing public key", - "required": ["public_key"], - "properties": { - "public_key": { - "type": "string", - "format": "byte", - "description": "The public key (base64-encoded)", - "example": "cHVibGljX2tleQ==" - } - } - }, - "GetBlockRequest": { - "type": "object", - "description": "Request to get a block by height or hash", - "properties": { - "height": { - "type": "integer", - "format": "int64", - "description": "Block height to retrieve", - "example": 1 - }, - "hash": { - "type": "string", - "format": "byte", - "description": "Block hash to retrieve (base64-encoded)", - "example": "SGVsbG8gV29ybGQ=" - } - } - }, - "GetBlockResponse": { - "type": "object", - "description": "Response containing block data", - "properties": { - "block": { - "$ref": "#/components/schemas/Block" - }, - "header_da_height": { - "type": "integer", - "format": "int64", - "description": "Data availability height for header" - }, - "data_da_height": { - "type": "integer", - "format": "int64", - "description": "Data availability height for data" - } - } - }, - "GetStateRequest": { - "type": "object", - "description": "Request to get current state (empty)", - "properties": {} - }, - 
"GetStateResponse": { - "type": "object", - "description": "Response containing current state", - "properties": { - "state": { - "$ref": "#/components/schemas/State" - } - } - }, - "GetMetadataRequest": { - "type": "object", - "description": "Request to get metadata by key", - "required": ["key"], - "properties": { - "key": { - "type": "string", - "description": "Metadata key to retrieve", - "example": "example_key" - } - } - }, - "GetMetadataResponse": { - "type": "object", - "description": "Response containing metadata", - "required": ["value"], - "properties": { - "value": { - "type": "string", - "format": "byte", - "description": "Metadata value (base64-encoded)", - "example": "dGVzdCB2YWx1ZQ==" - } - } - }, - "GetPeerInfoRequest": { - "type": "object", - "description": "Request to get peer information (empty)", - "properties": {} - }, - "GetPeerInfoResponse": { - "type": "object", - "description": "Response containing peer information", - "required": ["peers"], - "properties": { - "peers": { - "type": "array", - "items": { - "$ref": "#/components/schemas/PeerInfo" - }, - "description": "List of connected peers" - } - } - }, - "GetNetInfoRequest": { - "type": "object", - "description": "Request to get network information (empty)", - "properties": {} - }, - "GetNetInfoResponse": { - "type": "object", - "description": "Response containing network information", - "properties": { - "net_info": { - "$ref": "#/components/schemas/NetInfo" - } - } - }, - "GetNamespaceRequest": { - "type": "object", - "description": "Request to get namespace configuration (empty)", - "properties": {} - }, - "GetNamespaceResponse": { - "type": "object", - "description": "Response containing namespace configuration. 
Namespaces are encoded by the node to ensure proper formatting and compatibility with the DA layer.", - "required": ["header_namespace", "data_namespace"], - "properties": { - "header_namespace": { - "type": "string", - "description": "The namespace identifier for block headers. This namespace is used exclusively for storing and retrieving block header data on the DA layer. The value is pre-encoded by the node.", - "example": "0x01234567890abcdef" - }, - "data_namespace": { - "type": "string", - "description": "The namespace identifier for block data (transactions). This namespace is used exclusively for storing and retrieving transaction data on the DA layer, separate from headers. The value is pre-encoded by the node.", - "example": "0xfedcba9876543210" - } - } - }, - "LivezRequest": { - "type": "object", - "description": "Request to check node health (empty)", - "properties": {} - }, - "GetHealthResponse": { - "type": "object", - "description": "Response indicating node health status", - "required": ["status"], - "properties": { - "status": { - "$ref": "#/components/schemas/HealthStatus" - } - } - }, - "HealthStatus": { - "type": "string", - "enum": ["UNKNOWN", "PASS", "WARN", "FAIL"], - "description": "Health status of the node" - }, - "Block": { - "type": "object", - "description": "Chain block data", - "properties": { - "header": { - "$ref": "#/components/schemas/SignedHeader" - }, - "data": { - "$ref": "#/components/schemas/Data" - } - } - }, - "SignedHeader": { - "type": "object", - "description": "Signed block header", - "properties": { - "header": { - "$ref": "#/components/schemas/Header" - }, - "signature": { - "type": "string", - "format": "byte", - "description": "Header signature (base64-encoded)" - }, - "signer": { - "$ref": "#/components/schemas/Signer" - } - } - }, - "Header": { - "type": "object", - "description": "Block header information", - "required": ["height", "time", "chain_id"], - "properties": { - "version": { - "$ref": 
"#/components/schemas/Version" - }, - "height": { - "type": "integer", - "format": "int64", - "description": "Block height" - }, - "time": { - "type": "integer", - "format": "int64", - "description": "Block creation time" - }, - "last_header_hash": { - "type": "string", - "format": "byte", - "description": "Previous block info (base64-encoded)" - }, - "data_hash": { - "type": "string", - "format": "byte", - "description": "Block.Data root aka Transactions (base64-encoded)" - }, - "app_hash": { - "type": "string", - "format": "byte", - "description": "State after applying txs from the current block (base64-encoded)" - }, - "proposer_address": { - "type": "string", - "format": "byte", - "description": "Original proposer of the block (base64-encoded)" - }, - "validator_hash": { - "type": "string", - "format": "byte", - "description": "validatorhash for compatibility with tendermint light client (base64-encoded)" - }, - "chain_id": { - "type": "string", - "description": "Chain ID the block belongs to" - } - } - }, - "Version": { - "type": "object", - "description": "Version captures the consensus rules for processing a block", - "required": ["block", "app"], - "properties": { - "block": { - "type": "integer", - "format": "int64", - "description": "Block version" - }, - "app": { - "type": "integer", - "format": "int64", - "description": "App version" - } - } - }, - "Signer": { - "type": "object", - "description": "Signer of a block", - "required": ["address", "pub_key"], - "properties": { - "address": { - "type": "string", - "format": "byte", - "description": "Address of the signer (base64-encoded)" - }, - "pub_key": { - "type": "string", - "format": "byte", - "description": "Public key of the signer (base64-encoded)" - } - } - }, - "Data": { - "type": "object", - "description": "Block transaction data", - "required": ["txs"], - "properties": { - "metadata": { - "$ref": "#/components/schemas/Metadata" - }, - "txs": { - "type": "array", - "items": { - "type": "string", - 
"format": "byte" - }, - "description": "List of transactions (base64-encoded)" - } - } - }, - "Metadata": { - "type": "object", - "description": "Metadata of a block", - "required": ["chain_id", "height", "time"], - "properties": { - "chain_id": { - "type": "string", - "description": "Chain ID" - }, - "height": { - "type": "integer", - "format": "int64", - "description": "Block height" - }, - "time": { - "type": "integer", - "format": "int64", - "description": "Block creation time" - }, - "last_data_hash": { - "type": "string", - "format": "byte", - "description": "Previous block info (base64-encoded)" - } - } - }, - "State": { - "type": "object", - "description": "Current chain state", - "required": ["chain_id", "initial_height", "last_block_height", "da_height"], - "properties": { - "version": { - "$ref": "#/components/schemas/Version" - }, - "chain_id": { - "type": "string", - "description": "Chain ID" - }, - "initial_height": { - "type": "integer", - "format": "int64", - "description": "Initial height" - }, - "last_block_height": { - "type": "integer", - "format": "int64", - "description": "Last block height" - }, - "last_block_time": { - "type": "string", - "format": "date-time", - "description": "Last block time" - }, - "da_height": { - "type": "integer", - "format": "int64", - "description": "Data availability height" - }, - "app_hash": { - "type": "string", - "format": "byte", - "description": "Application state hash (base64-encoded)" - } - } - }, - "PeerInfo": { - "type": "object", - "description": "Information about a connected peer", - "required": ["id", "address"], - "properties": { - "id": { - "type": "string", - "description": "Peer ID" - }, - "address": { - "type": "string", - "description": "Peer network address" - } - } - }, - "NetInfo": { - "type": "object", - "description": "Network information", - "required": ["id", "listen_addresses", "connected_peers"], - "properties": { - "id": { - "type": "string", - "description": "Network ID" - }, - 
"listen_addresses": { - "type": "array", - "items": { - "type": "string" - }, - "description": "Listen addresses" - }, - "connected_peers": { - "type": "array", - "items": { - "type": "string" - }, - "description": "List of connected peers" - } - } - }, - "Error": { - "type": "object", - "description": "Error response", - "required": ["code", "message"], - "properties": { - "code": { - "type": "integer", - "description": "Error code" - }, - "message": { - "type": "string", - "description": "Error message" - }, - "details": { - "type": "array", - "items": { - "type": "object" - }, - "description": "Additional error details" - } - } - }, - "P2PStoreEntry": { - "type": "object", - "description": "Head or tail entry for a go-header store", - "required": ["height", "hash"], - "properties": { - "height": { - "type": "integer", - "format": "int64", - "description": "Block height" - }, - "hash": { - "type": "string", - "format": "byte", - "description": "Header/data hash (base64-encoded)" - }, - "time": { - "type": "string", - "format": "date-time", - "description": "Entry timestamp" - } - } - }, - "P2PStoreSnapshot": { - "type": "object", - "description": "Snapshot of a go-header store", - "required": ["label", "height", "head_present", "tail_present"], - "properties": { - "label": { - "type": "string", - "description": "Human friendly store label" - }, - "height": { - "type": "integer", - "format": "int64", - "description": "Highest contiguous height" - }, - "head_present": { - "type": "boolean", - "description": "Whether a head entry exists" - }, - "head": { - "$ref": "#/components/schemas/P2PStoreEntry" - }, - "tail_present": { - "type": "boolean", - "description": "Whether a tail entry exists" - }, - "tail": { - "$ref": "#/components/schemas/P2PStoreEntry" - } - } - }, - "GetP2PStoreInfoResponse": { - "type": "object", - "description": "Snapshot of the header and data go-header stores", - "required": ["stores"], - "properties": { - "stores": { - "type": "array", - 
"items": { - "$ref": "#/components/schemas/P2PStoreSnapshot" - } - } - } - } - } - } -} diff --git a/source.config.ts b/source.config.ts deleted file mode 100644 index f062bc9..0000000 --- a/source.config.ts +++ /dev/null @@ -1,38 +0,0 @@ -import { defineDocs, defineConfig, frontmatterSchema } from 'fumadocs-mdx/config' -import remarkDirective from 'remark-directive' -import { remarkDirectiveAdmonition, remarkMdxMermaid } from 'fumadocs-core/mdx-plugins' -import { remarkTemplateVars } from './src/plugins/remark-template-vars' -import { remarkVitepressCodeGroup } from './src/plugins/remark-vitepress-codegroup' -import { remarkStripMdLinks } from './src/plugins/remark-strip-md-links' -import { remarkStripTextDirectives } from './src/plugins/remark-strip-text-directives' -import { z } from 'zod' -import lastModified from 'fumadocs-mdx/plugins/last-modified' - -export const docs = defineDocs({ - dir: 'content/docs', - docs: { - schema: frontmatterSchema.extend({ - title: z.string().optional().default(''), - full: z.boolean().optional().default(false) - }), - postprocess: { - includeProcessedMarkdown: true - } - } -}) - -export default defineConfig({ - plugins: [lastModified()], - mdxOptions: { - remarkPlugins: (defaults) => [ - remarkDirective, - remarkDirectiveAdmonition, - remarkStripTextDirectives, - remarkVitepressCodeGroup, - remarkTemplateVars, - remarkStripMdLinks, - remarkMdxMermaid, - ...defaults - ] - } -}) diff --git a/src/app/(marketing)/layout.tsx b/src/app/(marketing)/layout.tsx deleted file mode 100644 index 111244c..0000000 --- a/src/app/(marketing)/layout.tsx +++ /dev/null @@ -1,11 +0,0 @@ -import type { ReactNode } from 'react' -import Header from '@/components/ui/organisms/Header' - -export default function MarketingLayout({ children }: { children: ReactNode }) { - return ( - <> -
- {children} - - ) -} diff --git a/src/app/(marketing)/page.tsx b/src/app/(marketing)/page.tsx deleted file mode 100644 index 0811bea..0000000 --- a/src/app/(marketing)/page.tsx +++ /dev/null @@ -1,21 +0,0 @@ -import HomeHeroSection from '@/components/ui/sections/HomeHeroSection' -import LogoSection from '@/components/ui/sections/LogoSection' -import ValuePropSection from '@/components/ui/sections/ValuePropSection' -import ArchitectureSection from '@/components/ui/sections/ArchitectureSection' -import UseCasesSection from '@/components/ui/sections/UseCasesSection' -import ComparisonSection from '@/components/ui/sections/ComparisonSection' -import EcosystemSection from '@/components/ui/sections/EcosystemSection' - -export default function Home() { - return ( -
- - - - - - - -
- ) -} diff --git a/src/app/(marketing)/privacy-policy/page.tsx b/src/app/(marketing)/privacy-policy/page.tsx deleted file mode 100644 index 6e6dc86..0000000 --- a/src/app/(marketing)/privacy-policy/page.tsx +++ /dev/null @@ -1,21 +0,0 @@ -import LegalPage from '@/components/ui/organisms/LegalPage' -import EcosystemSection from '@/components/ui/sections/EcosystemSection' -import type { Metadata } from 'next' - -export const metadata: Metadata = { - title: 'Privacy Policy', - description: 'Learn about how we collect, use, and protect your personal information.', - robots: { - index: false, - follow: true - } -} - -export default function PrivacyPolicyPage() { - return ( - <> - - - - ) -} diff --git a/src/app/(marketing)/terms-and-conditions/page.tsx b/src/app/(marketing)/terms-and-conditions/page.tsx deleted file mode 100644 index e15c6bf..0000000 --- a/src/app/(marketing)/terms-and-conditions/page.tsx +++ /dev/null @@ -1,21 +0,0 @@ -import LegalPage from '@/components/ui/organisms/LegalPage' -import EcosystemSection from '@/components/ui/sections/EcosystemSection' -import { Metadata } from 'next' - -export const metadata: Metadata = { - title: 'Terms & Conditions', - description: 'Review the terms that govern the use of our website and services.', - robots: { - index: false, - follow: true - } -} - -export default function TermsAndConditionsPage() { - return ( - <> - - - - ) -} diff --git a/src/app/api/md/[[...slug]]/route.ts b/src/app/api/md/[[...slug]]/route.ts deleted file mode 100644 index 48bae19..0000000 --- a/src/app/api/md/[[...slug]]/route.ts +++ /dev/null @@ -1,23 +0,0 @@ -import { source } from '@/lib/source' -import { type NextRequest, NextResponse } from 'next/server' -import { notFound } from 'next/navigation' - -export const revalidate = false - -export async function GET(_req: NextRequest, { params }: { params: Promise<{ slug?: string[] }> }) { - const { slug } = await params - const page = source.getPage(slug) - if (!page) notFound() - - const text 
= await page.data.getText('processed') - - return new NextResponse(text, { - headers: { - 'Content-Type': 'text/markdown; charset=utf-8' - } - }) -} - -export function generateStaticParams() { - return source.generateParams() -} diff --git a/src/app/api/search/route.ts b/src/app/api/search/route.ts deleted file mode 100644 index 60e8da7..0000000 --- a/src/app/api/search/route.ts +++ /dev/null @@ -1,4 +0,0 @@ -import { source } from '@/lib/source' -import { createFromSource } from 'fumadocs-core/search/server' - -export const { GET } = createFromSource(source) diff --git a/src/app/docs/[[...slug]]/page.tsx b/src/app/docs/[[...slug]]/page.tsx deleted file mode 100644 index a06accd..0000000 --- a/src/app/docs/[[...slug]]/page.tsx +++ /dev/null @@ -1,81 +0,0 @@ -import { source } from '@/lib/source' -import { DocsBody, DocsPage, EditOnGitHub, PageLastUpdate } from 'fumadocs-ui/page' -import { MarkdownCopyButton, ViewOptionsPopover } from 'fumadocs-ui/layouts/docs/page' -import { notFound } from 'next/navigation' -import { getMDXComponents } from '@/components/mdx' -import type { Metadata } from 'next' - -const EDIT_URL_BASE = 'https://github.com/evstack/docs/edit/main' -const GITHUB_BLOB_BASE = 'https://github.com/evstack/docs/blob/main' - -export default async function Page(props: { params: Promise<{ slug?: string[] }> }) { - const params = await props.params - const page = source.getPage(params.slug) - if (!page) notFound() - - const MDX = page.data.body - const lastModified = page.data._exports.lastModified as Date | undefined - const filePath = page.data.info.path - const markdownUrl = `/api/md/${page.slugs.join('/')}` - const githubUrl = `${GITHUB_BLOB_BASE}/${filePath}` - - return ( - - - - - ) - }} - > - {/* Mobile/tablet: show copy actions above title (hidden on xl where TOC has them) */} -
- - -
- - - -
- - {lastModified && } -
-
- ) -} - -export function generateStaticParams() { - return source.generateParams() -} - -export async function generateMetadata(props: { - params: Promise<{ slug?: string[] }> -}): Promise { - const params = await props.params - const page = source.getPage(params.slug) - if (!page) return {} - - const url = `/docs/${(params.slug ?? []).join('/')}` - - return { - title: `${page.data.title} | Evolve Docs`, - description: page.data.description, - alternates: { canonical: url }, - openGraph: { - title: `${page.data.title} | Evolve Docs`, - description: page.data.description, - url, - type: 'article' - }, - twitter: { - card: 'summary_large_image', - title: `${page.data.title} | Evolve Docs`, - description: page.data.description - } - } -} diff --git a/src/app/docs/layout.tsx b/src/app/docs/layout.tsx deleted file mode 100644 index f183f47..0000000 --- a/src/app/docs/layout.tsx +++ /dev/null @@ -1,39 +0,0 @@ -import type { ReactNode } from 'react' -import { source } from '@/lib/source' -import { DocsLayout } from 'fumadocs-ui/layouts/docs' -import { baseOptions } from '@/lib/layout.shared' -import { DocsFooter } from '@/components/docs/DocsFooter' -import { SidebarHomeLink } from '@/components/docs/SidebarHomeLink' - -export default function Layout({ children }: { children: ReactNode }) { - return ( - - -
- - ), - footer: - }} - tabs={{ - transform(option, node) { - if (!node.icon) return option - return { - ...option, - icon: ( -
- {node.icon} -
- ) - } - } - }} - {...baseOptions()} - > - {children} -
- ) -} diff --git a/src/app/favicon.ico b/src/app/favicon.ico deleted file mode 100644 index a306b39..0000000 Binary files a/src/app/favicon.ico and /dev/null differ diff --git a/src/app/layout.tsx b/src/app/layout.tsx deleted file mode 100644 index 860ad68..0000000 --- a/src/app/layout.tsx +++ /dev/null @@ -1,37 +0,0 @@ -import type { Metadata } from 'next' -import { Inter, Geist_Mono } from 'next/font/google' -import './globals.css' -import { createMetadata, viewport, organizationJsonLd, websiteJsonLd } from '@/content/seo' -import { RootProvider } from 'fumadocs-ui/provider/next' -import Script from 'next/script' - -const inter = Inter({ subsets: ['latin'], variable: '--font-inter' }) -const geistMono = Geist_Mono({ subsets: ['latin'], variable: '--font-geist-mono' }) - -export const metadata: Metadata = createMetadata() -export { viewport } - -export default function RootLayout({ - children -}: Readonly<{ - children: React.ReactNode -}>) { - return ( - - - + + +