diff --git a/.github/actions/setup-go/action.yml b/.github/actions/setup-go/action.yml index ca8f69c5..f7987984 100644 --- a/.github/actions/setup-go/action.yml +++ b/.github/actions/setup-go/action.yml @@ -1,5 +1,10 @@ name: Setup Go from go.mod description: Detect the Go toolchain version from go.mod and install it with caching enabled +inputs: + version: + description: "Go version to use (overrides go.mod detection)" + required: false + default: "" outputs: version: description: Detected Go version @@ -13,15 +18,20 @@ runs: run: | set -euo pipefail - VERSION="" - TOOLCHAIN_VERSION=$(grep -E '^toolchain go[0-9]+\.[0-9]+(\.[0-9]+)?$' go.mod | cut -d ' ' -f 2 | sed 's/^go//' || true) - if [ -n "$TOOLCHAIN_VERSION" ]; then - VERSION="$TOOLCHAIN_VERSION" - echo "Detected toolchain directive: go$VERSION" + if [ -n "${{ inputs.version }}" ]; then + VERSION="${{ inputs.version }}" + echo "Using provided version: go$VERSION" else - VERSION=$(grep -E '^go [0-9]+\.[0-9]+(\.[0-9]+)?$' go.mod | cut -d ' ' -f 2 || true) - if [ -n "$VERSION" ]; then - echo "Detected go directive: $VERSION" + VERSION="" + TOOLCHAIN_VERSION=$(grep -E '^toolchain go[0-9]+\.[0-9]+(\.[0-9]+)?$' go.mod | cut -d ' ' -f 2 | sed 's/^go//' || true) + if [ -n "$TOOLCHAIN_VERSION" ]; then + VERSION="$TOOLCHAIN_VERSION" + echo "Detected toolchain directive: go$VERSION" + else + VERSION=$(grep -E '^go [0-9]+\.[0-9]+(\.[0-9]+)?$' go.mod | cut -d ' ' -f 2 || true) + if [ -n "$VERSION" ]; then + echo "Detected go directive: $VERSION" + fi fi fi diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 288db9b0..56048fa5 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -33,6 +33,8 @@ jobs: - name: Setup Go id: setup-go uses: ./.github/actions/setup-go + with: + version: "1.25.6" - name: Prepare Build Variables id: vars diff --git a/.gitignore b/.gitignore index 506ce99c..caff401c 100644 --- a/.gitignore +++ b/.gitignore @@ -28,3 +28,18 @@ build/ 
*.swagger.json + + +.roo/ +.roomodes +docs/context.json +docs/requirements.json +docs/decisions.md +docs/human-playbook.md +docs/new-feature* +plans/ +CLAUDE.md +.claude/ +.codex +.agents +AGENTS.md \ No newline at end of file diff --git a/app/proto_bridge.go b/app/proto_bridge.go index 74abb220..38159e76 100644 --- a/app/proto_bridge.go +++ b/app/proto_bridge.go @@ -17,5 +17,6 @@ func init() { // Lumera module enums. protobridge.RegisterEnum("lumera.action.v1.ActionType", actiontypes.ActionType_value) protobridge.RegisterEnum("lumera.action.v1.ActionState", actiontypes.ActionState_value) + protobridge.RegisterEnum("lumera.action.v1.HashAlgo", actiontypes.HashAlgo_value) protobridge.RegisterEnum("lumera.supernode.v1.SuperNodeState", supernodetypes.SuperNodeState_value) } diff --git a/buf.lock b/buf.lock index 0703ad4f..4a4b8994 100644 --- a/buf.lock +++ b/buf.lock @@ -5,8 +5,8 @@ deps: commit: 04467658e59e44bbb22fe568206e1f70 digest: b5:8058c0aadbee8c9af67a9cefe86492c6c0b0bd5b4526b0ec820507b91fc9b0b5efbebca97331854576d2d279b0b3f5ed6a7abb0640cb640c4186532239c48fc4 - name: buf.build/cosmos/cosmos-sdk - commit: 650cd9ad7f7a468e8e19975269958658 - digest: b5:652a0cd9aa3c220bb12b558f29b30ca5c248b994420472c9c2a54eed3d33356b1307e51687c1909ea4f535a2a1e180895b8cda83b58a4697003009d17fdbc154 + commit: 65fa41963e6a41dd95a35934239029df + digest: b5:45c2788f1c8ca1c0e72f643d51dba37c3b25d113ee166277cd60dfd49404e713a178e177beaa7d6d016f3853722d77f8fbf40e9444e173cd3d89754d39ca0427 - name: buf.build/cosmos/gogo-proto commit: 88ef6483f90f478fb938c37dde52ece3 digest: b5:f0c69202c9bca9672dc72a9737ea9bc83744daaed2b3da77e3a95b0e53b86dee76b5a7405b993181d6c863fd64afaca0976a302f700d6c4912eb1692a1782c0a @@ -23,5 +23,5 @@ deps: commit: 004180b77378443887d3b55cabc00384 digest: b5:e8f475fe3330f31f5fd86ac689093bcd274e19611a09db91f41d637cb9197881ce89882b94d13a58738e53c91c6e4bae7dc1feba85f590164c975a89e25115dc - name: buf.build/protocolbuffers/wellknowntypes - commit: 4e1ccfa6827947beb55974645a315b8d 
- digest: b5:eb5228b1abd02064d6ff0248918500c1ec1ce7df69126af3f220c0b67d81ff45bdf9f016a8e66cd9c1e534f18afc6d8e090d400604c5331d551a68d05f7e7be9 + commit: 9d16d599a978406980f6e2f081331a93 + digest: b5:dd06e497a5c52f5ddf6ec02b3c7d289cc6c0432093fc2f6bf7a4fb5fae786c3e4c893e55d2759ffb6833268daf3de0bce303a406fed15725790528f2c27dc219 diff --git a/devnet/config/config.json b/devnet/config/config.json index 7a6e5b3c..df89ff1f 100644 --- a/devnet/config/config.json +++ b/devnet/config/config.json @@ -33,6 +33,6 @@ "account_balance": "10000000ulume" }, "hermes": { - "enabled": true + "enabled": false } } diff --git a/devnet/go.mod b/devnet/go.mod index f5f27906..f2cac661 100644 --- a/devnet/go.mod +++ b/devnet/go.mod @@ -5,7 +5,7 @@ go 1.25.5 replace ( // Local development - uncomment these for local testing // Comment lines with github.com/LumeraProtocol/ before releasing - // github.com/LumeraProtocol/lumera => .. + // github.com/LumeraProtocol/lumera => .. //github.com/LumeraProtocol/sdk-go => ../../sdk-go github.com/envoyproxy/protoc-gen-validate => github.com/bufbuild/protoc-gen-validate v1.3.0 github.com/lyft/protoc-gen-validate => github.com/envoyproxy/protoc-gen-validate v1.3.0 @@ -19,9 +19,11 @@ require ( github.com/LumeraProtocol/lumera v1.10.0 github.com/LumeraProtocol/sdk-go v1.0.8 github.com/cosmos/cosmos-sdk v0.53.5 + github.com/cosmos/gogoproto v1.7.2 github.com/cosmos/ibc-go/v10 v10.5.0 github.com/stretchr/testify v1.11.1 go.uber.org/zap v1.27.0 + google.golang.org/grpc v1.77.0 gopkg.in/yaml.v2 v2.4.0 ) @@ -65,7 +67,6 @@ require ( github.com/cosmos/cosmos-proto v1.0.0-beta.5 // indirect github.com/cosmos/go-bip39 v1.0.0 // indirect github.com/cosmos/gogogateway v1.2.0 // indirect - github.com/cosmos/gogoproto v1.7.2 // indirect github.com/cosmos/iavl v1.2.6 // indirect github.com/cosmos/ics23/go v0.11.0 // indirect github.com/cosmos/ledger-cosmos-go v0.16.0 // indirect @@ -179,7 +180,6 @@ require ( google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // 
indirect google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 // indirect - google.golang.org/grpc v1.77.0 // indirect google.golang.org/protobuf v1.36.11 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.5.2 // indirect diff --git a/devnet/go.sum b/devnet/go.sum index 8df15cb8..0cd78b6c 100644 --- a/devnet/go.sum +++ b/devnet/go.sum @@ -109,8 +109,6 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50 github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 h1:ig/FpDD2JofP/NExKQUbn7uOSZzJAQqogfqluZK4ed4= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/LumeraProtocol/lumera v1.10.0 h1:IIuvqlFNUPoSkTJ3DoKDNHtr3E0+8GmE4CiNbgTzI2s= -github.com/LumeraProtocol/lumera v1.10.0/go.mod h1:p2sZZG3bLzSBdaW883qjuU3DXXY4NJzTTwLywr8uI0w= github.com/LumeraProtocol/rq-go v0.2.1 h1:8B3UzRChLsGMmvZ+UVbJsJj6JZzL9P9iYxbdUwGsQI4= github.com/LumeraProtocol/rq-go v0.2.1/go.mod h1:APnKCZRh1Es2Vtrd2w4kCLgAyaL5Bqrkz/BURoRJ+O8= github.com/LumeraProtocol/sdk-go v1.0.8 h1:8M4QgrrmblDM42ABaKxFfjeF9/xtTHDkRwTYHEbtrSk= diff --git a/devnet/tests/validator/lep5_test.go b/devnet/tests/validator/lep5_test.go new file mode 100644 index 00000000..9047b080 --- /dev/null +++ b/devnet/tests/validator/lep5_test.go @@ -0,0 +1,1323 @@ +package validator + +import ( + "bytes" + "context" + "encoding/hex" + "encoding/json" + "fmt" + "net" + "net/http" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "testing" + "time" + + sdkmath "cosmossdk.io/math" + "github.com/LumeraProtocol/lumera/x/action/v1/keeper" + "github.com/LumeraProtocol/lumera/x/action/v1/merkle" + 
actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + sntypes "github.com/LumeraProtocol/lumera/x/supernode/v1/types" + "github.com/LumeraProtocol/sdk-go/blockchain" + "github.com/LumeraProtocol/sdk-go/cascade" + sdkcrypto "github.com/LumeraProtocol/sdk-go/pkg/crypto" + sdktypes "github.com/LumeraProtocol/sdk-go/types" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + gogoproto "github.com/cosmos/gogoproto/proto" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +const ( + lep5ChunkSize = uint32(262144) + lep5CommitmentType = "lep5/chunk-merkle/v1" + lep5DefaultLumeraGRPC = "localhost:9090" + lep5FinalizeMaxAttempts = 8 + lep5TopSupernodesLimit = int32(25) +) + +var lep5CommitmentHashAlgo = actiontypes.HashAlgo_HASH_ALGO_BLAKE3 + +func TestLEP5CascadeAvailabilityCommitment(t *testing.T) { + runCascadeCommitmentTest(t, 8*uint64(lep5ChunkSize), lep5ChunkSize) +} + +// TestLEP5VariableChunkSizes exercises the availability-commitment flow with +// non-default chunk sizes. Each subtest creates a file of a specific size, +// chunks it with the given chunk size, and runs the full register → finalize → +// DONE cycle. +func TestLEP5VariableChunkSizes(t *testing.T) { + cases := []struct { + name string + fileSize uint64 + chunkSize uint32 + }{ + { + name: "SmallFile_5KB_ChunkSize_1024", + fileSize: 5 * 1024, // 5 KB → 5 chunks of 1024 + chunkSize: 1024, + }, + { + name: "MediumFile_500KB_ChunkSize_131072", + fileSize: 500 * 1024, // 500 KB → 4 chunks (3×131072 + 1×24576) + chunkSize: 131072, + }, + { + name: "TineFile_4B_ChunkSize_1B", + fileSize: 4, // 4 B → 4 chunks of 1 B + chunkSize: 1, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + runCascadeCommitmentTest(t, tc.fileSize, tc.chunkSize) + }) + } +} + +// runCascadeCommitmentTest is the parameterised core of the LEP-5 cascade +// availability-commitment E2E test. 
It creates a file of fileSize bytes, splits +// it into chunks of chunkSize bytes (last chunk may be smaller), builds a +// Merkle tree, registers a Cascade action with the commitment, finalises it +// with valid proofs, and asserts the action reaches DONE with correct metadata. +func runCascadeCommitmentTest(t *testing.T, fileSize uint64, chunkSize uint32) { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Minute) + defer cancel() + + rpcAddr := resolveLumeraRPC() + if resolvedRPC, err := lep5ResolveReachableRPC(ctx); err == nil { + rpcAddr = resolvedRPC + } + + grpcAddr := lep5ResolveReachableGRPC(lep5NormalizeGRPCAddr(getenv("LUMERA_GRPC_ADDR", lep5DefaultLumeraGRPC))) + chainID := getenv("LUMERA_CHAIN_ID", defaultLumeraChainID) + denom := getenv("LUMERA_DENOM", defaultLumeraDenom) + moniker := detectValidatorMoniker() + + if _, _, err := lep5NextFinalizeSeed(ctx, rpcAddr); err != nil { + t.Skipf("skipping LEP-5 devnet E2E: Lumera RPC not reachable at %s (%v)", rpcAddr, err) + } + + kr, keyName, supernodeAddr, err := lep5LoadSignerKey(ctx, chainID, moniker, rpcAddr, grpcAddr) + if err != nil { + t.Skipf("skipping LEP-5 devnet E2E: signer key unavailable (%v)", err) + } + + bc, err := blockchain.New(ctx, blockchain.Config{ + ChainID: chainID, + GRPCAddr: grpcAddr, + RPCEndpoint: rpcAddr, + AccountHRP: "lumera", + FeeDenom: denom, + GasPrice: sdkmath.LegacyNewDecWithPrec(25, 3), + Timeout: 30 * time.Second, + MaxRecvMsgSize: 10 * 1024 * 1024, + MaxSendMsgSize: 10 * 1024 * 1024, + InsecureGRPC: true, + }, kr, keyName) + require.NoError(t, err, "create lumera blockchain client") + defer bc.Close() + + cascadeClient, err := cascade.New(ctx, cascade.Config{ + ChainID: chainID, + GRPCAddr: grpcAddr, + Address: supernodeAddr, + KeyName: keyName, + Timeout: 30 * time.Second, + }, kr) + require.NoError(t, err, "create cascade client") + defer cascadeClient.Close() + + filePath, chunks, totalSize := lep5CreateTestFileWithSize(t, fileSize, 
chunkSize) + numChunks := uint32(len(chunks)) + t.Logf("--- Test file: %d bytes, chunkSize=%d, numChunks=%d ---", totalSize, chunkSize, numChunks) + + tree, err := merkle.BuildTree(chunks) + require.NoError(t, err, "build merkle tree") + + // Client picks challenge indices at registration time. + // Default SVC challenge count is 8, capped to the actual number of chunks. + challengeCount := uint32(8) + if challengeCount > numChunks { + challengeCount = numChunks + } + challengeIndices := make([]uint32, 0, challengeCount) + for i := uint32(0); i < challengeCount; i++ { + challengeIndices = append(challengeIndices, i) + } + + lep5PrintMerkleTreeDiagram(t, tree, challengeIndices) + + commitment := &actiontypes.AvailabilityCommitment{ + CommitmentType: lep5CommitmentType, + HashAlgo: lep5CommitmentHashAlgo, + ChunkSize: chunkSize, + TotalSize: totalSize, + NumChunks: numChunks, + Root: append([]byte(nil), tree.Root[:]...), + ChallengeIndices: challengeIndices, + } + + requestMsg, _, err := cascadeClient.CreateRequestActionMessage(ctx, supernodeAddr, filePath, &cascade.UploadOptions{Public: true}) + require.NoError(t, err, "build request action message") + + var requestMeta actiontypes.CascadeMetadata + require.NoError(t, json.Unmarshal([]byte(requestMsg.Metadata), &requestMeta), "unmarshal request metadata") + requestMeta.AvailabilityCommitment = commitment + + t.Log("=== REQUEST METADATA (before submit) ===") + lep5PrintCascadeMetadata(t, &requestMeta) + + requestMetaJSON, err := json.Marshal(&requestMeta) + require.NoError(t, err, "marshal request metadata with availability commitment") + + fileSizeKbs := lep5ResolveFileSizeKBs(filePath, requestMsg.FileSizeKbs) + + t.Log("--- Submitting RequestAction tx (BroadcastMode=SYNC) ---") + t.Log(" The SDK broadcasts with BROADCAST_MODE_SYNC: the node validates the tx in the mempool and returns immediately.") + t.Log(" Then WaitForTxInclusion subscribes via CometBFT websocket (or polls gRPC) until the tx is mined into a 
block.") + t.Logf(" Devnet block time is ~1-5s, so inclusion is typically fast.") + + reqStart := time.Now() + requestRes, err := bc.RequestActionTx( + ctx, + supernodeAddr, + actiontypes.ActionTypeCascade, + string(requestMetaJSON), + requestMsg.Price, + requestMsg.ExpirationTime, + fileSizeKbs, + "lep5-e2e-register", + ) + require.NoError(t, err, "submit request action tx") + require.NotEmpty(t, requestRes.ActionID, "request action id") + t.Logf("--- RequestAction tx included in block (height=%d, txHash=%s, took %s) ---", requestRes.Height, requestRes.TxHash, time.Since(reqStart)) + t.Logf(" Action ID assigned by chain: %s", requestRes.ActionID) + + t.Log("--- Waiting for action to reach PENDING state (polling every 2s via gRPC GetAction) ---") + pendingStart := time.Now() + _, err = bc.Action.WaitForState(ctx, requestRes.ActionID, sdktypes.ActionStatePending, 2*time.Second) + require.NoError(t, err, "wait for action pending") + t.Logf("--- Action reached PENDING state (took %s) ---", time.Since(pendingStart)) + + t.Log("--- Querying registered action from chain to verify on-chain metadata ---") + queryClient := actiontypes.NewQueryClient(bc.GRPCConn()) + registered, err := queryClient.GetAction(ctx, &actiontypes.QueryGetActionRequest{ActionID: requestRes.ActionID}) + require.NoError(t, err, "query registered action") + require.NotNil(t, registered.Action) + + var registeredMeta actiontypes.CascadeMetadata + require.NoError(t, gogoproto.Unmarshal(registered.Action.Metadata, ®isteredMeta), "decode registered metadata") + + t.Log("=== REGISTERED METADATA (from chain, after request) ===") + t.Logf(" Action.State: %s", registered.Action.State) + lep5PrintCascadeMetadata(t, ®isteredMeta) + + require.NotNil(t, registeredMeta.AvailabilityCommitment, "stored availability commitment") + require.Equal(t, commitment, registeredMeta.AvailabilityCommitment, "stored commitment must match request") + + proofCount := uint32(len(challengeIndices)) + + t.Log("--- Building 
finalization payload (rqIDs + chunk merkle proofs) ---") + rqIDs := make([]string, 0, registeredMeta.RqIdsMax) + for i := registeredMeta.RqIdsIc; i < registeredMeta.RqIdsIc+registeredMeta.RqIdsMax; i++ { + id, idErr := keeper.CreateKademliaID(registeredMeta.Signatures, i) + require.NoError(t, idErr, "create rq id %d", i) + rqIDs = append(rqIDs, id) + } + + // Generate proofs for the challenge indices stored in the commitment. + proofs := make([]*actiontypes.ChunkProof, 0, len(challengeIndices)) + for _, idx := range challengeIndices { + proof, pErr := tree.GenerateProof(int(idx)) + require.NoError(t, pErr, "generate proof for chunk %d", idx) + proofs = append(proofs, lep5ToChunkProof(proof)) + } + + t.Log("--- ChunkProofs prepared for finalization ---") + for i, p := range proofs { + t.Logf(" Proof [%d]: ChunkIndex=%d, LeafHash=%x, PathLength=%d", i, p.ChunkIndex, p.LeafHash, len(p.PathHashes)) + } + + finalizeMeta := &actiontypes.CascadeMetadata{ + RqIdsIds: rqIDs, + ChunkProofs: proofs, + } + finalizeJSON, mErr := json.Marshal(finalizeMeta) + require.NoError(t, mErr, "marshal finalize metadata") + + t.Logf("--- Finalization payload ready: %d rqIDs, %d chunk proofs, challengeIndices=%v ---", len(rqIDs), len(proofs), challengeIndices) + + var lastTxHash string + var finalizeSucceeded bool + + t.Logf("--- Submitting FinalizeAction tx (up to %d attempts, BroadcastMode=SYNC + WaitForTxInclusion) ---", lep5FinalizeMaxAttempts) + for attempt := 1; attempt <= lep5FinalizeMaxAttempts; attempt++ { + t.Logf(" [attempt %d/%d] Broadcasting FinalizeAction tx...", attempt, lep5FinalizeMaxAttempts) + finalizeStart := time.Now() + finalizeRes, fErr := bc.FinalizeActionTx( + ctx, + supernodeAddr, + requestRes.ActionID, + actiontypes.ActionTypeCascade, + string(finalizeJSON), + fmt.Sprintf("lep5-e2e-finalize-%d", attempt), + ) + require.NoError(t, fErr, "submit finalize tx attempt %d", attempt) + lastTxHash = finalizeRes.TxHash + t.Logf(" [attempt %d/%d] FinalizeAction tx included 
in block (txHash=%s, took %s)", attempt, lep5FinalizeMaxAttempts, finalizeRes.TxHash, time.Since(finalizeStart)) + + t.Logf(" [attempt %d/%d] Re-querying tx to verify on-chain result code...", attempt, lep5FinalizeMaxAttempts) + txResp, txErr := bc.GetTx(ctx, finalizeRes.TxHash) + require.NoError(t, txErr, "query finalize tx attempt %d", attempt) + require.NotNil(t, txResp.TxResponse, "finalize tx response attempt %d", attempt) + + if txResp.TxResponse.Code == 0 { + t.Logf(" [attempt %d/%d] FinalizeAction tx succeeded (code=0)", attempt, lep5FinalizeMaxAttempts) + finalizeSucceeded = true + break + } + + t.Logf( + " [attempt %d/%d] FinalizeAction tx failed on-chain (code=%d, log=%s), sleeping 2s before retry...", + attempt, lep5FinalizeMaxAttempts, + txResp.TxResponse.Code, + txResp.TxResponse.RawLog, + ) + time.Sleep(2 * time.Second) + } + + require.True(t, finalizeSucceeded, "finalize tx did not succeed after retries (last tx=%s)", lastTxHash) + + t.Log("--- Waiting for action to reach DONE state (polling every 2s via gRPC GetAction) ---") + doneStart := time.Now() + _, err = bc.Action.WaitForState(ctx, requestRes.ActionID, sdktypes.ActionStateDone, 2*time.Second) + require.NoError(t, err, "wait for action done") + t.Logf("--- Action reached DONE state (took %s) ---", time.Since(doneStart)) + + t.Log("--- Final verification: querying action from chain to confirm DONE state and metadata ---") + finalAction, err := queryClient.GetAction(ctx, &actiontypes.QueryGetActionRequest{ActionID: requestRes.ActionID}) + require.NoError(t, err, "query finalized action") + require.NotNil(t, finalAction.Action) + require.Equal(t, actiontypes.ActionStateDone, finalAction.Action.State, "action state must be DONE") + + var finalMeta actiontypes.CascadeMetadata + require.NoError(t, gogoproto.Unmarshal(finalAction.Action.Metadata, &finalMeta), "decode finalized metadata") + + t.Log("=== FINALIZED METADATA (from chain, after finalize) ===") + t.Logf(" Action.State: %s", 
finalAction.Action.State) + lep5PrintCascadeMetadata(t, &finalMeta) + + require.NotNil(t, finalMeta.AvailabilityCommitment, "finalized metadata commitment must exist") + require.Equal(t, commitment.Root, finalMeta.AvailabilityCommitment.Root, "commitment root must be preserved") + require.Equal(t, chunkSize, finalMeta.AvailabilityCommitment.ChunkSize, "chunk size must be preserved") + require.Equal(t, numChunks, finalMeta.AvailabilityCommitment.NumChunks, "num chunks must be preserved") + require.Len(t, finalMeta.ChunkProofs, int(proofCount), "chunk proof count") +} + +// TestLEP5CascadeAvailabilityCommitmentFailure registers a Cascade action with +// a valid AvailabilityCommitment, then attempts to finalize it with corrupt +// chunk proofs (flipped leaf hashes). The test asserts that the finalization +// transaction is rejected on-chain and the action state remains PENDING. +func TestLEP5CascadeAvailabilityCommitmentFailure(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Minute) + defer cancel() + + rpcAddr := resolveLumeraRPC() + if resolvedRPC, err := lep5ResolveReachableRPC(ctx); err == nil { + rpcAddr = resolvedRPC + } + + grpcAddr := lep5ResolveReachableGRPC(lep5NormalizeGRPCAddr(getenv("LUMERA_GRPC_ADDR", lep5DefaultLumeraGRPC))) + chainID := getenv("LUMERA_CHAIN_ID", defaultLumeraChainID) + denom := getenv("LUMERA_DENOM", defaultLumeraDenom) + moniker := detectValidatorMoniker() + + if _, _, err := lep5NextFinalizeSeed(ctx, rpcAddr); err != nil { + t.Skipf("skipping LEP-5 devnet E2E: Lumera RPC not reachable at %s (%v)", rpcAddr, err) + } + + kr, keyName, supernodeAddr, err := lep5LoadSignerKey(ctx, chainID, moniker, rpcAddr, grpcAddr) + if err != nil { + t.Skipf("skipping LEP-5 devnet E2E: signer key unavailable (%v)", err) + } + + bc, err := blockchain.New(ctx, blockchain.Config{ + ChainID: chainID, + GRPCAddr: grpcAddr, + RPCEndpoint: rpcAddr, + AccountHRP: "lumera", + FeeDenom: denom, + GasPrice: 
sdkmath.LegacyNewDecWithPrec(25, 3), + Timeout: 30 * time.Second, + MaxRecvMsgSize: 10 * 1024 * 1024, + MaxSendMsgSize: 10 * 1024 * 1024, + InsecureGRPC: true, + }, kr, keyName) + require.NoError(t, err, "create lumera blockchain client") + defer bc.Close() + + cascadeClient, err := cascade.New(ctx, cascade.Config{ + ChainID: chainID, + GRPCAddr: grpcAddr, + Address: supernodeAddr, + KeyName: keyName, + Timeout: 30 * time.Second, + }, kr) + require.NoError(t, err, "create cascade client") + defer cascadeClient.Close() + + // --- Register action (identical to the success test) --- + filePath, chunks, totalSize := lep5CreateTestFile(t, 8) + tree, err := merkle.BuildTree(chunks) + require.NoError(t, err, "build merkle tree") + + challengeCount := uint32(8) + if challengeCount > uint32(len(chunks)) { + challengeCount = uint32(len(chunks)) + } + challengeIndices := make([]uint32, 0, challengeCount) + for i := uint32(0); i < challengeCount; i++ { + challengeIndices = append(challengeIndices, i) + } + + lep5PrintMerkleTreeDiagram(t, tree, challengeIndices) + + commitment := &actiontypes.AvailabilityCommitment{ + CommitmentType: lep5CommitmentType, + HashAlgo: lep5CommitmentHashAlgo, + ChunkSize: lep5ChunkSize, + TotalSize: totalSize, + NumChunks: uint32(len(chunks)), + Root: append([]byte(nil), tree.Root[:]...), + ChallengeIndices: challengeIndices, + } + + requestMsg, _, err := cascadeClient.CreateRequestActionMessage(ctx, supernodeAddr, filePath, &cascade.UploadOptions{Public: true}) + require.NoError(t, err, "build request action message") + + var requestMeta actiontypes.CascadeMetadata + require.NoError(t, json.Unmarshal([]byte(requestMsg.Metadata), &requestMeta), "unmarshal request metadata") + requestMeta.AvailabilityCommitment = commitment + + t.Log("=== REQUEST METADATA (before submit) ===") + lep5PrintCascadeMetadata(t, &requestMeta) + + requestMetaJSON, err := json.Marshal(&requestMeta) + require.NoError(t, err, "marshal request metadata with availability 
commitment") + + fileSizeKbs := lep5ResolveFileSizeKBs(filePath, requestMsg.FileSizeKbs) + + t.Log("--- [FAILURE TEST] Submitting RequestAction tx ---") + requestRes, err := bc.RequestActionTx( + ctx, + supernodeAddr, + actiontypes.ActionTypeCascade, + string(requestMetaJSON), + requestMsg.Price, + requestMsg.ExpirationTime, + fileSizeKbs, + "lep5-e2e-register-bad-finalize", + ) + require.NoError(t, err, "submit request action tx") + require.NotEmpty(t, requestRes.ActionID, "request action id") + t.Logf("--- [FAILURE TEST] RequestAction included (height=%d, txHash=%s, actionID=%s) ---", requestRes.Height, requestRes.TxHash, requestRes.ActionID) + + t.Log("--- [FAILURE TEST] Waiting for action PENDING state ---") + _, err = bc.Action.WaitForState(ctx, requestRes.ActionID, sdktypes.ActionStatePending, 2*time.Second) + require.NoError(t, err, "wait for action pending") + t.Log("--- [FAILURE TEST] Action reached PENDING state ---") + + t.Log("--- [FAILURE TEST] Querying registered action from chain to verify on-chain metadata ---") + queryClient := actiontypes.NewQueryClient(bc.GRPCConn()) + registered, err := queryClient.GetAction(ctx, &actiontypes.QueryGetActionRequest{ActionID: requestRes.ActionID}) + require.NoError(t, err, "query registered action") + require.NotNil(t, registered.Action) + + var registeredMeta actiontypes.CascadeMetadata + require.NoError(t, gogoproto.Unmarshal(registered.Action.Metadata, ®isteredMeta), "decode registered metadata") + + t.Log("=== REGISTERED METADATA (from chain, after request) ===") + t.Logf(" Action.State: %s", registered.Action.State) + lep5PrintCascadeMetadata(t, ®isteredMeta) + + // --- Build finalization payload with CORRUPT proofs --- + rqIDs := make([]string, 0, registeredMeta.RqIdsMax) + for i := registeredMeta.RqIdsIc; i < registeredMeta.RqIdsIc+registeredMeta.RqIdsMax; i++ { + id, idErr := keeper.CreateKademliaID(registeredMeta.Signatures, i) + require.NoError(t, idErr, "create rq id %d", i) + rqIDs = append(rqIDs, id) 
+ } + + // Generate valid proofs then corrupt the leaf hashes by flipping all bytes. + proofs := make([]*actiontypes.ChunkProof, 0, len(challengeIndices)) + for _, idx := range challengeIndices { + proof, pErr := tree.GenerateProof(int(idx)) + require.NoError(t, pErr, "generate proof for chunk %d", idx) + cp := lep5ToChunkProof(proof) + // Corrupt the leaf hash: flip every byte so merkle verification fails. + for j := range cp.LeafHash { + cp.LeafHash[j] ^= 0xFF + } + proofs = append(proofs, cp) + } + + t.Log("--- ChunkProofs prepared for finalization (CORRUPT) ---") + for i, p := range proofs { + t.Logf(" Proof [%d]: ChunkIndex=%d, LeafHash=%x, PathLength=%d", i, p.ChunkIndex, p.LeafHash, len(p.PathHashes)) + } + + finalizeMeta := &actiontypes.CascadeMetadata{ + RqIdsIds: rqIDs, + ChunkProofs: proofs, + } + finalizeJSON, mErr := json.Marshal(finalizeMeta) + require.NoError(t, mErr, "marshal corrupt finalize metadata") + + t.Logf("--- [FAILURE TEST] Finalization payload ready: %d rqIDs, %d CORRUPT chunk proofs ---", len(rqIDs), len(proofs)) + + // --- Submit bad finalization and expect on-chain rejection --- + t.Log("--- [FAILURE TEST] Submitting FinalizeAction tx with corrupt proofs ---") + finalizeRes, fErr := bc.FinalizeActionTx( + ctx, + supernodeAddr, + requestRes.ActionID, + actiontypes.ActionTypeCascade, + string(finalizeJSON), + "lep5-e2e-finalize-bad", + ) + + if fErr != nil { + // Tx was rejected at broadcast/CheckTx level – this is an acceptable failure path. + t.Logf("--- [FAILURE TEST] FinalizeAction tx rejected at broadcast level: %v ---", fErr) + } else { + // Tx was included in a block – verify the on-chain result code is non-zero. 
+ require.NotEmpty(t, finalizeRes.TxHash, "finalize tx hash must not be empty") + t.Logf("--- [FAILURE TEST] FinalizeAction tx included (txHash=%s), verifying on-chain code ---", finalizeRes.TxHash) + + txResp, txErr := bc.GetTx(ctx, finalizeRes.TxHash) + require.NoError(t, txErr, "query finalize tx") + require.NotNil(t, txResp.TxResponse, "finalize tx response") + require.NotEqual(t, uint32(0), txResp.TxResponse.Code, + "corrupt finalize tx must fail on-chain (code=%d, log=%s)", + txResp.TxResponse.Code, txResp.TxResponse.RawLog) + t.Logf("--- [FAILURE TEST] FinalizeAction tx failed on-chain as expected (code=%d, log=%s) ---", + txResp.TxResponse.Code, txResp.TxResponse.RawLog) + } + + // --- Verify the action is still PENDING (not DONE) --- + t.Log("--- [FAILURE TEST] Querying action state to confirm it remains PENDING ---") + actionResp, err := queryClient.GetAction(ctx, &actiontypes.QueryGetActionRequest{ActionID: requestRes.ActionID}) + require.NoError(t, err, "query action after failed finalize") + require.NotNil(t, actionResp.Action) + require.Equal(t, actiontypes.ActionStatePending, actionResp.Action.State, + "action state must remain PENDING after failed finalization, got %s", actionResp.Action.State) + t.Logf("--- [FAILURE TEST] Action %s confirmed in PENDING state (corrupt finalization correctly rejected) ---", requestRes.ActionID) + + var finalMeta actiontypes.CascadeMetadata + require.NoError(t, gogoproto.Unmarshal(actionResp.Action.Metadata, &finalMeta), "decode finalized metadata") + + t.Log("=== FINALIZED METADATA (from chain, after finalize) ===") + t.Logf(" Action.State: %s", actionResp.Action.State) + lep5PrintCascadeMetadata(t, &finalMeta) +} + +func lep5ResolveMnemonicPath(chainID, moniker string) (string, bool) { + if fromEnv := strings.TrimSpace(os.Getenv("LUMERA_SUPERNODE_MNEMONIC_FILE")); fromEnv != "" { + if _, err := os.Stat(fromEnv); err == nil { + return fromEnv, true + } + return "", false + } + + if moniker == "" { + moniker = 
		"supernova_validator_1"
	}

	candidates := []string{
		fmt.Sprintf("/shared/status/%s/sn_mnemonic", moniker),
		fmt.Sprintf("/tmp/%s/shared/status/%s/sn_mnemonic", chainID, moniker),
		fmt.Sprintf("/tmp/lumera-devnet-1/shared/status/%s/sn_mnemonic", moniker),
		fmt.Sprintf("/tmp/lumera-devnet/shared/status/%s/sn_mnemonic", moniker),
	}

	// First candidate that exists on disk wins.
	for _, candidate := range candidates {
		if _, err := os.Stat(candidate); err == nil {
			return candidate, true
		}
	}

	return "", false
}

// lep5LoadSignerKey locates a local signing key whose address is one of the
// chain's currently-active supernode accounts. It tries, in order:
//  1. mnemonic files from known devnet locations,
//  2. named keys in candidate keyring homes,
//  3. a full listing of each candidate keyring.
// Returns the opened keyring, the matching key name, and its bech32 address,
// or an error naming a sample of the active accounts when nothing matched.
func lep5LoadSignerKey(ctx context.Context, chainID, moniker, rpcAddr, grpcAddr string) (keyring.Keyring, string, string, error) {
	activeSupernodes, err := lep5QueryActiveSupernodeAccounts(ctx, rpcAddr, grpcAddr)
	if err != nil {
		return nil, "", "", fmt.Errorf("query active supernodes: %w", err)
	}

	// Fast path: derive keys directly from devnet mnemonic files.
	if kr, keyName, addr, ok := lep5LoadSignerFromMnemonicCandidates(chainID, moniker, activeSupernodes); ok {
		return kr, keyName, addr, nil
	}

	backend := getenv("LUMERA_KEYRING_BACKEND", "test")
	app := getenv("LUMERA_KEYRING_APP", "lumera")

	for _, home := range lep5KeyringHomeCandidates(chainID) {
		kr, openErr := sdkcrypto.NewKeyring(sdkcrypto.KeyringParams{
			AppName: app,
			Backend: backend,
			Dir:     home,
			Input:   strings.NewReader(""),
		})
		if openErr != nil {
			continue // unusable keyring home; try the next one
		}

		// Try well-known key names first.
		for _, keyName := range lep5SignerKeyNameCandidates(moniker) {
			addr, addrErr := sdkcrypto.AddressFromKey(kr, keyName, "lumera")
			if addrErr != nil {
				continue
			}
			if _, ok := activeSupernodes[addr]; ok {
				return kr, keyName, addr, nil
			}
		}

		// Fall back to scanning every record in this keyring.
		records, listErr := kr.List()
		if listErr != nil {
			continue
		}
		for _, rec := range records {
			accAddr, addrErr := rec.GetAddress()
			if addrErr != nil {
				continue
			}
			addr := accAddr.String()
			if _, ok := activeSupernodes[addr]; ok {
				return kr, rec.Name, addr, nil
			}
		}
	}

	// Nothing matched: report a small, deterministic sample of the active
	// accounts to help the operator diagnose which key is missing locally.
	accounts := make([]string, 0, len(activeSupernodes))
	for addr := range activeSupernodes {
		accounts = append(accounts, addr)
	}

	sort.Strings(accounts)
	if len(accounts) > 5 {
		accounts = accounts[:5]
	}

	return nil, "", "", fmt.Errorf("no local key matched active supernode accounts; sample active accounts=%v", accounts)
}

// lep5QueryActiveSupernodeAccounts queries the supernode module (via gRPC)
// for the top supernodes at the latest finalized height and returns the set
// of their account addresses. Errors if the query yields no active accounts.
func lep5QueryActiveSupernodeAccounts(ctx context.Context, rpcAddr, grpcAddr string) (map[string]struct{}, error) {
	// seedHeight is latest height + 1 (see lep5NextFinalizeSeed), so the
	// query below targets the current latest block (seedHeight - 1).
	seedHeight, _, err := lep5NextFinalizeSeed(ctx, rpcAddr)
	if err != nil {
		return nil, err
	}
	if seedHeight == 0 {
		return nil, fmt.Errorf("invalid block height from rpc")
	}

	queryHeight := int32(seedHeight - 1)
	dialCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	conn, err := grpc.DialContext(
		dialCtx,
		lep5NormalizeGRPCAddr(grpcAddr),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock(), // block until connected so a dead endpoint fails here
	)
	if err != nil {
		return nil, fmt.Errorf("dial grpc %s: %w", grpcAddr, err)
	}
	defer conn.Close()

	queryCtx, queryCancel := context.WithTimeout(ctx, 10*time.Second)
	defer queryCancel()

	client := sntypes.NewQueryClient(conn)
	resp, err := client.GetTopSuperNodesForBlock(queryCtx, &sntypes.QueryGetTopSuperNodesForBlockRequest{
		BlockHeight: queryHeight,
		Limit:       lep5TopSupernodesLimit,
		State:       sntypes.SuperNodeStateActive.String(),
	})
	if err != nil {
		return nil, err
	}

	active := make(map[string]struct{}, len(resp.Supernodes))
	for _, sn := range resp.Supernodes {
		addr := strings.TrimSpace(sn.SupernodeAccount)
		if addr != "" {
			active[addr] = struct{}{}
		}
	}
	if len(active) == 0 {
		return nil, fmt.Errorf("supernode query returned no active accounts")
	}

	return active, nil
}

// lep5LoadSignerFromMnemonicCandidates tries every (mnemonic file, key name)
// pair and returns the first derived key whose address is an active
// supernode account. ok is false when no combination matches.
func lep5LoadSignerFromMnemonicCandidates(chainID, moniker string, activeSupernodes map[string]struct{}) (keyring.Keyring, string, string, bool) {
	for _, mnemonicPath := range lep5MnemonicPathCandidates(chainID, moniker) {
		for _, keyName := range lep5SignerKeyNameCandidates(moniker) {
			kr, _, addr, err := sdkcrypto.LoadKeyringFromMnemonic(keyName, mnemonicPath)
			if err != nil {
				continue
			}
			if _, ok := activeSupernodes[addr]; ok {
				return kr, keyName, addr, true
			}
		}
	}

	return nil, "", "", false
}

// lep5MnemonicPathCandidates returns existing sn_mnemonic file paths, in
// priority order and de-duplicated: explicit env override, the resolved
// per-moniker path, then glob patterns over known devnet layouts.
// Only paths that pass os.Stat are returned.
func lep5MnemonicPathCandidates(chainID, moniker string) []string {
	candidates := make([]string, 0, 16)
	seen := make(map[string]struct{}, 16)

	// add keeps insertion order, skips blanks/dupes, and drops paths that
	// do not exist on disk.
	add := func(path string) {
		path = strings.TrimSpace(path)
		if path == "" {
			return
		}
		if _, exists := seen[path]; exists {
			return
		}
		if _, err := os.Stat(path); err != nil {
			return
		}
		seen[path] = struct{}{}
		candidates = append(candidates, path)
	}

	if fromEnv := strings.TrimSpace(os.Getenv("LUMERA_SUPERNODE_MNEMONIC_FILE")); fromEnv != "" {
		add(fromEnv)
	}
	if resolved, ok := lep5ResolveMnemonicPath(chainID, moniker); ok {
		add(resolved)
	}

	// NOTE(review): patterns mirror the devnet mount layout used elsewhere
	// in these helpers — confirm against the devnet setup scripts.
	for _, pattern := range []string{
		"/shared/status/*/sn_mnemonic",
		fmt.Sprintf("/tmp/%s/shared/status/*/sn_mnemonic", chainID),
		"/tmp/lumera-devnet*/shared/status/*/sn_mnemonic",
		"/tmp/*/shared/status/*/sn_mnemonic",
	} {
		matches, _ := filepath.Glob(pattern)
		for _, match := range matches {
			add(match)
		}
	}

	return candidates
}

// lep5SignerKeyNameCandidates returns de-duplicated key names to probe, in
// priority order: env override, the configured default, hard-coded devnet
// names, names derived from the moniker, and key_name entries read from any
// reachable validators.json (plus their validator→supernode variants).
func lep5SignerKeyNameCandidates(moniker string) []string {
	candidates := make([]string, 0, 8)
	seen := make(map[string]struct{}, 8)

	add := func(name string) {
		name = strings.TrimSpace(name)
		if name == "" {
			return
		}
		if _, exists := seen[name]; exists {
			return
		}
		seen[name] = struct{}{}
		candidates = append(candidates, name)
	}

	add(os.Getenv("LUMERA_SUPERNODE_KEY_NAME"))
	add(resolveLumeraKeyName())
	add("supernova_validator_1_key")
	add("supernova_supernode_1_key")

	if moniker != "" {
		add(moniker + "_key")
		if strings.Contains(moniker, "validator") {
			// Supernode keys are sometimes named after the validator with
			// "validator" swapped for "supernode".
			add(strings.Replace(moniker, "validator", "supernode", 1) + "_key")
		}
	}

	for _, validatorCfgPath := range lep5ValidatorConfigCandidates(getenv("LUMERA_CHAIN_ID", defaultLumeraChainID)) {
		data, err := os.ReadFile(validatorCfgPath)
		if err != nil {
			continue
		}

		// validators.json is expected to be a JSON array of objects with a
		// "key_name" field; anything else is silently skipped.
		var vals []struct {
			KeyName string `json:"key_name"`
		}
		if err := json.Unmarshal(data, &vals); err != nil {
			continue
		}

		for _, v := range vals {
			add(v.KeyName)
			if strings.Contains(v.KeyName, "validator") {
				add(strings.Replace(v.KeyName, "validator", "supernode", 1))
			}
		}
	}

	return candidates
}

// lep5KeyringHomeCandidates returns existing keyring home directories to
// probe, de-duplicated and in priority order (env override, per-chain tmp
// dir, known devnet data dirs, glob matches, then ~/.lumera variants).
// Falls back to ~/.lumera (or /root/.lumera) when nothing exists.
func lep5KeyringHomeCandidates(chainID string) []string {
	candidates := make([]string, 0, 24)
	seen := make(map[string]struct{}, 24)

	// add only accepts paths that exist AND are directories.
	add := func(path string) {
		path = strings.TrimSpace(path)
		if path == "" {
			return
		}
		if _, exists := seen[path]; exists {
			return
		}
		if fi, err := os.Stat(path); err != nil || !fi.IsDir() {
			return
		}
		seen[path] = struct{}{}
		candidates = append(candidates, path)
	}

	add(os.Getenv("LUMERA_HOME"))
	add(fmt.Sprintf("/tmp/%s/.lumera", chainID))
	add("/tmp/lumera-devnet/supernova_validator_1-data")
	add("/tmp/lumera-devnet-1/supernova_validator_1-data")
	add("/tmp/lumera-devnet/validator1-data")
	add("/tmp/lumera-devnet-1/validator1-data")

	for _, pattern := range []string{
		"/tmp/lumera-devnet*/supernova_validator_*-data",
		"/tmp/lumera-devnet*/validator*-data",
		"/tmp/*/supernova_validator_*-data",
		"/tmp/*/validator*-data",
	} {
		matches, _ := filepath.Glob(pattern)
		for _, match := range matches {
			add(match)
		}
	}

	if userHome, err := os.UserHomeDir(); err == nil && userHome != "" {
		add(filepath.Join(userHome, ".lumera"))
		add(filepath.Join(userHome, ".lumera-devnet"))
		add(filepath.Join(userHome, ".lumera-testnet"))
		add(filepath.Join(userHome, ".lumera-upgrade-test"))
	}

	// Guarantee at least one candidate even if it does not exist yet, so the
	// caller's probe loop has something to attempt.
	if len(candidates) == 0 {
		if userHome, err := os.UserHomeDir(); err == nil && userHome != "" {
			candidates = append(candidates, filepath.Join(userHome, ".lumera"))
		} else {
			candidates = append(candidates, "/root/.lumera")
		}
	}

	return candidates
}

// lep5ValidatorConfigCandidates returns existing validators.json file paths
// (regular files only — directories are rejected), de-duplicated, covering
// the env-configured location and known devnet layouts.
func lep5ValidatorConfigCandidates(chainID string) []string {
	candidates := make([]string, 0, 16)
	seen := make(map[string]struct{}, 16)

	add := func(path string) {
		path = strings.TrimSpace(path)
		if path == "" {
			return
		}
		if _, exists := seen[path]; exists {
			return
		}
		fi, err := os.Stat(path)
		if err != nil || fi.IsDir() {
			return
		}
		seen[path] = struct{}{}
		candidates = append(candidates, path)
	}

	add(getenv("LUMERA_VALIDATORS_FILE", defaultValidatorsFile))
	add("/shared/config/validators.json")
	add(fmt.Sprintf("/tmp/%s/shared/config/validators.json", chainID))
	add("/tmp/lumera-devnet/shared/config/validators.json")
	add("/tmp/lumera-devnet-1/shared/config/validators.json")

	for _, pattern := range []string{
		"/tmp/lumera-devnet*/shared/config/validators.json",
		"/tmp/*/shared/config/validators.json",
	} {
		matches, _ := filepath.Glob(pattern)
		for _, match := range matches {
			add(match)
		}
	}

	return candidates
}

// lep5ResolveReachableRPC probes RPC endpoint candidates (env override,
// configured default, then common devnet ports) and returns the first one
// that answers a /status request. Returns the last probe error when every
// candidate fails.
func lep5ResolveReachableRPC(ctx context.Context) (string, error) {
	candidates := []string{
		strings.TrimSpace(os.Getenv("LUMERA_RPC_ADDR")),
		resolveLumeraRPC(),
		"http://localhost:26667",
		"http://127.0.0.1:26667",
		"http://localhost:26677",
		"http://127.0.0.1:26677",
		"http://localhost:26687",
		"http://127.0.0.1:26687",
		"http://localhost:26697",
		"http://127.0.0.1:26697",
		"http://localhost:26607",
		"http://127.0.0.1:26607",
		"http://localhost:26657",
		"http://127.0.0.1:26657",
	}

	seen := make(map[string]struct{}, len(candidates))
	var lastErr error
	for _, candidate := range candidates {
		candidate = strings.TrimSpace(candidate)
		if candidate == "" {
			continue
		}
		if _, ok := seen[candidate]; ok {
			continue
		}
		seen[candidate] = struct{}{}

		// A successful /status round-trip proves the endpoint is live.
		if _, _, err := lep5NextFinalizeSeed(ctx, candidate); err == nil {
			return candidate, nil
		} else {
			lastErr = err
		}
	}

	if lastErr != nil {
		return "", lastErr
	}
	return "", fmt.Errorf("no rpc candidates available")
}

// lep5ResolveReachableGRPC returns the first gRPC candidate (preferred first,
// then common devnet ports) that accepts a TCP connection. When none do, the
// normalized preferred address is returned as a best-effort fallback.
func lep5ResolveReachableGRPC(preferred string) string {
	candidates := []string{
		preferred,
		lep5DefaultLumeraGRPC,
		"localhost:9091",
		"127.0.0.1:9091",
		"localhost:9092",
		"127.0.0.1:9092",
		"localhost:9093",
		"127.0.0.1:9093",
		"localhost:9094",
		"127.0.0.1:9094",
		"localhost:9095",
		"127.0.0.1:9095",
		"localhost:9090",
		"127.0.0.1:9090",
	}
	seen := make(map[string]struct{}, len(candidates))

	for _, candidate := range candidates {
		candidate = lep5NormalizeGRPCAddr(candidate)
		if candidate == "" {
			continue
		}
		if _, ok := seen[candidate]; ok {
			continue
		}
		seen[candidate] = struct{}{}
		if lep5CanDialTCP(candidate) {
			return candidate
		}
	}

	return lep5NormalizeGRPCAddr(preferred)
}

// lep5CanDialTCP reports whether a TCP connection to addr succeeds within a
// short (2s) timeout. The connection is closed immediately.
func lep5CanDialTCP(addr string) bool {
	conn, err := net.DialTimeout("tcp", addr, 2*time.Second)
	if err != nil {
		return false
	}
	_ = conn.Close()
	return true
}

// lep5NormalizeGRPCAddr strips surrounding whitespace and any http(s) scheme
// prefix, since grpc dialing expects a bare host:port.
func lep5NormalizeGRPCAddr(addr string) string {
	out := strings.TrimSpace(addr)
	out = strings.TrimPrefix(out, "http://")
	out = strings.TrimPrefix(out, "https://")
	return out
}

// lep5ResolveFileSizeKBs returns the file size in KiB: the explicit
// msgFileSize override when it parses as a positive integer, otherwise the
// on-disk size rounded UP to whole KiB; 0 when the file cannot be stat'ed.
func lep5ResolveFileSizeKBs(filePath, msgFileSize string) int64 {
	if msgFileSize != "" {
		if parsed, err := strconv.ParseInt(msgFileSize, 10, 64); err == nil && parsed > 0 {
			return parsed
		}
	}
	fi, err := os.Stat(filePath)
	if err != nil {
		return 0
	}
	// Ceiling division: partial KiB counts as a full KiB.
	return (fi.Size() + 1023) / 1024
}

// lep5CreateTestFile creates a test file of exactly numChunks full chunks of
// lep5ChunkSize bytes each; see lep5CreateTestFileWithSize.
func lep5CreateTestFile(t *testing.T, numChunks int) (string, [][]byte, uint64) {
	t.Helper()
	return lep5CreateTestFileWithSize(t, uint64(numChunks)*uint64(lep5ChunkSize), lep5ChunkSize)
}

// lep5CreateTestFileWithSize creates a temporary test file of exactly fileSize
// bytes, splits it into chunks of chunkSize bytes (the last chunk may be
// shorter), writes the file to disk, and returns the path, the chunk slices,
// and the total size. Each chunk is filled with a repeating byte pattern
// derived from its index so that chunk data is deterministic but
// distinguishable across chunks.
+func lep5CreateTestFileWithSize(t *testing.T, fileSize uint64, chunkSize uint32) (string, [][]byte, uint64) { + t.Helper() + + remaining := fileSize + var chunks [][]byte + var fileData bytes.Buffer + idx := 0 + + for remaining > 0 { + sz := uint64(chunkSize) + if sz > remaining { + sz = remaining + } + chunk := bytes.Repeat([]byte{byte(idx%255 + 1)}, int(sz)) + chunks = append(chunks, chunk) + _, err := fileData.Write(chunk) + require.NoError(t, err) + remaining -= sz + idx++ + } + + path := filepath.Join(t.TempDir(), "lep5-e2e.bin") + require.NoError(t, os.WriteFile(path, fileData.Bytes(), 0o600)) + + return path, chunks, uint64(fileData.Len()) +} + +func lep5ToChunkProof(p *merkle.Proof) *actiontypes.ChunkProof { + leaf := make([]byte, merkle.HashSize) + copy(leaf, p.LeafHash[:]) + + pathHashes := make([][]byte, 0, len(p.PathHashes)) + for _, h := range p.PathHashes { + b := make([]byte, merkle.HashSize) + copy(b, h[:]) + pathHashes = append(pathHashes, b) + } + + return &actiontypes.ChunkProof{ + ChunkIndex: p.ChunkIndex, + LeafHash: leaf, + PathHashes: pathHashes, + PathDirections: append([]bool(nil), p.PathDirections...), + } +} + +func lep5NextFinalizeSeed(ctx context.Context, rpcAddr string) (uint64, []byte, error) { + type statusResponse struct { + Result struct { + SyncInfo struct { + LatestBlockHeight string `json:"latest_block_height"` + LatestBlockHash string `json:"latest_block_hash"` + } `json:"sync_info"` + } `json:"result"` + } + + url := strings.TrimSuffix(rpcAddr, "/") + "/status" + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return 0, nil, fmt.Errorf("build status request: %w", err) + } + + resp, err := (&http.Client{Timeout: 10 * time.Second}).Do(req) + if err != nil { + return 0, nil, fmt.Errorf("request status: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return 0, nil, fmt.Errorf("status request failed: %s", resp.Status) + } + + var status statusResponse + 
if err := json.NewDecoder(resp.Body).Decode(&status); err != nil { + return 0, nil, fmt.Errorf("decode status response: %w", err) + } + + latestHeight, err := strconv.ParseUint(strings.TrimSpace(status.Result.SyncInfo.LatestBlockHeight), 10, 64) + if err != nil { + return 0, nil, fmt.Errorf("parse latest block height: %w", err) + } + + hashHex := strings.TrimSpace(status.Result.SyncInfo.LatestBlockHash) + hashHex = strings.TrimPrefix(hashHex, "0x") + hashHex = strings.TrimPrefix(hashHex, "0X") + if hashHex == "" { + return 0, nil, fmt.Errorf("latest block hash is empty") + } + + latestHash, err := hex.DecodeString(hashHex) + if err != nil { + return 0, nil, fmt.Errorf("decode latest block hash: %w", err) + } + + return latestHeight + 1, latestHash, nil +} + +// lep5QueryBlockPrevHash queries the previous-block hash for a specific height +// via CometBFT RPC /block endpoint. The returned hash is the LastBlockId.Hash +// of the block at the given height (i.e., the hash of block height-1). 
func lep5QueryBlockPrevHash(ctx context.Context, rpcAddr string, height uint64) ([]byte, error) {
	url := fmt.Sprintf("%s/block?height=%d", strings.TrimSuffix(rpcAddr, "/"), height)
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, fmt.Errorf("build block request: %w", err)
	}

	resp, err := (&http.Client{Timeout: 10 * time.Second}).Do(req)
	if err != nil {
		return nil, fmt.Errorf("request block: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("block request failed: %s", resp.Status)
	}

	// Decode only result.block.header.last_block_id.hash from the RPC
	// response; all other fields are ignored.
	var blockResp struct {
		Result struct {
			Block struct {
				Header struct {
					LastBlockID struct {
						Hash string `json:"hash"`
					} `json:"last_block_id"`
				} `json:"header"`
			} `json:"block"`
		} `json:"result"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&blockResp); err != nil {
		return nil, fmt.Errorf("decode block response: %w", err)
	}

	// Accept both 0x- and 0X-prefixed hex.
	hashHex := strings.TrimSpace(blockResp.Result.Block.Header.LastBlockID.Hash)
	hashHex = strings.TrimPrefix(hashHex, "0x")
	hashHex = strings.TrimPrefix(hashHex, "0X")
	if hashHex == "" {
		return nil, fmt.Errorf("block %d last_block_id.hash is empty", height)
	}

	return hex.DecodeString(hashHex)
}

// lep5PrintMerkleTreeDiagram prints a level-by-level ASCII diagram of the
// Merkle tree, highlighting challenged leaves (★) and the sibling nodes that
// form the proof paths (P). Padded leaves are marked with ░.
//
// The function is a no-op unless LUMERA_PRINT_MERKLE_TREE=true is set.
func lep5PrintMerkleTreeDiagram(t *testing.T, tree *merkle.Tree, challengeIndices []uint32) {
	t.Helper()

	if os.Getenv("LUMERA_PRINT_MERKLE_TREE") != "true" {
		return
	}

	// Build set of challenged leaf indices for O(1) lookup.
	challengedSet := make(map[int]struct{}, len(challengeIndices))
	for _, idx := range challengeIndices {
		challengedSet[int(idx)] = struct{}{}
	}

	// For each level, track which node indices are proof siblings.
	// A proof sibling is the node paired with the verification-path node at
	// each level; it is the hash included in the Merkle proof.
	proofSiblings := make([]map[int]struct{}, len(tree.Levels))
	for i := range proofSiblings {
		proofSiblings[i] = make(map[int]struct{})
	}

	// Walk each challenge path bottom-up, marking the sibling at every level.
	// NOTE(review): for an even idx at the end of an odd-width level, idx+1
	// may exceed the level's node count (duplicated-last-node padding); the
	// mark is then never matched during printing, which is harmless but means
	// that duplicated sibling is not tagged — confirm against merkle.Tree.
	for _, leafIdx := range challengeIndices {
		idx := int(leafIdx)
		for level := 0; level < len(tree.Levels)-1; level++ {
			if idx%2 == 0 {
				proofSiblings[level][idx+1] = struct{}{}
			} else {
				proofSiblings[level][idx-1] = struct{}{}
			}
			idx /= 2 // parent index on the next level up
		}
	}

	t.Logf("")
	t.Logf("╔══════════════════════════════════════════════════════════════╗")
	t.Logf("║ MERKLE TREE DIAGRAM ║")
	t.Logf("╚══════════════════════════════════════════════════════════════╝")
	t.Logf(" Real leaves: %d | Padded total: %d | Levels: %d",
		tree.LeafCount, len(tree.Levels[0]), len(tree.Levels))
	t.Logf(" Challenge indices: %v", challengeIndices)
	t.Logf("")

	// Print from root level down to leaves.
	for level := len(tree.Levels) - 1; level >= 0; level-- {
		nodes := tree.Levels[level]
		label := fmt.Sprintf("Level %d", level)
		switch {
		case level == len(tree.Levels)-1:
			label += " (root)"
		case level == 0:
			label += " (leaves)"
		}
		t.Logf(" %s:", label)

		for i, hash := range nodes {
			// Abbreviate each 32-byte hash to "aaaaaa..bbbbbb" for display.
			hashHex := hex.EncodeToString(hash[:])
			short := hashHex[:6] + ".." + hashHex[len(hashHex)-6:]

			var markers []string

			if level == 0 {
				if _, ok := challengedSet[i]; ok {
					markers = append(markers, "★ challenged")
				}
				// Leaves at or beyond LeafCount are padding duplicates.
				if i >= tree.LeafCount {
					markers = append(markers, "░ padding")
				}
			}
			if _, ok := proofSiblings[level][i]; ok {
				markers = append(markers, "P proof-sibling")
			}

			tag := ""
			if len(markers) > 0 {
				tag = " ← " + strings.Join(markers, ", ")
			}

			t.Logf(" [%d] %s%s", i, short, tag)
		}
		t.Logf("")
	}

	t.Logf(" Legend: ★ = challenged leaf | P = proof sibling | ░ = padding duplicate")
	t.Logf("")
}

// lep5PrintCascadeMetadata pretty-prints a CascadeMetadata struct via t.Log
// so the operator can inspect what was created / stored on chain.
func lep5PrintCascadeMetadata(t *testing.T, meta *actiontypes.CascadeMetadata) {
	t.Helper()

	t.Logf(" DataHash: %s", meta.DataHash)
	t.Logf(" FileName: %s", meta.FileName)
	t.Logf(" RqIdsIc: %d", meta.RqIdsIc)
	t.Logf(" RqIdsMax: %d", meta.RqIdsMax)
	t.Logf(" Signatures: %s", meta.Signatures)
	t.Logf(" Public: %v", meta.Public)
	t.Logf(" RqIdsIds: (%d entries)", len(meta.RqIdsIds))
	for i, id := range meta.RqIdsIds {
		t.Logf(" [%d] %s", i, id)
	}

	// The LEP-5 commitment block is optional; print it only when present.
	if c := meta.AvailabilityCommitment; c != nil {
		t.Logf(" AvailabilityCommitment:")
		t.Logf(" CommitmentType: %s", c.CommitmentType)
		t.Logf(" HashAlgo: %s", c.HashAlgo)
		t.Logf(" ChunkSize: %d", c.ChunkSize)
		t.Logf(" TotalSize: %d", c.TotalSize)
		t.Logf(" NumChunks: %d", c.NumChunks)
		t.Logf(" Root: %s", hex.EncodeToString(c.Root))
		t.Logf(" ChallengeIndices: %v", c.ChallengeIndices)
	} else {
		t.Log(" AvailabilityCommitment: ")
	}

	t.Logf(" ChunkProofs: (%d entries)", len(meta.ChunkProofs))
	for i, cp := range meta.ChunkProofs {
		t.Logf(" [%d] ChunkIndex=%d LeafHash=%s PathHashes=%d PathDirections=%v",
			i, cp.ChunkIndex, hex.EncodeToString(cp.LeafHash), len(cp.PathHashes), cp.PathDirections)
	}
}

// TestLEP5QueryActionMetadata connects to the local devnet, queries an
action +// by its ID, decodes the protobuf CascadeMetadata, and prints every field so +// the operator can verify the on-chain state matches what was submitted. +// +// Set LUMERA_ACTION_ID to the action ID you want to inspect (default "1"). +func TestLEP5QueryActionMetadata(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + grpcAddr := lep5ResolveReachableGRPC(lep5NormalizeGRPCAddr(getenv("LUMERA_GRPC_ADDR", lep5DefaultLumeraGRPC))) + + if !lep5CanDialTCP(grpcAddr) { + t.Skipf("skipping: cannot reach gRPC at %s", grpcAddr) + } + + dialCtx, dialCancel := context.WithTimeout(ctx, 10*time.Second) + defer dialCancel() + + conn, err := grpc.DialContext( + dialCtx, + grpcAddr, + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithBlock(), + ) + require.NoError(t, err, "dial gRPC %s", grpcAddr) + defer conn.Close() + + actionID := getenv("LUMERA_ACTION_ID", "1") + t.Logf("Querying action ID: %s (set LUMERA_ACTION_ID to override)", actionID) + + queryClient := actiontypes.NewQueryClient(conn) + resp, err := queryClient.GetAction(ctx, &actiontypes.QueryGetActionRequest{ActionID: actionID}) + require.NoError(t, err, "GetAction(%s)", actionID) + require.NotNil(t, resp.Action, "action must not be nil") + + action := resp.Action + t.Log("=== ACTION ===") + t.Logf(" ActionID: %s", action.ActionID) + t.Logf(" Creator: %s", action.Creator) + t.Logf(" ActionType: %s", action.ActionType) + t.Logf(" State: %s", action.State) + t.Logf(" Price: %s", action.Price) + t.Logf(" ExpirationTime: %d", action.ExpirationTime) + t.Logf(" BlockHeight: %d", action.BlockHeight) + t.Logf(" SuperNodes: %v", action.SuperNodes) + t.Logf(" FileSizeKbs: %d", action.FileSizeKbs) + t.Logf(" Metadata bytes: %d", len(action.Metadata)) + + if action.ActionType != actiontypes.ActionTypeCascade { + t.Logf("Action type is %s, not CASCADE – raw metadata hex: %s", action.ActionType, hex.EncodeToString(action.Metadata)) + return + } + + 
var meta actiontypes.CascadeMetadata + require.NoError(t, gogoproto.Unmarshal(action.Metadata, &meta), "decode CascadeMetadata") + + t.Log("=== CASCADE METADATA ===") + lep5PrintCascadeMetadata(t, &meta) +} diff --git a/docs/leps/lep5.md b/docs/leps/lep5.md new file mode 100644 index 00000000..ad9073e4 --- /dev/null +++ b/docs/leps/lep5.md @@ -0,0 +1,91 @@ +# LEP-5 — Cascade Availability Commitment (Testing Guide) + +> **Full design:** [LEP-5 Technical Specification](../new-feature-lep5.md) + +LEP-5 introduces a **Cascade Availability Commitment** system that requires a finalizing SuperNode to prove possession of actual file data via BLAKE3-based Merkle proofs, closing the vulnerability where a malicious SuperNode could finalize actions and collect fees without ever storing anything. + +--- + +## 1. Prepare the Devnet + +```bash +# Build the chain binary and devnet artefacts +make devnet-build-default + +# Compile the devnet Go test binaries +make devnet-tests-build + +# Start the network (foreground — logs stream to terminal) +make devnet-up +``` + +Wait until all validators have produced blocks and supernodes have registered (watch the log output for `supernode-setup.sh` completion). + +--- + +## 2. Verify Supernodes Are ACTIVE + +From inside any validator container (or from the host if ports are forwarded): + +```bash +docker exec lumera-supernova_validator_1 lumerad q supernode list-supernodes +``` + +All supernodes listed must show `state: "ACTIVE"` before running the LEP-5 tests. + +--- + +## 3. 
Run the LEP-5 Tests + +### 3.1 Happy-Path Test (valid Merkle proofs → action reaches DONE) + +```bash +LUMERA_PRINT_MERKLE_TREE=true \ +LUMERA_RPC_ADDR=http://localhost:26667 \ +LUMERA_GRPC_ADDR=localhost:9090 \ + go test -v -run TestLEP5CascadeAvailabilityCommitment -timeout 20m \ + ./devnet/tests/validator/ +``` + +### 3.2 Failure-Path Test (corrupt proofs → finalization rejected, action stays PENDING) + +```bash +LUMERA_PRINT_MERKLE_TREE=true \ +LUMERA_RPC_ADDR=http://localhost:26667 \ +LUMERA_GRPC_ADDR=localhost:9090 \ + go test -v -run TestLEP5CascadeAvailabilityCommitmentFailure -timeout 20m \ + ./devnet/tests/validator/ +``` + +### 3.3 Query Action Metadata (inspect on-chain state for any action) + +```bash +LUMERA_GRPC_ADDR=localhost:9090 \ +LUMERA_ACTION_ID=1 \ + go test -v -run TestLEP5QueryActionMetadata -timeout 2m \ + ./devnet/tests/validator/ +``` + +--- + +## 4. Environment Variables Reference + +| Variable | Default | Description | +|----------|---------|-------------| +| `LUMERA_RPC_ADDR` | auto-probed | CometBFT RPC endpoint | +| `LUMERA_GRPC_ADDR` | `localhost:9090` | gRPC endpoint | +| `LUMERA_CHAIN_ID` | `lumera-devnet-1` | Chain ID | +| `LUMERA_DENOM` | `ulume` | Fee denomination | +| `LUMERA_PRINT_MERKLE_TREE` | *(unset)* | Set to `true` to print an ASCII Merkle tree diagram with challenge highlights | +| `LUMERA_ACTION_ID` | `1` | Action ID for the `TestLEP5QueryActionMetadata` helper | +| `LUMERA_SUPERNODE_MNEMONIC_FILE` | auto-probed | Path to the supernode mnemonic file | + +--- + +## 5. 
What the Tests Verify + +| Test | Verifies | +|------|----------| +| `TestLEP5CascadeAvailabilityCommitment` | Register action with `AvailabilityCommitment` → finalize with valid chunk Merkle proofs → action reaches **DONE** | +| `TestLEP5CascadeAvailabilityCommitmentFailure` | Register action → finalize with **corrupt** leaf hashes → tx rejected on-chain → action stays **PENDING** | +| `TestLEP5QueryActionMetadata` | Query and pretty-print full `CascadeMetadata` for a given action ID | diff --git a/docs/leps/new-feature-lep5.md b/docs/leps/new-feature-lep5.md new file mode 100644 index 00000000..a01be00c --- /dev/null +++ b/docs/leps/new-feature-lep5.md @@ -0,0 +1,1063 @@ +# LEP-5 — Cascade Availability Commitment (Merkle Proof Challenge) + +## Status: Draft +## Author: Lumera Protocol Team +## Created: 2025-02-08 +## Requires: LEP1 (Minimizing Cascade Metadata Size) + +--- + +## 1. Executive Summary + +### 1.1 The Problem + +The current Cascade protocol has a critical vulnerability: **a malicious SuperNode can finalize actions and claim fees without ever receiving or storing the actual file data.** + +This attack is possible because: +- All information needed to compute valid `rq_ids_ids` is available on-chain (creator's signature + counter) +- The verification formula uses only on-chain metadata +- No proof of actual file possession is required at finalization + +**Impact:** +- Users pay fees but data is never stored +- Network storage guarantees are undermined +- Malicious operators can extract value without providing service + +### 1.2 The Solution + +Introduce an **Availability Commitment** system that requires the finalizing SuperNode to prove possession of actual file data: + +1. **Merkle Root Commitment:** At registration, the client computes a Merkle root over fixed-size chunks of the uploaded file and includes it on-chain. + +2. **Challenge-Response Proofs:** At registration, the client also commits a set of challenge indices. 
At finalization, the SuperNode must produce valid Merkle proofs for those challenged chunks — which is only possible if the SuperNode actually has the file. + +**Key Properties:** + +| Property | Guarantee | +|----------|-----------| +| Commitment Binding | Client commits to all file chunks at registration time | +| Challenge Commitment | Client commits challenge indices upfront; SuperNode must prove possession of those exact chunks | +| Proof Compactness | O(log N) proof size per challenged chunk | +| Verification Efficiency | O(m × log N) on-chain verification | + +### 1.3 Why Chunk-Based (Not Symbol-Based) + +The Merkle tree is built over **fixed-size chunks of the original file**, not RaptorQ symbols: + +- **Client practicality:** The web client (JS SDK + rq-lite-wasm) currently calls the RQ API which returns layout metadata and symbol *indices* — not the raw symbol bytes themselves. Exposing potentially thousands of symbol buffers to the browser would require significant WASM API changes and create memory pressure. +- **No WASM changes required:** Chunk-based commitment uses only BLAKE3 hashing over file bytes — fast and available via npm packages (e.g., `blake3`). +- **Determinism without rq_oti:** RaptorQ deterministic re-encoding requires exact OTI parameters and bit-identical library versions across client and SuperNode. Chunk hashing of the original file avoids this fragile coupling entirely. +- **Upgrade path preserved:** A future `SYMBOL_MERKLE_V1` mode can be added when/if rq-library exposes symbol-level iteration in its WASM build, providing even stronger binding to actual Cascade work. + +--- + +## 2. Background + +### 2.1 Current Protocol Flow (Cascade) + +``` + 1. Client registers action with: + - data_hash (hash of entire file) + - rq_ids_signature = Base64(index_file).creators_signature + + 2. SuperNode finalizes with: + - rq_ids_ids = [ID_1, ID_2, ..., ID_50] + where ID_i = Base58(BLAKE3(zstd(rq_ids_signature.counter))) + + 3. 
Action Module verifies: + - Random ID matches formula ✓ + - SuperNode is in top 10 ✓ + - NO PROOF OF FILE POSSESSION ✗ +``` + +### 2.2 The Attack Vector + +``` + MALICIOUS SUPERNODE STRATEGY: + + 1. Monitor blockchain for new Cascade actions + 2. Extract from on-chain data: + - action_id, rq_ids_signature, rq_ids_ic, rq_ids_max + 3. Compute valid IDs WITHOUT the file: + for counter in range(rq_ids_ic, rq_ids_max): + ID = Base58(BLAKE3(zstd(rq_ids_signature.counter))) + 4. Submit FinalizeAction with computed IDs + 5. Collect fees without storing anything + + RESULT: User pays, but data is NEVER stored. +``` + +### 2.3 Why Current Verification Fails + +| What's Verified | What's NOT Verified | +|-----------------|---------------------| +| ID formula correctness | Actual file receipt | +| SuperNode authorization | Symbol computation | +| Signature validity | Data storage in Kademlia | +| Expiration time | Proof of data possession | + +--- + +## 3. Design Overview + +### 3.1 High-Level Flow + +``` + REGISTRATION (Client): + File bytes → Split into fixed chunks → Hash each chunk (BLAKE3) → + Build Merkle tree → Generate challenge indices → + Commit root + challenge indices on-chain + + FINALIZATION (SuperNode): + Receive file → Verify data_hash → Recompute Merkle tree (BLAKE3) → + Verify root matches on-chain commitment → + Read challenge indices from on-chain commitment → + Produce Merkle proofs for challenged chunks → + Submit MsgFinalizeAction with proofs + + VERIFICATION (Action Module): + Read expected challenge indices from stored commitment → + Verify each Merkle proof against stored root → + If all valid → state = DONE, release fees +``` + +--- + +## 4. Technical Specification + +### 4.1 Merkle Tree Construction + +#### 4.1.1 Chunking + +Let file bytes be `B` of length `N`. Let chunk size be `S`. 
+ +**Hard boundaries enforced by the chain:** + +| Rule | Value | Enforcement | +|------|-------|-------------| +| Minimum file size | 4 bytes | `total_size >= 4` — reject trivially tiny files | +| Maximum chunk size | 256 KiB (262,144) | `chunk_size <= 262144` | +| Minimum chunk size | 1 byte | `chunk_size >= 1` | +| Minimum chunk count | 4 | `num_chunks >= svc_min_chunks_for_challenge` (default 4) — **unconditional** | +| Minimum challenge indices | 4 | `num_indices = min(svc_challenge_count, num_chunks)` — always ≥ 4 since num_chunks ≥ 4 | +| Maximum challenge indices | 8 | `num_indices = min(svc_challenge_count, num_chunks)` — capped by `svc_challenge_count` (default 8) | + +**Chunk size rules:** +- `S` must be a power of 2 +- `S` must be in `[1, 262144]` (1 byte floor, 256 KiB ceiling) +- Default: `S = 262144` (256 KiB) for files ≥ 1 MiB +- For smaller files, the client MUST reduce `S` so that `ceil(N / S) >= svc_min_chunks_for_challenge` (default 4) +- The minimum chunk count is enforced **unconditionally** — all files with an AvailabilityCommitment must produce ≥ 4 chunks + +**Client chunk size selection algorithm:** +``` +S = 262144 +while ceil(N / S) < svc_min_chunks_for_challenge AND S > 1: + S = S / 2 +``` + +- `num_chunks = ceil(N / S)` +- `chunk_i = B[i*S : min((i+1)*S, N)]` + +The last chunk may be smaller than `S`. + +**Examples:** + +| File Size | Chunk Size | Chunks | Indices | Accepted? 
| +|-----------|-----------|--------|---------|-----------| +| 2 MiB | 256 KiB | 8 | 8 | ✓ (8 chunks, 8 indices) | +| 1 MiB | 256 KiB | 4 | 4 | ✓ (4 chunks, 4 indices) | +| 500 KiB | 128 KiB | 4 | 4 | ✓ (4 chunks, 4 indices) | +| 100 KiB | 32 KiB | 4 | 4 | ✓ (4 chunks, 4 indices) | +| 4 KiB | 1 KiB | 4 | 4 | ✓ (4 chunks, 4 indices) | +| 4 bytes | 1 byte | 4 | 4 | ✓ (4 chunks, 4 indices) | +| 3 bytes | — | — | — | ✗ (below min file size of 4 bytes) | +| 500 KiB | 256 KiB | 2 | — | ✗ (only 2 chunks < min 4) | + +#### 4.1.2 Leaf Hashing (Domain Separated) + +To prevent second-preimage attacks, leaves and internal nodes use different domain prefixes: + +``` +leaf_i = BLAKE3(0x00 || uint32be(i) || chunk_i) +``` + +Where `uint32be(i)` is the chunk index as 4 bytes big-endian. + +#### 4.1.3 Internal Node Hashing (Domain Separated) + +``` +parent = BLAKE3(0x01 || left_hash || right_hash) +``` + +If a level has an odd number of nodes, duplicate the last node (`right = left`). + +#### 4.1.4 Tree Structure + +``` + ┌─────────────────┐ + │ MERKLE ROOT │ + │ (32 bytes) │ + └────────┬─────────┘ + │ + ┌─────────────┴─────────────┐ + │ │ + ┌─────┴─────┐ ┌─────┴─────┐ + │ H(01||L||R)│ │ H(01||L||R)│ + └─────┬─────┘ └─────┬─────┘ + │ │ + ... ... ... ... 
+ │ │ + ┌─────────┴─────────┐ ┌─────────┴─────────┐ + │ │ │ │ + H(00||0||C0) H(00||1||C1) H(00||2||C2) H(00||3||C3) + │ │ │ │ + Chunk 0 Chunk 1 Chunk 2 Chunk 3 + (256 KiB) (256 KiB) (256 KiB) (≤256 KiB) +``` + +#### 4.1.5 Merkle Proof Structure + +To prove chunk `i` belongs to the tree, provide sibling hashes along the path from leaf to root: + +``` + PROOF FOR CHUNK i: + { + chunk_index: i, + leaf_hash: BLAKE3(0x00 || uint32be(i) || chunk_i), + path_hashes: [sibling_0, sibling_1, ..., sibling_d], + path_directions: [bit_0, bit_1, ..., bit_d] // true = sibling on right + } + + VERIFICATION: + current = leaf_hash + for j in 0..d: + if path_directions[j]: // sibling on right + current = BLAKE3(0x01 || current || path_hashes[j]) + else: // sibling on left + current = BLAKE3(0x01 || path_hashes[j] || current) + + ACCEPT if: current == stored_merkle_root +``` + +### 4.2 Protocol Changes + +#### 4.2.1 On-Chain Commitment: `AvailabilityCommitment` + +Added to CascadeMetadata at registration: + +```protobuf +enum HashAlgo { + HASH_ALGO_UNSPECIFIED = 0; + HASH_ALGO_BLAKE3 = 1; + HASH_ALGO_SHA256 = 2; // Reserved for future use +} + +message AvailabilityCommitment { + string commitment_type = 1; // "lep5/chunk-merkle/v1" + HashAlgo hash_algo = 2; // HASH_ALGO_BLAKE3 + uint32 chunk_size = 3; // e.g. 
262144 for 256 KiB + uint64 total_size = 4; // Original file size in bytes + uint32 num_chunks = 5; // ceil(total_size / chunk_size) + bytes root = 6; // 32 bytes - Merkle root + repeated uint32 challenge_indices = 7; // Client-chosen challenge chunk indices +} +``` + +#### 4.2.2 Extended CascadeMetadata + +```protobuf +message CascadeMetadata { + // ═══════════════════════════════════════════════════ + // EXISTING FIELDS (unchanged) + // ═══════════════════════════════════════════════════ + string data_hash = 1; + string file_name = 2; + uint64 rq_ids_ic = 3; + uint64 rq_ids_max = 4; + repeated string rq_ids_ids = 5; + string signatures = 6; + bool public = 7; + + // ═══════════════════════════════════════════════════ + // NEW FIELDS — LEP-5 + // ═══════════════════════════════════════════════════ + + // Set at Registration (by client) — includes root and challenge_indices + AvailabilityCommitment availability_commitment = 8; + + // Set at Finalization (by SuperNode) + repeated ChunkProof chunk_proofs = 9; +} + +message ChunkProof { + uint32 chunk_index = 1; // Which chunk this proves + bytes leaf_hash = 2; // BLAKE3(0x00 || uint32be(idx) || chunk_bytes) + repeated bytes path_hashes = 3; // Sibling hashes (ceil(log2(N)) × 32 bytes) + repeated bool path_directions = 4; // true = sibling on right +} +``` + +#### 4.2.3 Module Parameters + +```protobuf +message Params { + // ... existing params ... 
+
+  // NEW: LEP-5 parameters
+  uint32 svc_challenge_count = 12; // m — number of chunks to challenge (default: 8)
+  uint32 svc_min_chunks_for_challenge = 13; // Minimum chunks to require SVC (default: 4)
+}
+```
+
+**Chunk size protocol constants** (not governance-tunable):
+
+| Constant | Value | Purpose |
+|----------|-------|---------|
+| `cascadeCommitmentMaxChunkSize` | 262,144 (256 KiB) | Maximum allowed chunk size |
+| `cascadeCommitmentMinChunkSize` | 1 (1 byte) | Minimum allowed chunk size |
+| `cascadeCommitmentMinTotalSize` | 4 | Minimum file size in bytes (reject trivially tiny files) |
+
+The client's `chunk_size` must be a power of 2 within this range. The chain **unconditionally rejects** registrations where `num_chunks < svc_min_chunks_for_challenge` (default 4). The number of challenge indices is always `min(svc_challenge_count, num_chunks)`, yielding a value in [4, 8] since `num_chunks >= 4`.
+
+### 4.3 Challenge Index Generation
+
+Challenge indices are **generated by the client at registration time** and stored in the `AvailabilityCommitment.challenge_indices` field. The indices must be:
+- **Deterministic:** Generated via BLAKE3-based derivation from known inputs
+- **Unique:** No duplicate indices in a single challenge set
+- **Within range:** All indices in `[0, num_chunks)`
+
+The `DeriveIndices` helper function (available in `x/action/v1/challenge/`) can be used by clients:
+
+```
+seed = BLAKE3(
+    entropy_input ||   // e.g. random nonce or hash of file content
+    action_id ||
+    uint64be(height) ||   // height/nonce input, matching DeriveIndices and §10.2
+    signer_addr ||
+    "lep5/challenge/v1"
+)
+
+counter = 0
+while fewer than m unique indices collected:
+    raw = BLAKE3(seed || uint32be(counter))
+    idx = uint64(raw[0:8]) mod num_chunks
+    if idx not already chosen: append idx
+    counter = counter + 1   // always advance the counter, even on duplicates
+```
+
+The keeper does **not** re-derive indices. At finalization, it reads `commitment.ChallengeIndices` directly from the stored on-chain commitment and validates that submitted proofs match those indices.
+
+### 4.4 Updated Protocol Flows
+
+#### 4.4.1 Registration Phase (Client)
+
+```
+    CLIENT REGISTRATION:
+
+    1. Read file, compute data_hash (existing)
+    2. Split file into chunks of size S (default 256 KiB)
+    3. Compute leaf hashes:
+       leaf_i = BLAKE3(0x00 || uint32be(i) || chunk_i)
+    4. Build Merkle tree, obtain root (32 bytes)
+    5. Generate challenge indices:
+       Use DeriveIndices to pick m unique chunk indices
+    6. Create AvailabilityCommitment:
+       {
+         commitment_type: "lep5/chunk-merkle/v1",
+         hash_algo: HASH_ALGO_BLAKE3,
+         chunk_size: 262144,
+         total_size: <file size in bytes>,
+         num_chunks: ceil(file_size / chunk_size),
+         root: <32-byte Merkle root>,
+         challenge_indices: [idx_0, idx_1, ..., idx_m-1]
+       }
+    7. Include commitment in MsgRequestAction metadata
+    8. (Existing) Generate rq_ids, sign, submit RegisterAction
+```
+
+**JS SDK addition:**
+
+```javascript
+import { blake3 } from 'blake3'; // or equivalent BLAKE3 npm package
+
+async function computeCommitment(fileBlob, chunkSize = 262144, challengeCount = 8) {
+  const totalSize = fileBlob.size;
+  const numChunks = Math.ceil(totalSize / chunkSize);
+  const leafHashes = [];
+
+  for (let i = 0; i < numChunks; i++) {
+    const start = i * chunkSize;
+    const end = Math.min(start + chunkSize, totalSize);
+    const chunkBytes = new Uint8Array(await fileBlob.slice(start, end).arrayBuffer());
+
+    // Domain-separated leaf: 0x00 || uint32be(i) || chunk
+    const prefix = new Uint8Array(5);
+    prefix[0] = 0x00;
+    new DataView(prefix.buffer).setUint32(1, i, false); // big-endian
+
+    const leafInput = new Uint8Array(prefix.length + chunkBytes.length);
+    leafInput.set(prefix);
+    leafInput.set(chunkBytes, prefix.length);
+
+    const hash = blake3(leafInput); // BLAKE3 hash → 32 bytes
+    leafHashes.push(new Uint8Array(hash));
+  }
+
+  const root = await buildMerkleRoot(leafHashes); // standard bottom-up construction
+
+  // Generate challenge indices using BLAKE3-based derivation
+  const challengeIndices = deriveIndices(root, numChunks, challengeCount);
+
+  return { root, totalSize, numChunks,
chunkSize, challengeIndices }; +} +``` + +#### 4.4.2 Finalization Phase (SuperNode) + +``` + SUPERNODE FINALIZATION: + + 1. Receive file from client via gRPC stream + 2. Verify: BLAKE3(received_file) == action.data_hash (existing) + 3. Split file into chunks using commitment.chunk_size + 4. Compute leaf hashes with domain separation (BLAKE3) + 5. Build Merkle tree + 6. VERIFY: computed_root == action.availability_commitment.root + (If mismatch → client provided bad commitment, report evidence) + 7. Perform existing RQ encoding, ID generation, Kademlia storage + 8. Read challenge indices from on-chain commitment: + indices = action.availability_commitment.challenge_indices + 9. Generate Merkle proofs for each challenged chunk + 10. Submit MsgFinalizeAction with: + - Existing fields (rq_ids_ids, rq_ids_oti) + - NEW: chunk_proofs array +``` + +#### 4.4.3 Verification Phase (Action Module) + +``` + ACTION MODULE — On receiving MsgFinalizeAction: + + 1. Existing checks (expiration, SN authorization, rq_ids verification) + + 2. Skip SVC if num_chunks < svc_min_chunks_for_challenge + + 3. Read expected challenge indices from stored commitment: + expectedIndices = commitment.ChallengeIndices + + 4. Validate proof count matches expected challenge count + + 5. For each chunk_proof: + a. Verify chunk_proof.chunk_index == expectedIndices[i] + b. Verify Merkle proof: + - Walk path_hashes with path_directions + - Use domain-separated hashing (0x01 prefix for internal nodes, BLAKE3) + - Computed root must equal stored commitment.root + + 6. 
If all proofs valid → state = DONE, release fees +``` + +### 4.5 Complete Sequence Diagram + +``` +┌──────┐ ┌────────┐ ┌──────────┐ ┌────────┐ +│Client│ │ Action │ │SuperNode │ │Kademlia│ +└──┬───┘ │ Module │ └────┬─────┘ └───┬────┘ + │ └───┬────┘ │ │ + │ │ │ │ + │ REGISTRATION │ │ │ + │══════════════════│ │ │ + │ Chunk file │ │ │ + │ Build Merkle tree│ │ │ + │ (BLAKE3) │ │ │ + │ Compute root │ │ │ + │ Pick challenge │ │ │ + │ indices │ │ │ + │ │ │ │ + │ RegisterAction │ │ │ + │ (+commitment │ │ │ + │ +indices) │ │ │ + │─────────────────>│ │ │ + │ │ Store action │ │ + │ │ + Merkle root │ │ + │ │ + challenge indices │ │ + │ │ │ │ + │ PROCESSING │ │ │ + │══════════════════│ │ │ + │ Upload file ────────────────────────> │ │ + │ │ │ Verify hash │ + │ │ │ Chunk + tree │ + │ │ │ (BLAKE3) │ + │ │ │ Verify root match │ + │ │ │ RQ encode │ + │ │ │ Store symbols ────>│ + │ │ │ │ + │ FINALIZATION │ │ │ + │══════════════════│ │ │ + │ │ │ Read challenge │ + │ │ │ indices from │ + │ │ │ stored commitment │ + │ │ │ Generate Merkle │ + │ │ │ proofs (BLAKE3) │ + │ │ │ │ + │ │ FinalizeAction │ │ + │ │ (+chunk_proofs) │ │ + │ │<───────────────────│ │ + │ │ │ │ + │ │ Read stored │ │ + │ │ challenge indices │ │ + │ │ Verify each proof │ │ + │ │ against stored root│ │ + │ │ (BLAKE3) │ │ + │ │ │ │ + │ │ All valid? │ │ + │ │ → DONE │ │ + │ │ → Release fees │ │ +``` + +--- + +## 5. Security Analysis + +### 5.1 Attack: Finalize Without Data + +**Defense:** The finalizer must produce Merkle proofs for chunk indices that were committed at registration. Without the actual file bytes, the finalizer cannot compute the correct leaf hashes — the domain-separated BLAKE3 hash includes the raw chunk content. + +### 5.2 Attack: Pre-computation / Index Prediction + +**Defense:** Challenge indices are committed by the client at registration time. A malicious SuperNode cannot know which chunks it will be challenged on until it reads the on-chain commitment. 
Once the commitment is on-chain, the indices are immutable and the SuperNode must provide valid proofs for those exact chunks. + +### 5.3 Attack: Selective Storage (Store Only Some Chunks) + +With m challenges drawn uniformly from N chunks, and an attacker storing fraction p of chunks: + +``` +P(evade detection) = p^m +``` + +| Chunks Stored | p | P(evade) m=5 | P(evade) m=8 | P(evade) m=16 | +|---------------|-----|-------------|-------------|--------------| +| 90% | 0.9 | 59.0% | 43.0% | 18.5% | +| 80% | 0.8 | 32.8% | 16.8% | 2.8% | +| 50% | 0.5 | 3.1% | 0.39% | 0.0015% | +| 20% | 0.2 | 0.032% | negligible | negligible | + +With m=8 (recommended default), storing less than half the file has <0.4% chance of evading a single challenge round. Failed attempts accumulate Audit module evidence. + +### 5.4 Attack: Client–SuperNode Collusion on Challenge Indices + +**Risk:** A malicious client could collude with a SuperNode to choose "easy" challenge indices (e.g., indices for chunks the SuperNode already has from a partial transfer). + +**Defense:** The governance-enforced `svc_challenge_count` (m) ensures a minimum number of challenged chunks. The indices must be unique and within `[0, num_chunks)`. Since the client's goal is to have its data stored, collusion against its own interest is economically irrational. A future enhancement could mix server-side randomness into the index generation to further harden against this vector. + +### 5.5 Attack: Merkle Root Forgery (Collision) + +**Defense:** BLAKE3 collision resistance provides ~2^128 security (birthday attack). Domain-separated hashing (0x00 for leaves, 0x01 for internal nodes) additionally prevents second-preimage attacks where a leaf could be confused with an internal node. 
+ +### 5.6 Security Summary + +| Guarantee | Mechanism | Strength | +|-----------|-----------|----------| +| **Commitment Binding** | BLAKE3 collision resistance | 2^128 security | +| **Challenge Commitment** | Indices stored on-chain at registration | Immutable once committed | +| **Proof Soundness** | Merkle tree structure | Information-theoretic | +| **Collusion Resistance** | Client incentive alignment + governance minimum m | Economic (client pays for storage) | + +--- + +## 6. Implementation Details + +### 6.1 Merkle Tree (Go) + +```go +package merkle + +import ( + "encoding/binary" + "errors" + + "lukechampine.com/blake3" +) + +const HashSize = 32 + +var ( + LeafPrefix = []byte{0x00} + InternalPrefix = []byte{0x01} + ErrEmptyInput = errors.New("empty input") + ErrIndexOutOfRange = errors.New("index out of range") +) + +// HashLeaf computes BLAKE3(0x00 || uint32be(index) || data) +func HashLeaf(index uint32, data []byte) [HashSize]byte { + var prefix [5]byte + prefix[0] = 0x00 + binary.BigEndian.PutUint32(prefix[1:], index) + + h := blake3.New(HashSize, nil) + h.Write(prefix[:]) + h.Write(data) + var result [HashSize]byte + copy(result[:], h.Sum(nil)) + return result +} + +// HashInternal computes BLAKE3(0x01 || left || right) +func HashInternal(left, right [HashSize]byte) [HashSize]byte { + h := blake3.New(HashSize, nil) + h.Write(InternalPrefix) + h.Write(left[:]) + h.Write(right[:]) + var result [HashSize]byte + copy(result[:], h.Sum(nil)) + return result +} + +type Tree struct { + Root [HashSize]byte + Leaves [][HashSize]byte + Levels [][][HashSize]byte // levels[0] = leaves (possibly padded), levels[last] = [root] + LeafCount int +} + +// BuildTree constructs a Merkle tree from chunk data. +// If a level has an odd number of nodes, the last node is duplicated. 
+func BuildTree(chunks [][]byte) (*Tree, error) { + n := len(chunks) + if n == 0 { + return nil, ErrEmptyInput + } + + // Compute leaf hashes + leaves := make([][HashSize]byte, n) + for i, chunk := range chunks { + leaves[i] = HashLeaf(uint32(i), chunk) + } + + levels := [][][HashSize]byte{leaves} + + // Build tree bottom-up + current := leaves + for len(current) > 1 { + // If odd number of nodes, duplicate the last node + if len(current)%2 != 0 { + current = append(current, current[len(current)-1]) + levels[len(levels)-1] = current + } + + next := make([][HashSize]byte, len(current)/2) + for i := 0; i < len(current); i += 2 { + next[i/2] = HashInternal(current[i], current[i+1]) + } + levels = append(levels, next) + current = next + } + + return &Tree{ + Root: current[0], + Leaves: leaves, + Levels: levels, + LeafCount: n, + }, nil +} + +type Proof struct { + ChunkIndex uint32 + LeafHash [HashSize]byte + PathHashes [][HashSize]byte + PathDirections []bool // true = sibling on right +} + +// GenerateProof creates a Merkle proof for chunk at index +func (t *Tree) GenerateProof(index int) (*Proof, error) { + if index < 0 || index >= t.LeafCount { + return nil, ErrIndexOutOfRange + } + + proof := &Proof{ + ChunkIndex: uint32(index), + LeafHash: t.Leaves[index], + } + + idx := index + for level := 0; level < len(t.Levels)-1; level++ { + if idx%2 == 0 { + proof.PathHashes = append(proof.PathHashes, t.Levels[level][idx+1]) + proof.PathDirections = append(proof.PathDirections, true) + } else { + proof.PathHashes = append(proof.PathHashes, t.Levels[level][idx-1]) + proof.PathDirections = append(proof.PathDirections, false) + } + idx /= 2 + } + + return proof, nil +} + +// Verify checks proof against a root +func (p *Proof) Verify(root [HashSize]byte) bool { + current := p.LeafHash + for i, sibling := range p.PathHashes { + if p.PathDirections[i] { + current = HashInternal(current, sibling) + } else { + current = HashInternal(sibling, current) + } + } + return current == 
root +} +``` + +### 6.2 Challenge Index Generation (Go) + +The `DeriveIndices` helper generates deterministic unique indices using BLAKE3. This function is available for client-side use. The keeper does **not** call this at finalization — it reads stored indices from `commitment.ChallengeIndices`. + +```go +package challenge + +import ( + "encoding/binary" + + "lukechampine.com/blake3" +) + +const domainTag = "lep5/challenge/v1" + +// DeriveIndices generates m deterministic pseudo-random unique indices. +// Used by clients to generate challenge indices at registration time. +func DeriveIndices(actionID string, entropyInput []byte, height uint64, + signerAddr []byte, numChunks uint32, m uint32) []uint32 { + + if numChunks == 0 || m == 0 { + return []uint32{} + } + + if m > numChunks { + m = numChunks + } + + // Compute seed + var heightBytes [8]byte + binary.BigEndian.PutUint64(heightBytes[:], height) + + seedInput := make([]byte, 0, len(entropyInput)+len(actionID)+8+len(signerAddr)+len(domainTag)) + seedInput = append(seedInput, entropyInput...) + seedInput = append(seedInput, actionID...) + seedInput = append(seedInput, heightBytes[:]...) + seedInput = append(seedInput, signerAddr...) + seedInput = append(seedInput, domainTag...) + + seed := blake3.Sum256(seedInput) + + indices := make([]uint32, 0, m) + used := make(map[uint32]struct{}, m) + counter := uint32(0) + + for uint32(len(indices)) < m { + var counterBytes [4]byte + binary.BigEndian.PutUint32(counterBytes[:], counter) + + h := blake3.New(32, nil) + h.Write(seed[:]) + h.Write(counterBytes[:]) + raw := h.Sum(nil) + + idx := uint32(binary.BigEndian.Uint64(raw[:8]) % uint64(numChunks)) + if _, exists := used[idx]; !exists { + used[idx] = struct{}{} + indices = append(indices, idx) + } + counter++ + } + + return indices +} +``` + +### 6.3 Keeper Verification (Go) + +The keeper reads challenge indices from the stored commitment — it does **not** re-derive them from block state. 
+ +```go +// x/action/v1/keeper/svc.go +package keeper + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/LumeraProtocol/lumera/x/action/v1/types" + "github.com/LumeraProtocol/lumera/x/action/v1/merkle" +) + +// VerifyChunkProofs validates the SVC proofs in a FinalizeAction message. +// Challenge indices are read from the stored AvailabilityCommitment, +// not derived from block state. +func (k Keeper) VerifyChunkProofs( + ctx sdk.Context, + action *types.Action, + superNodeAccount string, + proofs []*types.ChunkProof, +) error { + metadata := action.GetCascadeMetadata() + commitment := metadata.AvailabilityCommitment + + if commitment == nil { + // Backward compatibility: pre-LEP-5 actions do not include commitments. + return nil + } + + params := k.GetParams(ctx) + challengeCount, minChunks := getSVCParamsOrDefault(params) + + // Skip SVC for very small files + if commitment.NumChunks < minChunks { + return nil + } + + expectedCount := min(challengeCount, commitment.NumChunks) + if uint32(len(proofs)) != expectedCount { + return types.ErrWrongProofCount.Wrapf( + "expected %d proofs, got %d", expectedCount, len(proofs)) + } + + // Read expected challenge indices from the stored commitment + expectedIndices := commitment.ChallengeIndices + if uint32(len(expectedIndices)) != expectedCount { + return types.ErrInvalidMetadata.Wrapf( + "commitment has %d challenge_indices, expected %d", + len(expectedIndices), expectedCount) + } + + // Verify each proof + for i, proof := range proofs { + // Check index matches expected challenge + if proof.ChunkIndex != expectedIndices[i] { + return types.ErrWrongChallengeIndex.Wrapf( + "proof %d: expected index %d, got %d", + i, expectedIndices[i], proof.ChunkIndex) + } + + // Verify BLAKE3 Merkle proof against stored root + merkleProof := &merkle.Proof{ + ChunkIndex: proof.ChunkIndex, + LeafHash: proof.LeafHash, + PathHashes: proof.PathHashes, + PathDirections: proof.PathDirections, + } + + if 
!merkleProof.Verify(commitment.Root) { + return types.ErrInvalidMerkleProof.Wrapf( + "proof for chunk %d failed verification", proof.ChunkIndex) + } + } + + return nil +} +``` + +### 6.4 Gas and Size Impact + +**Registration message increase:** + +| Field | Size | +|-------|------| +| commitment_type | ~24 bytes | +| hash_algo | 1 byte (enum varint) | +| chunk_size | 4 bytes | +| total_size | 8 bytes | +| num_chunks | 4 bytes | +| root | 32 bytes | +| challenge_indices (m=8) | ~32 bytes | +| **Total** | **~105 bytes** | + +**Finalization message increase (per proof):** + +| Field | Size | +|-------|------| +| chunk_index | 4 bytes | +| leaf_hash | 32 bytes | +| path_hashes | ~320 bytes (10 levels for ~1000 chunks) | +| path_directions | ~10 bytes | +| **Per proof** | **~366 bytes** | +| **Total for m=8** | **~2,928 bytes** | + +**Comparison:** + +| | Current | With LEP-5 (m=8) | +|---|---------|------------------| +| Registration | ~2,500 bytes | ~2,605 bytes (+4%) | +| Finalization | ~2,500 bytes | ~5,428 bytes (+117%) | +| **Trade-off** | No data integrity proof | Full protection against fake storage | + +**Gas costs:** + +| Operation | Estimated Gas | +|-----------|--------------| +| Commitment storage | ~25,000 | +| Per-proof verification | ~15,000 | +| **Total finalization overhead (m=8)** | **~145,000** | + +--- + +## 7. 
Migration and Compatibility + +### 7.1 Upgrade Strategy + +**Phase 1: Soft Launch** (activation height to activation + ~50,000 blocks) +- SVC fields accepted but not required +- SuperNodes and clients upgraded to support SVC +- Monitoring for issues, parameter tuning + +**Phase 2: Enforcement** (after soft launch period) +- SVC required for all new Cascade actions +- Actions without `AvailabilityCommitment` rejected +- Full protection enabled + +### 7.2 Activation + +Add chain parameter: `lep5_enabled_height` +- Before activation: existing finalization rules apply +- After activation: `MsgRequestAction` for Cascade must include `AvailabilityCommitment` (with `challenge_indices`); `MsgFinalizeAction` must include valid `chunk_proofs` + +### 7.3 Backward Compatibility + +| Component | Impact | Migration | +|-----------|--------|-----------| +| Existing finalized actions | None | Already complete | +| SuperNode software | Must upgrade | Proof generation in finalize path | +| Client JS SDK | Must upgrade | Add `computeCommitment()` + challenge index generation | +| rq-library (WASM) | **No changes** | Chunk commitment is pre-RQ | +| Action Module | New validation | Chain upgrade required | + +--- + +## 8. 
Recommended Parameters + +| Parameter | Mainnet | Testnet | Rationale | +|-----------|---------|---------|-----------| +| `total_size` (min) | 4 bytes | 4 bytes | Reject trivially tiny files | +| `chunk_size` (max) | 262,144 (256 KiB) | 262,144 | Default for large files | +| `chunk_size` (min) | 1 (1 byte) | 1 | Floor; allows small files to have enough chunks | +| `chunk_size` constraint | power of 2 | power of 2 | Simplifies client + tree construction | +| `svc_challenge_count` (m) | 8 | 5 | Strong security without excessive message size | +| `svc_min_chunks_for_challenge` | 4 | 4 | Unconditionally enforced — all files must produce ≥ 4 chunks | +| Challenge indices count | min(m, num_chunks) | min(m, num_chunks) | Always in [4, 8] since num_chunks ≥ 4 | + +--- + +## 9. Future Extensions + +### 9.1 Symbol-Level Commitment (`SYMBOL_MERKLE_V1`) + +When/if rq-library exposes a symbol-iteration API in its WASM build, a stronger mode can commit directly to RaptorQ symbols. This binds the Merkle root to the actual Cascade storage artifacts, not just the input file. The `commitment_type` field enables this upgrade without breaking the protocol. + +### 9.2 Continuous Storage Challenges + +LEP-5 proves possession at finalization time. Future work can introduce periodic re-challenges (Proof of Space-Time) where SuperNodes must prove ongoing retention at random intervals, with slashing for failures. + +### 9.3 Compressed Multi-Proofs + +For large m values, batch Merkle proofs can share common path segments, reducing total proof size. + +### 9.4 Server-Side Randomness Mixing + +A future enhancement could mix validator-provided randomness (e.g., from block hash) into the challenge index generation to further harden against client–SuperNode collusion on index selection. + +--- + +## 10. 
Test Vectors + +### 10.1 Merkle Tree Construction + +``` +INPUT: + File: 4 chunks of data "C0", "C1", "C2", "C3" + +STEP 1 — Leaf hashes (domain separated, BLAKE3): + L0 = BLAKE3(0x00 || 0x00000000 || "C0") + L1 = BLAKE3(0x00 || 0x00000001 || "C1") + L2 = BLAKE3(0x00 || 0x00000002 || "C2") + L3 = BLAKE3(0x00 || 0x00000003 || "C3") + +STEP 2 — Internal nodes (domain separated, BLAKE3): + I01 = BLAKE3(0x01 || L0 || L1) + I23 = BLAKE3(0x01 || L2 || L3) + +STEP 3 — Root: + ROOT = BLAKE3(0x01 || I01 || I23) +``` + +### 10.2 Challenge Index Generation + +``` +INPUT: + action_id = "cascade_test_001" + entropy_input = 0x999888777666... (32 bytes) + height = 12345 + signer = 0xABCDEF... (20 bytes) + num_chunks = 1000 + m = 5 + +COMPUTATION: + seed = BLAKE3(entropy_input || "cascade_test_001" || uint64be(12345) || signer || "lep5/challenge/v1") + + For j=0: idx = uint64(BLAKE3(seed || uint32be(0))[0:8]) % 1000 + For j=1: idx = uint64(BLAKE3(seed || uint32be(1))[0:8]) % 1000 + ... (skip duplicates, increment counter) + +EXPECTED: 5 unique indices in [0, 999] + +These indices are stored in AvailabilityCommitment.challenge_indices at registration. +``` + +### 10.3 Proof Verification + +``` +Given 4-chunk tree with ROOT from test 10.1: + +Proof for chunk 2: + { + chunk_index: 2, + leaf_hash: L2, + path_hashes: [L3, I01], + path_directions: [true, false] // L3 is RIGHT sibling, I01 is LEFT sibling + } + +Verification (BLAKE3): + current = L2 + step 1: current = BLAKE3(0x01 || current || L3) = I23 (L3 on RIGHT) + step 2: current = BLAKE3(0x01 || I01 || current) = ROOT (I01 on LEFT) + + current == ROOT? ✓ VALID +``` + +--- + +## 11. 
Canonical Encoding Rules (Normative) + +- **Hash function:** BLAKE3 (via `lukechampine.com/blake3` in Go, `blake3` npm package in JS) +- **Hash output size:** 32 bytes +- **Leaf domain separator:** `0x00` +- **Internal node domain separator:** `0x01` +- **Integer encoding:** Big-endian (`uint32be`, `uint64be`) +- **Odd-level padding:** Duplicate last hash +- **Commitment type constant:** `"lep5/chunk-merkle/v1"` +- **Hash algo enum:** `HASH_ALGO_BLAKE3 = 1` +- **Challenge domain tag:** `"lep5/challenge/v1"` +- **Challenge indices:** Client-provided, stored in `AvailabilityCommitment.challenge_indices` + +--- + +## Changelog + +| Version | Date | Changes | +|---------|------|---------| +| 0.1 | 2025-02-08 | Initial drafts (two separate proposals) | +| 0.2 | 2026-02-08 | Combined proposal: chunk-based commitment, single-SN Merkle proof finalization | +| 0.3 | 2026-02-08 | Removed quorum/attestation layer; finalization relies solely on SuperNode-produced Merkle proofs | +| 0.4 | 2026-02-26 | BLAKE3 replaces SHA-256 for all hashing; hash_algo changed to HashAlgo enum; challenge indices now client-provided at registration (stored in AvailabilityCommitment.challenge_indices) instead of derived from block hash at finalization | +| 0.5 | 2026-02-26 | Variable chunk_size: chunk_size is now client-chosen power-of-2 in [1024, 262144]; chain enforces num_chunks >= svc_min_chunks_for_challenge for files >= 4 KiB, reducing SVC skip threshold from < 1 MiB to < 4 KiB | +| 0.6 | 2026-02-27 | Strict chunking boundaries: min file size 4 bytes; unconditional min 4 chunks; min 4 / max 8 challenge indices; max chunk size 256 KiB | +| 0.7 | 2026-02-27 | Min chunk size lowered from 1 KiB to 1 byte to allow 4-byte files to produce 4 chunks with chunk_size=1 | + +--- + +**Document Status:** Draft — Pending Review diff --git a/proto/lumera/action/v1/metadata.proto b/proto/lumera/action/v1/metadata.proto index 0bb75dcf..6b2c8e28 100644 --- a/proto/lumera/action/v1/metadata.proto +++ 
b/proto/lumera/action/v1/metadata.proto @@ -5,6 +5,14 @@ option go_package = "x/action/v1/types"; import "gogoproto/gogo.proto"; +// HashAlgo enumerates the supported hash algorithms for availability +// commitments. +enum HashAlgo { + HASH_ALGO_UNSPECIFIED = 0; + HASH_ALGO_BLAKE3 = 1; + HASH_ALGO_SHA256 = 2; +} + // SenseMetadata contains information for Sense actions. // This metadata is directly embedded in the Action.metadata field. // For RequestAction: @@ -30,6 +38,30 @@ message SenseMetadata { repeated string dd_and_fingerprints_ids = 6 [json_name = "dd_and_fingerprints_ids"]; string signatures = 7 [json_name = "signatures"]; } + +// AvailabilityCommitment is the LEP-5 on-chain file commitment included +// during Cascade registration. +message AvailabilityCommitment { + string commitment_type = 1 [json_name = "commitment_type"]; + HashAlgo hash_algo = 2 [json_name = "hash_algo"]; + uint32 chunk_size = 3 [json_name = "chunk_size"]; + uint64 total_size = 4 [json_name = "total_size"]; + uint32 num_chunks = 5 [json_name = "num_chunks"]; + bytes root = 6 [json_name = "root"]; + // Challenge indices chosen by the client at registration time. + // The SuperNode must provide Merkle proofs for these exact chunk + // indices during finalization. The keeper validates proofs match + // these stored indices and the committed root. + repeated uint32 challenge_indices = 7 [json_name = "challenge_indices"]; +} + +// ChunkProof is a Merkle inclusion proof for one challenged chunk. +message ChunkProof { + uint32 chunk_index = 1 [json_name = "chunk_index"]; + bytes leaf_hash = 2 [json_name = "leaf_hash"]; + repeated bytes path_hashes = 3 [json_name = "path_hashes"]; + repeated bool path_directions = 4 [json_name = "path_directions"]; +} // CascadeMetadata contains information for Cascade actions. // This metadata is directly embedded in the Action.metadata field. 
@@ -56,4 +88,8 @@ message CascadeMetadata { // mark the action as visible to all users; set to false for private // or restricted actions. bool public = 7 [json_name = "public"]; + + // LEP-5 fields + AvailabilityCommitment availability_commitment = 8 [json_name = "availability_commitment"]; + repeated ChunkProof chunk_proofs = 9 [json_name = "chunk_proofs"]; } diff --git a/proto/lumera/action/v1/params.proto b/proto/lumera/action/v1/params.proto index 6ca24f53..605eaf83 100644 --- a/proto/lumera/action/v1/params.proto +++ b/proto/lumera/action/v1/params.proto @@ -43,4 +43,8 @@ message Params { // Reward Distribution string super_node_fee_share = 10 [(cosmos_proto.scalar) = "cosmos.Dec"]; string foundation_fee_share = 11 [(cosmos_proto.scalar) = "cosmos.Dec"]; -} \ No newline at end of file + + // LEP-5: Storage Verification Challenge parameters + uint32 svc_challenge_count = 12; // Number of chunks to challenge (default: 8) + uint32 svc_min_chunks_for_challenge = 13; // Minimum chunks required for SVC (default: 4) +} diff --git a/tests/integration/action/lep5_integration_test.go b/tests/integration/action/lep5_integration_test.go new file mode 100644 index 00000000..ccc06a6c --- /dev/null +++ b/tests/integration/action/lep5_integration_test.go @@ -0,0 +1,345 @@ +package action_test + +import ( + "encoding/json" + "fmt" + "testing" + "time" + + lumeraapp "github.com/LumeraProtocol/lumera/app" + "github.com/LumeraProtocol/lumera/x/action/v1/keeper" + "github.com/LumeraProtocol/lumera/x/action/v1/merkle" + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + sntypes "github.com/LumeraProtocol/lumera/x/supernode/v1/types" + "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" + sdk "github.com/cosmos/cosmos-sdk/types" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + gogoproto "github.com/cosmos/gogoproto/proto" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +const lep5ChunkSize = uint32(262144) // 256 KiB + 
+// LEP5IntegrationTestSuite exercises the full LEP-5 Cascade availability +// commitment flow through the real application stack. +type LEP5IntegrationTestSuite struct { + suite.Suite + + app *lumeraapp.App + ctx sdk.Context + keeper keeper.Keeper + msgServer actiontypes.MsgServer + + testAddrs []sdk.AccAddress + privKeys []*secp256k1.PrivKey +} + +func (s *LEP5IntegrationTestSuite) SetupTest() { + app := lumeraapp.Setup(s.T()) + ctx := app.BaseApp.NewContext(false).WithBlockHeight(1).WithBlockTime(time.Now()) + + s.app = app + s.ctx = ctx + s.keeper = app.ActionKeeper + s.msgServer = keeper.NewMsgServerImpl(s.keeper) + + initCoins := sdk.NewCoins(sdk.NewInt64Coin("ulume", 1_000_000)) + s.testAddrs, s.privKeys, _ = createTestAddAddrsWithKeys(5) + + for i, addr := range s.testAddrs { + acc := app.AuthKeeper.GetAccount(s.ctx, addr) + if acc == nil { + account := app.AuthKeeper.NewAccountWithAddress(s.ctx, addr) + baseAcc := account.(*authtypes.BaseAccount) + baseAcc.SetPubKey(s.privKeys[i].PubKey()) + app.AuthKeeper.SetAccount(s.ctx, baseAcc) + } + require.NoError(s.T(), app.BankKeeper.MintCoins(s.ctx, actiontypes.ModuleName, initCoins)) + require.NoError(s.T(), app.BankKeeper.SendCoinsFromModuleToAccount(s.ctx, actiontypes.ModuleName, addr, initCoins)) + } + + valAddr := sdk.ValAddress(s.privKeys[1].PubKey().Address()) + sn := sntypes.SuperNode{ + ValidatorAddress: valAddr.String(), + SupernodeAccount: s.testAddrs[1].String(), + Note: "1.0.0", + States: []*sntypes.SuperNodeStateRecord{{State: sntypes.SuperNodeStateActive}}, + PrevIpAddresses: []*sntypes.IPAddressHistory{{Address: "192.168.1.1"}}, + P2PPort: "2134", + } + require.NoError(s.T(), app.SupernodeKeeper.SetSuperNode(s.ctx, sn)) + + params := actiontypes.DefaultParams() + params.ExpirationDuration = time.Minute + require.NoError(s.T(), s.keeper.SetParams(s.ctx, params)) +} + +// TestLEP5CascadeLifecycle registers a Cascade action with an AvailabilityCommitment, +// finalizes it with valid chunk proofs, and 
asserts the action reaches DONE. +func (s *LEP5IntegrationTestSuite) TestLEP5CascadeLifecycle() { + t := s.T() + txCreator := s.testAddrs[0].String() + snAccount := s.testAddrs[1].String() + + // --- Build Merkle tree from 8 chunks --- + numChunks := uint32(8) + chunks := make([][]byte, numChunks) + for i := uint32(0); i < numChunks; i++ { + chunks[i] = []byte(fmt.Sprintf("chunk-%d-data", i)) + } + + tree, err := merkle.BuildTree(chunks) + require.NoError(t, err) + + root := make([]byte, merkle.HashSize) + copy(root, tree.Root[:]) + + // Client picks challenge indices — must match min(SVCChallengeCount, numChunks) = 8. + challengeIndices := []uint32{0, 1, 2, 3, 4, 5, 6, 7} + + // --- Register with AvailabilityCommitment --- + sigStr, err := createValidCascadeSignatureString(s.privKeys[0], 1) + require.NoError(t, err) + + commitment := actiontypes.AvailabilityCommitment{ + CommitmentType: "lep5/chunk-merkle/v1", + HashAlgo: actiontypes.HashAlgo_HASH_ALGO_BLAKE3, + ChunkSize: lep5ChunkSize, + TotalSize: uint64(numChunks) * uint64(lep5ChunkSize), + NumChunks: numChunks, + Root: root, + ChallengeIndices: challengeIndices, + } + commitmentJSON, err := json.Marshal(&commitment) + require.NoError(t, err) + + metadata := fmt.Sprintf( + `{"data_hash":"abc123","file_name":"file.txt","rq_ids_ic":1,"signatures":"%s","availability_commitment":%s}`, + sigStr, string(commitmentJSON), + ) + + msg := &actiontypes.MsgRequestAction{ + Creator: txCreator, + ActionType: actiontypes.ActionTypeCascade.String(), + Metadata: metadata, + Price: "100000ulume", + ExpirationTime: fmt.Sprintf("%d", time.Now().Add(10*time.Minute).Unix()), + } + + res, err := s.msgServer.RequestAction(s.ctx, msg) + require.NoError(t, err) + require.NotEmpty(t, res.ActionId) + + // Verify commitment stored on-chain. 
+ action, found := s.keeper.GetActionByID(s.ctx, res.ActionId) + require.True(t, found) + require.Equal(t, actiontypes.ActionStatePending, action.State) + + var storedMeta actiontypes.CascadeMetadata + require.NoError(t, gogoproto.Unmarshal(action.Metadata, &storedMeta)) + require.NotNil(t, storedMeta.AvailabilityCommitment) + require.Equal(t, root, storedMeta.AvailabilityCommitment.Root) + require.Equal(t, challengeIndices, storedMeta.AvailabilityCommitment.ChallengeIndices) + + // --- Finalize with chunk proofs --- + chunkProofs := make([]*actiontypes.ChunkProof, 0, len(challengeIndices)) + for _, idx := range challengeIndices { + p, pErr := tree.GenerateProof(int(idx)) + require.NoError(t, pErr) + chunkProofs = append(chunkProofs, protoChunkProof(p)) + } + + ids, err := generateValidCascadeIDs(sigStr, 1, 50) + require.NoError(t, err) + + finMeta := &actiontypes.CascadeMetadata{ + RqIdsIds: ids, + ChunkProofs: chunkProofs, + } + finMetaBytes, err := json.Marshal(finMeta) + require.NoError(t, err) + + s.ctx = s.ctx.WithEventManager(sdk.NewEventManager()) + + finMsg := &actiontypes.MsgFinalizeAction{ + ActionId: res.ActionId, + Creator: snAccount, + ActionType: actiontypes.ActionTypeCascade.String(), + Metadata: string(finMetaBytes), + } + _, err = s.msgServer.FinalizeAction(s.ctx, finMsg) + require.NoError(t, err) + + // Verify action reached DONE state. + finalAction, found := s.keeper.GetActionByID(s.ctx, res.ActionId) + require.True(t, found) + require.Equal(t, actiontypes.ActionStateDone, finalAction.State) + + // Verify SVC verification passed event was emitted. 
+ foundPassedEvent := false + for _, event := range s.ctx.EventManager().Events() { + if event.Type == actiontypes.EventTypeSVCVerificationPassed { + foundPassedEvent = true + break + } + } + require.True(t, foundPassedEvent, "expected SVC verification passed event") +} + +// TestLEP5InvalidCommitmentRejected verifies that a Cascade action with an +// invalid AvailabilityCommitment is rejected at registration. +func (s *LEP5IntegrationTestSuite) TestLEP5InvalidCommitmentRejected() { + t := s.T() + txCreator := s.testAddrs[0].String() + + sigStr, err := createValidCascadeSignatureString(s.privKeys[0], 1) + require.NoError(t, err) + + // Build a valid commitment to use as base, then tweak one field. + numChunks := uint32(8) + chunks := make([][]byte, numChunks) + for i := range chunks { + chunks[i] = []byte(fmt.Sprintf("chunk-%d", i)) + } + tree, err := merkle.BuildTree(chunks) + require.NoError(t, err) + root := make([]byte, merkle.HashSize) + copy(root, tree.Root[:]) + + s.Run("wrong commitment type", func() { + commitment := actiontypes.AvailabilityCommitment{ + CommitmentType: "wrong-type", + HashAlgo: actiontypes.HashAlgo_HASH_ALGO_BLAKE3, + ChunkSize: lep5ChunkSize, + TotalSize: uint64(numChunks) * uint64(lep5ChunkSize), + NumChunks: numChunks, + Root: root, + ChallengeIndices: []uint32{0, 1, 2, 3, 4, 5, 6, 7}, + } + commitmentJSON, cErr := json.Marshal(&commitment) + require.NoError(t, cErr) + + metadata := fmt.Sprintf( + `{"data_hash":"abc123","file_name":"file.txt","rq_ids_ic":1,"signatures":"%s","availability_commitment":%s}`, + sigStr, string(commitmentJSON), + ) + msg := &actiontypes.MsgRequestAction{ + Creator: txCreator, + ActionType: actiontypes.ActionTypeCascade.String(), + Metadata: metadata, + Price: "100000ulume", + ExpirationTime: fmt.Sprintf("%d", time.Now().Add(10*time.Minute).Unix()), + } + _, err := s.msgServer.RequestAction(s.ctx, msg) + require.Error(t, err) + require.Contains(t, err.Error(), "commitment_type") + }) +} + +// 
TestLEP5InvalidProofRejected verifies that invalid chunk proofs are rejected +// during finalization. +func (s *LEP5IntegrationTestSuite) TestLEP5InvalidProofRejected() { + t := s.T() + txCreator := s.testAddrs[0].String() + snAccount := s.testAddrs[1].String() + + numChunks := uint32(8) + chunks := make([][]byte, numChunks) + for i := uint32(0); i < numChunks; i++ { + chunks[i] = []byte(fmt.Sprintf("chunk-%d-data", i)) + } + + tree, err := merkle.BuildTree(chunks) + require.NoError(t, err) + + root := make([]byte, merkle.HashSize) + copy(root, tree.Root[:]) + + challengeIndices := []uint32{0, 1, 2, 3, 4, 5, 6, 7} + + sigStr, err := createValidCascadeSignatureString(s.privKeys[0], 1) + require.NoError(t, err) + + commitment := actiontypes.AvailabilityCommitment{ + CommitmentType: "lep5/chunk-merkle/v1", + HashAlgo: actiontypes.HashAlgo_HASH_ALGO_BLAKE3, + ChunkSize: lep5ChunkSize, + TotalSize: uint64(numChunks) * uint64(lep5ChunkSize), + NumChunks: numChunks, + Root: root, + ChallengeIndices: challengeIndices, + } + commitmentJSON, err := json.Marshal(&commitment) + require.NoError(t, err) + + metadata := fmt.Sprintf( + `{"data_hash":"abc123","file_name":"file.txt","rq_ids_ic":1,"signatures":"%s","availability_commitment":%s}`, + sigStr, string(commitmentJSON), + ) + + msg := &actiontypes.MsgRequestAction{ + Creator: txCreator, + ActionType: actiontypes.ActionTypeCascade.String(), + Metadata: metadata, + Price: "100000ulume", + ExpirationTime: fmt.Sprintf("%d", time.Now().Add(10*time.Minute).Unix()), + } + + res, err := s.msgServer.RequestAction(s.ctx, msg) + require.NoError(t, err) + + // Build proofs but tamper with one. + chunkProofs := make([]*actiontypes.ChunkProof, 0, len(challengeIndices)) + for _, idx := range challengeIndices { + p, pErr := tree.GenerateProof(int(idx)) + require.NoError(t, pErr) + chunkProofs = append(chunkProofs, protoChunkProof(p)) + } + // Tamper first proof's leaf hash. 
+ chunkProofs[0].LeafHash[0] ^= 0xFF + + ids, err := generateValidCascadeIDs(sigStr, 1, 50) + require.NoError(t, err) + + finMeta := &actiontypes.CascadeMetadata{ + RqIdsIds: ids, + ChunkProofs: chunkProofs, + } + finMetaBytes, err := json.Marshal(finMeta) + require.NoError(t, err) + + finMsg := &actiontypes.MsgFinalizeAction{ + ActionId: res.ActionId, + Creator: snAccount, + ActionType: actiontypes.ActionTypeCascade.String(), + Metadata: string(finMetaBytes), + } + _, err = s.msgServer.FinalizeAction(s.ctx, finMsg) + require.Error(t, err) + require.Contains(t, err.Error(), "failed verification") +} + +func TestLEP5IntegrationTestSuite(t *testing.T) { + suite.Run(t, new(LEP5IntegrationTestSuite)) +} + +func protoChunkProof(p *merkle.Proof) *actiontypes.ChunkProof { + leaf := make([]byte, merkle.HashSize) + copy(leaf, p.LeafHash[:]) + + pathHashes := make([][]byte, 0, len(p.PathHashes)) + for _, h := range p.PathHashes { + b := make([]byte, merkle.HashSize) + copy(b, h[:]) + pathHashes = append(pathHashes, b) + } + + return &actiontypes.ChunkProof{ + ChunkIndex: p.ChunkIndex, + LeafHash: leaf, + PathHashes: pathHashes, + PathDirections: append([]bool(nil), p.PathDirections...), + } +} + diff --git a/tests/system/action/lep5_test.go b/tests/system/action/lep5_test.go new file mode 100644 index 00000000..9dbe19e0 --- /dev/null +++ b/tests/system/action/lep5_test.go @@ -0,0 +1,234 @@ +package action_test + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "os" + "testing" + "time" + + sdkmath "cosmossdk.io/math" + "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" + sdk "github.com/cosmos/cosmos-sdk/types" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + gogoproto "github.com/cosmos/gogoproto/proto" + "github.com/stretchr/testify/require" + + "github.com/LumeraProtocol/lumera/app" + "github.com/LumeraProtocol/lumera/tests/ibctesting" + 
"github.com/LumeraProtocol/lumera/x/action/v1/keeper" + "github.com/LumeraProtocol/lumera/x/action/v1/merkle" + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + sntypes "github.com/LumeraProtocol/lumera/x/supernode/v1/types" +) + +const systemChunkSize = uint32(262144) + +type actionSystemSuite struct { + app *app.App + sdkCtx sdk.Context + ctx context.Context +} + +func setupActionSystemSuite(t *testing.T) *actionSystemSuite { + os.Setenv("SYSTEM_TESTS", "true") + t.Cleanup(func() { os.Unsetenv("SYSTEM_TESTS") }) + + s := &actionSystemSuite{} + coord := ibctesting.NewCoordinator(t, 1) + chain := coord.GetChain(ibctesting.GetChainID(1)) + + a := chain.App.(*app.App) + s.app = a + s.sdkCtx = chain.GetContext() + s.ctx = sdk.WrapSDKContext(s.sdkCtx) + + // Create and bond a validator for supernode registration. + valPrivKey := secp256k1.GenPrivKey() + valPubKey := valPrivKey.PubKey() + valAddr := sdk.ValAddress(valPubKey.Address().Bytes()) + + validator, err := stakingtypes.NewValidator(valAddr.String(), valPubKey, stakingtypes.Description{}) + require.NoError(t, err) + validator.Status = stakingtypes.Bonded + validator.Tokens = sdkmath.NewInt(1_000_000) + a.StakingKeeper.SetValidator(s.sdkCtx, validator) + + // Set action module params with SVC settings. + params := actiontypes.DefaultParams() + params.ExpirationDuration = time.Minute + require.NoError(t, a.ActionKeeper.SetParams(s.sdkCtx, params)) + + return s +} + +// TestLEP5CascadeSystemFlow exercises the full LEP-5 register-with-commitment +// and finalize-with-proofs flow on a full chain started via ibctesting.Coordinator. 
+func TestLEP5CascadeSystemFlow(t *testing.T) { + s := setupActionSystemSuite(t) + + // --- Accounts --- + creatorPriv := secp256k1.GenPrivKey() + creatorAddr := sdk.AccAddress(creatorPriv.PubKey().Address()) + + snPriv := secp256k1.GenPrivKey() + snAddr := sdk.AccAddress(snPriv.PubKey().Address()) + snValAddr := sdk.ValAddress(snAddr) + + // Fund creator and register account with public key. + initCoins := sdk.NewCoins(sdk.NewInt64Coin("ulume", 1_000_000)) + require.NoError(t, s.app.BankKeeper.MintCoins(s.sdkCtx, actiontypes.ModuleName, initCoins)) + require.NoError(t, s.app.BankKeeper.SendCoinsFromModuleToAccount(s.sdkCtx, actiontypes.ModuleName, creatorAddr, initCoins)) + + creatorAcc := s.app.AuthKeeper.NewAccountWithAddress(s.sdkCtx, creatorAddr) + baseAcc := creatorAcc.(*authtypes.BaseAccount) + require.NoError(t, baseAcc.SetPubKey(creatorPriv.PubKey())) + s.app.AuthKeeper.SetAccount(s.sdkCtx, baseAcc) + + // Register a bonded validator and supernode for the SN account. + val, err := stakingtypes.NewValidator(snValAddr.String(), snPriv.PubKey(), stakingtypes.Description{}) + require.NoError(t, err) + val.Status = stakingtypes.Bonded + val.Tokens = sdkmath.NewInt(1_000_000) + s.app.StakingKeeper.SetValidator(s.sdkCtx, val) + + sn := sntypes.SuperNode{ + ValidatorAddress: snValAddr.String(), + SupernodeAccount: snAddr.String(), + Note: "1.0.0", + States: []*sntypes.SuperNodeStateRecord{{State: sntypes.SuperNodeStateActive}}, + PrevIpAddresses: []*sntypes.IPAddressHistory{{Address: "10.0.0.1"}}, + P2PPort: "4001", + } + require.NoError(t, s.app.SupernodeKeeper.SetSuperNode(s.sdkCtx, sn)) + + // --- Build Merkle tree --- + numChunks := uint32(8) + chunks := make([][]byte, numChunks) + for i := range chunks { + chunks[i] = []byte(fmt.Sprintf("sys-chunk-%d", i)) + } + + tree, err := merkle.BuildTree(chunks) + require.NoError(t, err) + + root := make([]byte, merkle.HashSize) + copy(root, tree.Root[:]) + + challengeIndices := []uint32{0, 1, 2, 3, 4, 5, 6, 7} + + // 
--- Register Cascade with commitment --- + commitment := actiontypes.AvailabilityCommitment{ + CommitmentType: "lep5/chunk-merkle/v1", + HashAlgo: actiontypes.HashAlgo_HASH_ALGO_BLAKE3, + ChunkSize: systemChunkSize, + TotalSize: uint64(numChunks) * uint64(systemChunkSize), + NumChunks: numChunks, + Root: root, + ChallengeIndices: challengeIndices, + } + commitmentJSON, err := json.Marshal(&commitment) + require.NoError(t, err) + + // Build a minimal valid signature for the registration metadata. + sigData := "c2lnLWRhdGE=" // base64("sig-data") + sig, err := creatorPriv.Sign([]byte(sigData)) + require.NoError(t, err) + + sigStr := fmt.Sprintf("%s.%s", sigData, base64.StdEncoding.EncodeToString(sig)) + + metadata := fmt.Sprintf( + `{"data_hash":"syshash","file_name":"sys.bin","rq_ids_ic":1,"signatures":"%s","availability_commitment":%s}`, + sigStr, string(commitmentJSON), + ) + + msgServer := keeper.NewMsgServerImpl(s.app.ActionKeeper) + res, err := msgServer.RequestAction(s.sdkCtx, &actiontypes.MsgRequestAction{ + Creator: creatorAddr.String(), + ActionType: actiontypes.ActionTypeCascade.String(), + Metadata: metadata, + Price: "100000ulume", + ExpirationTime: fmt.Sprintf("%d", s.sdkCtx.BlockTime().Add(10*time.Minute).Unix()), + }) + require.NoError(t, err) + require.NotEmpty(t, res.ActionId) + + // Verify pending state and stored commitment. 
+ action, found := s.app.ActionKeeper.GetActionByID(s.sdkCtx, res.ActionId) + require.True(t, found) + require.Equal(t, actiontypes.ActionStatePending, action.State) + + var storedMeta actiontypes.CascadeMetadata + require.NoError(t, gogoproto.Unmarshal(action.Metadata, &storedMeta)) + require.NotNil(t, storedMeta.AvailabilityCommitment) + require.Equal(t, root, storedMeta.AvailabilityCommitment.Root) + + // --- Finalize with valid chunk proofs --- + chunkProofs := make([]*actiontypes.ChunkProof, 0, len(challengeIndices)) + for _, idx := range challengeIndices { + p, pErr := tree.GenerateProof(int(idx)) + require.NoError(t, pErr) + chunkProofs = append(chunkProofs, toProtoChunkProof(p)) + } + + // Generate Kademlia IDs for CASCADE finalization. + ids := make([]string, 50) + for i := range ids { + id, kErr := keeper.CreateKademliaID(sigStr, uint64(1+i)) + require.NoError(t, kErr) + ids[i] = id + } + + finMeta := &actiontypes.CascadeMetadata{ + RqIdsIds: ids, + ChunkProofs: chunkProofs, + } + finMetaBytes, err := json.Marshal(finMeta) + require.NoError(t, err) + + s.sdkCtx = s.sdkCtx.WithEventManager(sdk.NewEventManager()) + + _, err = msgServer.FinalizeAction(s.sdkCtx, &actiontypes.MsgFinalizeAction{ + ActionId: res.ActionId, + Creator: snAddr.String(), + ActionType: actiontypes.ActionTypeCascade.String(), + Metadata: string(finMetaBytes), + }) + require.NoError(t, err) + + finalAction, found := s.app.ActionKeeper.GetActionByID(s.sdkCtx, res.ActionId) + require.True(t, found) + require.Equal(t, actiontypes.ActionStateDone, finalAction.State) + + // Verify SVC verification passed event. 
+ foundPassedEvent := false + for _, ev := range s.sdkCtx.EventManager().Events() { + if ev.Type == actiontypes.EventTypeSVCVerificationPassed { + foundPassedEvent = true + break + } + } + require.True(t, foundPassedEvent, "expected SVC verification passed event") +} + +func toProtoChunkProof(p *merkle.Proof) *actiontypes.ChunkProof { + leaf := make([]byte, merkle.HashSize) + copy(leaf, p.LeafHash[:]) + + pathHashes := make([][]byte, 0, len(p.PathHashes)) + for _, h := range p.PathHashes { + b := make([]byte, merkle.HashSize) + copy(b, h[:]) + pathHashes = append(pathHashes, b) + } + + return &actiontypes.ChunkProof{ + ChunkIndex: p.ChunkIndex, + LeafHash: leaf, + PathHashes: pathHashes, + PathDirections: append([]bool(nil), p.PathDirections...), + } +} diff --git a/tests/systemtests/go.mod b/tests/systemtests/go.mod index af8a812f..a544f3b6 100644 --- a/tests/systemtests/go.mod +++ b/tests/systemtests/go.mod @@ -49,6 +49,7 @@ require ( cosmossdk.io/schema v1.1.0 // indirect cosmossdk.io/store v1.1.2 // indirect cosmossdk.io/x/tx v0.14.0 // indirect + cosmossdk.io/x/upgrade v0.2.0 // indirect filippo.io/edwards25519 v1.1.0 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.2 // indirect @@ -58,7 +59,6 @@ require ( github.com/Microsoft/go-winio v0.6.2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/speakeasy v0.2.0 // indirect - github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce // indirect github.com/bytedance/gopkg v0.1.3 // indirect github.com/bytedance/sonic v1.14.2 // indirect github.com/bytedance/sonic/loader v0.4.0 // indirect @@ -75,6 +75,7 @@ require ( github.com/cosmos/btcutil v1.0.5 // indirect github.com/cosmos/cosmos-db v1.1.3 // indirect github.com/cosmos/go-bip39 v1.0.0 // indirect + github.com/cosmos/ibc-go/v10 v10.5.0 // indirect github.com/cosmos/ics23/go v0.11.0 // indirect github.com/cosmos/ledger-cosmos-go v0.16.0 // indirect 
github.com/danieljoos/wincred v1.2.2 // indirect @@ -111,7 +112,6 @@ require ( github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-metrics v0.5.4 // indirect github.com/hashicorp/go-plugin v1.6.3 // indirect - github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hashicorp/yamux v0.1.2 // indirect @@ -134,7 +134,6 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a // indirect github.com/oklog/run v1.1.0 // indirect - github.com/onsi/ginkgo v1.16.5 // indirect github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect @@ -177,6 +176,7 @@ require ( google.golang.org/protobuf v1.36.11 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.5.2 // indirect + lukechampine.com/blake3 v1.4.1 // indirect nhooyr.io/websocket v1.8.17 // indirect pgregory.net/rapid v1.2.0 // indirect sigs.k8s.io/yaml v1.6.0 // indirect diff --git a/tests/systemtests/go.sum b/tests/systemtests/go.sum index df5a502e..61d7fa40 100644 --- a/tests/systemtests/go.sum +++ b/tests/systemtests/go.sum @@ -1,3 +1,5 @@ +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -17,14 +19,27 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= 
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.120.0 h1:wc6bgG9DHyKqF5/vQvX1CiZrtHnxJjBlKUyF9nP6meA= +cloud.google.com/go v0.120.0/go.mod h1:/beW32s8/pGRuj4IILWQNd4uuebeT4dkOhKmkfit64Q= +cloud.google.com/go/auth v0.16.4 h1:fXOAIQmkApVvcIn7Pc2+5J8QTMVbUGLscnSVNl11su8= +cloud.google.com/go/auth v0.16.4/go.mod h1:j10ncYwjX/g3cdX7GpEzsdM+d+ZNsXAbb6qXA7p1Y5M= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v1.38.0 h1:MilCLYQW2m7Dku8hRIIKo4r0oKastlD74sSu16riYKs= +cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= +cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= +cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= +cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= +cloud.google.com/go/monitoring 
v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -35,6 +50,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.50.0 h1:3TbVkzTooBvnZsk7WaAQfOsNrdoM8QHusXA1cpk6QJs= +cloud.google.com/go/storage v1.50.0/go.mod h1:l7XeiD//vx5lfqE3RavfmU9yvk5Pp0Zhcv482poyafY= cosmossdk.io/api v0.9.2 h1:9i9ptOBdmoIEVEVWLtYYHjxZonlF/aOVODLFaxpmNtg= cosmossdk.io/api v0.9.2/go.mod h1:CWt31nVohvoPMTlPv+mMNCtC0a7BqRdESjCsstHcTkU= cosmossdk.io/collections v1.3.1 h1:09e+DUId2brWsNOQ4nrk+bprVmMUaDH9xvtZkeqIjVw= @@ -71,6 +88,12 @@ github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bp github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.5.7 h1:ybO8RBeh29qrxIhCA9E8gKY6xfONU9T6G6aP9DTKfLE= github.com/DataDog/zstd v1.5.7/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 h1:5IT7xOdq17MtcdtL/vtl6mGfzhaq4m4vpollPRmlsBQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0/go.mod 
h1:ZV4VOm0/eHR06JLrXWe09068dHpr3TRpY9Uo7T+anuA= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 h1:ig/FpDD2JofP/NExKQUbn7uOSZzJAQqogfqluZK4ed4= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= @@ -84,7 +107,6 @@ github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrd github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/adlio/schema v1.3.6 h1:k1/zc2jNfeiZBA5aFTRy37jlBIuCkXCm0XmvpzCKI9I= github.com/adlio/schema v1.3.6/go.mod h1:qkxwLgPBd1FgLRHYVCmQT/rrBr3JH38J9LjmVzWNudg= -github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -101,32 +123,26 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.49.0 h1:g9BkW1fo9GqKfwg2+zCD+TW/D36Ux+vtfJ8guF4AYmY= +github.com/aws/aws-sdk-go v1.49.0/go.mod 
h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bgentry/speakeasy v0.2.0 h1:tgObeVOf8WAvtuAX6DhJ4xks4CFNwPDZiqzGqIHE51E= github.com/bgentry/speakeasy v0.2.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bits-and-blooms/bitset v1.24.3 h1:Bte86SlO3lwPQqww+7BE9ZuUCKIjfqnG5jtEyqA9y9Y= github.com/bits-and-blooms/bitset v1.24.3/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= -github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= -github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd/btcec/v2 v2.3.5 h1:dpAlnAwmT1yIBm3exhT1/8iUSD98RDJM5vqJVQDQLiU= github.com/btcsuite/btcd/btcec/v2 v2.3.5/go.mod h1:m22FrOAiuxl/tht9wIqAoGHcbnCCaPWyauO8y2LGGtQ= github.com/btcsuite/btcd/btcutil v1.1.6 h1:zFL2+c3Lb9gEgqKNzowKUPQNb8jV7v5Oaodi/AYFd6c= github.com/btcsuite/btcd/btcutil v1.1.6/go.mod h1:9dFymx8HpuLqBnsPELrImQeTQfKBQqzqGbbV3jK55aE= -github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= -github.com/btcsuite/btcutil 
v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= -github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce h1:YtWJF7RHm2pYCvA5t0RPmAaLUhREsKuKd+SLhxFbFeQ= -github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o= -github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= -github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= -github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= -github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= -github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/bufbuild/protoc-gen-validate v1.3.0 h1:0lq2b9qA1uzfVnMW6oFJepiVVihDOOzj+VuTGSX4EgE= github.com/bufbuild/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA= github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw= github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= @@ -165,6 +181,8 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f h1:Y8xYupdHxryycyPlc9Y+bSQAYZnetRJ70VMVKm5CKI0= +github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f/go.mod h1:HlzOvOjVBOfTGSRXRyY0OiCS/3J1akRGQQpRO/7zyF4= github.com/cockroachdb/apd/v2 v2.0.2 
h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E= github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= @@ -226,7 +244,6 @@ github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7Do github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/danieljoos/wincred v1.2.2 h1:774zMFJrqaeYCK2W57BgAem/MLi6mtSE47MB6BOJ0i0= github.com/danieljoos/wincred v1.2.2/go.mod h1:w7w4Utbrz8lqeMbDAK0lkNJUv5sAOkFi7nd/ogr0Uh8= -github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -267,7 +284,12 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329 h1:K+fnvUM0VZ7ZFJf0n4L/BRlnsb9pL/GuDG6FqaH+PwM= +github.com/envoyproxy/go-control-plane/envoy v1.35.0 h1:ixjkELDE+ru6idPxcHLj8LBVc2bFP7iBytj353BoHUo= +github.com/envoyproxy/go-control-plane/envoy v1.35.0/go.mod h1:09qwbGVuSWWAyN5t/b3iyVfz5+z8QWGrzkoqm/8SbEs= github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA= 
+github.com/ethereum/go-ethereum v1.15.11 h1:JK73WKeu0WC0O1eyX+mdQAVHUV+UR1a9VB/domDngBU= +github.com/ethereum/go-ethereum v1.15.11/go.mod h1:mf8YiHIb0GR4x4TipcvBUPxJLw1mFdmxzoDi11sDRoI= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= @@ -296,6 +318,8 @@ github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3Bop github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= +github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= @@ -322,7 +346,6 @@ github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1 github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= github.com/go-viper/mapstructure/v2 v2.4.0/go.mod 
h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= @@ -425,12 +448,18 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= +github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= +github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod 
h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= @@ -459,6 +488,10 @@ github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyN github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-getter v1.7.9 h1:G9gcjrDixz7glqJ+ll5IWvggSBR+R0B54DSRt4qfdC4= +github.com/hashicorp/go-getter v1.7.9/go.mod h1:dyFCmT1AQkDfOIt9NH8pw9XBDqNrIKJT5ylbpi7zPNE= github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= @@ -472,6 +505,8 @@ github.com/hashicorp/go-plugin v1.6.3 h1:xgHB+ZUSYeuJi96WtxEjzi23uh7YQpznjGh0U0U github.com/hashicorp/go-plugin v1.6.3/go.mod h1:MRobyh+Wc/nYy1V4KAXUiYfzxoYhs7V1mlH1Z7iY2h0= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= +github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -479,6 +514,8 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod 
h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -494,6 +531,8 @@ github.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8 github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns= github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= +github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA= +github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c= github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U= @@ -510,15 +549,15 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod 
h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jhump/protoreflect v1.17.0 h1:qOEr613fac2lOuTgWN4tPAtLL7fUSbuJL5X5XumQh94= github.com/jhump/protoreflect v1.17.0/go.mod h1:h9+vUUL38jiBzck8ck+6G/aeMX8Z4QUY/NiJPwPNi+8= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -536,7 +575,6 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.7/go.mod 
h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= @@ -590,6 +628,8 @@ github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= @@ -619,7 +659,6 @@ github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a h1:dlRvE5fWabOchtH7znfiFCcOvmIYgOeAS5ifBXBlh9Q= @@ -679,6 +718,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= 
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -771,6 +812,8 @@ github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= +github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= +github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= @@ -818,6 +861,8 @@ github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ulikunitz/xz v0.5.14 h1:uv/0Bq533iFdnMHZdRBTOlaNMdb1+ZxXIlHDZHIHcvg= +github.com/ulikunitz/xz 
v0.5.14/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= @@ -848,6 +893,12 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/detectors/gcp v1.38.0 h1:ZoYbqX7OaA/TAikspPl3ozPI6iY6LiIY9I8cUfm+pJs= +go.opentelemetry.io/contrib/detectors/gcp v1.38.0/go.mod h1:SU+iU7nu5ud4oCb3LQOhIZ3nRLj6FNVrKgtflbaf2ts= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= @@ -885,7 +936,6 @@ go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/arch v0.17.0 h1:4O3dfLzd+lQewptAHqjewQZQDyEdejz3VwgeYwkZneU= golang.org/x/arch v0.17.0/go.mod 
h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk= -golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -894,7 +944,6 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= @@ -1012,6 +1061,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY= +golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync 
v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1081,7 +1132,6 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1143,6 +1193,8 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1196,7 +1248,6 @@ golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82u golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1231,6 +1282,8 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513 google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.247.0 h1:tSd/e0QrUlLsrwMKmkbQhYVa109qIintOls2Wh6bngc= +google.golang.org/api v0.247.0/go.mod h1:r1qZOPmxXffXg6xS5uhx16Fa/UFY8QU/K4bfKrnvovM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1371,6 +1424,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod 
h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= +lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk= pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/tests/systemtests/lep5_action_test.go b/tests/systemtests/lep5_action_test.go new file mode 100644 index 00000000..c0f8b529 --- /dev/null +++ b/tests/systemtests/lep5_action_test.go @@ -0,0 +1,107 @@ +//go:build system_test + +package system + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" + + lcfg "github.com/LumeraProtocol/lumera/config" + "github.com/LumeraProtocol/lumera/x/action/v1/merkle" + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" +) + +// TestLEP5ActionParamsQuery verifies that the action module params endpoint +// returns a valid response that includes the SVC-related fields. +func TestLEP5ActionParamsQuery(t *testing.T) { + sut.ResetChain(t) + sut.StartChain(t) + + cli := NewLumeradCLI(t, sut, verbose) + + result := cli.CustomQuery("q", "action", "params") + t.Logf("Action params: %s", result) + + // The params response must include the action module fields. 
+ require.True(t, gjson.Get(result, "params").Exists(), "params key must exist") + require.True(t, gjson.Get(result, "params.base_action_fee").Exists(), "base_action_fee must exist") + require.True(t, gjson.Get(result, "params.expiration_duration").Exists(), "expiration_duration must exist") +} + +// TestLEP5CascadeRegisterWithCommitment verifies that a Cascade action can be +// registered with an AvailabilityCommitment via CLI and then queried. +func TestLEP5CascadeRegisterWithCommitment(t *testing.T) { + sut.ResetChain(t) + sut.StartChain(t) + + cli := NewLumeradCLI(t, sut, verbose) + + // Fund the test account. + account := cli.GetKeyAddr("node0") + require.NotEmpty(t, account) + + // Build a Merkle tree from 8 chunks. + numChunks := uint32(8) + chunkSize := uint32(262144) + chunks := make([][]byte, numChunks) + for i := range chunks { + chunks[i] = []byte(fmt.Sprintf("systest-chunk-%d", i)) + } + + tree, err := merkle.BuildTree(chunks) + require.NoError(t, err) + + root := make([]byte, merkle.HashSize) + copy(root, tree.Root[:]) + + challengeIndices := []uint32{0, 1, 2, 3, 4, 5, 6, 7} + + commitment := actiontypes.AvailabilityCommitment{ + CommitmentType: "lep5/chunk-merkle/v1", + HashAlgo: actiontypes.HashAlgo_HASH_ALGO_BLAKE3, + ChunkSize: chunkSize, + TotalSize: uint64(numChunks) * uint64(chunkSize), + NumChunks: numChunks, + Root: root, + ChallengeIndices: challengeIndices, + } + commitmentJSON, err := json.Marshal(&commitment) + require.NoError(t, err) + + // Build a valid signature: base64(data).base64(sig). 
+ sigData := base64.StdEncoding.EncodeToString([]byte("rqid-1")) + + expirationTime := fmt.Sprintf("%d", time.Now().Add(10*time.Minute).Unix()) + + metadata := fmt.Sprintf( + `{"data_hash":"abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890","file_name":"test.bin","rq_ids_ic":1,"signatures":"%s.fake","availability_commitment":%s}`, + sigData, string(commitmentJSON), + ) + + price := fmt.Sprintf("100000%s", lcfg.ChainDenom) + + // Submit the request-action transaction. + resp := cli.CustomCommand( + "tx", "action", "request-action", + "ACTION_TYPE_CASCADE", + metadata, + price, + expirationTime, + "--from", "node0", + ) + t.Logf("Request action response: %s", resp) + + // The tx may succeed or fail depending on signature validation. + // For the systemex test, we verify the CLI can construct and submit + // the transaction with LEP-5 commitment fields without crashing. + // A full E2E flow requires proper key-based signatures which the + // devnet tests (S08) cover. + txCode := gjson.Get(resp, "code") + t.Logf("TX code: %s", txCode.String()) +} diff --git a/x/action/v1/keeper/action_cascade.go b/x/action/v1/keeper/action_cascade.go index 88776ce3..901e871d 100644 --- a/x/action/v1/keeper/action_cascade.go +++ b/x/action/v1/keeper/action_cascade.go @@ -17,6 +17,90 @@ import ( gogoproto "github.com/cosmos/gogoproto/proto" ) +const ( + cascadeCommitmentType = "lep5/chunk-merkle/v1" + cascadeCommitmentMaxChunkSize = uint32(262144) // 256 KiB — default / ceiling + cascadeCommitmentMinChunkSize = uint32(1) // 1 byte — floor + cascadeCommitmentRootSize = 32 + cascadeCommitmentMinTotalSize = uint64(4) // reject trivially tiny files (< 4 bytes) +) + +var cascadeCommitmentHashAlgo = actiontypes.HashAlgo_HASH_ALGO_BLAKE3 + +// isPowerOf2 returns true if v is a positive power of two. 
+func isPowerOf2(v uint32) bool { + return v > 0 && (v&(v-1)) == 0 +} + +func validateAvailabilityCommitment(commitment *actiontypes.AvailabilityCommitment, challengeCount, minChunks uint32) error { + if commitment == nil { + return nil + } + + if commitment.CommitmentType != cascadeCommitmentType { + return fmt.Errorf("availability_commitment.commitment_type must be %q", cascadeCommitmentType) + } + if commitment.HashAlgo != cascadeCommitmentHashAlgo { + return fmt.Errorf("availability_commitment.hash_algo must be %q", cascadeCommitmentHashAlgo) + } + + // Reject trivially tiny files. + if commitment.TotalSize < cascadeCommitmentMinTotalSize { + return fmt.Errorf("availability_commitment.total_size must be >= %d bytes, got %d", + cascadeCommitmentMinTotalSize, commitment.TotalSize) + } + + // chunk_size must be a power of 2 in [minChunkSize, maxChunkSize]. + if !isPowerOf2(commitment.ChunkSize) { + return fmt.Errorf("availability_commitment.chunk_size must be a power of 2, got %d", commitment.ChunkSize) + } + if commitment.ChunkSize < cascadeCommitmentMinChunkSize || commitment.ChunkSize > cascadeCommitmentMaxChunkSize { + return fmt.Errorf("availability_commitment.chunk_size must be in [%d, %d], got %d", + cascadeCommitmentMinChunkSize, cascadeCommitmentMaxChunkSize, commitment.ChunkSize) + } + + expectedNumChunks := uint32((commitment.TotalSize + uint64(commitment.ChunkSize) - 1) / uint64(commitment.ChunkSize)) + if commitment.NumChunks != expectedNumChunks { + return fmt.Errorf("availability_commitment.num_chunks must be %d for total_size %d and chunk_size %d", expectedNumChunks, commitment.TotalSize, commitment.ChunkSize) + } + + // Unconditionally enforce minimum chunk count. The client MUST pick a + // chunk_size that yields >= minChunks chunks (default 4). 
+ if commitment.NumChunks < minChunks { + return fmt.Errorf("availability_commitment.num_chunks %d is below minimum %d: file of %d bytes must produce at least %d chunks (reduce chunk_size)", + commitment.NumChunks, minChunks, commitment.TotalSize, minChunks) + } + + if len(commitment.Root) != cascadeCommitmentRootSize { + return fmt.Errorf("availability_commitment.root must be %d bytes", cascadeCommitmentRootSize) + } + + // Validate challenge indices when the file is large enough for SVC. + if commitment.NumChunks >= minChunks { + expectedIndices := challengeCount + if expectedIndices > commitment.NumChunks { + expectedIndices = commitment.NumChunks + } + if uint32(len(commitment.ChallengeIndices)) != expectedIndices { + return fmt.Errorf("availability_commitment.challenge_indices must have %d entries, got %d", + expectedIndices, len(commitment.ChallengeIndices)) + } + seen := make(map[uint32]bool, len(commitment.ChallengeIndices)) + for i, idx := range commitment.ChallengeIndices { + if idx >= commitment.NumChunks { + return fmt.Errorf("availability_commitment.challenge_indices[%d] = %d is out of range [0, %d)", + i, idx, commitment.NumChunks) + } + if seen[idx] { + return fmt.Errorf("availability_commitment.challenge_indices[%d] = %d is a duplicate", i, idx) + } + seen[idx] = true + } + } + + return nil +} + // CascadeActionHandler implements the ActionHandler interface for Cascade actions type CascadeActionHandler struct { keeper *Keeper // Reference to the keeper for logger and other services @@ -114,6 +198,11 @@ func (h CascadeActionHandler) RegisterAction(ctx sdk.Context, action *actiontype if err := gogoproto.Unmarshal(action.Metadata, &cascadeMeta); err != nil { return errors.Wrap(actiontypes.ErrInvalidMetadata, fmt.Sprintf("failed to unmarshal cascade metadata: %v", err)) } + params := h.keeper.GetParams(ctx) + challengeCount, minChunks := GetSVCParams(params) + if err := validateAvailabilityCommitment(cascadeMeta.AvailabilityCommitment, challengeCount, 
minChunks); err != nil { + return errors.Wrap(actiontypes.ErrInvalidMetadata, err.Error()) + } // Validate Signature. Signature field contains: `Base64(rq_ids).creators_signature` // Where `creators_signature` is the signature of the creator over `Base64(rq_ids)` @@ -186,6 +275,10 @@ func (h CascadeActionHandler) FinalizeAction(ctx sdk.Context, action *actiontype return actiontypes.ActionStateUnspecified, nil } + if err := h.keeper.VerifyChunkProofs(ctx, action, superNodeAccount, newCascadeMeta.GetChunkProofs()); err != nil { + return actiontypes.ActionStateUnspecified, err + } + // Cascade actions are finalized with a single supernode // Return DONE state since all validations passed return actiontypes.ActionStateDone, nil @@ -220,6 +313,8 @@ func (h CascadeActionHandler) GetUpdatedMetadata(ctx sdk.Context, existingMetada Signatures: existingMetadata.GetSignatures(), RqIdsIds: newMetadata.GetRqIdsIds(), Public: existingMetadata.GetPublic(), + AvailabilityCommitment: existingMetadata.GetAvailabilityCommitment(), + ChunkProofs: newMetadata.GetChunkProofs(), } return gogoproto.Marshal(updatedMetadata) diff --git a/x/action/v1/keeper/cascade_commitment_test.go b/x/action/v1/keeper/cascade_commitment_test.go new file mode 100644 index 00000000..0a708514 --- /dev/null +++ b/x/action/v1/keeper/cascade_commitment_test.go @@ -0,0 +1,389 @@ +package keeper_test + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/suite" + + "github.com/LumeraProtocol/lumera/x/action/v1/keeper" + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + "github.com/cosmos/gogoproto/jsonpb" + gogoproto "github.com/cosmos/gogoproto/proto" +) + +const ( + testCommitmentType = "lep5/chunk-merkle/v1" + testCommitmentChunkSize = uint32(262144) +) + +var testCommitmentHashAlgo = actiontypes.HashAlgo_HASH_ALGO_BLAKE3 + +type CascadeCommitmentValidationSuite struct { + KeeperTestSuite +} + +func TestCascadeCommitmentValidationSuite(t *testing.T) { + suite.Run(t, 
new(CascadeCommitmentValidationSuite)) +} + +func makeValidAvailabilityCommitment(totalSize uint64) *actiontypes.AvailabilityCommitment { + return makeValidAvailabilityCommitmentWithChunkSize(totalSize, testCommitmentChunkSize) +} + +func makeValidAvailabilityCommitmentWithChunkSize(totalSize uint64, chunkSize uint32) *actiontypes.AvailabilityCommitment { + numChunks := uint32(0) + if totalSize > 0 { + numChunks = uint32((totalSize + uint64(chunkSize) - 1) / uint64(chunkSize)) + } + + return &actiontypes.AvailabilityCommitment{ + CommitmentType: testCommitmentType, + HashAlgo: testCommitmentHashAlgo, + ChunkSize: chunkSize, + TotalSize: totalSize, + NumChunks: numChunks, + Root: bytes.Repeat([]byte{0xAB}, 32), + } +} + +// makeValidAvailabilityCommitmentWithIndices builds a commitment for a file +// whose chunk count is >= minChunks (4), including valid challenge indices. +// Uses the default 256 KiB chunk size. +func makeValidAvailabilityCommitmentWithIndices(numChunks uint32) *actiontypes.AvailabilityCommitment { + totalSize := uint64(numChunks) * uint64(testCommitmentChunkSize) + c := makeValidAvailabilityCommitment(totalSize) + + // Default challenge count is 8; use min(8, numChunks). 
+ count := uint32(8) + if count > numChunks { + count = numChunks + } + c.ChallengeIndices = make([]uint32, count) + for i := uint32(0); i < count; i++ { + c.ChallengeIndices[i] = i + } + return c +} + +func (suite *CascadeCommitmentValidationSuite) requestCascadeAction(metadata *actiontypes.CascadeMetadata) (*actiontypes.MsgRequestActionResponse, error) { + msgServer := keeper.NewMsgServerImpl(suite.keeper) + + var metadataJSON bytes.Buffer + marshaler := &jsonpb.Marshaler{} + err := marshaler.Marshal(&metadataJSON, metadata) + suite.Require().NoError(err) + + msg := &actiontypes.MsgRequestAction{ + Creator: suite.creatorAddress.String(), + ActionType: actiontypes.ActionTypeCascade.String(), + Metadata: metadataJSON.String(), + Price: "100000ulume", + FileSizeKbs: "123", + } + + return msgServer.RequestAction(suite.ctx, msg) +} + +// AT09 +func (suite *CascadeCommitmentValidationSuite) TestRegistrationWithValidCommitmentSucceedsAndStoresCommitment() { + // Use 4 chunks at 262144 = 1 MiB — satisfies num_chunks >= 4 enforcement. + commitment := makeValidAvailabilityCommitmentWithIndices(4) + + inputMetadata := &actiontypes.CascadeMetadata{ + DataHash: "test_hash", + FileName: "test_file", + RqIdsIc: 20, + Signatures: suite.signatureCascade, + AvailabilityCommitment: commitment, + } + + res, err := suite.requestCascadeAction(inputMetadata) + suite.Require().NoError(err) + suite.Require().NotNil(res) + + storedAction, found := suite.keeper.GetActionByID(suite.ctx, res.ActionId) + suite.Require().True(found) + + var storedMetadata actiontypes.CascadeMetadata + err = gogoproto.Unmarshal(storedAction.Metadata, &storedMetadata) + suite.Require().NoError(err) + + suite.Require().NotNil(storedMetadata.AvailabilityCommitment) + suite.Equal(inputMetadata.AvailabilityCommitment, storedMetadata.AvailabilityCommitment) +} + +// AT09b — registration with a small file using a reduced chunk_size to satisfy minimum chunks. 
+func (suite *CascadeCommitmentValidationSuite) TestRegistrationWithSmallFileAndReducedChunkSizeSucceeds() { + // 500 KiB file (512000 bytes). At 262144 chunk_size → 2 chunks (would be rejected). + // At 131072 chunk_size → 4 chunks → accepted. + totalSize := uint64(512000) + chunkSize := uint32(131072) + numChunks := uint32((totalSize + uint64(chunkSize) - 1) / uint64(chunkSize)) // = 4 + challengeCount := numChunks + if challengeCount > 8 { + challengeCount = 8 + } + indices := make([]uint32, challengeCount) + for i := uint32(0); i < challengeCount; i++ { + indices[i] = i + } + + commitment := &actiontypes.AvailabilityCommitment{ + CommitmentType: testCommitmentType, + HashAlgo: testCommitmentHashAlgo, + ChunkSize: chunkSize, + TotalSize: totalSize, + NumChunks: numChunks, + Root: bytes.Repeat([]byte{0xAB}, 32), + ChallengeIndices: indices, + } + + inputMetadata := &actiontypes.CascadeMetadata{ + DataHash: "test_hash", + FileName: "test_file", + RqIdsIc: 20, + Signatures: suite.signatureCascade, + AvailabilityCommitment: commitment, + } + + res, err := suite.requestCascadeAction(inputMetadata) + suite.Require().NoError(err, "500 KiB file with 131072 chunk_size should succeed") + suite.Require().NotNil(res) +} + +// AT09c — 4-byte file with 1-byte chunk size produces exactly 4 chunks. 
+func (suite *CascadeCommitmentValidationSuite) TestRegistrationWith4ByteFileAnd1ByteChunkSizeSucceeds() { + totalSize := uint64(4) + chunkSize := uint32(1) + numChunks := uint32(4) // ceil(4 / 1) = 4 + challengeCount := numChunks + if challengeCount > 8 { + challengeCount = 8 + } + indices := make([]uint32, challengeCount) + for i := uint32(0); i < challengeCount; i++ { + indices[i] = i + } + + commitment := &actiontypes.AvailabilityCommitment{ + CommitmentType: testCommitmentType, + HashAlgo: testCommitmentHashAlgo, + ChunkSize: chunkSize, + TotalSize: totalSize, + NumChunks: numChunks, + Root: bytes.Repeat([]byte{0xAB}, 32), + ChallengeIndices: indices, + } + + inputMetadata := &actiontypes.CascadeMetadata{ + DataHash: "test_hash", + FileName: "test_file", + RqIdsIc: 20, + Signatures: suite.signatureCascade, + AvailabilityCommitment: commitment, + } + + res, err := suite.requestCascadeAction(inputMetadata) + suite.Require().NoError(err, "4-byte file with 1-byte chunk_size should succeed") + suite.Require().NotNil(res) +} + +// AT10 +func (suite *CascadeCommitmentValidationSuite) TestRegistrationWithInvalidCommitmentRejected() { + testCases := []struct { + name string + mutate func(*actiontypes.AvailabilityCommitment) + errorText string + }{ + { + name: "invalid commitment_type", + mutate: func(c *actiontypes.AvailabilityCommitment) { + c.CommitmentType = "invalid/type" + }, + errorText: "availability_commitment.commitment_type", + }, + { + name: "invalid num_chunks", + mutate: func(c *actiontypes.AvailabilityCommitment) { + c.NumChunks++ + }, + errorText: "availability_commitment.num_chunks", + }, + { + name: "non-power-of-2 chunk_size", + mutate: func(c *actiontypes.AvailabilityCommitment) { + c.ChunkSize = 3000 // not a power of 2 + }, + errorText: "power of 2", + }, + { + name: "chunk_size above maximum (524288 > 262144)", + mutate: func(c *actiontypes.AvailabilityCommitment) { + c.ChunkSize = 524288 + }, + errorText: "must be in", + }, + { + name: 
"chunk_size too large for file to produce 4 chunks", + mutate: func(c *actiontypes.AvailabilityCommitment) { + // 500 KiB file with 262144 chunk_size → 2 chunks < 4 → rejected. + c.TotalSize = 512000 + c.ChunkSize = 262144 + c.NumChunks = 2 + }, + errorText: "below minimum", + }, + { + name: "total_size below minimum (< 4 bytes)", + mutate: func(c *actiontypes.AvailabilityCommitment) { + c.TotalSize = 3 + c.ChunkSize = 1024 + c.NumChunks = 1 + }, + errorText: "total_size must be >= 4", + }, + } + + for _, tc := range testCases { + suite.Run(tc.name, func() { + // Use 4 chunks (1 MiB) as the base valid commitment. + commitment := makeValidAvailabilityCommitmentWithIndices(4) + tc.mutate(commitment) + + metadata := &actiontypes.CascadeMetadata{ + DataHash: "test_hash", + FileName: "test_file", + RqIdsIc: 20, + Signatures: suite.signatureCascade, + AvailabilityCommitment: commitment, + } + + res, err := suite.requestCascadeAction(metadata) + suite.Error(err) + suite.Nil(res) + suite.ErrorIs(err, actiontypes.ErrInvalidMetadata) + suite.ErrorContains(err, tc.errorText) + }) + } +} + +// AT11 +func (suite *CascadeCommitmentValidationSuite) TestRegistrationWithoutCommitmentStillSucceeds() { + inputMetadata := &actiontypes.CascadeMetadata{ + DataHash: "test_hash", + FileName: "test_file", + RqIdsIc: 20, + Signatures: suite.signatureCascade, + } + + res, err := suite.requestCascadeAction(inputMetadata) + suite.Require().NoError(err) + suite.Require().NotNil(res) + + storedAction, found := suite.keeper.GetActionByID(suite.ctx, res.ActionId) + suite.Require().True(found) + + var storedMetadata actiontypes.CascadeMetadata + err = gogoproto.Unmarshal(storedAction.Metadata, &storedMetadata) + suite.Require().NoError(err) + + suite.Nil(storedMetadata.AvailabilityCommitment) +} + +// AT16 – registration with valid challenge indices (>= minChunks). 
+func (suite *CascadeCommitmentValidationSuite) TestRegistrationWithValidChallengeIndicesSucceeds() { + testCases := []struct { + name string + numChunks uint32 + }{ + {"5 chunks (fewer than 8, expects 5 indices)", 5}, + {"8 chunks (exactly 8 indices)", 8}, + {"10 chunks (capped at 8 indices)", 10}, + } + for _, tc := range testCases { + suite.Run(tc.name, func() { + commitment := makeValidAvailabilityCommitmentWithIndices(tc.numChunks) + + inputMetadata := &actiontypes.CascadeMetadata{ + DataHash: "test_hash", + FileName: "test_file", + RqIdsIc: 20, + Signatures: suite.signatureCascade, + AvailabilityCommitment: commitment, + } + + res, err := suite.requestCascadeAction(inputMetadata) + suite.Require().NoError(err, "numChunks=%d", tc.numChunks) + suite.Require().NotNil(res) + }) + } +} + +// AT17 – invalid challenge indices are rejected at registration. +func (suite *CascadeCommitmentValidationSuite) TestRegistrationWithInvalidChallengeIndicesRejected() { + testCases := []struct { + name string + mutate func(*actiontypes.AvailabilityCommitment) + errorText string + }{ + { + name: "zero indices when file has enough chunks", + mutate: func(c *actiontypes.AvailabilityCommitment) { + c.ChallengeIndices = nil + }, + errorText: "challenge_indices must have", + }, + { + name: "too many indices", + mutate: func(c *actiontypes.AvailabilityCommitment) { + c.ChallengeIndices = append(c.ChallengeIndices, 9) + }, + errorText: "challenge_indices must have", + }, + { + name: "too few indices", + mutate: func(c *actiontypes.AvailabilityCommitment) { + c.ChallengeIndices = c.ChallengeIndices[:len(c.ChallengeIndices)-1] + }, + errorText: "challenge_indices must have", + }, + { + name: "index out of range", + mutate: func(c *actiontypes.AvailabilityCommitment) { + c.ChallengeIndices[0] = c.NumChunks // == numChunks, out of [0, numChunks) + }, + errorText: "out of range", + }, + { + name: "duplicate index", + mutate: func(c *actiontypes.AvailabilityCommitment) { + 
c.ChallengeIndices[1] = c.ChallengeIndices[0] + }, + errorText: "duplicate", + }, + } + + for _, tc := range testCases { + suite.Run(tc.name, func() { + // 10 chunks → expects exactly 8 indices by default. + commitment := makeValidAvailabilityCommitmentWithIndices(10) + tc.mutate(commitment) + + metadata := &actiontypes.CascadeMetadata{ + DataHash: "test_hash", + FileName: "test_file", + RqIdsIc: 20, + Signatures: suite.signatureCascade, + AvailabilityCommitment: commitment, + } + + res, err := suite.requestCascadeAction(metadata) + suite.Error(err) + suite.Nil(res) + suite.ErrorIs(err, actiontypes.ErrInvalidMetadata) + suite.ErrorContains(err, tc.errorText) + }) + } +} + diff --git a/x/action/v1/keeper/finalize_svc_test.go b/x/action/v1/keeper/finalize_svc_test.go new file mode 100644 index 00000000..190809da --- /dev/null +++ b/x/action/v1/keeper/finalize_svc_test.go @@ -0,0 +1,113 @@ +package keeper_test + +import ( + "testing" + + keepertest "github.com/LumeraProtocol/lumera/testutil/keeper" + "github.com/LumeraProtocol/lumera/testutil/cryptotestutils" + "github.com/LumeraProtocol/lumera/x/action/v1/merkle" + actionkeeper "github.com/LumeraProtocol/lumera/x/action/v1/keeper" + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + supernodemocks "github.com/LumeraProtocol/lumera/x/supernode/v1/mocks" + sntypes "github.com/LumeraProtocol/lumera/x/supernode/v1/types" + "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" + sdk "github.com/cosmos/cosmos-sdk/types" + gogoproto "github.com/cosmos/gogoproto/proto" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +func TestFinalizeCascade_WithValidChunkProofs_SetsDone(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + creatorKey, creatorAddr := cryptotestutils.KeyAndAddress() + _, supernodeAddr := cryptotestutils.KeyAndAddress() + + k, ctx := keepertest.ActionKeeperWithAddress(t, ctrl, []keepertest.AccountPair{ + {Address: creatorAddr, PubKey: 
creatorKey.PubKey()}, + }) + + ctx = ctx.WithBlockHeight(42).WithEventManager(sdk.NewEventManager()) + + signatureCascade, err := cryptotestutils.CreateSignatureString([]secp256k1.PrivKey{creatorKey}, 50) + require.NoError(t, err) + + chunks := [][]byte{[]byte("C0"), []byte("C1"), []byte("C2"), []byte("C3")} + tree, err := merkle.BuildTree(chunks) + require.NoError(t, err) + + // Client-picked challenge indices stored at registration. + challengeIndices := []uint32{0, 1, 2, 3} + + root := append([]byte(nil), tree.Root[:]...) + registerMeta := &actiontypes.CascadeMetadata{ + DataHash: "hash", + FileName: "file.bin", + RqIdsIc: 20, + RqIdsMax: 50, + Signatures: signatureCascade, + AvailabilityCommitment: &actiontypes.AvailabilityCommitment{ + CommitmentType: "lep5/chunk-merkle/v1", + HashAlgo: actiontypes.HashAlgo_HASH_ALGO_BLAKE3, + ChunkSize: svcChunkSize, + TotalSize: uint64(4) * uint64(svcChunkSize), + NumChunks: 4, + Root: root, + ChallengeIndices: challengeIndices, + }, + } + + registerMetaBz, err := gogoproto.Marshal(registerMeta) + require.NoError(t, err) + + action := &actiontypes.Action{ + Creator: creatorAddr.String(), + ActionType: actiontypes.ActionTypeCascade, + Price: "100000ulume", + Metadata: registerMetaBz, + } + + _, err = k.RegisterAction(ctx, action) + require.NoError(t, err) + + mockQuery, ok := k.GetSupernodeQueryServer().(*supernodemocks.MockQueryServer) + require.True(t, ok) + mockQuery.EXPECT(). + GetTopSuperNodesForBlock(gomock.AssignableToTypeOf(ctx), gomock.AssignableToTypeOf(&sntypes.QueryGetTopSuperNodesForBlockRequest{})). + Return(&sntypes.QueryGetTopSuperNodesForBlockResponse{Supernodes: []*sntypes.SuperNode{{SupernodeAccount: supernodeAddr.String()}}}, nil). + Times(1) + + // Generate proofs matching the stored challenge indices. 
+ proofs := make([]*actiontypes.ChunkProof, 0, len(challengeIndices)) + for _, idx := range challengeIndices { + proof, pErr := tree.GenerateProof(int(idx)) + require.NoError(t, pErr) + proofs = append(proofs, toChunkProof(proof)) + } + + rqIDs := make([]string, 0, 50) + for i := uint64(20); i < 70; i++ { + id, idErr := actionkeeper.CreateKademliaID(signatureCascade, i) + require.NoError(t, idErr) + rqIDs = append(rqIDs, id) + } + + finalizeMeta := &actiontypes.CascadeMetadata{ + RqIdsIds: rqIDs, + ChunkProofs: proofs, + } + finalizeMetaBz, err := gogoproto.Marshal(finalizeMeta) + require.NoError(t, err) + + err = k.FinalizeAction(ctx, action.ActionID, supernodeAddr.String(), finalizeMetaBz) + require.NoError(t, err) + + stored, found := k.GetActionByID(ctx, action.ActionID) + require.True(t, found) + require.Equal(t, actiontypes.ActionStateDone, stored.State) + + var storedMeta actiontypes.CascadeMetadata + require.NoError(t, gogoproto.Unmarshal(stored.Metadata, &storedMeta)) + require.Len(t, storedMeta.GetChunkProofs(), 4) +} diff --git a/x/action/v1/keeper/svc.go b/x/action/v1/keeper/svc.go new file mode 100644 index 00000000..8052ac76 --- /dev/null +++ b/x/action/v1/keeper/svc.go @@ -0,0 +1,197 @@ +package keeper + +import ( + "fmt" + "strconv" + + errorsmod "cosmossdk.io/errors" + "github.com/LumeraProtocol/lumera/x/action/v1/merkle" + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + sdk "github.com/cosmos/cosmos-sdk/types" + gogoproto "github.com/cosmos/gogoproto/proto" +) + +// Default SVC constants — used as fallback when params are zero (pre-migration). +const ( + SVCChallengeCount uint32 = 8 + SVCMinChunksForChallenge uint32 = 4 +) + +// GetSVCParams returns the effective SVC parameters from the given Params. +// Zero values (from pre-migration state) fall back to the package-level defaults. 
+func GetSVCParams(params actiontypes.Params) (challengeCount, minChunks uint32) { + challengeCount = params.SvcChallengeCount + if challengeCount == 0 { + challengeCount = SVCChallengeCount + } + minChunks = params.SvcMinChunksForChallenge + if minChunks == 0 { + minChunks = SVCMinChunksForChallenge + } + return challengeCount, minChunks +} + +// VerifyChunkProofs validates LEP-5 chunk proofs for Cascade finalization. +// +// The expected challenge indices are read from the AvailabilityCommitment +// stored at registration time. Each proof must match the corresponding +// stored index and verify against the committed Merkle root. +func (k *Keeper) VerifyChunkProofs( + ctx sdk.Context, + action *actiontypes.Action, + superNodeAccount string, + proofs []*actiontypes.ChunkProof, +) error { + if action == nil { + return errorsmod.Wrap(actiontypes.ErrInvalidMetadata, "action is nil") + } + + var metadata actiontypes.CascadeMetadata + if err := gogoproto.Unmarshal(action.Metadata, &metadata); err != nil { + return errorsmod.Wrapf(actiontypes.ErrInvalidMetadata, "failed to unmarshal cascade metadata: %v", err) + } + + commitment := metadata.GetAvailabilityCommitment() + if commitment == nil { + // Backward compatibility: pre-LEP-5 actions do not include commitments. + return nil + } + + params := k.GetParams(ctx) + challengeCount, minChunks := GetSVCParams(params) + + if commitment.NumChunks < minChunks { + // Small files are out of challenge scope. 
+ return nil + } + + expectedCount := challengeCount + if expectedCount > commitment.NumChunks { + expectedCount = commitment.NumChunks + } + + if uint32(len(proofs)) != expectedCount { + err := errorsmod.Wrapf(actiontypes.ErrWrongProofCount, "expected %d proofs, got %d", expectedCount, len(proofs)) + emitSVCEvidenceEvent(ctx, action.ActionID, superNodeAccount, err.Error()) + return err + } + + root, err := bytesToMerkleHash("availability_commitment.root", commitment.Root) + if err != nil { + wrapped := errorsmod.Wrap(actiontypes.ErrInvalidMerkleProof, err.Error()) + emitSVCEvidenceEvent(ctx, action.ActionID, superNodeAccount, wrapped.Error()) + return wrapped + } + + // Read expected challenge indices from the stored commitment. + expectedIndices := commitment.ChallengeIndices + if uint32(len(expectedIndices)) != expectedCount { + err = errorsmod.Wrapf(actiontypes.ErrInvalidMetadata, + "commitment has %d challenge_indices, expected %d", len(expectedIndices), expectedCount) + emitSVCEvidenceEvent(ctx, action.ActionID, superNodeAccount, err.Error()) + return err + } + + for i, proof := range proofs { + if proof == nil { + err = errorsmod.Wrapf(actiontypes.ErrInvalidMerkleProof, "proof %d is nil", i) + emitSVCEvidenceEvent(ctx, action.ActionID, superNodeAccount, err.Error(), + sdk.NewAttribute(actiontypes.AttributeKeyProofIndex, strconv.Itoa(i)), + ) + return err + } + + if proof.ChunkIndex != expectedIndices[i] { + err = errorsmod.Wrapf( + actiontypes.ErrWrongChallengeIndex, + "proof %d: expected index %d, got %d", + i, + expectedIndices[i], + proof.ChunkIndex, + ) + emitSVCEvidenceEvent(ctx, action.ActionID, superNodeAccount, err.Error(), + sdk.NewAttribute(actiontypes.AttributeKeyProofIndex, strconv.Itoa(i)), + sdk.NewAttribute(actiontypes.AttributeKeyExpectedChunkIndex, strconv.FormatUint(uint64(expectedIndices[i]), 10)), + sdk.NewAttribute(actiontypes.AttributeKeyChunkIndex, strconv.FormatUint(uint64(proof.ChunkIndex), 10)), + ) + return err + } + + merkleProof, 
convErr := chunkProofToMerkleProof(proof) + if convErr != nil { + err = errorsmod.Wrap(actiontypes.ErrInvalidMerkleProof, convErr.Error()) + emitSVCEvidenceEvent(ctx, action.ActionID, superNodeAccount, err.Error(), + sdk.NewAttribute(actiontypes.AttributeKeyProofIndex, strconv.Itoa(i)), + sdk.NewAttribute(actiontypes.AttributeKeyChunkIndex, strconv.FormatUint(uint64(proof.ChunkIndex), 10)), + ) + return err + } + + if !merkleProof.Verify(root) { + err = errorsmod.Wrapf(actiontypes.ErrInvalidMerkleProof, "proof for chunk %d failed verification", proof.ChunkIndex) + emitSVCEvidenceEvent(ctx, action.ActionID, superNodeAccount, err.Error(), + sdk.NewAttribute(actiontypes.AttributeKeyProofIndex, strconv.Itoa(i)), + sdk.NewAttribute(actiontypes.AttributeKeyChunkIndex, strconv.FormatUint(uint64(proof.ChunkIndex), 10)), + ) + return err + } + } + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + actiontypes.EventTypeSVCVerificationPassed, + sdk.NewAttribute(actiontypes.AttributeKeyActionID, action.ActionID), + sdk.NewAttribute(actiontypes.AttributeKeySuperNodes, superNodeAccount), + ), + ) + + return nil +} + + +func chunkProofToMerkleProof(proof *actiontypes.ChunkProof) (*merkle.Proof, error) { + if len(proof.PathHashes) != len(proof.PathDirections) { + return nil, fmt.Errorf("path_hashes/path_directions length mismatch: %d/%d", len(proof.PathHashes), len(proof.PathDirections)) + } + + leafHash, err := bytesToMerkleHash("leaf_hash", proof.LeafHash) + if err != nil { + return nil, err + } + + pathHashes := make([][merkle.HashSize]byte, 0, len(proof.PathHashes)) + for i, pathHash := range proof.PathHashes { + decoded, decodeErr := bytesToMerkleHash(fmt.Sprintf("path_hashes[%d]", i), pathHash) + if decodeErr != nil { + return nil, decodeErr + } + pathHashes = append(pathHashes, decoded) + } + + return &merkle.Proof{ + ChunkIndex: proof.ChunkIndex, + LeafHash: leafHash, + PathHashes: pathHashes, + PathDirections: proof.PathDirections, + }, nil +} + +func 
bytesToMerkleHash(field string, value []byte) ([merkle.HashSize]byte, error) { + var out [merkle.HashSize]byte + if len(value) != merkle.HashSize { + return out, fmt.Errorf("%s must be %d bytes, got %d", field, merkle.HashSize, len(value)) + } + copy(out[:], value) + return out, nil +} + +func emitSVCEvidenceEvent(ctx sdk.Context, actionID, superNodeAccount, reason string, attrs ...sdk.Attribute) { + eventAttrs := []sdk.Attribute{ + sdk.NewAttribute(actiontypes.AttributeKeyActionID, actionID), + sdk.NewAttribute(actiontypes.AttributeKeySuperNodes, superNodeAccount), + sdk.NewAttribute(actiontypes.AttributeKeyError, reason), + } + eventAttrs = append(eventAttrs, attrs...) + + ctx.EventManager().EmitEvent(sdk.NewEvent(actiontypes.EventTypeSVCEvidence, eventAttrs...)) +} diff --git a/x/action/v1/keeper/svc_test.go b/x/action/v1/keeper/svc_test.go new file mode 100644 index 00000000..dd5880e0 --- /dev/null +++ b/x/action/v1/keeper/svc_test.go @@ -0,0 +1,173 @@ +package keeper_test + +import ( + "testing" + + "github.com/LumeraProtocol/lumera/testutil/cryptotestutils" + actionkeeper "github.com/LumeraProtocol/lumera/x/action/v1/keeper" + "github.com/LumeraProtocol/lumera/x/action/v1/merkle" + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + sdk "github.com/cosmos/cosmos-sdk/types" + gogoproto "github.com/cosmos/gogoproto/proto" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + + keepertest "github.com/LumeraProtocol/lumera/testutil/keeper" +) + +const svcChunkSize = uint32(262144) + +func TestVerifyChunkProofs(t *testing.T) { + t.Run("AT12_valid_proofs_succeed", func(t *testing.T) { + k, ctx, action, supernode, expected := setupSVCFixture(t, 4) + + err := k.VerifyChunkProofs(ctx, action, supernode, expected) + require.NoError(t, err) + }) + + t.Run("AT13_wrong_chunk_index_rejected", func(t *testing.T) { + k, ctx, action, supernode, expected := setupSVCFixture(t, 4) + expected[0].ChunkIndex = (expected[0].ChunkIndex + 1) % 4 + + err 
:= k.VerifyChunkProofs(ctx, action, supernode, expected) + require.ErrorIs(t, err, actiontypes.ErrWrongChallengeIndex) + + assertSVCEvidenceEvent(t, ctx.EventManager().Events()) + }) + + t.Run("AT14_invalid_merkle_path_rejected", func(t *testing.T) { + k, ctx, action, supernode, expected := setupSVCFixture(t, 4) + expected[0].PathHashes[0][0] ^= 0xFF + + err := k.VerifyChunkProofs(ctx, action, supernode, expected) + require.ErrorIs(t, err, actiontypes.ErrInvalidMerkleProof) + + assertSVCEvidenceEvent(t, ctx.EventManager().Events()) + }) + + t.Run("AT15_wrong_proof_count_rejected", func(t *testing.T) { + k, ctx, action, supernode, expected := setupSVCFixture(t, 4) + short := expected[:len(expected)-1] + + err := k.VerifyChunkProofs(ctx, action, supernode, short) + require.ErrorIs(t, err, actiontypes.ErrWrongProofCount) + + assertSVCEvidenceEvent(t, ctx.EventManager().Events()) + }) + + t.Run("AT16_svc_skipped_for_small_files", func(t *testing.T) { + k, ctx, action, supernode, _ := setupSVCFixture(t, 3) + + err := k.VerifyChunkProofs(ctx, action, supernode, nil) + require.NoError(t, err) + }) +} + +func setupSVCFixture(t *testing.T, numChunks uint32) (k actionkeeper.Keeper, ctx sdk.Context, action *actiontypes.Action, supernode string, proofs []*actiontypes.ChunkProof) { + t.Helper() + + ctrl := gomock.NewController(t) + t.Cleanup(ctrl.Finish) + + k, baseCtx := keepertest.ActionKeeper(t, ctrl) + + priv, addr := cryptotestutils.KeyAndAddress() + supernode = addr.String() + + chunks := make([][]byte, 0, numChunks) + for i := uint32(0); i < numChunks; i++ { + chunks = append(chunks, []byte{byte(i), byte(i + 1), byte(i + 2), byte(i + 3)}) + } + + tree, err := merkle.BuildTree(chunks) + require.NoError(t, err) + + var root []byte + root = append(root, tree.Root[:]...) + + // Client picks challenge indices at registration time. + // For testing, use simple sequential indices: [0, 1, 2, 3]. 
+ challengeCount := uint32(4) + if challengeCount > numChunks { + challengeCount = numChunks + } + challengeIndices := make([]uint32, 0, challengeCount) + for i := uint32(0); i < challengeCount; i++ { + challengeIndices = append(challengeIndices, i) + } + + metadata := &actiontypes.CascadeMetadata{ + DataHash: "hash", + FileName: "file.bin", + RqIdsIc: 1, + RqIdsMax: 50, + Signatures: "sig", + AvailabilityCommitment: &actiontypes.AvailabilityCommitment{ + CommitmentType: "lep5/chunk-merkle/v1", + HashAlgo: actiontypes.HashAlgo_HASH_ALGO_BLAKE3, + ChunkSize: svcChunkSize, + TotalSize: uint64(numChunks) * uint64(svcChunkSize), + NumChunks: numChunks, + Root: root, + ChallengeIndices: challengeIndices, + }, + } + + metaBz, err := gogoproto.Marshal(metadata) + require.NoError(t, err) + + action = &actiontypes.Action{ + ActionID: "svc-action", + ActionType: actiontypes.ActionTypeCascade, + Creator: addr.String(), + Metadata: metaBz, + BlockHeight: 42, + } + + ctx = baseCtx.WithBlockHeight(42).WithEventManager(sdk.NewEventManager()) + + // Generate proofs matching the stored challenge indices. + proofs = make([]*actiontypes.ChunkProof, 0, len(challengeIndices)) + for _, idx := range challengeIndices { + p, pErr := tree.GenerateProof(int(idx)) + require.NoError(t, pErr) + proofs = append(proofs, toChunkProof(p)) + } + _ = priv + + return k, ctx, action, supernode, proofs +} + +func toChunkProof(p *merkle.Proof) *actiontypes.ChunkProof { + leaf := make([]byte, merkle.HashSize) + copy(leaf, p.LeafHash[:]) + + pathHashes := make([][]byte, 0, len(p.PathHashes)) + for _, h := range p.PathHashes { + b := make([]byte, merkle.HashSize) + copy(b, h[:]) + pathHashes = append(pathHashes, b) + } + + directions := append([]bool(nil), p.PathDirections...) 
+ + return &actiontypes.ChunkProof{ + ChunkIndex: p.ChunkIndex, + LeafHash: leaf, + PathHashes: pathHashes, + PathDirections: directions, + } +} + +func assertSVCEvidenceEvent(t *testing.T, events sdk.Events) { + t.Helper() + + found := false + for _, e := range events { + if e.Type == actiontypes.EventTypeSVCEvidence { + found = true + break + } + } + require.True(t, found, "expected SVC evidence event") +} diff --git a/x/action/v1/merkle/merkle.go b/x/action/v1/merkle/merkle.go new file mode 100644 index 00000000..0febcd1a --- /dev/null +++ b/x/action/v1/merkle/merkle.go @@ -0,0 +1,149 @@ +package merkle + +import ( + "encoding/binary" + "errors" + + "lukechampine.com/blake3" +) + +const HashSize = 32 + +var ( + ErrEmptyInput = errors.New("empty input") + ErrIndexOutOfRange = errors.New("index out of range") +) + +var internalPrefix = [1]byte{0x01} + +// HashLeaf computes BLAKE3(0x00 || uint32be(index) || data). +func HashLeaf(index uint32, data []byte) [HashSize]byte { + var prefix [5]byte + prefix[0] = 0x00 + binary.BigEndian.PutUint32(prefix[1:], index) + + h := blake3.New(HashSize, nil) + _, _ = h.Write(prefix[:]) + _, _ = h.Write(data) + + var result [HashSize]byte + copy(result[:], h.Sum(nil)) + return result +} + +// HashInternal computes BLAKE3(0x01 || left || right). +func HashInternal(left, right [HashSize]byte) [HashSize]byte { + h := blake3.New(HashSize, nil) + _, _ = h.Write(internalPrefix[:]) + _, _ = h.Write(left[:]) + _, _ = h.Write(right[:]) + + var result [HashSize]byte + copy(result[:], h.Sum(nil)) + return result +} + +type Tree struct { + Root [HashSize]byte + Leaves [][HashSize]byte + Levels [][][HashSize]byte // levels[0] = leaves (possibly padded), levels[last] = root level + LeafCount int +} + +// BuildTree constructs a Merkle tree from chunk data. +// If a level has an odd number of nodes, the last node is duplicated. 
+func BuildTree(chunks [][]byte) (*Tree, error) { + n := len(chunks) + if n == 0 { + return nil, ErrEmptyInput + } + + leaves := make([][HashSize]byte, n) + for i, chunk := range chunks { + leaves[i] = HashLeaf(uint32(i), chunk) + } + + levels := make([][][HashSize]byte, 0) + current := make([][HashSize]byte, n) + copy(current, leaves) + levels = append(levels, current) + + for len(current) > 1 { + // If odd number of nodes, duplicate the last node. + if len(current)%2 != 0 { + current = append(current, current[len(current)-1]) + levels[len(levels)-1] = current + } + + next := make([][HashSize]byte, len(current)/2) + for i := 0; i < len(current); i += 2 { + next[i/2] = HashInternal(current[i], current[i+1]) + } + levels = append(levels, next) + current = next + } + + return &Tree{ + Root: current[0], + Leaves: leaves, + Levels: levels, + LeafCount: n, + }, nil +} + +type Proof struct { + ChunkIndex uint32 + LeafHash [HashSize]byte + PathHashes [][HashSize]byte + PathDirections []bool // true = sibling on right, false = sibling on left +} + +// GenerateProof creates a Merkle proof for a chunk index. +func (t *Tree) GenerateProof(index int) (*Proof, error) { + if index < 0 || index >= t.LeafCount { + return nil, ErrIndexOutOfRange + } + + proof := &Proof{ + ChunkIndex: uint32(index), + LeafHash: t.Leaves[index], + PathHashes: make([][HashSize]byte, 0, len(t.Levels)-1), + PathDirections: make([]bool, 0, len(t.Levels)-1), + } + + idx := index + for level := 0; level < len(t.Levels)-1; level++ { + nodes := t.Levels[level] + if idx%2 == 0 { + proof.PathHashes = append(proof.PathHashes, nodes[idx+1]) + proof.PathDirections = append(proof.PathDirections, true) + } else { + proof.PathHashes = append(proof.PathHashes, nodes[idx-1]) + proof.PathDirections = append(proof.PathDirections, false) + } + idx /= 2 + } + + return proof, nil +} + +// Verify checks the proof against a Merkle root. 
+func (p *Proof) Verify(root [HashSize]byte) bool { + if p == nil { + return false + } + if len(p.PathHashes) != len(p.PathDirections) { + return false + } + + current := p.LeafHash + for i, sibling := range p.PathHashes { + if p.PathDirections[i] { + current = HashInternal(current, sibling) + } else { + current = HashInternal(sibling, current) + } + } + + return current == root +} diff --git a/x/action/v1/merkle/merkle_test.go b/x/action/v1/merkle/merkle_test.go new file mode 100644 index 00000000..3fda6626 --- /dev/null +++ b/x/action/v1/merkle/merkle_test.go @@ -0,0 +1,129 @@ +package merkle + +import ( + "encoding/hex" + "testing" + + "github.com/stretchr/testify/require" +) + +func mustDecodeHex32(t *testing.T, s string) [HashSize]byte { + t.Helper() + + b, err := hex.DecodeString(s) + require.NoError(t, err) + require.Len(t, b, HashSize) + + var out [HashSize]byte + copy(out[:], b) + return out +} + +// AT01: 4-chunk tree root matches LEP-5 vector shape (Section 10.1). +func TestBuildTree_FourChunkRootMatchesVector(t *testing.T) { + chunks := [][]byte{[]byte("C0"), []byte("C1"), []byte("C2"), []byte("C3")} + + tree, err := BuildTree(chunks) + require.NoError(t, err) + + // Precomputed from LEP-5 Section 10.1 formulas (Blake3). + expectedRoot := mustDecodeHex32(t, "c226a548f56fcac8d7c63a4fa74d01970c99d23552f81744821f4e0e5feb1bed") + require.Equal(t, expectedRoot, tree.Root) +} + +// AT02: Proof for chunk 2 verifies (LEP-5 Section 10.3 path semantics). 
+func TestGenerateProof_Chunk2Verifies(t *testing.T) { + chunks := [][]byte{[]byte("C0"), []byte("C1"), []byte("C2"), []byte("C3")} + + tree, err := BuildTree(chunks) + require.NoError(t, err) + + proof, err := tree.GenerateProof(2) + require.NoError(t, err) + + require.Equal(t, uint32(2), proof.ChunkIndex) + require.Equal(t, HashLeaf(2, []byte("C2")), proof.LeafHash) + require.Len(t, proof.PathHashes, 2) + require.Len(t, proof.PathDirections, 2) + require.Equal(t, []bool{true, false}, proof.PathDirections) + require.True(t, proof.Verify(tree.Root)) +} + +// AT03: Tampered leaf hash fails verification. +func TestVerify_TamperedLeafFails(t *testing.T) { + chunks := [][]byte{[]byte("C0"), []byte("C1"), []byte("C2"), []byte("C3")} + + tree, err := BuildTree(chunks) + require.NoError(t, err) + + proof, err := tree.GenerateProof(2) + require.NoError(t, err) + require.True(t, proof.Verify(tree.Root)) + + tampered := *proof + tampered.LeafHash[0] ^= 0xFF + require.False(t, tampered.Verify(tree.Root)) +} + +// AT04: Handle edge and scale cases (single chunk, 1000+ chunks). 
+func TestBuildTree_EdgeAndScaleCases(t *testing.T) { + t.Run("single chunk", func(t *testing.T) { + chunks := [][]byte{[]byte("only")} + tree, err := BuildTree(chunks) + require.NoError(t, err) + + expectedLeaf := HashLeaf(0, []byte("only")) + require.Equal(t, expectedLeaf, tree.Root) + + proof, err := tree.GenerateProof(0) + require.NoError(t, err) + require.Empty(t, proof.PathHashes) + require.Empty(t, proof.PathDirections) + require.True(t, proof.Verify(tree.Root)) + }) + + t.Run("1001 chunks", func(t *testing.T) { + chunks := make([][]byte, 1001) + for i := range chunks { + chunks[i] = []byte{byte(i & 0xFF), byte((i >> 8) & 0xFF), 0xAA} + } + + tree, err := BuildTree(chunks) + require.NoError(t, err) + require.Equal(t, 1001, tree.LeafCount) + require.NotEqual(t, [HashSize]byte{}, tree.Root) + + indices := []int{0, 500, 1000} + for _, idx := range indices { + proof, err := tree.GenerateProof(idx) + require.NoError(t, err) + require.True(t, proof.Verify(tree.Root)) + } + }) +} + +func TestBuildTree_Errors(t *testing.T) { + _, err := BuildTree(nil) + require.ErrorIs(t, err, ErrEmptyInput) +} + +func TestGenerateProof_OutOfRange(t *testing.T) { + tree, err := BuildTree([][]byte{[]byte("C0")}) + require.NoError(t, err) + + _, err = tree.GenerateProof(-1) + require.ErrorIs(t, err, ErrIndexOutOfRange) + + _, err = tree.GenerateProof(1) + require.ErrorIs(t, err, ErrIndexOutOfRange) +} + +func TestProofVerify_InvalidPathLengths(t *testing.T) { + root := HashLeaf(0, []byte("x")) + p := &Proof{ + LeafHash: root, + PathHashes: [][HashSize]byte{{}}, + PathDirections: nil, + } + require.False(t, p.Verify(root)) +} diff --git a/x/action/v1/simulation/lep5_cascade.go b/x/action/v1/simulation/lep5_cascade.go new file mode 100644 index 00000000..dd638fd9 --- /dev/null +++ b/x/action/v1/simulation/lep5_cascade.go @@ -0,0 +1,194 @@ +package simulation + +import ( + "encoding/json" + "fmt" + "math/rand" + "strconv" + + "github.com/cosmos/cosmos-sdk/baseapp" + sdk 
"github.com/cosmos/cosmos-sdk/types" + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" + + "github.com/LumeraProtocol/lumera/x/action/v1/keeper" + "github.com/LumeraProtocol/lumera/x/action/v1/merkle" + "github.com/LumeraProtocol/lumera/x/action/v1/types" +) + +const lep5ChunkSize = uint32(262144) + +// SimulateMsgCascadeWithSVCFlow simulates a full LEP-5 Cascade lifecycle: +// register with AvailabilityCommitment, finalize with valid chunk proofs, verify DONE. +func SimulateMsgCascadeWithSVCFlow( + ak types.AuthKeeper, + bk types.BankKeeper, + k keeper.Keeper, +) simtypes.Operation { + return func(r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, + ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { + params := k.GetParams(ctx) + challengeCount := keeper.SVCChallengeCount + minChunks := keeper.SVCMinChunksForChallenge + + // Use enough chunks so SVC is exercised. + numChunks := minChunks + uint32(r.Intn(int(challengeCount))) + if numChunks < minChunks { + numChunks = minChunks + } + + // Build Merkle tree from random chunks. + chunks := make([][]byte, numChunks) + for i := range chunks { + chunk := make([]byte, 4+r.Intn(60)) + r.Read(chunk) + chunks[i] = chunk + } + + tree, err := merkle.BuildTree(chunks) + if err != nil { + return simtypes.NoOpMsg(types.ModuleName, "lep5_cascade", fmt.Sprintf("build tree: %v", err)), nil, nil + } + + root := make([]byte, merkle.HashSize) + copy(root, tree.Root[:]) + + // Generate challenge indices. + m := challengeCount + if m > numChunks { + m = numChunks + } + challengeIndices := generateUniqueIndices(r, numChunks, m) + + // Select a funded account and register the action. 
+ feeAmount := generateRandomFee(r, ctx, params.BaseActionFee.Add(params.FeePerKbyte)) + simAccount := selectRandomAccountWithSufficientFunds(r, ctx, accs, bk, ak, feeAmount, []string{""}) + + sigStr := generateCascadeSignature(simAccount) + + commitment := types.AvailabilityCommitment{ + CommitmentType: "lep5/chunk-merkle/v1", + HashAlgo: types.HashAlgo_HASH_ALGO_BLAKE3, + ChunkSize: lep5ChunkSize, + TotalSize: uint64(numChunks) * uint64(lep5ChunkSize), + NumChunks: numChunks, + Root: root, + ChallengeIndices: challengeIndices, + } + commitmentJSON, err := json.Marshal(&commitment) + if err != nil { + return simtypes.NoOpMsg(types.ModuleName, "lep5_cascade", fmt.Sprintf("marshal commitment: %v", err)), nil, nil + } + + dataHash := generateRandomHash(r) + fileName := generateRandomFileName(r) + + metadata := fmt.Sprintf( + `{"data_hash":"%s","file_name":"%s","rq_ids_ic":1,"signatures":"%s","availability_commitment":%s}`, + dataHash, fileName, sigStr, string(commitmentJSON), + ) + + expirationTime := getRandomExpirationTime(ctx, r, params) + + msg := types.NewMsgRequestAction( + simAccount.Address.String(), + types.ActionTypeCascade.String(), + metadata, + feeAmount.String(), + strconv.FormatInt(expirationTime, 10), + "", + ) + + msgServSim := keeper.NewMsgServerImpl(k) + result, err := msgServSim.RequestAction(ctx, msg) + if err != nil { + return simtypes.NoOpMsg(types.ModuleName, sdk.MsgTypeURL(msg), fmt.Sprintf("register: %v", err)), nil, nil + } + + // Verify action is pending. + action, found := k.GetActionByID(ctx, result.ActionId) + if !found { + return simtypes.NoOpMsg(types.ModuleName, sdk.MsgTypeURL(msg), "action not found after register"), nil, nil + } + if action.State != types.ActionStatePending { + return simtypes.NoOpMsg(types.ModuleName, sdk.MsgTypeURL(msg), "not in PENDING state"), nil, nil + } + + // Get a supernode to finalize. 
+ supernodes, snErr := getRandomActiveSupernodes(r, ctx, 1, ak, k, accs) + if snErr != nil { + return simtypes.NoOpMsg(types.ModuleName, sdk.MsgTypeURL(msg), fmt.Sprintf("no supernodes: %v", snErr)), nil, nil + } + + // Build chunk proofs for challenged indices. + chunkProofs := make([]*types.ChunkProof, 0, len(challengeIndices)) + for _, idx := range challengeIndices { + p, pErr := tree.GenerateProof(int(idx)) + if pErr != nil { + return simtypes.NoOpMsg(types.ModuleName, sdk.MsgTypeURL(msg), fmt.Sprintf("gen proof: %v", pErr)), nil, nil + } + chunkProofs = append(chunkProofs, simChunkProof(p)) + } + + ids := generateKademliaIDs(1, 50, sigStr) + + finMeta := &types.CascadeMetadata{ + RqIdsIds: ids, + ChunkProofs: chunkProofs, + } + finMetaBytes, err := json.Marshal(finMeta) + if err != nil { + return simtypes.NoOpMsg(types.ModuleName, sdk.MsgTypeURL(msg), fmt.Sprintf("marshal finalize: %v", err)), nil, nil + } + + finMsg := types.NewMsgFinalizeAction( + supernodes[0].Address.String(), + result.ActionId, + types.ActionTypeCascade.String(), + string(finMetaBytes), + ) + + _, err = msgServSim.FinalizeAction(ctx, finMsg) + if err != nil { + return simtypes.NoOpMsg(types.ModuleName, sdk.MsgTypeURL(finMsg), fmt.Sprintf("finalize: %v", err)), nil, nil + } + + // Verify DONE state. 
+ finalAction, found := k.GetActionByID(ctx, result.ActionId) + if !found || finalAction.State != types.ActionStateDone { + return simtypes.NoOpMsg(types.ModuleName, sdk.MsgTypeURL(finMsg), "not in DONE state after finalize"), nil, nil + } + + return simtypes.NewOperationMsg(msg, true, "lep5_cascade_svc_flow_success"), nil, nil + } +} + +func generateUniqueIndices(r *rand.Rand, numChunks, m uint32) []uint32 { + if m > numChunks { + m = numChunks + } + perm := r.Perm(int(numChunks)) + indices := make([]uint32, m) + for i := uint32(0); i < m; i++ { + indices[i] = uint32(perm[i]) + } + return indices +} + +func simChunkProof(p *merkle.Proof) *types.ChunkProof { + leaf := make([]byte, merkle.HashSize) + copy(leaf, p.LeafHash[:]) + + pathHashes := make([][]byte, 0, len(p.PathHashes)) + for _, h := range p.PathHashes { + b := make([]byte, merkle.HashSize) + copy(b, h[:]) + pathHashes = append(pathHashes, b) + } + + return &types.ChunkProof{ + ChunkIndex: p.ChunkIndex, + LeafHash: leaf, + PathHashes: pathHashes, + PathDirections: append([]bool(nil), p.PathDirections...), + } +} diff --git a/x/action/v1/simulation/operations.go b/x/action/v1/simulation/operations.go index c01686ec..b73f8b86 100644 --- a/x/action/v1/simulation/operations.go +++ b/x/action/v1/simulation/operations.go @@ -123,6 +123,10 @@ func WeightedOperations( weightActionExpirationSim, // Lower weight for state transition simulation SimulateActionExpiration(ak, bk, k), ), + simulation.NewWeightedOperation( + weightMsgFinalizeAction, // LEP-5 full SVC flow: register with commitment, finalize with proofs + SimulateMsgCascadeWithSVCFlow(ak, bk, k), + ), } return operations diff --git a/x/action/v1/types/errors.go b/x/action/v1/types/errors.go index cf3c4453..74a8486b 100644 --- a/x/action/v1/types/errors.go +++ b/x/action/v1/types/errors.go @@ -22,6 +22,9 @@ var ( ErrFinalizationError = errorsmod.Register(ModuleName, 14, "finalization error") ErrInvalidFileSize = errorsmod.Register(ModuleName, 15, "invalid 
file size") ErrInvalidAppPubKey = errorsmod.Register(ModuleName, 16, "invalid app pubkey") + ErrWrongProofCount = errorsmod.Register(ModuleName, 17, "wrong chunk proof count") + ErrWrongChallengeIndex = errorsmod.Register(ModuleName, 18, "wrong challenge index") + ErrInvalidMerkleProof = errorsmod.Register(ModuleName, 19, "invalid merkle proof") ErrInvalidSigner = errorsmod.Register(ModuleName, 1100, "expected gov account as only signer for proposal message") ErrInvalidPacketTimeout = errorsmod.Register(ModuleName, 1500, "invalid packet timeout") ErrInvalidVersion = errorsmod.Register(ModuleName, 1501, "invalid version") diff --git a/x/action/v1/types/events.go b/x/action/v1/types/events.go index 92a72ba4..3009efc1 100644 --- a/x/action/v1/types/events.go +++ b/x/action/v1/types/events.go @@ -8,15 +8,20 @@ const ( EventTypeActionApproved = "action_approved" EventTypeActionFailed = "action_failed" EventTypeActionExpired = "action_expired" + EventTypeSVCEvidence = "svc_verification_failed_evidence" + EventTypeSVCVerificationPassed = "svc_verification_passed" // Common Attributes - AttributeKeyActionID = "action_id" - AttributeKeyCreator = "creator" - AttributeKeyFinalizer = "finalizer" - AttributeKeySuperNodes = "supernodes" - AttributeKeyActionType = "action_type" - AttributeKeyResults = "results" - AttributeKeyFee = "fee" - AttributeKeyError = "error" - AttributeKeyEvidenceID = "evidence_id" + AttributeKeyActionID = "action_id" + AttributeKeyCreator = "creator" + AttributeKeyFinalizer = "finalizer" + AttributeKeySuperNodes = "supernodes" + AttributeKeyActionType = "action_type" + AttributeKeyResults = "results" + AttributeKeyFee = "fee" + AttributeKeyError = "error" + AttributeKeyEvidenceID = "evidence_id" + AttributeKeyProofIndex = "proof_index" + AttributeKeyChunkIndex = "chunk_index" + AttributeKeyExpectedChunkIndex = "expected_chunk_index" ) diff --git a/x/action/v1/types/metadata.pb.go b/x/action/v1/types/metadata.pb.go index f0761560..9082029b 100644 --- 
a/x/action/v1/types/metadata.pb.go +++ b/x/action/v1/types/metadata.pb.go @@ -23,6 +23,36 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +// HashAlgo enumerates the supported hash algorithms for availability +// commitments. +type HashAlgo int32 + +const ( + HashAlgo_HASH_ALGO_UNSPECIFIED HashAlgo = 0 + HashAlgo_HASH_ALGO_BLAKE3 HashAlgo = 1 + HashAlgo_HASH_ALGO_SHA256 HashAlgo = 2 +) + +var HashAlgo_name = map[int32]string{ + 0: "HASH_ALGO_UNSPECIFIED", + 1: "HASH_ALGO_BLAKE3", + 2: "HASH_ALGO_SHA256", +} + +var HashAlgo_value = map[string]int32{ + "HASH_ALGO_UNSPECIFIED": 0, + "HASH_ALGO_BLAKE3": 1, + "HASH_ALGO_SHA256": 2, +} + +func (x HashAlgo) String() string { + return proto.EnumName(HashAlgo_name, int32(x)) +} + +func (HashAlgo) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_05a11a06dcddaaa2, []int{0} +} + // SenseMetadata contains information for Sense actions. // This metadata is directly embedded in the Action.metadata field. // For RequestAction: @@ -130,6 +160,173 @@ func (m *SenseMetadata) GetSignatures() string { return "" } +// AvailabilityCommitment is the LEP-5 on-chain file commitment included +// during Cascade registration. +type AvailabilityCommitment struct { + CommitmentType string `protobuf:"bytes,1,opt,name=commitment_type,proto3" json:"commitment_type,omitempty"` + HashAlgo HashAlgo `protobuf:"varint,2,opt,name=hash_algo,proto3,enum=lumera.action.v1.HashAlgo" json:"hash_algo,omitempty"` + ChunkSize uint32 `protobuf:"varint,3,opt,name=chunk_size,proto3" json:"chunk_size,omitempty"` + TotalSize uint64 `protobuf:"varint,4,opt,name=total_size,proto3" json:"total_size,omitempty"` + NumChunks uint32 `protobuf:"varint,5,opt,name=num_chunks,proto3" json:"num_chunks,omitempty"` + Root []byte `protobuf:"bytes,6,opt,name=root,proto3" json:"root,omitempty"` + // Challenge indices chosen by the client at registration time. 
+ // The SuperNode must provide Merkle proofs for these exact chunk + // indices during finalization. The keeper validates proofs match + // these stored indices and the committed root. + ChallengeIndices []uint32 `protobuf:"varint,7,rep,packed,name=challenge_indices,proto3" json:"challenge_indices,omitempty"` +} + +func (m *AvailabilityCommitment) Reset() { *m = AvailabilityCommitment{} } +func (m *AvailabilityCommitment) String() string { return proto.CompactTextString(m) } +func (*AvailabilityCommitment) ProtoMessage() {} +func (*AvailabilityCommitment) Descriptor() ([]byte, []int) { + return fileDescriptor_05a11a06dcddaaa2, []int{1} +} +func (m *AvailabilityCommitment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AvailabilityCommitment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AvailabilityCommitment.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AvailabilityCommitment) XXX_Merge(src proto.Message) { + xxx_messageInfo_AvailabilityCommitment.Merge(m, src) +} +func (m *AvailabilityCommitment) XXX_Size() int { + return m.Size() +} +func (m *AvailabilityCommitment) XXX_DiscardUnknown() { + xxx_messageInfo_AvailabilityCommitment.DiscardUnknown(m) +} + +var xxx_messageInfo_AvailabilityCommitment proto.InternalMessageInfo + +func (m *AvailabilityCommitment) GetCommitmentType() string { + if m != nil { + return m.CommitmentType + } + return "" +} + +func (m *AvailabilityCommitment) GetHashAlgo() HashAlgo { + if m != nil { + return m.HashAlgo + } + return HashAlgo_HASH_ALGO_UNSPECIFIED +} + +func (m *AvailabilityCommitment) GetChunkSize() uint32 { + if m != nil { + return m.ChunkSize + } + return 0 +} + +func (m *AvailabilityCommitment) GetTotalSize() uint64 { + if m != nil { + return m.TotalSize + } + return 0 +} + +func (m *AvailabilityCommitment) 
GetNumChunks() uint32 { + if m != nil { + return m.NumChunks + } + return 0 +} + +func (m *AvailabilityCommitment) GetRoot() []byte { + if m != nil { + return m.Root + } + return nil +} + +func (m *AvailabilityCommitment) GetChallengeIndices() []uint32 { + if m != nil { + return m.ChallengeIndices + } + return nil +} + +// ChunkProof is a Merkle inclusion proof for one challenged chunk. +type ChunkProof struct { + ChunkIndex uint32 `protobuf:"varint,1,opt,name=chunk_index,proto3" json:"chunk_index,omitempty"` + LeafHash []byte `protobuf:"bytes,2,opt,name=leaf_hash,proto3" json:"leaf_hash,omitempty"` + PathHashes [][]byte `protobuf:"bytes,3,rep,name=path_hashes,proto3" json:"path_hashes,omitempty"` + PathDirections []bool `protobuf:"varint,4,rep,packed,name=path_directions,proto3" json:"path_directions,omitempty"` +} + +func (m *ChunkProof) Reset() { *m = ChunkProof{} } +func (m *ChunkProof) String() string { return proto.CompactTextString(m) } +func (*ChunkProof) ProtoMessage() {} +func (*ChunkProof) Descriptor() ([]byte, []int) { + return fileDescriptor_05a11a06dcddaaa2, []int{2} +} +func (m *ChunkProof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ChunkProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ChunkProof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ChunkProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChunkProof.Merge(m, src) +} +func (m *ChunkProof) XXX_Size() int { + return m.Size() +} +func (m *ChunkProof) XXX_DiscardUnknown() { + xxx_messageInfo_ChunkProof.DiscardUnknown(m) +} + +var xxx_messageInfo_ChunkProof proto.InternalMessageInfo + +func (m *ChunkProof) GetChunkIndex() uint32 { + if m != nil { + return m.ChunkIndex + } + return 0 +} + +func (m *ChunkProof) GetLeafHash() []byte { + if m != nil { + return m.LeafHash + } + return 
nil +} + +func (m *ChunkProof) GetPathHashes() [][]byte { + if m != nil { + return m.PathHashes + } + return nil +} + +func (m *ChunkProof) GetPathDirections() []bool { + if m != nil { + return m.PathDirections + } + return nil +} + // CascadeMetadata contains information for Cascade actions. // This metadata is directly embedded in the Action.metadata field. // For RequestAction: @@ -155,13 +352,16 @@ type CascadeMetadata struct { // mark the action as visible to all users; set to false for private // or restricted actions. Public bool `protobuf:"varint,7,opt,name=public,proto3" json:"public,omitempty"` + // LEP-5 fields + AvailabilityCommitment *AvailabilityCommitment `protobuf:"bytes,8,opt,name=availability_commitment,proto3" json:"availability_commitment,omitempty"` + ChunkProofs []*ChunkProof `protobuf:"bytes,9,rep,name=chunk_proofs,proto3" json:"chunk_proofs,omitempty"` } func (m *CascadeMetadata) Reset() { *m = CascadeMetadata{} } func (m *CascadeMetadata) String() string { return proto.CompactTextString(m) } func (*CascadeMetadata) ProtoMessage() {} func (*CascadeMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_05a11a06dcddaaa2, []int{1} + return fileDescriptor_05a11a06dcddaaa2, []int{3} } func (m *CascadeMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -239,38 +439,75 @@ func (m *CascadeMetadata) GetPublic() bool { return false } +func (m *CascadeMetadata) GetAvailabilityCommitment() *AvailabilityCommitment { + if m != nil { + return m.AvailabilityCommitment + } + return nil +} + +func (m *CascadeMetadata) GetChunkProofs() []*ChunkProof { + if m != nil { + return m.ChunkProofs + } + return nil +} + func init() { + proto.RegisterEnum("lumera.action.v1.HashAlgo", HashAlgo_name, HashAlgo_value) proto.RegisterType((*SenseMetadata)(nil), "lumera.action.v1.SenseMetadata") + proto.RegisterType((*AvailabilityCommitment)(nil), "lumera.action.v1.AvailabilityCommitment") + proto.RegisterType((*ChunkProof)(nil), 
"lumera.action.v1.ChunkProof") proto.RegisterType((*CascadeMetadata)(nil), "lumera.action.v1.CascadeMetadata") } func init() { proto.RegisterFile("lumera/action/v1/metadata.proto", fileDescriptor_05a11a06dcddaaa2) } var fileDescriptor_05a11a06dcddaaa2 = []byte{ - // 364 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0x4d, 0x6b, 0xc2, 0x30, - 0x18, 0x36, 0x5a, 0x3b, 0x1b, 0x90, 0x6d, 0x61, 0xb8, 0x22, 0x52, 0x45, 0x3c, 0x08, 0x03, 0x8b, - 0x0c, 0xc6, 0xce, 0xee, 0xbc, 0x4b, 0x77, 0xdb, 0xa5, 0xc4, 0x36, 0xd6, 0x40, 0xdb, 0x74, 0x49, - 0x2a, 0xee, 0x5f, 0xec, 0xff, 0xec, 0x0f, 0x78, 0xf4, 0xb8, 0xd3, 0x18, 0x7a, 0xda, 0xbf, 0x18, - 0x89, 0xa2, 0x9d, 0xac, 0xb0, 0x53, 0xf2, 0x3e, 0xcf, 0xf3, 0x7e, 0x3c, 0x2f, 0x2f, 0xec, 0xc6, - 0x79, 0x42, 0x38, 0x76, 0x71, 0x20, 0x29, 0x4b, 0xdd, 0xc5, 0xd8, 0x4d, 0x88, 0xc4, 0x21, 0x96, - 0x78, 0x94, 0x71, 0x26, 0x19, 0xba, 0xd8, 0x09, 0x46, 0x3b, 0xc1, 0x68, 0x31, 0x6e, 0x5f, 0x45, - 0x2c, 0x62, 0x9a, 0x74, 0xd5, 0x6f, 0xa7, 0xeb, 0xbf, 0x57, 0x61, 0xf3, 0x89, 0xa4, 0x82, 0x3c, - 0xee, 0xf3, 0x51, 0x07, 0x5a, 0xea, 0xf5, 0xe7, 0x58, 0xcc, 0x6d, 0xd0, 0x03, 0x43, 0xcb, 0x3b, - 0x02, 0xe8, 0x0e, 0xb6, 0xc2, 0xd0, 0xc7, 0x69, 0xe8, 0xcf, 0x68, 0x1a, 0x11, 0x9e, 0x71, 0x9a, - 0x4a, 0xe1, 0xd3, 0xc0, 0xae, 0xf6, 0xc0, 0xd0, 0xf0, 0x4a, 0x58, 0x34, 0x80, 0xcd, 0x80, 0xc5, - 0x31, 0xd1, 0xe3, 0xf8, 0x34, 0xb4, 0x6b, 0xba, 0xf2, 0x6f, 0x10, 0xb5, 0x61, 0x23, 0xe2, 0x2c, - 0xcf, 0x94, 0xc0, 0xd0, 0x82, 0x43, 0x8c, 0xee, 0xe1, 0xf5, 0x5f, 0xb5, 0x13, 0xbc, 0xb4, 0xeb, - 0xba, 0x75, 0x19, 0x5d, 0x96, 0x49, 0x43, 0x61, 0x9b, 0xbd, 0xda, 0xd0, 0xf2, 0xca, 0x68, 0xe4, - 0x40, 0x28, 0x68, 0x94, 0x62, 0x99, 0x73, 0x22, 0xec, 0x33, 0x3d, 0x51, 0x01, 0xe9, 0x7f, 0x03, - 0x78, 0xfe, 0x80, 0x45, 0x80, 0xc3, 0xff, 0xee, 0xaf, 0x03, 0xad, 0x19, 0x8d, 0x89, 0x9f, 0xe2, - 0x84, 0xe8, 0x95, 0x59, 0xde, 0x11, 0x50, 0x2c, 0x7f, 0x51, 0x9d, 0xd5, 0x42, 0x6b, 0xda, 0xd5, - 0x11, 0x50, 0xd3, 
0xec, 0x03, 0x65, 0xda, 0xd0, 0x74, 0x01, 0x41, 0x83, 0x03, 0xaf, 0xac, 0xd5, - 0x95, 0xb5, 0x89, 0xb1, 0xfa, 0xec, 0x02, 0xaf, 0x80, 0x9f, 0x78, 0x32, 0x4f, 0x3d, 0xa1, 0x16, - 0x34, 0xb3, 0x7c, 0x1a, 0xd3, 0x40, 0xfb, 0x6d, 0x78, 0xfb, 0x68, 0x72, 0xb3, 0xda, 0x38, 0x60, - 0xbd, 0x71, 0xc0, 0xd7, 0xc6, 0x01, 0x6f, 0x5b, 0xa7, 0xb2, 0xde, 0x3a, 0x95, 0x8f, 0xad, 0x53, - 0x79, 0xbe, 0x5c, 0x16, 0xee, 0x50, 0xbe, 0x66, 0x44, 0x4c, 0x4d, 0x7d, 0x5d, 0xb7, 0x3f, 0x01, - 0x00, 0x00, 0xff, 0xff, 0x90, 0x27, 0xb3, 0x47, 0xa8, 0x02, 0x00, 0x00, + // 681 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xc1, 0x4e, 0xdb, 0x4c, + 0x10, 0x8e, 0xe3, 0x90, 0x3f, 0x59, 0xc8, 0x4f, 0x58, 0x51, 0x70, 0x11, 0x0a, 0x56, 0xc4, 0xc1, + 0x6a, 0xab, 0x44, 0x04, 0x15, 0x71, 0x6c, 0x48, 0x69, 0x83, 0x4a, 0x5b, 0xba, 0x51, 0x2f, 0xbd, + 0xac, 0x36, 0xf6, 0xc6, 0x59, 0xd5, 0xf6, 0xba, 0xde, 0x0d, 0x82, 0xbe, 0x44, 0x7b, 0xec, 0xa9, + 0x2f, 0xd2, 0x17, 0xe0, 0xc8, 0xb1, 0xa7, 0xaa, 0x82, 0x17, 0xa9, 0x76, 0x9d, 0x62, 0x13, 0x88, + 0xd4, 0x93, 0x3d, 0xdf, 0x37, 0x33, 0x3b, 0xfa, 0xe6, 0xdb, 0x05, 0x5b, 0xc1, 0x24, 0xa4, 0x09, + 0x69, 0x13, 0x57, 0x32, 0x1e, 0xb5, 0x4f, 0x77, 0xda, 0x21, 0x95, 0xc4, 0x23, 0x92, 0xb4, 0xe2, + 0x84, 0x4b, 0x0e, 0xeb, 0x69, 0x42, 0x2b, 0x4d, 0x68, 0x9d, 0xee, 0x6c, 0xac, 0xfa, 0xdc, 0xe7, + 0x9a, 0x6c, 0xab, 0xbf, 0x34, 0xaf, 0xf9, 0xa3, 0x08, 0x6a, 0x03, 0x1a, 0x09, 0xfa, 0x7a, 0x5a, + 0x0f, 0x37, 0x41, 0x55, 0x7d, 0xf1, 0x98, 0x88, 0xb1, 0x65, 0xd8, 0x86, 0x53, 0x45, 0x19, 0x00, + 0xf7, 0xc0, 0x9a, 0xe7, 0x61, 0x12, 0x79, 0x78, 0xc4, 0x22, 0x9f, 0x26, 0x71, 0xc2, 0x22, 0x29, + 0x30, 0x73, 0xad, 0xa2, 0x6d, 0x38, 0x25, 0x34, 0x87, 0x85, 0xdb, 0xa0, 0xe6, 0xf2, 0x20, 0xa0, + 0x7a, 0x1c, 0xcc, 0x3c, 0xcb, 0xd4, 0x9d, 0x6f, 0x83, 0x70, 0x03, 0x54, 0xfc, 0x84, 0x4f, 0x62, + 0x95, 0x50, 0xd2, 0x09, 0x37, 0x31, 0xdc, 0x07, 0xeb, 0xf7, 0xf5, 0x0e, 0xc9, 0x99, 0xb5, 0xa0, + 0x8f, 0x9e, 0x47, 0xcf, 0xab, 
0x64, 0x9e, 0xb0, 0xca, 0xb6, 0xe9, 0x54, 0xd1, 0x3c, 0x1a, 0x36, + 0x00, 0x10, 0xcc, 0x8f, 0x88, 0x9c, 0x24, 0x54, 0x58, 0xff, 0xe9, 0x89, 0x72, 0x48, 0xf3, 0x7b, + 0x11, 0xac, 0x75, 0x4f, 0x09, 0x0b, 0xc8, 0x90, 0x05, 0x4c, 0x9e, 0xf7, 0x78, 0x18, 0x32, 0x19, + 0xd2, 0x48, 0x42, 0x07, 0x2c, 0xbb, 0x37, 0x11, 0x96, 0xe7, 0x31, 0x9d, 0x8a, 0x39, 0x0b, 0xc3, + 0x7d, 0x50, 0x55, 0xd2, 0x62, 0x12, 0xf8, 0x5c, 0xab, 0xf8, 0x7f, 0x67, 0xa3, 0x35, 0xbb, 0xbe, + 0x56, 0x9f, 0x88, 0x71, 0x37, 0xf0, 0x39, 0xca, 0x92, 0xd5, 0x78, 0xee, 0x78, 0x12, 0x7d, 0xc4, + 0x82, 0x7d, 0xa6, 0x5a, 0xd1, 0x1a, 0xca, 0x21, 0x8a, 0x97, 0x5c, 0x92, 0x20, 0xe5, 0x4b, 0x5a, + 0xa5, 0x1c, 0xa2, 0xf8, 0x68, 0x12, 0x62, 0x5d, 0x21, 0xb4, 0x8a, 0x35, 0x94, 0x43, 0x20, 0x04, + 0xa5, 0x84, 0x73, 0x69, 0x95, 0x6d, 0xc3, 0x59, 0x42, 0xfa, 0x1f, 0x3e, 0x01, 0x2b, 0xee, 0x98, + 0x04, 0x01, 0x8d, 0x7c, 0x8a, 0x59, 0xe4, 0x31, 0x57, 0x2b, 0x63, 0x3a, 0x35, 0x74, 0x97, 0x68, + 0x7e, 0x33, 0x00, 0xe8, 0xa9, 0x66, 0x27, 0x09, 0xe7, 0x23, 0x68, 0x83, 0xc5, 0x74, 0x3c, 0x16, + 0x79, 0xf4, 0x4c, 0x0b, 0x52, 0x43, 0x79, 0x48, 0xb9, 0x2f, 0xa0, 0x64, 0x94, 0xba, 0xaf, 0xa8, + 0xcf, 0xcd, 0x00, 0x55, 0x1f, 0x13, 0x39, 0xd6, 0x01, 0x15, 0x96, 0x69, 0x9b, 0xce, 0x12, 0xca, + 0x43, 0x4a, 0x76, 0x1d, 0x7a, 0x2c, 0x49, 0x6d, 0x25, 0xac, 0x92, 0x6d, 0x3a, 0x15, 0x34, 0x0b, + 0x37, 0xbf, 0x98, 0x60, 0xb9, 0x47, 0x84, 0x4b, 0xbc, 0x7f, 0xf5, 0xfe, 0x26, 0xa8, 0x8e, 0x58, + 0x40, 0x71, 0x44, 0x42, 0xaa, 0x67, 0xab, 0xa2, 0x0c, 0x50, 0x6c, 0xf2, 0x49, 0xb9, 0x46, 0x5d, + 0x06, 0x53, 0x6b, 0x9d, 0x01, 0x4a, 0xea, 0x69, 0xa0, 0x0c, 0x3b, 0x5d, 0x45, 0x86, 0xc0, 0xed, + 0x1b, 0x5e, 0xd9, 0x72, 0x41, 0xd9, 0xf2, 0xa0, 0x74, 0xf1, 0x6b, 0xcb, 0x40, 0x39, 0x7c, 0xc6, + 0x8f, 0xe5, 0x59, 0x3f, 0xc2, 0x35, 0x50, 0x8e, 0x27, 0xc3, 0x80, 0xb9, 0xda, 0xab, 0x15, 0x34, + 0x8d, 0xe0, 0x10, 0xac, 0x93, 0x9c, 0x4d, 0x71, 0x66, 0x41, 0xab, 0x62, 0x1b, 0xce, 0x62, 0xc7, + 0xb9, 0x6b, 0xb8, 0xfb, 0x7d, 0x8d, 0xe6, 0x35, 0x82, 0xcf, 0xc0, 
0x52, 0xba, 0xc8, 0x58, 0xad, + 0x5a, 0x58, 0x55, 0xdb, 0x74, 0x16, 0x3b, 0x9b, 0x77, 0x1b, 0x67, 0x7e, 0x40, 0xb7, 0x2a, 0x1e, + 0xbd, 0x03, 0x95, 0xbf, 0x2e, 0x87, 0x0f, 0xc1, 0x83, 0x7e, 0x77, 0xd0, 0xc7, 0xdd, 0xe3, 0x97, + 0x6f, 0xf1, 0xfb, 0x37, 0x83, 0x93, 0xc3, 0xde, 0xd1, 0x8b, 0xa3, 0xc3, 0xe7, 0xf5, 0x02, 0x5c, + 0x05, 0xf5, 0x8c, 0x3a, 0x38, 0xee, 0xbe, 0x3a, 0xdc, 0xad, 0x1b, 0xb7, 0xd1, 0x41, 0xbf, 0xdb, + 0x79, 0xba, 0x57, 0x2f, 0x1e, 0x3c, 0xbe, 0xb8, 0x6a, 0x18, 0x97, 0x57, 0x0d, 0xe3, 0xf7, 0x55, + 0xc3, 0xf8, 0x7a, 0xdd, 0x28, 0x5c, 0x5e, 0x37, 0x0a, 0x3f, 0xaf, 0x1b, 0x85, 0x0f, 0x2b, 0x67, + 0xb9, 0xc7, 0x53, 0xdd, 0x43, 0x31, 0x2c, 0xeb, 0x27, 0x71, 0xf7, 0x4f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x28, 0x1a, 0x84, 0x3a, 0x5d, 0x05, 0x00, 0x00, } func (m *SenseMetadata) Marshal() (dAtA []byte, err error) { @@ -343,6 +580,138 @@ func (m *SenseMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *AvailabilityCommitment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AvailabilityCommitment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AvailabilityCommitment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChallengeIndices) > 0 { + dAtA2 := make([]byte, len(m.ChallengeIndices)*10) + var j1 int + for _, num := range m.ChallengeIndices { + for num >= 1<<7 { + dAtA2[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA2[j1] = uint8(num) + j1++ + } + i -= j1 + copy(dAtA[i:], dAtA2[:j1]) + i = encodeVarintMetadata(dAtA, i, uint64(j1)) + i-- + dAtA[i] = 0x3a + } + if len(m.Root) > 0 { + i -= len(m.Root) + copy(dAtA[i:], m.Root) + i = encodeVarintMetadata(dAtA, i, uint64(len(m.Root))) + i-- + dAtA[i] = 0x32 + } + if 
m.NumChunks != 0 { + i = encodeVarintMetadata(dAtA, i, uint64(m.NumChunks)) + i-- + dAtA[i] = 0x28 + } + if m.TotalSize != 0 { + i = encodeVarintMetadata(dAtA, i, uint64(m.TotalSize)) + i-- + dAtA[i] = 0x20 + } + if m.ChunkSize != 0 { + i = encodeVarintMetadata(dAtA, i, uint64(m.ChunkSize)) + i-- + dAtA[i] = 0x18 + } + if m.HashAlgo != 0 { + i = encodeVarintMetadata(dAtA, i, uint64(m.HashAlgo)) + i-- + dAtA[i] = 0x10 + } + if len(m.CommitmentType) > 0 { + i -= len(m.CommitmentType) + copy(dAtA[i:], m.CommitmentType) + i = encodeVarintMetadata(dAtA, i, uint64(len(m.CommitmentType))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ChunkProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ChunkProof) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ChunkProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PathDirections) > 0 { + for iNdEx := len(m.PathDirections) - 1; iNdEx >= 0; iNdEx-- { + i-- + if m.PathDirections[iNdEx] { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + } + i = encodeVarintMetadata(dAtA, i, uint64(len(m.PathDirections))) + i-- + dAtA[i] = 0x22 + } + if len(m.PathHashes) > 0 { + for iNdEx := len(m.PathHashes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PathHashes[iNdEx]) + copy(dAtA[i:], m.PathHashes[iNdEx]) + i = encodeVarintMetadata(dAtA, i, uint64(len(m.PathHashes[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.LeafHash) > 0 { + i -= len(m.LeafHash) + copy(dAtA[i:], m.LeafHash) + i = encodeVarintMetadata(dAtA, i, uint64(len(m.LeafHash))) + i-- + dAtA[i] = 0x12 + } + if m.ChunkIndex != 0 { + i = encodeVarintMetadata(dAtA, i, uint64(m.ChunkIndex)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *CascadeMetadata) Marshal() 
(dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -363,6 +732,32 @@ func (m *CascadeMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.ChunkProofs) > 0 { + for iNdEx := len(m.ChunkProofs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ChunkProofs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetadata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if m.AvailabilityCommitment != nil { + { + size, err := m.AvailabilityCommitment.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetadata(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } if m.Public { i-- if m.Public { @@ -464,6 +859,67 @@ func (m *SenseMetadata) Size() (n int) { return n } +func (m *AvailabilityCommitment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CommitmentType) + if l > 0 { + n += 1 + l + sovMetadata(uint64(l)) + } + if m.HashAlgo != 0 { + n += 1 + sovMetadata(uint64(m.HashAlgo)) + } + if m.ChunkSize != 0 { + n += 1 + sovMetadata(uint64(m.ChunkSize)) + } + if m.TotalSize != 0 { + n += 1 + sovMetadata(uint64(m.TotalSize)) + } + if m.NumChunks != 0 { + n += 1 + sovMetadata(uint64(m.NumChunks)) + } + l = len(m.Root) + if l > 0 { + n += 1 + l + sovMetadata(uint64(l)) + } + if len(m.ChallengeIndices) > 0 { + l = 0 + for _, e := range m.ChallengeIndices { + l += sovMetadata(uint64(e)) + } + n += 1 + sovMetadata(uint64(l)) + l + } + return n +} + +func (m *ChunkProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ChunkIndex != 0 { + n += 1 + sovMetadata(uint64(m.ChunkIndex)) + } + l = len(m.LeafHash) + if l > 0 { + n += 1 + l + sovMetadata(uint64(l)) + } + if len(m.PathHashes) > 0 { + for _, b := range m.PathHashes { + l = len(b) + n += 1 + l + sovMetadata(uint64(l)) + } + } + if len(m.PathDirections) > 0 { + n += 1 + 
sovMetadata(uint64(len(m.PathDirections))) + len(m.PathDirections)*1 + } + return n +} + func (m *CascadeMetadata) Size() (n int) { if m == nil { return 0 @@ -497,6 +953,16 @@ func (m *CascadeMetadata) Size() (n int) { if m.Public { n += 2 } + if m.AvailabilityCommitment != nil { + l = m.AvailabilityCommitment.Size() + n += 1 + l + sovMetadata(uint64(l)) + } + if len(m.ChunkProofs) > 0 { + for _, e := range m.ChunkProofs { + l = e.Size() + n += 1 + l + sovMetadata(uint64(l)) + } + } return n } @@ -754,7 +1220,7 @@ func (m *SenseMetadata) Unmarshal(dAtA []byte) error { } return nil } -func (m *CascadeMetadata) Unmarshal(dAtA []byte) error { +func (m *AvailabilityCommitment) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -777,15 +1243,15 @@ func (m *CascadeMetadata) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CascadeMetadata: wiretype end group for non-group") + return fmt.Errorf("proto: AvailabilityCommitment: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CascadeMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AvailabilityCommitment: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataHash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CommitmentType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -813,13 +1279,486 @@ func (m *CascadeMetadata) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DataHash = string(dAtA[iNdEx:postIndex]) + m.CommitmentType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FileName", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
HashAlgo", wireType) } - var stringLen uint64 + m.HashAlgo = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetadata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HashAlgo |= HashAlgo(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ChunkSize", wireType) + } + m.ChunkSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetadata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ChunkSize |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalSize", wireType) + } + m.TotalSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetadata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalSize |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumChunks", wireType) + } + m.NumChunks = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetadata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumChunks |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Root", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetadata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMetadata + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMetadata + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Root = append(m.Root[:0], dAtA[iNdEx:postIndex]...) + if m.Root == nil { + m.Root = []byte{} + } + iNdEx = postIndex + case 7: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetadata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ChallengeIndices = append(m.ChallengeIndices, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetadata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetadata + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetadata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.ChallengeIndices) == 0 { + m.ChallengeIndices = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetadata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ChallengeIndices = append(m.ChallengeIndices, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field ChallengeIndices", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipMetadata(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetadata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *ChunkProof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetadata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChunkProof: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChunkProof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ChunkIndex", wireType) + } + m.ChunkIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetadata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ChunkIndex |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeafHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetadata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMetadata + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMetadata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LeafHash = append(m.LeafHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.LeafHash == nil { + m.LeafHash = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PathHashes", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetadata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMetadata + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMetadata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PathHashes = append(m.PathHashes, make([]byte, postIndex-iNdEx)) + copy(m.PathHashes[len(m.PathHashes)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType == 0 { + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetadata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PathDirections = append(m.PathDirections, bool(v != 0)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetadata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetadata + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetadata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen + if elementCount != 0 && len(m.PathDirections) == 0 { + m.PathDirections = make([]bool, 0, elementCount) + } + for iNdEx < postIndex { + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetadata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PathDirections = append(m.PathDirections, bool(v != 0)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PathDirections", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipMetadata(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetadata + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CascadeMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetadata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CascadeMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CascadeMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataHash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetadata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetadata + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetadata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataHash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field FileName", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMetadata @@ -969,6 +1908,76 @@ func (m *CascadeMetadata) Unmarshal(dAtA []byte) error { } } m.Public = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailabilityCommitment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetadata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetadata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetadata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AvailabilityCommitment == nil { + m.AvailabilityCommitment = &AvailabilityCommitment{} + } + if err := m.AvailabilityCommitment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChunkProofs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetadata + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetadata + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetadata + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChunkProofs = append(m.ChunkProofs, &ChunkProof{}) + if err := m.ChunkProofs[len(m.ChunkProofs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMetadata(dAtA[iNdEx:]) diff --git a/x/action/v1/types/metadata_proto_test.go 
b/x/action/v1/types/metadata_proto_test.go new file mode 100644 index 00000000..c36b1f2e --- /dev/null +++ b/x/action/v1/types/metadata_proto_test.go @@ -0,0 +1,107 @@ +package types + +import ( + "testing" + + "github.com/cosmos/gogoproto/proto" + "github.com/stretchr/testify/require" +) + +func TestAvailabilityCommitmentAndChunkProofRoundTrip(t *testing.T) { + commitment := &AvailabilityCommitment{ + CommitmentType: "lep5/chunk-merkle/v1", + HashAlgo: HashAlgo_HASH_ALGO_BLAKE3, + ChunkSize: 262144, + TotalSize: 1048576, + NumChunks: 4, + Root: []byte("0123456789abcdef0123456789abcdef"), + } + + bz, err := proto.Marshal(commitment) + require.NoError(t, err) + + var decodedCommitment AvailabilityCommitment + require.NoError(t, proto.Unmarshal(bz, &decodedCommitment)) + require.Equal(t, commitment, &decodedCommitment) + + proof := &ChunkProof{ + ChunkIndex: 2, + LeafHash: []byte("leaf-hash-2"), + PathHashes: [][]byte{[]byte("sibling-0"), []byte("sibling-1")}, + PathDirections: []bool{ + true, + false, + }, + } + + proofBz, err := proto.Marshal(proof) + require.NoError(t, err) + + var decodedProof ChunkProof + require.NoError(t, proto.Unmarshal(proofBz, &decodedProof)) + require.Equal(t, proof, &decodedProof) +} + +func TestCascadeMetadataBackwardsCompatibleWithoutNewFields(t *testing.T) { + legacyLike := &CascadeMetadata{ + DataHash: "legacy-data-hash", + FileName: "legacy.txt", + RqIdsIc: 10, + RqIdsMax: 50, + RqIdsIds: []string{"id-1", "id-2"}, + Signatures: "legacy-signature", + Public: true, + } + + bz, err := proto.Marshal(legacyLike) + require.NoError(t, err) + + var decoded CascadeMetadata + require.NoError(t, proto.Unmarshal(bz, &decoded)) + + require.Equal(t, legacyLike.DataHash, decoded.DataHash) + require.Equal(t, legacyLike.FileName, decoded.FileName) + require.Equal(t, legacyLike.RqIdsIc, decoded.RqIdsIc) + require.Equal(t, legacyLike.RqIdsMax, decoded.RqIdsMax) + require.Equal(t, legacyLike.RqIdsIds, decoded.RqIdsIds) + require.Equal(t, 
legacyLike.Signatures, decoded.Signatures) + require.Equal(t, legacyLike.Public, decoded.Public) + require.Nil(t, decoded.AvailabilityCommitment) + require.Empty(t, decoded.ChunkProofs) +} + +func TestCascadeMetadataRoundTripWithNewFields(t *testing.T) { + extended := &CascadeMetadata{ + DataHash: "extended-data-hash", + FileName: "extended.txt", + RqIdsIc: 11, + RqIdsMax: 99, + RqIdsIds: []string{"id-a", "id-b", "id-c"}, + Signatures: "extended-signature", + Public: false, + AvailabilityCommitment: &AvailabilityCommitment{ + CommitmentType: "lep5/chunk-merkle/v1", + HashAlgo: HashAlgo_HASH_ALGO_BLAKE3, + ChunkSize: 262144, + TotalSize: 786432, + NumChunks: 3, + Root: []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + }, + ChunkProofs: []*ChunkProof{ + { + ChunkIndex: 0, + LeafHash: []byte("leaf-0"), + PathHashes: [][]byte{[]byte("s0"), []byte("s1")}, + PathDirections: []bool{true, false}, + }, + }, + } + + bz, err := proto.Marshal(extended) + require.NoError(t, err) + + var decoded CascadeMetadata + require.NoError(t, proto.Unmarshal(bz, &decoded)) + require.Equal(t, extended, &decoded) +} + diff --git a/x/action/v1/types/params.go b/x/action/v1/types/params.go index f268a8bb..fe8fc494 100644 --- a/x/action/v1/types/params.go +++ b/x/action/v1/types/params.go @@ -22,8 +22,10 @@ var ( KeyExpirationDuration = []byte("ExpirationDuration") KeyMinProcessingTime = []byte("MinProcessingTime") KeyMaxProcessingTime = []byte("MaxProcessingTime") - KeySuperNodeFeeShare = []byte("SuperNodeFeeShare") - KeyFoundationFeeShare = []byte("FoundationFeeShare") + KeySuperNodeFeeShare = []byte("SuperNodeFeeShare") + KeyFoundationFeeShare = []byte("FoundationFeeShare") + KeySVCChallengeCount = []byte("SVCChallengeCount") + KeySVCMinChunksForChallenge = []byte("SVCMinChunksForChallenge") ) // Default parameter values @@ -37,8 +39,10 @@ var ( DefaultExpirationDuration = 24 * time.Hour // 24 hour expiration DefaultMinProcessingTime = 1 * time.Minute // 1 minute minimum processing time 
DefaultMaxProcessingTime = 1 * time.Hour // 1 hour maximum processing time - DefaultSuperNodeFeeShare = "1.000000000000000000" // 1.0 (100%) - DefaultFoundationFeeShare = "0.000000000000000000" // 0.0 (0%) + DefaultSuperNodeFeeShare = "1.000000000000000000" // 1.0 (100%) + DefaultFoundationFeeShare = "0.000000000000000000" // 0.0 (0%) + DefaultSVCChallengeCount = uint32(8) // LEP-5: number of chunks to challenge + DefaultSVCMinChunksForChallenge = uint32(4) // LEP-5: minimum chunks required for SVC ) // ParamKeyTable the param key table for launch module @@ -59,19 +63,23 @@ func NewParams( maxProcessingTime time.Duration, superNodeFeeShare string, foundationFeeShare string, + svcChallengeCount uint32, + svcMinChunksForChallenge uint32, ) Params { return Params{ - BaseActionFee: baseActionFee, - FeePerKbyte: feePerKbyte, - MaxActionsPerBlock: maxActionsPerBlock, - MinSuperNodes: minSuperNodes, - MaxDdAndFingerprints: maxDdAndFingerprints, - MaxRaptorQSymbols: maxRaptorQSymbols, - ExpirationDuration: expirationDuration, - MinProcessingTime: minProcessingTime, - MaxProcessingTime: maxProcessingTime, - SuperNodeFeeShare: superNodeFeeShare, - FoundationFeeShare: foundationFeeShare, + BaseActionFee: baseActionFee, + FeePerKbyte: feePerKbyte, + MaxActionsPerBlock: maxActionsPerBlock, + MinSuperNodes: minSuperNodes, + MaxDdAndFingerprints: maxDdAndFingerprints, + MaxRaptorQSymbols: maxRaptorQSymbols, + ExpirationDuration: expirationDuration, + MinProcessingTime: minProcessingTime, + MaxProcessingTime: maxProcessingTime, + SuperNodeFeeShare: superNodeFeeShare, + FoundationFeeShare: foundationFeeShare, + SvcChallengeCount: svcChallengeCount, + SvcMinChunksForChallenge: svcMinChunksForChallenge, } } @@ -89,6 +97,8 @@ func DefaultParams() Params { DefaultMaxProcessingTime, DefaultSuperNodeFeeShare, DefaultFoundationFeeShare, + DefaultSVCChallengeCount, + DefaultSVCMinChunksForChallenge, ) } @@ -106,6 +116,8 @@ func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { 
paramtypes.NewParamSetPair(KeyMaxProcessingTime, &p.MaxProcessingTime, validateDuration), paramtypes.NewParamSetPair(KeySuperNodeFeeShare, &p.SuperNodeFeeShare, validateDecString), paramtypes.NewParamSetPair(KeyFoundationFeeShare, &p.FoundationFeeShare, validateDecString), + paramtypes.NewParamSetPair(KeySVCChallengeCount, &p.SvcChallengeCount, validateUint32), + paramtypes.NewParamSetPair(KeySVCMinChunksForChallenge, &p.SvcMinChunksForChallenge, validateUint32), } } @@ -155,6 +167,14 @@ func (p Params) Validate() error { return err } + if err := validateUint32(p.SvcChallengeCount); err != nil { + return err + } + + if err := validateUint32(p.SvcMinChunksForChallenge); err != nil { + return err + } + // Additional validation rules if p.MinProcessingTime >= p.MaxProcessingTime { return fmt.Errorf("min processing time must be less than max processing time") @@ -203,6 +223,15 @@ func validateUint64(v interface{}) error { return nil } +func validateUint32(v interface{}) error { + _, ok := v.(uint32) + if !ok { + return fmt.Errorf("invalid parameter type: %T", v) + } + + return nil +} + func validateDuration(v interface{}) error { duration, ok := v.(time.Duration) if !ok { diff --git a/x/action/v1/types/params.pb.go b/x/action/v1/types/params.pb.go index 03436c1a..b4aefb10 100644 --- a/x/action/v1/types/params.pb.go +++ b/x/action/v1/types/params.pb.go @@ -47,6 +47,9 @@ type Params struct { // Reward Distribution SuperNodeFeeShare string `protobuf:"bytes,10,opt,name=super_node_fee_share,json=superNodeFeeShare,proto3" json:"super_node_fee_share,omitempty"` FoundationFeeShare string `protobuf:"bytes,11,opt,name=foundation_fee_share,json=foundationFeeShare,proto3" json:"foundation_fee_share,omitempty"` + // LEP-5: Storage Verification Challenge parameters + SvcChallengeCount uint32 `protobuf:"varint,12,opt,name=svc_challenge_count,json=svcChallengeCount,proto3" json:"svc_challenge_count,omitempty"` + SvcMinChunksForChallenge uint32 
`protobuf:"varint,13,opt,name=svc_min_chunks_for_challenge,json=svcMinChunksForChallenge,proto3" json:"svc_min_chunks_for_challenge,omitempty"` } func (m *Params) Reset() { *m = Params{} } @@ -159,6 +162,20 @@ func (m *Params) GetFoundationFeeShare() string { return "" } +func (m *Params) GetSvcChallengeCount() uint32 { + if m != nil { + return m.SvcChallengeCount + } + return 0 +} + +func (m *Params) GetSvcMinChunksForChallenge() uint32 { + if m != nil { + return m.SvcMinChunksForChallenge + } + return 0 +} + func init() { proto.RegisterType((*Params)(nil), "lumera.action.v1.Params") } @@ -166,43 +183,47 @@ func init() { func init() { proto.RegisterFile("lumera/action/v1/params.proto", fileDescriptor_f412eae394529c22) } var fileDescriptor_f412eae394529c22 = []byte{ - // 572 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xbf, 0x6f, 0xd4, 0x3e, - 0x14, 0xbf, 0x7c, 0xbf, 0xe5, 0x68, 0x5d, 0x95, 0x72, 0xee, 0x21, 0xd2, 0x4a, 0xe4, 0x2a, 0x06, - 0x54, 0x81, 0x48, 0x74, 0x20, 0x16, 0x16, 0xe8, 0x51, 0x55, 0x48, 0x20, 0x74, 0xdc, 0x75, 0x62, - 0xb1, 0x9c, 0xe4, 0x25, 0x98, 0x9e, 0xed, 0x60, 0xe7, 0xaa, 0x74, 0xe6, 0x1f, 0x60, 0x64, 0x64, - 0x64, 0x64, 0xe0, 0x8f, 0xe8, 0x58, 0x31, 0x31, 0x15, 0xd4, 0x0e, 0x20, 0xfe, 0x0a, 0x64, 0x3b, - 0xe1, 0xaa, 0xaa, 0x12, 0xb0, 0x9c, 0xce, 0xef, 0xf3, 0xe3, 0x3d, 0xfb, 0xf3, 0x82, 0xae, 0x4d, - 0xa6, 0x1c, 0x14, 0x8d, 0x68, 0x52, 0x32, 0x29, 0xa2, 0xbd, 0x7e, 0x54, 0x50, 0x45, 0xb9, 0x0e, - 0x0b, 0x25, 0x4b, 0x89, 0x2f, 0x3b, 0x38, 0x74, 0x70, 0xb8, 0xd7, 0x5f, 0x5b, 0x4d, 0xa4, 0xe6, - 0x52, 0x13, 0x8b, 0x47, 0xee, 0xe0, 0xc8, 0x6b, 0x81, 0x3b, 0x45, 0x31, 0xd5, 0x10, 0xed, 0xf5, - 0x63, 0x28, 0x69, 0x3f, 0x4a, 0x24, 0x13, 0x35, 0xde, 0xcd, 0x65, 0x2e, 0x9d, 0xce, 0xfc, 0x6b, - 0x54, 0xb9, 0x94, 0xf9, 0x04, 0x22, 0x7b, 0x8a, 0xa7, 0x59, 0x94, 0x4e, 0x15, 0xb5, 0xdd, 0x1c, - 0xde, 0xa1, 0x9c, 0x09, 0x19, 0xd9, 0x5f, 0x57, 0xba, 0xfe, 0xa6, 0x8d, 0xda, 0x43, 0x3b, 
0x26, - 0x7e, 0x8a, 0x96, 0x4d, 0x3b, 0xe2, 0x06, 0x24, 0x19, 0x80, 0xef, 0xad, 0x7b, 0x1b, 0x8b, 0x77, - 0x56, 0xc3, 0x7a, 0x36, 0x03, 0x87, 0xf5, 0x34, 0xe1, 0x23, 0xc9, 0xc4, 0x60, 0xe1, 0xe0, 0xa8, - 0xd7, 0xfa, 0xf0, 0xfd, 0xe3, 0x4d, 0x6f, 0xb4, 0x64, 0xd0, 0x4d, 0xab, 0xdd, 0x06, 0xc0, 0x8f, - 0xd1, 0x52, 0x06, 0x40, 0x0a, 0x50, 0x64, 0x37, 0xde, 0x2f, 0xc1, 0xff, 0xef, 0x1f, 0xbc, 0x16, - 0x33, 0x80, 0x21, 0xa8, 0x27, 0x46, 0x88, 0xfb, 0xe8, 0x0a, 0xa7, 0x55, 0x3d, 0x96, 0xb6, 0x8e, - 0xf1, 0x44, 0x26, 0xbb, 0xfe, 0xff, 0xeb, 0xde, 0xc6, 0xdc, 0x08, 0x73, 0x5a, 0xb9, 0xb6, 0x7a, - 0x08, 0x6a, 0x60, 0x10, 0x7c, 0x03, 0x2d, 0x73, 0x26, 0x88, 0x9e, 0x1a, 0xb2, 0x90, 0x29, 0x68, - 0x7f, 0xce, 0x92, 0x97, 0x38, 0x13, 0x63, 0x53, 0x7d, 0x66, 0x8a, 0xf8, 0x1e, 0xba, 0x6a, 0xac, - 0xd3, 0x94, 0x50, 0x91, 0x92, 0x8c, 0x89, 0x1c, 0x54, 0xa1, 0x98, 0x28, 0xb5, 0x7f, 0xc1, 0xf2, - 0xbb, 0x9c, 0x56, 0x5b, 0xe9, 0xa6, 0x48, 0xb7, 0x4f, 0x61, 0x38, 0x42, 0xa6, 0x4e, 0x14, 0x2d, - 0x4a, 0xa9, 0xc8, 0x6b, 0xa2, 0xf7, 0x79, 0x2c, 0x27, 0xda, 0x6f, 0x5b, 0x4d, 0x87, 0xd3, 0x6a, - 0x64, 0xa1, 0xe7, 0x63, 0x07, 0xe0, 0x1d, 0xb4, 0x02, 0x55, 0xc1, 0x5c, 0x18, 0xa4, 0x49, 0xc5, - 0xbf, 0x58, 0x3f, 0x89, 0x8b, 0x2d, 0x6c, 0x62, 0x0b, 0xb7, 0x6a, 0xc2, 0x60, 0xde, 0x3c, 0xc9, - 0xbb, 0xaf, 0x3d, 0x6f, 0x84, 0x67, 0xfa, 0x06, 0xc5, 0x63, 0xb4, 0x62, 0x6e, 0x59, 0x28, 0x99, - 0x80, 0xd6, 0x4c, 0xe4, 0xa4, 0x64, 0x1c, 0xfc, 0xf9, 0xbf, 0x77, 0xed, 0x70, 0x26, 0x86, 0xbf, - 0xe5, 0x3b, 0x8c, 0x03, 0x7e, 0x85, 0x56, 0xcc, 0xdd, 0xce, 0x9a, 0x2e, 0xfc, 0xc9, 0xb4, 0x67, - 0x4c, 0x7f, 0x1e, 0xf5, 0xce, 0x53, 0xd7, 0xbd, 0x68, 0x75, 0xa6, 0xd7, 0x03, 0xd4, 0x9d, 0x45, - 0x64, 0x16, 0x8e, 0xe8, 0x97, 0x54, 0x81, 0x8f, 0xd6, 0xbd, 0x8d, 0x85, 0xc1, 0xa5, 0xcf, 0x9f, - 0x6e, 0xa3, 0x7a, 0x5b, 0xb6, 0x20, 0x19, 0x75, 0x74, 0x13, 0xdc, 0x36, 0xc0, 0xd8, 0x10, 0xf1, - 0x43, 0xd4, 0xcd, 0xe4, 0x54, 0xa4, 0xb4, 0xd9, 0xd8, 0xda, 0x60, 0xf1, 0x5c, 0x03, 0x3c, 0xe3, - 0x36, 0x0e, 0xf7, 0xe7, 0x7e, 
0xbc, 0xef, 0x79, 0x83, 0x5b, 0x07, 0xc7, 0x81, 0x77, 0x78, 0x1c, - 0x78, 0xdf, 0x8e, 0x03, 0xef, 0xed, 0x49, 0xd0, 0x3a, 0x3c, 0x09, 0x5a, 0x5f, 0x4e, 0x82, 0xd6, - 0x8b, 0x4e, 0x75, 0xea, 0x7b, 0x2e, 0xf7, 0x0b, 0xd0, 0x71, 0xdb, 0x5e, 0xfe, 0xee, 0xaf, 0x00, - 0x00, 0x00, 0xff, 0xff, 0xba, 0x83, 0x33, 0xfd, 0xf0, 0x03, 0x00, 0x00, + // 638 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0xcf, 0x6e, 0x13, 0x3b, + 0x14, 0xc6, 0x33, 0xf7, 0xf6, 0xe6, 0xb6, 0x2e, 0xa1, 0xc4, 0x09, 0x62, 0x5a, 0x41, 0x12, 0xb1, + 0x40, 0x11, 0x88, 0x19, 0x05, 0xc4, 0x86, 0x05, 0xd0, 0xa4, 0x8a, 0x90, 0xf8, 0xa3, 0x90, 0x74, + 0xc5, 0xc6, 0x72, 0x66, 0x4e, 0x52, 0xd3, 0xd8, 0x1e, 0xec, 0x99, 0x68, 0xfa, 0x16, 0x2c, 0x59, + 0xb2, 0x64, 0xc9, 0x82, 0x87, 0xe8, 0xb2, 0x62, 0xc5, 0xaa, 0xa0, 0x76, 0x01, 0x62, 0xcb, 0x0b, + 0x20, 0x7b, 0x66, 0x9a, 0xaa, 0xaa, 0x04, 0x6c, 0xa2, 0xd8, 0xbf, 0xef, 0xfb, 0xce, 0xb1, 0x8f, + 0x35, 0xe8, 0xda, 0x2c, 0xe1, 0xa0, 0xa8, 0x4f, 0x83, 0x98, 0x49, 0xe1, 0xcf, 0x3b, 0x7e, 0x44, + 0x15, 0xe5, 0xda, 0x8b, 0x94, 0x8c, 0x25, 0xbe, 0x94, 0x61, 0x2f, 0xc3, 0xde, 0xbc, 0xb3, 0xb1, + 0x1e, 0x48, 0xcd, 0xa5, 0x26, 0x96, 0xfb, 0xd9, 0x22, 0x13, 0x6f, 0x34, 0xb2, 0x95, 0x3f, 0xa6, + 0x1a, 0xfc, 0x79, 0x67, 0x0c, 0x31, 0xed, 0xf8, 0x81, 0x64, 0x22, 0xe7, 0xf5, 0xa9, 0x9c, 0xca, + 0xcc, 0x67, 0xfe, 0x15, 0xae, 0xa9, 0x94, 0xd3, 0x19, 0xf8, 0x76, 0x35, 0x4e, 0x26, 0x7e, 0x98, + 0x28, 0x6a, 0xab, 0x65, 0xbc, 0x4a, 0x39, 0x13, 0xd2, 0xb7, 0xbf, 0xd9, 0xd6, 0xf5, 0x9f, 0x65, + 0x54, 0x1e, 0xd8, 0x36, 0xf1, 0x53, 0xb4, 0x66, 0xca, 0x91, 0xac, 0x41, 0x32, 0x01, 0x70, 0x9d, + 0x96, 0xd3, 0x5e, 0xbd, 0xb3, 0xee, 0xe5, 0xbd, 0x19, 0xec, 0xe5, 0xdd, 0x78, 0x3d, 0xc9, 0x44, + 0x77, 0x65, 0xff, 0xb0, 0x59, 0x7a, 0xff, 0xed, 0xc3, 0x4d, 0x67, 0x58, 0x31, 0x74, 0xd3, 0x7a, + 0xfb, 0x00, 0xf8, 0x31, 0xaa, 0x4c, 0x00, 0x48, 0x04, 0x8a, 0xec, 0x8e, 0xf7, 0x62, 0x70, 0xff, + 0xf9, 0x8b, 0xac, 0xd5, 0x09, 0xc0, 0x00, 
0xd4, 0x13, 0x63, 0xc4, 0x1d, 0x74, 0x99, 0xd3, 0x34, + 0x6f, 0x4b, 0xdb, 0xc4, 0xf1, 0x4c, 0x06, 0xbb, 0xee, 0xbf, 0x2d, 0xa7, 0xbd, 0x34, 0xc4, 0x9c, + 0xa6, 0x59, 0x59, 0x3d, 0x00, 0xd5, 0x35, 0x04, 0xdf, 0x40, 0x6b, 0x9c, 0x09, 0xa2, 0x13, 0x23, + 0x16, 0x32, 0x04, 0xed, 0x2e, 0x59, 0x71, 0x85, 0x33, 0x31, 0x32, 0xbb, 0xcf, 0xcd, 0x26, 0xbe, + 0x87, 0xae, 0x98, 0xe8, 0x30, 0x24, 0x54, 0x84, 0x64, 0xc2, 0xc4, 0x14, 0x54, 0xa4, 0x98, 0x88, + 0xb5, 0xfb, 0x9f, 0xd5, 0xd7, 0x39, 0x4d, 0xb7, 0xc2, 0x4d, 0x11, 0xf6, 0x4f, 0x31, 0xec, 0x23, + 0xb3, 0x4f, 0x14, 0x8d, 0x62, 0xa9, 0xc8, 0x6b, 0xa2, 0xf7, 0xf8, 0x58, 0xce, 0xb4, 0x5b, 0xb6, + 0x9e, 0x2a, 0xa7, 0xe9, 0xd0, 0xa2, 0x17, 0xa3, 0x0c, 0xe0, 0x6d, 0x54, 0x83, 0x34, 0x62, 0xd9, + 0x30, 0x48, 0x31, 0x15, 0xf7, 0xff, 0xfc, 0x4a, 0xb2, 0xb1, 0x79, 0xc5, 0xd8, 0xbc, 0xad, 0x5c, + 0xd0, 0x5d, 0x36, 0x57, 0xf2, 0xf6, 0x4b, 0xd3, 0x19, 0xe2, 0x85, 0xbf, 0xa0, 0x78, 0x84, 0x6a, + 0xe6, 0x94, 0x91, 0x92, 0x01, 0x68, 0xcd, 0xc4, 0x94, 0xc4, 0x8c, 0x83, 0xbb, 0xfc, 0xe7, 0xa9, + 0x55, 0xce, 0xc4, 0xe0, 0xc4, 0xbe, 0xcd, 0x38, 0xe0, 0x57, 0xa8, 0x66, 0xce, 0x76, 0x36, 0x74, + 0xe5, 0x77, 0xa1, 0x4d, 0x13, 0xfa, 0xe3, 0xb0, 0x79, 0x9e, 0x3b, 0xaf, 0x45, 0xd3, 0x33, 0xb5, + 0x1e, 0xa2, 0xfa, 0x62, 0x44, 0xe6, 0xc1, 0x11, 0xbd, 0x43, 0x15, 0xb8, 0xa8, 0xe5, 0xb4, 0x57, + 0xba, 0x17, 0x3f, 0x7d, 0xbc, 0x8d, 0xf2, 0xd7, 0xb2, 0x05, 0xc1, 0xb0, 0xaa, 0x8b, 0xc1, 0xf5, + 0x01, 0x46, 0x46, 0x88, 0x1f, 0xa1, 0xfa, 0x44, 0x26, 0x22, 0xa4, 0xc5, 0x8b, 0xcd, 0x03, 0x56, + 0xcf, 0x0d, 0xc0, 0x0b, 0xed, 0x49, 0x82, 0x87, 0x6a, 0x7a, 0x1e, 0x90, 0x60, 0x87, 0xce, 0x66, + 0x20, 0xa6, 0x40, 0x02, 0x99, 0x88, 0xd8, 0xbd, 0xd0, 0x72, 0xda, 0x95, 0x61, 0x55, 0xcf, 0x83, + 0x5e, 0x41, 0x7a, 0x06, 0xe0, 0x07, 0xe8, 0xaa, 0xd1, 0x9b, 0x7b, 0x0f, 0x76, 0x12, 0xb1, 0xab, + 0xc9, 0x44, 0xaa, 0x85, 0xdd, 0xad, 0x58, 0xa3, 0xab, 0xe7, 0xc1, 0x33, 0x26, 0x7a, 0x56, 0xd1, + 0x97, 0xea, 0x24, 0xe4, 0xfe, 0xd2, 0xf7, 0x77, 0x4d, 0xa7, 0x7b, 0x6b, 0xff, 
0xa8, 0xe1, 0x1c, + 0x1c, 0x35, 0x9c, 0xaf, 0x47, 0x0d, 0xe7, 0xcd, 0x71, 0xa3, 0x74, 0x70, 0xdc, 0x28, 0x7d, 0x3e, + 0x6e, 0x94, 0x5e, 0x56, 0xd3, 0x53, 0xdf, 0x8f, 0x78, 0x2f, 0x02, 0x3d, 0x2e, 0xdb, 0xcb, 0xbe, + 0xfb, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x6b, 0xc4, 0xa0, 0x60, 0x60, 0x04, 0x00, 0x00, } func (this *Params) Equal(that interface{}) bool { @@ -257,6 +278,12 @@ func (this *Params) Equal(that interface{}) bool { if this.FoundationFeeShare != that1.FoundationFeeShare { return false } + if this.SvcChallengeCount != that1.SvcChallengeCount { + return false + } + if this.SvcMinChunksForChallenge != that1.SvcMinChunksForChallenge { + return false + } return true } func (m *Params) Marshal() (dAtA []byte, err error) { @@ -279,6 +306,16 @@ func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.SvcMinChunksForChallenge != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.SvcMinChunksForChallenge)) + i-- + dAtA[i] = 0x68 + } + if m.SvcChallengeCount != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.SvcChallengeCount)) + i-- + dAtA[i] = 0x60 + } if len(m.FoundationFeeShare) > 0 { i -= len(m.FoundationFeeShare) copy(dAtA[i:], m.FoundationFeeShare) @@ -407,6 +444,12 @@ func (m *Params) Size() (n int) { if l > 0 { n += 1 + l + sovParams(uint64(l)) } + if m.SvcChallengeCount != 0 { + n += 1 + sovParams(uint64(m.SvcChallengeCount)) + } + if m.SvcMinChunksForChallenge != 0 { + n += 1 + sovParams(uint64(m.SvcMinChunksForChallenge)) + } return n } @@ -750,6 +793,44 @@ func (m *Params) Unmarshal(dAtA []byte) error { } m.FoundationFeeShare = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SvcChallengeCount", wireType) + } + m.SvcChallengeCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SvcChallengeCount |= 
uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SvcMinChunksForChallenge", wireType) + } + m.SvcMinChunksForChallenge = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SvcMinChunksForChallenge |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipParams(dAtA[iNdEx:])