From 0da95a3a50729b901e4ea73121e19d5b950ba5e6 Mon Sep 17 00:00:00 2001 From: Ardit Marku Date: Tue, 10 Mar 2026 16:31:34 +0200 Subject: [PATCH] Enforce fair FIFO queue ordering on-chain --- solidity/src/FlowYieldVaultsRequests.sol | 159 +++++++--- solidity/test/FlowYieldVaultsRequests.t.sol | 319 +++++++++++++++++--- 2 files changed, 386 insertions(+), 92 deletions(-) diff --git a/solidity/src/FlowYieldVaultsRequests.sol b/solidity/src/FlowYieldVaultsRequests.sol index 7b4bef1..c892a1e 100644 --- a/solidity/src/FlowYieldVaultsRequests.sol +++ b/solidity/src/FlowYieldVaultsRequests.sol @@ -167,16 +167,19 @@ contract FlowYieldVaultsRequests is ReentrancyGuard, Ownable2Step { /// @notice All requests indexed by request ID mapping(uint256 => Request) public requests; - /// @notice Array of pending request IDs awaiting processing (FIFO order) - uint256[] public pendingRequestIds; - - /// @notice Index of request ID in global pending array (for O(1) lookup) - mapping(uint256 => uint256) private _requestIndexInGlobalArray; - /// @notice Index of yieldVaultId in user's yieldVaultsByUser array (for O(1) removal) /// @dev Internal visibility allows test helpers to properly initialize state mapping(address => mapping(uint64 => uint256)) internal _yieldVaultIndexInUserArray; + /// @notice Mapping of queued request IDs awaiting processing (FIFO order) + mapping(uint256 => uint256) private requestsQueue; + + /// @notice Pointer to the current head in requestsQueue. Denotes the next request to be processed + uint256 private _requestsQueueHead = 1; + + /// @notice Pointer to the current tail in requestsQueue. 
Points one past the last queued request; equals head when the queue is empty + uint256 private _requestsQueueTail = 1; + // ============================================ // Errors // ============================================ @@ -273,6 +276,12 @@ contract FlowYieldVaultsRequests is ReentrancyGuard, Ownable2Step { /// @notice No refund available for the specified token error NoRefundAvailable(address token); + /// @notice Invalid dequeue operation on an empty requests queue + error EmptyRequestsQueue(); + + /// @notice Processed request does not match the head of requestsQueue + error RequestProcessOutOfOrder(uint256 expectedId, uint256 processedId); + // ============================================ // Events // ============================================ @@ -843,7 +852,8 @@ contract FlowYieldVaultsRequests is ReentrancyGuard, Ownable2Step { if (userPendingRequestCount[request.user] > 0) { userPendingRequestCount[request.user]--; } - _removePendingRequest(requestId); + _removeUserPendingRequest(requestId); + _dropQueuedRequest(requestId); // === REFUND HANDLING (pull pattern) === // For CREATE/DEPOSIT requests, move funds from pendingUserBalances to claimableRefunds @@ -912,6 +922,9 @@ contract FlowYieldVaultsRequests is ReentrancyGuard, Ownable2Step { * @notice Processes a batch of PENDING requests. * @dev For successful requests, marks them as PROCESSING. * For rejected requests, marks them as FAILED. + * Requests are classified as successful/rejected based on validation + * logic that is performed on Cadence side, and not on the authorized + * COA's discretion. * Single-request processing is supported by passing one request id in * successfulRequestIds and an empty rejectedRequestIds array. 
* @param successfulRequestIds The request ids to start processing (PENDING -> PROCESSING) @@ -1072,12 +1085,21 @@ contract FlowYieldVaultsRequests is ReentrancyGuard, Ownable2Step { /// @notice Gets the count of pending requests /// @return Number of pending requests function getPendingRequestCount() external view returns (uint256) { - return pendingRequestIds.length; + return _requestsQueueLength(); } /// @notice Gets all pending request IDs /// @return Array of pending request IDs function getPendingRequestIds() external view returns (uint256[] memory) { + uint256[] memory pendingRequestIds = new uint256[](_requestsQueueLength()); + uint256 arrayIndex = 0; + for (uint256 i = _requestsQueueHead; i < _requestsQueueTail;) { + pendingRequestIds[arrayIndex] = requestsQueue[i]; + unchecked { + ++arrayIndex; + ++i; + } + } return pendingRequestIds; } @@ -1116,7 +1138,7 @@ contract FlowYieldVaultsRequests is ReentrancyGuard, Ownable2Step { string[] memory strategyIdentifiers ) { - if (startIndex >= pendingRequestIds.length) { + if (startIndex >= _requestsQueueLength()) { return ( new uint256[](0), new address[](0), @@ -1132,7 +1154,7 @@ contract FlowYieldVaultsRequests is ReentrancyGuard, Ownable2Step { ); } - uint256 remaining = pendingRequestIds.length - startIndex; + uint256 remaining = _requestsQueueLength() - startIndex; uint256 size = count == 0 ? remaining : (count < remaining ? 
count : remaining); @@ -1149,8 +1171,8 @@ contract FlowYieldVaultsRequests is ReentrancyGuard, Ownable2Step { vaultIdentifiers = new string[](size); strategyIdentifiers = new string[](size); - for (uint256 i = 0; i < size; ) { - Request memory req = requests[pendingRequestIds[startIndex + i]]; + for (uint256 i = 0; i < size;) { + Request memory req = requests[requestsQueue[_requestsQueueHead + startIndex + i]]; ids[i] = req.id; users[i] = req.user; requestTypes[i] = uint8(req.requestType); @@ -1397,7 +1419,8 @@ contract FlowYieldVaultsRequests is ReentrancyGuard, Ownable2Step { } // Remove from pending queues (both global and user-specific) - _removePendingRequest(requestId); + _removeUserPendingRequest(requestId); + _dropQueuedRequest(requestId); emit RequestProcessed( requestId, @@ -1502,7 +1525,9 @@ contract FlowYieldVaultsRequests is ReentrancyGuard, Ownable2Step { if (userPendingRequestCount[request.user] > 0) { userPendingRequestCount[request.user]--; } - _removePendingRequest(requestId); + _removeUserPendingRequest(requestId); + uint256 reqId = _dequeueRequest(); + if (reqId != requestId) revert RequestProcessOutOfOrder(reqId, requestId); emit RequestProcessed( requestId, @@ -1743,8 +1768,7 @@ contract FlowYieldVaultsRequests is ReentrancyGuard, Ownable2Step { }); // Add to global pending queue with index tracking for O(1) lookup - _requestIndexInGlobalArray[requestId] = pendingRequestIds.length; - pendingRequestIds.push(requestId); + _enqueueRequest(requestId); userPendingRequestCount[msg.sender]++; // Add to user's pending array with index tracking for O(1) removal @@ -1780,40 +1804,16 @@ contract FlowYieldVaultsRequests is ReentrancyGuard, Ownable2Step { } /** - * @dev Removes a request from all pending queues while preserving request history. 
- * Uses two different removal strategies: - * - Global array: Shift elements to maintain FIFO order (O(n) but necessary for fair processing) - * - User array: Swap-and-pop for O(1) removal (order doesn't affect processing) + * @dev Removes a request from the user pending requests mapping while preserving request history. + * Uses the following removal strategy: + * - Swap-and-pop for O(1) removal (order doesn't affect processing) * * The request data remains in the `requests` mapping for historical queries; - * this function only removes it from the pending queues. - * @param requestId The request ID to remove from pending queues. + * This function only removes it from the user pending requests mapping. + * @param requestId The request ID to remove from the user pending requests mapping. */ - function _removePendingRequest(uint256 requestId) internal { - // === GLOBAL PENDING ARRAY REMOVAL === - // Uses O(1) lookup + O(n) shift to maintain FIFO order - // FIFO order is critical for DeFi fairness - requests must be processed in submission order - uint256 indexInGlobal = _requestIndexInGlobalArray[requestId]; - uint256 globalLength = pendingRequestIds.length; - - // Safety check: verify element exists at expected index - if (globalLength > 0 && indexInGlobal < globalLength && pendingRequestIds[indexInGlobal] == requestId) { - // Shift all subsequent elements left to maintain FIFO order - for (uint256 j = indexInGlobal; j < globalLength - 1; ) { - pendingRequestIds[j] = pendingRequestIds[j + 1]; - // Update index mapping for each shifted element - _requestIndexInGlobalArray[pendingRequestIds[j]] = j; - unchecked { - ++j; - } - } - // Remove the last element (now duplicated or the one to remove) - pendingRequestIds.pop(); - // Clean up index mapping - delete _requestIndexInGlobalArray[requestId]; - } - - // === USER PENDING ARRAY REMOVAL === + function _removeUserPendingRequest(uint256 requestId) internal { + // === USER PENDING REQUESTS ARRAY REMOVAL === // Uses 
swap-and-pop for O(1) removal (order doesn't affect FIFO processing) address user = requests[requestId].user; uint256[] storage userPendingIds = pendingRequestIdsByUser[user]; @@ -1835,4 +1835,69 @@ contract FlowYieldVaultsRequests is ReentrancyGuard, Ownable2Step { delete _requestIndexInUserArray[requestId]; } } + + /** + * @dev Enqueues a request in the requestsQueue and shifts the queue's tail pointer. + * + * @param requestId The request ID to enqueue in the pending requests queue. + */ + function _enqueueRequest(uint256 requestId) internal { + requestsQueue[_requestsQueueTail] = requestId; + _requestsQueueTail += 1; + } + + /** + * @dev Dequeues the head of requestsQueue and shifts the queue's head pointer. + * + * @return The request ID that was dequeued. + */ + function _dequeueRequest() internal returns (uint256) { + if (_requestsQueueLength() == 0) revert EmptyRequestsQueue(); + + uint256 requestId = requestsQueue[_requestsQueueHead]; + + delete requestsQueue[_requestsQueueHead]; + _requestsQueueHead += 1; + + return requestId; + } + + /** + * @dev Drops a request from the requestsQueue and shifts the queue to + * maintain FIFO order after the removal. + * + * @param requestId The request ID to remove from the pending requests queue. + */ + function _dropQueuedRequest(uint256 requestId) internal { + bool requestFound = false; + for (uint256 i = _requestsQueueHead; i < _requestsQueueTail;) { + if (requestsQueue[i] == requestId) { + requestFound = true; + } + + // Shift the matching request to the queue's tail, then delete it + if (requestFound && (i + 1 < _requestsQueueTail)) { + requestsQueue[i] = requestsQueue[i + 1]; + } else if (requestFound) { + delete requestsQueue[i]; + } + + unchecked { + ++i; + } + } + + // Decrement the queue tail only if the given requestId was found + if (!requestFound) revert RequestNotFound(); + _requestsQueueTail -= 1; + } + + /** + * @dev Counts the total number of pending requests in the requestsQueue. 
+ * + * @return The current requestsQueue length. + */ + function _requestsQueueLength() internal view returns (uint256) { + return _requestsQueueTail - _requestsQueueHead; + } } diff --git a/solidity/test/FlowYieldVaultsRequests.t.sol b/solidity/test/FlowYieldVaultsRequests.t.sol index a7fff0a..54610d4 100644 --- a/solidity/test/FlowYieldVaultsRequests.t.sol +++ b/solidity/test/FlowYieldVaultsRequests.t.sol @@ -339,6 +339,189 @@ contract FlowYieldVaultsRequestsTest is Test { _startProcessingBatch(reqId); } + function test_StartProcessingBatch_RevertRequestProcessOutOfOrder() public { + vm.startPrank(user); + uint256 req1 = c.createYieldVault{value: 1 ether}(NATIVE_FLOW, 1 ether, VAULT_ID, STRATEGY_ID); + uint256 req2 = c.createYieldVault{value: 2 ether}(NATIVE_FLOW, 2 ether, VAULT_ID, STRATEGY_ID); + vm.stopPrank(); + + vm.startPrank(coa); + vm.expectRevert(abi.encodeWithSelector( + FlowYieldVaultsRequests.RequestProcessOutOfOrder.selector, + req1, // the expected requestId, in the queue head + req2 // the provided requestId + )); + _startProcessingBatch(req2); + vm.stopPrank(); + } + + function test_StartProcessingBatch_MultipleSuccessfulRequests() public { + vm.startPrank(user); + uint256 req1 = c.createYieldVault{value: 1 ether}(NATIVE_FLOW, 1 ether, VAULT_ID, STRATEGY_ID); + uint256 req2 = c.createYieldVault{value: 2 ether}(NATIVE_FLOW, 2 ether, VAULT_ID, STRATEGY_ID); + uint256 req3 = c.createYieldVault{value: 3 ether}(NATIVE_FLOW, 3 ether, VAULT_ID, STRATEGY_ID); + uint256 req4 = c.createYieldVault{value: 4 ether}(NATIVE_FLOW, 4 ether, VAULT_ID, STRATEGY_ID); + uint256 req5 = c.createYieldVault{value: 5 ether}(NATIVE_FLOW, 5 ether, VAULT_ID, STRATEGY_ID); + vm.stopPrank(); + + vm.prank(coa); + // All 5 successful, transition to PROCESSING + uint256[] memory successfulRequestIds = new uint256[](5); + successfulRequestIds[0] = req1; + successfulRequestIds[1] = req2; + successfulRequestIds[2] = req3; + successfulRequestIds[3] = req4; + successfulRequestIds[4] = 
req5; + c.startProcessingBatch(successfulRequestIds, new uint256[](0)); + + (uint256[] memory requestIds, , , , , , , , , , ) = c.getPendingRequestsUnpacked(0, 0); + assertEq(requestIds.length, 0); + + for (uint256 i = 0; i < successfulRequestIds.length; i++) { + FlowYieldVaultsRequests.Request memory req = c.getRequest(successfulRequestIds[i]); + assertEq(uint8(req.status), uint8(FlowYieldVaultsRequests.RequestStatus.PROCESSING)); + } + } + + function test_StartProcessingBatch_MultipleSuccessfulAndRejectRequests() public { + vm.startPrank(user); + uint256 req1 = c.createYieldVault{value: 1 ether}(NATIVE_FLOW, 1 ether, VAULT_ID, STRATEGY_ID); + uint256 req2 = c.createYieldVault{value: 2 ether}(NATIVE_FLOW, 2 ether, VAULT_ID, STRATEGY_ID); + uint256 req3 = c.createYieldVault{value: 3 ether}(NATIVE_FLOW, 3 ether, VAULT_ID, STRATEGY_ID); + uint256 req4 = c.createYieldVault{value: 4 ether}(NATIVE_FLOW, 4 ether, VAULT_ID, STRATEGY_ID); + uint256 req5 = c.createYieldVault{value: 5 ether}(NATIVE_FLOW, 5 ether, VAULT_ID, STRATEGY_ID); + vm.stopPrank(); + + vm.prank(coa); + // 2 successful, 3 rejected + uint256[] memory successfulRequestIds = new uint256[](2); + successfulRequestIds[0] = req2; + successfulRequestIds[1] = req4; + uint256[] memory rejectedRequestIds = new uint256[](3); + rejectedRequestIds[0] = req1; + rejectedRequestIds[1] = req3; + rejectedRequestIds[2] = req5; + c.startProcessingBatch(successfulRequestIds, rejectedRequestIds); + + (uint256[] memory requestIds, , , , , , , , , , ) = c.getPendingRequestsUnpacked(0, 0); + assertEq(requestIds.length, 0); + + for (uint256 i = 0; i < successfulRequestIds.length; i++) { + FlowYieldVaultsRequests.Request memory req = c.getRequest(successfulRequestIds[i]); + assertEq(uint8(req.status), uint8(FlowYieldVaultsRequests.RequestStatus.PROCESSING)); + } + + for (uint256 i = 0; i < rejectedRequestIds.length; i++) { + FlowYieldVaultsRequests.Request memory req = c.getRequest(rejectedRequestIds[i]); + 
assertEq(uint8(req.status), uint8(FlowYieldVaultsRequests.RequestStatus.FAILED)); + } + } + + function test_StartProcessingBatch_MultipleRejectedRequests() public { + vm.startPrank(user); + uint256 req1 = c.createYieldVault{value: 1 ether}(NATIVE_FLOW, 1 ether, VAULT_ID, STRATEGY_ID); + uint256 req2 = c.createYieldVault{value: 2 ether}(NATIVE_FLOW, 2 ether, VAULT_ID, STRATEGY_ID); + uint256 req3 = c.createYieldVault{value: 3 ether}(NATIVE_FLOW, 3 ether, VAULT_ID, STRATEGY_ID); + uint256 req4 = c.createYieldVault{value: 4 ether}(NATIVE_FLOW, 4 ether, VAULT_ID, STRATEGY_ID); + uint256 req5 = c.createYieldVault{value: 5 ether}(NATIVE_FLOW, 5 ether, VAULT_ID, STRATEGY_ID); + vm.stopPrank(); + + vm.prank(coa); + // All 5 rejected, transition to FAILED + uint256[] memory rejectedRequestIds = new uint256[](5); + rejectedRequestIds[0] = req1; + rejectedRequestIds[1] = req2; + rejectedRequestIds[2] = req3; + rejectedRequestIds[3] = req4; + rejectedRequestIds[4] = req5; + c.startProcessingBatch(new uint256[](0), rejectedRequestIds); + + (uint256[] memory requestIds, , , , , , , , , , ) = c.getPendingRequestsUnpacked(0, 0); + assertEq(requestIds.length, 0); + + for (uint256 i = 0; i < rejectedRequestIds.length; i++) { + FlowYieldVaultsRequests.Request memory req = c.getRequest(rejectedRequestIds[i]); + assertEq(uint8(req.status), uint8(FlowYieldVaultsRequests.RequestStatus.FAILED)); + } + } + + function test_StartProcessingBatch_MultipleSuccessfulRequestsRevertRequestProcessOutOfOrder() public { + vm.startPrank(user); + uint256 req1 = c.createYieldVault{value: 1 ether}(NATIVE_FLOW, 1 ether, VAULT_ID, STRATEGY_ID); + uint256 req2 = c.createYieldVault{value: 2 ether}(NATIVE_FLOW, 2 ether, VAULT_ID, STRATEGY_ID); + uint256 req3 = c.createYieldVault{value: 3 ether}(NATIVE_FLOW, 3 ether, VAULT_ID, STRATEGY_ID); + uint256 req4 = c.createYieldVault{value: 4 ether}(NATIVE_FLOW, 4 ether, VAULT_ID, STRATEGY_ID); + uint256 req5 = c.createYieldVault{value: 5 ether}(NATIVE_FLOW, 5 ether, 
VAULT_ID, STRATEGY_ID); + vm.stopPrank(); + + vm.prank(coa); + // 2 successful but out-of-order, 3 rejected + uint256[] memory successfulRequestIds = new uint256[](2); + // These are out-of-order + successfulRequestIds[0] = req4; + successfulRequestIds[1] = req2; + uint256[] memory rejectedRequestIds = new uint256[](3); + rejectedRequestIds[0] = req1; + rejectedRequestIds[1] = req3; + rejectedRequestIds[2] = req5; + + vm.expectRevert(abi.encodeWithSelector( + FlowYieldVaultsRequests.RequestProcessOutOfOrder.selector, + req2, // the expected requestId, in the queue head + req4 // the provided requestId + )); + c.startProcessingBatch(successfulRequestIds, rejectedRequestIds); + + (uint256[] memory requestIds, , , , , , , , , , ) = c.getPendingRequestsUnpacked(0, 0); + assertEq(requestIds.length, 5); + + for (uint256 i = 0; i < successfulRequestIds.length; i++) { + FlowYieldVaultsRequests.Request memory req = c.getRequest(successfulRequestIds[i]); + assertEq(uint8(req.status), uint8(FlowYieldVaultsRequests.RequestStatus.PENDING)); + } + + for (uint256 i = 0; i < rejectedRequestIds.length; i++) { + FlowYieldVaultsRequests.Request memory req = c.getRequest(rejectedRequestIds[i]); + assertEq(uint8(req.status), uint8(FlowYieldVaultsRequests.RequestStatus.PENDING)); + } + } + + function test_StartProcessingBatch_MultipleRejectedRequestsOutOfOrder() public { + vm.startPrank(user); + uint256 req1 = c.createYieldVault{value: 1 ether}(NATIVE_FLOW, 1 ether, VAULT_ID, STRATEGY_ID); + uint256 req2 = c.createYieldVault{value: 2 ether}(NATIVE_FLOW, 2 ether, VAULT_ID, STRATEGY_ID); + uint256 req3 = c.createYieldVault{value: 3 ether}(NATIVE_FLOW, 3 ether, VAULT_ID, STRATEGY_ID); + uint256 req4 = c.createYieldVault{value: 4 ether}(NATIVE_FLOW, 4 ether, VAULT_ID, STRATEGY_ID); + uint256 req5 = c.createYieldVault{value: 5 ether}(NATIVE_FLOW, 5 ether, VAULT_ID, STRATEGY_ID); + vm.stopPrank(); + + vm.prank(coa); + // 2 successful, 3 rejected but out-of-order + uint256[] memory 
successfulRequestIds = new uint256[](2); + successfulRequestIds[0] = req4; + successfulRequestIds[1] = req5; + uint256[] memory rejectedRequestIds = new uint256[](3); + // These are out-of-order, but it shouldn't matter + rejectedRequestIds[0] = req3; + rejectedRequestIds[1] = req1; + rejectedRequestIds[2] = req2; + + c.startProcessingBatch(successfulRequestIds, rejectedRequestIds); + + (uint256[] memory requestIds, , , , , , , , , , ) = c.getPendingRequestsUnpacked(0, 0); + assertEq(requestIds.length, 0); + + for (uint256 i = 0; i < successfulRequestIds.length; i++) { + FlowYieldVaultsRequests.Request memory req = c.getRequest(successfulRequestIds[i]); + assertEq(uint8(req.status), uint8(FlowYieldVaultsRequests.RequestStatus.PROCESSING)); + } + + for (uint256 i = 0; i < rejectedRequestIds.length; i++) { + FlowYieldVaultsRequests.Request memory req = c.getRequest(rejectedRequestIds[i]); + assertEq(uint8(req.status), uint8(FlowYieldVaultsRequests.RequestStatus.FAILED)); + } + } + function test_CompleteProcessing_Success() public { vm.prank(user); uint256 reqId = c.createYieldVault{value: 1 ether}(NATIVE_FLOW, 1 ether, VAULT_ID, STRATEGY_ID); @@ -850,17 +1033,16 @@ contract FlowYieldVaultsRequestsTest is Test { vm.prank(user); uint256 req5 = c.createYieldVault{value: 1 ether}(NATIVE_FLOW, 1 ether, VAULT_ID, STRATEGY_ID); - // Process middle request (req3) vm.startPrank(coa); - _startProcessingBatch(req3); - c.completeProcessing(req3, true, 200, "Created"); + _startProcessingBatch(req1); + c.completeProcessing(req1, true, 200, "Created"); vm.stopPrank(); - // Verify FIFO order is maintained: [req1, req2, req4, req5] + // Verify FIFO order is maintained: [req2, req3, req4, req5] (uint256[] memory ids, , , , , , , , , , ) = c.getPendingRequestsUnpacked(0, 0); assertEq(ids.length, 4, "Should have 4 pending requests"); - assertEq(ids[0], req1, "First should be req1"); - assertEq(ids[1], req2, "Second should be req2"); + assertEq(ids[0], req2, "First should be req2"); + 
assertEq(ids[1], req3, "Second should be req3"); assertEq(ids[2], req4, "Third should be req4"); assertEq(ids[3], req5, "Fourth should be req5"); } @@ -891,11 +1073,8 @@ contract FlowYieldVaultsRequestsTest is Test { uint256 req3 = c.createYieldVault{value: 1 ether}(NATIVE_FLOW, 1 ether, VAULT_ID, STRATEGY_ID); vm.stopPrank(); - // Remove last element - vm.startPrank(coa); - _startProcessingBatch(req3); - c.completeProcessing(req3, true, 100, "Created"); - vm.stopPrank(); + // Remove last queue element, by cancelling the last request + c.cancelRequest(req3); (uint256[] memory ids, , , , , , , , , , ) = c.getPendingRequestsUnpacked(0, 0); assertEq(ids.length, 2); @@ -933,35 +1112,34 @@ contract FlowYieldVaultsRequestsTest is Test { uint256 req4 = c.createYieldVault{value: 1 ether}(NATIVE_FLOW, 1 ether, VAULT_ID, STRATEGY_ID); vm.stopPrank(); - // Process out of order: req2, req4, req1, req3 vm.startPrank(coa); - _startProcessingBatch(req2); - c.completeProcessing(req2, true, 100, "Created"); + _startProcessingBatch(req1); + c.completeProcessing(req1, true, 100, "Created"); - // After removing req2: [req1, req3, req4] + // After removing req1: [req2, req3, req4] (uint256[] memory ids1, , , , , , , , , , ) = c.getPendingRequestsUnpacked(0, 0); - assertEq(ids1[0], req1); + assertEq(ids1[0], req2); assertEq(ids1[1], req3); assertEq(ids1[2], req4); - _startProcessingBatch(req4); - c.completeProcessing(req4, true, 101, "Created"); + _startProcessingBatch(req2); + c.completeProcessing(req2, true, 101, "Created"); - // After removing req4: [req1, req3] + // After removing req2: [req3, req4] (uint256[] memory ids2, , , , , , , , , , ) = c.getPendingRequestsUnpacked(0, 0); - assertEq(ids2[0], req1); - assertEq(ids2[1], req3); + assertEq(ids2[0], req3); + assertEq(ids2[1], req4); - _startProcessingBatch(req1); - c.completeProcessing(req1, true, 102, "Created"); + _startProcessingBatch(req3); + c.completeProcessing(req3, true, 102, "Created"); - // After removing req1: [req3] + 
// After removing req3: [req4] (uint256[] memory ids3, , , , , , , , , , ) = c.getPendingRequestsUnpacked(0, 0); assertEq(ids3.length, 1); - assertEq(ids3[0], req3); + assertEq(ids3[0], req4); - _startProcessingBatch(req3); - c.completeProcessing(req3, true, 103, "Created"); + _startProcessingBatch(req4); + c.completeProcessing(req4, true, 103, "Created"); vm.stopPrank(); assertEq(c.getPendingRequestCount(), 0); @@ -1040,18 +1218,18 @@ contract FlowYieldVaultsRequestsTest is Test { - // Process req2 + // Process req1 vm.startPrank(coa); - _startProcessingBatch(req2); - c.completeProcessing(req2, true, 100, "Created"); + _startProcessingBatch(req1); + c.completeProcessing(req1, true, 100, "Created"); vm.stopPrank(); - // User should now have req1 and req3 + // User should now have req2 and req3 (uint256[] memory ids, , , , uint256[] memory amounts, , , , , , uint256 pendingBalance, ) = c.getPendingRequestsByUserUnpacked(user); assertEq(ids.length, 2, "User should have 2 remaining requests"); // Note: Order in user array may change due to swap-and-pop optimization - assertTrue(ids[0] == req1 || ids[0] == req3, "Should contain req1 or req3"); - assertTrue(ids[1] == req1 || ids[1] == req3, "Should contain req1 or req3"); + assertTrue(ids[0] == req2 || ids[0] == req3, "Should contain req2 or req3"); + assertTrue(ids[1] == req2 || ids[1] == req3, "Should contain req2 or req3"); assertTrue(ids[0] != ids[1], "Should be different requests"); - assertEq(pendingBalance, 4 ether, "Pending balance should be 4 ether"); + assertEq(pendingBalance, 5 ether, "Pending balance should be 5 ether"); } function test_GetPendingRequestsByUserUnpacked_AfterCancel() public { @@ -1096,10 +1274,10 @@ contract FlowYieldVaultsRequestsTest is Test { vm.prank(user); uint256 u1r3 = c.createYieldVault{value: 1 ether}(NATIVE_FLOW, 1 ether, VAULT_ID, STRATEGY_ID); - // Remove user's middle request (u1r2) + // Process user's first request (u1r1) vm.startPrank(coa); - _startProcessingBatch(u1r2); - 
c.completeProcessing(u1r2, true, 100, "Created"); + _startProcessingBatch(u1r1); + c.completeProcessing(u1r1, true, 100, "Created"); vm.stopPrank(); // Verify user1's remaining requests @@ -1249,7 +1427,7 @@ contract FlowYieldVaultsRequestsTest is Test { - // Process every other request (simulating out-of-order processing) + // Process the first half of the requests in FIFO order vm.startPrank(coa); - for (uint256 i = 1; i < numRequests; i += 2) { + for (uint256 i = 0; i < numRequests / 2; i++) { _startProcessingBatch(requestIds[i]); c.completeProcessing(requestIds[i], true, uint64(100 + i), "Created"); } @@ -1262,9 +1440,9 @@ contract FlowYieldVaultsRequestsTest is Test { (uint256[] memory ids, , , , , , , , , , ) = c.getPendingRequestsUnpacked(0, 0); assertEq(ids.length, numRequests / 2); - // Even-indexed original requests should remain in order - for (uint256 i = 0; i < ids.length; i++) { - assertEq(ids[i], requestIds[i * 2], "FIFO order not maintained"); + // Pending requests should remain in order + for (uint256 i = 0; i < ids.length - 1; i++) { + assertLt(ids[i], ids[i+1], "FIFO order not maintained"); } } @@ -1286,17 +1464,17 @@ contract FlowYieldVaultsRequestsTest is Test { vm.stopPrank(); } - // Process user[2]'s middle request + // Process user[0]'s first request vm.startPrank(coa); - _startProcessingBatch(userRequestIds[2][1]); - c.completeProcessing(userRequestIds[2][1], true, 300, "Created"); + _startProcessingBatch(userRequestIds[0][0]); + c.completeProcessing(userRequestIds[0][0], true, 300, "Created"); vm.stopPrank(); // Verify all other users still have 3 requests for (uint256 i = 0; i < 5; i++) { (uint256[] memory ids, , , , , , , , , , , ) = c.getPendingRequestsByUserUnpacked(users[i]); - if (i == 2) { - assertEq(ids.length, 2, "User 2 should have 2 requests"); + if (i == 0) { + assertEq(ids.length, 2, "User 1 should have 2 requests"); } else { assertEq(ids.length, 3, "Other users should have 3 requests"); } @@ -1429,4 +1607,55 @@ contract FlowYieldVaultsRequestsTest is Test { )); uint256 closeReq = 
c.closeYieldVault(100); } + + function test_CancelRequests_RandomOrder() public { + vm.startPrank(user); + uint256 req1 = c.createYieldVault{value: 1 ether}(NATIVE_FLOW, 1 ether, VAULT_ID, STRATEGY_ID); + uint256 req2 = c.createYieldVault{value: 2 ether}(NATIVE_FLOW, 2 ether, VAULT_ID, STRATEGY_ID); + uint256 req3 = c.createYieldVault{value: 3 ether}(NATIVE_FLOW, 3 ether, VAULT_ID, STRATEGY_ID); + uint256 req4 = c.createYieldVault{value: 4 ether}(NATIVE_FLOW, 4 ether, VAULT_ID, STRATEGY_ID); + uint256 req5 = c.createYieldVault{value: 5 ether}(NATIVE_FLOW, 5 ether, VAULT_ID, STRATEGY_ID); + + uint256[] memory requestIDs = c.getPendingRequestIds(); + + assertEq(requestIDs.length, 5); + assertEq(requestIDs[0], req1, "First should be req1"); + assertEq(requestIDs[1], req2, "Second should be req2"); + assertEq(requestIDs[2], req3, "Third should be req3"); + assertEq(requestIDs[3], req4, "Fourth should be req4"); + assertEq(requestIDs[4], req5, "Fifth should be req5"); + + // Cancel last request + c.cancelRequest(req5); + requestIDs = c.getPendingRequestIds(); + + assertEq(requestIDs.length, 4); + assertEq(requestIDs[0], req1, "First should be req1"); + assertEq(requestIDs[1], req2, "Second should be req2"); + assertEq(requestIDs[2], req3, "Third should be req3"); + assertEq(requestIDs[3], req4, "Fourth should be req4"); + + // Cancel first request + c.cancelRequest(req1); + requestIDs = c.getPendingRequestIds(); + + assertEq(requestIDs.length, 3); + assertEq(requestIDs[0], req2, "First should be req2"); + assertEq(requestIDs[1], req3, "Second should be req3"); + assertEq(requestIDs[2], req4, "Third should be req4"); + + // Cancel middle request + c.cancelRequest(req3); + requestIDs = c.getPendingRequestIds(); + + assertEq(requestIDs.length, 2); + assertEq(requestIDs[0], req2, "First should be req2"); + assertEq(requestIDs[1], req4, "Second should be req4"); + vm.stopPrank(); + + vm.prank(c.owner()); + c.dropRequests(requestIDs); + + 
assertEq(c.getPendingRequestIds().length, 0); + } }