diff --git a/.gitmodules b/.gitmodules index de464b85..f994953b 100644 --- a/.gitmodules +++ b/.gitmodules @@ -7,12 +7,9 @@ [submodule "service_contracts/lib/openzeppelin-contracts-upgradeable"] path = service_contracts/lib/openzeppelin-contracts-upgradeable url = https://github.com/OpenZeppelin/openzeppelin-contracts-upgradeable -[submodule "service_contracts/lib/fws-payments"] - path = service_contracts/lib/fws-payments - url = https://github.com/FilOzone/fws-payments -[submodule "service_contracts/lib/pdp"] - path = service_contracts/lib/pdp - url = https://github.com/FilOzone/pdp -[submodule "service_contracts/lib/session-key-registry"] - path = service_contracts/lib/session-key-registry - url = https://github.com/FilOzone/SessionKeyRegistry +[submodule "service_contracts/lib/pyth-sdk-solidity"] + path = service_contracts/lib/pyth-sdk-solidity + url = https://github.com/pyth-network/pyth-sdk-solidity +[submodule "service_contracts/lib/prb-math"] + path = service_contracts/lib/prb-math + url = https://github.com/PaulRBerg/prb-math diff --git a/service_contracts/foundry.toml b/service_contracts/foundry.toml index 5376f48d..4c066064 100644 --- a/service_contracts/foundry.toml +++ b/service_contracts/foundry.toml @@ -15,10 +15,12 @@ remappings = [ '@openzeppelin/contracts/=lib/openzeppelin-contracts/contracts/', '@openzeppelin/contracts-upgradeable/=lib/openzeppelin-contracts-upgradeable/contracts/', 'forge-std/=lib/forge-std/src/', - '@fws-payments/=lib/fws-payments/src/', - '@pdp/=lib/pdp/src/', - '@session-key-registry/=lib/session-key-registry/src/', - '@pythnetwork/pyth-sdk-solidity/=lib/pdp/lib/pyth-sdk-solidity/', + '@payments/=src/payments/contracts/', + '@pdp/=src/pdp/contracts/', + '@session-key-registry/=src/session-key-registry/contracts/', + '@service-provider/=src/service-provider/', + '@pythnetwork/pyth-sdk-solidity/=lib/pyth-sdk-solidity/', + '@prb-math/=lib/prb-math/src/', ] # Allow reading test data files diff --git a/service_contracts/generated_view.sol b/service_contracts/generated_view.sol new file mode 100644 index 00000000..4a2fc72a --- /dev/null +++ b/service_contracts/generated_view.sol @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.20; + +// Generated with ./tools/generate_view_contract.sh out/FilecoinWarmStorageService.sol/FilecoinWarmStorageService.json + +import {IPDPProvingSchedule} from "@pdp/IPDPProvingSchedule.sol"; +import "./FilecoinWarmStorageService.sol"; +import "./lib/FilecoinWarmStorageServiceStateInternalLibrary.sol"; +contract FilecoinWarmStorageServiceStateView is IPDPProvingSchedule { + using FilecoinWarmStorageServiceStateInternalLibrary for FilecoinWarmStorageService; + FilecoinWarmStorageService public immutable service; + constructor(FilecoinWarmStorageService _service) { + service = _service; + } + function UPGRADE_INTERFACE_VERSION() external view returns (string memory) { + return FilecoinWarmStorageServiceStateInternalLibrary.UPGRADE_INTERFACE_VERSION(); + } + function calculateRatesPerEpoch(uint256 totalBytes) external view returns (uint256 storageRate, uint256 cacheMissRate, uint256 cdnRate) { + return FilecoinWarmStorageServiceStateInternalLibrary.calculateRatesPerEpoch(totalBytes); + } + function configureProvingPeriod(uint64 _maxProvingPeriod, uint256 _challengeWindowSize) external nonpayable returns () { + return FilecoinWarmStorageServiceStateInternalLibrary.configureProvingPeriod(_maxProvingPeriod, _challengeWindowSize); + } + function dataSetCreated(uint256 dataSetId, address creator, bytes 
extraData) external nonpayable returns () { + return FilecoinWarmStorageServiceStateInternalLibrary.dataSetCreated(dataSetId, creator, extraData); + } + function dataSetDeleted(uint256 dataSetId, uint256 , bytes extraData) external nonpayable returns () { + return FilecoinWarmStorageServiceStateInternalLibrary.dataSetDeleted(dataSetId, , extraData); + } + function eip712Domain() external view returns (bytes1 fields, string memory name, string memory version, uint256 chainId, address verifyingContract, bytes32 salt, uint256[] memory extensions) { + return FilecoinWarmStorageServiceStateInternalLibrary.eip712Domain(); + } + function extsload(bytes32 slot) external view returns (bytes32) { + return FilecoinWarmStorageServiceStateInternalLibrary.extsload(slot); + } + function extsloadStruct(bytes32 slot, uint256 size) external view returns (bytes32[] memory) { + return FilecoinWarmStorageServiceStateInternalLibrary.extsloadStruct(slot, size); + } + function filCDNAddress() external view returns (address) { + return FilecoinWarmStorageServiceStateInternalLibrary.filCDNAddress(); + } + function getEffectiveRates() external view returns (uint256 serviceFee, uint256 spPayment) { + return FilecoinWarmStorageServiceStateInternalLibrary.getEffectiveRates(); + } + function getProvingPeriodForEpoch(uint256 dataSetId, uint256 epoch) external view returns (uint256) { + return FilecoinWarmStorageServiceStateInternalLibrary.getProvingPeriodForEpoch(dataSetId, epoch); + } + function getServicePrice() external view returns (FilecoinWarmStorageService.ServicePricing memory pricing) { + return FilecoinWarmStorageServiceStateInternalLibrary.getServicePrice(); + } + function initialize(uint64 _maxProvingPeriod, uint256 _challengeWindowSize) external nonpayable returns () { + return FilecoinWarmStorageServiceStateInternalLibrary.initialize(_maxProvingPeriod, _challengeWindowSize); + } + function isEpochProven(uint256 dataSetId, uint256 epoch) external view returns (bool) { + return FilecoinWarmStorageServiceStateInternalLibrary.isEpochProven(dataSetId, epoch); + } + function migrate() external nonpayable returns () { + return FilecoinWarmStorageServiceStateInternalLibrary.migrate(); + } + function nextProvingPeriod(uint256 dataSetId, uint256 challengeEpoch, uint256 leafCount, bytes ) external nonpayable returns () { + return FilecoinWarmStorageServiceStateInternalLibrary.nextProvingPeriod(dataSetId, challengeEpoch, leafCount, ); + } + function owner() external view returns (address) { + return FilecoinWarmStorageServiceStateInternalLibrary.owner(); + } + function paymentsContractAddress() external view returns (address) { + return FilecoinWarmStorageServiceStateInternalLibrary.paymentsContractAddress(); + } + function pdpVerifierAddress() external view returns (address) { + return FilecoinWarmStorageServiceStateInternalLibrary.pdpVerifierAddress(); + } + function piecesAdded(uint256 dataSetId, uint256 firstAdded, tuple[] pieceData, bytes extraData) external nonpayable returns () { + return FilecoinWarmStorageServiceStateInternalLibrary.piecesAdded(dataSetId, firstAdded, pieceData, extraData); + } + function piecesScheduledRemove(uint256 dataSetId, uint256[] pieceIds, bytes extraData) external nonpayable returns () { + return FilecoinWarmStorageServiceStateInternalLibrary.piecesScheduledRemove(dataSetId, pieceIds, extraData); + } + function possessionProven(uint256 dataSetId, uint256 , uint256 , uint256 challengeCount) external nonpayable returns () { + return 
FilecoinWarmStorageServiceStateInternalLibrary.possessionProven(dataSetId, , , challengeCount); + } + function proxiableUUID() external view returns (bytes32) { + return FilecoinWarmStorageServiceStateInternalLibrary.proxiableUUID(); + } + function railTerminated(uint256 railId, address terminator, uint256 endEpoch) external nonpayable returns () { + return FilecoinWarmStorageServiceStateInternalLibrary.railTerminated(railId, terminator, endEpoch); + } + function renounceOwnership() external nonpayable returns () { + return FilecoinWarmStorageServiceStateInternalLibrary.renounceOwnership(); + } + function serviceCommissionBps() external view returns (uint256) { + return FilecoinWarmStorageServiceStateInternalLibrary.serviceCommissionBps(); + } + function storageProviderChanged(uint256 dataSetId, address oldServiceProvider, address newServiceProvider, bytes extraData) external nonpayable returns () { + return FilecoinWarmStorageServiceStateInternalLibrary.storageProviderChanged(dataSetId, oldServiceProvider, newServiceProvider, extraData); + } + function terminateService(uint256 dataSetId) external nonpayable returns () { + return FilecoinWarmStorageServiceStateInternalLibrary.terminateService(dataSetId); + } + function transferOwnership(address newOwner) external nonpayable returns () { + return FilecoinWarmStorageServiceStateInternalLibrary.transferOwnership(newOwner); + } + function updateServiceCommission(uint256 newCommissionBps) external nonpayable returns () { + return FilecoinWarmStorageServiceStateInternalLibrary.updateServiceCommission(newCommissionBps); + } + function upgradeToAndCall(address newImplementation, bytes data) external payable returns () { + return FilecoinWarmStorageServiceStateInternalLibrary.upgradeToAndCall(newImplementation, data); + } + function usdfcTokenAddress() external view returns (address) { + return FilecoinWarmStorageServiceStateInternalLibrary.usdfcTokenAddress(); + } + function validatePayment(uint256 railId, uint256 proposedAmount, uint256 fromEpoch, uint256 toEpoch, uint256 ) external nonpayable returns (IValidator.ValidationResult memory result) { + return FilecoinWarmStorageServiceStateInternalLibrary.validatePayment(railId, proposedAmount, fromEpoch, toEpoch, ); + } + +} diff --git a/service_contracts/lib/fws-payments b/service_contracts/lib/fws-payments deleted file mode 160000 index 477228d2..00000000 --- a/service_contracts/lib/fws-payments +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 477228d2d1e93bf7b2aa7e24018e88994806ddba diff --git a/service_contracts/lib/pdp b/service_contracts/lib/pdp deleted file mode 160000 index 61681392..00000000 --- a/service_contracts/lib/pdp +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 61681392933926fbccb142ab7767e037680850b4 diff --git a/service_contracts/lib/prb-math b/service_contracts/lib/prb-math new file mode 160000 index 00000000..119f37e4 --- /dev/null +++ b/service_contracts/lib/prb-math @@ -0,0 +1 @@ +Subproject commit 119f37e49edd96252a2c75536733ae0786aea4a4 diff --git a/service_contracts/lib/pyth-sdk-solidity b/service_contracts/lib/pyth-sdk-solidity new file mode 160000 index 00000000..11d6bcfc --- /dev/null +++ b/service_contracts/lib/pyth-sdk-solidity @@ -0,0 +1 @@ +Subproject commit 11d6bcfc2e56885535a9a8e3c8417847cb20be14 diff --git a/service_contracts/lib/session-key-registry b/service_contracts/lib/session-key-registry deleted file mode 160000 index e472ca2b..00000000 --- a/service_contracts/lib/session-key-registry +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 
e472ca2b525fb2396832216182b64a0c165cb49c diff --git a/service_contracts/src/payments/README.md b/service_contracts/src/payments/README.md new file mode 100644 index 00000000..a2438e21 --- /dev/null +++ b/service_contracts/src/payments/README.md @@ -0,0 +1,1030 @@
+# Filecoin Pay
+
+The Filecoin Pay Payments contract enables ERC20 token payment flows through "rails" - automated payment channels between payers and recipients. The contract supports continuous rate-based payments, one-time transfers, and payment validation during settlement.
+
+- [Deployment Info](#deployment-info)
+- [Security Audits](#security-audits)
+- [Key Concepts](#key-concepts)
+  - [Account](#account)
+  - [Rail](#rail)
+  - [Validator](#validator)
+  - [Operator](#operator)
+  - [Per-Rail Lockup: The Guarantee Mechanism](#per-rail-lockup-the-guarantee-mechanism)
+- [Core Functions](#core-functions)
+  - [Account Management](#account-management)
+  - [Operator Management](#operator-management)
+  - [Rail Management](#rail-management)
+  - [One-Time Payments](#one-time-payments)
+  - [Operator One-Time Payment Window](#operator-one-time-payment-window)
+  - [Handling Reductions to maxLockupPeriod](#handling-reductions-to-maxlockupperiod)
+  - [Settlement](#settlement)
+  - [Validation](#validation)
+- [Worked Example](#worked-example)
+  - [1. Initial Funding](#1-initial-funding)
+  - [2. Operator Approval](#2-operator-approval)
+  - [3. Deal Proposal (Rail Creation)](#3-deal-proposal-rail-creation)
+  - [4. Deal Acceptance and Service Start](#4-deal-acceptance-and-service-start)
+  - [5. Periodic Settlement](#5-periodic-settlement)
+  - [6. Deal Modification](#6-deal-modification)
+  - [7. Ending a Deal](#7-ending-a-deal)
+  - [8. Final Settlement and Withdrawal](#8-final-settlement-and-withdrawal)
+- [Emergency Scenarios](#emergency-scenarios)
+  - [Reducing Operator Allowance](#reducing-operator-allowance)
+  - [Rail Termination (by payer)](#rail-termination-by-payer)
+  - [Rail Termination (by operator)](#rail-termination-by-operator)
+  - [Rail Settlement Without Validation](#rail-settlement-without-validation)
+  - [Payer Reducing Operator Allowance After Deal Proposal](#payer-reducing-operator-allowance-after-deal-proposal)
+- [Contributing](#contributing)
+  - [Before Contributing](#before-contributing)
+  - [Pull Request Guidelines](#pull-request-guidelines)
+  - [Commit Message Guidelines](#commit-message-guidelines)
+- [License](#license)
+
+## Deployment Info
+
+- On the Calibration testnet at `0x0E690D3e60B0576D01352AB03b258115eb84A047`
+- The Filecoin Pay Contract (Alpha) is on Mainnet at `0x8c81C77E433725393Ba1eD5439ACdA098278eE1A`
+  - **⚠️ WARNING (issued 2025-08-18): May be deprecated within 1 month without migration support. DO NOT use this deployment for production applications or store significant value.**
+
+## Security Audits
+
+The Filecoin-Pay contracts have undergone the following security audits:
+- [Zellic Security Audit (August 2025)](https://github.com/Zellic/publications/blob/master/Filecoin%20Services%20Payments%20-%20Zellic%20Audit%20Report.pdf)
+
+## Key Concepts
+
+- **Account**: Represents a user's token balance and locked funds
+- **Rail**: A payment channel between a payer and recipient with configurable terms
+- **Validator**: An optional contract that acts as a trusted "arbitrator". It can:
+  - Validate and modify payment amounts during settlement.
+  - Veto a rail termination attempt from any party by reverting the `railTerminated` callback.
+  - Decide the final financial outcome (the total payout) of a rail that has been successfully terminated.
+- **Operator**: An authorized third party who can manage rails on behalf of payers
+
+### Account
+
+Tracks the funds, lockup, obligations, etc. associated with a single “owner” (where the owner is a smart contract or a wallet). Accounts can be both *payers* and *payees* but we’ll often talk about them as if they were separate types.
+
+- **Payer** — An account that *pays* a payee (this may be for a service, in which case we may refer to the Payer as the *Client*).
+- **Payee** — An account that receives payment from a payer (this may be for a service, in which case we may refer to the Payee as the *Service Provider*).
+
+### Rail
+
+A rail along which payments flow from a payer to a payee. Rails track lockup, maximum payment rates, and obligations between a payer and a payee. Payer ↔ Payee pairs can have multiple payment rails between them, but they can also reuse the same rail across multiple deals. Importantly, rails:
+- Specify the maximum rate at which the payer will pay the payee; the actual amount paid for any given period is subject to validation by the **validator** described below.
+- Define a lockup period. The lockup period of a rail is the time period over which the payer is required to maintain locked funds to fully cover the current outgoing payment rate from the rail if the payer stops adding funds to the account. This provides a reliable way for payees to verify that a payer is guaranteed to pay up to a certain point in the future. When a rail's payer account drops to only cover the lockup period, this is a signal to the payee that the payer is at risk of defaulting. The lockup period gives the payee time to settle and gracefully close down the rail without missing payment.
+- Strictly enforce lockups. While the contract cannot force a payer to deposit funds from their external wallet, it strictly enforces lockups on all funds held within their contract account. It prevents payers from withdrawing locked funds and blocks operator actions that would increase a payer's lockup obligation beyond their available balance. This system provides an easy way for payees to verify a payer's funding commitment for the rail.
+
+
+### Validator
+
+A validator is an optional contract that acts as a trusted arbitrator for a rail. Its primary role is to validate payments during settlement, but it also plays a crucial part in the rail's lifecycle, especially during termination.
+
+When a validator is assigned to a rail, it gains the ability to:
+
+- **Mediate Payments:** During settlement, a validator can prevent a payment, refuse to settle past a certain epoch, or reduce the payout amount to account for actual services rendered, penalties, etc.
+- **Oversee Termination:** When `terminateRail` is called by either the payer or the operator, the Payments contract makes a synchronous call to the validator's `railTerminated` function. The payee cannot directly terminate a rail.
+- **Veto Termination:** The validator can block the termination attempt entirely by reverting inside the `railTerminated` callback. This gives the validator the ultimate say on whether a rail can be terminated, irrespective of who initiated the call.
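+
+As a sketch, a minimal pass-through validator might look like the following. This is illustrative only: the `ValidationResult` field names mirror the behavior described above and in [Settlement](#settlement), but the authoritative definitions live in the `IValidator` interface shipped with the contracts.
+
+```solidity
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+pragma solidity ^0.8.20;
+
+// Illustrative validator: approves every proposed payment unchanged and
+// never vetoes a termination. Struct and parameter names are assumptions.
+contract PassThroughValidator {
+    struct ValidationResult {
+        uint256 modifiedAmount; // payout approved for the settled segment
+        uint256 settleUpto;     // epoch up to which settlement may proceed
+        string note;            // free-form note surfaced by settleRail
+    }
+
+    function validatePayment(
+        uint256 railId,
+        uint256 proposedAmount,
+        uint256 fromEpoch,
+        uint256 toEpoch,
+        uint256 rate
+    ) external pure returns (ValidationResult memory) {
+        // A real validator could lower modifiedAmount (penalties, partial
+        // service) or return an earlier settleUpto to settle only part way.
+        return ValidationResult(proposedAmount, toEpoch, "");
+    }
+
+    function railTerminated(uint256 railId, address terminator, uint256 endEpoch) external {
+        // Reverting here would veto the termination attempt.
+    }
+}
+```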
+
+### Operator
+
+An operator is a smart contract (typically the main contract for a given service) that manages payment rails on behalf of payers. It is also sometimes referred to as the "service contract". A payer must explicitly approve an operator and grant it specific allowances, which act as a budget for how much the operator can spend or lock up on the payer's behalf.
+
+The operator role is powerful, so the operator contract must be trusted by both the payer and the payee. The payer trusts it not to abuse its spending allowances, and the payee trusts it to correctly configure and manage the payment rail.
+
+An approved operator can perform the following actions:
+
+- **Create Rails (`createRail`):** Establish a new payment rail from a payer to a payee, specifying the token, payer, payee, and an optional validator.
+- **Modify Rail Terms (`modifyRailLockup`, `modifyRailPayment`):** Adjust the payment rate, lockup period, and fixed lockup amount for any rail it manages. Any increase in the payer's financial commitment is checked against the operator's allowances.
+- **Execute One-Time Payments (`modifyRailPayment`):** Execute one-time payments from the rail's fixed lockup.
+- **Settle Rails (`settleRail`):** Trigger payment settlement for a rail to process due payments within the existing terms of the rail. As a rail participant, the operator can initiate settlement at any time. The operator cannot, however, arbitrarily settle a rail for a higher-than-expected amount or duration.
+- **Terminate Rails (`terminateRail`):** End a payment rail. Unlike payers, an operator can terminate a rail even if the payer's account is not fully funded.
+
+### Per-Rail Lockup: The Guarantee Mechanism
+
+Each payment rail can be configured to require the payer to lock funds to guarantee future payments. This lockup is composed of two distinct components:
+
+- **Streaming Lockup (`paymentRate × lockupPeriod`):** A calculated guarantee covering rate-based payments over a pre-agreed lockup period.
+- **Fixed Lockup (`lockupFixed`):** A specific amount set aside for one-time payments.
+
+The total lockup for a payer's account is the sum of these requirements across *all* their active rails. This total is reserved from their deposited funds and cannot be withdrawn.
+
+#### The Crucial Role of Streaming Lockup: A Safety Hatch, Not a Pre-payment
+
+It is critical to understand that the streaming lockup is **not** a pre-paid account that is drawn from during normal operation. Instead, it functions as a **safety hatch** that can only be fully utilized *after* a rail is terminated.
+
+**1. During Normal Operation (Before Termination)**
+
+While a rail is active, the streaming lockup acts as a **guarantee of solvency for a pre-agreed number of epochs**, not as a direct source of payment.
+
+- **Payments from General Funds:** When `settleRail` is called on an active rail, payments are drawn from the payer's general `funds`.
+- **Lockup as a Floor:** The lockup simply acts as a minimum balance. The contract prevents the payer from withdrawing funds below this floor.
+- **Settlement Requires Solvency:** Critically, the contract will only settle an active rail up to the epoch where the payer's account is fully funded (`lockupLastSettledAt`). If a payer stops depositing funds and their account becomes insolvent for new epochs, **settlement for new epochs will stop**, even if there is a large theoretical lockup. The lockup itself is not automatically spent.
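+
+As an illustration of the floor, suppose a payer has deposited 100 tokens and their rails currently reserve a total lockup of 31 tokens (the figure from the detailed example below). Amounts and variable names here are hypothetical:
+
+```solidity
+// funds = 100, lockupCurrent = 31, so at most 69 tokens are withdrawable.
+Payments(paymentsContractAddress).withdraw(tokenAddress, 69 * 10**18); // succeeds
+Payments(paymentsContractAddress).withdraw(tokenAddress, 70 * 10**18); // reverts: would dip below the lockup floor
+```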
+
+**2. After Rail Termination (Activating the Safety Hatch)**
+
+The true purpose of the streaming lockup is realized when a rail is terminated. It becomes a guaranteed payment window for the payee.
+
+- **Activating the Guarantee:** When `terminateRail` is called, the contract sets a final, unchangeable settlement deadline (`endEpoch`), calculated as the payer's last solvent epoch (`lockupLastSettledAt`) plus the `lockupPeriod`.
+- **Drawing from Locked Funds:** The contract now permits `settleRail` to process payments up to this `endEpoch`, drawing directly from the funds that were previously reserved by the lockup.
+- **Guaranteed Payment Window:** This mechanism is the safety hatch. It guarantees that the payee can continue to get paid for the full `lockupPeriod` after the payer's last known point of solvency. This protects the payee if a payer stops paying and disappears.
+
+#### Fixed Lockup (`lockupFixed`)
+
+The fixed lockup is more straightforward. It is a dedicated pool of funds for immediate, one-time payments. When an operator makes a one-time payment, the funds are drawn directly from `lockupFixed`, and the payer's total lockup requirement is reduced at the same time.
+
+#### Detailed Example of Lockup Calculations
+
+The following scenarios illustrate how the lockup for a single rail is calculated and how changes affect the payer's total lockup obligation.
+
+Assume a rail is configured as follows:
+- `paymentRate = 3 tokens/epoch`
+- `lockupPeriod = 8 epochs`
+- `lockupFixed = 7 tokens`
+
+The total lockup requirement for this specific rail is:
+`(3 tokens/epoch × 8 epochs) + 7 tokens = 31 tokens`
+
+The payer's account must have at least 31 tokens in *available* funds before this lockup can be established. Once set, 31 tokens will be added to the payer's `Account.lockupCurrent`.
+
+**Scenario 1: Making a One-Time Payment**
+The operator makes an immediate one-time payment of 4 tokens.
+- **Action:** `modifyRailPayment` is called with `oneTimePayment = 4`.
+- **Result:** The 4 tokens are paid from the payer's `funds`. The `lockupFixed` on the rail is reduced to `3` (7 - 4).
+- **New Lockup Requirement:** The rail's total lockup requirement drops to `(3 × 8) + 3 = 27 tokens`. The payer's `Account.lockupCurrent` is reduced by 4 tokens.
+
+**Scenario 2: Increasing the Streaming Rate**
+The operator needs to increase the payment rate to 4 tokens/epoch.
+- **Action:** `modifyRailPayment` is called with `newRate = 4`.
+- **New Lockup Requirement:** The rail's streaming lockup becomes `4 × 8 = 32 tokens`. The total requirement is now `32 + 3 = 35 tokens`.
+- **Funding Check:** This change increases the rail's lockup requirement by 8 tokens (from 27 to 35). The transaction will only succeed if the payer's account has at least 8 tokens in available (non-locked) funds to cover this increase. If not, the call will revert.
+
+**Scenario 3: Reducing the Lockup Period**
+The operator reduces the lockup period to 5 epochs.
+- **Action:** `modifyRailLockup` is called with `period = 5`.
+- **New Lockup Requirement:** The streaming lockup becomes `3 × 5 = 15 tokens`. The total requirement is now `15 + 3 = 18 tokens`.
+- **Result:** The rail's total lockup requirement is reduced from 27 to 18 tokens. This frees up 9 tokens in the payer's `Account.lockupCurrent`, which they can now withdraw (assuming no other lockups).
+
+
+#### Best Practices for Payees
+
+This lockup mechanism places clear responsibilities on the payee to manage risk:
+
+- **Settle Regularly:** Settle rails frequently, in line with the solvency guarantees established by the operator contract's lockup requirements. A rail's `lockupPeriod` is a measure of the risk you are willing to take.
If you wait longer than the `lockupPeriod` to settle, you allow a payer to build up a payment obligation that may not be fully covered by the lockup guarantee if they become insolvent. +- **Monitor Payer Solvency:** Use the `getAccountInfoIfSettled` function to check if a payer is funded. If their `fundedUntilEpoch` is approaching the current epoch, they are at risk. +- **Terminate Proactively:** If a payer becomes insolvent or unresponsive, request the operator to terminate the rail immediately. This is the **only way** to activate the safety hatch and ensure you can claim payment from the funds guaranteed by the streaming lockup. + +## Core Functions + +### Account Management + +Functions for managing user accounts, including depositing and withdrawing funds. These functions support both ERC20 tokens and the native network token ($FIL) by using `address(0)` as the token address. + +#### `deposit(address token, address to, uint256 amount)` + +Deposits tokens into a specified account. This is the standard method for funding an account if not using permits. It intelligently handles fee-on-transfer tokens by calculating the actual amount received by the contract. + +**When to use:** Use this for direct transfers from a wallet or another contract that has already approved the Payments contract to spend tokens. + +**Native Token (FIL):** To deposit the native network token, use `address(0)` for the `token` parameter and send the corresponding amount in the transaction's `value`. + +**Parameters**: +- `token`: ERC20 token contract address (`address(0)` for FIL). +- `to`: The account address to credit with the deposit. +- `amount`: The amount of tokens to transfer. + +**Requirements**: +- For ERC20s, the direct caller (`msg.sender`) must have approved the Payments contract to transfer at least `amount` of the specified `token`. +- For the native token, `msg.value` must equal `amount`. + +#### `depositWithPermit(address token, address to, uint256 amount, uint256 deadline, uint8 v, bytes32 r, bytes32 s)` + +Deposits tokens using an EIP-2612 permit, allowing for gasless token approval. + +**When to use:** Ideal for user-facing applications where the user can sign a permit off-chain. This combines approval and deposit into a single on-chain transaction, saving gas and improving user experience. + +**Note:** This function is for ERC20 tokens only and does not support the native token. + +**Parameters**: +- `token`: ERC20 token contract address supporting EIP-2612 permits. +- `to`: The account address to credit (must be the signer of the permit). +- `amount`: Token amount to deposit. +- `deadline`: Permit expiration timestamp. +- `v`, `r`, `s`: Signature components for the EIP-2612 permit. + +**Requirements**: +- Token must support EIP-2612. +- `to` must be `msg.sender` (the one submitting the transaction). + +#### `depositWithPermitAndApproveOperator(...)` + +A powerful convenience function that combines three actions into one transaction: +1. Approves token spending via an EIP-2612 permit. +2. Deposits tokens into the specified account. +3. Sets approval for an operator. + +**When to use:** This is the most efficient way for a new user to get started. It funds their account and authorizes a service contract (operator) in a single step. + +**Note:** This function is for ERC20 tokens only. + +**Parameters**: +- `token`: ERC20 token contract address supporting EIP-2612 permits. +- `to`: The account address to credit (must be the signer of the permit). +- `amount`: Token amount to deposit. 
+- `deadline`: Permit expiration timestamp.
+- `v`, `r`, `s`: Signature components for the EIP-2612 permit.
+- `operator`: The address of the operator to approve.
+- `rateAllowance`: The maximum payment rate the operator can set across all rails.
+- `lockupAllowance`: The maximum funds the operator can lock up for future payments.
+- `maxLockupPeriod`: The maximum lockup period in epochs the operator can set.
+
+#### `depositWithPermitAndIncreaseOperatorApproval(...)`
+
+Similar to the above, but for increasing the allowances of an *existing* operator while depositing funds.
+
+**When to use:** Useful when a user needs to top up their funds and simultaneously grant an existing operator higher spending or lockup limits for new or modified deals.
+
+**Note:** This function is for ERC20 tokens only.
+
+**Requirements**:
+- Operator must already be approved.
+
+**Parameters**:
+- `token`: ERC20 token contract address supporting EIP-2612 permits.
+- `to`: The account address to credit (must be the signer of the permit).
+- `amount`: Token amount to deposit.
+- `deadline`: Permit expiration timestamp.
+- `v`, `r`, `s`: Signature components for the EIP-2612 permit.
+- `operator`: The address of the operator whose allowances are being increased.
+- `rateAllowanceIncrease`: The amount to increase the rate allowance by.
+- `lockupAllowanceIncrease`: The amount to increase the lockup allowance by.
+
+#### `withdraw(address token, uint256 amount)`
+
+Withdraws available (unlocked) tokens from the caller's account to their own wallet address.
+
+**When to use:** When a user wants to retrieve funds from the Payments contract that are not currently reserved in lockups for active rails.
+
+**Native Token (FIL):** To withdraw the native network token, use `address(0)` for the `token` parameter.
+
+**Parameters**:
+- `token`: ERC20 token contract address (`address(0)` for FIL).
+- `amount`: Token amount to withdraw.
+
+**Requirements**:
+- The `amount` must not exceed the user's available funds (`account.funds - account.lockupCurrent`). The contract runs a settlement check before withdrawal to ensure the lockup accounting is up-to-date.
+
+#### `withdrawTo(address token, address to, uint256 amount)`
+
+Withdraws available tokens from the caller's account to a *specified* recipient address.
+
+**When to use:** Same as `withdraw`, but allows sending the funds to any address, not just the caller's wallet.
+
+**Native Token (FIL):** To withdraw the native network token, use `address(0)` for the `token` parameter.
+
+**Parameters**:
+- `token`: ERC20 token contract address (`address(0)` for FIL).
+- `to`: Recipient address.
+- `amount`: Token amount to withdraw.
+
+**Requirements**:
+- Amount must not exceed the caller's unlocked funds.
+
+#### `getAccountInfoIfSettled(address token, address owner)`
+
+This is a key read-only function that provides a real-time snapshot of an account's financial health. It performs a read-only simulation of what the account's state *would be* if a settlement were to happen at the current block, without actually making any state changes.
+
+This function is the primary tool for monitoring an account's solvency and should be used by all participants in the system.
+
+- **For Payees and Operators:** Before performing a service or attempting a transaction that increases a payer's lockup (like `modifyRailLockup` or `modifyRailPayment`), call this function to assess risk.
A `fundedUntilEpoch` that is in the past or very near the current block number is a strong indicator that the payer is underfunded and that a termination of the rail may be necessary to activate the safety hatch.
+- **For Payers:** This function allows payers to monitor their own account health. By checking `fundedUntilEpoch` and `availableFunds`, they can determine when a top-up is needed to avoid service interruptions or defaulting on their payment obligations.
+- **For UIs and Dashboards:** This is the essential endpoint for building user-facing interfaces. It provides all the necessary information to display an account's total balance, what's available for withdrawal, its "burn rate", and a clear "funded until" status.
+
+**Parameters**:
+- `token`: The token address to get account info for.
+- `owner`: The address of the account owner.
+
+**Returns**:
+- `fundedUntilEpoch`: The future epoch at which the account is projected to run out of funds, given its current balance and `currentLockupRate`.
+  - If this value is `type(uint256).max`, it means the account has a zero lockup rate and is funded indefinitely.
+  - If this value is in the past, the account is currently in deficit and cannot be settled further for active rails.
+- `currentFunds`: The raw, total balance of tokens held by the account in the contract.
+- `availableFunds`: The portion of `currentFunds` that is *not* currently locked. This is the amount the user could successfully withdraw if they called `withdraw` right now.
+- `currentLockupRate`: The aggregate "burn rate" of the account, representing the total `paymentRate` per epoch summed across all of the owner's active rails.
+
+### Operator Management
+
+Functions for payers to manage the permissions of operators.
+
+#### `setOperatorApproval(address token, address operator, bool approved, uint256 rateAllowance, uint256 lockupAllowance, uint256 maxLockupPeriod)`
+
+Configures an operator's permissions to manage rails on behalf of the caller (payer). This is the primary mechanism for delegating rail management.
+
+**When to use:** A payer calls this to authorize a new service contract as an operator or to completely overwrite the permissions of an existing one.
+
+**Parameters**:
+- `token`: ERC20 token contract address.
+- `operator`: The address being granted or denied permissions.
+- `approved`: A boolean to approve or revoke the operator's ability to create new rails.
+- `rateAllowance`: The maximum cumulative payment rate the operator can set across all rails they manage for this payer.
+- `lockupAllowance`: The maximum cumulative funds the operator can lock (both streaming and fixed) across all rails.
+- `maxLockupPeriod`: The maximum `lockupPeriod` (in epochs) the operator can set on any single rail.
+
+#### `increaseOperatorApproval(address token, address operator, uint256 rateAllowanceIncrease, uint256 lockupAllowanceIncrease)`
+
+Increases the rate and lockup allowances for an existing operator approval without affecting other settings.
+
+**When to use:** Use this as a convenient way to grant an operator more spending or lockup power without having to re-specify their `maxLockupPeriod` or approval status.
+
+**Parameters**:
+- `token`: ERC20 token contract address.
+- `operator`: The address of the approved operator.
+- `rateAllowanceIncrease`: The amount to add to the existing `rateAllowance`.
+- `lockupAllowanceIncrease`: The amount to add to the existing `lockupAllowance`.
+
+**Requirements**:
+- The operator must already be approved.
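+
+For example, a payer might top up an approved operator's budget by 1 token/epoch of rate allowance and 10 tokens of lockup allowance (addresses and amounts are illustrative):
+
+```solidity
+// Raise an existing operator's allowances; the approval status and
+// maxLockupPeriod configured earlier are left untouched.
+Payments(paymentsContractAddress).increaseOperatorApproval(
+    tokenAddress,            // ERC20 token the approval applies to
+    serviceContractAddress,  // Already-approved operator
+    1 * 10**18,              // Add 1 token/epoch to rateAllowance
+    10 * 10**18              // Add 10 tokens to lockupAllowance
+);
+```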
+
+### Rail Management
+
+Functions for operators to create and manage payment rails. These are typically called by service contracts on behalf of payers.
+
+#### `createRail(address token, address from, address to, address validator, uint256 commissionRateBps, address serviceFeeRecipient)`
+
+Creates a new payment rail. This is the first step in setting up a new payment relationship.
+
+**When to use:** An operator calls this to establish a payment channel from a payer (`from`) to a payee (`to`).
+
+**Parameters**:
+- `token`: ERC20 token contract address.
+- `from`: The payer's address.
+- `to`: The recipient's (payee's) address.
+- `validator`: Optional validation contract address (`address(0)` for none).
+- `commissionRateBps`: Optional operator commission in basis points (e.g., 100 BPS = 1%).
+- `serviceFeeRecipient`: The address that receives the operator commission. This is **required** if `commissionRateBps` is greater than 0.
+
+**Returns**:
+- `railId`: A unique identifier for the new rail.
+
+**Requirements**:
+- The caller (`msg.sender`) must be an approved operator for the `from` address and `token`.
+
+#### `getRail(uint256 railId)`
+
+Retrieves the current state of a payment rail.
+
+**When to use:** To inspect the parameters of an existing rail.
+
+**Parameters**:
+- `railId`: The rail's unique identifier.
+
+**Returns**:
+- `RailView`: A `RailView` struct containing the rail's public data.
+  ```solidity
+  struct RailView {
+      address token;               // The ERC20 token used for payments
+      address from;                // The payer's address
+      address to;                  // The payee's address
+      address operator;            // The operator's address
+      address validator;           // The validator's address
+      uint256 paymentRate;         // The current payment rate per epoch
+      uint256 lockupPeriod;        // The lockup period in epochs
+      uint256 lockupFixed;         // The fixed lockup amount
+      uint256 settledUpTo;         // The epoch up to which the rail has been settled
+      uint256 endEpoch;            // The epoch at which a terminated rail can no longer be settled
+      uint256 commissionRateBps;   // The operator's commission rate in basis points
+      address serviceFeeRecipient; // The address that receives the operator's commission
+  }
+  ```
+
+**Requirements**:
+- The rail must be active (not yet finalized).
+
+#### `terminateRail(uint256 railId)`
+
+Initiates the graceful shutdown of a payment rail. This is a critical function that formally ends a payment agreement and activates the lockup safety hatch for the payee.
+
+- **When to use:** Called by an operator or a payer to end a service agreement, either amicably or in an emergency.
+
+**Who Can Call This Function?**
+
+Authorization to terminate a rail is strictly controlled:
+
+- **The Operator:** The rail's operator can call this function at any time.
+- **The Payer:** The payer can only call this function if their account is fully funded (`isAccountLockupFullySettled` is true).
+- **The Payee:** The payee **cannot** call this function.
+
+**Core Logic and State Changes**
+
+- **Sets a Final Deadline:** Termination sets a final settlement deadline (`endEpoch`). This is calculated as `payer.lockupLastSettledAt + rail.lockupPeriod`, activating the `lockupPeriod` as a guaranteed payment window.
+- **Stops Future Lockups:** The payer's account `lockupRate` is immediately reduced by the rail's `paymentRate`. This is a crucial step that stops the payer from accruing any *new* lockup obligations for this rail.
+- **Frees Operator Allowances:** The operator's rate usage is decreased, freeing up their `rateAllowance` for other rails.
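+
+Conceptually, the three effects above amount to the following (pseudocode; these are not the contract's actual storage names):
+
+```solidity
+// Sketch of terminateRail's net effect on state:
+// rail.endEpoch               = payer.lockupLastSettledAt + rail.lockupPeriod;
+// payer.lockupRate           -= rail.paymentRate;
+// operatorApproval.rateUsage -= rail.paymentRate;
+```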
+ +**Validator Callback** + +If the rail has a validator, `terminateRail` makes a synchronous call to the `validator.railTerminated` function. This is a powerful mechanism: + +- **Veto Power:** The validator can block the termination attempt entirely by reverting inside this callback. This gives the validator the ultimate say on whether a rail can be terminated, irrespective of who initiated the call. +- **Notification:** It serves as a direct notification to the validator that a rail it oversees is being terminated, allowing it to update its own internal state if needed. + +**Parameters**: +- `railId`: The rail's unique identifier. + +**Requirements**: +- Caller must be the rail's payer (and have a fully funded account) or the rail's operator. +- The rail must not have been already terminated. + +#### `modifyRailLockup(uint256 railId, uint256 period, uint256 lockupFixed)` + +Changes a rail's lockup parameters (`lockupPeriod` and `lockupFixed`). + +- **When to use:** An operator calls this to adjust the payer's funding guarantee. This is used to set an initial `lockupFixed` for an onboarding fee, increase the `lockupPeriod` for a longer-term commitment, or decrease lockups when a deal's terms change. + +**Lockup Calculation and State Changes** + +This function recalculates the rail's total lockup requirement based on the new `period` and `lockupFixed` values. The change in the rail's individual lockup is then applied to the payer's total account lockup (`Account.lockupCurrent`). + +- **State Impact:** It modifies both the `Rail` struct (updating `lockupPeriod` and `lockupFixed`) and the payer's `Account` struct (updating `lockupCurrent`). + +**Parameters**: +- `railId`: The rail's unique identifier. +- `period`: The new lockup period in epochs. +- `lockupFixed`: The new fixed lockup amount. + +**Requirements**: +- Caller must be the rail operator. +- **For Terminated Rails:** The lockup period cannot be changed, and the `lockupFixed` can only be decreased. +- **For Active Rails:** + - Any increase to the `period` is checked against the operator's `maxLockupPeriod` allowance. + - **Critical**: If the payer's account is **not** fully funded (`isAccountLockupFullySettled` is false), changes are heavily restricted: the `period` cannot be changed, and `lockupFixed` can only be decreased. This prevents increasing the financial burden on an underfunded payer. + +#### `modifyRailPayment(uint256 railId, uint256 newRate, uint256 oneTimePayment)` + +Modifies a rail's payment rate, makes an immediate one-time payment, or both. + +- **When to use:** This is the primary function for starting a payment stream (by setting an initial `newRate`), adjusting it, or making ad-hoc [One-Time Payments](#one-time-payments). + +**Rate Change Behavior** + +When this function is used to change a rail's payment rate (`newRate` is different from the current rate), the change is not applied retroactively. The contract uses an internal queue to ensure that rate changes are applied precisely at the correct epoch: + +- **Old Rate Preservation:** The contract records the *old* payment rate with a deadline (`untilEpoch`) set to the current block number. +- **Future Application:** The `newRate` becomes the rail's new default rate and will be used for settlement for all epochs *after* the current one. +- **Settlement Logic:** When `settleRail` is called, it processes this queue. It will use the old rate to settle payments up to and including the block where the change was made, and then use the new rate for subsequent blocks. 
This ensures perfect, per-epoch accounting even if rates change frequently. + +**Parameters**: +- `railId`: The rail's unique identifier. +- `newRate`: The new per-epoch payment rate. +- `oneTimePayment`: An optional amount for an immediate payment, drawn from `lockupFixed`. + +**Requirements**: +- Caller must be the rail operator. +- `oneTimePayment` cannot exceed the rail's current `lockupFixed`. +- **For Terminated Rails:** + - The rate can only be decreased (`newRate <= oldRate`). + - **Edge Case**: This function will revert if called after the rail's final settlement window (`endEpoch`) has passed. +- **For Active Rails:** + - **Critical**: If the payer's account is **not** fully funded (`isAccountLockupFullySettled` is false), the payment rate **cannot be changed at all**. `newRate` must equal `oldRate`. This is a strict safety measure. + +#### `getRailsForPayerAndToken(address payer, address token)` + +Retrieves all rails where the given address is the payer for a specific token. + +**When to use:** Useful for UIs or payer-side applications to list all outgoing payment rails for a user. + +**Parameters**: +- `payer`: The payer's address. +- `token`: The ERC20 token contract address. + +**Returns**: +- `RailInfo[]`: An array of `RailInfo` structs. + +#### `getRailsForPayeeAndToken(address payee, address token)` + +Retrieves all rails where the given address is the payee for a specific token. + +**When to use:** Useful for UIs or payee-side applications to list all incoming payment rails. + +**Parameters**: +- `payee`: The payee's address. +- `token`: The ERC20 token contract address. + +**Returns**: +- `RailInfo[]`: An array of `RailInfo` structs. + +#### `getRateChangeQueueSize(uint256 railId)` + +Returns the number of pending rate changes in the queue for a specific rail. When `modifyRailPayment` is called, the old rate is enqueued to ensure past periods are settled correctly. + +**When to use:** For debugging or advanced monitoring to see if there are pending rate changes that need to be cleared through settlement. + +**Parameters**: +- `railId`: Rail identifier. + +**Returns**: +- `uint256`: The number of `RateChange` items in the queue. + +**Requirements**: None. + +### One-Time Payments + +One-time payments enable operators to transfer fixed amounts immediately from payer to payee, bypassing the regular rate-based payment flow. These payments are deducted from the rail's fixed lockup amount. + +#### Key Characteristics + +- **Operator-Initiated**: Only the rail operator can execute one-time payments through `modifyRailPayment` +- **Fixed Lockup Source**: Payments are drawn from `rail.lockupFixed`, which must be pre-allocated via `modifyRailLockup` +- **Always Available**: Once locked, these funds remain available regardless of the payer's account balance +- **Operator Approval**: Counts against the operator's `lockupAllowance` and reduces `lockupUsage` when spent +- **Commission Applied**: One-time payments are subject to the rail's operator commission rate, just like regular payments + +#### Usage + +One-time payments require a two-step process: + +1. 
**Lock funds** using `modifyRailLockup` to allocate fixed lockup:
+
+```solidity
+// Allocate 10 tokens for one-time payments
+Payments.modifyRailLockup(
+    railId,        // Rail ID
+    lockupPeriod,  // Lockup period (unchanged or new value)
+    10 * 10**18    // Fixed lockup amount
+);
+```
+
+This will revert if:
+- The payer lacks sufficient unlocked funds to cover the requested lockup
+- The operator exceeds their `lockupAllowance` or `maxLockupPeriod` limits
+
+2. **Make payments** using `modifyRailPayment` with a non-zero `oneTimePayment`:
+
+```solidity
+// Make a 5 token one-time payment from the locked funds
+Payments.modifyRailPayment(
+    railId,        // Rail ID
+    newRate,       // Payment rate (can remain unchanged)
+    5 * 10**18     // One-time payment amount (must be ≤ rail.lockupFixed)
+);
+```
+
+#### Lifecycle
+
+1. **Allocation**: Fixed lockup is set when creating or modifying a rail via `modifyRailLockup`
+2. **Usage**: Operator makes one-time payments, reducing the available fixed lockup
+3. **Termination**: Unused fixed lockup remains available for one-time payments even after rail termination
+4. **Finalization**: After full rail settlement, any remaining fixed lockup is automatically refunded to the payer
+
+#### Example Use Cases
+
+- Onboarding fees or setup costs
+- Performance bonuses or penalties
+- Urgent payments outside regular settlement cycles
+- Termination fees when canceling services
+
+### Operator One-Time Payment Window
+
+**Lifecycle:**
+
+1. **Rail Active:** While the rail is active, the operator can make one-time payments at any time, provided there is sufficient fixed lockup remaining.
+2. **Rail Termination:** When a rail is terminated (either by the payer or operator), the payer's account stops accruing new payment obligations. However, the payment stream to the payee does not stop immediately. Instead, the lockup period acts as a grace period, with funds flowing to the payee out of the payer's rate-based lockup. Additionally, the fixed lockup is not released until the end of the lockup period, allowing the operator to continue making one-time payments for a limited time after termination.
+   * **The end of this window is calculated as the last epoch up to which the payer's account lockup was settled (`lockupLastSettledAt`) plus the rail's lockup period.** If the account was only settled up to an earlier epoch, the window will close sooner than if it was fully up to date at the time of termination.
+3. **End of Window:** Once the current epoch surpasses `(rail termination epoch + rail lockup period)`, the one-time payment window closes. At this point, any unused fixed lockup is automatically refunded to the payer, and no further one-time payments can be made.
+
+**Example Timeline:**
+  - Rail is created at epoch 100, with a lockup period of 20 epochs.
+  - At epoch 150, the operator calls `terminateRail`, but the payer's lockup is only settled up to epoch 120.
+  - The rail's termination epoch is set to 120 (the last settled lockup epoch).
+  - The operator can make one-time payments from the fixed lockup until epoch 140 (`120 + 20`).
+  - After epoch 140, any remaining fixed lockup is refunded to the payer.
+
+**Note:** The one-time payment window after termination is **not** always the epoch at which `terminateRail` is called plus the lockup period. It depends on how far the payer's account lockup has been settled at the time of termination. If the account is not fully settled, the window will be shorter.
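+
+On-chain, the close of this window corresponds to the rail's `endEpoch` as reported by `getRail`, so a caller can check it directly. A sketch (assuming `endEpoch` is zero until the rail is terminated):
+
+```solidity
+// Is the post-termination one-time payment window still open?
+RailView memory r = Payments(paymentsContractAddress).getRail(railId);
+bool oneTimeWindowOpen = r.endEpoch != 0 && block.number <= r.endEpoch;
+```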
+ +### Handling Reductions to maxLockupPeriod + +A payer can reduce the operator's `maxLockupPeriod` or `lockupAllowance` after a deal proposal, which may prevent the operator from setting a meaningful lockup period and thus block one-time payments. + +**Edge Case Explanation:** + - If the payer reduces the operator's `maxLockupPeriod` or `lockupAllowance` after a deal is proposed but before the operator has set the lockup, the operator may be unable to allocate enough fixed lockup for one-time payments. This can hamper the operator's ability to secure payment for work performed, especially if the lockup period is set to a very low value or zero. + - This risk exists because the operator's ability to set or increase the lockup is always subject to the current allowances set by the payer. If the payer reduces these allowances before the operator calls `modifyRailLockup`, the transaction will fail, and the operator cannot secure the funds. + +**Best Practice:** + - Before performing any work or incurring costs, the operator should always call `modifyRailLockup` to allocate the required fixed lockup. Only if this call is successful should the operator proceed with the work. This guarantees that the fixed lockup amount is secured for one-time payments, regardless of any future reductions to operator allowances by the payer. + +**Practical Scenario:** + 1. Operator and payer agree on a deal, and the operator intends to lock 10 tokens for one-time payments. + 2. Before the operator calls `modifyRailLockup`, the payer reduces the operator's `maxLockupPeriod` to 0 or lowers the `lockupAllowance` below 10 tokens. + 3. The operator's attempt to set the lockup fails, and they cannot secure the funds for one-time payments. + 4. If the operator had called `modifyRailLockup` and succeeded before the payer reduced the allowance, the lockup would be secured, and the operator could draw one-time payments as needed, even if the payer later reduces the allowance. + +**Summary:** + - Always secure the fixed lockup before starting work. This is the only way to guarantee access to one-time payments, regardless of changes to operator allowances by the payer. + +### Settlement + +Functions for processing payments by moving funds from the payer to the payee based on the rail's terms. + +#### `settleRail(uint256 railId, uint256 untilEpoch)` + +This is the primary function for processing payments. It can be called by any rail participant (payer, payee, or operator) to settle due payments up to a specified epoch. A network fee in the native token may be required for this transaction. + +**Parameters**: +- `railId`: The ID of the rail to settle. +- `untilEpoch`: The epoch up to which to settle. + +**Returns**: +- `totalSettledAmount`: The total amount settled and transferred. +- `totalNetPayeeAmount`: The net amount credited to the payee after fees. +- `totalOperatorCommission`: The commission credited to the operator. +- `finalSettledEpoch`: The epoch up to which settlement was actually completed. +- `note`: Additional information about the settlement (especially from validation). + +The behavior of `settleRail` critically depends on whether the rail is active or terminated: + +- **For Active Rails:** Settlement can only proceed up to the epoch the payer's account was last known to be fully funded (`lockupLastSettledAt`). This is a key safety feature: if a payer becomes insolvent, settlement of an active rail halts, preventing it from running a deficit. 
+- **For Terminated Rails:** Settlement can proceed up to the rail's final `endEpoch`, drawing directly from the streaming lockup. + +**The Role of the Validator in Settlement** + +If a rail has a validator, `settleRail` will call the `validatePayment` function on the validator contract for each segment being settled. This gives the validator significant power: + +- **It can approve the proposed payment** by returning the same amount and end epoch. +- **It can partially settle** by returning a `settleUpto` epoch that is earlier than the proposed end of the segment. +- **It can modify the payment amount** for the settled period by returning a `modifiedAmount`. +- **It can effectively reject settlement** for a segment by returning 0 for the settlement duration (`result.settleUpto` equals `epochStart`). + +However, the validator's power is not absolute. The Payments contract enforces these critical constraints on the validator's response: +- It **cannot** settle a rail beyond the proposed settlement segment. +- It **cannot** approve a payment amount that is greater than the maximum allowed by the rail's `paymentRate` for the duration it is approving. + +**Note**: While the validator has significant control, the final settlement outcome is also dependent on the payer having sufficient funds for the amount being settled. + +#### `settleTerminatedRailWithoutValidation(uint256 railId)` + +This is a crucial escape-hatch function that allows the **payer** to finalize a terminated rail that is otherwise stuck, for example, due to a malfunctioning validator. + +**When to use:** As a last resort, after a rail has been terminated and its full settlement window (`endEpoch`) has passed. + +**What it does:** It settles the rail in full up to its `endEpoch`, completely bypassing the `validator`. This ensures that any funds owed to the payee are paid and any remaining payer funds are unlocked. + +**Parameters**: +- `railId`: The ID of the rail to settle. + +**Returns**: +- `totalSettledAmount`: The total amount settled and transferred. +- `totalNetPayeeAmount`: The net amount credited to the payee after fees. +- `totalOperatorCommission`: The commission credited to the operator. +- `finalSettledEpoch`: The epoch up to which settlement was actually completed. +- `note`: Additional information about the settlement. + +**Requirements**: +- Caller must be the rail's payer. +- The rail must be terminated. +- The current block number must be past the rail's final settlement window (`rail.endEpoch`). + +### Validation + +The contract supports optional payment validation through the `IValidator` interface. When a rail has a validator: + +1. During settlement, the validator contract is called +2. The validator can adjust payment amounts or partially settle epochs +3. This provides dispute resolution capabilities for complex payment arrangements + +## Worked Example + +This worked example demonstrates how users interact with the FWS Payments contract through a typical service deal lifecycle. + +### 1. Initial Funding + +A payer first deposits tokens to fund their account in the payments contract: + +#### Traditional Approach (Two transactions): + +```solidity +// 1. Payer approves the Payments contract to spend tokens +IERC20(tokenAddress).approve(paymentsContractAddress, 100 * 10**18); // 100 tokens + +// 2. 
Payer or anyone else can deposit to the payer's account
+Payments(paymentsContractAddress).deposit(
+    tokenAddress,   // ERC20 token address
+    payerAddress,   // Recipient's address (the payer)
+    100 * 10**18    // Amount to deposit (100 tokens)
+);
+```
+
+#### Single Transaction Alternative (for EIP-2612 tokens):
+
+```solidity
+// Payer signs a permit off-chain and deposits in one transaction
+Payments(paymentsContractAddress).depositWithPermit(
+    tokenAddress,   // ERC20 token address (must support EIP-2612)
+    payerAddress,   // Recipient's address (must be the permit signer)
+    100 * 10**18,   // Amount to deposit (100 tokens)
+    deadline,       // Permit expiration timestamp
+    v, r, s         // Signature components from signed permit
+);
+```
+
+After this operation, the payer's `Account.funds` is credited with 100 tokens, enabling them to use services within the FWS ecosystem.
+
+This operation _may_ be deferred until the funds are actually required; funding is always "on-demand".
+
+### 2. Operator Approval
+
+Before using a service, the payer must approve the service's contract as an operator. This can be done in two ways:
+
+#### Option A: Separate Operator Approval
+
+If you've already deposited funds, you can approve operators separately:
+
+```solidity
+// Payer approves a service contract as an operator
+Payments(paymentsContractAddress).setOperatorApproval(
+    tokenAddress,            // ERC20 token address
+    serviceContractAddress,  // Operator address (service contract)
+    true,                    // Approval status
+    5 * 10**18,              // Maximum rate (tokens per epoch) the operator can allocate
+    20 * 10**18,             // Maximum lockup the operator can set
+    100                      // Maximum lockup period in epochs
+);
+```
+
+#### Option B: Combined Deposit and Operator Approval (Single transaction)
+
+For EIP-2612 tokens, you can combine funding and operator approval:
+
+```solidity
+// Payer signs permit off-chain, then deposits AND approves operator in one transaction
+Payments(paymentsContractAddress).depositWithPermitAndApproveOperator(
+    tokenAddress,            // ERC20 token address (must support EIP-2612)
+    payerAddress,            // Recipient's address (must be the permit signer)
+    100 * 10**18,            // Amount to deposit (100 tokens)
+    deadline,                // Permit expiration timestamp
+    v, r, s,                 // Signature components from signed permit
+    serviceContractAddress,  // Operator to approve
+    5 * 10**18,              // Rate allowance (5 tokens/epoch)
+    20 * 10**18,             // Lockup allowance (20 tokens)
+    100                      // Max lockup period (100 epochs)
+);
+```
+
+This approval has three key components:
+
+- The `rateAllowance` (5 tokens/epoch) limits the total continuous payment rate across all rails created by this operator
+- The `lockupAllowance` (20 tokens) limits the total funds the operator can lock up across all rails (streaming plus fixed, e.g., for one-time payments or escrow)
+- The `maxLockupPeriod` (100 epochs) limits how far in advance the operator can lock funds
+
+### 3. Deal Proposal (Rail Creation)
+
+When a payer proposes a deal with a payee, the service contract (acting as an operator) creates a payment rail:
+
+```solidity
+// Service contract creates a rail
+uint256 railId = Payments(paymentsContractAddress).createRail(
+    tokenAddress,        // Token used for payments
+    payerAddress,        // The payer's address
+    payee,               // The payee's address
+    validatorAddress,    // Optional validator (can be address(0) for no validation / arbitration)
+    commissionRateBps,   // Optional operator commission rate in basis points
+    serviceFeeRecipient  // The address that receives the operator commission
+);
+
+// Set up initial lockup for onboarding costs - for example, 10 tokens as fixed lockup
+Payments(paymentsContractAddress).modifyRailLockup(
+    railId,       // Rail ID
+    100,          // Lockup period (100 epochs)
+    10 * 10**18   // Fixed lockup amount (10 tokens for onboarding)
+);
+```
+
+At this point:
+
+- A rail is established between the payer and payee
+- The rail has a `fixedLockup` of 10 tokens and a `lockupPeriod` of 100 epochs
+- The payment `rate` is still 0 (service hasn't started yet)
+- The payer's account `lockupCurrent` is increased by 10 tokens
+
+### 4. Deal Acceptance and Service Start
+
+When the payee accepts the deal, the operator starts the payment stream:
+
+```solidity
+// Service contract (operator) increases the payment rate and makes a one-time payment
+Payments(paymentsContractAddress).modifyRailPayment(
+    railId,       // Rail ID
+    2 * 10**18,   // New payment rate (2 tokens per epoch)
+    3 * 10**18    // One-time onboarding payment (3 tokens)
+);
+```
+
+This single operation has several effects:
+- An immediate one-time payment of 3 tokens is transferred to the payee. This is deducted from the rail's `lockupFixed`, which is now 7 tokens.
+- The payer's total `lockupCurrent` is recalculated. The old rail lockup (10) is replaced by the new lockup: `(2 * 100) + 7 = 207` tokens. This change requires the payer to have sufficient available funds.
+- The payer's account `lockupRate` is now increased by 2 tokens/epoch. This rate is used to calculate future lockup requirements whenever settlement occurs.
+
+### 5. Periodic Settlement
+
+Payment settlement can be triggered by any rail participant to process due payments.
+
+```solidity
+// Settlement call - can be made by payer, payee, or operator
+(uint256 amount, uint256 settledEpoch, string memory note) = Payments(paymentsContractAddress).settleRail(
+    railId,       // Rail ID
+    block.number  // Settle up to current epoch
+);
+```
+
+This settlement:
+
+- Calculates the amount owed based on the rail's rate and the time elapsed since the last settlement.
+- Transfers tokens from the payer's `funds` to the payee's account.
+- If a validator is specified, it may modify the payment amount or limit settlement epochs.
+- Records the new `settledUpTo` epoch for the rail.
+
+A rail may only be settled if either (a) the payer's account is fully funded or (b) the rail is terminated (in which case the rail may be settled up to the rail's `endEpoch`).
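+
+For intuition, a quick calculation with the numbers from this example (hypothetical epochs; validator adjustments and operator commission ignored):
+
+```solidity
+// Rate is 2 tokens/epoch. If the last settlement was 10 epochs ago:
+//   amount owed = 2 tokens/epoch * 10 epochs = 20 tokens
+// settleRail moves 20 tokens from the payer's funds to the payee's account
+// and records the new settledUpTo epoch.
+```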
+
+### 6. Deal Modification
+
+If service terms change, the operator can adjust the rail's parameters.
+
+```solidity
+// Operator modifies payment parameters
+Payments(paymentsContractAddress).modifyRailPayment(
+    railId,       // Rail ID
+    4 * 10**18,   // Increased rate (4 tokens per epoch)
+    0             // No one-time payment
+);
+
+// If lockup terms need changing
+Payments(paymentsContractAddress).modifyRailLockup(
+    railId,       // Rail ID
+    150,          // Extended lockup period (150 epochs)
+    15 * 10**18   // Increased fixed lockup (15 tokens)
+);
+```
+
+### 7. Ending a Deal
+
+There are two primary ways to end a deal:
+
+**Method 1: Soft End (Rate to Zero)**
+
+The operator can set the payment rate to zero and optionally charge a final termination fee. This keeps the rail active but stops recurring payments.
+
+```solidity
+// Service contract reduces payment rate and issues an optional termination payment
+Payments(paymentsContractAddress).modifyRailPayment(
+    railId,       // Rail ID
+    0,            // Zero out payment rate
+    5 * 10**18    // Termination fee (5 tokens)
+);
+```
+
+**Method 2: Hard Termination (Safety Hatch)**
+
+The operator (or a fully-funded payer) can call `terminateRail`. This formally ends the agreement and activates the `lockupPeriod` as a final, guaranteed settlement window for the payee.
+
+```solidity
+// Operator or payer terminates the rail
+Payments(paymentsContractAddress).terminateRail(railId);
+```
+
+### 8. Final Settlement and Withdrawal
+
+After a rail is terminated and its final settlement window (`endEpoch`) has been reached, a final settlement call will unlock any remaining funds.
+
+```solidity
+// 1. First, get the rail's details to find its endEpoch
+RailView memory railInfo = Payments(paymentsContractAddress).getRail(railId);
+
+// 2. Perform the final settlement up to the endEpoch
+(uint256 amount, uint256 settledEpoch, string memory note) = Payments(paymentsContractAddress).settleRail(
+    railId,
+    railInfo.endEpoch
+);
+
+// 3. Payer can now withdraw all remaining funds that are no longer locked
+Payments(paymentsContractAddress).withdraw(
+    tokenAddress,
+    remainingAmount  // Amount to withdraw
+);
+```
+
+## Emergency Scenarios
+
+If some component in the system (operator, validator, payer, payee) misbehaves, all parties have escape hatches that allow them to walk away with predictable losses.
+
+### Reducing Operator Allowance
+
+At any time, the payer can reduce the operator's allowance (e.g., to zero) and/or change whether the operator is allowed to create new rails, as sketched below. Such modifications won't affect existing rails, although the operator will not be able to increase the payment rates on any rails they manage until they're back under their limits.
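+
+For example, a payer can zero out an operator's future allowances with the same `setOperatorApproval` entry point shown earlier (existing rails keep operating under the allowances already in use):
+
+```solidity
+// Payer revokes the operator: no new rails, no additional rate or lockup
+Payments(paymentsContractAddress).setOperatorApproval(
+    tokenAddress,            // ERC20 token address
+    serviceContractAddress,  // Operator address (service contract)
+    false,                   // Operator may no longer create new rails
+    0,                       // Rate allowance reduced to zero
+    0,                       // Lockup allowance reduced to zero
+    0                        // Max lockup period reduced to zero
+);
+```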
+
+### Rail Termination (by payer)
+
+If something goes wrong (e.g., the operator is buggy and is refusing to terminate deals or stop payments), the payer may terminate the rail to prevent future payment obligations beyond the guaranteed lockup period.
+
+```solidity
+// Payer terminates the rail
+Payments(paymentsContractAddress).terminateRail(railId);
+```
+
+- **Requirements**: The payer must ensure their account is fully funded (`isAccountLockupFullySettled` is true) before they can terminate any rails.
+
+**Consequences of Termination:**
+
+- **Sets a Final Deadline:** Termination sets a final settlement deadline (`endEpoch`). This activates the `lockupPeriod` as a guaranteed payment window for the payee.
+- **Stops Future Lockups:** The payer's account immediately stops accruing new lockup for this rail's payment rate.
+- **Unlocks Funds After Final Settlement:** The funds reserved for the rail (both streaming and fixed) are only released back to the payer after the `endEpoch` has passed *and* a final `settleRail` call has been made. They do not unlock automatically.
+
+### Rail Termination (by operator)
+
+At any time, even if the payer's account isn't fully funded, the operator can terminate a rail. This allows the recipient to settle any funds available in the rail and receive partial payment.
+
+### Rail Settlement Without Validation
+
+If a validator contract is malfunctioning, the _payer_ may forcibly settle the rail "in full" (skipping validation) to prevent the funds from getting stuck in the rail pending final validation. This can only be done after the rail has been terminated (either by the payer or by the operator), and should be used as a last resort.
+
+```solidity
+// Emergency settlement for terminated rails with stuck validation
+(uint256 amount, uint256 settledEpoch, string memory note) = Payments(paymentsContractAddress).settleTerminatedRailWithoutValidation(railId);
+```
+
+### Payer Reducing Operator Allowance After Deal Proposal
+
+#### Scenario
+
+If a payer reduces an operator's `rateAllowance` after a deal proposal, but before the payee accepts the deal, the following can occur:
+1. The operator has already locked a fixed amount in a rail for the deal.
+2. The payee, seeing the locked funds, does the work and tries to accept the deal.
+3. The payer reduces the operator's `rateAllowance` before the operator can start the payment stream.
+4. When the operator tries to begin payments (by setting the payment rate), the contract checks the current allowance and **the operation fails** if the new rate exceeds the reduced allowance, even if there is enough fixed lockup.
+
+#### Contract Behavior
+
+- The contract enforces that operators cannot lock funds at a rate higher than their current allowance.
+- The operator might not be able to initiate the payment stream as planned if the allowance is decreased after the rail setup.
+
+#### Resolution: One-Time Payment from Fixed Lockup
+
+The operator can still use the `modifyRailPayment` function to make a **one-time payment** to the payee out of the fixed lockup. Even if the rate allowance was lowered after the deal proposal, the payee can still be compensated for their work.
+
+**Example Usage:**
+```solidity
+Payments(paymentsContractAddress).modifyRailPayment(
+    railId,          // Rail ID
+    0,               // Leave the payment rate at zero
+    oneTimePayment   // One-time payment drawn from the fixed lockup
+);
+```
+
+#### Best Practice
+
+- Unless absolutely necessary, payers should avoid reducing operator allowances while deals are in flight.
+- Operators should be prepared for this possibility and fall back to one-time payments if the rate stream cannot be initiated.
+
+## Contributing
+
+We welcome contributions to the payments contract! To ensure consistency and quality across the project, please follow these guidelines when contributing.
+
+### Before Contributing
+
+- **New Features**: Always create an issue first and discuss with maintainers before implementing new features. This ensures alignment with project goals and prevents duplicate work.
+- **Bug Fixes**: While you can submit bug fix PRs without prior issues, please include detailed reproduction steps in your PR description.
+
+### Pull Request Guidelines
+
+- **Link to Issue**: All feature PRs should reference a related issue (e.g., "Closes #123" or "Addresses #456").
+- **Clear Description**: Provide a detailed description of what your PR does, why it's needed, and how to test it.
+- **Tests**: Include comprehensive tests for new functionality or bug fixes.
+- **Documentation**: Update relevant documentation for any API or behavior changes.
+
+### Commit Message Guidelines
+
+This project follows the [Conventional Commits specification](https://www.conventionalcommits.org/). All commit messages should be structured as follows:
+
+```
+<type>[optional scope]: <description>
+
+[optional body]
+
+[optional footer(s)]
+```
+
+**Types:**
+- `feat`: A new feature
+- `fix`: A bug fix
+- `docs`: Documentation only changes
+- `style`: Changes that do not affect the meaning of the code (white-space, formatting, missing semi-colons, etc)
+- `refactor`: A code change that neither fixes a bug nor adds a feature
+- `test`: Adding missing tests or correcting existing tests
+- `chore`: Changes to the build process or auxiliary tools and libraries
+
+**Examples:**
+- `feat: add rail termination functionality`
+- `fix: resolve settlement calculation bug`
+- `docs: update README with new API examples`
+- `chore: update dependencies`
+
+Following these conventions helps maintain a clear project history and makes handling releases and changelogs easier.
+
+## License
+
+Dual-licensed under [MIT](https://github.com/filecoin-project/lotus/blob/master/LICENSE-MIT) + [Apache 2.0](https://github.com/filecoin-project/lotus/blob/master/LICENSE-APACHE)
diff --git a/service_contracts/src/payments/SPEC.md b/service_contracts/src/payments/SPEC.md
new file mode 100644
index 00000000..648e597b
--- /dev/null
+++ b/service_contracts/src/payments/SPEC.md
@@ -0,0 +1,273 @@
+
+# Payments Contract In-Depth Implementation SPEC
+
+This document is a supplement to the very thorough and useful README. The README covers essentially everything you need to know as a user of the payments contract; this document is aimed at advanced users and implementers and covers the internal workings of the contract in depth. You should understand the README before reading this document.
+
+- [Skeleton Keys for Understanding](#skeleton-keys-for-understanding)
+  - [Three Core Data Structures](#three-core-data-structures)
+  - [The Fundamental Flow of Funds](#the-fundamental-flow-of-funds)
+  - [Mixing of Buckets](#mixing-of-buckets)
+  - [Invariants are Enforced Eagerly](#invariants-are-enforced-eagerly)
+- [Operator Approval](#operator-approval)
+- [Accounts and Account Settlement](#accounts-and-account-settlement)
+- [Rails and Rail Settlement](#rails-and-rail-settlement)
+  - [One Time Payments](#one-time-payments)
+  - [Rail Changes](#rail-changes)
+  - [Validation](#validation)
+- [Rail Termination](#rail-termination)
+
+
+
+## Skeleton Keys for Understanding
+
+Some concepts are a bit tricky and show up throughout the code in subtle ways. Once you understand them, the rest is much easier to follow.
+
+### Three Core Data Structures
+
+There are three essential data structures in this contract: the [`Account`](#accounts-and-account-settlement), the [`Rail`](#rails-and-rail-settlement) and the [`OperatorApproval`](#operator-approval). Accounts hold funds of a particular token associated with a public key. They are used for paying and receiving payment. Rails are used to track point-to-point payments between Accounts. OperatorApprovals allow an operator contract to set up and modify payments between parties under usage constraints.
+
+A public key identity can have multiple Accounts of different token types.
+Each Account can have multiple operators that it has approved to process payments. Each Account can also have multiple outgoing payment rails, and each rail represents a different payee. There is one operator per rail; one operator can manage many rails, and each rail can have a different operator. To picture the general structure, it helps to think of a set of operators per account and a set of rails per operator.
+
+Finally, note that independently of its outgoing payment rails, an account can have any number of incoming payment rails from different payers.
+
+### The Fundamental Flow of Funds
+
+The first key principle of fund movements:
+
+> All funds paid from payer to payee in the payment contract are 1) deposited into the payer's account 2) temporarily locked up in the `lockupCurrent` of the payer account 3) moved into the payee account
+
+This applies to both one time payments and standard rate based rail payment flows.
+
+In the case of live rail payment flows, funds are temporarily locked during account settlement and moved into the payee account during rail settlement. We'll refer to these lockup funds as "temporary settling lockup" in this document.
+
+For one time payments, lockup is explicitly added to `lockupCurrent` of the payer account when setting up the rail with a call to `modifyRailLockup`. Payments are processed immediately in `modifyRailPayment` with a nonzero `oneTimePayment` parameter -- there is no waiting for rail settlement to process these funds.
+
+The funds covering rail payment flows on terminated rails are known as the streaming lockup. These funds are locked when `modifyRailPayment` increases the rail's payment rate or when `modifyRailLockup` changes the lockup period. These funds can never be withdrawn from a live rail and are only released during settlement of the rail after termination. This is an essential point for understanding the payments contract. Rate based payments paid out during the `lockupPeriod` for a terminated rail share characteristics of both one time payments and live rail payment streams. Like one time payments, all rails are required to lock up, up front, the amount needed to cover the lockup period payment. Like live rail payments, the `lockupPeriod` payments are released at the rail's rate through time. Unique to rail payments after termination is that they *must* flow from payer to payee, barring validation interference. One time payments have no such requirement, and live rail payments can always be stopped by terminating the rail.
+
+One important difference between these three cases is how they interact with operator approval. Live rail payment flow approval is managed with `rateAllowance` and `rateUsage`. Hence temporary settling lockup is added to `lockupCurrent` without any modifications to `lockupUsage` or requirements on `lockupAllowance`. In contrast, the streaming lockup that covers terminated rail settlement is locked throughout the rail's duration and consumes `lockupAllowance` to increase the operator approval's `lockupUsage`. And of course this is also true of fixed lockup for one time payments.
+
+The second key principle of fund movements:
+
+> Payer account funds may be set aside for transfer but end up unused in which case they are 1) first deposited into the payer's account 2) temporarily locked up in `lockupCurrent` of the payer account 3) moved back to the available balance of the payer account
+
+This is the case for unused fixed lockup set aside for one time payments that are never made when a rail is finalized. This is also true for funds that don't end up flowing during rail settlement because rail validation fails.
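+
+As a concrete (hypothetical) trace of the first principle, for a live rail paying 2 tokens per epoch with a 10-epoch `lockupPeriod`:
+
+```solidity
+// epoch 0: deposit 100           -> funds = 100, lockupCurrent = 0,  lockupRate = 0
+// epoch 0: rate set to 2,        -> funds = 100, lockupCurrent = 20, lockupRate = 2
+//          lockupPeriod = 10        (streaming lockup = 2 * 10)
+// epoch 5: account settlement    -> funds = 100, lockupCurrent = 30
+//                                   (adds 2 * 5 temporary settling lockup)
+// epoch 5: rail settlement       -> funds = 90,  lockupCurrent = 20
+//                                   (10 tokens move into the payee's funds)
+```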
+
+One last thing to note is that all funds that complete the movement from payer to payee are potentially charged a percentage commission fee paid to a `serviceFeeRecipient`. This address is specified per rail.
+
+### Mixing of Buckets
+
+Schematic of the contents of the operator approval `lockupUsage` bucket of funds:
+
+```
++-------------------+    +-------------------------------+
+| Operator Approval |    | rail 1 fixed lockup usage     |
+|                   |    +-------------------------------+
+| lockupUsage       | == | rail 1 streaming lockup usage |
+|                   |    +-------------------------------+
+|                   |    | rail 2 fixed lockup usage     |
+|                   |    +-------------------------------+
+|                   |    | rail 2 streaming lockup usage |
+|                   |    +-------------------------------+
+|                   |    | ...                           |
++-------------------+    +-------------------------------+
+```
+
+Schematic of the contents of the account `lockupCurrent` bucket of funds. Fixed, streaming and temporary settling lockup from all rails of all operators are contained in the single `lockupCurrent` bucket of funds tracked in the `Account` data structure.
+```
++-------------------+    +-----------------------------------+
+| Account           |    | rail 1 (operator A) fixed lockup  |
+|                   |    +-----------------------------------+
+| lockupCurrent     | == | rail 1 (op A) streaming lockup    |
+|                   |    +-----------------------------------+
+|                   |    | rail 1 (op A) tmp settling lockup |
+|                   |    +-----------------------------------+
+|                   |    | rail 2 (op B) fixed lockup usage  |
+|                   |    +-----------------------------------+
+|                   |    | rail 2 (op B) streaming lockup    |
+|                   |    +-----------------------------------+
+|                   |    | rail 2 (op B) tmp settling lockup |
+|                   |    +-----------------------------------+
+|                   |    | ...                               |
++-------------------+    +-----------------------------------+
+```
+
+The payments contract has two main methods of payment: rate based payments and one time payments. Each core data structure has a pair of variables that seems to reflect this dichotomy: (`rateUsage`/`rateAllowance`, `lockupUsage`/`lockupAllowance`) for operator approval, (`lockupCurrent`, `lockupRate`) for accounts, and (`lockupFixed`, `paymentRate`) for rails. The payments contract does separate accounting based on rates and on funds available for one time payment, largely by manipulating these separate variables. But there is a big exception that shows up throughout -- the streaming lockup.
+
+As explained in the README, the streaming lockup consists of the funds that must be locked to cover a rail's `lockupPeriod` between rail termination and rail finalization, i.e. its end of life. For motivation on the `lockupPeriod` see the README. Internally the payments contract does not consistently organize these buckets of funds separately but sometimes mixes them together. The accounting for approvals and accounts *mixes these buckets* while rail accounting keeps them separate. `lockupUsage` and `lockupCurrent` both track one number that is a sum of streaming lockups for rate requirements during the `lockupPeriod` and fixed lockup for one time payment coverage. Further complicating things, the account data structure also includes temporary settling lockup between account settlement and rail settlement. See the schematics above.
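+
+For a single rail, the operator's `lockupUsage` contribution is the sum of the two components (hypothetical numbers):
+
+```solidity
+// fixedLockup = 10, paymentRate = 2, lockupPeriod = 100
+// streaming lockup        = paymentRate * lockupPeriod = 2 * 100 = 200
+// lockupUsage (this rail) = fixedLockup + streaming lockup = 10 + 200 = 210
+```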
+
+As an example of how this manifests itself, consider a call to `modifyRailPayment` increasing the payment rate of a rail. For this operation to go through, not only does the `rateAllowance` need to be high enough for the operator to increase its `rateUsage`, the `lockupAllowance` must also be high enough to cover the new component of streaming lockup in the `lockupUsage`.
+
+### Invariants are Enforced Eagerly
+
+The most pervasive pattern in the payments contract is the use of pre- and post-condition modifiers. The bulk of these modifier calls force invariants within the fields of the three core data structures to be true. The major invariant being enforced is that accounts are always settled as far as possible. In fact, function modifiers are the only place where account settlement occurs (for more detail see the [section below](#accounts-and-account-settlement)). Additionally, there are invariants making sure that rails don't attempt to spend more than their fixed lockup and that account locked funds are always covered by the account balance. There are also selectively used invariants asserting that rails are in particular termination states for particular methods.
+
+Every state-modifying function of the payments contract runs a group of core account-settlement invariant pre and post conditions via the `settleAccountLockupBeforeAndAfter` or the `settleAccountLockupBeforeAndAfterForRail` modifier. This is a critical mechanism to be aware of when reasoning through which invariants apply during the execution of payments contract methods.
+
+## Operator Approval
+
+As described above, operator approvals consist of the pair of `rateAllowance` and `lockupAllowance`. Approvals are per operator, and rate and lockup resource usage are summed across all of an operator's rails when checking for sufficient operator approval during rail operations. Approvals also include a `maxLockupPeriod` restricting the operator's ability to set overly long lockup periods.
+
+The OperatorApproval struct:
+
+```solidity
+    struct OperatorApproval {
+        bool isApproved;
+        uint256 rateAllowance;
+        uint256 lockupAllowance;
+        uint256 rateUsage; // Track actual usage for rate
+        uint256 lockupUsage; // Track actual usage for lockup
+        uint256 maxLockupPeriod; // Maximum lockup period the operator can set for rails created on behalf of the client
+    }
+```
+
+An important counterintuitive fact about the approval allowances is that they are not constrained in relation to current usage. Usage can be lower than allowance if an operator has not used all of their existing allowance. Usage can be higher than allowance if a client has manually reduced the operator's allowance. As explained in the README, reducing allowance below usage on any of the allowance resources (rate, lockup, period) will not impact existing rails. Allowance invariants are checked at the time of rail modification, not continuously enforced, so a modification increasing a rail's usage can fail after an allowance reduction. Furthermore, reductions in usage always go through, even if the current allowance is below the new usage. For example, if a rail has an allowance of 20 locked tokens and uses all of them to lock up 20 tokens, and the client then brings the operator's allowance down to 1 locked token, the operator can still modify the rail's usage down to 15 locked tokens even though that exceeds the operator's current allowance.
+
+Another quirk of the allowance system is the difference in how rate changes and one time payments impact the lockup allowance.
+When modifying a rail's rate down, say from 5 tokens per block to 4 tokens per block, the operator's lockup approval usage goes down by 1 token * `lockupPeriod` to account for the reduction in streaming lockup. The operator can then leverage this reduced usage to modify payments upwards in other rails. For one time payments this is not true. When a one time payment clears, the approval's lockup usage goes down, but the `lockupAllowance` *also goes down*, preventing the operator from doing this again. This is essential for the payments system to work correctly; otherwise 1 unit of `lockupAllowance` could be used to spend an entire account's funds in repeated one time payments.
+
+## Accounts and Account Settlement
+
+Account settlement, roughly speaking, flows funds out of a depositing payer's account into a staging bucket (`lockupCurrent`) without completing the flow of funds to the payee -- that part is done per rail during rail settlement. To enable the contract to efficiently handle account settlement over many rails, accounts only maintain global state of the lockup requirements of all rails: `lockupRate`. Accounts track deposited funds, total locked funds, the rate of continuous lockup, and the last epoch they were settled at.
+
+The Account struct:
+```solidity
+    struct Account {
+        uint256 funds;
+        uint256 lockupCurrent;
+        uint256 lockupRate;
+        // epoch up to and including which lockup has been settled for the account
+        uint256 lockupLastSettledAt;
+    }
+```
+
+The `lockupCurrent` field is the intermediate bucket holding onto funds claimed by rails. The free funds of the account are `funds` - `lockupCurrent`. Free funds flow into `lockupCurrent` at `lockupRate` tokens per epoch.
+
+As mentioned above, account settlement is a precondition to every state modifying call in the payments contract. It is actually structured as both a pre and post condition:
+
+```solidity
+    modifier settleAccountLockupBeforeAndAfter(address token, address owner, bool settleFull) {
+        Account storage payer = accounts[token][owner];
+
+        // Before function execution
+        performSettlementCheck(token, owner, payer, settleFull, true);
+
+        _;
+
+        // After function execution
+        performSettlementCheck(token, owner, payer, settleFull, false);
+    }
+```
+
+The core of account settlement is calculating how much should flow out of this account since the previous settlement epoch `lockupLastSettledAt`. In the simple case, `lockupRate * (block.number - lockupLastSettledAt)` is added to `lockupCurrent`. If there are insufficient funds to do this, then account settlement first calculates how many epochs can be settled with the current funds: `fractionalEpochs = availableFunds / account.lockupRate;`. Then settlement is completed up to `lockupLastSettledAt + fractionalEpochs`.
+
+The withdraw function is special in that it requires the account to be fully settled, by assigning `true` to `settleFull` in its modifier. All other methods allow account settlement to progress as far as possible without fully settling, as valid pre and post conditions. This means that accounts are allowed to be in debt, with less temporary settling lockup in their `lockupCurrent` than the total that all the account's rails have a claim on. Note that this notion of debt does not take into account the streaming lockup. If the rail is terminated then a `lockupPeriod` of funds is guaranteed to be covered, since those funds are enforced to be locked in `lockupCurrent` upon rail modification.
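+
+A minimal sketch of this settlement arithmetic (illustrative only, not the contract's exact code; it assumes the `Account` struct above and the invariant `funds >= lockupCurrent`):
+
+```solidity
+function sketchSettleAccountLockup(Account storage acct) internal returns (bool fullySettled) {
+    uint256 elapsed = block.number - acct.lockupLastSettledAt;
+    uint256 required = acct.lockupRate * elapsed;
+    uint256 available = acct.funds - acct.lockupCurrent;
+    if (available >= required) {
+        // Full settlement: lock everything owed since the last settled epoch
+        acct.lockupCurrent += required;
+        acct.lockupLastSettledAt = block.number;
+        return true;
+    }
+    // Partial settlement: advance only as many whole epochs as the free funds cover
+    uint256 fractionalEpochs = available / acct.lockupRate;
+    acct.lockupCurrent += acct.lockupRate * fractionalEpochs;
+    acct.lockupLastSettledAt += fractionalEpochs;
+    return false;
+}
+```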
+
+## Rails and Rail Settlement
+
+Rail settlement completes the fundamental flow of funds from payer account to payee account by moving funds from the account's `lockupCurrent` to the rail payee's account. Any party involved in the rail -- operator, payee or payer -- can call settlement. It is useful to keep the rail data structure in mind when discussing rail settlement:
+
+```solidity
+    struct Rail {
+        address token;
+        address from;
+        address to;
+        address operator;
+        address validator;
+        uint256 paymentRate;
+        uint256 lockupPeriod;
+        uint256 lockupFixed;
+        // epoch up to and including which this rail has been settled
+        uint256 settledUpTo;
+        RateChangeQueue.Queue rateChangeQueue;
+        uint256 endEpoch; // Final epoch up to which the rail can be settled (0 if not terminated)
+        // Operator commission rate in basis points (e.g., 100 BPS = 1%)
+        uint256 commissionRateBps;
+        address serviceFeeRecipient; // address to collect operator commission
+    }
+```
+
+At its core, rail settlement simply multiplies the duration of the total time being settled by the rail's outgoing rate, reduces the payer Account's `lockupCurrent` and `funds` by this amount, and adds this amount to the `funds` of the payee Account.
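+
+In sketch form, for a single constant-rate segment (illustrative only; the variable names are made up, and operator commission and validation are omitted):
+
+```solidity
+// Settle one segment covering (segmentStart, segmentEnd] at a constant rate
+uint256 duration = segmentEnd - segmentStart;
+uint256 amount = rail.paymentRate * duration;
+payer.funds -= amount;
+payer.lockupCurrent -= amount; // release the temporary settling lockup
+payee.funds += amount;         // complete the fundamental flow of funds
+rail.settledUpTo = segmentEnd;
+```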
+
+This is a bit more complicated in practice because rail rates can change. For more on how this happens see [Rail Changes](#rail-changes) below. For this reason rails are always settled in segments. Segments are a record of the rail's changing rate over time. Each rail tracks its segments in a RateChangeQueue; new segments are added to the queue each time the rate is changed. Rail settlement then performs the core settlement operation on each segment with a different rate. The function at the heart of rail settlement is called `_settleSegment`. The function organizing the traversal of segments and calling `_settleSegment` on each one individually is `_settleWithRateChanges`.
+
+Settlement is further complicated because the settlement period can vary. Rails are settled up to a user defined parameter `untilEpoch`, which may be any epoch before the current network epoch. The `untilEpoch` is internally restricted to be the minimum of the user specified epoch and the payer account's `lockupLastSettledAt` epoch. This comes from the nature of the fundamental flow of funds -- funds cannot flow into a payee rail without first being locked up in the payer account's `lockupCurrent`, and the last epoch for which the rail's rate of funds is locked is exactly `lockupLastSettledAt`.
+
+Each segment of the rate change queue is pushed once and popped once. Rail settlement reads every segment up to the `untilEpoch` and processes them. Rail settlement may not empty the queue in the case that the `untilEpoch` is in the past. Logic in `_settleWithRateChanges` handles edge cases like partially settled segments and zero rate segments.
+
+As part of its logic, `_settleSegment` checks the rail's `validator` address. If it is nonzero then the validator contract is consulted for modifying the payment. Validators can modify the rail settlement amount and the final `untilEpoch`. For background on the purpose of rail validation please see the README. For more about validation see [the section below](#validation).
+
+### Terminated Rail Settlement
+
+Terminated rails settle in much the same way as live rails. Terminated rails are also processed via calls to `_settleSegment` and move funds locked in an account's `lockupCurrent` into the payee account. The major difference is that terminated rail settlement funds are completely covered by the streaming lockup, which contract invariants enforce must be held in `lockupCurrent`. For this reason the `untilEpoch` is not checked against the account's `lockupLastSettledAt` in the terminated rail case -- the funds are already kept locked in the account and can be spent without checking.
+
+Rail settlement always tries to finalize a terminated rail before returning. Finalization has three effects. First, it flows unused rail fixed lockup funds out of the payer account's `lockupCurrent` and back to the account's available balance. Second, the operator usage for streaming lockup and unused fixed lockup is removed, and the operator reclaims this allowance for lockup operations on other rails. Finally, the `Rail` data structure is zeroed out, indicating that the rail is finalized and therefore invalid for modifications. The zeroed out condition is checked in various places in the code, and operations on rails meeting this condition revert with `Errors.RailInactiveOrSettled(railId)`.
+
+### Validation
+
+With one exception, validation is run for all instances of rail segment settlement, live and terminated. When many segments are settled, validation is run on each segment. The validation interface is:
+
+```solidity
+interface IValidator {
+    struct ValidationResult {
+        // The actual payment amount determined by the validator after validation of a rail during settlement
+        uint256 modifiedAmount;
+        // The epoch up to and including which settlement should occur.
+        uint256 settleUpto;
+        // A placeholder note for any additional information the validator wants to send to the caller of `settleRail`
+        string note;
+    }
+
+    function validatePayment(
+        uint256 railId,
+        uint256 proposedAmount,
+        // the epoch up to and including which the rail has already been settled
+        uint256 fromEpoch,
+        // the epoch up to and including which validation is requested; payment will be validated for (toEpoch - fromEpoch) epochs
+        uint256 toEpoch,
+        uint256 rate
+    ) external returns (ValidationResult memory result);
+}
+```
+
+The parameters encode a settlement segment, and the result allows the validator to change the total amount settled and the epoch up to which settlement takes place. A few sanity checks constrain the `ValidationResult`. The validator can't authorize more payment than would flow through the rail without validation, or settle the rail up to an epoch beyond the provided `toEpoch`. The zero address is an allowed validator.
+
+Note that when the validator withholds some of the funds from being paid out, the rail settlement code still unlocks those funds from the `lockupCurrent` bucket in the payer account. Essentially, the validator flows those funds back to the payer account's available balance.
+
+The one exception where rails can be settled without validation is the post-termination failsafe `settleTerminatedRailWithoutValidation`, which exists to protect against buggy validators stopping all payments between parties. This method calls `_settleSegment` with no validation and hence pays in full.
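+
+As an illustration of the interface, a hypothetical validator that approves every segment in full (a real validator would apply service-specific checks; the contract name is made up):
+
+```solidity
+contract FullApprovalValidator is IValidator {
+    function validatePayment(
+        uint256, /* railId */
+        uint256 proposedAmount,
+        uint256, /* fromEpoch */
+        uint256 toEpoch,
+        uint256 /* rate */
+    ) external pure returns (ValidationResult memory result) {
+        // Approve the full proposed amount over the whole requested segment
+        return ValidationResult({modifiedAmount: proposedAmount, settleUpto: toEpoch, note: ""});
+    }
+}
+```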
+
+### One Time Payments
+
+One time payments are a way to pay lump sums of tokens over a rail. They require a rail to be set up but do not have any persistent rate based flow. One time payments don't interact with rail or account settlement at all but still follow the fundamental principle of the flow of funds. All one time payments are paid directly out of the fixed lockup of a rail, which is locked into the account's `lockupCurrent` during rail changes via `modifyRailLockup`. One time payments are initiated with a call to `modifyRailPayment` with a nonzero third parameter. This method reduces all lockup tracking parameters by the one time payment amount -- the account `lockupCurrent` and `funds`, the rail `lockupFixed`, and the approval `lockupUsage` and `lockupAllowance`. Then it increases the payee's `funds` by the payment.
+
+One time payments can be made after termination, but only before the rail's end epoch.
+
+### Rail Changes
+
+All rails start with no payments or lockup. `createRail` just makes an empty rail between a payer and payee, overseen by an operator and optionally arbitrated by a validator.
+
+Rails can be modified in three main ways. The first is by changing the rail's `lockupFixed` via the `modifyRailLockup` call. The second is by changing the rail's `lockupPeriod`, and hence streaming lockup, again via the `modifyRailLockup` call. And the third is by calling `modifyRailPayment` with a new rail rate.
+
+Rate changes to a rail are the most complex. They require adding a segment to the rate change queue to enable correct accounting of future rail settlement. They also force changes to locked funds, because rate changes always imply a change to the streaming lockup (which is `rate * lockupPeriod`).
+
+All three modifications change the total amount of `lockupCurrent` in the payer's account. These changes are made over the payer's account under the assumption that it has enough available balance, which is then checked in the post condition modifier.
+
+Only rails of live, fully settled accounts without any debt, i.e. with `lockupLastSettledAt == block.number`, are allowed to increase `lockupFixed`, make any changes to the `lockupPeriod`, or increase the rail's `paymentRate`. Terminated and debtor rails *are* allowed to *reduce* their `lockupFixed`. And terminated rails are allowed to decrease the rail's payment rate (debtor rails can't make any rate changes).
+
+For all three changes the operator approval must be consulted to check that the proposed modifications are within the operator's remaining allowances. It is worth noting that the operator approval has a field `maxLockupPeriod` which sets a ceiling on the lockup period and hence the streaming lockup.
+
+All rail modifications, including rail creation, must be called by the operator.
+
+
+## Rail Termination
+
+If you've read this far you've seen several implications of termination on rail modification, settlement, and allowance accounting. By now it is not too surprising to hear that terminated but not yet finalized rails are not so much an edge case as a distinct third type of payment process alongside one time payments and live rails.
+
+The process of termination itself is very simple compared to its handling throughout the rail code. Rail termination does exactly three things. First, it sets an end epoch on the rail equal to one `lockupPeriod` past the rail's last settlement epoch. Second, it removes the rail's `paymentRate` from the payer account's `lockupRate`. And finally, it reduces the operator approval's `rateUsage` by the same amount.
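+
+In pseudocode, with `lastSettledEpoch` standing in for the epoch up to which settlement has progressed (illustrative names following the structs above):
+
+```solidity
+// The three effects of rail termination
+rail.endEpoch = lastSettledEpoch + rail.lockupPeriod; // 1. fix the final settlement deadline
+payerAccount.lockupRate -= rail.paymentRate;          // 2. stop accruing new lockup for this rail
+operatorApproval.rateUsage -= rail.paymentRate;       // 3. release the operator's rate usage
+```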
+
+With this, account settlement no longer flows funds into the `lockupCurrent` of the payer. The streaming lockup is now used, for exactly one `lockupPeriod`, to move payments to the payee's account. And with the end epoch set, the rail will pay out exactly the streaming lockup over exactly the `lockupPeriod`.
+
+Rails become finalized when settled at or beyond their end epoch. Finalization refunds the unused fixed lockup back to the payer and releases the `lockupUsage` from any remaining fixed lockup and all of the recently paid streaming lockup.
+
+
+
diff --git a/service_contracts/src/payments/contracts/Dutch.sol b/service_contracts/src/payments/contracts/Dutch.sol
new file mode 100644
index 00000000..7147fe1a
--- /dev/null
+++ b/service_contracts/src/payments/contracts/Dutch.sol
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+pragma solidity ^0.8.30;
+
+import {UD60x18, uEXP2_MAX_INPUT, uUNIT} from "@prb-math/UD60x18.sol";
+
+/**
+ * @dev Recurring dutch auction
+ */
+library Dutch {
+    // Target 1 auction per week, on average
+    uint256 public constant RESET_FACTOR = 4;
+    uint256 public constant HALVING_INTERVAL = 3.5 days; // price halves every 3.5 days, i.e. decays to 1/4 per week
+
+    uint256 public constant MAX_DECAY = uEXP2_MAX_INPUT * HALVING_INTERVAL / uUNIT;
+
+    /**
+     * @notice Exponential decay by 1/4 per week
+     * @param startPrice The initial price in attoFIL at elapsed = 0
+     * @param elapsed Seconds of time since the startPrice
+     * @return price The decayed price in attoFIL
+     */
+    function decay(uint256 startPrice, uint256 elapsed) internal pure returns (uint256 price) {
+        if (elapsed > MAX_DECAY) {
+            return 0; // beyond exp2's maximum input the price has effectively decayed to zero
+        }
+        UD60x18 coefficient = UD60x18.wrap(startPrice);
+        UD60x18 decayFactor = UD60x18.wrap(elapsed * uUNIT / HALVING_INTERVAL).exp2();
+
+        return coefficient.div(decayFactor).unwrap(); // price = startPrice / 2^(elapsed / HALVING_INTERVAL)
+    }
+}
diff --git a/service_contracts/src/payments/contracts/Errors.sol b/service_contracts/src/payments/contracts/Errors.sol
new file mode 100644
index 00000000..5028126a
--- /dev/null
+++ b/service_contracts/src/payments/contracts/Errors.sol
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+pragma solidity ^0.8.27;
+
+import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol";
+
+/// @title Errors
+/// @notice Centralized library for custom error definitions across the protocol
+/// @dev Convention: For any error comparing two values, always pass the expected value first, followed by the actual value
+library Errors {
+    /// @notice Rail does not exist or is beyond its last settlement after termination
+    /// @param railId The ID of the rail
+    error RailInactiveOrSettled(uint256 railId);
+
+    /// @notice Only the rail client can perform this action
+    /// @param expected The expected client address
+    /// @param caller The actual caller address
+    error OnlyRailClientAllowed(address expected, address caller);
+
+    /// @notice Only the rail operator can perform this action
+    /// @param expected The expected operator address
+    /// @param caller The actual caller address
+    error OnlyRailOperatorAllowed(address expected, address caller);
+
+    /// @notice Only the rail participant (client, operator, or recipient) can perform this action
+    /// @param expectedFrom The expected client address
+    /// @param expectedOperator The expected operator address
+    /// @param expectedTo The expected recipient address
+    /// @param caller The actual caller address
+    error OnlyRailParticipantAllowed(address expectedFrom, address expectedOperator, address expectedTo, address caller);
+
+    /// @notice Rail is already terminated
+    /// @param railId The ID of the rail
+    error RailAlreadyTerminated(uint256 railId);
+
+    /// @notice Rail is not terminated, but the action requires a terminated rail
+    /// @param railId The ID of the 
rail + error RailNotTerminated(uint256 railId); + + /// @notice The provided address is zero, which is not allowed + /// @param varName The name of the variable that was expected to be non-zero + error ZeroAddressNotAllowed(string varName); + + /// @notice One-time payment exceeds the lockup amount for the rail + /// @param railId The ID of the rail + /// @param available The available lockup amount for the rail + /// @param required The required lockup amount for the rail + error OneTimePaymentExceedsLockup(uint256 railId, uint256 available, uint256 required); + + /// @notice The caller is not authorized to terminate the rail + /// @dev Only the rail operator or the rail client (with fully settled lockup) can terminate the rail + /// @param railId The ID of the rail being terminated + /// @param allowedClient The rail client address (from) + /// @param allowedOperator The rail operator address + /// @param caller The address attempting to terminate the rail + error NotAuthorizedToTerminateRail(uint256 railId, address allowedClient, address allowedOperator, address caller); + + /// @notice The payer's lockup rate is inconsistent with the rail's payment rate + /// @dev Indicates that the payer's lockup rate is less than the rail's payment rate, which should not occur + /// @param railId The ID of the rail to terminate + /// @param from The address of the payer + /// @param paymentRate The payment rate for the rail + /// @param lockupRate The current lockup rate of the payer + error LockupRateInconsistent(uint256 railId, address from, uint256 paymentRate, uint256 lockupRate); + + /// @notice Ether sent must equal the amount for native token transfers + /// @param required The required amount (must match msg.value) + /// @param sent The msg.value sent with the transaction + error MustSendExactNativeAmount(uint256 required, uint256 sent); + + /// @notice Ether (msg.value) must not be sent when transferring ERC20 tokens + /// @param sent The msg.value sent with the transaction + error NativeTokenNotAccepted(uint256 sent); + + /// @notice Native tokens are not supported in depositWithPermit; only ERC20 tokens are allowed + error NativeTokenNotSupported(); + + /// @notice Attempted to withdraw more than the available unlocked funds + /// @param available The amount of unlocked funds available for withdrawal + /// @param requested The amount requested for withdrawal + error InsufficientUnlockedFunds(uint256 available, uint256 requested); + + /// @notice The receiving contract rejected the native token transfer + /// @param to The address to which the transfer was attempted + /// @param amount The amount of native token attempted to send + error NativeTransferFailed(address to, uint256 amount); + + /// @notice The operator is not approved for the client (from address) + /// @param from The address of the client (payer) + /// @param operator The operator attempting the action + error OperatorNotApproved(address from, address operator); + + /// @notice The specified commission rate exceeds the allowed maximum. 
+ /// @param maxAllowed The maximum allowed commission rate in basis points (BPS) + /// @param actual The actual commission rate that was attempted to be set + error CommissionRateTooHigh(uint256 maxAllowed, uint256 actual); + + /// @notice A non-zero commission rate was provided, but no service fee recipient was set + error MissingServiceFeeRecipient(); + + /// @notice Invalid attempt to modify a terminated rail's lockup settings + /// @param actualPeriod The rail's actual period value + /// @param actualLockupFixed The current lockupFixed value + /// @param attemptedPeriod The period value provided + /// @param attemptedLockupFixed The new lockupFixed value proposed + error InvalidTerminatedRailModification( + uint256 actualPeriod, uint256 actualLockupFixed, uint256 attemptedPeriod, uint256 attemptedLockupFixed + ); + + /// @notice The payer's current lockup is insufficient to cover the requested lockup reduction + /// @param from The address of the payer + /// @param token The token involved in the lockup + /// @param currentLockup The payer's current lockup amount + /// @param lockupReduction The reduction attempted to be made + error InsufficientCurrentLockup(IERC20 token, address from, uint256 currentLockup, uint256 lockupReduction); + + /// @notice Cannot change the lockup period due to insufficient funds to cover the current lockup + /// @param token The token for the lockup + /// @param from The address whose account is checked (from) + /// @param actualLockupPeriod The current rail lockup period + /// @param attemptedLockupPeriod The new period requested + error LockupPeriodChangeNotAllowedDueToInsufficientFunds( + IERC20 token, address from, uint256 actualLockupPeriod, uint256 attemptedLockupPeriod + ); + + /// @notice Cannot increase the fixed lockup due to insufficient funds to cover the current lockup + /// @param token The token for the lockup + /// @param from The address whose account is checked + /// @param actualLockupFixed The current rail fixed lockup amount + /// @param attemptedLockupFixed The new fixed lockup amount requested + error LockupFixedIncreaseNotAllowedDueToInsufficientFunds( + IERC20 token, address from, uint256 actualLockupFixed, uint256 attemptedLockupFixed + ); + + /// @notice The requested lockup period exceeds the operator's maximum allowed lockup period + /// @param token The token for the lockup + /// @param operator The operator for the rail + /// @param maxAllowedPeriod The operator's maximum allowed lockup period + /// @param requestedPeriod The lockup period requested + error LockupPeriodExceedsOperatorMaximum( + IERC20 token, address operator, uint256 maxAllowedPeriod, uint256 requestedPeriod + ); + + /// @notice The payer's current lockup is less than the old lockup value + /// @param token The token for the lockup + /// @param from The address whose account is checked + /// @param oldLockup The calculated old lockup amount + /// @param currentLockup The current lockup value in the account + error CurrentLockupLessThanOldLockup(IERC20 token, address from, uint256 oldLockup, uint256 currentLockup); + + /// @notice Cannot modify a terminated rail beyond its end epoch + /// @param railId The ID of the rail + /// @param maxSettlementEpoch The last allowed block for modifications + /// @param blockNumber The current block number + error CannotModifyTerminatedRailBeyondEndEpoch(uint256 railId, uint256 maxSettlementEpoch, uint256 blockNumber); + + /// @notice Cannot increase the payment rate or change the rate on a terminated rail + /// @param 
railId The ID of the rail + error RateChangeNotAllowedOnTerminatedRail(uint256 railId); + + /// @notice Account lockup must be fully settled to change the payment rate on an active rail + /// @param railId The ID of the rail + /// @param from The address whose lockup is being checked + /// @param isSettled Whether the account lockup is fully settled + /// @param currentRate The current payment rate + /// @param attemptedRate The attempted new payment rate + error LockupNotSettledRateChangeNotAllowed( + uint256 railId, address from, bool isSettled, uint256 currentRate, uint256 attemptedRate + ); + + /// @notice Payer's lockup rate is less than the old payment rate when updating an active rail + /// @param railId The ID of the rail + /// @param from The address whose lockup is being checked + /// @param lockupRate The current lockup rate of the payer + /// @param oldRate The current payment rate for the rail + error LockupRateLessThanOldRate(uint256 railId, address from, uint256 lockupRate, uint256 oldRate); + + /// @notice The payer does not have enough funds for the one-time payment + /// @param token The token being used for payment + /// @param from The payer's address + /// @param required The amount required (oneTimePayment) + /// @param actual The actual funds available in the payer's account + error InsufficientFundsForOneTimePayment(IERC20 token, address from, uint256 required, uint256 actual); + + /// @notice Cannot settle a terminated rail without validation until after the max settlement epoch has passed + /// @param railId The ID of the rail being settled + /// @param currentBlock The current block number (actual) + /// @param requiredBlock The max settlement epoch block (expected, must be exceeded) + error CannotSettleTerminatedRailBeforeMaxEpoch( + uint256 railId, + uint256 requiredBlock, // expected (maxSettleEpoch + 1) + uint256 currentBlock // actual (block.number) + ); + + /// @notice Cannot settle a rail for epochs in the future. + /// @param railId The ID of the rail being settled + /// @param maxAllowedEpoch The latest epoch that can be settled (expected, must be >= actual) + /// @param attemptedEpoch The epoch up to which settlement was attempted (actual) + error CannotSettleFutureEpochs(uint256 railId, uint256 maxAllowedEpoch, uint256 attemptedEpoch); + + /// @notice No progress was made in settlement; settledUpTo did not advance. + /// @param railId The ID of the rail + /// @param expectedSettledUpTo The expected value for settledUpTo (must be > startEpoch) + /// @param actualSettledUpTo The actual value after settlement attempt + error NoProgressInSettlement(uint256 railId, uint256 expectedSettledUpTo, uint256 actualSettledUpTo); + + /// @notice The payer's current lockup is less than the fixed lockup amount during rail finalization. + /// @param railId The ID of the rail being finalized + /// @param token The token used for the rail + /// @param from The address whose lockup is being reduced + /// @param expectedLockup The expected minimum lockup amount (rail.lockupFixed) + /// @param actualLockup The actual current lockup in the payer's account (payer.lockupCurrent) + error LockupInconsistencyDuringRailFinalization( + uint256 railId, IERC20 token, address from, uint256 expectedLockup, uint256 actualLockup + ); + + /// @notice The next rate change in the queue is scheduled before the current processed epoch, indicating an invalid state. 
+ /// @param nextRateChangeUntilEpoch The untilEpoch of the next rate change in the queue + /// @param processedEpoch The epoch that has been processed up to + error InvalidRateChangeQueueState(uint256 nextRateChangeUntilEpoch, uint256 processedEpoch); + + /// @notice The validator attempted to settle an epoch before the allowed segment start + /// @param railId The ID of the rail being settled + /// @param allowedStart The minimum epoch allowed (segment start) + /// @param attemptedStart The epoch at which settlement was attempted + error ValidatorSettledBeforeSegmentStart(uint256 railId, uint256 allowedStart, uint256 attemptedStart); + + /// @notice The validator attempted to settle an epoch beyond the allowed segment end + /// @param railId The ID of the rail being settled + /// @param allowedEnd The maximum epoch allowed (segment end) + /// @param attemptedEnd The epoch at which settlement was attempted + error ValidatorSettledBeyondSegmentEnd(uint256 railId, uint256 allowedEnd, uint256 attemptedEnd); + + /// @notice The validator returned a modified amount exceeding the maximum allowed for the confirmed epochs + /// @param railId The ID of the rail being settled + /// @param maxAllowed The maximum allowed settlement amount for the segment + /// @param attempted The attempted (modified) settlement amount + error ValidatorModifiedAmountExceedsMaximum(uint256 railId, uint256 maxAllowed, uint256 attempted); + + /// @notice The account does not have enough funds to cover the required settlement amount + /// @param token The token used for the settlement + /// @param from The address of the account being checked + /// @param available The actual funds available in the account + /// @param required The amount required for settlement + error InsufficientFundsForSettlement(IERC20 token, address from, uint256 available, uint256 required); + + /// @notice The payer does not have enough lockup to cover the required settlement amount + /// @param token The token used for the settlement + /// @param from The payer address being checked + /// @param available The actual lockup available in the account + /// @param required The required lockup amount for the settlement + error InsufficientLockupForSettlement(IERC20 token, address from, uint256 available, uint256 required); + + /// @notice Invariant violation: The payer's lockup exceeds their available funds after settlement + /// @dev Indicates a critical accounting bug or logic error in the settlement process. 
+ /// @param token The token being checked + /// @param account The address whose lockup is being checked + /// @param lockupCurrent The current lockup amount + /// @param fundsCurrent The current funds available + error LockupExceedsFundsInvariant(IERC20 token, address account, uint256 lockupCurrent, uint256 fundsCurrent); + + /// @notice The rate change queue must be empty after full settlement, but it's not + /// @param nextUntilEpoch The untilEpoch value of the next queued rate change (tail of the queue) + error RateChangeQueueNotEmpty(uint256 nextUntilEpoch); + + /// @notice The attempted operation exceeds the operator's allowed rate usage + /// @param allowed The total rate allowance for the operator + /// @param attemptedUsage The rate usage attempted after increase + error OperatorRateAllowanceExceeded(uint256 allowed, uint256 attemptedUsage); + + /// @notice The attempted operation exceeds the operator's allowed lockup usage + /// @param allowed The total lockup allowance for the operator + /// @param attemptedUsage The lockup usage attempted after increase + error OperatorLockupAllowanceExceeded(uint256 allowed, uint256 attemptedUsage); + + /// @notice Attempted to withdraw more than the accumulated fees for the given token + /// @param token The token address + /// @param available The current accumulated fees + /// @param requested The amount attempted to withdraw + error WithdrawAmountExceedsAccumulatedFees(IERC20 token, uint256 available, uint256 requested); + + /// @notice Native token transfer failed during fee withdrawal + /// @param to The recipient address + /// @param amount The amount attempted to send + error FeeWithdrawalNativeTransferFailed(address to, uint256 amount); + + /// @notice Not enough native token sent for the burn operation + /// @param required The minimum required native token amount + /// @param sent The amount of native token sent with the transaction + error InsufficientNativeTokenForBurn(uint256 required, uint256 sent); + + /// @notice The 'to' address must equal the transaction sender (self-recipient enforcement) + /// @dev Used by flows like permit and transfer-with-authorization to ensure only self-deposits + /// @param expected The expected address (msg.sender) + /// @param actual The actual 'to' address provided + error SignerMustBeMsgSender(address expected, address actual); +} diff --git a/service_contracts/src/payments/contracts/Payments.sol b/service_contracts/src/payments/contracts/Payments.sol new file mode 100644 index 00000000..34302b35 --- /dev/null +++ b/service_contracts/src/payments/contracts/Payments.sol @@ -0,0 +1,1834 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.27; + +import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; +import {IERC20Permit} from "@openzeppelin/contracts/token/ERC20/extensions/IERC20Permit.sol"; +import {SafeERC20} from "@openzeppelin/contracts/token/ERC20/utils/SafeERC20.sol"; +import {ReentrancyGuard} from "@openzeppelin/contracts/utils/ReentrancyGuard.sol"; +import {Strings} from "@openzeppelin/contracts/utils/Strings.sol"; +import {Dutch} from "./Dutch.sol"; +import {Errors} from "./Errors.sol"; +import {RateChangeQueue} from "./RateChangeQueue.sol"; +import {IERC3009} from "./interfaces/IERC3009.sol"; + +uint88 constant UINT88_MAX = 0xffffffffffffffffffffff; + +// FIL max supply cap is 2 billion +uint88 constant MAX_AUCTION_START_PRICE = UINT88_MAX; // 309,485,009.821345068724781055 FIL +uint88 constant FIRST_AUCTION_START_PRICE = 31.32 ether; // 31.32 FIL + 
+interface IValidator { + struct ValidationResult { + // The actual payment amount determined by the validator after validation of a rail during settlement + uint256 modifiedAmount; + // The epoch up to and including which settlement should occur. + uint256 settleUpto; + // A placeholder note for any additional information the validator wants to send to the caller of `settleRail` + string note; + } + + function validatePayment( + uint256 railId, + uint256 proposedAmount, + // the epoch up to and including which the rail has already been settled + uint256 fromEpoch, + // the epoch up to and including which validation is requested; payment will be validated for (toEpoch - fromEpoch) epochs + uint256 toEpoch, + uint256 rate + ) external returns (ValidationResult memory result); + + function railTerminated(uint256 railId, address terminator, uint256 endEpoch) external; +} + +// @title Payments contract. +contract Payments is ReentrancyGuard { + using Dutch for uint256; + using SafeERC20 for IERC20; + using RateChangeQueue for RateChangeQueue.Queue; + + // Maximum commission rate in basis points (100% = 10000 BPS) + uint256 public constant COMMISSION_MAX_BPS = 10000; + + uint256 public constant NETWORK_FEE_NUMERATOR = 1; // 0.5% + uint256 public constant NETWORK_FEE_DENOMINATOR = 200; + + address payable private constant BURN_ADDRESS = payable(0xff00000000000000000000000000000000000063); + IERC20 private constant NATIVE_TOKEN = IERC20(address(0)); + + // Events + event AccountLockupSettled( + IERC20 indexed token, + address indexed owner, + uint256 lockupCurrent, + uint256 lockupRate, + uint256 lockupLastSettledAt + ); + event OperatorApprovalUpdated( + IERC20 indexed token, + address indexed client, + address indexed operator, + bool approved, + uint256 rateAllowance, + uint256 lockupAllowance, + uint256 maxLockupPeriod + ); + + event RailCreated( + uint256 indexed railId, + address indexed payer, + address indexed payee, + IERC20 token, + address operator, + address validator, + address serviceFeeRecipient, + uint256 commissionRateBps + ); + event RailLockupModified( + uint256 indexed railId, + uint256 oldLockupPeriod, + uint256 newLockupPeriod, + uint256 oldLockupFixed, + uint256 newLockupFixed + ); + event RailOneTimePaymentProcessed( + uint256 indexed railId, uint256 netPayeeAmount, uint256 operatorCommission, uint256 networkFee + ); + event RailRateModified(uint256 indexed railId, uint256 oldRate, uint256 newRate); + event RailSettled( + uint256 indexed railId, + uint256 totalSettledAmount, + uint256 totalNetPayeeAmount, + uint256 operatorCommission, + uint256 networkFee, + uint256 settledUpTo + ); + event RailTerminated(uint256 indexed railId, address indexed by, uint256 endEpoch); + event RailFinalized(uint256 indexed railId); + + event DepositRecorded(IERC20 indexed token, address indexed from, address indexed to, uint256 amount); + event WithdrawRecorded(IERC20 indexed token, address indexed from, address indexed to, uint256 amount); + + struct Account { + uint256 funds; + uint256 lockupCurrent; + uint256 lockupRate; + // epoch up to and including which lockup has been settled for the account + uint256 lockupLastSettledAt; + } + + struct Rail { + IERC20 token; + address from; + address to; + address operator; + address validator; + uint256 paymentRate; + uint256 lockupPeriod; + uint256 lockupFixed; + // epoch up to and including which this rail has been settled + uint256 settledUpTo; + RateChangeQueue.Queue rateChangeQueue; + uint256 endEpoch; // Final epoch up to which the rail can 
be settled (0 if not terminated)
+        // Operator commission rate in basis points (e.g., 100 BPS = 1%)
+        uint256 commissionRateBps;
+        address serviceFeeRecipient; // address to collect operator commission
+    }
+
+    struct OperatorApproval {
+        bool isApproved;
+        uint256 rateAllowance;
+        uint256 lockupAllowance;
+        uint256 rateUsage; // Track actual usage for rate
+        uint256 lockupUsage; // Track actual usage for lockup
+        uint256 maxLockupPeriod; // Maximum lockup period the operator can set for rails created on behalf of the client
+    }
+
+    // Counter for generating unique rail IDs
+    uint256 private _nextRailId = 1;
+
+    // Internal balances
+    // The self-balance collects network fees
+    mapping(IERC20 token => mapping(address owner => Account)) public accounts;
+
+    // railId => Rail
+    mapping(uint256 railId => Rail) internal rails;
+
+    // Struct to hold rail data without the RateChangeQueue (for external returns)
+    struct RailView {
+        IERC20 token;
+        address from;
+        address to;
+        address operator;
+        address validator;
+        uint256 paymentRate;
+        uint256 lockupPeriod;
+        uint256 lockupFixed;
+        uint256 settledUpTo;
+        uint256 endEpoch;
+        // Operator commission rate in basis points (e.g., 100 BPS = 1%)
+        uint256 commissionRateBps;
+        address serviceFeeRecipient; // address to collect operator commission
+    }
+
+    // token => client => operator => Approval
+    mapping(IERC20 token => mapping(address client => mapping(address operator => OperatorApproval))) public
+        operatorApprovals;
+
+    // Define a struct for rails by payee information
+    struct RailInfo {
+        uint256 railId; // The rail ID
+        bool isTerminated; // True if rail is terminated
+        uint256 endEpoch; // End epoch for terminated rails (0 for active rails)
+    }
+
+    // token => payee => array of railIds
+    mapping(IERC20 token => mapping(address payee => uint256[])) private payeeRails;
+
+    // token => payer => array of railIds
+    mapping(IERC20 token => mapping(address payer => uint256[])) private payerRails;
+
+    // pack into one storage slot
+    struct AuctionInfo {
+        uint88 startPrice; // highest possible price is MAX_AUCTION_START_PRICE
+        uint168 startTime;
+    }
+
+    mapping(IERC20 token => AuctionInfo) public auctionInfo;
+
+    struct SettlementState {
+        uint256 totalSettledAmount;
+        uint256 totalNetPayeeAmount;
+        uint256 totalOperatorCommission;
+        uint256 totalNetworkFee;
+        uint256 processedEpoch;
+        string note;
+    }
+
+    constructor() {
+        _nextRailId = 1;
+    }
+
+    modifier validateRailActive(uint256 railId) {
+        require(rails[railId].from != address(0), Errors.RailInactiveOrSettled(railId));
+        _;
+    }
+
+    modifier onlyRailClient(uint256 railId) {
+        require(rails[railId].from == msg.sender, Errors.OnlyRailClientAllowed(rails[railId].from, msg.sender));
+        _;
+    }
+
+    modifier onlyRailOperator(uint256 railId) {
+        require(
+            rails[railId].operator == msg.sender, Errors.OnlyRailOperatorAllowed(rails[railId].operator, msg.sender)
+        );
+        _;
+    }
+
+    modifier validateRailNotTerminated(uint256 railId) {
+        require(rails[railId].endEpoch == 0, Errors.RailAlreadyTerminated(railId));
+        _;
+    }
+
+    modifier validateRailTerminated(uint256 railId) {
+        require(isRailTerminated(rails[railId], railId), Errors.RailNotTerminated(railId));
+        _;
+    }
+
+    modifier validateNonZeroAddress(address addr, string memory varName) {
+        require(addr != address(0), Errors.ZeroAddressNotAllowed(varName));
+        _;
+    }
+
+    modifier validateSignerIsRecipient(address to) {
+        require(to == msg.sender, Errors.SignerMustBeMsgSender(msg.sender, to));
+        _;
+    }
+
+    modifier settleAccountLockupBeforeAndAfter(IERC20
token, address owner, bool settleFull) {
+        Account storage payer = accounts[token][owner];
+
+        // Before function execution
+        performSettlementCheck(token, owner, payer, settleFull, true);
+
+        _;
+
+        // After function execution
+        performSettlementCheck(token, owner, payer, settleFull, false);
+    }
+
+    modifier settleAccountLockupBeforeAndAfterForRail(uint256 railId, bool settleFull, uint256 oneTimePayment) {
+        Rail storage rail = rails[railId];
+
+        require(rail.from != address(0), Errors.RailInactiveOrSettled(railId));
+
+        Account storage payer = accounts[rail.token][rail.from];
+
+        require(
+            rail.lockupFixed >= oneTimePayment,
+            Errors.OneTimePaymentExceedsLockup(railId, rail.lockupFixed, oneTimePayment)
+        );
+
+        // Before function execution
+        performSettlementCheck(rail.token, rail.from, payer, settleFull, true);
+
+        // ---- EXECUTE FUNCTION
+        _;
+        // ---- FUNCTION EXECUTION COMPLETE
+
+        // After function execution
+        performSettlementCheck(rail.token, rail.from, payer, settleFull, false);
+    }
+
+    function performSettlementCheck(IERC20 token, address owner, Account storage payer, bool settleFull, bool isBefore)
+        internal
+    {
+        require(
+            payer.funds >= payer.lockupCurrent,
+            isBefore
+                ? "invariant failure: insufficient funds to cover lockup before function execution"
+                : "invariant failure: insufficient funds to cover lockup after function execution"
+        );
+
+        settleAccountLockup(token, owner, payer);
+
+        // Verify full settlement if required
+        // TODO: give the user feedback on what they need to deposit in their account to complete the operation.
+        require(
+            !settleFull || isAccountLockupFullySettled(payer),
+            isBefore
+                ? "payer's full account lockup was not met as a precondition of the requested operation"
+                : "payer's full account lockup was not met as a postcondition of the requested operation"
+        );
+
+        require(
+            payer.funds >= payer.lockupCurrent,
+            isBefore
+                ? "invariant failure: insufficient funds to cover lockup before function execution"
+                : "invariant failure: insufficient funds to cover lockup after function execution"
+        );
+    }
+
+    /// @notice Gets the current state of the target rail or reverts if the rail isn't active.
+    /// @param railId the ID of the rail.
+    function getRail(uint256 railId) external view validateRailActive(railId) returns (RailView memory) {
+        Rail storage rail = rails[railId];
+        return RailView({
+            token: rail.token,
+            from: rail.from,
+            to: rail.to,
+            operator: rail.operator,
+            validator: rail.validator,
+            paymentRate: rail.paymentRate,
+            lockupPeriod: rail.lockupPeriod,
+            lockupFixed: rail.lockupFixed,
+            settledUpTo: rail.settledUpTo,
+            endEpoch: rail.endEpoch,
+            commissionRateBps: rail.commissionRateBps,
+            serviceFeeRecipient: rail.serviceFeeRecipient
+        });
+    }
+
+    /// @notice Updates the approval status and allowances for an operator on behalf of the message sender.
+    /// @param token The ERC20 token address for which the approval is being set.
+    /// @param operator The address of the operator whose approval is being modified.
+    /// @param approved Whether the operator is approved (true) or not (false) to create new rails.
+    /// @param rateAllowance The maximum payment rate the operator can set across all rails created by the operator on behalf of the message sender. If this is less than the current payment rate, the operator will only be able to reduce rates until they fall below the target.
+    /// @param lockupAllowance The maximum amount of funds the operator can lock up on behalf of the message sender towards future payments.
If this exceeds the current total amount of funds locked towards future payments, the operator will only be able to reduce future lockup. + /// @param maxLockupPeriod The maximum number of epochs (blocks) the operator can lock funds for. If this is less than the current lockup period for a rail, the operator will only be able to reduce the lockup period. + function setOperatorApproval( + IERC20 token, + address operator, + bool approved, + uint256 rateAllowance, + uint256 lockupAllowance, + uint256 maxLockupPeriod + ) external nonReentrant validateNonZeroAddress(operator, "operator") { + _setOperatorApproval(token, operator, approved, rateAllowance, lockupAllowance, maxLockupPeriod); + } + + function _setOperatorApproval( + IERC20 token, + address operator, + bool approved, + uint256 rateAllowance, + uint256 lockupAllowance, + uint256 maxLockupPeriod + ) internal { + OperatorApproval storage approval = operatorApprovals[token][msg.sender][operator]; + + // Update approval status and allowances + approval.isApproved = approved; + approval.rateAllowance = rateAllowance; + approval.lockupAllowance = lockupAllowance; + approval.maxLockupPeriod = maxLockupPeriod; + + emit OperatorApprovalUpdated( + token, msg.sender, operator, approved, rateAllowance, lockupAllowance, maxLockupPeriod + ); + } + + /// @notice Increases the rate and lockup allowances for an existing operator approval. + /// @param token The ERC20 token address for which the approval is being increased. + /// @param operator The address of the operator whose allowances are being increased. + /// @param rateAllowanceIncrease The amount to increase the rate allowance by. + /// @param lockupAllowanceIncrease The amount to increase the lockup allowance by. + /// @custom:constraint Operator must already be approved. + function increaseOperatorApproval( + IERC20 token, + address operator, + uint256 rateAllowanceIncrease, + uint256 lockupAllowanceIncrease + ) external nonReentrant validateNonZeroAddress(operator, "operator") { + _increaseOperatorApproval(token, operator, rateAllowanceIncrease, lockupAllowanceIncrease); + } + + function _increaseOperatorApproval( + IERC20 token, + address operator, + uint256 rateAllowanceIncrease, + uint256 lockupAllowanceIncrease + ) internal { + OperatorApproval storage approval = operatorApprovals[token][msg.sender][operator]; + + // Operator must already be approved + require(approval.isApproved, Errors.OperatorNotApproved(msg.sender, operator)); + + // Directly update allowances + approval.rateAllowance += rateAllowanceIncrease; + approval.lockupAllowance += lockupAllowanceIncrease; + + emit OperatorApprovalUpdated( + token, + msg.sender, + operator, + approval.isApproved, + approval.rateAllowance, + approval.lockupAllowance, + approval.maxLockupPeriod + ); + } + + /// @notice Terminates a payment rail, preventing further payments after the rail's lockup period. After calling this method, the lockup period cannot be changed, and the rail's rate and fixed lockup may only be reduced. + /// @param railId The ID of the rail to terminate. + /// @custom:constraint Caller must be a rail client or operator. + /// @custom:constraint Rail must be active and not already terminated. + /// @custom:constraint If called by the client, the payer's account must be fully funded. + /// @custom:constraint If called by the operator, the payer's funding status isn't checked. 
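+    /// @dev Worked example (hypothetical numbers): if the payer's lockup is settled through epoch 1000 and the rail's lockupPeriod is 100, termination sets endEpoch = 1000 + 100 = 1100; the rail can still be settled up to and including epoch 1100, after which it can only be finalized.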
+ function terminateRail(uint256 railId) + external + validateRailActive(railId) + nonReentrant + validateRailNotTerminated(railId) + settleAccountLockupBeforeAndAfterForRail(railId, false, 0) + { + Rail storage rail = rails[railId]; + Account storage payer = accounts[rail.token][rail.from]; + + // Only client with fully settled lockup or operator can terminate a rail + require( + (msg.sender == rail.from && isAccountLockupFullySettled(payer)) || msg.sender == rail.operator, + Errors.NotAuthorizedToTerminateRail(railId, rail.from, rail.operator, msg.sender) + ); + + rail.endEpoch = payer.lockupLastSettledAt + rail.lockupPeriod; + + emit RailTerminated(railId, msg.sender, rail.endEpoch); + + // Notify the validator if one exists + if (rail.validator != address(0)) { + IValidator(rail.validator).railTerminated(railId, msg.sender, rail.endEpoch); + } + + // Remove the rail rate from account lockup rate but don't set rail rate to zero yet. + // The rail rate will be used to settle the rail and so we can't zero it yet. + // However, we remove the rail rate from the client lockup rate because we don't want to + // lock funds for the rail beyond `rail.endEpoch` as we're exiting the rail + // after that epoch. + require( + payer.lockupRate >= rail.paymentRate, + Errors.LockupRateInconsistent(railId, rail.from, rail.paymentRate, payer.lockupRate) + ); + payer.lockupRate -= rail.paymentRate; + + // Reduce operator rate allowance + OperatorApproval storage operatorApproval = operatorApprovals[rail.token][rail.from][rail.operator]; + updateOperatorRateUsage(operatorApproval, rail.paymentRate, 0); + } + + /// @notice Deposits tokens from the message sender's account into `to`'s account. + /// @param token The ERC20 token address to deposit. + /// @param to The address whose account will be credited. + /// @param amount The amount of tokens to deposit. + /// @custom:constraint The message sender must have approved this contract to spend the requested amount via the ERC-20 token (`token`). + function deposit(IERC20 token, address to, uint256 amount) + external + payable + nonReentrant + validateNonZeroAddress(to, "to") + settleAccountLockupBeforeAndAfter(token, to, false) + { + // Transfer tokens from sender to contract + if (token == NATIVE_TOKEN) { + require(msg.value == amount, Errors.MustSendExactNativeAmount(amount, msg.value)); + } else { + require(msg.value == 0, Errors.NativeTokenNotAccepted(msg.value)); + amount = transferIn(token, msg.sender, amount); + } + + accounts[token][to].funds += amount; + + emit DepositRecorded(token, msg.sender, to, amount); + } + + /** + * @notice Deposits tokens using permit (EIP-2612) approval in a single transaction. + * @param token The ERC20 token address to deposit. + * @param to The address whose account will be credited (must be the permit signer). + * @param amount The amount of tokens to deposit. + * @param deadline Permit deadline (timestamp). + * @param v,r,s Permit signature. 
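+     * @dev Usage sketch (hypothetical values): the signer `to` signs an EIP-2612 permit naming this contract as spender; anyone may then submit depositWithPermit(token, to, amount, deadline, v, r, s), which pulls the tokens from `to` and credits `to`'s internal account.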
+ */ + function depositWithPermit( + IERC20 token, + address to, + uint256 amount, + uint256 deadline, + uint8 v, + bytes32 r, + bytes32 s + ) external nonReentrant validateNonZeroAddress(to, "to") settleAccountLockupBeforeAndAfter(token, to, false) { + _depositWithPermit(token, to, amount, deadline, v, r, s); + } + + function _depositWithPermit( + IERC20 token, + address to, + uint256 amount, + uint256 deadline, + uint8 v, + bytes32 r, + bytes32 s + ) internal { + // Revert if token is address(0) as permit is not supported for native tokens + require(token != NATIVE_TOKEN, Errors.NativeTokenNotSupported()); + + // Use 'to' as the owner in permit call (the address that signed the permit) + IERC20Permit(address(token)).permit(to, address(this), amount, deadline, v, r, s); + + amount = transferIn(token, to, amount); + + accounts[token][to].funds += amount; + + emit DepositRecorded(token, to, to, amount); + } + + /** + * @notice Deposits tokens using permit (EIP-2612) approval in a single transaction, + * while also setting operator approval. + * @param token The ERC20 token address to deposit and for which the operator approval is being set. + * Note: The token must support EIP-2612 permit functionality. + * @param to The address whose account will be credited (must be the permit signer). + * @param amount The amount of tokens to deposit. + * @param deadline Permit deadline (timestamp). + * @param v,r,s Permit signature. + * @param operator The address of the operator whose approval is being modified. + * @param rateAllowance The maximum payment rate the operator can set across all rails created by the operator + * on behalf of the message sender. If this is less than the current payment rate, the operator will + * only be able to reduce rates until they fall below the target. + * @param lockupAllowance The maximum amount of funds the operator can lock up on behalf of the message sender + * towards future payments. If this exceeds the current total amount of funds locked towards future payments, + * the operator will only be able to reduce future lockup. + * @param maxLockupPeriod The maximum number of epochs (blocks) the operator can lock funds for. If this is less than + * the current lockup period for a rail, the operator will only be able to reduce the lockup period. + */ + function depositWithPermitAndApproveOperator( + IERC20 token, + address to, + uint256 amount, + uint256 deadline, + uint8 v, + bytes32 r, + bytes32 s, + address operator, + uint256 rateAllowance, + uint256 lockupAllowance, + uint256 maxLockupPeriod + ) + external + nonReentrant + validateNonZeroAddress(operator, "operator") + validateNonZeroAddress(to, "to") + validateSignerIsRecipient(to) + settleAccountLockupBeforeAndAfter(token, to, false) + { + _setOperatorApproval(token, operator, true, rateAllowance, lockupAllowance, maxLockupPeriod); + _depositWithPermit(token, to, amount, deadline, v, r, s); + } + + /** + * @notice Deposits tokens using permit (EIP-2612) approval in a single transaction, + * while also increasing operator approval allowances. + * @param token The ERC20 token address to deposit and for which the operator approval is being increased. + * Note: The token must support EIP-2612 permit functionality. + * @param to The address whose account will be credited (must be the permit signer). + * @param amount The amount of tokens to deposit. + * @param deadline Permit deadline (timestamp). + * @param v,r,s Permit signature. 
+     * @param operator The address of the operator whose allowances are being increased.
+     * @param rateAllowanceIncrease The amount to increase the rate allowance by.
+     * @param lockupAllowanceIncrease The amount to increase the lockup allowance by.
+     * @custom:constraint Operator must already be approved.
+     */
+    function depositWithPermitAndIncreaseOperatorApproval(
+        IERC20 token,
+        address to,
+        uint256 amount,
+        uint256 deadline,
+        uint8 v,
+        bytes32 r,
+        bytes32 s,
+        address operator,
+        uint256 rateAllowanceIncrease,
+        uint256 lockupAllowanceIncrease
+    )
+        external
+        nonReentrant
+        validateNonZeroAddress(operator, "operator")
+        validateNonZeroAddress(to, "to")
+        validateSignerIsRecipient(to)
+        settleAccountLockupBeforeAndAfter(token, to, false)
+    {
+        _increaseOperatorApproval(token, operator, rateAllowanceIncrease, lockupAllowanceIncrease);
+        _depositWithPermit(token, to, amount, deadline, v, r, s);
+    }
+
+    /**
+     * @notice Deposits tokens using an ERC-3009 authorization in a single transaction.
+     * @param token The ERC-3009-compliant token contract.
+     * @param to The address whose account within the contract will be credited.
+     * @param amount The amount of tokens to deposit.
+     * @param validAfter The timestamp after which the authorization is valid.
+     * @param validBefore The timestamp before which the authorization is valid.
+     * @param nonce A unique nonce for the authorization, used to prevent replay attacks.
+     * @param v,r,s The signature of the authorization.
+     */
+    function depositWithAuthorization(
+        IERC3009 token,
+        address to,
+        uint256 amount,
+        uint256 validAfter,
+        uint256 validBefore,
+        bytes32 nonce,
+        uint8 v,
+        bytes32 r,
+        bytes32 s
+    ) external nonReentrant validateNonZeroAddress(to, "to") settleAccountLockupBeforeAndAfter(token, to, false) {
+        _depositWithAuthorization(token, to, amount, validAfter, validBefore, nonce, v, r, s);
+    }
+
+    /**
+     * @notice Deposits tokens using an ERC-3009 authorization in a single transaction,
+     * while also setting operator approval.
+     * @param token The ERC-3009-compliant token contract.
+     * @param to The address whose account within the contract will be credited.
+     * @param amount The amount of tokens to deposit.
+     * @param validAfter The timestamp after which the authorization is valid.
+     * @param validBefore The timestamp before which the authorization is valid.
+     * @param nonce A unique nonce for the authorization, used to prevent replay attacks.
+     * @param v,r,s The signature of the authorization.
+     * @param operator The address of the operator whose approval is being modified.
+     * @param rateAllowance The maximum payment rate the operator can set across all rails created by the operator
+     * on behalf of the message sender. If this is less than the current payment rate, the operator will
+     * only be able to reduce rates until they fall below the target.
+     * @param lockupAllowance The maximum amount of funds the operator can lock up on behalf of the message sender
+     * towards future payments. If this exceeds the current total amount of funds locked towards future payments,
+     * the operator will only be able to reduce future lockup.
+     * @param maxLockupPeriod The maximum number of epochs (blocks) the operator can lock funds for. If this is less than
+     * the current lockup period for a rail, the operator will only be able to reduce the lockup period.
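+     * @dev Flow sketch (hypothetical values): `to` signs an ERC-3009 receiveWithAuthorization message naming this contract as the recipient and then submits this call itself (enforced by validateSignerIsRecipient), depositing the funds and approving `operator` in a single transaction.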
+     */
+    function depositWithAuthorizationAndApproveOperator(
+        IERC3009 token,
+        address to,
+        uint256 amount,
+        uint256 validAfter,
+        uint256 validBefore,
+        bytes32 nonce,
+        uint8 v,
+        bytes32 r,
+        bytes32 s,
+        address operator,
+        uint256 rateAllowance,
+        uint256 lockupAllowance,
+        uint256 maxLockupPeriod
+    )
+        external
+        nonReentrant
+        validateNonZeroAddress(operator, "operator")
+        validateNonZeroAddress(to, "to")
+        validateSignerIsRecipient(to)
+        settleAccountLockupBeforeAndAfter(token, to, false)
+    {
+        _setOperatorApproval(token, operator, true, rateAllowance, lockupAllowance, maxLockupPeriod);
+        _depositWithAuthorization(token, to, amount, validAfter, validBefore, nonce, v, r, s);
+    }
+
+    /**
+     * @notice Deposits tokens using an ERC-3009 authorization in a single transaction,
+     * while also increasing operator approval allowances.
+     * @param token The ERC-3009-compliant token contract.
+     * @param to The address whose account within the contract will be credited.
+     * @param amount The amount of tokens to deposit.
+     * @param validAfter The timestamp after which the authorization is valid.
+     * @param validBefore The timestamp before which the authorization is valid.
+     * @param nonce A unique nonce for the authorization, used to prevent replay attacks.
+     * @param v,r,s The signature of the authorization.
+     * @param operator The address of the operator whose allowances are being increased.
+     * @param rateAllowanceIncrease The amount to increase the rate allowance by.
+     * @param lockupAllowanceIncrease The amount to increase the lockup allowance by.
+     * @custom:constraint Operator must already be approved.
+     */
+    function depositWithAuthorizationAndIncreaseOperatorApproval(
+        IERC3009 token,
+        address to,
+        uint256 amount,
+        uint256 validAfter,
+        uint256 validBefore,
+        bytes32 nonce,
+        uint8 v,
+        bytes32 r,
+        bytes32 s,
+        address operator,
+        uint256 rateAllowanceIncrease,
+        uint256 lockupAllowanceIncrease
+    )
+        external
+        nonReentrant
+        validateNonZeroAddress(operator, "operator")
+        validateNonZeroAddress(to, "to")
+        validateSignerIsRecipient(to)
+        settleAccountLockupBeforeAndAfter(token, to, false)
+    {
+        _increaseOperatorApproval(token, operator, rateAllowanceIncrease, lockupAllowanceIncrease);
+        _depositWithAuthorization(token, to, amount, validAfter, validBefore, nonce, v, r, s);
+    }
+
+    function _depositWithAuthorization(
+        IERC3009 token,
+        address to,
+        uint256 amount,
+        uint256 validAfter,
+        uint256 validBefore,
+        bytes32 nonce,
+        uint8 v,
+        bytes32 r,
+        bytes32 s
+    ) internal {
+        // Revert if token is address(0) as authorization is not supported for native tokens
+        require(token != NATIVE_TOKEN, Errors.NativeTokenNotSupported());
+
+        // Use balance-before/balance-after accounting to correctly handle fee-on-transfer tokens
+        uint256 balanceBefore = token.balanceOf(address(this));
+
+        // Call ERC-3009 receiveWithAuthorization.
+        // This will transfer 'amount' from 'to' to this contract.
+        // The token contract itself verifies the signature.
+        token.receiveWithAuthorization(to, address(this), amount, validAfter, validBefore, nonce, v, r, s);
+
+        uint256 balanceAfter = token.balanceOf(address(this));
+        amount = balanceAfter - balanceBefore;
+
+        // Credit the beneficiary's internal account
+        accounts[token][to].funds += amount;
+
+        // Emit an event to record the deposit, marking it as made via an off-chain signature.
+        emit DepositRecorded(token, to, to, amount);
+    }
+
+    /// @notice Withdraws tokens from the caller's account to the caller's address, up to the amount of currently available tokens (the tokens not currently locked in rails).
+    /// @param token The ERC20 token address to withdraw.
+    /// @param amount The amount of tokens to withdraw.
+    function withdraw(IERC20 token, uint256 amount)
+        external
+        nonReentrant
+        settleAccountLockupBeforeAndAfter(token, msg.sender, true)
+    {
+        return withdrawToInternal(token, msg.sender, amount);
+    }
+
+    /// @notice Withdraws tokens (`token`) from the caller's account to `to`, up to the amount of currently available tokens (the tokens not currently locked in rails).
+    /// @param token The ERC20 token address to withdraw.
+    /// @param to The address to receive the withdrawn tokens.
+    /// @param amount The amount of tokens to withdraw.
+    function withdrawTo(IERC20 token, address to, uint256 amount)
+        external
+        nonReentrant
+        validateNonZeroAddress(to, "to")
+        settleAccountLockupBeforeAndAfter(token, msg.sender, true)
+    {
+        return withdrawToInternal(token, to, amount);
+    }
+
+    function withdrawToInternal(IERC20 token, address to, uint256 amount) internal {
+        Account storage account = accounts[token][msg.sender];
+        uint256 available = account.funds - account.lockupCurrent;
+        require(amount <= available, Errors.InsufficientUnlockedFunds(available, amount));
+        if (token == NATIVE_TOKEN) {
+            (bool success,) = payable(to).call{value: amount}("");
+            require(success, Errors.NativeTransferFailed(to, amount));
+        } else {
+            uint256 actual = transferOut(token, to, amount);
+            if (amount != actual) {
+                amount = actual;
+                require(amount <= available, Errors.InsufficientUnlockedFunds(available, amount));
+            }
+        }
+        account.funds -= amount;
+
+        emit WithdrawRecorded(token, msg.sender, to, amount);
+    }
+
+    function transferOut(IERC20 token, address to, uint256 amount) internal returns (uint256 actual) {
+        // handle fee-on-transfer and hidden-denominator tokens
+        uint256 balanceBefore = token.balanceOf(address(this));
+        token.safeTransfer(to, amount);
+        uint256 balanceAfter = token.balanceOf(address(this));
+        actual = balanceBefore - balanceAfter;
+    }
+
+    function transferIn(IERC20 token, address from, uint256 amount) internal returns (uint256 actual) {
+        // handle fee-on-transfer and hidden-denominator tokens
+        uint256 balanceBefore = token.balanceOf(address(this));
+        token.safeTransferFrom(from, address(this), amount);
+        uint256 balanceAfter = token.balanceOf(address(this));
+        actual = balanceAfter - balanceBefore;
+    }
+
+    /// @notice Create a new rail from `from` to `to`, operated by the caller.
+    /// @param token The ERC20 token address for payments on this rail.
+    /// @param from The client address (payer) for this rail.
+    /// @param to The recipient address for payments on this rail.
+    /// @param validator Optional address of a validator contract (can be address(0) for no validation).
+    /// @param commissionRateBps Optional operator commission in basis points (0-10000).
+    /// @param serviceFeeRecipient Address to receive operator commission.
+    /// @return The ID of the newly created rail.
+    /// @custom:constraint Caller must be approved as an operator by the client (from address).
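+    /// @dev Creation sketch (hypothetical addresses and values): once the client has called setOperatorApproval(token, operator, true, rateAllowance, lockupAllowance, maxLockupPeriod), the operator can call createRail(token, client, provider, address(0), 250, feeRecipient) to open a rail that pays `provider` with a 2.5% operator commission and no validator.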
+    function createRail(
+        IERC20 token,
+        address from,
+        address to,
+        address validator,
+        uint256 commissionRateBps,
+        address serviceFeeRecipient
+    ) external nonReentrant validateNonZeroAddress(from, "from") validateNonZeroAddress(to, "to") returns (uint256) {
+        address operator = msg.sender;
+
+        // Check if operator is approved - approval is required for rail creation
+        OperatorApproval storage approval = operatorApprovals[token][from][operator];
+        require(approval.isApproved, Errors.OperatorNotApproved(from, operator));
+
+        // Validate commission rate
+        require(
+            commissionRateBps <= COMMISSION_MAX_BPS, Errors.CommissionRateTooHigh(COMMISSION_MAX_BPS, commissionRateBps)
+        );
+
+        require(commissionRateBps == 0 || serviceFeeRecipient != address(0), Errors.MissingServiceFeeRecipient());
+
+        uint256 railId = _nextRailId++;
+
+        Rail storage rail = rails[railId];
+        rail.token = token;
+        rail.from = from;
+        rail.to = to;
+        rail.operator = operator;
+        rail.validator = validator;
+        rail.settledUpTo = block.number;
+        rail.endEpoch = 0;
+        rail.commissionRateBps = commissionRateBps;
+        rail.serviceFeeRecipient = serviceFeeRecipient;
+
+        // Record this rail in the payee's and payer's lists
+        payeeRails[token][to].push(railId);
+        payerRails[token][from].push(railId);
+
+        emit RailCreated(railId, from, to, token, operator, validator, serviceFeeRecipient, commissionRateBps);
+
+        return railId;
+    }
+
+    /// @notice Modifies the fixed lockup and lockup period of a rail.
+    /// - If the rail has already been terminated, the lockup period may not be altered and the fixed lockup may only be reduced.
+    /// - If the rail is active, the lockup may only be modified if the payer's account is fully funded and will remain fully funded after the operation.
+    /// @param railId The ID of the rail to modify.
+    /// @param period The new lockup period (in epochs/blocks).
+    /// @param lockupFixed The new fixed lockup amount.
+    /// @custom:constraint Caller must be the rail operator.
+    /// @custom:constraint Operator must have sufficient lockup allowance to cover any increases to the lockup period or the fixed lockup.
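+    /// @dev Arithmetic sketch (hypothetical numbers): for an active rail with paymentRate = 10, changing (period, lockupFixed) from (100, 500) to (200, 500) moves the payer's lockup from 500 + 10 * 100 = 1500 to 500 + 10 * 200 = 2500; the 1000-unit increase must fit within the operator's remaining lockupAllowance.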
+    function modifyRailLockup(uint256 railId, uint256 period, uint256 lockupFixed)
+        external
+        validateRailActive(railId)
+        onlyRailOperator(railId)
+        nonReentrant
+        settleAccountLockupBeforeAndAfterForRail(railId, false, 0)
+    {
+        Rail storage rail = rails[railId];
+        bool isTerminated = isRailTerminated(rail, railId);
+
+        uint256 oldLockupPeriod = rail.lockupPeriod;
+        uint256 oldLockupFixed = rail.lockupFixed;
+
+        if (isTerminated) {
+            modifyTerminatedRailLockup(rail, period, lockupFixed);
+        } else {
+            modifyNonTerminatedRailLockup(rail, period, lockupFixed);
+        }
+
+        emit RailLockupModified(railId, oldLockupPeriod, period, oldLockupFixed, lockupFixed);
+    }
+
+    function modifyTerminatedRailLockup(Rail storage rail, uint256 period, uint256 lockupFixed) internal {
+        require(
+            period == rail.lockupPeriod && lockupFixed <= rail.lockupFixed,
+            Errors.InvalidTerminatedRailModification(rail.lockupPeriod, rail.lockupFixed, period, lockupFixed)
+        );
+
+        Account storage payer = accounts[rail.token][rail.from];
+
+        // Calculate the fixed lockup reduction - this is the only change allowed for terminated rails
+        uint256 lockupReduction = rail.lockupFixed - lockupFixed;
+
+        // Update payer's lockup - subtract the exact reduction amount
+        require(
+            payer.lockupCurrent >= lockupReduction,
+            Errors.InsufficientCurrentLockup(rail.token, rail.from, payer.lockupCurrent, lockupReduction)
+        );
+        payer.lockupCurrent -= lockupReduction;
+
+        // Reduce operator lockup usage
+        OperatorApproval storage operatorApproval = operatorApprovals[rail.token][rail.from][rail.operator];
+        updateOperatorLockupUsage(operatorApproval, rail.lockupFixed, lockupFixed);
+
+        rail.lockupFixed = lockupFixed;
+    }
+
+    function modifyNonTerminatedRailLockup(Rail storage rail, uint256 period, uint256 lockupFixed) internal {
+        Account storage payer = accounts[rail.token][rail.from];
+
+        // Don't allow changing the lockup period or increasing the fixed lockup unless the payer's
+        // account is fully settled.
+        if (!isAccountLockupFullySettled(payer)) {
+            require(
+                period == rail.lockupPeriod,
+                Errors.LockupPeriodChangeNotAllowedDueToInsufficientFunds(
+                    rail.token, rail.from, rail.lockupPeriod, period
+                )
+            );
+
+            require(
+                lockupFixed <= rail.lockupFixed,
+                Errors.LockupFixedIncreaseNotAllowedDueToInsufficientFunds(
+                    rail.token, rail.from, rail.lockupFixed, lockupFixed
+                )
+            );
+        }
+
+        // Get operator approval
+        OperatorApproval storage operatorApproval = operatorApprovals[rail.token][rail.from][rail.operator];
+
+        // Check if period exceeds the max lockup period allowed for this operator
+        // Only enforce this constraint when increasing the period, not when decreasing
+        if (period > rail.lockupPeriod) {
+            require(
+                period <= operatorApproval.maxLockupPeriod,
+                Errors.LockupPeriodExceedsOperatorMaximum(
+                    rail.token, rail.operator, operatorApproval.maxLockupPeriod, period
+                )
+            );
+        }
+
+        // Calculate current (old) lockup.
+        uint256 oldLockup = rail.lockupFixed + (rail.paymentRate * rail.lockupPeriod);
+
+        // Calculate new lockup amount with new parameters
+        uint256 newLockup = lockupFixed + (rail.paymentRate * period);
+
+        require(
+            payer.lockupCurrent >= oldLockup,
+            Errors.CurrentLockupLessThanOldLockup(rail.token, rail.from, oldLockup, payer.lockupCurrent)
+        );
+
+        // We blindly update the payer's lockup. If they don't have enough funds to cover the new
+        // amount, we'll revert in the post-condition.
+ payer.lockupCurrent = payer.lockupCurrent - oldLockup + newLockup; + + updateOperatorLockupUsage(operatorApproval, oldLockup, newLockup); + + // Update rail lockup parameters + rail.lockupPeriod = period; + rail.lockupFixed = lockupFixed; + } + + /// @notice Modifies the payment rate and optionally makes a one-time payment. + /// - If the rail has already been terminated, one-time payments can be made and the rate may always be decreased (but never increased) regardless of the status of the payer's account. + /// - If the payer's account isn't fully funded and the rail is active (not terminated), the rail's payment rate may not be changed at all (increased or decreased). + /// - Regardless of the payer's account status, one-time payments will always go through provided that the rail has sufficient fixed lockup to cover the payment. + /// @param railId The ID of the rail to modify. + /// @param newRate The new payment rate (per epoch). This new rate applies starting the next epoch after the current one. + /// @param oneTimePayment Optional one-time payment amount to transfer immediately, taken out of the rail's fixed lockup. + /// @custom:constraint Caller must be the rail operator. + /// @custom:constraint Operator must have sufficient rate and lockup allowances for any increases. + function modifyRailPayment(uint256 railId, uint256 newRate, uint256 oneTimePayment) + external + nonReentrant + validateRailActive(railId) + onlyRailOperator(railId) + settleAccountLockupBeforeAndAfterForRail(railId, false, oneTimePayment) + { + Rail storage rail = rails[railId]; + Account storage payer = accounts[rail.token][rail.from]; + Account storage payee = accounts[rail.token][rail.to]; + + uint256 oldRate = rail.paymentRate; + bool isTerminated = isRailTerminated(rail, railId); + + // Validate rate changes based on rail state and account lockup + if (isTerminated) { + uint256 maxSettlementEpoch = maxSettlementEpochForTerminatedRail(rail, railId); + require( + block.number < maxSettlementEpoch, + Errors.CannotModifyTerminatedRailBeyondEndEpoch(railId, maxSettlementEpoch, block.number) + ); + + require(newRate <= oldRate, Errors.RateChangeNotAllowedOnTerminatedRail(railId)); + } else { + bool isSettled = isAccountLockupFullySettled(payer); + require( + isSettled || newRate == oldRate, + Errors.LockupNotSettledRateChangeNotAllowed(railId, rail.from, isSettled, oldRate, newRate) + ); + } + + // enqueuing rate change + enqueueRateChange(rail, oldRate, newRate); + + // Calculate the effective lockup period + uint256 effectiveLockupPeriod; + if (isTerminated) { + effectiveLockupPeriod = remainingEpochsForTerminatedRail(rail, railId); + } else { + effectiveLockupPeriod = rail.lockupPeriod; + } + + // Verify one-time payment doesn't exceed fixed lockup + require( + rail.lockupFixed >= oneTimePayment, + Errors.OneTimePaymentExceedsLockup(railId, rail.lockupFixed, oneTimePayment) + ); + + // Update the rail fixed lockup and payment rate + rail.lockupFixed = rail.lockupFixed - oneTimePayment; + rail.paymentRate = newRate; + + OperatorApproval storage operatorApproval = operatorApprovals[rail.token][rail.from][rail.operator]; + + // Update payer's lockup rate - only if the rail is not terminated + // for terminated rails, the payer's lockup rate is already updated during rail termination + if (!isTerminated) { + require( + payer.lockupRate >= oldRate, + Errors.LockupRateLessThanOldRate(railId, rail.from, oldRate, payer.lockupRate) + ); + payer.lockupRate = payer.lockupRate - oldRate + newRate; + 
updateOperatorRateUsage(operatorApproval, oldRate, newRate);
+        }
+
+        // Update payer's current lockup with effective lockup period calculation
+        // Remove old rate lockup for the effective period, add new rate lockup for the same period
+        payer.lockupCurrent =
+            payer.lockupCurrent - (oldRate * effectiveLockupPeriod) + (newRate * effectiveLockupPeriod) - oneTimePayment;
+
+        updateOperatorLockupUsage(operatorApproval, oldRate * effectiveLockupPeriod, newRate * effectiveLockupPeriod);
+
+        // Update operator allowance for one-time payment
+        updateOperatorAllowanceForOneTimePayment(operatorApproval, oneTimePayment);
+
+        emit RailRateModified(railId, oldRate, newRate);
+
+        // --- Process the One-Time Payment ---
+        processOneTimePayment(railId, payer, payee, rail, oneTimePayment);
+    }
+
+    function enqueueRateChange(Rail storage rail, uint256 oldRate, uint256 newRate) internal {
+        // If rate hasn't changed or rail is already settled up to current block, nothing to do
+        if (newRate == oldRate || rail.settledUpTo == block.number) {
+            return;
+        }
+
+        // Skip putting a 0-rate entry on an empty queue
+        if (oldRate == 0 && rail.rateChangeQueue.isEmpty()) {
+            rail.settledUpTo = block.number;
+            return;
+        }
+
+        // Only queue the previous rate once per epoch
+        if (rail.rateChangeQueue.isEmpty() || rail.rateChangeQueue.peekTail().untilEpoch != block.number) {
+            // For validated rails, we need to enqueue the old rate.
+            // This ensures that the old rate is applied up to and including the current block.
+            // The new rate will be applicable starting from the next block.
+            rail.rateChangeQueue.enqueue(oldRate, block.number);
+        }
+    }
+
+    function calculateAndPayFees(uint256 amount, IERC20 token, address serviceFeeRecipient, uint256 commissionRateBps)
+        internal
+        returns (uint256 netPayeeAmount, uint256 operatorCommission, uint256 fee)
+    {
+        // fee = ceil(amount * NETWORK_FEE_NUMERATOR / NETWORK_FEE_DENOMINATOR)
+        fee = (amount * NETWORK_FEE_NUMERATOR + (NETWORK_FEE_DENOMINATOR - 1)) / NETWORK_FEE_DENOMINATOR;
+        if (token == NATIVE_TOKEN) {
+            (bool success,) = BURN_ADDRESS.call{value: fee}("");
+            require(success, Errors.NativeTransferFailed(BURN_ADDRESS, fee));
+        } else {
+            accounts[token][address(this)].funds += fee;
+            // start fee auction if necessary
+            AuctionInfo storage auction = auctionInfo[token];
+            if (auction.startPrice == 0) {
+                auction.startPrice = FIRST_AUCTION_START_PRICE;
+                auction.startTime = uint168(block.timestamp);
+            }
+        }
+        amount -= fee;
+
+        // Calculate operator commission (if any) based on remaining amount
+        operatorCommission = 0;
+        if (commissionRateBps > 0) {
+            operatorCommission = (amount * commissionRateBps) / COMMISSION_MAX_BPS;
+        }
+
+        // Calculate net amount for payee
+        netPayeeAmount = amount - operatorCommission;
+
+        // Credit operator (if commission exists)
+        if (operatorCommission > 0) {
+            Account storage serviceFeeRecipientAccount = accounts[token][serviceFeeRecipient];
+            serviceFeeRecipientAccount.funds += operatorCommission;
+        }
+    }
+
+    function processOneTimePayment(
+        uint256 railId,
+        Account storage payer,
+        Account storage payee,
+        Rail storage rail,
+        uint256 oneTimePayment
+    ) internal {
+        if (oneTimePayment > 0) {
+            require(
+                payer.funds >= oneTimePayment,
+                Errors.InsufficientFundsForOneTimePayment(rail.token, rail.from, oneTimePayment, payer.funds)
+            );
+
+            // Transfer funds from payer (full amount)
+            payer.funds -= oneTimePayment;
+
+            // Calculate fees, pay operator commission and track platform fees
+            (uint256 netPayeeAmount, uint256 operatorCommission, uint256 networkFee) =
+                calculateAndPayFees(oneTimePayment,
rail.token, rail.serviceFeeRecipient, rail.commissionRateBps);
+
+            // Credit payee (net amount after fees)
+            payee.funds += netPayeeAmount;
+
+            emit RailOneTimePaymentProcessed(railId, netPayeeAmount, operatorCommission, networkFee);
+        }
+    }
+
+    /// @notice Settles payments for a terminated rail without validation. This may only be called by the rail client and after the terminated rail's max settlement epoch has passed. It's an escape hatch to unblock payments in an otherwise stuck rail (e.g., due to a buggy validator contract) and it always pays in full.
+    /// @param railId The ID of the rail to settle.
+    /// @return totalSettledAmount The total amount settled and transferred.
+    /// @return totalNetPayeeAmount The net amount credited to the payee after fees.
+    /// @return totalOperatorCommission The commission credited to the operator.
+    /// @return totalNetworkFee The fee accrued for burning FIL.
+    /// @return finalSettledEpoch The epoch up to which settlement was actually completed.
+    /// @return note Additional information about the settlement.
+    function settleTerminatedRailWithoutValidation(uint256 railId)
+        external
+        nonReentrant
+        validateRailActive(railId)
+        validateRailTerminated(railId)
+        onlyRailClient(railId)
+        settleAccountLockupBeforeAndAfterForRail(railId, false, 0)
+        returns (
+            uint256 totalSettledAmount,
+            uint256 totalNetPayeeAmount,
+            uint256 totalOperatorCommission,
+            uint256 totalNetworkFee,
+            uint256 finalSettledEpoch,
+            string memory note
+        )
+    {
+        // Verify the current epoch is greater than the max settlement epoch
+        uint256 maxSettleEpoch = maxSettlementEpochForTerminatedRail(rails[railId], railId);
+        require(
+            block.number > maxSettleEpoch,
+            Errors.CannotSettleTerminatedRailBeforeMaxEpoch(railId, maxSettleEpoch + 1, block.number)
+        );
+
+        return settleRailInternal(railId, maxSettleEpoch, true);
+    }
+
+    /// @notice Settles payments for a rail up to the specified epoch. Settlement may fail to reach the target epoch if either the client lacks the funds to pay up to the current epoch or the validator refuses to settle the entire requested range.
+    /// @param railId The ID of the rail to settle.
+    /// @param untilEpoch The epoch up to which to settle (must not exceed current block number).
+    /// @return totalSettledAmount The total amount settled and transferred.
+    /// @return totalNetPayeeAmount The net amount credited to the payee after fees.
+    /// @return totalOperatorCommission The commission credited to the operator.
+    /// @return totalNetworkFee The fee accrued to burn FIL.
+    /// @return finalSettledEpoch The epoch up to which settlement was actually completed.
+    /// @return note Additional information about the settlement (especially from validation).
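+    /// @dev Settlement sketch (hypothetical numbers): for an active rail settled up to epoch 900 with paymentRate = 10 and an empty rate-change queue, settleRail(railId, 1000) attempts to settle epochs 901 through 1000 for 10 * 100 = 1000 units; the target is capped at min(untilEpoch, payer.lockupLastSettledAt) for active rails, or at endEpoch for terminated ones.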
+ function settleRail(uint256 railId, uint256 untilEpoch) + public + nonReentrant + validateRailActive(railId) + settleAccountLockupBeforeAndAfterForRail(railId, false, 0) + returns ( + uint256 totalSettledAmount, + uint256 totalNetPayeeAmount, + uint256 totalOperatorCommission, + uint256 totalNetworkFee, + uint256 finalSettledEpoch, + string memory note + ) + { + return settleRailInternal(railId, untilEpoch, false); + } + + function settleRailInternal(uint256 railId, uint256 untilEpoch, bool skipValidation) + internal + returns ( + uint256 totalSettledAmount, + uint256 totalNetPayeeAmount, + uint256 totalOperatorCommission, + uint256 totalNetworkFee, + uint256 finalSettledEpoch, + string memory note + ) + { + require(untilEpoch <= block.number, Errors.CannotSettleFutureEpochs(railId, untilEpoch, block.number)); + + Rail storage rail = rails[railId]; + Account storage payer = accounts[rail.token][rail.from]; + + // Handle terminated and fully settled rails that are still not finalised + if (isRailTerminated(rail, railId) && rail.settledUpTo >= rail.endEpoch) { + finalizeTerminatedRail(railId, rail, payer); + return (0, 0, 0, 0, rail.settledUpTo, "rail fully settled and finalized"); + } + + // Calculate the maximum settlement epoch based on account lockup + uint256 maxSettlementEpoch; + if (!isRailTerminated(rail, railId)) { + maxSettlementEpoch = min(untilEpoch, payer.lockupLastSettledAt); + } else { + maxSettlementEpoch = min(untilEpoch, rail.endEpoch); + } + + uint256 startEpoch = rail.settledUpTo; + // Nothing to settle (already settled or zero-duration) + if (startEpoch >= maxSettlementEpoch) { + return ( + 0, + 0, + 0, + 0, + startEpoch, + string.concat("already settled up to epoch ", Strings.toString(maxSettlementEpoch)) + ); + } + + // Process settlement depending on whether rate changes exist + if (rail.rateChangeQueue.isEmpty()) { + (totalSettledAmount, totalNetPayeeAmount, totalOperatorCommission, totalNetworkFee, note) = + _settleSegment(railId, startEpoch, maxSettlementEpoch, rail.paymentRate, skipValidation); + + require( + rail.settledUpTo > startEpoch, Errors.NoProgressInSettlement(railId, startEpoch + 1, rail.settledUpTo) + ); + } else { + (totalSettledAmount, totalNetPayeeAmount, totalOperatorCommission, totalNetworkFee, note) = + _settleWithRateChanges(railId, rail.paymentRate, startEpoch, maxSettlementEpoch, skipValidation); + } + finalSettledEpoch = rail.settledUpTo; + note = checkAndFinalizeTerminatedRail(railId, rail, payer, note); + + emit RailSettled( + railId, totalSettledAmount, totalNetPayeeAmount, totalOperatorCommission, totalNetworkFee, finalSettledEpoch + ); + + return + (totalSettledAmount, totalNetPayeeAmount, totalOperatorCommission, totalNetworkFee, finalSettledEpoch, note); + } + + function checkAndFinalizeTerminatedRail( + uint256 railId, + Rail storage rail, + Account storage payer, + string memory regularNote + ) internal returns (string memory) { + // Check if rail is a terminated rail that's now fully settled + if (isRailTerminated(rail, railId) && rail.settledUpTo >= maxSettlementEpochForTerminatedRail(rail, railId)) { + finalizeTerminatedRail(railId, rail, payer); + return string.concat(regularNote, "terminated rail fully settled and finalized."); + } + + return regularNote; + } + + function finalizeTerminatedRail(uint256 railId, Rail storage rail, Account storage payer) internal { + // Reduce the lockup by the fixed amount + require( + payer.lockupCurrent >= rail.lockupFixed, + Errors.LockupInconsistencyDuringRailFinalization( + railId, 
rail.token, rail.from, rail.lockupFixed, payer.lockupCurrent + ) + ); + payer.lockupCurrent -= rail.lockupFixed; + + // Get operator approval for finalization update + OperatorApproval storage operatorApproval = operatorApprovals[rail.token][rail.from][rail.operator]; + // Calculate current (old) lockup. + uint256 oldLockup = rail.lockupFixed + (rail.paymentRate * rail.lockupPeriod); + + updateOperatorLockupUsage(operatorApproval, oldLockup, 0); + + // Zero out the rail to mark it as inactive + _zeroOutRail(rail); + + emit RailFinalized(railId); + } + + function _settleWithRateChanges( + uint256 railId, + uint256 currentRate, + uint256 startEpoch, + uint256 targetEpoch, + bool skipValidation + ) + internal + returns ( + uint256 totalSettledAmount, + uint256 totalNetPayeeAmount, + uint256 totalOperatorCommission, + uint256 totalNetworkFee, + string memory note + ) + { + Rail storage rail = rails[railId]; + RateChangeQueue.Queue storage rateQueue = rail.rateChangeQueue; + + SettlementState memory state = SettlementState({ + totalSettledAmount: 0, + totalNetPayeeAmount: 0, + totalOperatorCommission: 0, + totalNetworkFee: 0, + processedEpoch: startEpoch, + note: "" + }); + + // Process each segment until we reach the target epoch or hit an early exit condition + while (state.processedEpoch < targetEpoch) { + (uint256 segmentEndBoundary, uint256 segmentRate) = + _getNextSegmentBoundary(rateQueue, currentRate, state.processedEpoch, targetEpoch); + + // if current segment rate is zero, advance settlement to end of this segment and continue + if (segmentRate == 0) { + rail.settledUpTo = segmentEndBoundary; + state.processedEpoch = segmentEndBoundary; + + // Remove the processed rate change from the queue if it exists AND we have processed it entirely + if (!rateQueue.isEmpty() && segmentEndBoundary >= rateQueue.peek().untilEpoch) { + rateQueue.dequeue(); + } + + // Continue to next segment + continue; + } + + // Settle the current segment with potentially validated outcomes + ( + uint256 segmentSettledAmount, + uint256 segmentNetPayeeAmount, + uint256 segmentOperatorCommission, + uint256 segmentNetworkFee, + string memory validationNote + ) = _settleSegment(railId, state.processedEpoch, segmentEndBoundary, segmentRate, skipValidation); + + // If validator returned no progress, exit early without updating state + if (rail.settledUpTo <= state.processedEpoch) { + return ( + state.totalSettledAmount, + state.totalNetPayeeAmount, + state.totalOperatorCommission, + state.totalNetworkFee, + validationNote + ); + } + + // Add the settled amounts to our running totals + state.totalSettledAmount += segmentSettledAmount; + state.totalNetPayeeAmount += segmentNetPayeeAmount; + state.totalNetworkFee += segmentNetworkFee; + state.totalOperatorCommission += segmentOperatorCommission; + + // If validator partially settled the segment, exit early + if (rail.settledUpTo < segmentEndBoundary) { + return ( + state.totalSettledAmount, + state.totalNetPayeeAmount, + state.totalOperatorCommission, + state.totalNetworkFee, + validationNote + ); + } + + // Successfully settled full segment, update tracking values + state.processedEpoch = rail.settledUpTo; + state.note = validationNote; + + // Remove the processed rate change from the queue + if (!rateQueue.isEmpty() && segmentEndBoundary >= rateQueue.peek().untilEpoch) { + rateQueue.dequeue(); + } + } + + // We've successfully settled up to the target epoch + return ( + state.totalSettledAmount, + state.totalNetPayeeAmount, + state.totalOperatorCommission, + 
state.totalNetworkFee,
+            state.note
+        );
+    }
+
+    function _getNextSegmentBoundary(
+        RateChangeQueue.Queue storage rateQueue,
+        uint256 currentRate,
+        uint256 processedEpoch,
+        uint256 targetEpoch
+    ) internal view returns (uint256 segmentEndBoundary, uint256 segmentRate) {
+        // Default boundary is the target we want to reach
+        segmentEndBoundary = targetEpoch;
+        segmentRate = currentRate;
+
+        // If we have rate changes in the queue, use the rate from the next change
+        if (!rateQueue.isEmpty()) {
+            RateChangeQueue.RateChange memory nextRateChange = rateQueue.peek();
+
+            // Validate rate change queue consistency
+            require(
+                nextRateChange.untilEpoch >= processedEpoch,
+                Errors.InvalidRateChangeQueueState(nextRateChange.untilEpoch, processedEpoch)
+            );
+
+            // Boundary is the minimum of our target or the next rate change epoch
+            segmentEndBoundary = min(targetEpoch, nextRateChange.untilEpoch);
+            segmentRate = nextRateChange.rate;
+        }
+    }
+
+    function _settleSegment(uint256 railId, uint256 epochStart, uint256 epochEnd, uint256 rate, bool skipValidation)
+        internal
+        returns (
+            uint256 settledAmount,
+            uint256 netPayeeAmount,
+            uint256 operatorCommission,
+            uint256 networkFee,
+            string memory note
+        )
+    {
+        Rail storage rail = rails[railId];
+        Account storage payer = accounts[rail.token][rail.from];
+        Account storage payee = accounts[rail.token][rail.to];
+
+        if (rate == 0) {
+            rail.settledUpTo = epochEnd;
+            return (0, 0, 0, 0, "Zero rate payment rail");
+        }
+
+        // Calculate the default settlement values (without validation)
+        uint256 duration = epochEnd - epochStart;
+        settledAmount = rate * duration;
+        uint256 settledUntilEpoch = epochEnd;
+        note = "";
+
+        // If this rail has a validator and we're not skipping validation, let it decide on the final settlement amount
+        if (rail.validator != address(0) && !skipValidation) {
+            IValidator validator = IValidator(rail.validator);
+            IValidator.ValidationResult memory result =
+                validator.validatePayment(railId, settledAmount, epochStart, epochEnd, rate);
+
+            // Ensure validator doesn't settle beyond our segment's end boundary
+            require(
+                result.settleUpto <= epochEnd,
+                Errors.ValidatorSettledBeyondSegmentEnd(railId, epochEnd, result.settleUpto)
+            );
+            require(
+                result.settleUpto >= epochStart,
+                Errors.ValidatorSettledBeforeSegmentStart(railId, epochStart, result.settleUpto)
+            );
+
+            settledUntilEpoch = result.settleUpto;
+            settledAmount = result.modifiedAmount;
+            note = result.note;
+
+            // Ensure validator doesn't allow more payment than the maximum possible
+            // for the epochs they're confirming
+            uint256 maxAllowedAmount = rate * (settledUntilEpoch - epochStart);
+
+            require(
+                result.modifiedAmount <= maxAllowedAmount,
+                Errors.ValidatorModifiedAmountExceedsMaximum(railId, maxAllowedAmount, result.modifiedAmount)
+            );
+        }
+
+        // Verify payer has sufficient funds for the settlement
+        require(
+            payer.funds >= settledAmount,
+            Errors.InsufficientFundsForSettlement(rail.token, rail.from, payer.funds, settledAmount)
+        );
+
+        // Verify payer has sufficient lockup for the settlement
+        require(
+            payer.lockupCurrent >= settledAmount,
+            Errors.InsufficientLockupForSettlement(rail.token, rail.from, payer.lockupCurrent, settledAmount)
+        );
+        uint256 actualSettledDuration = settledUntilEpoch - epochStart;
+        uint256 requiredLockup = rate * actualSettledDuration;
+
+        // Transfer funds from payer (always pays full settled amount)
+        payer.funds -= settledAmount;
+
+        // Calculate fees, pay operator commission and track platform fees
+        (netPayeeAmount,
operatorCommission, networkFee) = + calculateAndPayFees(settledAmount, rail.token, rail.serviceFeeRecipient, rail.commissionRateBps); + + // Credit payee + payee.funds += netPayeeAmount; + + // Reduce lockup based on actual settled duration, not requested duration + // so that if the validator only settles for a partial duration, we only reduce the client lockup by the actual locked amount + // for that reduced duration. + payer.lockupCurrent -= requiredLockup; + + // Update the rail's settled epoch + rail.settledUpTo = settledUntilEpoch; + + // Invariant check: lockup should never exceed funds + require( + payer.lockupCurrent <= payer.funds, + Errors.LockupExceedsFundsInvariant(rail.token, rail.from, payer.lockupCurrent, payer.funds) + ); + } + + function isAccountLockupFullySettled(Account storage account) internal view returns (bool) { + return account.lockupLastSettledAt == block.number; + } + + // attempts to settle account lockup up to and including the current epoch + // returns the actual epoch upto and including which the lockup was settled + function settleAccountLockup(IERC20 token, address owner, Account storage account) internal returns (uint256) { + uint256 currentEpoch = block.number; + uint256 elapsedTime = currentEpoch - account.lockupLastSettledAt; + + if (elapsedTime <= 0) { + return account.lockupLastSettledAt; + } + + if (account.lockupRate == 0) { + account.lockupLastSettledAt = currentEpoch; + + // Emit event for zero rate case + emit AccountLockupSettled( + token, owner, account.lockupCurrent, account.lockupRate, account.lockupLastSettledAt + ); + return currentEpoch; + } + + uint256 additionalLockup = account.lockupRate * elapsedTime; + + // we have sufficient funds to cover account lockup upto and including the current epoch + if (account.funds >= account.lockupCurrent + additionalLockup) { + account.lockupCurrent += additionalLockup; + account.lockupLastSettledAt = currentEpoch; + } else { + require( + account.funds >= account.lockupCurrent, + Errors.LockupExceedsFundsInvariant(token, owner, account.lockupCurrent, account.funds) + ); + + // If insufficient, calculate the fractional epoch where funds became insufficient + uint256 availableFunds = account.funds - account.lockupCurrent; + + if (availableFunds == 0) { + return account.lockupLastSettledAt; + } + + // Round down to the nearest whole epoch + uint256 fractionalEpochs = availableFunds / account.lockupRate; + + // Apply lockup up to this point + account.lockupCurrent += account.lockupRate * fractionalEpochs; + account.lockupLastSettledAt = account.lockupLastSettledAt + fractionalEpochs; + } + + // event emission for all other cases where state changed + emit AccountLockupSettled(token, owner, account.lockupCurrent, account.lockupRate, account.lockupLastSettledAt); + return account.lockupLastSettledAt; + } + + function remainingEpochsForTerminatedRail(Rail storage rail, uint256 railId) + internal + view + validateRailTerminated(railId) + returns (uint256) + { + // If current block beyond end epoch, return 0 + if (block.number > rail.endEpoch) { + return 0; + } + + // Return the number of epochs (blocks) remaining until end epoch + return rail.endEpoch - block.number; + } + + function isRailTerminated(Rail storage rail, uint256 railId) internal view returns (bool) { + require(rail.from != address(0), Errors.RailInactiveOrSettled(railId)); + return rail.endEpoch > 0; + } + + // Get the final settlement epoch for a terminated rail + function maxSettlementEpochForTerminatedRail(Rail storage rail, uint256 
railId) + internal + view + validateRailTerminated(railId) + returns (uint256) + { + return rail.endEpoch; + } + + function _zeroOutRail(Rail storage rail) internal { + // IMPORTANT: Do not use `require(cond, Errors.Custom(peekTail()))` here, + // because Solidity evaluates all arguments before checking the condition. + // That would call `peekTail()` even if the queue is empty, causing an unwanted revert. + // Use `if (!cond) revert Errors.Custom(peekTail());` to safely handle the error. + // Check if queue is empty before clearing + if (!rail.rateChangeQueue.isEmpty()) { + revert Errors.RateChangeQueueNotEmpty(rail.rateChangeQueue.peekTail().untilEpoch); + } + + rail.token = IERC20(address(0)); + rail.from = address(0); // This now marks the rail as inactive + rail.to = address(0); + rail.operator = address(0); + rail.validator = address(0); + rail.paymentRate = 0; + rail.lockupFixed = 0; + rail.lockupPeriod = 0; + rail.settledUpTo = 0; + rail.endEpoch = 0; + rail.commissionRateBps = 0; + } + + function updateOperatorRateUsage(OperatorApproval storage approval, uint256 oldRate, uint256 newRate) internal { + if (newRate > oldRate) { + uint256 rateIncrease = newRate - oldRate; + // If the increase exceeds the allowance, revert + require( + approval.rateUsage + rateIncrease <= approval.rateAllowance, + Errors.OperatorRateAllowanceExceeded(approval.rateAllowance, approval.rateUsage + rateIncrease) + ); + approval.rateUsage += rateIncrease; + } else if (oldRate > newRate) { + uint256 rateDecrease = oldRate - newRate; + approval.rateUsage = approval.rateUsage > rateDecrease ? approval.rateUsage - rateDecrease : 0; + } + } + + function updateOperatorLockupUsage(OperatorApproval storage approval, uint256 oldLockup, uint256 newLockup) + internal + { + if (newLockup > oldLockup) { + uint256 lockupIncrease = newLockup - oldLockup; + // If the increase exceeds the allowance, revert + require( + approval.lockupUsage + lockupIncrease <= approval.lockupAllowance, + Errors.OperatorLockupAllowanceExceeded(approval.lockupAllowance, approval.lockupUsage + lockupIncrease) + ); + approval.lockupUsage += lockupIncrease; + } else if (oldLockup > newLockup) { + uint256 lockupDecrease = oldLockup - newLockup; + approval.lockupUsage = approval.lockupUsage > lockupDecrease ? approval.lockupUsage - lockupDecrease : 0; + } + } + + function updateOperatorAllowanceForOneTimePayment(OperatorApproval storage approval, uint256 oneTimePayment) + internal + { + if (oneTimePayment == 0) return; + + // Reduce lockup usage + approval.lockupUsage = approval.lockupUsage - oneTimePayment; + + // Reduce lockup allowance + approval.lockupAllowance = + oneTimePayment > approval.lockupAllowance ? 0 : approval.lockupAllowance - oneTimePayment; + } + + /** + * @notice Gets all rails where the given address is the payer for a specific token. + * @param payer The address of the payer to get rails for. + * @param token The token address to filter rails by. + * @param offset The offset to start from. + * @param limit Maximum number of entries to return + * @return results Array of RailInfo structs containing rail IDs and termination status. + * @return nextOffset The next offset to use for pagination. + * @return total The total number of rails. 
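+ * @dev Rails zeroed out after final settlement (rail.from == address(0)) are skipped, so a page may hold fewer than `limit` entries.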
+ */ + function getRailsForPayerAndToken(address payer, IERC20 token, uint256 offset, uint256 limit) + external + view + returns (RailInfo[] memory results, uint256 nextOffset, uint256 total) + { + return _getRailsForAddressAndToken(payerRails[token][payer], offset, limit); + } + + /** + * @notice Gets all rails where the given address is the payee for a specific token. + * @param payee The address of the payee to get rails for. + * @param token The token address to filter rails by. + * @param offset The offset to start from. + * @param limit Maximum number of entries to return + * @return results Array of RailInfo structs containing rail IDs and termination status. + * @return nextOffset The next offset to use for pagination. + * @return total The total number of rails. + */ + function getRailsForPayeeAndToken(address payee, IERC20 token, uint256 offset, uint256 limit) + external + view + returns (RailInfo[] memory results, uint256 nextOffset, uint256 total) + { + return _getRailsForAddressAndToken(payeeRails[token][payee], offset, limit); + } + + /** + * @dev Internal function to get rails for either a payer or payee. + * @param allRailIds The array of rail IDs to filter rails by. + * @param offset The offset to start from. + * @param limit Maximum number of entries to return + * @return results Array of RailInfo structs containing rail IDs and termination status. + * @return nextOffset The next offset to use for pagination. + * @return total The total number of rails. + */ + function _getRailsForAddressAndToken(uint256[] storage allRailIds, uint256 offset, uint256 limit) + internal + view + returns (RailInfo[] memory results, uint256 nextOffset, uint256 total) + { + uint256 railsLength = allRailIds.length; + if (limit == 0) limit = railsLength; + if (offset >= railsLength) return (new RailInfo[](0), railsLength, railsLength); + uint256 end = offset + limit > railsLength ? railsLength : offset + limit; + + results = new RailInfo[](end - offset); + uint256 resultCount = 0; + + for (uint256 i = offset; i < end; i++) { + uint256 railId = allRailIds[i]; + Rail storage rail = rails[railId]; + + // Skip non-existent rails + if (rail.from == address(0)) continue; + + // Add rail info to results + results[resultCount] = RailInfo({railId: railId, isTerminated: rail.endEpoch > 0, endEpoch: rail.endEpoch}); + resultCount++; + } + + // Truncate + assembly ("memory-safe") { + mstore(results, resultCount) + } + + return (results, end, railsLength); + } + + /// @notice Number of pending rate-change entries for a rail + function getRateChangeQueueSize(uint256 railId) external view returns (uint256) { + return rails[railId].rateChangeQueue.size(); + } + + /** + * @notice Gets information about an account - when it would go into debt, total balance, available balance, and lockup rate. + * @param token The token address to get account info for. + * @param owner The address of the account owner. + * @return fundedUntilEpoch The epoch at which the account would go into debt given current lockup rate and balance. + * @return currentFunds The current funds in the account. + * @return availableFunds The funds available after accounting for simulated lockup. + * @return currentLockupRate The current lockup rate per epoch. 
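+ * @dev Read-only simulation: the lockup is projected forward to the current epoch (or to fundedUntilEpoch, whichever is earlier) without mutating account state.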
+ */ + function getAccountInfoIfSettled(IERC20 token, address owner) + external + view + returns (uint256 fundedUntilEpoch, uint256 currentFunds, uint256 availableFunds, uint256 currentLockupRate) + { + Account storage account = accounts[token][owner]; + + currentFunds = account.funds; + currentLockupRate = account.lockupRate; + + uint256 currentEpoch = block.number; + + fundedUntilEpoch = account.lockupRate == 0 + ? type(uint256).max + : account.lockupLastSettledAt + (account.funds - account.lockupCurrent) / account.lockupRate; + uint256 simulatedSettledAt = fundedUntilEpoch >= currentEpoch ? currentEpoch : fundedUntilEpoch; + uint256 simulatedLockupCurrent = + account.lockupCurrent + account.lockupRate * (simulatedSettledAt - account.lockupLastSettledAt); + availableFunds = account.funds - simulatedLockupCurrent; + + return (fundedUntilEpoch, currentFunds, availableFunds, currentLockupRate); + } + + /** + * @notice Burn FIL to buy the network fees + * @param token Which kind of fees to buy + * @param recipient Receives the purchased fees + * @param requested Exact amount of fees transferred + */ + function burnForFees(IERC20 token, address recipient, uint256 requested) external payable nonReentrant { + Account storage fees = accounts[token][address(this)]; + uint256 available = fees.funds; + require(available >= requested, Errors.WithdrawAmountExceedsAccumulatedFees(token, available, requested)); + + AuctionInfo storage auction = auctionInfo[token]; + uint256 auctionPrice = uint256(auction.startPrice).decay(block.timestamp - auction.startTime); + require(msg.value >= auctionPrice, Errors.InsufficientNativeTokenForBurn(msg.value, auctionPrice)); + + auctionPrice *= Dutch.RESET_FACTOR; + if (auctionPrice > MAX_AUCTION_START_PRICE) { + auctionPrice = MAX_AUCTION_START_PRICE; + } + auction.startPrice = uint88(auctionPrice); + auction.startTime = uint168(block.timestamp); + + (bool success,) = BURN_ADDRESS.call{value: msg.value}(""); + require(success, Errors.NativeTransferFailed(BURN_ADDRESS, msg.value)); + + uint256 actual = transferOut(token, recipient, requested); + fees.funds = available - actual; + } +} + +function min(uint256 a, uint256 b) pure returns (uint256) { + return a < b ? a : b; +} diff --git a/service_contracts/src/payments/contracts/RateChangeQueue.sol b/service_contracts/src/payments/contracts/RateChangeQueue.sol new file mode 100644 index 00000000..d8a3c8e3 --- /dev/null +++ b/service_contracts/src/payments/contracts/RateChangeQueue.sol @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.27; + +library RateChangeQueue { + struct RateChange { + // The payment rate to apply + uint256 rate; + // The epoch up to and including which this rate will be used to settle a rail + uint256 untilEpoch; + } + + struct Queue { + uint256 head; + RateChange[] changes; + } + + function enqueue(Queue storage queue, uint256 rate, uint256 untilEpoch) internal { + queue.changes.push(RateChange(rate, untilEpoch)); + } + + function dequeue(Queue storage queue) internal returns (RateChange memory) { + RateChange[] storage c = queue.changes; + require(queue.head < c.length, "Queue is empty"); + RateChange memory change = c[queue.head]; + delete c[queue.head]; + + if (isEmpty(queue)) { + queue.head = 0; + // The array is already empty, waste no time zeroing it. 
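+ // (Every consumed slot was already cleared by `delete c[queue.head]` above, + // so only the array's length word remains set; the sstore below zeroes that + // one slot directly, leaving the queue fully empty with head reset to 0.)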
+ assembly { + sstore(c.slot, 0) + } + } else { + queue.head++; + } + + return change; + } + + function peek(Queue storage queue) internal view returns (RateChange memory) { + require(queue.head < queue.changes.length, "Queue is empty"); + return queue.changes[queue.head]; + } + + function peekTail(Queue storage queue) internal view returns (RateChange memory) { + require(queue.head < queue.changes.length, "Queue is empty"); + return queue.changes[queue.changes.length - 1]; + } + + function isEmpty(Queue storage queue) internal view returns (bool) { + return queue.head == queue.changes.length; + } + + function size(Queue storage queue) internal view returns (uint256) { + return queue.changes.length - queue.head; + } +} diff --git a/service_contracts/src/payments/contracts/interfaces/IERC3009.sol b/service_contracts/src/payments/contracts/interfaces/IERC3009.sol new file mode 100644 index 00000000..b37fab4c --- /dev/null +++ b/service_contracts/src/payments/contracts/interfaces/IERC3009.sol @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.27; + +import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; + +interface IERC3009 is IERC20 { + /** + * @notice Receive a transfer with a signed authorization from the payer + * @dev This has an additional check to ensure that the payee's address matches + * the caller of this function to prevent front-running attacks. + * @param from Payer's address (Authorizer) + * @param to Payee's address + * @param value Amount to be transferred + * @param validAfter The time after which this is valid (unix time) + * @param validBefore The time before which this is valid (unix time) + * @param nonce Unique nonce + * @param v v of the signature + * @param r r of the signature + * @param s s of the signature + */ + function receiveWithAuthorization( + address from, + address to, + uint256 value, + uint256 validAfter, + uint256 validBefore, + bytes32 nonce, + uint8 v, + bytes32 r, + bytes32 s + ) external; + + function authorizationState(address user, bytes32 nonce) external view returns (bool used); +} diff --git a/service_contracts/src/pdp/README.md b/service_contracts/src/pdp/README.md new file mode 100644 index 00000000..8978e577 --- /dev/null +++ b/service_contracts/src/pdp/README.md @@ -0,0 +1,90 @@ +# Provable Data Possession (PDP) - Service Contract and Tools + +## Table of Contents +- [Overview](#overview) +- [Build](#build) +- [Test](#test) +- [Deploy](#deploy) +- [Design Documentation](#design-documentation) +- [Security Audits](#security-audits) +- [Contributing](#contributing) +- [License](#license) + +## Overview +This project contains the implementation of the PDP service contract, auxiliary contracts, and development tools for the Provable Data Possession protocol. + +### Contracts + +The PDP service contract and the PDP verifier contracts are deployed on Filecoin Mainnet and Calibration Testnet. + +> Disclaimer: ⚠️ These contracts are still in beta testing and might be upgraded for bug fixes and/or improvements. Please use with caution for production environments. 
⚠️ + + #### v2.1.0 + + **Mainnet:** + - PDPVerifier Implementation: [0xf2a47b4136Ab2dfB6FA67Fb85c7a031f56F6f024](https://filfox.info/en/address/0xf2a47b4136Ab2dfB6FA67Fb85c7a031f56F6f024) + - PDPVerifier Proxy: [0x31D87004Fc0C38D897725978e51BC06163603E5A](https://filfox.info/en/address/0x31D87004Fc0C38D897725978e51BC06163603E5A) + + **Calibnet:** + - PDPVerifier Implementation: [0x648E8D9103Ec91542DcD0045A65Ef9679F886e82](https://calibration.filfox.info/en/address/0x648E8D9103Ec91542DcD0045A65Ef9679F886e82) + - PDPVerifier Proxy: [0x445238Eca6c6aB8Dff1Aa6087d9c05734D22f137](https://calibration.filfox.info/en/address/0x445238Eca6c6aB8Dff1Aa6087d9c05734D22f137) + + #### v1.1.0 + + ⚠️ Deprecation Notice: The following contracts will be deprecated (read-only) by the end of August. Please upgrade/migrate to the latest contracts as soon as they are available. + + **Mainnet** + - [PDP Verifier](https://github.com/FilOzone/pdp/blob/main/src/PDPVerifier.sol): [0x9C65E8E57C98cCc040A3d825556832EA1e9f4Df6](https://filfox.info/en/address/0x9C65E8E57C98cCc040A3d825556832EA1e9f4Df6) + - [PDP Service](https://github.com/FilOzone/pdp/blob/main/src/SimplePDPService.sol): [0x805370387fA5Bd8053FD8f7B2da4055B9a4f8019](https://filfox.info/en/address/0x805370387fA5Bd8053FD8f7B2da4055B9a4f8019) + + **Calibration Testnet** + - [PDP Verifier](https://github.com/FilOzone/pdp/blob/main/src/PDPVerifier.sol): [0x5A23b7df87f59A291C26A2A1d684AD03Ce9B68DC](https://calibration.filfox.info/en/address/0x5A23b7df87f59A291C26A2A1d684AD03Ce9B68DC) + - [PDP Service](https://github.com/FilOzone/pdp/blob/main/src/SimplePDPService.sol): [0x6170dE2b09b404776197485F3dc6c968Ef948505](https://calibration.filfox.info/en/address/0x6170dE2b09b404776197485F3dc6c968Ef948505). Note: this deployment has a proving period of every 30 minutes instead of every day. + + ## Build + Depends on [Foundry](https://github.com/foundry-rs/foundry) for development. + ``` + make build + ``` + ## Test + ``` + make test + ``` + ## Deploy + To deploy on devnet, run: + ``` + make deploy-devnet + ``` + + To deploy on calibrationnet, run: + ``` + make deploy-calibnet + ``` + + To deploy on mainnet, run: + ``` + make deploy-mainnet + ``` + + ## Design Documentation + For comprehensive design details, see [DESIGN.md](docs/design.md). + + ## Security Audits + The PDP contracts have undergone the following security audits: + - [Zellic Security Audit (April 2025)](https://github.com/Zellic/publications/blob/master/Proof%20of%20Data%20Possession%20-%20Zellic%20Audit%20Report.pdf) + + ## Contributing + Contributions are welcome! Please follow these contribution guidelines: + + ### Implementing Changes + Follow the existing code style and patterns. Write clear, descriptive commit messages and include relevant tests for new features or bug fixes. Keep changes focused and well-encapsulated, and document any new functionality. + + ### Pull Requests + Use descriptive PR titles that summarize the change. Include a clear description of the changes and their purpose, reference any related issues, and ensure all tests pass and code is properly linted. + + ### Getting Help + If you need assistance, feel free to open an issue or reach out to the maintainers of the contract in the #fil-pdp channel on [Filecoin Slack](https://filecoin.io/slack).
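For a quick sanity check against the deployed proxies listed above, a minimal on-chain reader can go through a hand-trimmed interface. This is a sketch for illustration only; `IPDPVerifierView` is not a published artifact of this repo, just the three getters it calls:

```solidity
// SPDX-License-Identifier: Apache-2.0 OR MIT
pragma solidity ^0.8.20;

interface IPDPVerifierView {
    function VERSION() external view returns (string memory);
    function getNextDataSetId() external view returns (uint64);
    function dataSetLive(uint256 setId) external view returns (bool);
}

contract VerifierProbe {
    IPDPVerifierView public immutable verifier;

    constructor(address proxy) {
        // e.g. the PDPVerifier Proxy address for the target network (see above)
        verifier = IPDPVerifierView(proxy);
    }

    function summary(uint256 setId) external view returns (string memory version, uint64 nextId, bool live) {
        version = verifier.VERSION();
        nextId = verifier.getNextDataSetId();
        live = verifier.dataSetLive(setId);
    }
}
```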
+ +## License + +Dual-licensed under [MIT](https://github.com/filecoin-project/lotus/blob/master/LICENSE-MIT) + [Apache 2.0](https://github.com/filecoin-project/lotus/blob/master/LICENSE-APACHE) diff --git a/service_contracts/src/pdp/contracts/BitOps.sol b/service_contracts/src/pdp/contracts/BitOps.sol new file mode 100644 index 00000000..ddc8a3fb --- /dev/null +++ b/service_contracts/src/pdp/contracts/BitOps.sol @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.20; + +// Library for bit operations. +library BitOps { + // Calculates the number of leading zeros in binary representation. + function clz(uint256 x) internal pure returns (uint256) { + uint256 n = 256; + uint256 y; + + y = x >> 128; + if (y != 0) { + n -= 128; + x = y; + } + y = x >> 64; + if (y != 0) { + n -= 64; + x = y; + } + y = x >> 32; + if (y != 0) { + n -= 32; + x = y; + } + y = x >> 16; + if (y != 0) { + n -= 16; + x = y; + } + y = x >> 8; + if (y != 0) { + n -= 8; + x = y; + } + y = x >> 4; + if (y != 0) { + n -= 4; + x = y; + } + y = x >> 2; + if (y != 0) { + n -= 2; + x = y; + } + y = x >> 1; + if (y != 0) return n - 2; + return n - x; + } + + int256 constant MASK128 = 0x00000000000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF; + int256 constant MASK64 = 0x0000000000000000FFFFFFFFFFFFFFFF0000000000000000FFFFFFFFFFFFFFFF; + int256 constant MASK32 = 0x00000000FFFFFFFF00000000FFFFFFFF00000000FFFFFFFF00000000FFFFFFFF; + int256 constant MASK16 = 0x0000FFFF0000FFFF0000FFFF0000FFFF0000FFFF0000FFFF0000FFFF0000FFFF; + int256 constant MASK8 = 0x00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF; + int256 constant MASK4 = 0x0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F; + int256 constant MASK2 = 0x3333333333333333333333333333333333333333333333333333333333333333; + int256 constant MASK1 = 0x5555555555555555555555555555555555555555555555555555555555555555; + + // Calculates the number of trailing zeros in binary representation. + function ctz(uint256 x) internal pure returns (uint256) { + require(x <= uint256(type(int256).max), "Input exceeds maximum int256 value"); + uint256 c = 256; + + int256 v = -int256(x); + v = v & int256(x); + if (v != 0) { + c--; + } + if (v & MASK128 != 0) { + c -= 128; + } + if (v & MASK64 != 0) { + c -= 64; + } + if (v & MASK32 != 0) { + c -= 32; + } + if (v & MASK16 != 0) { + c -= 16; + } + if (v & MASK8 != 0) { + c -= 8; + } + if (v & MASK4 != 0) { + c -= 4; + } + if (v & MASK2 != 0) { + c -= 2; + } + if (v & MASK1 != 0) { + c -= 1; + } + + return c; + } +} diff --git a/service_contracts/src/pdp/contracts/Cids.sol b/service_contracts/src/pdp/contracts/Cids.sol new file mode 100644 index 00000000..918bca55 --- /dev/null +++ b/service_contracts/src/pdp/contracts/Cids.sol @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.20; + +library Cids { + uint256 public constant COMMP_LEAF_SIZE = 32; + // 0x01 0x55 0x9120 + // (cidv1) (raw) (fr32-sha2-256-trunc254-padded-binary-tree) + bytes4 public constant COMMP_V2_PREFIX = hex"01559120"; + + // A helper struct for events + getter functions to display digests as CommpV2 CIDs + struct Cid { + bytes data; + } + + // Returns the last 32 bytes of a CID payload as a bytes32. 
+ function digestFromCid(Cid memory cid) internal pure returns (bytes32) { + require(cid.data.length >= 32, "Cid data is too short"); + bytes memory dataSlice = new bytes(32); + for (uint256 i = 0; i < 32; i++) { + dataSlice[i] = cid.data[cid.data.length - 32 + i]; + } + return bytes32(dataSlice); + } + + // Returns the height of the tree from the CID. + function heightFromCid(Cid memory cid) internal pure returns (uint8) { + require(cid.data.length >= 33, "Cid data is too short"); + return uint8(cid.data[cid.data.length - 32 - 1]); + } + + // Checks that CID is CommPv2 and decomposes it into its components. + // See: https://github.com/filecoin-project/FIPs/blob/master/FRCs/frc-0069.md + function validateCommPv2(Cid memory cid) + internal + pure + returns (uint256 padding, uint8 height, uint256 digestOffset) + { + for (uint256 i = 0; i < 4; i++) { + if (cid.data[i] != COMMP_V2_PREFIX[i]) { + revert("Cid must be CommPv2"); + } + } + uint256 offset = 4; + uint256 mhLength; + (mhLength, offset) = _readUvarint(cid.data, offset); + require(mhLength >= 34, "CommPv2 multihash length must be at least 34"); + if (mhLength + offset != cid.data.length) { + revert("CommPv2 multihash length does not match data length"); + } + (padding, offset) = _readUvarint(cid.data, offset); + + height = uint8(cid.data[offset]); + offset++; + + return (padding, height, offset); + } + + // isPaddingExcessive checks if the padding size exceeds the size of the tree + function isPaddingExcessive(uint256 padding, uint8 height) internal pure returns (bool) { + return (128 * padding) / 127 >= 1 << (height + 5); + } + + // pieceSize returns the size of the data defined by the amount of padding and the height of the tree. + // This is after Fr32 expansion; if even 1 bit of actual data spills into a padding byte, the whole + // byte is counted as data, because the padding is specified pre-expansion. + function pieceSize(uint256 padding, uint8 height) internal pure returns (uint256) { + // 2^height * 32 - padding + // we can fold the 32 into height + return (1 << (uint256(height) + 5)) - (128 * padding) / 127; + } + + // leafCount returns the number of 32b leaves that contain any amount of data + function leafCount(uint256 padding, uint8 height) internal pure returns (uint256) { + // the padding itself is # of bytes before Fr32 expansion + // so we need to expand it by factor 128/127 + // then we divide by 32 with a floor to get the number of leaves that are fully padding + uint256 paddingLeafs = (128 * padding) / 127 >> 5; + // 1 << height is the total number of leaves in the tree + return (1 << uint256(height)) - paddingLeafs; + } + + // Helper function writing a uvarint into data starting at offset + // returns the offset advanced past the bytes written + function _writeUvarint(bytes memory data, uint256 offset, uint256 value) internal pure returns (uint256) { + while (value >= 0x80) { + data[offset++] = bytes1(uint8(value) | 0x80); + value >>= 7; + } + data[offset++] = bytes1(uint8(value)); + return offset; + } + + // Helper function to calculate the length of a uvarint + function _uvarintLength(uint256 value) internal pure returns (uint256) { + uint256 length = 1; + while (value >= 0x80) { + value >>= 7; + length++; + } + return length; + } + + // Helper function reading uvarints <= 256 bits + // returns (value, offset) with offset advanced to the following byte + function _readUvarint(bytes memory data, uint256 offset) internal pure returns (uint256, uint256) { + uint256 i = 0; + uint256 value = uint256(uint8(data[offset])) & 0x7F; + while (data[offset + i] >= 0x80) { + i++; + value = value | uint256(uint8(data[offset + i]) & 0x7F) << (i * 7); + } + i++; + return (value, offset + i); + } +} diff --git a/service_contracts/src/pdp/contracts/ERC1967Proxy.sol b/service_contracts/src/pdp/contracts/ERC1967Proxy.sol new file mode 100644 index 00000000..e9296f3c --- /dev/null +++ 
b/service_contracts/src/pdp/contracts/ERC1967Proxy.sol @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts (last updated v5.0.0) (proxy/ERC1967/ERC1967Proxy.sol) + +pragma solidity ^0.8.20; + +import {ERC1967Proxy} from "@openzeppelin/contracts/proxy/ERC1967/ERC1967Proxy.sol"; + +// This contract is a thin wrapper around the OpenZeppelin ERC1967Proxy. +// It exists for ease of deployment of PDP contracts. +contract MyERC1967Proxy is ERC1967Proxy { + constructor(address _implementation, bytes memory _data) ERC1967Proxy(_implementation, _data) {} +} diff --git a/service_contracts/src/pdp/contracts/Fees.sol b/service_contracts/src/pdp/contracts/Fees.sol new file mode 100644 index 00000000..c9dcf241 --- /dev/null +++ b/service_contracts/src/pdp/contracts/Fees.sol @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.20; + +/// @title PDPFees +/// @notice A library for calculating fees for the PDP. +library PDPFees { + uint256 constant ATTO_FIL = 1; + uint256 constant FIL_TO_ATTO_FIL = 1e18 * ATTO_FIL; + + // 0.1 FIL + uint256 constant SYBIL_FEE = FIL_TO_ATTO_FIL / 10; + + // 2 USD/TiB/month is the current reward earned by Storage Providers + uint256 constant ESTIMATED_MONTHLY_TIB_STORAGE_REWARD_USD = 2; + // 1% of reward per period + uint256 constant PROOF_FEE_PERCENTAGE = 1; + // 4% of reward per period for gas limit left bound + uint256 constant GAS_LIMIT_LEFT_PERCENTAGE = 4; + // 5% of reward per period for gas limit right bound + uint256 constant GAS_LIMIT_RIGHT_PERCENTAGE = 5; + uint256 constant USD_DECIMALS = 1e18; + + // 1 TiB in bytes (2^40) + uint256 constant TIB_IN_BYTES = 2 ** 40; + // Number of epochs per month (30 days * 2880 epochs per day) + uint256 constant EPOCHS_PER_MONTH = 86400; + + /// @notice Calculates the proof fee based on the gas fee and the raw size of the proof. + /// @param estimatedGasFee The estimated gas fee in AttoFIL. + /// @param filUsdPrice The price of FIL in USD. + /// @param filUsdPriceExpo The exponent of the price of FIL in USD. + /// @param rawSize The raw size of the proof in bytes. + /// @param nProofEpochs The number of proof epochs. + /// @return proof fee in AttoFIL + /// @dev The proof fee is calculated based on the gas fee and the raw size of the proof. + /// The fee is 1% of the projected reward and is reduced in case the gas cost of proving is too high.
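+ /// Concretely, with projected reward R for the period: the fee is R/100 while the + /// estimated gas fee stays below 4% of R; between 4% and 5% of R it tapers linearly + /// as (5% of R) minus the gas fee; and it drops to zero once gas reaches 5% of R.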
+ function proofFeeWithGasFeeBound( + uint256 estimatedGasFee, // in AttoFIL + uint64 filUsdPrice, + int32 filUsdPriceExpo, + uint256 rawSize, + uint256 nProofEpochs + ) internal view returns (uint256) { + require( + estimatedGasFee > 0 || block.basefee == 0, "failed to validate: estimated gas fee must be greater than 0" + ); + require(filUsdPrice > 0, "failed to validate: AttoFIL price must be greater than 0"); + require(rawSize > 0, "failed to validate: raw size must be greater than 0"); + + // Calculate reward per epoch per byte (in AttoFIL) + uint256 rewardPerEpochPerByte; + if (filUsdPriceExpo >= 0) { + rewardPerEpochPerByte = (ESTIMATED_MONTHLY_TIB_STORAGE_REWARD_USD * FIL_TO_ATTO_FIL) + / (TIB_IN_BYTES * EPOCHS_PER_MONTH * filUsdPrice * (10 ** uint32(filUsdPriceExpo))); + } else { + rewardPerEpochPerByte = ( + ESTIMATED_MONTHLY_TIB_STORAGE_REWARD_USD * FIL_TO_ATTO_FIL * (10 ** uint32(-filUsdPriceExpo)) + ) / (TIB_IN_BYTES * EPOCHS_PER_MONTH * filUsdPrice); + } + + // Calculate total reward for the proving period + uint256 estimatedCurrentReward = rewardPerEpochPerByte * nProofEpochs * rawSize; + + // Calculate gas limits + uint256 gasLimitRight = (estimatedCurrentReward * GAS_LIMIT_RIGHT_PERCENTAGE) / 100; + uint256 gasLimitLeft = (estimatedCurrentReward * GAS_LIMIT_LEFT_PERCENTAGE) / 100; + + if (estimatedGasFee >= gasLimitRight) { + return 0; // No proof fee if gas fee is above right limit + } else if (estimatedGasFee >= gasLimitLeft) { + return gasLimitRight - estimatedGasFee; // Partial discount on proof fee + } else { + return (estimatedCurrentReward * PROOF_FEE_PERCENTAGE) / 100; + } + } + + // sybil fee adds cost to adding state to the pdp verifier contract to prevent + // wasteful state growth. 0.1 FIL + function sybilFee() internal pure returns (uint256) { + return SYBIL_FEE; + } +} diff --git a/service_contracts/src/pdp/contracts/IPDPProvingSchedule.sol b/service_contracts/src/pdp/contracts/IPDPProvingSchedule.sol new file mode 100644 index 00000000..cc6ee1dd --- /dev/null +++ b/service_contracts/src/pdp/contracts/IPDPProvingSchedule.sol @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.20; + +/// @title IPDPProvingSchedule +/// @notice Interface for PDP Service SLA specifications +interface IPDPProvingSchedule { + /** + * @notice Returns PDP configuration values + * @return maxProvingPeriod Maximum number of epochs between proofs + * @return challengeWindow Number of epochs for the challenge window + * @return challengesPerProof Number of challenges required per proof + * @return initChallengeWindowStart Initial challenge window start for new data sets assuming proving period starts now + */ + function getPDPConfig() + external + view + returns ( + uint64 maxProvingPeriod, + uint256 challengeWindow, + uint256 challengesPerProof, + uint256 initChallengeWindowStart + ); + + /** + * @notice Returns the start of the next challenge window for a data set + * @param setId The ID of the data set + * @return The block number when the next challenge window starts + */ + function nextPDPChallengeWindowStart(uint256 setId) external view returns (uint256); +} diff --git a/service_contracts/src/pdp/contracts/PDPVerifier.sol b/service_contracts/src/pdp/contracts/PDPVerifier.sol new file mode 100644 index 00000000..5c78a808 --- /dev/null +++ b/service_contracts/src/pdp/contracts/PDPVerifier.sol @@ -0,0 +1,848 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.20; + +import {BitOps} from "./BitOps.sol"; +import {Cids} from 
"./Cids.sol"; +import {MerkleVerify} from "./Proofs.sol"; +import {PDPFees} from "./Fees.sol"; +import {ERC1967Utils} from "@openzeppelin/contracts/proxy/ERC1967/ERC1967Utils.sol"; +import {Initializable} from "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol"; +import {UUPSUpgradeable} from "@openzeppelin/contracts-upgradeable/proxy/utils/UUPSUpgradeable.sol"; +import {OwnableUpgradeable} from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol"; +import {IPyth} from "@pythnetwork/pyth-sdk-solidity/IPyth.sol"; +import {PythStructs} from "@pythnetwork/pyth-sdk-solidity/PythStructs.sol"; +import {IPDPTypes} from "./interfaces/IPDPTypes.sol"; + +/// @title PDPListener +/// @notice Interface for PDP Service applications managing data storage. +/// @dev This interface exists to provide an extensible hook for applications to use the PDP verification contract +/// to implement data storage applications. +interface PDPListener { + function dataSetCreated(uint256 dataSetId, address creator, bytes calldata extraData) external; + function dataSetDeleted(uint256 dataSetId, uint256 deletedLeafCount, bytes calldata extraData) external; + function piecesAdded(uint256 dataSetId, uint256 firstAdded, Cids.Cid[] memory pieceData, bytes calldata extraData) + external; + function piecesScheduledRemove(uint256 dataSetId, uint256[] memory pieceIds, bytes calldata extraData) external; + // Note: extraData not included as proving messages conceptually always originate from the SP + function possessionProven(uint256 dataSetId, uint256 challengedLeafCount, uint256 seed, uint256 challengeCount) + external; + function nextProvingPeriod(uint256 dataSetId, uint256 challengeEpoch, uint256 leafCount, bytes calldata extraData) + external; + /// @notice Called when data set storage provider is changed in PDPVerifier. 
+ function storageProviderChanged( + uint256 dataSetId, + address oldStorageProvider, + address newStorageProvider, + bytes calldata extraData + ) external; +} + +uint256 constant NEW_DATA_SET_SENTINEL = 0; + +contract PDPVerifier is Initializable, UUPSUpgradeable, OwnableUpgradeable { + // Constants + address public constant BURN_ACTOR = 0xff00000000000000000000000000000000000063; + uint256 public constant LEAF_SIZE = 32; + uint256 public constant MAX_PIECE_SIZE_LOG2 = 50; + uint256 public constant MAX_ENQUEUED_REMOVALS = 2000; + address public constant RANDOMNESS_PRECOMPILE = 0xfE00000000000000000000000000000000000006; + uint256 public constant EXTRA_DATA_MAX_SIZE = 2048; + uint256 public constant SECONDS_IN_DAY = 86400; + IPyth public constant PYTH = IPyth(0xA2aa501b19aff244D90cc15a4Cf739D2725B5729); + + // FIL/USD price feed query ID on the Pyth network + bytes32 public constant FIL_USD_PRICE_FEED_ID = 0x150ac9b959aee0051e4091f0ef5216d941f590e1c5e7f91cf7635b5c11628c0e; + uint256 public constant NO_CHALLENGE_SCHEDULED = 0; + uint256 public constant NO_PROVEN_EPOCH = 0; + + // Events + event DataSetCreated(uint256 indexed setId, address indexed storageProvider); + event StorageProviderChanged( + uint256 indexed setId, address indexed oldStorageProvider, address indexed newStorageProvider + ); + event DataSetDeleted(uint256 indexed setId, uint256 deletedLeafCount); + event DataSetEmpty(uint256 indexed setId); + + event PiecesAdded(uint256 indexed setId, uint256[] pieceIds, Cids.Cid[] pieceCids); + event PiecesRemoved(uint256 indexed setId, uint256[] pieceIds); + + event ProofFeePaid(uint256 indexed setId, uint256 fee, uint64 price, int32 expo); + + event PossessionProven(uint256 indexed setId, IPDPTypes.PieceIdAndOffset[] challenges); + event NextProvingPeriod(uint256 indexed setId, uint256 challengeEpoch, uint256 leafCount); + + // Types + // State fields + /* + A data set is the metadata required for tracking data for proof of possession. + It maintains a list of CIDs of data to be proven and metadata needed to + add and remove data to the set and prove possession efficiently. + + ** logical structure of the data set ** + + struct DataSet { + Cid[] pieces; + uint256[] leafCounts; + uint256[] sumTree; + uint256 leafCount; + address storageProvider; + address proposedStorageProvider; + uint64 nextPieceId; + uint64 nextChallengeEpoch; + address listenerAddress; + uint256 challengeRange; + uint256[] enqueuedRemovals; + } + + ** the PDP Verifier contract tracks many possible data sets ** + + DataSet[] dataSets + + To implement this logical structure in the solidity data model we have + arrays tracking the singleton fields and two dimensional arrays + tracking linear data set data. The first index is the data set id + and the second index, if any, is the index of the data in the array. + + Invariant: pieceCids.length == pieceLeafCount.length == sumTreeCounts.length + */ + + // Network epoch delay between last proof of possession and next + // randomness sampling for challenge generation. + // + // The purpose of this delay is to prevent SPs from biasing randomness by running forking attacks. + // Given a small enough challengeFinality an SP can run several trials of challenge sampling and + // fork around samples that don't suit them, grinding the challenge randomness. + // For the Filecoin L1, a safe value is 150, following the same analysis that sets 150 epochs + // between PoRep precommit and PoRep provecommit phases.
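+ // (At Filecoin's 30-second epochs, 150 epochs is roughly 75 minutes.)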
+ // + // We keep this around for future portability to a variety of environments with different assumptions + // behind their challenge randomness sampling methods. + uint256 challengeFinality; + + // TODO PERF: https://github.com/FILCAT/pdp/issues/16#issuecomment-2329838769 + uint64 nextDataSetId; + // The CID of each piece. Pieces and all their associated data can be appended and removed but not modified. + mapping(uint256 => mapping(uint256 => Cids.Cid)) pieceCids; + // The leaf count of each piece + mapping(uint256 => mapping(uint256 => uint256)) pieceLeafCounts; + // The sum tree array for finding the piece id of a given leaf index. + mapping(uint256 => mapping(uint256 => uint256)) sumTreeCounts; + mapping(uint256 => uint256) nextPieceId; + // The number of leaves (32 byte chunks) in the data set when tallying up all pieces. + // This includes the leaves in pieces that have been added but are not yet eligible for proving. + mapping(uint256 => uint256) dataSetLeafCount; + // The epoch for which randomness is sampled for challenge generation while proving possession this proving period. + mapping(uint256 => uint256) nextChallengeEpoch; + // Each data set notifies a configurable listener to implement extensible applications managing data storage. + mapping(uint256 => address) dataSetListener; + // The first index that is not challenged in prove possession calls this proving period. + // Updated to include the latest added leaves when starting the next proving period. + mapping(uint256 => uint256) challengeRange; + // Enqueued piece ids for removal when starting the next proving period + mapping(uint256 => uint256[]) scheduledRemovals; + // storage provider of data set is initialized upon creation to create message sender + // storage provider has exclusive permission to add and remove pieces and delete the data set + mapping(uint256 => address) storageProvider; + mapping(uint256 => address) dataSetProposedStorageProvider; + mapping(uint256 => uint256) dataSetLastProvenEpoch; + + // Methods + + /// @custom:oz-upgrades-unsafe-allow constructor + constructor() { + _disableInitializers(); + } + + function initialize(uint256 _challengeFinality) public initializer { + __Ownable_init(msg.sender); + __UUPSUpgradeable_init(); + challengeFinality = _challengeFinality; + nextDataSetId = 1; // Data sets start at 1 + } + + string public constant VERSION = "2.1.0"; + + event ContractUpgraded(string version, address implementation); + + function migrate() external onlyOwner reinitializer(2) { + emit ContractUpgraded(VERSION, ERC1967Utils.getImplementation()); + } + + function _authorizeUpgrade(address newImplementation) internal override onlyOwner {} + + function burnFee(uint256 amount) internal { + require(msg.value >= amount, "Incorrect fee amount"); + (bool success,) = BURN_ACTOR.call{value: amount}(""); + require(success, "Burn failed"); + } + + // Returns the current challenge finality value + function getChallengeFinality() public view returns (uint256) { + return challengeFinality; + } + + // Returns the next data set ID + function getNextDataSetId() public view returns (uint64) { + return nextDataSetId; + } + + // Returns false if the data set is 1) not yet created 2) deleted + function dataSetLive(uint256 setId) public view returns (bool) { + return setId < nextDataSetId && storageProvider[setId] != address(0); + } + + // Returns false if the data set is not live or if the piece id is 1) not yet created 2) deleted + function pieceLive(uint256 setId, uint256 pieceId) public view returns (bool) 
{ + return dataSetLive(setId) && pieceId < nextPieceId[setId] && pieceLeafCounts[setId][pieceId] > 0; + } + + // Returns false if the piece is not live or if the piece id is not yet in challenge range + function pieceChallengable(uint256 setId, uint256 pieceId) public view returns (bool) { + uint256 top = 256 - BitOps.clz(nextPieceId[setId]); + IPDPTypes.PieceIdAndOffset memory ret = findOnePieceId(setId, challengeRange[setId] - 1, top); + require( + ret.offset == pieceLeafCounts[setId][ret.pieceId] - 1, + "challengeRange -1 should align with the very last leaf of a piece" + ); + return pieceLive(setId, pieceId) && pieceId <= ret.pieceId; + } + + // Returns the leaf count of a data set + function getDataSetLeafCount(uint256 setId) public view returns (uint256) { + require(dataSetLive(setId), "Data set not live"); + return dataSetLeafCount[setId]; + } + + // Returns the next piece ID for a data set + function getNextPieceId(uint256 setId) public view returns (uint256) { + require(dataSetLive(setId), "Data set not live"); + return nextPieceId[setId]; + } + + // Returns the next challenge epoch for a data set + function getNextChallengeEpoch(uint256 setId) public view returns (uint256) { + require(dataSetLive(setId), "Data set not live"); + return nextChallengeEpoch[setId]; + } + + // Returns the listener address for a data set + function getDataSetListener(uint256 setId) public view returns (address) { + require(dataSetLive(setId), "Data set not live"); + return dataSetListener[setId]; + } + + // Returns the storage provider of a data set and the proposed storage provider if any + function getDataSetStorageProvider(uint256 setId) public view returns (address, address) { + require(dataSetLive(setId), "Data set not live"); + return (storageProvider[setId], dataSetProposedStorageProvider[setId]); + } + + function getDataSetLastProvenEpoch(uint256 setId) public view returns (uint256) { + require(dataSetLive(setId), "Data set not live"); + return dataSetLastProvenEpoch[setId]; + } + + // Returns the piece CID for a given data set and piece ID + function getPieceCid(uint256 setId, uint256 pieceId) public view returns (Cids.Cid memory) { + require(dataSetLive(setId), "Data set not live"); + return pieceCids[setId][pieceId]; + } + + // Returns the piece leaf count for a given data set and piece ID + function getPieceLeafCount(uint256 setId, uint256 pieceId) public view returns (uint256) { + require(dataSetLive(setId), "Data set not live"); + return pieceLeafCounts[setId][pieceId]; + } + + // Returns the index of the most recently added leaf that is challengeable in the current proving period + function getChallengeRange(uint256 setId) public view returns (uint256) { + require(dataSetLive(setId), "Data set not live"); + return challengeRange[setId]; + } + + // Returns the piece ids of the pieces scheduled for removal at the start of the next proving period + function getScheduledRemovals(uint256 setId) public view returns (uint256[] memory) { + require(dataSetLive(setId), "Data set not live"); + uint256[] storage removals = scheduledRemovals[setId]; + uint256[] memory result = new uint256[](removals.length); + for (uint256 i = 0; i < removals.length; i++) { + result[i] = removals[i]; + } + return result; + } + + /** + * @notice Returns the count of active pieces (non-zero leaf count) for a data set + * @param setId The data set ID + * @return activeCount The number of active pieces in the data set + */ + function getActivePieceCount(uint256 setId) public view returns (uint256 activeCount) { + 
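// NOTE: linear scan over every piece id ever allocated; best suited to + // off-chain reads (eth_call), since on-chain callers pay O(nextPieceId) gas. +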
require(dataSetLive(setId), "Data set not live"); + + uint256 maxPieceId = nextPieceId[setId]; + for (uint256 i = 0; i < maxPieceId; i++) { + if (pieceLeafCounts[setId][i] > 0) { + activeCount++; + } + } + } + + /** + * @notice Returns active pieces (non-zero leaf count) for a data set with pagination + * @param setId The data set ID + * @param offset Starting index for pagination (0-based) + * @param limit Maximum number of pieces to return + * @return pieces Array of active piece CIDs + * @return pieceIds Array of corresponding piece IDs + * @return rawSizes Array of raw sizes for each piece (in bytes) + * @return hasMore True if there are more pieces beyond this page + */ + function getActivePieces(uint256 setId, uint256 offset, uint256 limit) + public + view + returns (Cids.Cid[] memory pieces, uint256[] memory pieceIds, uint256[] memory rawSizes, bool hasMore) + { + require(dataSetLive(setId), "Data set not live"); + require(limit > 0, "Limit must be greater than 0"); + + // Single pass: collect data and check for more + uint256 maxPieceId = nextPieceId[setId]; + + // Over-allocate arrays to limit size + Cids.Cid[] memory tempPieces = new Cids.Cid[](limit); + uint256[] memory tempPieceIds = new uint256[](limit); + uint256[] memory tempRawSizes = new uint256[](limit); + + uint256 activeCount = 0; + uint256 resultIndex = 0; + + for (uint256 i = 0; i < maxPieceId; i++) { + if (pieceLeafCounts[setId][i] > 0) { + if (activeCount >= offset && resultIndex < limit) { + tempPieces[resultIndex] = pieceCids[setId][i]; + tempPieceIds[resultIndex] = i; + tempRawSizes[resultIndex] = pieceLeafCounts[setId][i] * 32; + resultIndex++; + } else if (activeCount >= offset + limit) { + // Found at least one more active piece beyond our limit + hasMore = true; + break; + } + activeCount++; + } + } + + // Handle case where we found fewer items than limit + if (resultIndex == 0) { + // No items found + return (new Cids.Cid[](0), new uint256[](0), new uint256[](0), false); + } else if (resultIndex < limit) { + // Found fewer items than limit - need to resize arrays + pieces = new Cids.Cid[](resultIndex); + pieceIds = new uint256[](resultIndex); + rawSizes = new uint256[](resultIndex); + + for (uint256 i = 0; i < resultIndex; i++) { + pieces[i] = tempPieces[i]; + pieceIds[i] = tempPieceIds[i]; + rawSizes[i] = tempRawSizes[i]; + } + } else { + // Found exactly limit items - use temp arrays directly + pieces = tempPieces; + pieceIds = tempPieceIds; + rawSizes = tempRawSizes; + } + } + + // storage provider proposes new storage provider. 
If the storage provider proposes themself, delete any outstanding proposed storage provider + function proposeDataSetStorageProvider(uint256 setId, address newStorageProvider) public { + require(dataSetLive(setId), "Data set not live"); + address currentStorageProvider = storageProvider[setId]; + require( + currentStorageProvider == msg.sender, "Only the current storage provider can propose a new storage provider" + ); + if (currentStorageProvider == newStorageProvider) { + // If the storage provider proposes themself, delete any outstanding proposed storage provider + delete dataSetProposedStorageProvider[setId]; + } else { + dataSetProposedStorageProvider[setId] = newStorageProvider; + } + } + + function claimDataSetStorageProvider(uint256 setId, bytes calldata extraData) public { + require(dataSetLive(setId), "Data set not live"); + require( + dataSetProposedStorageProvider[setId] == msg.sender, + "Only the proposed storage provider can claim storage provider role" + ); + address oldStorageProvider = storageProvider[setId]; + storageProvider[setId] = msg.sender; + delete dataSetProposedStorageProvider[setId]; + emit StorageProviderChanged(setId, oldStorageProvider, msg.sender); + address listenerAddr = dataSetListener[setId]; + if (listenerAddr != address(0)) { + PDPListener(listenerAddr).storageProviderChanged(setId, oldStorageProvider, msg.sender, extraData); + } + } + + // Removes a data set. Must be called by the storage provider. + function deleteDataSet(uint256 setId, bytes calldata extraData) public { + require(extraData.length <= EXTRA_DATA_MAX_SIZE, "Extra data too large"); + if (setId >= nextDataSetId) { + revert("data set id out of bounds"); + } + + require(storageProvider[setId] == msg.sender, "Only the storage provider can delete data sets"); + uint256 deletedLeafCount = dataSetLeafCount[setId]; + dataSetLeafCount[setId] = 0; + storageProvider[setId] = address(0); + nextChallengeEpoch[setId] = 0; + dataSetLastProvenEpoch[setId] = NO_PROVEN_EPOCH; + + address listenerAddr = dataSetListener[setId]; + if (listenerAddr != address(0)) { + PDPListener(listenerAddr).dataSetDeleted(setId, deletedLeafCount, extraData); + } + emit DataSetDeleted(setId, deletedLeafCount); + } + + // Creates a data set and adds pieces. When setId == NEW_DATA_SET_SENTINEL, this creates a new data set with the + // provided piece data and listenerAddr, and expects extraData to be abi.encode(bytes createPayload, bytes addPayload). + // When adding to an existing set, pass listenerAddr == address(0) and the setId of the live data set.
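The two call shapes are easy to get wrong from the caller side (sentinel vs. live id, listener vs. zero address, fee vs. no value), so here is a caller-side sketch; `AddPiecesCaller` and its parameters are illustrative assumptions, not part of this diff:

```solidity
// SPDX-License-Identifier: Apache-2.0 OR MIT
pragma solidity ^0.8.20;

import {Cids} from "./Cids.sol";
import {PDPVerifier, NEW_DATA_SET_SENTINEL} from "./PDPVerifier.sol";

contract AddPiecesCaller {
    // New data set: sentinel id, a real listener, sybil fee attached, and
    // extraData packing the create and add payloads together.
    function createWithPieces(
        PDPVerifier verifier,
        address listener,
        Cids.Cid[] calldata pieces,
        bytes calldata createPayload,
        bytes calldata addPayload
    ) external payable returns (uint256 newSetId) {
        newSetId = verifier.addPieces{value: msg.value}(
            NEW_DATA_SET_SENTINEL, listener, pieces, abi.encode(createPayload, addPayload)
        );
    }

    // Existing data set: zero listener address and zero value are required.
    function appendPieces(PDPVerifier verifier, uint256 setId, Cids.Cid[] calldata pieces, bytes calldata extraData)
        external
        returns (uint256 firstAdded)
    {
        firstAdded = verifier.addPieces(setId, address(0), pieces, extraData);
    }

    // Accept the overpayment refund the verifier returns on creation.
    receive() external payable {}
}
```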
+ function addPieces(uint256 setId, address listenerAddr, Cids.Cid[] calldata pieceData, bytes calldata extraData) + public + payable + returns (uint256) + { + if (setId == NEW_DATA_SET_SENTINEL) { + (bytes memory createPayload, bytes memory addPayload) = abi.decode(extraData, (bytes, bytes)); + + require(createPayload.length <= EXTRA_DATA_MAX_SIZE, "Extra data too large"); + uint256 sybilFee = PDPFees.sybilFee(); + require(msg.value >= sybilFee, "sybil fee not met"); + burnFee(sybilFee); + + require(listenerAddr != address(0), "listener required for new dataset"); + uint256 newSetId = nextDataSetId++; + storageProvider[newSetId] = msg.sender; + dataSetListener[newSetId] = listenerAddr; + + if (listenerAddr != address(0)) { + PDPListener(listenerAddr).dataSetCreated(newSetId, msg.sender, createPayload); + } + emit DataSetCreated(newSetId, msg.sender); + + // Add pieces to the newly created data set (if any) + if (pieceData.length > 0) { + _addPiecesToDataSet(newSetId, pieceData, addPayload); + } + + // Return the refund at the end to avoid any possible re-entrancy issues. + if (msg.value > sybilFee) { + (bool success,) = msg.sender.call{value: msg.value - sybilFee}(""); + require(success, "Transfer failed."); + } + + return newSetId; + } else { + // Adding to an existing set; no fee should be sent and listenerAddr must be zero + require(listenerAddr == address(0), "listener must be zero for existing dataset"); + require(msg.value == 0, "no fee on add to existing dataset"); + + require(dataSetLive(setId), "Data set not live"); + require(storageProvider[setId] == msg.sender, "Only the storage provider can add pieces"); + + return _addPiecesToDataSet(setId, pieceData, extraData); + } + } + + // Internal function to add pieces to a data set and handle events/listeners + function _addPiecesToDataSet(uint256 setId, Cids.Cid[] calldata pieceData, bytes memory extraData) + internal + returns (uint256 firstAdded) + { + require(extraData.length <= EXTRA_DATA_MAX_SIZE, "Extra data too large"); + uint256 nPieces = pieceData.length; + require(nPieces > 0, "Must add at least one piece"); + + firstAdded = nextPieceId[setId]; + uint256[] memory pieceIds = new uint256[](nPieces); + Cids.Cid[] memory pieceCidsAdded = new Cids.Cid[](nPieces); + + for (uint256 i = 0; i < nPieces; i++) { + addOnePiece(setId, i, pieceData[i]); + pieceIds[i] = firstAdded + i; + pieceCidsAdded[i] = pieceData[i]; + } + + emit PiecesAdded(setId, pieceIds, pieceCidsAdded); + + address listenerAddr = dataSetListener[setId]; + if (listenerAddr != address(0)) { + PDPListener(listenerAddr).piecesAdded(setId, firstAdded, pieceData, extraData); + } + } + + error IndexedError(uint256 idx, string msg); + + function addOnePiece(uint256 setId, uint256 callIdx, Cids.Cid calldata piece) internal returns (uint256) { + (uint256 padding, uint8 height,) = Cids.validateCommPv2(piece); + if (Cids.isPaddingExcessive(padding, height)) { + revert IndexedError(callIdx, "Padding is too large"); + } + if (height > MAX_PIECE_SIZE_LOG2) { + revert IndexedError(callIdx, "Piece size must be less than 2^50"); + } + + uint256 leafCount = Cids.leafCount(padding, height); + uint256 pieceId = nextPieceId[setId]++; + sumTreeAdd(setId, leafCount, pieceId); + pieceCids[setId][pieceId] = piece; + pieceLeafCounts[setId][pieceId] = leafCount; + dataSetLeafCount[setId] += leafCount; + return pieceId; + } + + // schedulePieceDeletions schedules deletion of a batch of pieces from a data set for the start of the next + // proving period. It must be called by the storage provider.
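For intuition on the leaf accounting `addOnePiece` relies on, a small worked sketch (contract name and numbers are illustrative; it assumes, per the `Cids` comments, that the leaf count is the 2^height total minus fully-padding leaves):

```solidity
// SPDX-License-Identifier: Apache-2.0 OR MIT
pragma solidity ^0.8.20;

import {Cids} from "./Cids.sol";

contract CidSizeSketch {
    // A padding-free tree of height 30: 2^30 leaves of 32 bytes = 2^35 bytes.
    function noPadding() external pure returns (uint256 size, uint256 leaves) {
        size = Cids.pieceSize(0, 30); // 34_359_738_368 bytes (32 GiB)
        leaves = Cids.leafCount(0, 30); // 1_073_741_824 leaves
    }

    // Pre-expansion padding shrinks both: it is scaled by 128/127 before being
    // subtracted, and each whole 32-byte run of expanded padding drops a leaf.
    function withPadding(uint256 padding) external pure returns (uint256 size, uint256 leaves) {
        require(!Cids.isPaddingExcessive(padding, 30), "padding covers the whole tree");
        size = Cids.pieceSize(padding, 30);
        leaves = Cids.leafCount(padding, 30);
    }
}
```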
+ function schedulePieceDeletions(uint256 setId, uint256[] calldata pieceIds, bytes calldata extraData) public { + require(extraData.length <= EXTRA_DATA_MAX_SIZE, "Extra data too large"); + require(dataSetLive(setId), "Data set not live"); + require(storageProvider[setId] == msg.sender, "Only the storage provider can schedule removal of pieces"); + require( + pieceIds.length + scheduledRemovals[setId].length <= MAX_ENQUEUED_REMOVALS, + "Too many removals wait for next proving period to schedule" + ); + + for (uint256 i = 0; i < pieceIds.length; i++) { + require(pieceIds[i] < nextPieceId[setId], "Can only schedule removal of existing pieces"); + scheduledRemovals[setId].push(pieceIds[i]); + } + + address listenerAddr = dataSetListener[setId]; + if (listenerAddr != address(0)) { + PDPListener(listenerAddr).piecesScheduledRemove(setId, pieceIds, extraData); + } + } + + // Verifies and records that the provider proved possession of the + // data set Merkle pieces at some epoch. The challenge seed is determined + // by the epoch of the previous proof of possession. + function provePossession(uint256 setId, IPDPTypes.Proof[] calldata proofs) public payable { + uint256 initialGas = gasleft(); + uint256 nProofs = proofs.length; + require(msg.sender == storageProvider[setId], "Only the storage provider can prove possession"); + require(nProofs > 0, "empty proof"); + { + uint256 challengeEpoch = nextChallengeEpoch[setId]; + require(block.number >= challengeEpoch, "premature proof"); + require(challengeEpoch != NO_CHALLENGE_SCHEDULED, "no challenge scheduled"); + } + + IPDPTypes.PieceIdAndOffset[] memory challenges = new IPDPTypes.PieceIdAndOffset[](proofs.length); + + uint256 seed = drawChallengeSeed(setId); + { + uint256 leafCount = challengeRange[setId]; + uint256 sumTreeTop = 256 - BitOps.clz(nextPieceId[setId]); + for (uint64 i = 0; i < nProofs; i++) { + // Hash (SHA3) the seed, data set id, and proof index to create challenge. + // Note -- there is a slight deviation here from the uniform distribution. + // Some leaves are challenged with probability p and some have probability p + deviation. + // This deviation is bounded by leafCount / 2^256 given a 256 bit hash. + // Deviation grows with data set leaf count. + // Assuming a 1000EiB = 1 ZiB network size ~ 2^70 bytes of data or 2^65 leaves + // This deviation is bounded by 2^65 / 2^256 = 2^-191 which is negligible. + // If modifying this code to use a hash function with smaller output size + // this deviation will increase and caution is advised. + // To remove this deviation we could use the standard solution of rejection sampling + // This is complicated and slightly more costly at one more hash on average for maximally misaligned data sets + // and comes at no practical benefit given how small the deviation is. + bytes memory payload = abi.encodePacked(seed, setId, i); + uint256 challengeIdx = uint256(keccak256(payload)) % leafCount; + + // Find the piece that has this leaf, and the offset of the leaf within that piece. 
+ challenges[i] = findOnePieceId(setId, challengeIdx, sumTreeTop); + Cids.Cid memory pieceCid = getPieceCid(setId, challenges[i].pieceId); + bytes32 pieceHash = Cids.digestFromCid(pieceCid); + uint8 pieceHeight = Cids.heightFromCid(pieceCid) + 1; // because MerkleVerify.verify assumes that base layer is 1 + bool ok = + MerkleVerify.verify(proofs[i].proof, pieceHash, proofs[i].leaf, challenges[i].offset, pieceHeight); + require(ok, "proof did not verify"); + } + } + + // Note: We don't want to include gas spent on the listener call in the fee calculation + // to only account for proof verification fees and avoid gamability by getting the listener + // to do extraneous work just to inflate the gas fee. + // + // (add 32 bytes to the `callDataSize` to also account for the `setId` calldata param) + uint256 gasUsed = (initialGas - gasleft()) + ((calculateCallDataSize(proofs) + 32) * 1300); + uint256 refund = calculateAndBurnProofFee(setId, gasUsed); + + { + address listenerAddr = dataSetListener[setId]; + if (listenerAddr != address(0)) { + PDPListener(listenerAddr).possessionProven(setId, dataSetLeafCount[setId], seed, proofs.length); + } + } + + dataSetLastProvenEpoch[setId] = block.number; + emit PossessionProven(setId, challenges); + + // Return the overpayment after doing everything else to avoid re-entrancy issues (all state has been updated by this point). If this + // call fails, the entire operation reverts. + if (refund > 0) { + (bool success,) = msg.sender.call{value: refund}(""); + require(success, "Transfer failed."); + } + } + + function calculateProofFee(uint256 setId, uint256 estimatedGasFee) public view returns (uint256) { + uint256 rawSize = 32 * challengeRange[setId]; + (uint64 filUsdPrice, int32 filUsdPriceExpo) = getFILUSDPrice(); + + return PDPFees.proofFeeWithGasFeeBound( + estimatedGasFee, filUsdPrice, filUsdPriceExpo, rawSize, block.number - dataSetLastProvenEpoch[setId] + ); + } + + function calculateAndBurnProofFee(uint256 setId, uint256 gasUsed) internal returns (uint256 refund) { + uint256 estimatedGasFee = gasUsed * block.basefee; + uint256 rawSize = 32 * challengeRange[setId]; + (uint64 filUsdPrice, int32 filUsdPriceExpo) = getFILUSDPrice(); + + uint256 proofFee = PDPFees.proofFeeWithGasFeeBound( + estimatedGasFee, filUsdPrice, filUsdPriceExpo, rawSize, block.number - dataSetLastProvenEpoch[setId] + ); + burnFee(proofFee); + emit ProofFeePaid(setId, proofFee, filUsdPrice, filUsdPriceExpo); + + return msg.value - proofFee; // burnFee asserts that proofFee <= msg.value; + } + + function calculateCallDataSize(IPDPTypes.Proof[] calldata proofs) internal pure returns (uint256) { + uint256 callDataSize = 0; + for (uint256 i = 0; i < proofs.length; i++) { + // 64 for the (leaf + abi encoding overhead ) + each element in the proof is 32 bytes + callDataSize += 64 + (proofs[i].proof.length * 32); + } + return callDataSize; + } + + function getRandomness(uint256 epoch) public view returns (uint256) { + // Call the precompile + (bool success, bytes memory result) = RANDOMNESS_PRECOMPILE.staticcall(abi.encodePacked(epoch)); + + // Check if the call was successful + require(success, "Randomness precompile call failed"); + + // Decode and return the result + return abi.decode(result, (uint256)); + } + + function drawChallengeSeed(uint256 setId) internal view returns (uint256) { + return getRandomness(nextChallengeEpoch[setId]); + } + + // Roll over to the next proving period + // + // This method updates the collection of provable pieces in the data set by + // 1. 
Actually removing the pieces that have been scheduled for removal + // 2. Updating the challenge range to now include leaves added in the last proving period + // So after this method is called pieces scheduled for removal are no longer eligible for challenging + // and can be deleted. And pieces added in the last proving period must be available for challenging. + // + // Additionally this method forces sampling of a new challenge. It enforces that the new + // challenge epoch is at least `challengeFinality` epochs in the future. + // + // Note that this method can be called at any time but the pdpListener will likely consider it + // a "fault" or other penalizable behavior to call this method before calling provePossession. + function nextProvingPeriod(uint256 setId, uint256 challengeEpoch, bytes calldata extraData) public { + require(extraData.length <= EXTRA_DATA_MAX_SIZE, "Extra data too large"); + require(msg.sender == storageProvider[setId], "only the storage provider can move to next proving period"); + require(dataSetLeafCount[setId] > 0, "can only start proving once leaves are added"); + + if (dataSetLastProvenEpoch[setId] == NO_PROVEN_EPOCH) { + dataSetLastProvenEpoch[setId] = block.number; + } + + // Take removed pieces out of proving set + uint256[] storage removals = scheduledRemovals[setId]; + uint256 nRemovals = removals.length; + if (nRemovals > 0) { + uint256[] memory removalsToProcess = new uint256[](nRemovals); + + for (uint256 i = 0; i < nRemovals; i++) { + removalsToProcess[i] = removals[removals.length - 1]; + removals.pop(); + } + + removePieces(setId, removalsToProcess); + emit PiecesRemoved(setId, removalsToProcess); + } + + // Bring added pieces into proving set + challengeRange[setId] = dataSetLeafCount[setId]; + if (challengeEpoch < block.number + challengeFinality) { + revert("challenge epoch must be at least challengeFinality epochs in the future"); + } + nextChallengeEpoch[setId] = challengeEpoch; + + // Clear next challenge epoch if the set is now empty. + // It will be re-set after new data is added and nextProvingPeriod is called. + if (dataSetLeafCount[setId] == 0) { + emit DataSetEmpty(setId); + dataSetLastProvenEpoch[setId] = NO_PROVEN_EPOCH; + nextChallengeEpoch[setId] = NO_CHALLENGE_SCHEDULED; + } + + address listenerAddr = dataSetListener[setId]; + if (listenerAddr != address(0)) { + PDPListener(listenerAddr).nextProvingPeriod( + setId, nextChallengeEpoch[setId], dataSetLeafCount[setId], extraData + ); + } + emit NextProvingPeriod(setId, challengeEpoch, dataSetLeafCount[setId]); + } + + // removes pieces from a data set's state. + function removePieces(uint256 setId, uint256[] memory pieceIds) internal { + require(dataSetLive(setId), "Data set not live"); + uint256 totalDelta = 0; + for (uint256 i = 0; i < pieceIds.length; i++) { + totalDelta += removeOnePiece(setId, pieceIds[i]); + } + dataSetLeafCount[setId] -= totalDelta; + } + + // removeOnePiece removes a piece's array entries from the data set's state and returns + // the number of leaves by which to reduce the total data set leaf count. + function removeOnePiece(uint256 setId, uint256 pieceId) internal returns (uint256) { + uint256 delta = pieceLeafCounts[setId][pieceId]; + sumTreeRemove(setId, pieceId, delta); + delete pieceLeafCounts[setId][pieceId]; + delete pieceCids[setId][pieceId]; + return delta; + } + + /* Sum tree functions */ + /* + A sumtree is a variant of a Fenwick or binary indexed tree. It is a binary + tree where each node is the sum of its children.
+    It is designed to support
+    efficient query and update operations on a base array of integers. Here
+    the base array is the piece leaf count array. Asymptotically the sum tree
+    has logarithmic search and update functions. Each slot of the sum tree is
+    logically a node in a binary tree.
+
+    The node's height above the leaf depth is the number of trailing zeros in
+    the binary representation of index + 1; equivalently, it is the ruler
+    function (https://oeis.org/A001511) applied to index + 1, minus 1, giving
+    the sequence [0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,...]. Each slot in the sum
+    tree array contains the sum of a range of the base array, whose size is
+    determined by the slot's height in the binary tree structure: the range
+    for height d and current index j is [j + 1 - 2^d : j] inclusive. For
+    example, if the node's height is 0 its value is the base array's value at
+    the same index, and if the node's height is 3 its value is the sum of the
+    last 2^3 = 8 values of the base array. The reason to do things with
+    recursive partial sums is to accommodate O(log len(base array)) updates
+    for add and remove operations on the base array.
+    */
+
+    // Perform sumtree addition
+    //
+    function sumTreeAdd(uint256 setId, uint256 count, uint256 pieceId) internal {
+        uint256 index = pieceId;
+        uint256 h = heightFromIndex(index);
+
+        uint256 sum = count;
+        // Add in the partial sums rooted at the left siblings:
+        // sumTreeCounts[index - 2^i] for i in [0, h)
+        for (uint256 i = 0; i < h; i++) {
+            uint256 j = index - (1 << i);
+            sum += sumTreeCounts[setId][j];
+        }
+        sumTreeCounts[setId][pieceId] = sum;
+    }
+
+    // Perform sumtree removal
+    //
+    function sumTreeRemove(uint256 setId, uint256 index, uint256 delta) internal {
+        uint256 top = uint256(256 - BitOps.clz(nextPieceId[setId]));
+        uint256 h = uint256(heightFromIndex(index));
+
+        // Deletion traversal either terminates at
+        // 1) the top of the tree or
+        // 2) the highest node right of the removal index
+        while (h <= top && index < nextPieceId[setId]) {
+            sumTreeCounts[setId][index] -= delta;
+            index += 1 << h;
+            h = heightFromIndex(index);
+        }
+    }
+
+    // Perform sumtree find
+    function findOnePieceId(uint256 setId, uint256 leafIndex, uint256 top)
+        internal
+        view
+        returns (IPDPTypes.PieceIdAndOffset memory)
+    {
+        require(leafIndex < dataSetLeafCount[setId], "Leaf index out of bounds");
+        uint256 searchPtr = (1 << top) - 1;
+        uint256 acc = 0;
+
+        // Binary search until we find the index of the sumtree leaf covering the index range
+        uint256 candidate;
+        for (uint256 h = top; h > 0; h--) {
+            // Search has taken us past the end of the sumtree
+            // Only option is to go left
+            if (searchPtr >= nextPieceId[setId]) {
+                searchPtr -= 1 << (h - 1);
+                continue;
+            }
+
+            candidate = acc + sumTreeCounts[setId][searchPtr];
+            // Go right
+            if (candidate <= leafIndex) {
+                acc += sumTreeCounts[setId][searchPtr];
+                searchPtr += 1 << (h - 1);
+            } else {
+                // Go left
+                searchPtr -= 1 << (h - 1);
+            }
+        }
+        candidate = acc + sumTreeCounts[setId][searchPtr];
+        if (candidate <= leafIndex) {
+            // Choose right
+            return IPDPTypes.PieceIdAndOffset(searchPtr + 1, leafIndex - candidate);
+        } // Choose left
+        return IPDPTypes.PieceIdAndOffset(searchPtr, leafIndex - acc);
+    }
+
+    // findPieceIds is a batched version of findOnePieceId
+    function findPieceIds(uint256 setId, uint256[] calldata leafIndexs)
+        public
+        view
+        returns (IPDPTypes.PieceIdAndOffset[] memory)
+    {
+        // 2^top is the smallest power of two strictly greater than the number of
+        // piece IDs ever allocated (nextPieceId), so a sumtree of height `top`
+        // covers every piece in the set
+        uint256 top = 256 - BitOps.clz(nextPieceId[setId]);
+        IPDPTypes.PieceIdAndOffset[] memory result = new IPDPTypes.PieceIdAndOffset[](leafIndexs.length);
+        for (uint256 i = 0; i < leafIndexs.length; i++) {
+            result[i] = findOnePieceId(setId, leafIndexs[i], top);
+        }
+        return result;
+    }
+
+    // Return height of sumtree node at given index
+    // Calculated by taking the trailing zeros of 1 plus the index
+    function heightFromIndex(uint256 index) internal pure returns (uint256) {
+        return BitOps.ctz(index + 1);
+    }
+
+    // Get the FIL/USD price from the Pyth price feed
+    function getFILUSDPrice() public view returns (uint64, int32) {
+        PythStructs.Price memory priceData = PYTH.getPriceUnsafe(FIL_USD_PRICE_FEED_ID);
+        require(priceData.price > 0, "failed to validate: price must be greater than 0");
+        return (uint64(priceData.price), priceData.expo);
+    }
+}
diff --git a/service_contracts/src/pdp/contracts/Proofs.sol b/service_contracts/src/pdp/contracts/Proofs.sol
new file mode 100644
index 00000000..0e15f92b
--- /dev/null
+++ b/service_contracts/src/pdp/contracts/Proofs.sol
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: MIT
+// The verification functions are adapted from OpenZeppelin Contracts (last updated v5.0.0) (utils/cryptography/MerkleProof.sol)
+
+pragma solidity ^0.8.20;
+
+import {BitOps} from "./BitOps.sol";
+
+/**
+ * Functions for the generation and verification of Merkle proofs.
+ * These are specialised to the SHA254 hash function and to implicitly balanced trees.
+ *
+ * Note that only the verification functions are intended to execute on-chain.
+ * The commitment and proof generation functions are co-located for convenience and to function
+ * as a specification for off-chain operations.
+ */
+library MerkleVerify {
+    /**
+     * Returns true if a `leaf` can be proved to be a part of a Merkle tree
+     * defined by `root` at `position`. For this, a `proof` must be provided, containing
+     * sibling hashes on the branch from the leaf to the root of the tree.
+     *
+     * Will only return true if the leaf is at the bottom of the tree for the given tree height.
+     *
+     * This version handles proofs in memory.
+     */
+    function verify(bytes32[] memory proof, bytes32 root, bytes32 leaf, uint256 position, uint256 treeHeight)
+        internal
+        view
+        returns (bool)
+    {
+        // Tree height includes the root; the proof does not
+        require(proof.length == treeHeight - 1, "proof length does not match tree height");
+        return processInclusionProofMemory(proof, leaf, position) == root;
+    }
+
+    /**
+     * Returns the rebuilt hash obtained by traversing a Merkle tree up
+     * from `leaf` at `position` using `proof`. A `proof` is valid if and only if the rebuilt
+     * hash matches the root of the tree.
+     *
+     * This version handles proofs in memory.
+     */
+    function processInclusionProofMemory(bytes32[] memory proof, bytes32 leaf, uint256 position)
+        internal
+        view
+        returns (bytes32)
+    {
+        bytes32 computedHash = leaf;
+        for (uint256 i = 0; i < proof.length; i++) {
+            // If position is even, the leaf/node is on the left and the sibling is on the right.
+            bytes32 sibling = proof[i];
+            if (position % 2 == 0) {
+                computedHash = Hashes.orderedHash(computedHash, sibling);
+            } else {
+                computedHash = Hashes.orderedHash(sibling, computedHash);
+            }
+            position /= 2;
+        }
+        return computedHash;
+    }
+
+    /**
+     * Returns the root of a Merkle tree of all zero leaves and specified height.
+     * A height of zero returns zero (the leaf value).
+     * A height of 1 returns the hash of two zero leaves.
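+     * (For example, zeroRoot(1) == Hashes.orderedHash(bytes32(0), bytes32(0)).)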
+ * A height of n returns the hash of two nodes of height n-1. + * Height must be <= 50 (representing 2^50 leaves or 32EiB). + */ + function zeroRoot(uint256 height) internal pure returns (bytes32) { + require(height <= 50, "Height must be <= 50"); + // These roots were generated by code in Proots.t.sol. + uint256[51] memory zeroRoots = [ + 0x0000000000000000000000000000000000000000000000000000000000000000, + 0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb0b, + 0x3731bb99ac689f66eef5973e4a94da188f4ddcae580724fc6f3fd60dfd488333, + 0x642a607ef886b004bf2c1978463ae1d4693ac0f410eb2d1b7a47fe205e5e750f, + 0x57a2381a28652bf47f6bef7aca679be4aede5871ab5cf3eb2c08114488cb8526, + 0x1f7ac9595510e09ea41c460b176430bb322cd6fb412ec57cb17d989a4310372f, + 0xfc7e928296e516faade986b28f92d44a4f24b935485223376a799027bc18f833, + 0x08c47b38ee13bc43f41b915c0eed9911a26086b3ed62401bf9d58b8d19dff624, + 0xb2e47bfb11facd941f62af5c750f3ea5cc4df517d5c4f16db2b4d77baec1a32f, + 0xf9226160c8f927bfdcc418cdf203493146008eaefb7d02194d5e548189005108, + 0x2c1a964bb90b59ebfe0f6da29ad65ae3e417724a8f7c11745a40cac1e5e74011, + 0xfee378cef16404b199ede0b13e11b624ff9d784fbbed878d83297e795e024f02, + 0x8e9e2403fa884cf6237f60df25f83ee40dca9ed879eb6f6352d15084f5ad0d3f, + 0x752d9693fa167524395476e317a98580f00947afb7a30540d625a9291cc12a07, + 0x7022f60f7ef6adfa17117a52619e30cea82c68075adf1c667786ec506eef2d19, + 0xd99887b973573a96e11393645236c17b1f4c7034d723c7a99f709bb4da61162b, + 0xd0b530dbb0b4f25c5d2f2a28dfee808b53412a02931f18c499f5a254086b1326, + 0x84c0421ba0685a01bf795a2344064fe424bd52a9d24377b394ff4c4b4568e811, + 0x65f29e5d98d246c38b388cfc06db1f6b021303c5a289000bdce832a9c3ec421c, + 0xa2247508285850965b7e334b3127b0c042b1d046dc54402137627cd8799ce13a, + 0xdafdab6da9364453c26d33726b9fefe343be8f81649ec009aad3faff50617508, + 0xd941d5e0d6314a995c33ffbd4fbe69118d73d4e5fd2cd31f0f7c86ebdd14e706, + 0x514c435c3d04d349a5365fbd59ffc713629111785991c1a3c53af22079741a2f, + 0xad06853969d37d34ff08e09f56930a4ad19a89def60cbfee7e1d3381c1e71c37, + 0x39560e7b13a93b07a243fd2720ffa7cb3e1d2e505ab3629e79f46313512cda06, + 0xccc3c012f5b05e811a2bbfdd0f6833b84275b47bf229c0052a82484f3c1a5b3d, + 0x7df29b69773199e8f2b40b77919d048509eed768e2c7297b1f1437034fc3c62c, + 0x66ce05a3667552cf45c02bcc4e8392919bdeac35de2ff56271848e9f7b675107, + 0xd8610218425ab5e95b1ca6239d29a2e420d706a96f373e2f9c9a91d759d19b01, + 0x6d364b1ef846441a5a4a68862314acc0a46f016717e53443e839eedf83c2853c, + 0x077e5fde35c50a9303a55009e3498a4ebedff39c42b710b730d8ec7ac7afa63e, + 0xe64005a6bfe3777953b8ad6ef93f0fca1049b2041654f2a411f7702799cece02, + 0x259d3d6b1f4d876d1185e1123af6f5501af0f67cf15b5216255b7b178d12051d, + 0x3f9a4d411da4ef1b36f35ff0a195ae392ab23fee7967b7c41b03d1613fc29239, + 0xfe4ef328c61aa39cfdb2484eaa32a151b1fe3dfd1f96dd8c9711fd86d6c58113, + 0xf55d68900e2d8381eccb8164cb9976f24b2de0dd61a31b97ce6eb23850d5e819, + 0xaaaa8c4cb40aacee1e02dc65424b2a6c8e99f803b72f7929c4101d7fae6bff32, + 0xc91a84c057fd4afcc209c3b482360cf7493b9129fa164cd1fe6b045a683b5322, + 0x64a2c1df312ecb443b431946c02fe701514b5291091b888f03189bee8ea11416, + 0x739953434ead6e24f1d1bf5b68ca823b2692b3000a7806d08c76640da98c3526, + 0x771f5b63af6f7d1d515d134084d535f5f4d8ab8529b2c3f581f143f8cc38be2f, + 0x9031a15bf51550a85db1f64f4db739e01125478a50ee332bc2b4f6462214b20b, + 0xc83ba84710b74413f3be84a5466aff2d7f0c5472248ffbeb2266466a92ac4f12, + 0x2fe598945de393714c10f447cec237039b5944077a78e0a9811cf5f7a45abe1b, + 0x395355ae44754a5cde74898a3f2ef60d5871ab35019c610fc413a62d57646501, + 
0x4bd4712084416c77eec00cab23416eda8c8dbf681c8ccd0b96c0be980a40d818, + 0xf6eeae7dee22146564155ebe4bdf633333401de68da4aa2a6e946c2363807a34, + 0x8b43a114ba1c1bb80781e85f87b0bbee11c69fdbbd2ed81d6c9b4c7859c04e34, + 0xf74dc344ee4fa47f07fb2732ad9443d94892ca8b53d006c9891a32ef2b74491e, + 0x6f5246ae0f965e5424162403d3ab81ef8d15439c5f3a49038488e3640ef98718, + 0x0b5b44ccf91ff135af58d2cf694b2ac99f22f5264863d6b9272b6155956aa10e + ]; + return bytes32(zeroRoots[height]); + } +} + +library MerkleProve { + // Builds a merkle tree from an array of leaves. + // The tree is an array of arrays of bytes32. + // The last array is the leaves, and each prior array is the result of the hash of pairs in the previous array. + // An unpaired element is paired with the root of a tree of the same height with zero leaves. + // The first element of the first array is the root. + function buildTree(bytes32[] memory leaves) internal view returns (bytes32[][] memory) { + require(leaves.length > 0, "Leaves array must not be empty"); + + uint256 levels = 256 - BitOps.clz(leaves.length - 1); + bytes32[][] memory tree = new bytes32[][](levels + 1); + tree[levels] = leaves; + + for (uint256 i = levels; i > 0; i--) { + bytes32[] memory currentLevel = tree[i]; + uint256 nextLevelSize = (currentLevel.length + 1) / 2; + tree[i - 1] = new bytes32[](nextLevelSize); + + for (uint256 j = 0; j < nextLevelSize; j++) { + if (2 * j + 1 < currentLevel.length) { + tree[i - 1][j] = Hashes.orderedHash(currentLevel[2 * j], currentLevel[2 * j + 1]); + } else { + // Pair final odd node with a zero-tree of same height. + tree[i - 1][j] = Hashes.orderedHash(currentLevel[2 * j], MerkleVerify.zeroRoot(levels - i)); + } + } + } + + return tree; + } + + // Gets an inclusion proof from a Merkle tree for a leaf at a given index. + // The proof is constructed by traversing up the tree to the root, and the sibling of each node is appended to the proof. + // A final unpaired element in any level is paired with the zero-tree of the same height. + // Every proof thus has length equal to the height of the tree minus 1. + function buildProof(bytes32[][] memory tree, uint256 index) internal pure returns (bytes32[] memory) { + require(index < tree[tree.length - 1].length, "Index out of bounds"); + + bytes32[] memory proof = new bytes32[](tree.length - 1); + uint256 proofIndex = 0; + + for (uint256 i = tree.length - 1; i > 0; i--) { + uint256 levelSize = tree[i].length; + uint256 pairIndex = index ^ 1; // XOR with 1 to get the pair index + + if (pairIndex < levelSize) { + proof[proofIndex] = tree[i][pairIndex]; + } else { + // Pair final odd node with zero-tree of same height. + proof[proofIndex] = MerkleVerify.zeroRoot(tree.length - 1 - i); + } + proofIndex++; + index /= 2; // Move to the parent node + } + return proof; + } +} + +library Hashes { + // "The Sha254 functions are identical to Sha256 except that the last two bits of the Sha256 256-bit digest are zeroed out." + // The bytes of uint256 are arranged in big-endian order, MSB first in memory. + // The bits in each byte are arranged in little-endian order. + // Thus, the "last two bits" are the first two bits of the last byte. + uint256 constant SHA254_MASK = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF3F; + + /** + * Order-dependent hash of pair of bytes32. + */ + function orderedHash(bytes32 a, bytes32 b) internal view returns (bytes32) { + return _efficientSHA254(a, b); + } + + /** + * Implementation equivalent to using sha256(abi.encode(a, b)) that doesn't allocate or expand memory. 
+ */ + function _efficientSHA254(bytes32 a, bytes32 b) private view returns (bytes32 value) { + assembly ("memory-safe") { + mstore(0x00, a) + mstore(0x20, b) + + // Call the SHA256 precompile + if iszero(staticcall(gas(), 0x2, 0x00, 0x40, 0x00, 0x20)) { revert(0, 0) } + + value := mload(0x00) + // SHA254 hash for compatibility with Filecoin piece commitments. + value := and(value, SHA254_MASK) + } + } +} diff --git a/service_contracts/src/pdp/contracts/SimplePDPService.sol b/service_contracts/src/pdp/contracts/SimplePDPService.sol new file mode 100644 index 00000000..436caeaa --- /dev/null +++ b/service_contracts/src/pdp/contracts/SimplePDPService.sol @@ -0,0 +1,299 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.20; + +import {PDPListener} from "./PDPVerifier.sol"; +import {Initializable} from "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol"; +import {UUPSUpgradeable} from "@openzeppelin/contracts-upgradeable/proxy/utils/UUPSUpgradeable.sol"; +import {OwnableUpgradeable} from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol"; +import {Cids} from "./Cids.sol"; +import {IPDPProvingSchedule} from "./IPDPProvingSchedule.sol"; + +// PDPRecordKeeper tracks PDP operations. It is used as a base contract for PDPListeners +// in order to give users the capability to consume events async. +/// @title PDPRecordKeeper +/// @dev This contract is unused by the SimplePDPService as it is too expensive. +/// we've kept it here for future reference and testing. +contract PDPRecordKeeper { + enum OperationType { + NONE, + CREATE, + DELETE, + ADD, + REMOVE_SCHEDULED, + PROVE_POSSESSION, + NEXT_PROVING_PERIOD + } + + // Struct to store event details + struct EventRecord { + uint64 epoch; + uint256 dataSetId; + OperationType operationType; + bytes extraData; + } + + // Eth event emitted when a new record is added + event RecordAdded(uint256 indexed dataSetId, uint64 epoch, OperationType operationType); + + // Mapping to store events for each data set + mapping(uint256 => EventRecord[]) public dataSetEvents; + + function receiveDataSetEvent(uint256 dataSetId, OperationType operationType, bytes memory extraData) + internal + returns (uint256) + { + uint64 epoch = uint64(block.number); + EventRecord memory newRecord = + EventRecord({epoch: epoch, dataSetId: dataSetId, operationType: operationType, extraData: extraData}); + dataSetEvents[dataSetId].push(newRecord); + emit RecordAdded(dataSetId, epoch, operationType); + return dataSetEvents[dataSetId].length - 1; + } + + // Function to get the number of events for a data set + function getEventCount(uint256 dataSetId) external view returns (uint256) { + return dataSetEvents[dataSetId].length; + } + + // Function to get a specific event for a data set + function getEvent(uint256 dataSetId, uint256 eventIndex) external view returns (EventRecord memory) { + require(eventIndex < dataSetEvents[dataSetId].length, "Event index out of bounds"); + return dataSetEvents[dataSetId][eventIndex]; + } + + // Function to get all events for a data set + function listEvents(uint256 dataSetId) external view returns (EventRecord[] memory) { + return dataSetEvents[dataSetId]; + } +} + +/// @title SimplePDPService +/// @notice A default implementation of a PDP Listener. +/// @dev This contract only supports one PDP service caller, set in the constructor, +/// The primary purpose of this contract is to +/// 1. Enforce a proof count of 5 proofs per data set proving period. +/// 2. 
Provide a reliable way to report faults to users. +contract SimplePDPService is PDPListener, IPDPProvingSchedule, Initializable, UUPSUpgradeable, OwnableUpgradeable { + event FaultRecord(uint256 indexed dataSetId, uint256 periodsFaulted, uint256 deadline); + + uint256 public constant NO_CHALLENGE_SCHEDULED = 0; + uint256 public constant NO_PROVING_DEADLINE = 0; + + // The address of the PDP verifier contract that is allowed to call this contract + address public pdpVerifierAddress; + mapping(uint256 => uint256) public provingDeadlines; + mapping(uint256 => bool) public provenThisPeriod; + + /// @custom:oz-upgrades-unsafe-allow constructor + constructor() { + _disableInitializers(); + } + + function initialize(address _pdpVerifierAddress) public initializer { + __Ownable_init(msg.sender); + __UUPSUpgradeable_init(); + require(_pdpVerifierAddress != address(0), "PDP verifier address cannot be zero"); + pdpVerifierAddress = _pdpVerifierAddress; + } + + function _authorizeUpgrade(address newImplementation) internal override onlyOwner {} + + // Modifier to ensure only the PDP verifier contract can call certain functions + modifier onlyPDPVerifier() { + require(msg.sender == pdpVerifierAddress, "Caller is not the PDP verifier"); + _; + } + + // SLA specification functions setting values for PDP service providers + // Max number of epochs between two consecutive proofs + function getMaxProvingPeriod() public pure returns (uint64) { + return 2880; + } + + // Number of epochs at the end of a proving period during which a + // proof of possession can be submitted + function challengeWindow() public pure returns (uint256) { + return 60; + } + + // Initial value for challenge window start + // Can be used for first call to nextProvingPeriod + function initChallengeWindowStart() public view returns (uint256) { + return block.number + getMaxProvingPeriod() - challengeWindow(); + } + + // The start of the challenge window for the current proving period + function thisChallengeWindowStart(uint256 setId) public view returns (uint256) { + if (provingDeadlines[setId] == NO_PROVING_DEADLINE) { + revert("Proving period not yet initialized"); + } + + uint256 periodsSkipped; + // Proving period is open 0 skipped periods + if (block.number <= provingDeadlines[setId]) { + periodsSkipped = 0; + } else { + // Proving period has closed possibly some skipped periods + periodsSkipped = 1 + (block.number - (provingDeadlines[setId] + 1)) / getMaxProvingPeriod(); + } + return provingDeadlines[setId] + periodsSkipped * getMaxProvingPeriod() - challengeWindow(); + } + + // The start of the NEXT OPEN proving period's challenge window + // Useful for querying before nextProvingPeriod to determine challengeEpoch to submit for nextProvingPeriod + function nextChallengeWindowStart(uint256 setId) public view returns (uint256) { + if (provingDeadlines[setId] == NO_PROVING_DEADLINE) { + revert("Proving period not yet initialized"); + } + // If the current period is open this is the next period's challenge window + if (block.number <= provingDeadlines[setId]) { + return thisChallengeWindowStart(setId) + getMaxProvingPeriod(); + } + // If the current period is not yet open this is the current period's challenge window + return thisChallengeWindowStart(setId); + } + + // Challenges / merkle inclusion proofs provided per data set + function getChallengesPerProof() public pure returns (uint64) { + return 5; + } + + /** + * @notice Returns PDP configuration values (for IPDPProvingSchedule interface) + * @return maxProvingPeriod Maximum 
number of epochs between proofs
+     * @return challengeWindow_ Number of epochs for the challenge window
+     * @return challengesPerProof Number of challenges required per proof
+     * @return initChallengeWindowStart_ Initial challenge window start for new data sets
+     */
+    function getPDPConfig()
+        external
+        view
+        override
+        returns (
+            uint64 maxProvingPeriod,
+            uint256 challengeWindow_,
+            uint256 challengesPerProof,
+            uint256 initChallengeWindowStart_
+        )
+    {
+        maxProvingPeriod = getMaxProvingPeriod();
+        challengeWindow_ = challengeWindow();
+        challengesPerProof = getChallengesPerProof();
+        initChallengeWindowStart_ = initChallengeWindowStart();
+    }
+
+    /**
+     * @notice Returns the start of the next challenge window for a data set (for IPDPProvingSchedule interface)
+     * @param setId The ID of the data set
+     * @return The block number when the next challenge window starts
+     */
+    function nextPDPChallengeWindowStart(uint256 setId) external view override returns (uint256) {
+        return nextChallengeWindowStart(setId);
+    }
+
+    // Listener interface methods
+    // Note: many of these are no-ops, as they are not important for the SimplePDPService's functionality
+    // of enforcing proof constraints and reporting faults.
+    // Note: we generally just drop the user-defined extraData, as this contract has no use for it
+    function dataSetCreated(uint256 dataSetId, address creator, bytes calldata) external onlyPDPVerifier {}
+
+    function dataSetDeleted(uint256 dataSetId, uint256 deletedLeafCount, bytes calldata) external onlyPDPVerifier {}
+
+    function piecesAdded(uint256 dataSetId, uint256 firstAdded, Cids.Cid[] memory pieceData, bytes calldata)
+        external
+        onlyPDPVerifier
+    {}
+
+    function piecesScheduledRemove(uint256 dataSetId, uint256[] memory pieceIds, bytes calldata)
+        external
+        onlyPDPVerifier
+    {}
+
+    function storageProviderChanged(uint256, address, address, bytes calldata) external override onlyPDPVerifier {}
+
+    // possessionProven checks for a correct challenge count and reverts if it is too low.
+    // It also checks that the proof falls inside the challenge window, reverting if it
+    // arrives too early or after the proving deadline.
+    function possessionProven(
+        uint256 dataSetId,
+        uint256, /*challengedLeafCount*/
+        uint256, /*seed*/
+        uint256 challengeCount
+    ) external onlyPDPVerifier {
+        if (provenThisPeriod[dataSetId]) {
+            revert("Only one proof of possession allowed per proving period. Open a new proving period.");
+        }
+        if (challengeCount < getChallengesPerProof()) {
+            revert("Invalid challenge count < 5");
+        }
+        if (provingDeadlines[dataSetId] == NO_PROVING_DEADLINE) {
+            revert("Proving not yet started");
+        }
+        // check for proof outside of challenge window
+        if (provingDeadlines[dataSetId] < block.number) {
+            revert("Current proving period passed. Open a new proving period.");
+        }
+
+        if (provingDeadlines[dataSetId] - challengeWindow() > block.number) {
+            revert("Too early. Wait for challenge window to open");
+        }
+        provenThisPeriod[dataSetId] = true;
+    }
+
+    // nextProvingPeriod checks for an unsubmitted proof, in which case it emits a fault event.
+    // Additionally it enforces constraints on the update of its state:
+    // 1. One update per proving period.
+    // 2. Next challenge epoch must fall within the challenge window in the last challengeWindow()
+    //    epochs of the proving period.
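+    //
+    // Worked example (illustrative, using the constants above): with
+    // getMaxProvingPeriod() = 2880 and challengeWindow() = 60, a data set
+    // initialized at block B gets firstDeadline = B + 2880, and a valid
+    // challengeEpoch must satisfy B + 2820 <= challengeEpoch <= B + 2880.
+    // If the provider then lets one full period elapse before calling again,
+    // periodsSkipped = 1, the deadline advances by 2 * 2880, and one fault
+    // period is recorded (two if the previous period also went unproven).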
+ function nextProvingPeriod(uint256 dataSetId, uint256 challengeEpoch, uint256, /*leafCount*/ bytes calldata) + external + onlyPDPVerifier + { + // initialize state for new data set + if (provingDeadlines[dataSetId] == NO_PROVING_DEADLINE) { + uint256 firstDeadline = block.number + getMaxProvingPeriod(); + if (challengeEpoch < firstDeadline - challengeWindow() || challengeEpoch > firstDeadline) { + revert("Next challenge epoch must fall within the next challenge window"); + } + provingDeadlines[dataSetId] = firstDeadline; + provenThisPeriod[dataSetId] = false; + return; + } + + // Revert when proving period not yet open + // Can only get here if calling nextProvingPeriod multiple times within the same proving period + uint256 prevDeadline = provingDeadlines[dataSetId] - getMaxProvingPeriod(); + if (block.number <= prevDeadline) { + revert("One call to nextProvingPeriod allowed per proving period"); + } + + uint256 periodsSkipped; + // Proving period is open 0 skipped periods + if (block.number <= provingDeadlines[dataSetId]) { + periodsSkipped = 0; + } else { + // Proving period has closed possibly some skipped periods + periodsSkipped = (block.number - (provingDeadlines[dataSetId] + 1)) / getMaxProvingPeriod(); + } + + uint256 nextDeadline; + // the data set has become empty and provingDeadline is set inactive + if (challengeEpoch == NO_CHALLENGE_SCHEDULED) { + nextDeadline = NO_PROVING_DEADLINE; + } else { + nextDeadline = provingDeadlines[dataSetId] + getMaxProvingPeriod() * (periodsSkipped + 1); + if (challengeEpoch < nextDeadline - challengeWindow() || challengeEpoch > nextDeadline) { + revert("Next challenge epoch must fall within the next challenge window"); + } + } + uint256 faultPeriods = periodsSkipped; + if (!provenThisPeriod[dataSetId]) { + // include previous unproven period + faultPeriods += 1; + } + if (faultPeriods > 0) { + emit FaultRecord(dataSetId, faultPeriods, provingDeadlines[dataSetId]); + } + provingDeadlines[dataSetId] = nextDeadline; + provenThisPeriod[dataSetId] = false; + } +} diff --git a/service_contracts/src/pdp/contracts/interfaces/IPDPEvents.sol b/service_contracts/src/pdp/contracts/interfaces/IPDPEvents.sol new file mode 100644 index 00000000..9d30af3a --- /dev/null +++ b/service_contracts/src/pdp/contracts/interfaces/IPDPEvents.sol @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.20; + +import {Cids} from "../Cids.sol"; +import {IPDPTypes} from "./IPDPTypes.sol"; + +/// @title IPDPEvents +/// @notice Shared events for PDP contracts and consumers +interface IPDPEvents { + event DataSetCreated(uint256 indexed setId, address indexed storageProvider); + event StorageProviderChanged( + uint256 indexed setId, address indexed oldStorageProvider, address indexed newStorageProvider + ); + event DataSetDeleted(uint256 indexed setId, uint256 deletedLeafCount); + event DataSetEmpty(uint256 indexed setId); + event PiecesAdded(uint256 indexed setId, uint256[] pieceIds, Cids.Cid[] pieceCids); + event PiecesRemoved(uint256 indexed setId, uint256[] pieceIds); + event ProofFeePaid(uint256 indexed setId, uint256 fee, uint64 price, int32 expo); + event PossessionProven(uint256 indexed setId, IPDPTypes.PieceIdAndOffset[] challenges); + event NextProvingPeriod(uint256 indexed setId, uint256 challengeEpoch, uint256 leafCount); + event ContractUpgraded(string version, address newImplementation); +} diff --git a/service_contracts/src/pdp/contracts/interfaces/IPDPTypes.sol b/service_contracts/src/pdp/contracts/interfaces/IPDPTypes.sol new file mode 100644 
index 00000000..63939f18 --- /dev/null +++ b/service_contracts/src/pdp/contracts/interfaces/IPDPTypes.sol @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.20; + +/// @title IPDPTypes +/// @notice Shared types for PDP contracts and consumers +interface IPDPTypes { + struct Proof { + bytes32 leaf; + bytes32[] proof; + } + + struct PieceIdAndOffset { + uint256 pieceId; + uint256 offset; + } +} diff --git a/service_contracts/src/pdp/contracts/interfaces/IPDPVerifier.sol b/service_contracts/src/pdp/contracts/interfaces/IPDPVerifier.sol new file mode 100644 index 00000000..2a6514f8 --- /dev/null +++ b/service_contracts/src/pdp/contracts/interfaces/IPDPVerifier.sol @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.20; + +import {Cids} from "../Cids.sol"; +import {IPDPTypes} from "./IPDPTypes.sol"; +import {IPDPEvents} from "./IPDPEvents.sol"; + +/// @title IPDPVerifier +/// @notice Main interface for the PDPVerifier contract +interface IPDPVerifier is IPDPEvents { + // View functions + function getChallengeFinality() external view returns (uint256); + function getNextDataSetId() external view returns (uint64); + function dataSetLive(uint256 setId) external view returns (bool); + function pieceLive(uint256 setId, uint256 pieceId) external view returns (bool); + function pieceChallengable(uint256 setId, uint256 pieceId) external view returns (bool); + function getDataSetLeafCount(uint256 setId) external view returns (uint256); + function getNextPieceId(uint256 setId) external view returns (uint256); + function getNextChallengeEpoch(uint256 setId) external view returns (uint256); + function getDataSetListener(uint256 setId) external view returns (address); + function getDataSetStorageProvider(uint256 setId) external view returns (address, address); + function getDataSetLastProvenEpoch(uint256 setId) external view returns (uint256); + function getPieceCid(uint256 setId, uint256 pieceId) external view returns (bytes memory); + function getPieceLeafCount(uint256 setId, uint256 pieceId) external view returns (uint256); + function getChallengeRange(uint256 setId) external view returns (uint256); + function getScheduledRemovals(uint256 setId) external view returns (uint256[] memory); + + // State-changing functions + function proposeDataSetStorageProvider(uint256 setId, address newStorageProvider) external; + function claimDataSetStorageProvider(uint256 setId, bytes calldata extraData) external; + function createDataSet(address listenerAddr, bytes calldata extraData) external payable returns (uint256); + function deleteDataSet(uint256 setId, bytes calldata extraData) external; + function addPieces(uint256 setId, Cids.Cid[] calldata pieceData, bytes calldata extraData) + external + returns (uint256); + function schedulePieceDeletions(uint256 setId, uint256[] calldata pieceIds, bytes calldata extraData) external; + function provePossession(uint256 setId, IPDPTypes.Proof[] calldata proofs) external payable; + function nextProvingPeriod(uint256 setId, uint256 challengeEpoch, bytes calldata extraData) external; + function findPieceIds(uint256 setId, uint256[] calldata leafIndexs) + external + view + returns (IPDPTypes.PieceIdAndOffset[] memory); +} diff --git a/service_contracts/src/pdp/docs/design.md b/service_contracts/src/pdp/docs/design.md new file mode 100644 index 00000000..4786f1b2 --- /dev/null +++ b/service_contracts/src/pdp/docs/design.md @@ -0,0 +1,197 @@ +# Provable Data Possession - Design Documentation + +## Overview +Provable Data Possession (PDP) is 
a protocol that allows storage providers to prove they possess specific data without revealing the data itself. The system operates through a set of smart contracts that manage data sets, verification, and fault reporting.
+
+PDP currently enables a client-storage provider relationship where:
+1. Clients and storage providers establish a data set for data storage verification
+2. Storage providers add data pieces to the data set and submit periodic proofs
+3. The system verifies these proofs using randomized challenges
+4. Faults are reported when proofs fail or are not submitted
+
+
+## Table of Contents
+1. [Architecture](#architecture)
+2. [Core Components](#core-components)
+3. [Data Structures](#data-structures)
+4. [Workflows](#workflows)
+5. [Security Considerations](#security-considerations)
+6. [Performance Considerations](#performance-considerations)
+7. [Future Enhancements](#future-enhancements)
+8. [Appendices](#appendices)
+
+## Architecture
+The PDP system uses a singleton contract design where a single verifier contract manages multiple data sets for many storage providers.
+
+### System Components
+- **PDP Verifier**: The main contract that holds data sets and verifies proofs
+- **SimplePDPService**: Manages proving periods and fault reporting
+- **Supporting Contracts**: Additional contracts for specific functionality
+
+### Interaction Patterns
+The PDP system follows these primary interaction patterns:
+1. Clients and storage providers establish data sets through the verifier contract
+2. The system issues challenges based on chain randomness
+3. Storage providers submit Merkle proofs for data possession verification
+4. The SimplePDPService contract (or, in general, the listener) receives events about all operations
+5. Faults are reported when proofs are not submitted
+
+## Core Components
+
+### PDP Verifier
+- **Purpose**: Manages data sets and verifies proofs
+- **Key methods**:
+  - Create data sets
+  - Add/delete pieces in data sets
+  - Verify proofs
+  - Manage proving periods
+- **State management**: Maintains data set state including pieces, sizes, and challenge epochs
+
+Searching a data set's contents for a challenged leaf is the heart of the PDPVerifier. To do this efficiently the verifier needs binary search, and to implement binary search efficiently over a mutating array of data set pieces it uses a Fenwick/BIT tree variant. See the design document: https://www.notion.so/filecoindev/PDP-Logical-Array-4405cda734964622993d3d58389942e8
+
+Much of the design of the verifier comes down to preventing proving parties from mounting grinding attacks. See the grinding prevention design document: https://www.notion.so/filecoindev/PDP-Grinding-Mitigations-1a3dc41950c180de9403cc2bb5c14bbb
+
+The verifier charges for its services with a proof fee. See the working proof fee design document: https://www.notion.so/filecoindev/Pricing-mechanism-for-PDPverifier-12adc41950c180ea9608cb419c369ba4
+
+For historical context please see the original design document of what has become the verifier: https://docs.google.com/document/d/1VwU182XZb54d__FQqMIJ_Srpk5a65QlDv_ffktnhDN0/edit?tab=t.0#heading=h.jue9m7srjcr3
+
+
+
+### PDP Listener
+The listener contract is a design pattern allowing for extensible programmability of the PDP storage protocol. It coordinates a concrete storage agreement between a storage client and provider using the PDPVerifier's proving service; a minimal sketch follows.
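+
+A minimal sketch of a custom listener (illustrative only; the import paths and the `LoggingListener` name are assumptions, and a real listener should restrict callers to the verifier as SimplePDPService does):
+
+```solidity
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+pragma solidity ^0.8.20;
+
+import {PDPListener} from "../contracts/PDPVerifier.sol"; // illustrative path
+import {Cids} from "../contracts/Cids.sol";
+
+// Records successful proofs and ignores every other hook.
+contract LoggingListener is PDPListener {
+    event Proven(uint256 indexed dataSetId, uint256 challengeCount);
+
+    function dataSetCreated(uint256, address, bytes calldata) external {}
+    function dataSetDeleted(uint256, uint256, bytes calldata) external {}
+    function piecesAdded(uint256, uint256, Cids.Cid[] memory, bytes calldata) external {}
+    function piecesScheduledRemove(uint256, uint256[] memory, bytes calldata) external {}
+    function storageProviderChanged(uint256, address, address, bytes calldata) external {}
+
+    function possessionProven(uint256 dataSetId, uint256, uint256, uint256 challengeCount) external {
+        emit Proven(dataSetId, challengeCount);
+    }
+
+    function nextProvingPeriod(uint256, uint256, uint256, bytes calldata) external {}
+}
+```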
+ +See the design document: https://www.notion.so/filecoindev/PDP-Extensibility-The-Listener-Contract-1a3dc41950c1804b9a21c15bc0abc95f + +Included is a default instantiation -- the SimplePDPService. + +### SimplePDPService + +This is the default instantiation of the PDPListener. + +- **Fault handling**: Reports faults when proving fails +- **Proving period management**: Manages the timing of proof challenges +- **Challenge window implementation**: Enforces time constraints for proof submission + +## Data Structures +Detailed description of key data structures. + +### DataSet +A data set is a logical container that holds an ordered collection of Merkle roots representing arrays of data: + +```solidity +struct Piece { + id: u64 + data: CID, + size: u64, // Must be multiple of 32. +} +struct DataSet { + id: u64, + // Protocol enforced delay in epochs between a successful proof and availability of + // the next challenge. + challengeDelay: u64, + // ID to assign to the next piece (a sequence number). + nextPieceID: u64, + // Pieces in the data set. + pieces: Piece[], + // The total size of all pieces. + totalSize: u64, + // Epoch from which to draw the next challenge. + nextChallengeEpoch: u64, +} +``` + +### Proof Structure +Each proof certifies the inclusion of a leaf at a specified position within a Merkle tree: + +```solidity +struct Proof { + leaf: bytes32, + leafOffset: uint, + proof: bytes32[], +} +``` + +### Logical Array Implementation +The PDP Logical Array is implemented using a variant of a Fenwick tree to efficiently manage the concatenated data from all pieces in a data set. See previously linked design document + +## Workflows +Detailed description of key workflows. + +### Data Set Creation +1. A client and storage provider agree to set up a data set +2. The storage provider calls the verifier contract to create a new data set +3. The data set is initialized with storage provider permissions and challenge parameters + +### Data Verification +1. The storage provider adds Merkle pieces to the data set +2. At each proving period: + - The system generates random challenges based on chain randomness + - The storage provider constructs Merkle proofs for the challenged leaves + - The storage provider submits proofs to the verifier contract + - The contract verifies the proofs and updates the next challenge epoch + +### Fault Handling +1. 
If a storage provider fails to submit valid proofs within the proving period:
+   - The storage provider must call nextProvingPeriod to acknowledge the fault
+   - The SimplePDPService contract emits an event registering the fault
+   - The system updates the next challenge epoch
+
+## Security Considerations
+
+### Threat Model
+- Storage providers may attempt to cheat by not storing data
+- Attackers may try to bias randomness or grind data sets
+- Data clients could try to force a fault to get out of paying honest storage providers for storage
+- Contract ownership could be compromised
+
+### Data Set Independence and Storage Provider Control
+- Data set operations are completely independent
+- Only the storage provider of a data set can impact the result of operations on that data set
+
+### Soundness
+- Proofs are valid only if the storage provider has the challenged data
+- Merkle proofs must be sound
+- Randomness cannot be biased through grinding or chain forking
+
+### Completeness
+- Proving always succeeds when valid Merkle proofs for the randomly sampled leaves are provided
+
+### Liveness
+- Storage providers can always add new pieces to the data set
+- Progress can be made with nextProvingPeriod after data loss or connectivity issues
+- Pieces can be deleted from data sets
+
+### Access Control
+- Storage provider management is strictly enforced
+- Only data set storage providers can modify their data sets
+
+### Randomness Handling
+- Challenge seed generation uses Filecoin L1 chain randomness from the drand beacon
+- A new FEVM precompile has recently been introduced allowing lookup of drand randomness for any past epoch
+
+## Performance Considerations
+
+### Gas Optimization
+- The singleton contract design may have higher costs as state grows
+- Merkle proof verification is designed to be gas-efficient
+
+### Scalability
+- The system can handle multiple data sets for many storage providers
+- The logical array implements binary search using a Fenwick/BIT tree variant that stays efficient as data sets mutate; see the sketch below
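+
+A standalone sketch of the sumtree indexing (illustrative; `heightFromIndex` mirrors the function of the same name in PDPVerifier.sol, while `slotSum` is a hypothetical helper showing what each slot logically stores):
+
+```solidity
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.20;
+
+library SumTreeSketch {
+    // Height of the sumtree node at `index`: the number of trailing zeros
+    // in the binary representation of index + 1 (the "ruler function").
+    function heightFromIndex(uint256 index) internal pure returns (uint256 h) {
+        uint256 v = index + 1;
+        while ((v & 1) == 0) {
+            v >>= 1;
+            h++;
+        }
+    }
+
+    // Value logically stored at slot `index`: the sum of the 2^h base-array
+    // entries ending at `index`, assembled from the left-sibling subtrees.
+    function slotSum(uint256[] memory base, uint256 index) internal pure returns (uint256 sum) {
+        uint256 h = heightFromIndex(index);
+        sum = base[index];
+        for (uint256 i = 0; i < h; i++) {
+            sum += slotSum(base, index - (1 << i)); // subtree covering 2^i entries
+        }
+    }
+}
+```
+
+For example, slot 7 has height 3 and covers base entries 0 through 7, which is why a lookup or an update touches only O(log n) slots.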
+
+## Future Enhancements
+
+### Upgradability
+- Proxy pattern implementation
+- Version management
+
+### Additional Features
+- Planned enhancements
+- Roadmap
+
+## Appendices
+
+### Glossary
+- **Data Set**: A container for Merkle pieces representing data to be proven
+- **Merkle Proof**: A cryptographic proof of data inclusion in a Merkle tree
+- **Proving Period**: The time window between successive challenge windows
+- **Challenge Window**: The time window during which proofs must be submitted
+- **Challenge**: A random request to prove possession of specific data
diff --git a/service_contracts/src/pdp/docs/gas-benchmarks/AddRoots Gas by ProofSet Size.png b/service_contracts/src/pdp/docs/gas-benchmarks/AddRoots Gas by ProofSet Size.png
new file mode 100644
index 00000000..c0b29244
Binary files /dev/null and b/service_contracts/src/pdp/docs/gas-benchmarks/AddRoots Gas by ProofSet Size.png differ
diff --git a/service_contracts/src/pdp/docs/gas-benchmarks/ProvePosession Gas by ProofSet Size.png b/service_contracts/src/pdp/docs/gas-benchmarks/ProvePosession Gas by ProofSet Size.png
new file mode 100644
index 00000000..d3464372
Binary files /dev/null and b/service_contracts/src/pdp/docs/gas-benchmarks/ProvePosession Gas by ProofSet Size.png differ
diff --git a/service_contracts/src/pdp/docs/gas-benchmarks/README.md b/service_contracts/src/pdp/docs/gas-benchmarks/README.md
new file mode 100644
index 00000000..69713293
--- /dev/null
+++ b/service_contracts/src/pdp/docs/gas-benchmarks/README.md
@@ -0,0 +1,43 @@
+# PDP Gas Benchmarks
+
+This directory contains gas cost benchmarks for PDP.
+
+## Calibration Network Gas Costs
+
+The file `calibration-gas-costs.csv` contains gas cost measurements from Calibnet, collected during the weeks of 2025-03-10 and 2025-03-17. The operations measured were:
+ - ProvePossession (submitting proofs of data possession)
+ - NextProvingPeriod (setting up the next proving window)
+ - AddPieces (adding new data pieces to a data set)
+
+## Summary Table
+
+Below is a summary of gas costs by operation type and data characteristics:
+
+| Operation Type | Data Size | Piece Count | Avg Gas Cost | Range |
+|---------------|-----------|------------|-------------|-------|
+| ProvePossession | 64 GiB | 39 | ~120M | 105-145M |
+| ProvePossession | 100 MB | 113 | ~125M | 99-149M |
+| ProvePossession | 1 MB | 1011 | ~138M | 123-153M |
+| ProvePossession | 1 MB | 10000 | ~177M | 177M |
+| NextProvingPeriod | 64 GiB | 39 | ~56M | 56M |
+| NextProvingPeriod | 100 MB | 113 | ~54M | 54M |
+| NextProvingPeriod | 1 MB | 1011 | ~54M | 54M |
+| NextProvingPeriod | 1 MB | 10000 | ~54M | 54M |
+| AddPieces | 64 GiB | 39 | ~44M | 44M |
+| AddPieces | 100 MB | 113 | ~55M | 55M |
+| AddPieces | 1 MB | 1011 | ~81M | 81M |
+| AddPieces | 1 MB | 10000 | ~98M | 98M |
+
+## Observations
+
+- **ProvePossession** operations are the most gas-intensive, with costs influenced by a combination of data set size and piece count. The correlation isn't as strong because costs are influenced by a linear combination of two different logarithmic functions: log(# pieces) + log(data set size).
+- **NextProvingPeriod** operations have relatively consistent gas costs regardless of data set characteristics.
+- **AddPieces** operations show a clear correlation between piece count and gas cost, with costs scaling logarithmically with the number of pieces. A fee-estimation sketch follows.
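+
+To translate a gas figure into an on-chain proof fee, one can query the verifier's public `calculateProofFee` view (defined in PDPVerifier.sol earlier in this diff). The snippet below is illustrative only: the interface name, the `FeeProbe` contract, and the 120M gas figure (taken from the ProvePossession rows above) are assumptions.
+
+```solidity
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.20;
+
+// Matches the public view on PDPVerifier.
+interface IProofFeeView {
+    function calculateProofFee(uint256 setId, uint256 estimatedGasFee) external view returns (uint256);
+}
+
+contract FeeProbe {
+    // Estimate the proof fee for a ~120M-gas ProvePossession at the current base fee.
+    function estimate(IProofFeeView verifier, uint256 setId) external view returns (uint256) {
+        uint256 estimatedGasFee = 120_000_000 * block.basefee;
+        return verifier.calculateProofFee(setId, estimatedGasFee);
+    }
+}
+```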
+ +![ProvePossession Gas for DataSet Size](ProvePosession%20Gas%20by%20DataSet%20Size.png) + +![AddPieces Gas by DataSet Size](AddPieces%20Gas%20by%20DataSet%20Size.png) + +## Raw Data + +For detailed transaction information, refer to the [`calibration-gas-costs.csv`](calibration-gas-costs.csv) file which contains links to the specific transactions on calibnet. \ No newline at end of file diff --git a/service_contracts/src/pdp/docs/gas-benchmarks/calibration-gas-costs.csv b/service_contracts/src/pdp/docs/gas-benchmarks/calibration-gas-costs.csv new file mode 100644 index 00000000..c0037a78 --- /dev/null +++ b/service_contracts/src/pdp/docs/gas-benchmarks/calibration-gas-costs.csv @@ -0,0 +1,18 @@ +DataSet ID,Gas value,Message type,Message Link,DataSize,PieceCount,DataSet Size +4,"105,553,070",ProvePossession,https://calibration.filfox.info/en/message/bafy2bzaceackgsldv7rl6uu2ohy2arg6on4vnm3sh6qk6ac7skcaiscdmwxyo?t=4,64GiB,39,2496 GiB +5,"99,514,874",ProvePossession,https://calibration.filfox.info/en/message/bafy2bzaceds37k36ddwexmf5flapycyrqxbvrredq2ps55wm4mgpbtvmcjqoq?t=4,100MB,113,11300 MB +6,"123,405,148",ProvePossession,https://calibration.filfox.info/en/message/bafy2bzacecr7kbrtvqkd6nmhn3tlxazlt2nceiykpqhkkxynpsxvrz2vsc7eo?t=4,1MB,1011,1011MB +5,"105,682,164",ProvePossession,https://calibration.filfox.info/en/message/bafy2bzaceb6gacb6tmm54ekn3gzg3cfnwc2w375vv6han2hwurrkb3qxmse6g?t=3,100 MB,113,11300 MB +4,"111,308,777",ProvePossession,https://calibration.filfox.info/en/message/bafy2bzacecu6kfdqpsor6fyrhhuthynnvxcc7by64uumdf44pbawvhomw6hhk?t=4,64 GiB,39, +4,"145,714,311",ProvePossession,https://calibration.filfox.info/en/message/0x4bbdd4066f84c53a1609791230af0c140b121cc7ceffe95b3fa5d86be190dabb?t=4,64GiB,39,"145,714,311" +5,"149,567,082",ProvePossession,https://calibration.filfox.info/en/message/0xb3d1a93d99577afdd00a80c8ccd15e4df87143c63358b95fb3a269140e0e3ce0?t=1,100MB,113,"149,567,082" +6,"153,074,953",ProvePossession,https://calibration.filfox.info/en/message/0xfb0ed656e96947dbe3d6f1d2b6739e8abf49a745e35f4689d7fa331969fb8af0?t=4,1MB,1011,"153,074,953" +7,"177,255,135",ProvePossession,https://calibration.filfox.info/en/message/0xf1375c2c9ff02b624ec4d133d15aed1c5d4b0c086aa0bdba88d1687b4c398017?t=4,1MB,10000,"177,255,135" +4,"56,652,758",NextProvingPeriod,https://calibration.filfox.info/en/message/0xac88291db41349c2903286d0a1d82ffad19ded9f41d59823b2281d92f60dd75f?t=4,64GiB,39, +5,"54,861,912",NextProvingPeriod,https://calibration.filfox.info/en/message/0x6a4d020c3b4ea05cd34f1a4546459b579db204d87e2e0db4d7314511dd2b219d?t=1,100MB,113, +6,"54,254,719",NextProvingPeriod,https://calibration.filfox.info/en/message/0xf049119c4f65fa3c680f71feda7f745b243bddb23ce04e05e31405cbc75abc51?t=4,1MB,1011, +7,"54,199,247",NextProvingPeriod,https://calibration.filfox.info/en/message/0xd83eb0b78398760b866e5244e08cfc10b59526f1e3cf7c7fa5e95f6343ea780c?t=4,1MB,10000, +4,"44,250,736",AddPieces,https://calibration.filfox.info/en/message/0x327817faf41fddc47f9416ab5623c54dc14d924615f5d3fb78f1d88eeba425a8?t=4,64GiB,39, +5,"55,549,661",AddPieces,https://calibration.pdp-explorer.eng.filoz.org/datasets/5,100MB,113, +6,"81,035,371",AddPieces,https://calibration.filfox.info/en/message/0x2d9fa3570a0d605aba79df00efea292bfb75b6601ff49f4673fc7f50cb5a6df4?t=1,1MB,1011, +7,"98,250,749",AddPieces,https://calibration.filfox.info/en/message/0x5d09607e618e8d3377e5900b410d60e55efd6b573725d9539d9fb626321f95a1?t=4,1MB,10000, \ No newline at end of file diff --git a/service_contracts/src/Errors.sol 
b/service_contracts/src/service-provider/Errors.sol similarity index 100% rename from service_contracts/src/Errors.sol rename to service_contracts/src/service-provider/Errors.sol diff --git a/service_contracts/src/Extsload.sol b/service_contracts/src/service-provider/Extsload.sol similarity index 100% rename from service_contracts/src/Extsload.sol rename to service_contracts/src/service-provider/Extsload.sol diff --git a/service_contracts/src/FilecoinWarmStorageService.sol b/service_contracts/src/service-provider/FilecoinWarmStorageService.sol similarity index 99% rename from service_contracts/src/FilecoinWarmStorageService.sol rename to service_contracts/src/service-provider/FilecoinWarmStorageService.sol index 3ae1173a..202a0899 100644 --- a/service_contracts/src/FilecoinWarmStorageService.sol +++ b/service_contracts/src/service-provider/FilecoinWarmStorageService.sol @@ -11,7 +11,7 @@ import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; import {IERC20Metadata} from "@openzeppelin/contracts/token/ERC20/extensions/IERC20Metadata.sol"; import {EIP712Upgradeable} from "@openzeppelin/contracts-upgradeable/utils/cryptography/EIP712Upgradeable.sol"; import {ERC1967Utils} from "@openzeppelin/contracts/proxy/ERC1967/ERC1967Utils.sol"; -import {Payments, IValidator} from "@fws-payments/Payments.sol"; +import {Payments, IValidator} from "@payments/Payments.sol"; import {Errors} from "./Errors.sol"; import {ServiceProviderRegistry} from "./ServiceProviderRegistry.sol"; diff --git a/service_contracts/src/FilecoinWarmStorageServiceStateView.sol b/service_contracts/src/service-provider/FilecoinWarmStorageServiceStateView.sol similarity index 100% rename from service_contracts/src/FilecoinWarmStorageServiceStateView.sol rename to service_contracts/src/service-provider/FilecoinWarmStorageServiceStateView.sol diff --git a/service_contracts/src/ServiceProviderRegistry.sol b/service_contracts/src/service-provider/ServiceProviderRegistry.sol similarity index 100% rename from service_contracts/src/ServiceProviderRegistry.sol rename to service_contracts/src/service-provider/ServiceProviderRegistry.sol diff --git a/service_contracts/src/ServiceProviderRegistryStorage.sol b/service_contracts/src/service-provider/ServiceProviderRegistryStorage.sol similarity index 100% rename from service_contracts/src/ServiceProviderRegistryStorage.sol rename to service_contracts/src/service-provider/ServiceProviderRegistryStorage.sol diff --git a/service_contracts/src/lib/FilecoinWarmStorageServiceLayout.sol b/service_contracts/src/service-provider/lib/FilecoinWarmStorageServiceLayout.sol similarity index 100% rename from service_contracts/src/lib/FilecoinWarmStorageServiceLayout.sol rename to service_contracts/src/service-provider/lib/FilecoinWarmStorageServiceLayout.sol diff --git a/service_contracts/src/lib/FilecoinWarmStorageServiceStateInternalLibrary.sol b/service_contracts/src/service-provider/lib/FilecoinWarmStorageServiceStateInternalLibrary.sol similarity index 100% rename from service_contracts/src/lib/FilecoinWarmStorageServiceStateInternalLibrary.sol rename to service_contracts/src/service-provider/lib/FilecoinWarmStorageServiceStateInternalLibrary.sol diff --git a/service_contracts/src/lib/FilecoinWarmStorageServiceStateLibrary.sol b/service_contracts/src/service-provider/lib/FilecoinWarmStorageServiceStateLibrary.sol similarity index 100% rename from service_contracts/src/lib/FilecoinWarmStorageServiceStateLibrary.sol rename to 
service_contracts/src/service-provider/lib/FilecoinWarmStorageServiceStateLibrary.sol
diff --git a/service_contracts/src/session-key-registry/README.md b/service_contracts/src/session-key-registry/README.md
new file mode 100644
index 00000000..2ddfcd53
--- /dev/null
+++ b/service_contracts/src/session-key-registry/README.md
@@ -0,0 +1,25 @@
+# SessionKeyRegistry
+
+## Usage
+Builds with [forge](https://getfoundry.sh/introduction/installation/).
+
+### Build
+```sh
+forge build
+```
+
+### Test
+```sh
+forge test -vvv
+```
+
+## FAQ
+
+### What are session keys?
+Session keys are disposable keys that let dapps perform actions on the user's behalf.
+Session keys are scoped to constrain the actions they can take.
+Session keys expire in order to reduce the possibilities for misuse.
+
+### Why a registry?
+Certain user actions are not message calls but EIP-712 signatures.
+Dapps using `ecrecover` need to check whether a session key was authorized to perform an action.
diff --git a/service_contracts/src/session-key-registry/contracts/SessionKeyRegistry.sol b/service_contracts/src/session-key-registry/contracts/SessionKeyRegistry.sol
new file mode 100644
index 00000000..6ba28974
--- /dev/null
+++ b/service_contracts/src/session-key-registry/contracts/SessionKeyRegistry.sol
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+pragma solidity ^0.8.30;
+
+contract SessionKeyRegistry {
+    mapping(
+        address user => mapping(
+            address signer => mapping(
+                bytes32 permission => uint256
+            )
+        )
+    ) public authorizationExpiry;
+
+    function _setAuthorizations(address signer, uint256 expiry, bytes32[] calldata permissions) internal {
+        mapping(bytes32 => uint256) storage permissionExpiry = authorizationExpiry[msg.sender][signer];
+        for (uint256 i = 0; i < permissions.length; i++) {
+            permissionExpiry[permissions[i]] = expiry;
+        }
+    }
+
+    /**
+     * @notice Caller revokes from the signer the specified permissions
+     * @param signer the authorized account
+     * @param permissions the scope of authority to revoke from the signer
+     */
+    function revoke(address signer, bytes32[] calldata permissions) external {
+        _setAuthorizations(signer, 0, permissions);
+    }
+
+    /**
+     * @notice Caller authorizes the signer with permissions until expiry
+     * @param signer the account authorized
+     * @param expiry when the authorization ends
+     * @param permissions the scope of authority granted to the signer
+     */
+    function login(address signer, uint256 expiry, bytes32[] calldata permissions) external {
+        _setAuthorizations(signer, expiry, permissions);
+    }
+
+    /**
+     * @notice Caller funds and authorizes the signer with permissions until expiry
+     * @param signer the account authorized
+     * @param expiry when the authorization ends
+     * @param permissions the scope of authority granted to the signer
+     */
+    function loginAndFund(address payable signer, uint256 expiry, bytes32[] calldata permissions) external payable {
+        _setAuthorizations(signer, expiry, permissions);
+        // Forward the funding to the signer (transfer passes a 2300 gas stipend)
+        signer.transfer(msg.value);
+    }
+}
diff --git a/service_contracts/test/payments/AccountLockupSettlement.t.sol b/service_contracts/test/payments/AccountLockupSettlement.t.sol
new file mode 100644
index 00000000..451a64a4
--- /dev/null
+++ b/service_contracts/test/payments/AccountLockupSettlement.t.sol
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+pragma solidity ^0.8.27;
+
+import {Test} from "forge-std/Test.sol";
+import {Payments} from "@payments/Payments.sol";
+import {PaymentsTestHelpers} from "./helpers/PaymentsTestHelpers.sol";
+import {BaseTestHelper} from "./helpers/BaseTestHelper.sol";
from "./helpers/BaseTestHelper.sol"; + +contract AccountLockupSettlementTest is Test, BaseTestHelper { + PaymentsTestHelpers helper; + Payments payments; + + // Define constants + uint256 internal constant DEPOSIT_AMOUNT = 100 ether; + uint256 internal constant MAX_LOCKUP_PERIOD = 100; + + function setUp() public { + helper = new PaymentsTestHelpers(); + helper.setupStandardTestEnvironment(); + payments = helper.payments(); + // Setup operator approval for potential rails + helper.setupOperatorApproval( + USER1, + OPERATOR, + 10 ether, // rateAllowance + 100 ether, // lockupAllowance + MAX_LOCKUP_PERIOD // maxLockupPeriod + ); + } + + function testSettlementWithNoLockupRate() public { + // Setup: deposit funds + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + + // No rails created, so lockup rate should be 0 + + // Advance blocks to create a settlement gap without a rate + helper.advanceBlocks(10); + + // Trigger settlement with a new deposit + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + + // Verify settlement occurred + helper.assertAccountState(USER1, DEPOSIT_AMOUNT * 2, 0, 0, block.number); + } + + function testSimpleLockupAccumulation() public { + // Setup: deposit funds + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + + // Define a lockup rate + uint256 lockupRate = 2 ether; + uint256 lockupPeriod = 2; + + // Create rail with the desired rate + uint256 railId = helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + lockupRate, // payment rate + lockupPeriod, // lockup period + 0, // no fixed lockup + address(0), // no fixed lockup + SERVICE_FEE_RECIPIENT // operator comission fee receiver + ); + assertEq(railId, 1); + + // Note: Settlement begins at the current block + // Advance blocks to create a settlement gap + uint256 elapsedBlocks = 5; + helper.advanceBlocks(elapsedBlocks); + + // Trigger settlement with a new deposit + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + + // The correct expected value is: + uint256 initialLockup = lockupRate * lockupPeriod; + uint256 accumulatedLockup = lockupRate * elapsedBlocks; + uint256 expectedLockup = initialLockup + accumulatedLockup; + + // Verify settlement occurred + helper.assertAccountState(USER1, DEPOSIT_AMOUNT * 2, expectedLockup, lockupRate, block.number); + } + + function testPartialSettlement() public { + uint256 lockupRate = 20 ether; + + helper.makeDeposit( + USER1, + USER1, + DEPOSIT_AMOUNT / 2 // 50 + ); + + // Create rail with the high rate (this will set the railway's settledUpTo to the current block) + helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + lockupRate, // Very high payment rate (20 ether per block) + 1, // lockup period + 0, // no fixed lockup + address(0), // no fixed lockup + SERVICE_FEE_RECIPIENT + ); + + // When a rail is created, its settledUpTo is set to the current block + // Initial account lockup value should be lockupRate * lockupPeriod = 20 ether * 1 = 20 ether + // Initial funds are DEPOSIT_AMOUNT / 2 = 50 ether + + // Advance many blocks to exceed available funds + uint256 advancedBlocks = 10; + helper.advanceBlocks(advancedBlocks); + + // Deposit additional funds, which will trigger settlement + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT / 2); + + // Verify partial settlement + uint256 expectedSettlementBlock = 5; // lockupRate is 20, so we only have enough funds to pay for 5 epochs) + uint256 expectedLockup = DEPOSIT_AMOUNT; + + // Verify settlement state using helper function + helper.assertAccountState( + USER1, + DEPOSIT_AMOUNT, // expected funds + 
+ + // Verify settlement occurred + helper.assertAccountState(USER1, DEPOSIT_AMOUNT * 2, expectedLockup, lockupRate, block.number); + } + + function testPartialSettlement() public { + uint256 lockupRate = 20 ether; + + helper.makeDeposit( + USER1, + USER1, + DEPOSIT_AMOUNT / 2 // 50 ether + ); + + // Create rail with the high rate (this will set the rail's settledUpTo to the current block) + helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + lockupRate, // Very high payment rate (20 ether per block) + 1, // lockup period + 0, // no fixed lockup + address(0), // no validator + SERVICE_FEE_RECIPIENT + ); + + // When a rail is created, its settledUpTo is set to the current block + // Initial account lockup value should be lockupRate * lockupPeriod = 20 ether * 1 = 20 ether + // Initial funds are DEPOSIT_AMOUNT / 2 = 50 ether + + // Advance many blocks to exceed available funds + uint256 advancedBlocks = 10; + helper.advanceBlocks(advancedBlocks); + + // Deposit additional funds, which will trigger settlement + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT / 2); + + // Verify partial settlement + uint256 expectedSettlementBlock = 5; // at 20 ether per block, the 100 ether of funds only covers 5 epochs + uint256 expectedLockup = DEPOSIT_AMOUNT; + + // Verify settlement state using helper function + helper.assertAccountState( + USER1, + DEPOSIT_AMOUNT, // expected funds + expectedLockup, // expected lockup + lockupRate, // expected lockup rate + expectedSettlementBlock // expected settlement block + ); + } + + function testSettlementAfterGap() public { + helper.makeDeposit( + USER1, + USER1, + DEPOSIT_AMOUNT * 2 // 200 ether + ); + + uint256 lockupRate = 1 ether; // 1 token per block + uint256 lockupPeriod = 30; + uint256 initialLockup = 10 ether; + + // Create rail + helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + lockupRate, // 1 token per block + lockupPeriod, // Lockup period of 30 blocks + initialLockup, // initial fixed lockup of 10 ether + address(0), // no validator + SERVICE_FEE_RECIPIENT // operator commission fee receiver + ); + + // Roll forward many blocks + helper.advanceBlocks(30); + + // Trigger settlement with a new deposit + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + + // Expected lockup = fixed lockup + lockup accrued over the gap + future lockup for the period + uint256 expectedLockup = initialLockup + (lockupRate * 30) + (lockupRate * lockupPeriod);
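+ // With these numbers: 10 ether + 1 ether × 30 blocks + 1 ether × 30 blocks = 70 ether.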
+ + // Verify settlement occurred + helper.assertAccountState( + USER1, + DEPOSIT_AMOUNT * 3, // expected funds + expectedLockup, // expected lockup + lockupRate, // expected lockup rate + block.number // expected settlement block + ); + } + + function testSettlementInvariants() public { + // Setup: deposit a specific amount + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + + // Scenario 1: Lockup exactly matches funds by creating a rail with fixed lockup + // exactly matching the deposit amount + + // Create a rail with fixed lockup = all available funds + helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + 0, // no payment rate + 10, // Lockup period + DEPOSIT_AMOUNT, // fixed lockup equal to all funds + address(0), // no validator + SERVICE_FEE_RECIPIENT // operator commission fee receiver + ); + + // Verify the account state using the helper function + helper.assertAccountState( + USER1, + DEPOSIT_AMOUNT, + DEPOSIT_AMOUNT, + 0, // no payment rate + block.number + ); + + helper.makeDeposit(USER1, USER1, 1); // Adding more funds + + // Scenario 2: Verify we can't create a situation where lockup > funds + // We'll try to create a rail with an impossibly high fixed lockup + + // Increase operator approval allowance + helper.setupOperatorApproval( + USER1, + OPERATOR, + 0, // no rate allowance needed + DEPOSIT_AMOUNT * 3, // much higher lockup allowance + MAX_LOCKUP_PERIOD // max lockup period + ); + + // Try to set up a rail with lockup > funds, which should fail + vm.startPrank(OPERATOR); + uint256 railId = payments.createRail( + helper.testToken(), + USER1, + USER2, + address(0), + 0, + SERVICE_FEE_RECIPIENT // operator commission fee receiver + ); + + // This should fail because lockupFixed > available funds + vm.expectRevert("invariant failure: insufficient funds to cover lockup after function execution"); + payments.modifyRailLockup(railId, 10, DEPOSIT_AMOUNT * 2); + vm.stopPrank(); + } + + function testWithdrawWithLockupSettlement() public { + helper.makeDeposit( + USER1, + USER1, + DEPOSIT_AMOUNT * 2 // Deposit 200 ether + ); + // Set a lockup rate and an existing lockup via a rail + uint256 lockupRate = 1 ether; + uint256 initialLockup = 50 ether; + uint256 lockupPeriod = 10; + + // Create rail with fixed + rate-based lockup + helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + lockupRate, // 1 ether per block + lockupPeriod, // Lockup period of 10 blocks + initialLockup, // 50 ether fixed lockup + address(0), // no validator + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Total lockup at rail creation: 50 ether fixed + (1 ether * 10 blocks) = 60 ether + // Available for withdrawal at creation: 200 ether - 60 ether = 140 ether + + // Try to withdraw more than available (should fail) + helper.expectWithdrawalToFail(USER1, 140 ether, 150 ether); + + // Withdraw exactly the available amount (should succeed and also settle account lockup) + helper.makeWithdrawal(USER1, 140 ether); + + // Verify account state after withdrawal + // Remaining funds: 200 - 140 = 60 ether + // Remaining lockup: 60 ether (unchanged because no blocks passed) + helper.assertAccountState( + USER1, + 60 ether, // expected funds + 60 ether, // expected lockup + lockupRate, // expected lockup rate + block.number // expected settlement block + ); + } +} diff --git a/service_contracts/test/payments/AccountManagement.t.sol b/service_contracts/test/payments/AccountManagement.t.sol new file mode 100644 index 00000000..ae7813dd --- /dev/null +++ b/service_contracts/test/payments/AccountManagement.t.sol @@ -0,0 +1,531 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.27; + +import {Test} from "forge-std/Test.sol"; +import {Payments} from "@payments/Payments.sol"; +import {PaymentsTestHelpers} from "./helpers/PaymentsTestHelpers.sol"; +import {BaseTestHelper} from "./helpers/BaseTestHelper.sol"; +import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; +import {Errors} from "@payments/Errors.sol"; + +contract AccountManagementTest is Test, BaseTestHelper { + PaymentsTestHelpers helper; + Payments payments; + + uint256 internal constant DEPOSIT_AMOUNT = 100 ether; + uint256 internal constant INITIAL_BALANCE = 1000 ether; + uint256 internal constant MAX_LOCKUP_PERIOD = 100; + + IERC20 private constant NATIVE_TOKEN = IERC20(address(0)); + + function setUp() public { + // Create test helpers and setup environment + helper = new PaymentsTestHelpers(); + helper.setupStandardTestEnvironment(); + payments = helper.payments(); + } + + function testBasicDeposit() public { + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + } + + function testNativeDeposit() public { + helper.makeNativeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + } + + function testMultipleDeposits() public { + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT + 1); + } + + function testDepositToAnotherUser() public { + helper.makeDeposit(USER1, USER2, DEPOSIT_AMOUNT); + } + + /*////////////////////////////////////////////////////////////// + DEPOSIT WITH PERMIT TESTS + //////////////////////////////////////////////////////////////*/
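+ + // These tests drive deposits through EIP-2612 permit: the payer signs (owner, spender, + // value, nonce, deadline) off-chain, and the contract applies the approval and pulls the + // funds in the same transaction, so no prior approve() call is needed.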
+ + function testDepositWithPermit() public { + helper.makeDepositWithPermit(user1Sk, USER1, DEPOSIT_AMOUNT); + } + + function testDepositWithPermitExpiredPermitReverts() public { + helper.expectExpiredPermitToRevert(user1Sk, USER1, DEPOSIT_AMOUNT); + } + + function testDepositWithPermitZeroAmountNoEffect() public { + helper.makeDepositWithPermit(user1Sk, USER1, 0); + } + + function testDepositWithPermitMultiple() public { + helper.makeDepositWithPermit(user1Sk, USER1, DEPOSIT_AMOUNT); + helper.makeDepositWithPermit(user1Sk, USER1, DEPOSIT_AMOUNT); + } + + function testDepositWithPermitRevertsForNativeToken() public { + helper.expectNativeTokenDepositWithPermitToRevert(user1Sk, USER1, DEPOSIT_AMOUNT); + } + + function testDepositWithPermitInvalidPermitReverts() public { + helper.expectInvalidPermitToRevert(user1Sk, USER1, DEPOSIT_AMOUNT); + } + + function testDepositWithPermitToAnotherUser() public { + helper.makeDepositWithPermitToAnotherUser(user1Sk, RELAYER, DEPOSIT_AMOUNT); + } + + function testNativeDepositWithInsufficientNativeTokens() public { + vm.startPrank(USER1); + + // Sending less native value than the declared deposit amount must revert + vm.expectRevert( + abi.encodeWithSelector(Errors.MustSendExactNativeAmount.selector, DEPOSIT_AMOUNT, DEPOSIT_AMOUNT - 1) + ); + payments.deposit{value: DEPOSIT_AMOUNT - 1}(NATIVE_TOKEN, USER1, DEPOSIT_AMOUNT); + + vm.stopPrank(); + } + + function testDepositWithZeroRecipient() public { + vm.startPrank(USER1); + + IERC20 testToken = helper.testToken(); + + // Using straightforward expectRevert without a message + vm.expectRevert(); + payments.deposit(testToken, address(0), DEPOSIT_AMOUNT); + + vm.stopPrank(); + } + + function testDepositWithInsufficientBalance() public { + vm.startPrank(USER1); + vm.expectRevert(); + helper.makeDeposit(USER1, USER1, INITIAL_BALANCE + 1); + vm.stopPrank(); + } + + function testDepositWithInsufficientAllowance() public { + // Reset allowance to a small amount + vm.startPrank(USER1); + IERC20 testToken = helper.testToken(); + testToken.approve(address(payments), DEPOSIT_AMOUNT / 2); + + // Attempt deposit with more than approved + vm.expectRevert(); + payments.deposit(testToken, USER1, DEPOSIT_AMOUNT); + vm.stopPrank(); + } + + /*////////////////////////////////////////////////////////////// + WITHDRAWAL TESTS + //////////////////////////////////////////////////////////////*/ + + function testBasicWithdrawal() public { + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + helper.makeWithdrawal(USER1, DEPOSIT_AMOUNT / 2); + } + + function testNativeWithdrawal() public { + helper.makeNativeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + helper.makeNativeWithdrawal(USER1, DEPOSIT_AMOUNT / 2); + } + + function testMultipleWithdrawals() public { + // Setup: deposit first + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + + // Test multiple withdrawals + helper.makeWithdrawal(USER1, DEPOSIT_AMOUNT / 4); + helper.makeWithdrawal(USER1, DEPOSIT_AMOUNT / 4); + } + + function testWithdrawToAnotherAddress() public { + // Setup: deposit first + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + + // Test withdrawTo + helper.makeWithdrawalTo(USER1, USER2, DEPOSIT_AMOUNT / 2); + } + + function testWithdrawEntireBalance() public { + // Setup: deposit first + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + + // Withdraw everything + helper.makeWithdrawal(USER1, DEPOSIT_AMOUNT); + } + + function testWithdrawExcessAmount() public { + // Setup: deposit first + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + + // Try to withdraw more than available + helper.expectWithdrawalToFail(USER1, DEPOSIT_AMOUNT, DEPOSIT_AMOUNT + 1); + } + + function testWithdrawToWithZeroRecipient() public { + vm.startPrank(USER1); + + IERC20 testToken = helper.testToken(); + + // Test zero recipient address + vm.expectRevert(); + payments.withdrawTo(testToken, address(0), DEPOSIT_AMOUNT); + + vm.stopPrank(); + } + + /*////////////////////////////////////////////////////////////// + LOCKUP/SETTLEMENT TESTS + //////////////////////////////////////////////////////////////*/ + + function testWithdrawWithLockedFunds() public { + // First, deposit funds + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + + // Define locked amount to be half of the deposit + uint256 lockedAmount = DEPOSIT_AMOUNT / 2; + + // Create a rail with a fixed lockup amount to achieve the required locked funds
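+ // The operator's lockupAllowance must cover the fixed lockup it is about to set; the + // rate allowance is irrelevant here because the rail's payment rate stays at zero.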
+ helper.setupOperatorApproval( + USER1, + OPERATOR, + 100 ether, // rateAllowance + lockedAmount, // lockupAllowance exactly matches what we need + MAX_LOCKUP_PERIOD // max lockup period + ); + + // Create rail with the fixed lockup + helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + 0, // no payment rate + 0, // no lockup period + lockedAmount, // fixed lockup of half the deposit + address(0), // no validator + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Verify lockup worked by checking account state + helper.assertAccountState( + USER1, + DEPOSIT_AMOUNT, // expected funds + lockedAmount, // expected lockup + 0, // expected rate (not set in this test) + block.number // expected last settled + ); + + // Try to withdraw more than unlocked funds + helper.expectWithdrawalToFail(USER1, DEPOSIT_AMOUNT - lockedAmount, DEPOSIT_AMOUNT); + + // Should be able to withdraw up to the unlocked amount + helper.makeWithdrawal(USER1, DEPOSIT_AMOUNT - lockedAmount); + } + + function testSettlementDuringDeposit() public { + // First deposit + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + + // Setup operator approval with sufficient allowances + helper.setupOperatorApproval( + USER1, + OPERATOR, + 100 ether, // rateAllowance + 1000 ether, // lockupAllowance + MAX_LOCKUP_PERIOD // max lockup period + ); + + uint256 lockupRate = 0.5 ether; // 0.5 token per block + + // Create a rail that sets a 0.5 ether/block lockup rate on the account + helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + lockupRate, // payment rate (creates lockup rate) + 10, // lockup period + 0, // no fixed lockup + address(0), // no validator + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Create a second rail to get to a 1 ether/block lockup rate on the account + helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + lockupRate, // payment rate (creates another 0.5 ether/block lockup rate) + 10, // lockup period + 0, // no fixed lockup + address(0), // no validator + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Advance 10 blocks to create a settlement gap + helper.advanceBlocks(10); + + // Make another deposit to trigger settlement + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT);
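+ + // Expected lockup: accrued 1 ether/block × 10 blocks = 10 ether, plus future lockup of + // 1 ether/block × 10-block period = 10 ether, i.e. 20 ether in total.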
+ // Check all states match expectations using the assertAccountState helper + helper.assertAccountState( + USER1, + DEPOSIT_AMOUNT * 2, // expected funds + 20 ether, // expected lockup (accrued 10 ether + future 10 ether) + lockupRate * 2, // expected rate (2 * 0.5 ether) + block.number // expected last settled + ); + } + + /*////////////////////////////////////////////////////////////// + ACCOUNT INFO TESTS + //////////////////////////////////////////////////////////////*/ + + function testGetAccountInfoNoLockups() public { + // Setup: deposit funds + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + + // Get account info + (uint256 fundedUntil, uint256 totalBalance, uint256 availableBalance, uint256 lockupRate) = + payments.getAccountInfoIfSettled(helper.testToken(), USER1); + + // Verify account state + assertEq(totalBalance, DEPOSIT_AMOUNT, "total balance mismatch"); + assertEq(availableBalance, DEPOSIT_AMOUNT, "available balance mismatch"); + assertEq(lockupRate, 0, "lockup rate should be 0"); + assertEq(fundedUntil, type(uint256).max, "funded until should be max"); + } + + function testGetAccountInfoWithFixedLockup() public { + // Setup: deposit funds + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + + // Setup operator approval + helper.setupOperatorApproval(USER1, OPERATOR, 100 ether, DEPOSIT_AMOUNT, MAX_LOCKUP_PERIOD); + + // Create rail with fixed lockup + uint256 fixedLockup = DEPOSIT_AMOUNT / 2; + helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + 0, + 0, + fixedLockup, + address(0), + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Get account info + (uint256 fundedUntil, uint256 totalBalance, uint256 availableBalance, uint256 lockupRate) = + payments.getAccountInfoIfSettled(helper.testToken(), USER1); + + // Verify account state + assertEq(totalBalance, DEPOSIT_AMOUNT, "total balance mismatch"); + assertEq(availableBalance, DEPOSIT_AMOUNT - fixedLockup, "available balance mismatch"); + assertEq(lockupRate, 0, "lockup rate should be 0"); + assertEq(fundedUntil, type(uint256).max, "funded until should be max with no rate"); + }
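+ + // The helper below mirrors the contract's lockup settlement: lockup accrues at + // lockupRate per epoch since the last settlement, but never beyond what the account's + // funds can cover; when funds fall short, it advances by whole epochs only (integer + // division), matching the contract's partial settlement.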
+ // Helper function to calculate simulated lockup and available balance + function calculateSimulatedLockupAndBalance( + uint256 funds, + uint256 lockupCurrent, + uint256 lockupRate, + uint256 lockupLastSettledAt + ) internal view returns (uint256 simulatedLockupCurrent, uint256 availableBalance) { + uint256 currentEpoch = block.number; + uint256 elapsedTime = currentEpoch - lockupLastSettledAt; + simulatedLockupCurrent = lockupCurrent; + + if (elapsedTime > 0 && lockupRate > 0) { + uint256 additionalLockup = lockupRate * elapsedTime; + + if (funds >= lockupCurrent + additionalLockup) { + simulatedLockupCurrent = lockupCurrent + additionalLockup; + } else { + uint256 availableFunds = funds - lockupCurrent; + if (availableFunds > 0) { + // Integer division: the number of whole epochs the remaining funds can cover + uint256 wholeEpochs = availableFunds / lockupRate; + simulatedLockupCurrent = lockupCurrent + (lockupRate * wholeEpochs); + } + } + } + + availableBalance = funds > simulatedLockupCurrent ? funds - simulatedLockupCurrent : 0; + } + + function testGetAccountInfoWithRateLockup() public { + // Setup: deposit funds + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + + // Setup operator approval + helper.setupOperatorApproval(USER1, OPERATOR, 100 ether, DEPOSIT_AMOUNT, MAX_LOCKUP_PERIOD); + + uint256 lockupRate = 1 ether; // 1 token per block + uint256 lockupPeriod = 10; + + // Create rail with rate lockup + helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + lockupRate, + lockupPeriod, + 0, + address(0), + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Advance 5 blocks + helper.advanceBlocks(5); + + // Get raw account data for debugging + (uint256 funds, uint256 lockupCurrent, uint256 lockupRate2, uint256 lockupLastSettledAt) = + payments.accounts(helper.testToken(), USER1); + + (, uint256 availableBalance) = + calculateSimulatedLockupAndBalance(funds, lockupCurrent, lockupRate2, lockupLastSettledAt); + + // Get account info + (uint256 fundedUntil, uint256 totalBalance1, uint256 availableBalance1, uint256 lockupRate1) = + payments.getAccountInfoIfSettled(helper.testToken(), USER1); + + // Verify account state + assertEq(totalBalance1, DEPOSIT_AMOUNT, "total balance mismatch"); + assertEq(availableBalance1, availableBalance, "available balance mismatch"); + assertEq(lockupRate1, lockupRate, "lockup rate mismatch");
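+ // fundedUntil = current block + availableBalance / lockupRate: the number of further + // epochs the unlocked balance can keep covering at the current accrual rate.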
+ assertEq(fundedUntil, block.number + (availableBalance / lockupRate), "funded until mismatch"); + } + + function testGetAccountInfoWithPartialSettlement() public { + // Setup: deposit funds + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + + // Setup operator approval + helper.setupOperatorApproval(USER1, OPERATOR, 100 ether, DEPOSIT_AMOUNT, MAX_LOCKUP_PERIOD); + + uint256 lockupRate = 2 ether; // 2 tokens per block + uint256 lockupPeriod = 10; + + // Create rail with rate lockup + helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + lockupRate, + lockupPeriod, + 0, + address(0), + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Advance blocks to create partial settlement + helper.advanceBlocks(5); + + // Get raw account data for debugging + (uint256 funds, uint256 lockupCurrent, uint256 lockupRate2, uint256 lockupLastSettledAt) = + payments.accounts(helper.testToken(), USER1); + + (, uint256 availableBalance) = + calculateSimulatedLockupAndBalance(funds, lockupCurrent, lockupRate2, lockupLastSettledAt); + + // Get account info + (uint256 fundedUntil, uint256 totalBalance2, uint256 availableBalance2, uint256 lockupRate3) = + payments.getAccountInfoIfSettled(helper.testToken(), USER1); + + // Verify account state + assertEq(totalBalance2, DEPOSIT_AMOUNT, "total balance mismatch"); + assertEq(availableBalance2, availableBalance, "available balance mismatch"); + assertEq(lockupRate3, lockupRate, "lockup rate mismatch"); + assertEq(fundedUntil, block.number + (availableBalance / lockupRate), "funded until mismatch"); + } + + function testGetAccountInfoInDebt() public { + // Setup: deposit funds + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + + // Setup operator approval + helper.setupOperatorApproval(USER1, OPERATOR, 100 ether, DEPOSIT_AMOUNT, MAX_LOCKUP_PERIOD); + + uint256 lockupRate = 2 ether; // 2 tokens per block + uint256 lockupPeriod = 10; + + // Create rail with rate lockup + helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + lockupRate, + lockupPeriod, + 0, + address(0), + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Advance blocks to create debt + helper.advanceBlocks(60); // creates debt: 60 blocks × 2 ether/block = 120 ether > the 100 ether deposited + + // Get account info + (uint256 fundedUntil, uint256 totalBalance3, uint256 availableBalance3, uint256 lockupRate3) = + payments.getAccountInfoIfSettled(helper.testToken(), USER1); + + // Verify account state + assertEq(totalBalance3, DEPOSIT_AMOUNT, "total balance mismatch"); + assertEq(availableBalance3, 0, "available balance should be 0"); + assertEq(lockupRate3, lockupRate, "lockup rate mismatch"); + assertTrue(fundedUntil < block.number, "funded until should be in the past"); + } + + function testGetAccountInfoAfterRateChange() public { + // Setup: deposit funds + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + + // Setup operator approval + helper.setupOperatorApproval(USER1, OPERATOR, 100 ether, DEPOSIT_AMOUNT, MAX_LOCKUP_PERIOD); + + uint256 initialRate = 1 ether; // 1 token per block + uint256 lockupPeriod = 10; + + // Create rail with initial rate + uint256 railId = helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + initialRate, + lockupPeriod, + 0, + address(0), + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Advance some blocks + helper.advanceBlocks(5); + + // Change the rate + uint256 newRate = 2 ether; // 2 tokens per block + vm.prank(OPERATOR); + payments.modifyRailPayment(railId, newRate, 0); + + // Get raw account data for debugging + (uint256 funds, uint256 lockupCurrent, uint256 lockupRate2, uint256 lockupLastSettledAt) = + payments.accounts(helper.testToken(), USER1); + + (, uint256 availableBalance) = + calculateSimulatedLockupAndBalance(funds, lockupCurrent, lockupRate2, lockupLastSettledAt); + + // Get account info + (uint256 fundedUntil, uint256 totalBalance4, uint256 availableBalance4, uint256 lockupRate4) = + payments.getAccountInfoIfSettled(helper.testToken(), USER1); + + // Verify account state + assertEq(totalBalance4, DEPOSIT_AMOUNT, "total balance mismatch"); + assertEq(availableBalance4, availableBalance, "available balance mismatch"); + assertEq(lockupRate4, newRate, "lockup rate mismatch"); + assertEq(fundedUntil, block.number + (availableBalance / newRate), "funded until mismatch"); + } +} diff --git a/service_contracts/test/payments/Burn.t.sol b/service_contracts/test/payments/Burn.t.sol new file mode 100644 index 00000000..a8656762 --- /dev/null +++ b/service_contracts/test/payments/Burn.t.sol @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.27; + +import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; +import {Test} from "forge-std/Test.sol"; + +import {Dutch} from "@payments/Dutch.sol"; +import {Errors} from "@payments/Errors.sol"; +import {FIRST_AUCTION_START_PRICE, MAX_AUCTION_START_PRICE, Payments} from "@payments/Payments.sol"; +import {PaymentsTestHelpers} from "./helpers/PaymentsTestHelpers.sol"; + +contract BurnTest is Test { + using Dutch for uint256; + + PaymentsTestHelpers helper = new PaymentsTestHelpers(); + Payments payments; + uint256 testTokenRailId; + uint256 nativeTokenRailId; + + address payable private constant BURN_ADDRESS = payable(0xff00000000000000000000000000000000000063); + + IERC20 private testToken; + IERC20 private constant NATIVE_TOKEN = IERC20(address(0)); + address private payer; + address private payee; + address private operator; + address private recipient; + + function setUp() public { + helper.setupStandardTestEnvironment(); + payments = helper.payments(); + + testToken = helper.testToken(); + operator = helper.OPERATOR(); + payer =
helper.USER1(); + payee = helper.USER2(); + recipient = helper.USER3(); + + vm.prank(payer); + payments.setOperatorApproval(testToken, operator, true, 5 * 10 ** 18, 5 * 10 ** 18, 28800); + vm.prank(payer); + payments.setOperatorApproval(NATIVE_TOKEN, operator, true, 5 * 10 ** 18, 5 * 10 ** 18, 28800); + + vm.prank(operator); + testTokenRailId = payments.createRail(testToken, payer, payee, address(0), 0, address(0)); + vm.prank(operator); + nativeTokenRailId = payments.createRail(NATIVE_TOKEN, payer, payee, address(0), 0, address(0)); + + vm.prank(payer); + testToken.approve(address(payments), 5 * 10 ** 18); + vm.prank(payer); + payments.deposit(testToken, payer, 5 * 10 ** 18); + + vm.prank(payer); + payments.deposit{value: 5 * 10 ** 18}(NATIVE_TOKEN, payer, 5 * 10 ** 18); + } + + function testBurn() public { + uint256 newRate = 9 * 10 ** 16; + vm.prank(operator); + payments.modifyRailPayment(testTokenRailId, newRate, 0); + + vm.roll(vm.getBlockNumber() + 10); + + (uint256 availableBefore,,,) = payments.accounts(testToken, address(payments)); + assertEq(availableBefore, 0); + + vm.prank(payer); + payments.settleRail(testTokenRailId, vm.getBlockNumber()); + + (uint256 available,,,) = payments.accounts(testToken, address(payments)); + assertEq(available, 10 * newRate * payments.NETWORK_FEE_NUMERATOR() / payments.NETWORK_FEE_DENOMINATOR()); + + vm.expectRevert( + abi.encodeWithSelector( + Errors.WithdrawAmountExceedsAccumulatedFees.selector, testToken, available, available + 1 + ) + ); + payments.burnForFees{value: FIRST_AUCTION_START_PRICE}(testToken, recipient, available + 1); + + vm.expectRevert( + abi.encodeWithSelector( + Errors.InsufficientNativeTokenForBurn.selector, FIRST_AUCTION_START_PRICE - 1, FIRST_AUCTION_START_PRICE + ) + ); + payments.burnForFees{value: FIRST_AUCTION_START_PRICE - 1}(testToken, recipient, available); + + payments.burnForFees{value: FIRST_AUCTION_START_PRICE}(testToken, recipient, available); + uint256 received = testToken.balanceOf(recipient); + assertEq(available, received); + + (uint256 availableAfter,,,) = payments.accounts(testToken, address(payments)); + assertEq(availableAfter, 0); + + assertEq(BURN_ADDRESS.balance, FIRST_AUCTION_START_PRICE); + + uint256 oneTimePayment = 2 * 10 ** 16; + + vm.prank(operator); + payments.modifyRailLockup(testTokenRailId, 20, oneTimePayment); + + newRate = 11 * 10 ** 16; + vm.prank(operator); + payments.modifyRailPayment(testTokenRailId, newRate, oneTimePayment); + + (uint256 startPrice, uint256 startTime) = payments.auctionInfo(testToken); + assertEq(startTime, block.timestamp); + assertEq(startPrice, FIRST_AUCTION_START_PRICE * Dutch.RESET_FACTOR); + + vm.roll(vm.getBlockNumber() + 17); + + (available,,,) = payments.accounts(testToken, address(payments)); + assertEq(available, oneTimePayment * payments.NETWORK_FEE_NUMERATOR() / payments.NETWORK_FEE_DENOMINATOR()); + + vm.prank(payer); + payments.settleRail(testTokenRailId, vm.getBlockNumber()); + + (available,,,) = payments.accounts(testToken, address(payments)); + assertEq( + available, + (17 * newRate + oneTimePayment) * payments.NETWORK_FEE_NUMERATOR() / payments.NETWORK_FEE_DENOMINATOR() + ); + + vm.warp(startTime + 11 days); + uint256 expectedPrice = startPrice.decay(11 days); + + vm.expectRevert( + abi.encodeWithSelector( + Errors.WithdrawAmountExceedsAccumulatedFees.selector, testToken, available, available + 1 + ) + ); + payments.burnForFees{value: expectedPrice}(testToken, recipient, available + 1); + + vm.expectRevert( + 
abi.encodeWithSelector(Errors.InsufficientNativeTokenForBurn.selector, expectedPrice - 1, expectedPrice) + ); + payments.burnForFees{value: expectedPrice - 1}(testToken, recipient, available); + + // can buy less than the full amount + uint256 remainder = 113; + payments.burnForFees{value: expectedPrice}(testToken, recipient, available - remainder); + + uint256 totalReceived = testToken.balanceOf(recipient); + assertEq(received + available - remainder, totalReceived); + + (available,,,) = payments.accounts(testToken, address(payments)); + assertEq(available, remainder); + + assertEq(BURN_ADDRESS.balance, FIRST_AUCTION_START_PRICE + expectedPrice); + } + + function testNativeAutoBurned() public { + uint256 newRate = 7 * 10 ** 16; + vm.prank(operator); + payments.modifyRailPayment(nativeTokenRailId, newRate, 0); + + vm.roll(vm.getBlockNumber() + 12); + + assertEq(BURN_ADDRESS.balance, 0); + + (uint256 availableBefore,,,) = payments.accounts(NATIVE_TOKEN, address(payments)); + assertEq(availableBefore, 0); + + vm.prank(payer); + payments.settleRail(nativeTokenRailId, vm.getBlockNumber()); + + (uint256 availableAfter,,,) = payments.accounts(NATIVE_TOKEN, address(payments)); + assertEq(availableAfter, 0); + + assertEq( + BURN_ADDRESS.balance, 12 * newRate * payments.NETWORK_FEE_NUMERATOR() / payments.NETWORK_FEE_DENOMINATOR() + ); + } + + function testBurnNoOp() public { + uint256 startPrice; + uint256 startTime; + for (uint256 i = 0; i < 5; i++) { + (startPrice, startTime) = payments.auctionInfo(testToken); + assertEq(startPrice.decay(vm.getBlockTimestamp() - startTime), 0); + payments.burnForFees(testToken, recipient, 0); + (startPrice, startTime) = payments.auctionInfo(testToken); + assertEq(startPrice, 0); + assertEq(startTime, vm.getBlockTimestamp()); + } + + uint256 newRate = 9 * 10 ** 16; + vm.prank(operator); + payments.modifyRailPayment(testTokenRailId, newRate, 0); + vm.roll(vm.getBlockNumber() + 10); + // verify that settling the rail in this situation still restarts the auction + vm.prank(payer); + payments.settleRail(testTokenRailId, vm.getBlockNumber()); + vm.prank(operator); + payments.modifyRailPayment(testTokenRailId, 0, 0); + + (startPrice, startTime) = payments.auctionInfo(testToken); + assertEq(startPrice, FIRST_AUCTION_START_PRICE); + assertEq(startTime, vm.getBlockTimestamp()); + + // wait until the price is 0 again + uint256 heatDeath = vm.getBlockTimestamp() + 10 ** 24; + vm.warp(heatDeath); + + for (uint256 i = 0; i < 5; i++) { + (startPrice, startTime) = payments.auctionInfo(testToken); + assertEq(startPrice.decay(vm.getBlockTimestamp() - startTime), 0); + payments.burnForFees(testToken, recipient, 0); + (startPrice, startTime) = payments.auctionInfo(testToken); + assertEq(startPrice, 0); + assertEq(startTime, vm.getBlockTimestamp()); + } + + // verify that settling the rail in this situation still restarts the auction + vm.roll(vm.getBlockNumber() + 1); + vm.prank(operator); + payments.modifyRailPayment(testTokenRailId, newRate, 0); + vm.roll(vm.getBlockNumber() + 10); + vm.prank(payer); + payments.settleRail(testTokenRailId, vm.getBlockNumber()); + + (startPrice, startTime) = payments.auctionInfo(testToken); + assertEq(startPrice, FIRST_AUCTION_START_PRICE); + assertEq(startTime, vm.getBlockTimestamp()); + } + + // test escalating auction start prices up to the MAX_AUCTION_START_PRICE cap + function testInferno() public { + // start the auction + uint256 newRate = 19 * 10 ** 14; + vm.prank(operator); + payments.modifyRailPayment(testTokenRailId, newRate, 0); + vm.roll(vm.getBlockNumber() + 10);
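+ // Settling the rail credits the network-fee share to the Payments contract's own + // account; each burnForFees purchase that clears those fees multiplies the next + // auction's start price by Dutch.RESET_FACTOR, capped at MAX_AUCTION_START_PRICE.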
+ vm.prank(payer); + payments.settleRail(testTokenRailId, vm.getBlockNumber()); + + uint256 startPrice; + uint256 startTime; + uint256 available; + uint256 expectedStartPrice = FIRST_AUCTION_START_PRICE; + // repeatedly end the auction, multiplying the burn + for (uint256 i = 0; i < 256; i++) { + (available,,,) = payments.accounts(testToken, address(payments)); + (startPrice, startTime) = payments.auctionInfo(testToken); + assertEq(startPrice, expectedStartPrice); + assertEq(startTime, vm.getBlockTimestamp()); + vm.deal(recipient, startPrice); + vm.prank(recipient); + payments.burnForFees{value: startPrice}(testToken, recipient, available); + expectedStartPrice *= Dutch.RESET_FACTOR; + if (expectedStartPrice > MAX_AUCTION_START_PRICE) { + expectedStartPrice = MAX_AUCTION_START_PRICE; + } + } + assertEq(expectedStartPrice, MAX_AUCTION_START_PRICE); + } +} diff --git a/service_contracts/test/payments/BurnExtraFeeToken.t.sol b/service_contracts/test/payments/BurnExtraFeeToken.t.sol new file mode 100644 index 00000000..139d06d0 --- /dev/null +++ b/service_contracts/test/payments/BurnExtraFeeToken.t.sol @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.27; + +import {PaymentsTestHelpers} from "./helpers/PaymentsTestHelpers.sol"; +import {ExtraFeeToken} from "./mocks/ExtraFeeToken.sol"; +import {FIRST_AUCTION_START_PRICE, Payments} from "@payments/Payments.sol"; +import {Test} from "forge-std/Test.sol"; + +contract BurnExtraFeeTokenTest is Test { + PaymentsTestHelpers helper = new PaymentsTestHelpers(); + Payments payments; + ExtraFeeToken feeToken; + + uint256 railId; + address payable private constant BURN_ADDRESS = payable(0xff00000000000000000000000000000000000063); + + address operator; + address payer; + address payee; + address recipient; + + function setUp() public { + helper.setupStandardTestEnvironment(); + payments = helper.payments(); + operator = helper.OPERATOR(); + payer = helper.USER1(); + payee = helper.USER2(); + recipient = helper.USER3(); + } + + function testBurnExtraFeeToken() public { + feeToken = new ExtraFeeToken(10 ** 16); + + feeToken.mint(payer, 50000 * 10 ** 18); + vm.prank(payer); + feeToken.approve(address(payments), 50000 * 10 ** 18); + vm.prank(payer); + payments.deposit(feeToken, payer, 500 * 10 ** 18); + + (uint256 balance,,,) = payments.accounts(feeToken, payer); + assertEq(balance, 500 * 10 ** 18); + + vm.prank(payer); + payments.setOperatorApproval(feeToken, operator, true, 50000 * 10 ** 18, 500 * 10 ** 18, 28800); + + vm.prank(operator); + railId = payments.createRail(feeToken, payer, payee, address(0), 0, address(0)); + + uint256 newRate = 100 * 10 ** 16; + + vm.prank(operator); + payments.modifyRailPayment(railId, newRate, 0); + + vm.roll(vm.getBlockNumber() + 10); + + vm.prank(payer); + payments.settleRail(railId, vm.getBlockNumber()); + + (uint256 available,,,) = payments.accounts(feeToken, address(payments)); + assertEq(available, 10 * newRate * payments.NETWORK_FEE_NUMERATOR() / payments.NETWORK_FEE_DENOMINATOR()); + + vm.expectRevert(); + payments.burnForFees{value: FIRST_AUCTION_START_PRICE}(feeToken, recipient, available); + + uint256 requested = available - feeToken.transferFee(); + vm.expectRevert(); + payments.burnForFees{value: FIRST_AUCTION_START_PRICE}(feeToken, recipient, requested + 1); + + payments.burnForFees{value: FIRST_AUCTION_START_PRICE}(feeToken, recipient, requested); + uint256 received = feeToken.balanceOf(recipient); + assertEq(requested, received); + + (uint256 availableAfter,,,) =
payments.accounts(feeToken, address(payments)); + assertEq(availableAfter, 0); + + assertEq(BURN_ADDRESS.balance, FIRST_AUCTION_START_PRICE); + } +} diff --git a/service_contracts/test/payments/BurnFeeOnTransferToken.t.sol b/service_contracts/test/payments/BurnFeeOnTransferToken.t.sol new file mode 100644 index 00000000..67ad7984 --- /dev/null +++ b/service_contracts/test/payments/BurnFeeOnTransferToken.t.sol @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.27; + +import {PaymentsTestHelpers} from "./helpers/PaymentsTestHelpers.sol"; +import {MockFeeOnTransferTokenWithPermit} from "./mocks/MockFeeOnTransferTokenWithPermit.sol"; +import {FIRST_AUCTION_START_PRICE, Payments} from "@payments/Payments.sol"; +import {Test} from "forge-std/Test.sol"; + +contract BurnFeeOnTransferTokenTest is Test { + PaymentsTestHelpers helper = new PaymentsTestHelpers(); + Payments payments; + MockFeeOnTransferTokenWithPermit feeToken; + + uint256 railId; + address payable private constant BURN_ADDRESS = payable(0xff00000000000000000000000000000000000063); + + address operator; + address payer; + address payee; + address recipient; + + function setUp() public { + helper.setupStandardTestEnvironment(); + payments = helper.payments(); + operator = helper.OPERATOR(); + payer = helper.USER1(); + payee = helper.USER2(); + recipient = helper.USER3(); + } + + function testBurnFeeOnTransferToken() public { + feeToken = new MockFeeOnTransferTokenWithPermit("FeeToken", "FEE", 100); + + feeToken.mint(payer, 50000 * 10 ** 18); + vm.prank(payer); + feeToken.approve(address(payments), 50000 * 10 ** 18); + vm.prank(payer); + payments.deposit(feeToken, payer, 500 * 10 ** 18); + + (uint256 balance,,,) = payments.accounts(feeToken, payer); + assertEq(balance, 495 * 10 ** 18); + + vm.prank(payer); + payments.setOperatorApproval(feeToken, operator, true, 50000 * 10 ** 18, 500 * 10 ** 18, 28800); + + vm.prank(operator); + railId = payments.createRail(feeToken, payer, payee, address(0), 0, address(0)); + + uint256 newRate = 100 * 10 ** 16; + + vm.prank(operator); + payments.modifyRailPayment(railId, newRate, 0); + + vm.roll(vm.getBlockNumber() + 10); + + vm.prank(payer); + payments.settleRail(railId, vm.getBlockNumber()); + + (uint256 available,,,) = payments.accounts(feeToken, address(payments)); + assertEq(available, 10 * newRate * payments.NETWORK_FEE_NUMERATOR() / payments.NETWORK_FEE_DENOMINATOR()); + + payments.burnForFees{value: FIRST_AUCTION_START_PRICE}(feeToken, recipient, available); + uint256 received = feeToken.balanceOf(recipient); + assertEq(available * 99 / 100, received); + + (uint256 availableAfter,,,) = payments.accounts(feeToken, address(payments)); + assertEq(availableAfter, 0); + + assertEq(BURN_ADDRESS.balance, FIRST_AUCTION_START_PRICE); + } +} diff --git a/service_contracts/test/payments/DepositWithAuthorization.t.sol b/service_contracts/test/payments/DepositWithAuthorization.t.sol new file mode 100644 index 00000000..aeff8a10 --- /dev/null +++ b/service_contracts/test/payments/DepositWithAuthorization.t.sol @@ -0,0 +1,289 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.27; + +import {Test} from "forge-std/Test.sol"; +import {Payments} from "@payments/Payments.sol"; +import {MockERC20} from "./mocks/MockERC20.sol"; +import {PaymentsTestHelpers} from "./helpers/PaymentsTestHelpers.sol"; +import {BaseTestHelper} from "./helpers/BaseTestHelper.sol"; + +contract DepositWithAuthorization is Test, BaseTestHelper { + MockERC20 testToken; + 
PaymentsTestHelpers helper; + Payments payments; + + uint256 constant DEPOSIT_AMOUNT = 1000 ether; + uint256 constant RATE_ALLOWANCE = 100 ether; + uint256 constant LOCKUP_ALLOWANCE = 1000 ether; + uint256 constant MAX_LOCKUP_PERIOD = 100; + uint256 internal constant INITIAL_BALANCE = 1000 ether; + + function setUp() public { + helper = new PaymentsTestHelpers(); + helper.setupStandardTestEnvironment(); + payments = helper.payments(); + + testToken = helper.testToken(); + } + + function testDepositWithAuthorization_HappyPath() public { + uint256 fromPrivateKey = user1Sk; + address from = vm.addr(fromPrivateKey); + address to = from; + uint256 validForSeconds = 60; + uint256 amount = DEPOSIT_AMOUNT; + + // Windows + uint256 validAfter = 0; // valid immediately + uint256 validBefore = block.timestamp + validForSeconds; + + // Nonce: generate a unique bytes32 per authorization + // For tests you can make it deterministic: + bytes32 nonce = keccak256(abi.encodePacked("auth-nonce", from, to, amount, block.number)); + + // Pre-state capture + uint256 fromBalanceBefore = helper._balanceOf(from, false); + uint256 paymentsBalanceBefore = helper._balanceOf(address(payments), false); + Payments.Account memory toAccountBefore = helper._getAccountData(to, false); + + // Build signature + (uint8 v, bytes32 r, bytes32 s) = helper.getReceiveWithAuthorizationSignature( + fromPrivateKey, + testToken, + from, + address(payments), // receiveWithAuthorization pays to Payments contract + amount, + validAfter, + validBefore, + nonce + ); + + // Execute deposit via authorization + vm.startPrank(from); + + payments.depositWithAuthorization(testToken, to, amount, validAfter, validBefore, nonce, v, r, s); + + vm.stopPrank(); + + // Post-state capture + uint256 fromBalanceAfter = helper._balanceOf(from, false); + uint256 paymentsBalanceAfter = helper._balanceOf(address(payments), false); + Payments.Account memory toAccountAfter = helper._getAccountData(from, false); + + // Assertions + helper._assertDepositBalances( + fromBalanceBefore, + fromBalanceAfter, + paymentsBalanceBefore, + paymentsBalanceAfter, + toAccountBefore, + toAccountAfter, + amount + ); + + // Verify authorization is consumed on the token + bool used = testToken.authorizationState(from, nonce); + assertTrue(used); + } + + function testDepositWithAuthorization_Revert_ReplayNonceUsed() public { + uint256 fromPrivateKey = user1Sk; + uint256 amount = DEPOSIT_AMOUNT; + uint256 validForSeconds = 60; + + address from = vm.addr(fromPrivateKey); + address to = from; + uint256 validAfter = 0; + uint256 validBefore = block.timestamp + validForSeconds; + bytes32 nonce = keccak256(abi.encodePacked("auth-nonce", from, to, amount, block.number)); + + (uint8 v, bytes32 r, bytes32 s) = helper.getReceiveWithAuthorizationSignature( + fromPrivateKey, testToken, from, address(payments), amount, validAfter, validBefore, nonce + ); + + vm.startPrank(from); + payments.depositWithAuthorization(testToken, to, amount, validAfter, validBefore, nonce, v, r, s); + // Second attempt with same nonce must revert + vm.expectRevert("EIP3009: authorization already used"); + payments.depositWithAuthorization(testToken, to, amount, validAfter, validBefore, nonce, v, r, s); + vm.stopPrank(); + } + + function testDepositWithAuthorization_Revert_InvalidSignature_WrongSigner() public { + address from = vm.addr(user1Sk); + address to = from; + uint256 amount = DEPOSIT_AMOUNT; + uint256 validAfter = 0; + uint256 validBefore = block.timestamp + 60; + bytes32 nonce = 
keccak256(abi.encodePacked("auth-nonce", from, to, amount, block.number)); + + // Generate signature with a different private key + (uint8 v, bytes32 r, bytes32 s) = helper.getReceiveWithAuthorizationSignature( + user2Sk, testToken, from, address(payments), amount, validAfter, validBefore, nonce + ); + + vm.startPrank(from); + vm.expectRevert("EIP3009: invalid signature"); + payments.depositWithAuthorization(testToken, to, amount, validAfter, validBefore, nonce, v, r, s); + vm.stopPrank(); + } + + function testDepositWithAuthorization_Revert_InvalidSignature_Corrupted() public { + address from = vm.addr(user1Sk); + address to = from; + uint256 amount = DEPOSIT_AMOUNT; + uint256 validAfter = 0; + uint256 validBefore = block.timestamp + 60; + bytes32 nonce = keccak256(abi.encodePacked("auth-nonce", from, to, amount, block.number)); + + (uint8 v, bytes32 r, bytes32 s) = helper.getReceiveWithAuthorizationSignature( + user1Sk, testToken, from, address(payments), amount, validAfter, validBefore, nonce + ); + + // Corrupt r + r = bytes32(uint256(r) ^ 3); + + vm.startPrank(from); + vm.expectRevert("EIP712: invalid signature"); // invalid signature should revert + payments.depositWithAuthorization(testToken, to, amount, validAfter, validBefore, nonce, v, r, s); + vm.stopPrank(); + } + + function testDepositWithAuthorization_Revert_ExpiredAuthorization() public { + address from = vm.addr(user1Sk); + address to = from; + uint256 amount = DEPOSIT_AMOUNT; + uint256 validAfter = 0; + uint256 validBefore = block.timestamp + 1; + bytes32 nonce = keccak256(abi.encodePacked("auth-nonce", from, to, amount, block.number)); + + (uint8 v, bytes32 r, bytes32 s) = helper.getReceiveWithAuthorizationSignature( + user1Sk, testToken, from, address(payments), amount, validAfter, validBefore, nonce + ); + + // advance beyond validBefore + vm.warp(validBefore + 1); + + vm.startPrank(from); + vm.expectRevert("EIP3009: authorization expired"); // expired window should revert + payments.depositWithAuthorization(testToken, to, amount, validAfter, validBefore, nonce, v, r, s); + vm.stopPrank(); + } + + function testDepositWithAuthorization_Revert_NotYetValidAuthorization() public { + address from = vm.addr(user1Sk); + address to = from; + uint256 amount = DEPOSIT_AMOUNT; + uint256 validAfter = block.timestamp + 60; + uint256 validBefore = validAfter + 300; + bytes32 nonce = keccak256(abi.encodePacked("auth-nonce", from, to, amount, block.number)); + + // Pre-state capture + uint256 fromBalanceBefore = helper._balanceOf(from, false); + uint256 paymentsBalanceBefore = helper._balanceOf(address(payments), false); + Payments.Account memory toAccountBefore = helper._getAccountData(to, false); + + (uint8 v, bytes32 r, bytes32 s) = helper.getReceiveWithAuthorizationSignature( + user1Sk, testToken, from, address(payments), amount, validAfter, validBefore, nonce + ); + + vm.startPrank(from); + vm.expectRevert("EIP3009: authorization not yet valid"); // not yet valid + payments.depositWithAuthorization(testToken, to, amount, validAfter, validBefore, nonce, v, r, s); + + // Now advance to validAfter + 1 and succeed + vm.warp(validAfter + 1); + payments.depositWithAuthorization(testToken, to, amount, validAfter, validBefore, nonce, v, r, s); + vm.stopPrank(); + + // Post-state capture + uint256 fromBalanceAfter = helper._balanceOf(from, false); + uint256 paymentsBalanceAfter = helper._balanceOf(address(payments), false); + Payments.Account memory toAccountAfter = helper._getAccountData(from, false); + + // Assertions + 
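+ // The helper asserts the expected effect: `amount` moved from the payer's wallet into + // the Payments contract's balance and was credited to the recipient's internal account.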
helper._assertDepositBalances( + fromBalanceBefore, + fromBalanceAfter, + paymentsBalanceBefore, + paymentsBalanceAfter, + toAccountBefore, + toAccountAfter, + amount + ); + + // Verify authorization is consumed on the token + bool used = testToken.authorizationState(from, nonce); + assertTrue(used); + } + + function testDepositWithAuthorization_SubmittedByDifferentSender() public { + address from = vm.addr(user1Sk); + address to = from; + uint256 amount = DEPOSIT_AMOUNT; + uint256 validAfter = 0; + uint256 validBefore = block.timestamp + 300; + bytes32 nonce = keccak256(abi.encodePacked("auth-nonce", from, to, amount, block.number)); + + (uint8 v, bytes32 r, bytes32 s) = helper.getReceiveWithAuthorizationSignature( + user1Sk, testToken, from, address(payments), amount, validAfter, validBefore, nonce + ); + + // Pre-state capture + uint256 fromBalanceBefore = helper._balanceOf(from, false); + uint256 paymentsBalanceBefore = helper._balanceOf(address(payments), false); + Payments.Account memory toAccountBefore = helper._getAccountData(to, false); + + // Attempt to submit as a different user + address relayer = vm.addr(user2Sk); + vm.startPrank(relayer); + payments.depositWithAuthorization(testToken, to, amount, validAfter, validBefore, nonce, v, r, s); + vm.stopPrank(); + + // Post-state capture + uint256 fromBalanceAfter = helper._balanceOf(from, false); + uint256 paymentsBalanceAfter = helper._balanceOf(address(payments), false); + Payments.Account memory toAccountAfter = helper._getAccountData(to, false); + + // Assertions + helper._assertDepositBalances( + fromBalanceBefore, + fromBalanceAfter, + paymentsBalanceBefore, + paymentsBalanceAfter, + toAccountBefore, + toAccountAfter, + amount + ); + + // Verify authorization is consumed on the token + bool used = testToken.authorizationState(from, nonce); + assertTrue(used); + } + + function testDepositWithAuthorization_Revert_InsufficientBalance() public { + helper.depositWithAuthorizationInsufficientBalance(user1Sk); + } + + function testDepositWithAuthorization_Revert_DomainMismatchWrongToken() public { + address from = vm.addr(user1Sk); + address to = from; + uint256 amount = DEPOSIT_AMOUNT; + uint256 validAfter = 0; + uint256 validBefore = block.timestamp + 300; + bytes32 nonce = keccak256(abi.encodePacked("auth-nonce", from, to, amount, block.number)); + + // Create a second token + MockERC20 otherToken = new MockERC20("OtherToken", "OTK"); + + // Sign against otherToken domain + (uint8 v, bytes32 r, bytes32 s) = helper.getReceiveWithAuthorizationSignature( + user1Sk, otherToken, from, address(payments), amount, validAfter, validBefore, nonce + ); + + vm.startPrank(from); + vm.expectRevert("EIP3009: invalid signature"); // domain mismatch + payments.depositWithAuthorization(testToken, to, amount, validAfter, validBefore, nonce, v, r, s); + vm.stopPrank(); + } +} diff --git a/service_contracts/test/payments/DepositWithAuthorizationAndOperatorApproval.t.sol b/service_contracts/test/payments/DepositWithAuthorizationAndOperatorApproval.t.sol new file mode 100644 index 00000000..12c25c30 --- /dev/null +++ b/service_contracts/test/payments/DepositWithAuthorizationAndOperatorApproval.t.sol @@ -0,0 +1,530 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.27; + +import {Test} from "forge-std/Test.sol"; +import {Payments} from "@payments/Payments.sol"; +import {MockERC20} from "./mocks/MockERC20.sol"; +import {PaymentsTestHelpers} from "./helpers/PaymentsTestHelpers.sol"; +import {BaseTestHelper} from 
"./helpers/BaseTestHelper.sol"; +import {Errors} from "@payments/Errors.sol"; + +contract DepositWithAuthorization is Test, BaseTestHelper { + MockERC20 testToken; + PaymentsTestHelpers helper; + Payments payments; + + uint256 constant DEPOSIT_AMOUNT = 1000 ether; + uint256 constant RATE_ALLOWANCE = 100 ether; + uint256 constant LOCKUP_ALLOWANCE = 1000 ether; + uint256 constant MAX_LOCKUP_PERIOD = 100; + uint256 internal constant INITIAL_BALANCE = 1000 ether; + + function setUp() public { + helper = new PaymentsTestHelpers(); + helper.setupStandardTestEnvironment(); + payments = helper.payments(); + + testToken = helper.testToken(); + } + + function testDepositWithAuthorizationAndOperatorApproval_HappyPath() public { + uint256 fromPrivateKey = user1Sk; + uint256 validForSeconds = 60; + uint256 amount = DEPOSIT_AMOUNT; + + helper.depositWithAuthorizationAndOperatorApproval( + fromPrivateKey, amount, validForSeconds, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD + ); + } + + function testDepositWithAuthorizationAndOperatorApproval_ZeroAmount() public { + uint256 fromPrivateKey = user1Sk; + uint256 validForSeconds = 60; + uint256 amount = 0; // Zero amount + + helper.depositWithAuthorizationAndOperatorApproval( + fromPrivateKey, amount, validForSeconds, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD + ); + } + + function testDepositWithAuthorizationAndOperatorApproval_Revert_InvalidSignature() public { + address from = vm.addr(user1Sk); + address to = from; + uint256 amount = DEPOSIT_AMOUNT; + uint256 validAfter = 0; + uint256 validBefore = block.timestamp + 60; + bytes32 nonce = keccak256(abi.encodePacked("auth-nonce", from, to, amount, block.number)); + + // Build signature with wrong private key + (uint8 v, bytes32 r, bytes32 s) = helper.getReceiveWithAuthorizationSignature( + user2Sk, testToken, from, address(payments), amount, validAfter, validBefore, nonce + ); + + vm.startPrank(from); + + vm.expectRevert("EIP3009: invalid signature"); + payments.depositWithAuthorizationAndApproveOperator( + testToken, + to, + amount, + validAfter, + validBefore, + nonce, + v, + r, + s, + OPERATOR, + RATE_ALLOWANCE, + LOCKUP_ALLOWANCE, + MAX_LOCKUP_PERIOD + ); + + vm.stopPrank(); + } + + function testDepositWithAuthorizationAndOperatorApproval_Revert_InvalidSignature_Corrupted() public { + address from = vm.addr(user1Sk); + address to = from; + uint256 amount = DEPOSIT_AMOUNT; + uint256 validAfter = 0; + uint256 validBefore = block.timestamp + 60; + bytes32 nonce = keccak256(abi.encodePacked("auth-nonce", from, to, amount, block.number)); + + (uint8 v, bytes32 r, bytes32 s) = helper.getReceiveWithAuthorizationSignature( + user1Sk, testToken, from, address(payments), amount, validAfter, validBefore, nonce + ); + + // Corrupt r + r = bytes32(uint256(r) ^ 3); + + vm.startPrank(from); + vm.expectRevert("EIP712: invalid signature"); // invalid signature should revert + payments.depositWithAuthorizationAndApproveOperator( + testToken, + to, + amount, + validAfter, + validBefore, + nonce, + v, + r, + s, + OPERATOR, + RATE_ALLOWANCE, + LOCKUP_ALLOWANCE, + MAX_LOCKUP_PERIOD + ); + } + + function testDepositWithAuthorizationAndOperatorApproval_Revert_ExpiredAuthorization() public { + address from = vm.addr(user1Sk); + address to = from; + uint256 amount = DEPOSIT_AMOUNT; + uint256 validAfter = 0; + uint256 validBefore = block.timestamp + 1; + bytes32 nonce = keccak256(abi.encodePacked("auth-nonce", from, to, amount, block.number)); + + (uint8 v, bytes32 r, bytes32 s) = 
helper.getReceiveWithAuthorizationSignature( + user1Sk, testToken, from, address(payments), amount, validAfter, validBefore, nonce + ); + + // advance beyond validBefore + vm.warp(validBefore + 1); + + vm.startPrank(from); + vm.expectRevert("EIP3009: authorization expired"); // expired window should revert + payments.depositWithAuthorizationAndApproveOperator( + testToken, + to, + amount, + validAfter, + validBefore, + nonce, + v, + r, + s, + OPERATOR, + RATE_ALLOWANCE, + LOCKUP_ALLOWANCE, + MAX_LOCKUP_PERIOD + ); + } + + function testDepositWithAuthorizationAndOperatorApproval_Revert_NotYetValidAuthorization() public { + address from = vm.addr(user1Sk); + address to = from; + uint256 amount = DEPOSIT_AMOUNT; + uint256 validAfter = block.timestamp + 60; + uint256 validBefore = validAfter + 300; + bytes32 nonce = keccak256(abi.encodePacked("auth-nonce", from, to, amount, block.number)); + + (uint8 v, bytes32 r, bytes32 s) = helper.getReceiveWithAuthorizationSignature( + user1Sk, testToken, from, address(payments), amount, validAfter, validBefore, nonce + ); + + vm.startPrank(from); + vm.expectRevert("EIP3009: authorization not yet valid"); // not yet valid + payments.depositWithAuthorizationAndApproveOperator( + testToken, + to, + amount, + validAfter, + validBefore, + nonce, + v, + r, + s, + OPERATOR, + RATE_ALLOWANCE, + LOCKUP_ALLOWANCE, + MAX_LOCKUP_PERIOD + ); + } + + function testDepositWithAuthorizationAndOperatorApproval_Revert_DifferentSender() public { + address from = vm.addr(user1Sk); + address to = from; + uint256 amount = DEPOSIT_AMOUNT; + uint256 validAfter = 0; + uint256 validBefore = block.timestamp + 60; + bytes32 nonce = keccak256(abi.encodePacked("auth-nonce", from, to, amount, block.number)); + + (uint8 v, bytes32 r, bytes32 s) = helper.getReceiveWithAuthorizationSignature( + user1Sk, testToken, from, address(payments), amount, validAfter, validBefore, nonce + ); + + // Attempt to submit as a different user + from = vm.addr(user2Sk); + vm.startPrank(from); + vm.expectRevert(abi.encodeWithSelector(Errors.SignerMustBeMsgSender.selector, from, to)); + payments.depositWithAuthorizationAndApproveOperator( + testToken, + to, + amount, + validAfter, + validBefore, + nonce, + v, + r, + s, + OPERATOR, + RATE_ALLOWANCE, + LOCKUP_ALLOWANCE, + MAX_LOCKUP_PERIOD + ); + vm.stopPrank(); + } + + function testDepositWithAuthorizationAndIncreaseOperatorApproval_HappyPath() public { + uint256 fromPrivateKey = user1Sk; + address from = vm.addr(fromPrivateKey); + address to = from; + uint256 validForSeconds = 60 * 60; + uint256 amount = DEPOSIT_AMOUNT; + + // Step 1: First establish initial operator approval with deposit + helper.depositWithAuthorizationAndOperatorApproval( + fromPrivateKey, amount, validForSeconds, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD + ); + + // Step 2: Verify initial approval state + (bool isApproved, uint256 initialRateAllowance, uint256 initialLockupAllowance,,,) = + payments.operatorApprovals(testToken, USER1, OPERATOR); + assertEq(isApproved, true); + assertEq(initialRateAllowance, RATE_ALLOWANCE); + assertEq(initialLockupAllowance, LOCKUP_ALLOWANCE); + + // Step 3: Prepare for the increase operation + uint256 additionalDeposit = 500 ether; + uint256 rateIncrease = 50 ether; + uint256 lockupIncrease = 500 ether; + + // Give USER1 more tokens for the additional deposit + testToken.mint(USER1, additionalDeposit); + + uint256 validAfter = 0; + uint256 validBefore = validAfter + validForSeconds; + bytes32 nonce = 
keccak256(abi.encodePacked("auth-nonce", from, to, additionalDeposit, block.number)); + + (uint8 v, bytes32 r, bytes32 s) = helper.getReceiveWithAuthorizationSignature( + user1Sk, testToken, from, address(payments), additionalDeposit, validAfter, validBefore, nonce + ); + + // Record initial account state + (uint256 initialFunds,,,) = payments.accounts(testToken, USER1); + + // Step 4: Execute depositWithAuthorizationAndIncreaseOperatorApproval + vm.startPrank(USER1); + payments.depositWithAuthorizationAndIncreaseOperatorApproval( + testToken, + to, + additionalDeposit, + validAfter, + validBefore, + nonce, + v, + r, + s, + OPERATOR, + rateIncrease, + lockupIncrease + ); + + vm.stopPrank(); + + // Step 5: Verify results + // Check deposit was successful + (uint256 finalFunds,,,) = payments.accounts(testToken, USER1); + assertEq(finalFunds, initialFunds + additionalDeposit); + + // Check operator approval was increased + (, uint256 finalRateAllowance, uint256 finalLockupAllowance,,,) = + payments.operatorApprovals(testToken, USER1, OPERATOR); + assertEq(finalRateAllowance, initialRateAllowance + rateIncrease); + assertEq(finalLockupAllowance, initialLockupAllowance + lockupIncrease); + } + + function testDepositWithAuthorizationAndIncreaseOperatorApproval_ZeroIncrease() public { + uint256 fromPrivateKey = user1Sk; + address from = vm.addr(fromPrivateKey); + address to = from; + uint256 validForSeconds = 60 * 60; + uint256 amount = DEPOSIT_AMOUNT; + + // Step 1: First establish initial operator approval with deposit + helper.depositWithAuthorizationAndOperatorApproval( + fromPrivateKey, amount, validForSeconds, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD + ); + + // Step 2: Verify initial approval state + (bool isApproved, uint256 initialRateAllowance, uint256 initialLockupAllowance,,,) = + payments.operatorApprovals(testToken, USER1, OPERATOR); + assertEq(isApproved, true); + assertEq(initialRateAllowance, RATE_ALLOWANCE); + assertEq(initialLockupAllowance, LOCKUP_ALLOWANCE); + + // Step 3: Prepare for the increase operation + uint256 additionalDeposit = 500 ether; + uint256 rateIncrease = 0; + uint256 lockupIncrease = 0; + + // Give USER1 more tokens for the additional deposit + testToken.mint(USER1, additionalDeposit); + + uint256 validAfter = 0; + uint256 validBefore = validAfter + validForSeconds; + bytes32 nonce = keccak256(abi.encodePacked("auth-nonce", from, to, additionalDeposit, block.number)); + + (uint8 v, bytes32 r, bytes32 s) = helper.getReceiveWithAuthorizationSignature( + user1Sk, testToken, from, address(payments), additionalDeposit, validAfter, validBefore, nonce + ); + + // Record initial account state + (uint256 initialFunds,,,) = payments.accounts(testToken, USER1); + + // Step 4: Execute depositWithAuthorizationAndIncreaseOperatorApproval + vm.startPrank(USER1); + payments.depositWithAuthorizationAndIncreaseOperatorApproval( + testToken, + to, + additionalDeposit, + validAfter, + validBefore, + nonce, + v, + r, + s, + OPERATOR, + rateIncrease, + lockupIncrease + ); + + vm.stopPrank(); + + // Step 5: Verify results + // Check deposit was successful + (uint256 finalFunds,,,) = payments.accounts(testToken, USER1); + assertEq(finalFunds, initialFunds + additionalDeposit); + + (, uint256 finalRateAllowance, uint256 finalLockupAllowance,,,) = + payments.operatorApprovals(testToken, USER1, OPERATOR); + assertEq(finalRateAllowance, initialRateAllowance); // No change + assertEq(finalLockupAllowance, initialLockupAllowance); // No change + } + + function 
testDepositWithAuthorizationAndIncreaseOperatorApproval_InvalidSignature() public { + uint256 fromPrivateKey = user1Sk; + address from = vm.addr(fromPrivateKey); + address to = from; + uint256 validForSeconds = 60 * 60; + uint256 amount = DEPOSIT_AMOUNT; + + // First establish initial operator approval with deposit + helper.depositWithAuthorizationAndOperatorApproval( + fromPrivateKey, amount, validForSeconds, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD + ); + + // Verify initial approval state + (bool isApproved, uint256 initialRateAllowance, uint256 initialLockupAllowance,,,) = + payments.operatorApprovals(testToken, USER1, OPERATOR); + assertEq(isApproved, true); + assertEq(initialRateAllowance, RATE_ALLOWANCE); + assertEq(initialLockupAllowance, LOCKUP_ALLOWANCE); + + // Prepare for the increase operation + uint256 additionalDeposit = 500 ether; + uint256 rateIncrease = 0; + uint256 lockupIncrease = 0; + + // Give USER1 more tokens for the additional deposit + testToken.mint(USER1, additionalDeposit); + + uint256 validAfter = 0; + uint256 validBefore = validAfter + validForSeconds; + bytes32 nonce = keccak256(abi.encodePacked("auth-nonce", from, to, additionalDeposit, block.number)); + + // Create an invalid authorization signature (signed with the wrong private key) + (uint8 v, bytes32 r, bytes32 s) = helper.getReceiveWithAuthorizationSignature( + user2Sk, testToken, from, address(payments), additionalDeposit, validAfter, validBefore, nonce + ); + + vm.startPrank(USER1); + vm.expectRevert("EIP3009: invalid signature"); + payments.depositWithAuthorizationAndIncreaseOperatorApproval( + testToken, + to, + additionalDeposit, + validAfter, + validBefore, + nonce, + v, + r, + s, + OPERATOR, + rateIncrease, + lockupIncrease + ); + vm.stopPrank(); + + (, uint256 finalRateAllowance, uint256 finalLockupAllowance,,,) = + payments.operatorApprovals(testToken, USER1, OPERATOR); + assertEq(finalRateAllowance, initialRateAllowance); // No change + assertEq(finalLockupAllowance, initialLockupAllowance); // No change + } + + function testDepositWithAuthorizationAndIncreaseOperatorApproval_WithExistingUsage() public { + uint256 fromPrivateKey = user1Sk; + address from = vm.addr(fromPrivateKey); + address to = from; + uint256 validForSeconds = 60 * 60; + uint256 amount = DEPOSIT_AMOUNT; + + // First establish initial operator approval with deposit + helper.depositWithAuthorizationAndOperatorApproval( + fromPrivateKey, amount, validForSeconds, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD + ); + + // Create rail and use some allowance to establish existing usage + uint256 railId = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + uint256 paymentRate = 30 ether; + uint256 lockupFixed = 200 ether; + + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, paymentRate, 0); + payments.modifyRailLockup(railId, 0, lockupFixed); + vm.stopPrank(); + + // Verify some allowance is used + (, uint256 preRateAllowance, uint256 preLockupAllowance, uint256 preRateUsage, uint256 preLockupUsage,) = + payments.operatorApprovals(testToken, USER1, OPERATOR); + assertEq(preRateUsage, paymentRate); + assertEq(preLockupUsage, lockupFixed); + + // Setup for additional deposit with increase + uint256 additionalDeposit = 500 ether; + uint256 rateIncrease = 70 ether; + uint256 lockupIncrease = 800 ether; + + testToken.mint(USER1, additionalDeposit); + + uint256 validAfter = 0; + uint256 validBefore = validAfter + validForSeconds; + bytes32 nonce = 
keccak256(abi.encodePacked("auth-nonce", from, to, additionalDeposit, block.number)); + + (uint8 v, bytes32 r, bytes32 s) = helper.getReceiveWithAuthorizationSignature( + user1Sk, testToken, from, address(payments), additionalDeposit, validAfter, validBefore, nonce + ); + + (uint256 initialFunds,,,) = payments.accounts(testToken, USER1); + + // Execute increase with existing usage + vm.startPrank(USER1); + payments.depositWithAuthorizationAndIncreaseOperatorApproval( + testToken, + to, + additionalDeposit, + validAfter, + validBefore, + nonce, + v, + r, + s, + OPERATOR, + rateIncrease, + lockupIncrease + ); + vm.stopPrank(); + + // Verify results + (uint256 finalFunds,,,) = payments.accounts(testToken, USER1); + assertEq(finalFunds, initialFunds + additionalDeposit); + + (, uint256 finalRateAllowance, uint256 finalLockupAllowance, uint256 finalRateUsage, uint256 finalLockupUsage,) + = payments.operatorApprovals(testToken, USER1, OPERATOR); + assertEq(finalRateAllowance, preRateAllowance + rateIncrease); + assertEq(finalLockupAllowance, preLockupAllowance + lockupIncrease); + assertEq(finalRateUsage, preRateUsage); // Usage unchanged + assertEq(finalLockupUsage, preLockupUsage); // Usage unchanged + } + + function testDepositWithAuthorizationAndIncreaseOperatorApproval_Revert_DifferentSender() public { + address from = vm.addr(user1Sk); + address to = from; + uint256 amount = DEPOSIT_AMOUNT; + uint256 validAfter = 0; + uint256 validBefore = block.timestamp + 60; + bytes32 nonce = keccak256(abi.encodePacked("auth-nonce", from, to, amount, block.number)); + + (uint8 v, bytes32 r, bytes32 s) = helper.getReceiveWithAuthorizationSignature( + user1Sk, testToken, from, address(payments), amount, validAfter, validBefore, nonce + ); + + // First establish initial operator approval with deposit + helper.depositWithAuthorizationAndOperatorApproval( + user1Sk, amount, 60 * 60, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD + ); + + // Verify initial approval state + (bool isApproved, uint256 initialRateAllowance, uint256 initialLockupAllowance,,,) = + payments.operatorApprovals(testToken, USER1, OPERATOR); + assertEq(isApproved, true); + assertEq(initialRateAllowance, RATE_ALLOWANCE); + assertEq(initialLockupAllowance, LOCKUP_ALLOWANCE); + + // Prepare for the increase operation + uint256 rateIncrease = 10 ether; + uint256 lockupIncrease = 10 ether; + + // Give USER1 more tokens for the additional deposit + testToken.mint(USER1, amount); + + // Attempt to submit as a different user + from = vm.addr(user2Sk); + vm.startPrank(from); + vm.expectRevert(abi.encodeWithSelector(Errors.SignerMustBeMsgSender.selector, from, to)); + payments.depositWithAuthorizationAndIncreaseOperatorApproval( + testToken, to, amount, validAfter, validBefore, nonce, v, r, s, OPERATOR, rateIncrease, lockupIncrease + ); + vm.stopPrank(); + } +} diff --git a/service_contracts/test/payments/DepositWithPermitAndOperatorApproval.t.sol b/service_contracts/test/payments/DepositWithPermitAndOperatorApproval.t.sol new file mode 100644 index 00000000..9e4ec449 --- /dev/null +++ b/service_contracts/test/payments/DepositWithPermitAndOperatorApproval.t.sol @@ -0,0 +1,304 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.27; + +import {Test} from "forge-std/Test.sol"; +import {Payments} from "@payments/Payments.sol"; +import {MockERC20} from "./mocks/MockERC20.sol"; +import {PaymentsTestHelpers} from "./helpers/PaymentsTestHelpers.sol"; +import {BaseTestHelper} from "./helpers/BaseTestHelper.sol"; +import 
{Errors} from "@payments/Errors.sol"; + +contract DepositWithPermitAndOperatorApproval is Test, BaseTestHelper { + MockERC20 testToken; + PaymentsTestHelpers helper; + Payments payments; + + uint256 constant DEPOSIT_AMOUNT = 1000 ether; + uint256 constant RATE_ALLOWANCE = 100 ether; + uint256 constant LOCKUP_ALLOWANCE = 1000 ether; + uint256 constant MAX_LOCKUP_PERIOD = 100; + uint256 internal constant INITIAL_BALANCE = 1000 ether; + + function setUp() public { + helper = new PaymentsTestHelpers(); + helper.setupStandardTestEnvironment(); + payments = helper.payments(); + + testToken = helper.testToken(); + } + + function testDepositWithPermitAndOperatorApproval_HappyPath() public { + helper.makeDepositWithPermitAndOperatorApproval( + user1Sk, DEPOSIT_AMOUNT, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD + ); + } + + function testDepositWithPermitAndOperatorApproval_ZeroAmount() public { + helper.makeDepositWithPermitAndOperatorApproval( + user1Sk, 0, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD + ); + } + + function testDepositWithPermitAndOperatorApproval_MultipleDeposits() public { + uint256 firstDepositAmount = 500 ether; + uint256 secondDepositAmount = 300 ether; + + helper.makeDepositWithPermitAndOperatorApproval( + user1Sk, firstDepositAmount, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD + ); + helper.makeDepositWithPermitAndOperatorApproval( + user1Sk, secondDepositAmount, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD + ); + } + + function testDepositWithPermitAndOperatorApproval_InvalidPermitReverts() public { + helper.expectInvalidPermitAndOperatorApprovalToRevert( + user1Sk, DEPOSIT_AMOUNT, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD + ); + } + + function testDepositWithPermitAndOperatorApproval_Revert_DifferentSender() public { + address from = USER1; + uint256 deadline = block.timestamp + 1 hours; + + // get signature for permit + (uint8 v, bytes32 r, bytes32 s) = + helper.getPermitSignature(user1Sk, from, address(payments), DEPOSIT_AMOUNT, deadline); + + vm.startPrank(RELAYER); + vm.expectRevert(abi.encodeWithSelector(Errors.SignerMustBeMsgSender.selector, RELAYER, from)); + payments.depositWithPermitAndApproveOperator( + testToken, + from, + DEPOSIT_AMOUNT, + deadline, + v, + r, + s, + OPERATOR, + RATE_ALLOWANCE, + LOCKUP_ALLOWANCE, + MAX_LOCKUP_PERIOD + ); + vm.stopPrank(); + } + + // SECTION: Deposit With Permit And Increase Operator Approval Tests + + function testDepositWithPermitAndIncreaseOperatorApproval_HappyPath() public { + // Step 1: First establish initial operator approval with deposit + helper.makeDepositWithPermitAndOperatorApproval( + user1Sk, DEPOSIT_AMOUNT, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD + ); + + // Step 2: Verify initial approval state + (bool isApproved, uint256 initialRateAllowance, uint256 initialLockupAllowance,,,) = + payments.operatorApprovals(testToken, USER1, OPERATOR); + assertEq(isApproved, true); + assertEq(initialRateAllowance, RATE_ALLOWANCE); + assertEq(initialLockupAllowance, LOCKUP_ALLOWANCE); + + // Step 3: Prepare for the increase operation + uint256 additionalDeposit = 500 ether; + uint256 rateIncrease = 50 ether; + uint256 lockupIncrease = 500 ether; + + // Give USER1 more tokens for the additional deposit + testToken.mint(USER1, additionalDeposit); + + // Get permit signature for the additional deposit + uint256 deadline = block.timestamp + 1 hours; + (uint8 v, bytes32 r, bytes32 s) = + helper.getPermitSignature(user1Sk, 
USER1, address(payments), additionalDeposit, deadline); + + // Record initial account state + (uint256 initialFunds,,,) = payments.accounts(testToken, USER1); + + // Step 4: Execute depositWithPermitAndIncreaseOperatorApproval + vm.startPrank(USER1); + payments.depositWithPermitAndIncreaseOperatorApproval( + testToken, USER1, additionalDeposit, deadline, v, r, s, OPERATOR, rateIncrease, lockupIncrease + ); + vm.stopPrank(); + + // Step 5: Verify results + // Check deposit was successful + (uint256 finalFunds,,,) = payments.accounts(testToken, USER1); + assertEq(finalFunds, initialFunds + additionalDeposit); + + // Check operator approval was increased + (, uint256 finalRateAllowance, uint256 finalLockupAllowance,,,) = + payments.operatorApprovals(testToken, USER1, OPERATOR); + assertEq(finalRateAllowance, initialRateAllowance + rateIncrease); + assertEq(finalLockupAllowance, initialLockupAllowance + lockupIncrease); + } + + function testDepositWithPermitAndIncreaseOperatorApproval_ZeroIncrease() public { + // First establish initial operator approval with deposit + helper.makeDepositWithPermitAndOperatorApproval( + user1Sk, DEPOSIT_AMOUNT, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD + ); + + // Verify initial approval state + (bool isApproved, uint256 initialRateAllowance, uint256 initialLockupAllowance,,,) = + payments.operatorApprovals(testToken, USER1, OPERATOR); + assertEq(isApproved, true); + assertEq(initialRateAllowance, RATE_ALLOWANCE); + assertEq(initialLockupAllowance, LOCKUP_ALLOWANCE); + + // Setup for additional deposit with zero increases + uint256 additionalDeposit = 500 ether; + testToken.mint(USER1, additionalDeposit); + + uint256 deadline = block.timestamp + 1 hours; + (uint8 v, bytes32 r, bytes32 s) = + helper.getPermitSignature(user1Sk, USER1, address(payments), additionalDeposit, deadline); + + (uint256 initialFunds,,,) = payments.accounts(testToken, USER1); + + // Execute with zero increases + vm.startPrank(USER1); + payments.depositWithPermitAndIncreaseOperatorApproval( + testToken, + USER1, + additionalDeposit, + deadline, + v, + r, + s, + OPERATOR, + 0, // Zero rate increase + 0 // Zero lockup increase + ); + vm.stopPrank(); + + // Verify deposit occurred but allowances unchanged + (uint256 finalFunds,,,) = payments.accounts(testToken, USER1); + assertEq(finalFunds, initialFunds + additionalDeposit); + + (, uint256 finalRateAllowance, uint256 finalLockupAllowance,,,) = + payments.operatorApprovals(testToken, USER1, OPERATOR); + assertEq(finalRateAllowance, initialRateAllowance); // No change + assertEq(finalLockupAllowance, initialLockupAllowance); // No change + } + + function testDepositWithPermitAndIncreaseOperatorApproval_InvalidPermit() public { + // First establish initial operator approval with deposit + helper.makeDepositWithPermitAndOperatorApproval( + user1Sk, DEPOSIT_AMOUNT, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD + ); + + // Setup for additional deposit with invalid permit + uint256 additionalDeposit = 500 ether; + testToken.mint(USER1, additionalDeposit); + + uint256 deadline = block.timestamp + 1 hours; + + // Create invalid permit signature (wrong private key) + (uint8 v, bytes32 r, bytes32 s) = + helper.getPermitSignature(user2Sk, USER1, address(payments), additionalDeposit, deadline); + + vm.startPrank(USER1); + vm.expectRevert( + abi.encodeWithSignature( + "ERC2612InvalidSigner(address,address)", + vm.addr(user2Sk), // Wrong signer address + USER1 // Intended recipient + ) + ); + 
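// expectRevert arms the next external call below: the permit digest was signed with user2Sk, so ERC20Permit recovers vm.addr(user2Sk) instead of USER1 and reverts before any deposit or approval state is written. +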
payments.depositWithPermitAndIncreaseOperatorApproval( + testToken, USER1, additionalDeposit, deadline, v, r, s, OPERATOR, 50 ether, 500 ether + ); + vm.stopPrank(); + } + + function testDepositWithPermitAndIncreaseOperatorApproval_WithExistingUsage() public { + // First establish initial operator approval with deposit + helper.makeDepositWithPermitAndOperatorApproval( + user1Sk, DEPOSIT_AMOUNT, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD + ); + + // Create rail and use some allowance to establish existing usage + uint256 railId = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + uint256 paymentRate = 30 ether; + uint256 lockupFixed = 200 ether; + + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, paymentRate, 0); + payments.modifyRailLockup(railId, 0, lockupFixed); + vm.stopPrank(); + + // Verify some allowance is used + (, uint256 preRateAllowance, uint256 preLockupAllowance, uint256 preRateUsage, uint256 preLockupUsage,) = + payments.operatorApprovals(testToken, USER1, OPERATOR); + assertEq(preRateUsage, paymentRate); + assertEq(preLockupUsage, lockupFixed); + + // Setup for additional deposit with increase + uint256 additionalDeposit = 500 ether; + uint256 rateIncrease = 70 ether; + uint256 lockupIncrease = 800 ether; + + testToken.mint(USER1, additionalDeposit); + + uint256 deadline = block.timestamp + 1 hours; + (uint8 v, bytes32 r, bytes32 s) = + helper.getPermitSignature(user1Sk, USER1, address(payments), additionalDeposit, deadline); + + (uint256 initialFunds,,,) = payments.accounts(testToken, USER1); + + // Execute increase with existing usage + vm.startPrank(USER1); + payments.depositWithPermitAndIncreaseOperatorApproval( + testToken, USER1, additionalDeposit, deadline, v, r, s, OPERATOR, rateIncrease, lockupIncrease + ); + vm.stopPrank(); + + // Verify results + (uint256 finalFunds,,,) = payments.accounts(testToken, USER1); + assertEq(finalFunds, initialFunds + additionalDeposit); + + (, uint256 finalRateAllowance, uint256 finalLockupAllowance, uint256 finalRateUsage, uint256 finalLockupUsage,) + = payments.operatorApprovals(testToken, USER1, OPERATOR); + assertEq(finalRateAllowance, preRateAllowance + rateIncrease); + assertEq(finalLockupAllowance, preLockupAllowance + lockupIncrease); + assertEq(finalRateUsage, preRateUsage); // Usage unchanged + assertEq(finalLockupUsage, preLockupUsage); // Usage unchanged + } + + function testDepositWithPermitAndIncreaseOperatorApproval_Revert_DifferentSender() public { + address from = USER1; + + // Step 1: First establish initial operator approval with deposit + helper.makeDepositWithPermitAndOperatorApproval( + user1Sk, DEPOSIT_AMOUNT, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD + ); + + // Step 2: Verify initial approval state + (bool isApproved, uint256 initialRateAllowance, uint256 initialLockupAllowance,,,) = + payments.operatorApprovals(testToken, USER1, OPERATOR); + assertEq(isApproved, true); + assertEq(initialRateAllowance, RATE_ALLOWANCE); + assertEq(initialLockupAllowance, LOCKUP_ALLOWANCE); + + // Step 3: Prepare for the increase operation + uint256 additionalDeposit = 500 ether; + uint256 rateIncrease = 50 ether; + uint256 lockupIncrease = 500 ether; + + // Give USER1 more tokens for the additional deposit + testToken.mint(USER1, additionalDeposit); + + // Get permit signature for the additional deposit + uint256 deadline = block.timestamp + 1 hours; + (uint8 v, bytes32 r, bytes32 s) = + helper.getPermitSignature(user1Sk, USER1, address(payments), 
additionalDeposit, deadline); + + vm.startPrank(RELAYER); + vm.expectRevert(abi.encodeWithSelector(Errors.SignerMustBeMsgSender.selector, RELAYER, from)); + payments.depositWithPermitAndIncreaseOperatorApproval( + testToken, USER1, additionalDeposit, deadline, v, r, s, OPERATOR, rateIncrease, lockupIncrease + ); + vm.stopPrank(); + } +} diff --git a/service_contracts/test/payments/Dutch.t.sol b/service_contracts/test/payments/Dutch.t.sol new file mode 100644 index 00000000..bab0c118 --- /dev/null +++ b/service_contracts/test/payments/Dutch.t.sol @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.30; + +import {Test} from "forge-std/Test.sol"; +import {Dutch} from "@payments/Dutch.sol"; + +contract ExternalDutch { + using Dutch for uint256; + + function dutch(uint256 startPrice, uint256 elapsed) external pure returns (uint256) { + return startPrice.decay(elapsed); + } +} + +contract DutchTest is Test { + using Dutch for uint256; + + function checkExactDecay(uint256 startPrice) internal pure { + assertEq(startPrice.decay(0), startPrice); + assertEq(startPrice.decay(3.5 days), startPrice / 2); + assertEq(startPrice.decay(7 days), startPrice / 4); + assertEq(startPrice.decay(14 days), startPrice / 16); + assertEq(startPrice.decay(21 days), startPrice / 64); + assertEq(startPrice.decay(28 days), startPrice / 256); + assertEq(startPrice.decay(35 days), startPrice / 1024); + } + + function testDecay() public pure { + checkExactDecay(0.00000001 ether); + checkExactDecay(0.01 ether); + checkExactDecay(9 ether); + checkExactDecay(11 ether); + checkExactDecay(13 ether); + checkExactDecay(1300000 ether); + } + + function testMaxDecayU256() public pure { + uint256 maxPrice = 0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff; + + assertEq(maxPrice.decay(0), maxPrice); + assertEq(maxPrice.decay(10000000), 12852371374314799914919560702529050018701224735495877087613516410500); + assertEq(maxPrice.decay(50000000), 1950746206018947071427216775); + assertEq(maxPrice.decay(58060000), 18480601319969968529); + assertEq(maxPrice.decay(Dutch.MAX_DECAY - 1), 18446828639436756833); + assertEq(maxPrice.decay(Dutch.MAX_DECAY), 18446786356524694827); + assertEq(maxPrice.decay(Dutch.MAX_DECAY + 1), 0); + } + + function testMaxDecayFIL() public pure { + uint256 maxPrice = 2 * 10 ** 27; // max FIL supply + + assertEq(maxPrice.decay(0), maxPrice); + assertEq(maxPrice.decay(90 days), 36329437917604310558); + assertEq(maxPrice.decay(10000000), 221990491042506894); + assertEq(maxPrice.decay(20000000), 24639889); + assertEq(maxPrice.decay(23000000), 25423); + assertEq(maxPrice.decay(26000000), 26); + assertEq(maxPrice.decay(26500000), 8); + assertEq(maxPrice.decay(27000000), 2); + assertEq(maxPrice.decay(27425278), 1); + assertEq(maxPrice.decay(27425279), 0); + assertEq(maxPrice.decay(Dutch.MAX_DECAY - 1), 0); + assertEq(maxPrice.decay(Dutch.MAX_DECAY), 0); + assertEq(maxPrice.decay(Dutch.MAX_DECAY + 1), 0); + } +} diff --git a/service_contracts/test/payments/FeeOnTransferVulnerability.t.sol b/service_contracts/test/payments/FeeOnTransferVulnerability.t.sol new file mode 100644 index 00000000..e0b053af --- /dev/null +++ b/service_contracts/test/payments/FeeOnTransferVulnerability.t.sol @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.27; + +import {Test, console} from "forge-std/Test.sol"; +import {Payments} from "@payments/Payments.sol"; +import {MockFeeOnTransferTokenWithPermit} from "./mocks/MockFeeOnTransferTokenWithPermit.sol"; 
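+// These tests exercise deposit accounting for fee-on-transfer tokens: the Payments contract must credit the amount actually received, not the amount requested.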
+import {PaymentsTestHelpers} from "./helpers/PaymentsTestHelpers.sol"; +import {BaseTestHelper} from "./helpers/BaseTestHelper.sol"; +import {MessageHashUtils} from "@openzeppelin/contracts/utils/cryptography/MessageHashUtils.sol"; + +contract FeeOnTransferVulnerabilityTest is Test, BaseTestHelper { + PaymentsTestHelpers helper; + Payments payments; + MockFeeOnTransferTokenWithPermit feeToken; + + uint256 internal constant INITIAL_BALANCE = 10000 ether; + uint256 internal constant DEPOSIT_AMOUNT = 1000 ether; + uint256 internal constant FEE_PERCENTAGE = 200; // 2% fee + + function setUp() public { + // Create test helpers and setup environment + helper = new PaymentsTestHelpers(); + helper.setupStandardTestEnvironment(); + payments = helper.payments(); + + // Create fee-on-transfer token with 2% fee + feeToken = new MockFeeOnTransferTokenWithPermit("PermitFeeToken", "PFEE", FEE_PERCENTAGE); + + // Mint tokens to users + feeToken.mint(USER1, INITIAL_BALANCE); + feeToken.mint(USER2, INITIAL_BALANCE); + + // Approve payments contract + vm.prank(USER1); + feeToken.approve(address(payments), type(uint256).max); + + vm.prank(USER2); + feeToken.approve(address(payments), type(uint256).max); + } + + function testFeeOnTransferVulnerabilityBasic() public { + // Record initial balances + uint256 contractBalanceBefore = feeToken.balanceOf(address(payments)); + + // User1 deposits 1000 tokens, but due to 2% fee, only 980 actually reach the contract + vm.prank(USER1); + payments.deposit(feeToken, USER1, DEPOSIT_AMOUNT); + + // Check actual token balance vs recorded balance + uint256 contractBalanceAfter = feeToken.balanceOf(address(payments)); + uint256 actualTokensReceived = contractBalanceAfter - contractBalanceBefore; + uint256 expectedTokensReceived = DEPOSIT_AMOUNT - (DEPOSIT_AMOUNT * FEE_PERCENTAGE / 10000); + + // The contract actually received less due to fee + assertEq(actualTokensReceived, expectedTokensReceived, "Contract received expected amount after fee"); + assertLt(actualTokensReceived, DEPOSIT_AMOUNT, "Contract received less than deposit amount"); + + // The payments contract also knows it does not have the full amount + (, uint256 recordedFunds,,,) = payments.getAccountInfoIfSettled(feeToken, USER1); + assertEq(recordedFunds, expectedTokensReceived, "Contract recorded actual received amount"); + } + + function testFeeOnTransferWithDepositWithPermit() public { + // Record initial balances + uint256 contractBalanceBefore = feeToken.balanceOf(address(payments)); + + // Prepare permit parameters + uint256 deadline = block.timestamp + 1 hours; + + // Get permit signature + (uint8 v, bytes32 r, bytes32 s) = + getPermitSignature(feeToken, user1Sk, USER1, address(payments), DEPOSIT_AMOUNT, deadline); + + // User1 deposits 1000 tokens using permit, but due to 2% fee, only 980 actually reach the contract + vm.prank(USER1); + payments.depositWithPermit(feeToken, USER1, DEPOSIT_AMOUNT, deadline, v, r, s); + + // Check actual token balance vs recorded balance + uint256 contractBalanceAfter = feeToken.balanceOf(address(payments)); + uint256 actualTokensReceived = contractBalanceAfter - contractBalanceBefore; + uint256 expectedTokensReceived = DEPOSIT_AMOUNT - (DEPOSIT_AMOUNT * FEE_PERCENTAGE / 10000); + + // The contract actually received less due to fee + assertEq(actualTokensReceived, expectedTokensReceived, "Contract received expected amount after fee"); + assertLt(actualTokensReceived, DEPOSIT_AMOUNT, "Contract received less than deposit amount"); + + // With the fix, the payments contract should 
record the actual amount received + (, uint256 recordedFunds,,) = payments.getAccountInfoIfSettled(feeToken, USER1); + assertEq(recordedFunds, expectedTokensReceived, "Contract recorded actual received amount"); + + console.log("Deposit amount:", DEPOSIT_AMOUNT); + console.log("Actual tokens received:", actualTokensReceived); + console.log("Recorded balance:", recordedFunds); + console.log("Discrepancy:", recordedFunds > actualTokensReceived ? recordedFunds - actualTokensReceived : 0); + } + + function getPermitSignature( + MockFeeOnTransferTokenWithPermit token, + uint256 privateKey, + address owner, + address spender, + uint256 value, + uint256 deadline + ) internal view returns (uint8 v, bytes32 r, bytes32 s) { + uint256 nonce = token.nonces(owner); + bytes32 domainSeparator = token.DOMAIN_SEPARATOR(); + + bytes32 structHash = keccak256( + abi.encode( + keccak256("Permit(address owner,address spender,uint256 value,uint256 nonce,uint256 deadline)"), + owner, + spender, + value, + nonce, + deadline + ) + ); + + bytes32 digest = MessageHashUtils.toTypedDataHash(domainSeparator, structHash); + + (v, r, s) = vm.sign(privateKey, digest); + } + + function testFeeOnTransferWithDepositWithPermitAndApproveOperator() public { + // Record initial balances + uint256 contractBalanceBefore = feeToken.balanceOf(address(payments)); + + // Prepare permit and operator approval parameters + uint256 deadline = block.timestamp + 1 hours; + uint256 rateAllowance = 10 ether; + uint256 lockupAllowance = 100 ether; + uint256 maxLockupPeriod = 100; + + // Get permit signature + (uint8 v, bytes32 r, bytes32 s) = + getPermitSignature(feeToken, user1Sk, USER1, address(payments), DEPOSIT_AMOUNT, deadline); + + // User1 deposits 1000 tokens using permit and approves operator, but due to 2% fee, only 980 actually reach the contract + vm.prank(USER1); + payments.depositWithPermitAndApproveOperator( + feeToken, + USER1, + DEPOSIT_AMOUNT, + deadline, + v, + r, + s, + OPERATOR, + rateAllowance, + lockupAllowance, + maxLockupPeriod + ); + + // Check actual token balance vs recorded balance + uint256 contractBalanceAfter = feeToken.balanceOf(address(payments)); + uint256 actualTokensReceived = contractBalanceAfter - contractBalanceBefore; + uint256 expectedTokensReceived = DEPOSIT_AMOUNT - (DEPOSIT_AMOUNT * FEE_PERCENTAGE / 10000); + + // The contract actually received less due to fee + assertEq(actualTokensReceived, expectedTokensReceived, "Contract received expected amount after fee"); + assertLt(actualTokensReceived, DEPOSIT_AMOUNT, "Contract received less than deposit amount"); + + // With the fix, the payments contract should record the actual amount received + (, uint256 recordedFunds,,) = payments.getAccountInfoIfSettled(feeToken, USER1); + assertEq(recordedFunds, expectedTokensReceived, "Contract recorded actual received amount"); + + // Verify operator approval was set correctly + (bool isApproved, uint256 actualRateAllowance, uint256 actualLockupAllowance,,, uint256 actualMaxLockupPeriod) = + payments.operatorApprovals(feeToken, USER1, OPERATOR); + assertEq(isApproved, true, "Operator should be approved"); + assertEq(actualRateAllowance, rateAllowance, "Rate allowance should be set"); + assertEq(actualLockupAllowance, lockupAllowance, "Lockup allowance should be set"); + assertEq(actualMaxLockupPeriod, maxLockupPeriod, "Max lockup period should be set"); + + console.log("Deposit amount:", DEPOSIT_AMOUNT); + console.log("Actual tokens received:", actualTokensReceived); + console.log("Recorded balance:", 
recordedFunds); + console.log("Operator approved:", isApproved); + } +} diff --git a/service_contracts/test/payments/Fees.t.sol b/service_contracts/test/payments/Fees.t.sol new file mode 100644 index 00000000..648200ec --- /dev/null +++ b/service_contracts/test/payments/Fees.t.sol @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT + +pragma solidity ^0.8.27; + +import {Test} from "forge-std/Test.sol"; +import {Payments} from "@payments/Payments.sol"; +import {MockERC20} from "./mocks/MockERC20.sol"; +import {PaymentsTestHelpers} from "./helpers/PaymentsTestHelpers.sol"; +import {RailSettlementHelpers} from "./helpers/RailSettlementHelpers.sol"; +import {BaseTestHelper} from "./helpers/BaseTestHelper.sol"; + +contract FeesTest is Test, BaseTestHelper { + PaymentsTestHelpers helper; + RailSettlementHelpers settlementHelper; + Payments payments; + + // Multiple tokens for testing + MockERC20 token1; + MockERC20 token2; + MockERC20 token3; + + uint256 constant INITIAL_BALANCE = 5000 ether; + uint256 constant DEPOSIT_AMOUNT = 200 ether; + uint256 constant MAX_LOCKUP_PERIOD = 100; + + // Payment rates for each rail + uint256 constant RAIL1_RATE = 5 ether; + uint256 constant RAIL2_RATE = 10 ether; + uint256 constant RAIL3_RATE = 15 ether; + + // Rail IDs + uint256 rail1Id; + uint256 rail2Id; + uint256 rail3Id; + + function setUp() public { + // Initialize helpers + helper = new PaymentsTestHelpers(); + helper.setupStandardTestEnvironment(); + payments = helper.payments(); + + settlementHelper = new RailSettlementHelpers(); + settlementHelper.initialize(payments, helper); + + // Set up 3 different tokens + token1 = MockERC20(helper.testToken()); // Use the default token from the helper + token2 = new MockERC20("Token 2", "TK2"); + token3 = new MockERC20("Token 3", "TK3"); + + // Initialize tokens and make deposits + setupTokensAndDeposits(); + + // Create rails with different tokens + createRails(); + } + + function setupTokensAndDeposits() internal { + // Mint tokens to users + // Token 1 is already handled by the helper + token2.mint(USER1, INITIAL_BALANCE); + token3.mint(USER1, INITIAL_BALANCE); + + // Approve transfers for all tokens + vm.startPrank(USER1); + token1.approve(address(payments), type(uint256).max); + token2.approve(address(payments), type(uint256).max); + token3.approve(address(payments), type(uint256).max); + vm.stopPrank(); + + // Make deposits with all tokens + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); // Uses token1 + + // Make deposits with token2 and token3 + vm.startPrank(USER1); + payments.deposit(token2, USER1, DEPOSIT_AMOUNT); + payments.deposit(token3, USER1, DEPOSIT_AMOUNT); + vm.stopPrank(); + } + + function createRails() internal { + // Set up operator approvals for each token + helper.setupOperatorApproval( + USER1, // from + OPERATOR, // operator + RAIL1_RATE, // rate allowance for token1 + RAIL1_RATE * 10, // lockup allowance (enough for the period) + MAX_LOCKUP_PERIOD // max lockup period + ); + + // Operator approvals for token2 and token3 + vm.startPrank(USER1); + payments.setOperatorApproval( + token2, + OPERATOR, + true, // approved + RAIL2_RATE, // rate allowance for token2 + RAIL2_RATE * 10, // lockup allowance (enough for the period) + MAX_LOCKUP_PERIOD // max lockup period + ); + + payments.setOperatorApproval( + token3, + OPERATOR, + true, // approved + RAIL3_RATE, // rate allowance for token3 + RAIL3_RATE * 10, // lockup allowance (enough for the period) + MAX_LOCKUP_PERIOD // max lockup period + ); + vm.stopPrank(); + + // Create 
rails with different tokens + rail1Id = helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + RAIL1_RATE, + 10, // lockupPeriod + 0, // No fixed lockup + address(0), // No validator + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Create a rail with token2 + vm.startPrank(OPERATOR); + rail2Id = payments.createRail( + token2, + USER1, // from + USER2, // to + address(0), // no validator + 0, // no commission + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Set rail2 parameters + payments.modifyRailPayment(rail2Id, RAIL2_RATE, 0); + payments.modifyRailLockup(rail2Id, 10, 0); // 10 blocks, no fixed lockup + + // Create a rail with token3 + rail3Id = payments.createRail( + token3, + USER1, // from + USER2, // to + address(0), // no validator + 0, // no commission + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Set rail3 parameters + payments.modifyRailPayment(rail3Id, RAIL3_RATE, 0); + payments.modifyRailLockup(rail3Id, 10, 0); // 10 blocks, no fixed lockup + vm.stopPrank(); + } +} diff --git a/service_contracts/test/payments/OperatorApproval.t.sol b/service_contracts/test/payments/OperatorApproval.t.sol new file mode 100644 index 00000000..a1f66489 --- /dev/null +++ b/service_contracts/test/payments/OperatorApproval.t.sol @@ -0,0 +1,957 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT + +pragma solidity ^0.8.27; + +import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; + +import {Test} from "forge-std/Test.sol"; +import {Payments} from "@payments/Payments.sol"; +import {MockERC20} from "./mocks/MockERC20.sol"; +import {PaymentsTestHelpers} from "./helpers/PaymentsTestHelpers.sol"; +import {BaseTestHelper} from "./helpers/BaseTestHelper.sol"; +import {Errors} from "@payments/Errors.sol"; + +contract OperatorApprovalTest is Test, BaseTestHelper { + MockERC20 secondToken; + PaymentsTestHelpers helper; + Payments payments; + + uint256 constant DEPOSIT_AMOUNT = 1000 ether; + uint256 constant RATE_ALLOWANCE = 100 ether; + uint256 constant LOCKUP_ALLOWANCE = 1000 ether; + uint256 constant MAX_LOCKUP_PERIOD = 100; + IERC20 private constant NATIVE_TOKEN = IERC20(address(0)); + + function setUp() public { + helper = new PaymentsTestHelpers(); + helper.setupStandardTestEnvironment(); + payments = helper.payments(); + + // Deposit funds for client + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + } + + function testNativeFIL() public { + vm.startPrank(USER1); + payments.setOperatorApproval(NATIVE_TOKEN, OPERATOR, true, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD); + vm.stopPrank(); + } + + function testInvalidAddresses() public { + // Test zero operator address + vm.startPrank(USER1); + vm.expectRevert(abi.encodeWithSelector(Errors.ZeroAddressNotAllowed.selector, "operator")); + payments.setOperatorApproval( + IERC20(address(0x1)), address(0), true, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD + ); + vm.stopPrank(); + } + + function testModifyingAllowances() public { + // Setup initial approval + helper.setupOperatorApproval(USER1, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD); + + // Increase allowances + helper.setupOperatorApproval(USER1, OPERATOR, RATE_ALLOWANCE * 2, LOCKUP_ALLOWANCE * 2, MAX_LOCKUP_PERIOD); + + // Decrease allowances + helper.setupOperatorApproval(USER1, OPERATOR, RATE_ALLOWANCE / 2, LOCKUP_ALLOWANCE / 2, MAX_LOCKUP_PERIOD); + } + + function testRevokingAndReapprovingOperator() public { + // Setup initial approval + helper.setupOperatorApproval(USER1, OPERATOR, 
RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD); + + // Revoke approval + helper.revokeOperatorApprovalAndVerify(USER1, OPERATOR); + + // Reapprove operator + helper.setupOperatorApproval(USER1, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD); + } + + function testRateTrackingWithMultipleRails() public { + // Setup initial approval + helper.setupOperatorApproval(USER1, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD); + + // Create a rail + uint256 railId = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + // Verify no allowance consumed yet + helper.verifyOperatorAllowances( + USER1, OPERATOR, true, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, 0, 0, MAX_LOCKUP_PERIOD + ); + + // 1. Set initial payment rate + uint256 initialRate = 10 ether; + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, initialRate, 0); + vm.stopPrank(); + + // Verify rate usage matches initial rate + helper.verifyOperatorAllowances( + USER1, OPERATOR, true, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, initialRate, 0, MAX_LOCKUP_PERIOD + ); + + // 2. Increase payment rate + uint256 increasedRate = 15 ether; + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, increasedRate, 0); + vm.stopPrank(); + + // Verify rate usage increased + helper.verifyOperatorAllowances( + USER1, OPERATOR, true, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, increasedRate, 0, MAX_LOCKUP_PERIOD + ); + + // 3. Decrease payment rate + uint256 decreasedRate = 5 ether; + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, decreasedRate, 0); + vm.stopPrank(); + + // Verify rate usage decreased + helper.verifyOperatorAllowances( + USER1, OPERATOR, true, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, decreasedRate, 0, MAX_LOCKUP_PERIOD + ); + + // 4. Create second rail and set rate + uint256 railId2 = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + uint256 rate2 = 15 ether; + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId2, rate2, 0); + vm.stopPrank(); + + // Verify combined rate usage + helper.verifyOperatorAllowances( + USER1, OPERATOR, true, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, decreasedRate + rate2, 0, MAX_LOCKUP_PERIOD + ); + } + + function testRateLimitEnforcement() public { + // Setup initial approval with limited rate allowance + uint256 limitedRateAllowance = 10 ether; + helper.setupOperatorApproval(USER1, OPERATOR, limitedRateAllowance, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD); + + // Create rail + uint256 railId = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + // Set rate to exactly the limit + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, limitedRateAllowance, 0); + vm.stopPrank(); + + // Now try to exceed the limit - should revert + vm.startPrank(OPERATOR); + vm.expectRevert( + abi.encodeWithSelector( + Errors.OperatorRateAllowanceExceeded.selector, limitedRateAllowance, limitedRateAllowance + 1 ether + ) + ); + payments.modifyRailPayment(railId, limitedRateAllowance + 1 ether, 0); + vm.stopPrank(); + } + + // SECTION: Lockup Allowance Tracking + + function testLockupTracking() public { + // Setup initial approval + helper.setupOperatorApproval(USER1, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD); + + // Create rail + uint256 railId = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + // Set payment rate + uint256 paymentRate = 10 ether; + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, paymentRate, 0); + vm.stopPrank(); + + // 1. 
Set initial lockup + uint256 lockupPeriod = 5; // 5 blocks + uint256 initialFixedLockup = 100 ether; + + vm.startPrank(OPERATOR); + payments.modifyRailLockup(railId, lockupPeriod, initialFixedLockup); + vm.stopPrank(); + + // Calculate expected lockup usage + uint256 expectedLockupUsage = initialFixedLockup + (paymentRate * lockupPeriod); + + // Verify lockup usage + helper.verifyOperatorAllowances( + USER1, OPERATOR, true, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, paymentRate, expectedLockupUsage, MAX_LOCKUP_PERIOD + ); + + // 2. Increase fixed lockup + uint256 increasedFixedLockup = 200 ether; + vm.startPrank(OPERATOR); + payments.modifyRailLockup(railId, lockupPeriod, increasedFixedLockup); + vm.stopPrank(); + + // Calculate updated expected lockup usage + uint256 updatedExpectedLockupUsage = increasedFixedLockup + (paymentRate * lockupPeriod); + + // Verify increased lockup usage + helper.verifyOperatorAllowances( + USER1, + OPERATOR, + true, + RATE_ALLOWANCE, + LOCKUP_ALLOWANCE, + paymentRate, + updatedExpectedLockupUsage, + MAX_LOCKUP_PERIOD + ); + + // 3. Decrease fixed lockup + uint256 decreasedFixedLockup = 50 ether; + vm.startPrank(OPERATOR); + payments.modifyRailLockup(railId, lockupPeriod, decreasedFixedLockup); + vm.stopPrank(); + + // Calculate reduced expected lockup usage + uint256 finalExpectedLockupUsage = decreasedFixedLockup + (paymentRate * lockupPeriod); + + // Verify decreased lockup usage + helper.verifyOperatorAllowances( + USER1, + OPERATOR, + true, + RATE_ALLOWANCE, + LOCKUP_ALLOWANCE, + paymentRate, + finalExpectedLockupUsage, + MAX_LOCKUP_PERIOD + ); + } + + function testLockupLimitEnforcement() public { + // Setup initial approval with limited lockup allowance + uint256 limitedLockupAllowance = 100 ether; + helper.setupOperatorApproval(USER1, OPERATOR, RATE_ALLOWANCE, limitedLockupAllowance, MAX_LOCKUP_PERIOD); + + // Create rail + uint256 railId = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + // Set payment rate + uint256 paymentRate = 10 ether; + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, paymentRate, 0); + vm.stopPrank(); + + // Try to set fixed lockup that exceeds allowance + uint256 excessiveLockup = 110 ether; + (,,,, uint256 currentLockupUsage,) = payments.operatorApprovals(helper.testToken(), USER1, OPERATOR); + uint256 attemptedUsage = currentLockupUsage + excessiveLockup; + vm.startPrank(OPERATOR); + vm.expectRevert( + abi.encodeWithSelector( + Errors.OperatorLockupAllowanceExceeded.selector, limitedLockupAllowance, attemptedUsage + ) + ); + payments.modifyRailLockup(railId, 0, excessiveLockup); + vm.stopPrank(); + } + + function testAllowanceEdgeCases() public { + // 1. 
Test exact allowance consumption + uint256 exactRateAllowance = 10 ether; + uint256 exactLockupAllowance = 100 ether; + helper.setupOperatorApproval(USER1, OPERATOR, exactRateAllowance, exactLockupAllowance, MAX_LOCKUP_PERIOD); + + // Create rail + uint256 railId = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + // Use exactly the available rate allowance + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, exactRateAllowance, 0); + vm.stopPrank(); + + // Use exactly the available lockup allowance + vm.startPrank(OPERATOR); + payments.modifyRailLockup(railId, 0, exactLockupAllowance); + vm.stopPrank(); + + // Verify allowances are fully consumed + helper.verifyOperatorAllowances( + USER1, + OPERATOR, + true, + exactRateAllowance, + exactLockupAllowance, + exactRateAllowance, + exactLockupAllowance, + MAX_LOCKUP_PERIOD + ); + + // 2. Test zero allowance behavior + helper.setupOperatorApproval(USER1, OPERATOR, 0, 0, MAX_LOCKUP_PERIOD); + + // Create rail with zero allowances + uint256 railId2 = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + // Attempt to set non-zero rate (should fail) + vm.startPrank(OPERATOR); + vm.expectRevert( + abi.encodeWithSelector(Errors.OperatorRateAllowanceExceeded.selector, 0, exactRateAllowance + 1) + ); + payments.modifyRailPayment(railId2, 1, 0); + vm.stopPrank(); + + // Attempt to set non-zero lockup (should fail) + vm.startPrank(OPERATOR); + vm.expectRevert( + abi.encodeWithSelector(Errors.OperatorLockupAllowanceExceeded.selector, 0, exactLockupAllowance + 1) + ); + payments.modifyRailLockup(railId2, 0, 1); + vm.stopPrank(); + } + + function testOperatorAuthorizationBoundaries() public { + // 1. Test unapproved operator + // Try to create a rail and expect it to fail + helper.expectcreateRailToRevertWithoutOperatorApproval(); + + // 2. Setup approval and create rail + helper.setupOperatorApproval(USER1, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD); + + uint256 railId = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + // 3. Test non-operator rail modification + vm.startPrank(USER1); + vm.expectRevert(abi.encodeWithSelector(Errors.OnlyRailOperatorAllowed.selector, OPERATOR, USER1)); + payments.modifyRailPayment(railId, 10 ether, 0); + vm.stopPrank(); + + // 4. Revoke approval and verify operator can't create new rails + vm.startPrank(USER1); + payments.setOperatorApproval( + helper.testToken(), OPERATOR, false, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD + ); + vm.stopPrank(); + + // Verify operator approval was revoked + // Try to create a rail and expect it to fail + helper.expectcreateRailToRevertWithoutOperatorApproval(); + + // 5. Verify operator can still modify existing rails after approval revocation + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, 5 ether, 0); + vm.stopPrank(); + + // 6. 
Test approval scoping: setOperatorApproval only affects the caller's own approvals, so OPERATOR cannot reinstate USER1's approval + vm.startPrank(OPERATOR); + payments.setOperatorApproval( + helper.testToken(), USER2, true, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD + ); + vm.stopPrank(); + + // Verify USER1's approval for OPERATOR is still revoked + (bool isApproved,,,,,) = payments.operatorApprovals(helper.testToken(), USER1, OPERATOR); + assertFalse(isApproved, "USER1's approval for OPERATOR should remain revoked"); + } + + function testOneTimePaymentScenarios() public { + // Setup approval + helper.setupOperatorApproval(USER1, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD); + + // Create rail with fixed lockup + uint256 railId = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + uint256 paymentRate = 10 ether; + uint256 fixedLockup = 100 ether; + + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, paymentRate, 0); + payments.modifyRailLockup(railId, 0, fixedLockup); + vm.stopPrank(); + + // 1. Test partial fixed lockup consumption with a one-time payment + uint256 oneTimeAmount = 30 ether; + helper.executeOneTimePayment(railId, OPERATOR, oneTimeAmount); + + // 2. Test complete fixed lockup consumption using a one-time payment + uint256 remainingFixedLockup = fixedLockup - oneTimeAmount; + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, paymentRate, remainingFixedLockup); + vm.stopPrank(); + + // Verify fixed lockup is now zero + Payments.RailView memory rail = payments.getRail(railId); + assertEq(rail.lockupFixed, 0, "Fixed lockup should be zero"); + + // 3. Test excessive payment reverts + vm.startPrank(OPERATOR); + vm.expectRevert( + abi.encodeWithSelector(Errors.OneTimePaymentExceedsLockup.selector, railId, rail.lockupFixed, 1) + ); + payments.modifyRailPayment(railId, paymentRate, 1); // Lockup is now 0, so any payment should fail + vm.stopPrank(); + } + + function testAllowanceChangesWithOneTimePayments() public { + // Setup approval + helper.setupOperatorApproval(USER1, OPERATOR, RATE_ALLOWANCE, 1000 ether, MAX_LOCKUP_PERIOD); + + // Create rail + uint256 railId = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + uint256 paymentRate = 10 ether; + uint256 fixedLockup = 800 ether; + + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, paymentRate, 0); + payments.modifyRailLockup(railId, 0, fixedLockup); + vm.stopPrank(); + + // 1. Test allowance reduction after fixed lockup set + vm.startPrank(USER1); + payments.setOperatorApproval( + helper.testToken(), + OPERATOR, + true, + RATE_ALLOWANCE, + 500 ether, // below fixed lockup of 800 ether + MAX_LOCKUP_PERIOD + ); + vm.stopPrank(); + + // Operator should still be able to make one-time payments up to the fixed lockup + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, paymentRate, 300 ether); + vm.stopPrank(); + + // Check that one-time payment succeeded despite reduced allowance + Payments.RailView memory rail = payments.getRail(railId); + assertEq(rail.lockupFixed, fixedLockup - 300 ether, "Fixed lockup not reduced correctly"); + + // 2. 
Test zero allowance after fixed lockup set + vm.startPrank(USER1); + payments.setOperatorApproval( + helper.testToken(), + OPERATOR, + true, + RATE_ALLOWANCE, + 0, // zero allowance + MAX_LOCKUP_PERIOD + ); + vm.stopPrank(); + + // Operator should still be able to make one-time payments up to the fixed lockup + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, paymentRate, 200 ether); + vm.stopPrank(); + + // Check that one-time payment succeeded despite zero allowance + rail = payments.getRail(railId); + assertEq(rail.lockupFixed, 300 ether, "Fixed lockup not reduced correctly"); + } + + function test_OperatorCanReduceUsageOfExistingRailDespiteInsufficientAllowance() public { + // Client allows operator to use up to 90 rate/30 lockup + helper.setupOperatorApproval(USER1, OPERATOR, 90 ether, 30 ether, MAX_LOCKUP_PERIOD); + + // Operator creates a rail using 50 rate/20 lockup + uint256 railId = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, 50 ether, 0); + payments.modifyRailLockup(railId, 0, 20 ether); + vm.stopPrank(); + + // Client reduces allowance to below what's already being used + vm.startPrank(USER1); + payments.setOperatorApproval( + helper.testToken(), + OPERATOR, + true, + 40 ether, // below current usage of 50 ether + 15 ether, // below current usage of 20 ether + MAX_LOCKUP_PERIOD + ); + vm.stopPrank(); + + // Operator should still be able to reduce usage of rate/lockup on existing rail + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, 30 ether, 0); + payments.modifyRailLockup(railId, 0, 10 ether); + vm.stopPrank(); + + // Allowance - usage should be 40 - 30 = 10 for rate, 15 - 10 = 5 for lockup + ( + , + /*bool isApproved*/ + uint256 rateAllowance, + uint256 lockupAllowance, + uint256 rateUsage, + uint256 lockupUsage, + ) = helper.payments().operatorApprovals(helper.testToken(), USER1, OPERATOR); + assertEq(rateAllowance - rateUsage, 10 ether); + assertEq(lockupAllowance - lockupUsage, 5 ether); + + // Even though the operator can reduce usage on existing rails despite insufficient allowance, + // they should not be able to create new rail configurations with non-zero rate/lockup + + // Create a new rail, which should succeed + uint256 railId2 = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + uint256 attemptedUsage = rateUsage + 11 ether; + + // But attempting to set non-zero rate on the new rail should fail due to insufficient allowance + vm.startPrank(OPERATOR); + vm.expectRevert( + abi.encodeWithSelector(Errors.OperatorRateAllowanceExceeded.selector, rateAllowance, attemptedUsage) + ); + payments.modifyRailPayment(railId2, 11 ether, 0); + vm.stopPrank(); + + (,,,, lockupUsage,) = payments.operatorApprovals(helper.testToken(), USER1, OPERATOR); + uint256 oldLockupFixed = payments.getRail(railId2).lockupFixed; + uint256 newLockupFixed = 6 ether; + uint256 lockupIncrease = 0; + if (newLockupFixed > oldLockupFixed) { + lockupIncrease = newLockupFixed - oldLockupFixed; + } + attemptedUsage = lockupUsage + lockupIncrease; + + // Similarly, attempting to set non-zero lockup on the new rail should fail + vm.startPrank(OPERATOR); + vm.expectRevert( + abi.encodeWithSelector(Errors.OperatorLockupAllowanceExceeded.selector, lockupAllowance, attemptedUsage) + ); + payments.modifyRailLockup(railId2, 0, 6 ether); + vm.stopPrank(); + } + + function testAllowanceReductionScenarios() public { + // 1. 
Test reducing rate allowance below current usage + // Setup approval + helper.setupOperatorApproval( + USER1, + OPERATOR, + 100 ether, // 100 ether rate allowance + 1000 ether, + MAX_LOCKUP_PERIOD + ); + + // Create rail and set rate + uint256 railId = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, 50 ether, 0); + vm.stopPrank(); + + // Client reduces rate allowance below current usage + vm.startPrank(USER1); + payments.setOperatorApproval( + helper.testToken(), + OPERATOR, + true, + 30 ether, // below current usage of 50 ether + 1000 ether, + MAX_LOCKUP_PERIOD + ); + vm.stopPrank(); + + // Operator should be able to decrease rate + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, 30 ether, 0); // Decrease to allowance + vm.stopPrank(); + + ( + , // isApproved + uint256 rateAllowance, + , + , + , + ) = payments.operatorApprovals(helper.testToken(), USER1, OPERATOR); + uint256 attemptedRateUsage = 40 ether; + // Operator should not be able to increase rate above current allowance + vm.startPrank(OPERATOR); + vm.expectRevert( + abi.encodeWithSelector(Errors.OperatorRateAllowanceExceeded.selector, rateAllowance, attemptedRateUsage) + ); + payments.modifyRailPayment(railId, attemptedRateUsage, 0); // Try to increase above allowance + vm.stopPrank(); + + // 2. Test zeroing rate allowance after usage + vm.startPrank(USER1); + payments.setOperatorApproval( + helper.testToken(), + OPERATOR, + true, + 0, // zero allowance + 1000 ether, + MAX_LOCKUP_PERIOD + ); + vm.stopPrank(); + + // Operator should be able to decrease rate + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, 20 ether, 0); + vm.stopPrank(); + + // Operator should not be able to increase rate at all + vm.startPrank(OPERATOR); + // Payments.OperatorApproval approval = payments.operatorApprovals(helper.testToken(), USER1, OPERATOR); + vm.expectRevert(abi.encodeWithSelector(Errors.OperatorRateAllowanceExceeded.selector, 0, 21 ether)); + payments.modifyRailPayment(railId, 21 ether, 0); + vm.stopPrank(); + + // 3. 
Test reducing lockup allowance below current usage + // Create a new rail for lockup testing + uint256 railId2 = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + // Reset approval with high lockup + helper.setupOperatorApproval(USER1, OPERATOR, 50 ether, 1000 ether, MAX_LOCKUP_PERIOD); + + // Set fixed lockup + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId2, 10 ether, 0); + payments.modifyRailLockup(railId2, 0, 500 ether); + vm.stopPrank(); + + // Client reduces lockup allowance below current usage + vm.startPrank(USER1); + payments.setOperatorApproval( + helper.testToken(), + OPERATOR, + true, + 50 ether, + 300 ether, // below current usage of 500 ether + MAX_LOCKUP_PERIOD + ); + vm.stopPrank(); + + // Operator should be able to decrease fixed lockup + vm.startPrank(OPERATOR); + payments.modifyRailLockup(railId2, 0, 200 ether); + vm.stopPrank(); + + // Operator should not be able to increase fixed lockup above current allowance + vm.startPrank(OPERATOR); + vm.expectRevert(abi.encodeWithSelector(Errors.OperatorLockupAllowanceExceeded.selector, 300 ether, 400 ether)); + payments.modifyRailLockup(railId2, 0, 400 ether); + vm.stopPrank(); + } + + function testComprehensiveApprovalLifecycle() public { + // This test combines multiple approval lifecycle aspects into one comprehensive test + + // Setup approval + helper.setupOperatorApproval(USER1, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD); + + // Create two rails with different parameters + uint256 railId1 = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + uint256 railId2 = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + // Set parameters for first rail + uint256 rate1 = 10 ether; + uint256 lockupPeriod1 = 5; + uint256 fixedLockup1 = 50 ether; + + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId1, rate1, 0); + payments.modifyRailLockup(railId1, lockupPeriod1, fixedLockup1); + vm.stopPrank(); + + // Set parameters for second rail + uint256 rate2 = 15 ether; + uint256 lockupPeriod2 = 3; + uint256 fixedLockup2 = 30 ether; + + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId2, rate2, 0); + payments.modifyRailLockup(railId2, lockupPeriod2, fixedLockup2); + vm.stopPrank(); + + // Calculate expected usage + uint256 expectedRateUsage = rate1 + rate2; + uint256 expectedLockupUsage = fixedLockup1 + (rate1 * lockupPeriod1) + fixedLockup2 + (rate2 * lockupPeriod2); + + // Verify combined usage + helper.verifyOperatorAllowances( + USER1, + OPERATOR, + true, + RATE_ALLOWANCE, + LOCKUP_ALLOWANCE, + expectedRateUsage, + expectedLockupUsage, + MAX_LOCKUP_PERIOD + ); + + // Make one-time payment for first rail + uint256 oneTimeAmount = 20 ether; + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId1, rate1, oneTimeAmount); + vm.stopPrank(); + + // Revoke approval + vm.startPrank(USER1); + payments.setOperatorApproval( + helper.testToken(), OPERATOR, false, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD + ); + vm.stopPrank(); + + // Operator should still be able to modify existing rails + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId1, rate1 - 2 ether, 0); + payments.modifyRailLockup(railId2, lockupPeriod2, fixedLockup2 - 10 ether); + vm.stopPrank(); + + // Testing that operator shouldn't be able to create a new rail using try/catch + helper.expectcreateRailToRevertWithoutOperatorApproval(); + + // Reapprove with reduced allowances + vm.startPrank(USER1); + 
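// setOperatorApproval overwrites the stored allowances rather than adding to them (the reduction steps above rely on this), so this call re-grants 20 ether rate / 100 ether lockup while existing rail usage carries over. +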
payments.setOperatorApproval( + helper.testToken(), + OPERATOR, + true, + 20 ether, // Only enough for current rails + 100 ether, + MAX_LOCKUP_PERIOD + ); + vm.stopPrank(); + + // Operator should be able to create a new rail + uint256 railId3 = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + // But should not be able to exceed the new allowance + vm.startPrank(OPERATOR); + (, uint256 rateAllowance,, uint256 rateUsage,,) = + payments.operatorApprovals(helper.testToken(), USER1, OPERATOR); + uint256 attempted = rateUsage + 10 ether; // Attempt to set rate above allowance + vm.expectRevert(abi.encodeWithSelector(Errors.OperatorRateAllowanceExceeded.selector, rateAllowance, attempted)); + payments.modifyRailPayment(railId3, 10 ether, 0); // Would exceed new rate allowance + vm.stopPrank(); + } + + function testMaxLockupPeriodEnforcement() public { + // Setup initial approval with limited lockup period + uint256 limitedMaxLockupPeriod = 5; // 5 blocks max lockup period + helper.setupOperatorApproval(USER1, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, limitedMaxLockupPeriod); + + // Create rail + uint256 railId = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + // Set payment rate + uint256 paymentRate = 10 ether; + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, paymentRate, 0); + vm.stopPrank(); + + // Set lockup period exactly at the limit + vm.startPrank(OPERATOR); + payments.modifyRailLockup(railId, limitedMaxLockupPeriod, 50 ether); + vm.stopPrank(); + + // Now try to exceed the max lockup period - should revert + vm.startPrank(OPERATOR); + vm.expectRevert( + abi.encodeWithSelector( + Errors.LockupPeriodExceedsOperatorMaximum.selector, + helper.testToken(), + OPERATOR, + limitedMaxLockupPeriod, + limitedMaxLockupPeriod + 1 + ) + ); + payments.modifyRailLockup(railId, limitedMaxLockupPeriod + 1, 50 ether); + vm.stopPrank(); + } + + // Verify that operators can reduce lockup period even if it's over the max + function testReducingLockupPeriodBelowMax() public { + // Setup initial approval with high max lockup period + uint256 initialMaxLockupPeriod = 20; // 20 blocks initially + helper.setupOperatorApproval(USER1, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, initialMaxLockupPeriod); + // Create rail + uint256 railId = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + // Set payment rate and high lockup period + uint256 paymentRate = 10 ether; + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, paymentRate, 0); + payments.modifyRailLockup(railId, 15, 50 ether); // 15 blocks period + vm.stopPrank(); + + // Now client reduces max lockup period + vm.startPrank(USER1); + uint256 finalMaxLockupPeriod = 5; // Reduce to 5 blocks + helper.setupOperatorApproval(USER1, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, finalMaxLockupPeriod); + vm.stopPrank(); + + // Operator should be able to reduce period below the new max + vm.startPrank(OPERATOR); + payments.modifyRailLockup(railId, 4, 50 ether); // Lower to 4 blocks + vm.stopPrank(); + + // But not increase it above the new max, even though it's lower than what it was + vm.startPrank(OPERATOR); + vm.expectRevert( + abi.encodeWithSelector( + Errors.LockupPeriodExceedsOperatorMaximum.selector, + helper.testToken(), + OPERATOR, + finalMaxLockupPeriod, + 6 + ) + ); + payments.modifyRailLockup(railId, 6, 50 ether); // Try to increase to 6 blocks, which is over the new max of 5 + vm.stopPrank(); + } + + // SECTION: Increase Operator Approval 
Tests + + function testIncreaseOperatorApproval_HappyPath() public { + // Setup initial approval + helper.setupOperatorApproval(USER1, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD); + + // Verify initial state + (bool isApproved, uint256 rateAllowance, uint256 lockupAllowance,,, uint256 maxLockupPeriod) = + payments.operatorApprovals(helper.testToken(), USER1, OPERATOR); + assertEq(isApproved, true); + assertEq(rateAllowance, RATE_ALLOWANCE); + assertEq(lockupAllowance, LOCKUP_ALLOWANCE); + assertEq(maxLockupPeriod, MAX_LOCKUP_PERIOD); + + // Increase allowances + uint256 rateIncrease = 50 ether; + uint256 lockupIncrease = 500 ether; + + vm.startPrank(USER1); + payments.increaseOperatorApproval(helper.testToken(), OPERATOR, rateIncrease, lockupIncrease); + vm.stopPrank(); + + // Verify increased allowances + (isApproved, rateAllowance, lockupAllowance,,, maxLockupPeriod) = + payments.operatorApprovals(helper.testToken(), USER1, OPERATOR); + assertEq(isApproved, true); + assertEq(rateAllowance, RATE_ALLOWANCE + rateIncrease); + assertEq(lockupAllowance, LOCKUP_ALLOWANCE + lockupIncrease); + assertEq(maxLockupPeriod, MAX_LOCKUP_PERIOD); // Should remain unchanged + } + + function testIncreaseOperatorApproval_ZeroIncrease() public { + // Setup initial approval + helper.setupOperatorApproval(USER1, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD); + + // Increase by zero (should work but not change anything) + vm.startPrank(USER1); + payments.increaseOperatorApproval(helper.testToken(), OPERATOR, 0, 0); + vm.stopPrank(); + + // Verify allowances remain the same + (bool isApproved, uint256 rateAllowance, uint256 lockupAllowance,,, uint256 maxLockupPeriod) = + payments.operatorApprovals(helper.testToken(), USER1, OPERATOR); + assertEq(isApproved, true); + assertEq(rateAllowance, RATE_ALLOWANCE); + assertEq(lockupAllowance, LOCKUP_ALLOWANCE); + assertEq(maxLockupPeriod, MAX_LOCKUP_PERIOD); + } + + function testIncreaseOperatorApproval_OperatorNotApproved() public { + // Get token address before setting up expectRevert + IERC20 tokenAddress = helper.testToken(); + + // Try to increase approval for non-approved operator + vm.startPrank(USER1); + vm.expectRevert(abi.encodeWithSelector(Errors.OperatorNotApproved.selector, USER1, OPERATOR)); + payments.increaseOperatorApproval(tokenAddress, OPERATOR, 50 ether, 500 ether); + vm.stopPrank(); + } + + function testIncreaseOperatorApproval_ZeroOperatorAddress() public { + // Get token address before setting up expectRevert + IERC20 tokenAddress = helper.testToken(); + + // Try to increase approval for zero address operator + vm.startPrank(USER1); + vm.expectRevert(abi.encodeWithSelector(Errors.ZeroAddressNotAllowed.selector, "operator")); + payments.increaseOperatorApproval(tokenAddress, address(0), 50 ether, 500 ether); + vm.stopPrank(); + } + + function testIncreaseOperatorApproval_AfterRevocation() public { + // Setup initial approval + helper.setupOperatorApproval(USER1, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD); + + // Revoke approval + helper.revokeOperatorApprovalAndVerify(USER1, OPERATOR); + + // Get token address before setting up expectRevert + IERC20 tokenAddress = helper.testToken(); + + // Try to increase revoked approval + vm.startPrank(USER1); + vm.expectRevert(abi.encodeWithSelector(Errors.OperatorNotApproved.selector, USER1, OPERATOR)); + payments.increaseOperatorApproval(tokenAddress, OPERATOR, 50 ether, 500 ether); + vm.stopPrank(); + } + + function 
testIncreaseOperatorApproval_WithExistingUsage() public { + // Setup initial approval + helper.setupOperatorApproval(USER1, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD); + + // Create rail and use some allowance + uint256 railId = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + uint256 paymentRate = 30 ether; + uint256 lockupFixed = 200 ether; + + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, paymentRate, 0); + payments.modifyRailLockup(railId, 0, lockupFixed); + vm.stopPrank(); + + // Verify usage before increase + (, uint256 rateAllowanceBefore, uint256 lockupAllowanceBefore, uint256 rateUsage, uint256 lockupUsage,) = + payments.operatorApprovals(helper.testToken(), USER1, OPERATOR); + assertEq(rateUsage, paymentRate); + assertEq(lockupUsage, lockupFixed); + + // Increase allowances + uint256 rateIncrease = 70 ether; + uint256 lockupIncrease = 800 ether; + + vm.startPrank(USER1); + payments.increaseOperatorApproval(helper.testToken(), OPERATOR, rateIncrease, lockupIncrease); + vm.stopPrank(); + + // Verify allowances increased but usage remains the same + (, uint256 rateAllowanceAfter, uint256 lockupAllowanceAfter, uint256 rateUsageAfter, uint256 lockupUsageAfter,) + = payments.operatorApprovals(helper.testToken(), USER1, OPERATOR); + assertEq(rateAllowanceAfter, rateAllowanceBefore + rateIncrease); + assertEq(lockupAllowanceAfter, lockupAllowanceBefore + lockupIncrease); + assertEq(rateUsageAfter, rateUsage); // Usage should remain unchanged + assertEq(lockupUsageAfter, lockupUsage); // Usage should remain unchanged + } + + function testIncreaseOperatorApproval_MultipleIncreases() public { + // Setup initial approval + helper.setupOperatorApproval(USER1, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD); + + // First increase + uint256 firstRateIncrease = 25 ether; + uint256 firstLockupIncrease = 250 ether; + + vm.startPrank(USER1); + payments.increaseOperatorApproval(helper.testToken(), OPERATOR, firstRateIncrease, firstLockupIncrease); + vm.stopPrank(); + + // Second increase + uint256 secondRateIncrease = 35 ether; + uint256 secondLockupIncrease = 350 ether; + + vm.startPrank(USER1); + payments.increaseOperatorApproval(helper.testToken(), OPERATOR, secondRateIncrease, secondLockupIncrease); + vm.stopPrank(); + + // Verify cumulative increases + (, uint256 rateAllowance, uint256 lockupAllowance,,,) = + payments.operatorApprovals(helper.testToken(), USER1, OPERATOR); + assertEq(rateAllowance, RATE_ALLOWANCE + firstRateIncrease + secondRateIncrease); + assertEq(lockupAllowance, LOCKUP_ALLOWANCE + firstLockupIncrease + secondLockupIncrease); + } +} diff --git a/service_contracts/test/payments/OperatorApprovalUsageLeak.t.sol b/service_contracts/test/payments/OperatorApprovalUsageLeak.t.sol new file mode 100644 index 00000000..a81809d8 --- /dev/null +++ b/service_contracts/test/payments/OperatorApprovalUsageLeak.t.sol @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT + +pragma solidity ^0.8.27; + +import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; +import {Test} from "forge-std/Test.sol"; +import {Payments} from "@payments/Payments.sol"; +import {PaymentsTestHelpers} from "./helpers/PaymentsTestHelpers.sol"; +import {BaseTestHelper} from "./helpers/BaseTestHelper.sol"; +import {console} from "forge-std/console.sol"; + +contract OperatorApprovalUsageLeakTest is Test, BaseTestHelper { + PaymentsTestHelpers helper; + Payments payments; + IERC20 testToken; + + uint256 constant 
DEPOSIT_AMOUNT = 1000 ether; + uint256 constant RATE_ALLOWANCE = 200 ether; + uint256 constant LOCKUP_ALLOWANCE = 2000 ether; + uint256 constant MAX_LOCKUP_PERIOD = 100; + + function setUp() public { + helper = new PaymentsTestHelpers(); + helper.setupStandardTestEnvironment(); + payments = helper.payments(); + testToken = helper.testToken(); + + // Deposit funds for client + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + } + + function testOperatorLockupUsageLeakOnRailFinalization() public { + // Setup operator approval + helper.setupOperatorApproval(USER1, OPERATOR, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, MAX_LOCKUP_PERIOD); + + // Create a rail + uint256 railId = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + // Set payment rate and lockup + uint256 paymentRate = 10 ether; + uint256 lockupPeriod = 10; // 10 blocks + uint256 lockupFixed = 100 ether; + + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, paymentRate, 0); + payments.modifyRailLockup(railId, lockupPeriod, lockupFixed); + vm.stopPrank(); + + // Calculate expected lockup usage + uint256 expectedLockupUsage = lockupFixed + (paymentRate * lockupPeriod); + + console.log("Initial lockup usage calculation:"); + console.log(" Fixed lockup:", lockupFixed); + console.log(" Rate-based lockup:", paymentRate * lockupPeriod); + console.log(" Total expected:", expectedLockupUsage); + + // Verify initial lockup usage is correct + helper.verifyOperatorAllowances( + USER1, OPERATOR, true, RATE_ALLOWANCE, LOCKUP_ALLOWANCE, paymentRate, expectedLockupUsage, MAX_LOCKUP_PERIOD + ); + + // Terminate the rail (by client) + vm.startPrank(USER1); + payments.terminateRail(railId); + vm.stopPrank(); + + // Get the account's lockup settled epoch + (,,, uint256 lockupLastSettledAt) = payments.accounts(testToken, USER1); + + // Calculate the rail's end epoch + uint256 endEpoch = lockupLastSettledAt + lockupPeriod; + + console.log("\nAfter termination:"); + console.log(" Current block:", block.number); + console.log(" Lockup last settled at:", lockupLastSettledAt); + console.log(" Rail end epoch:", endEpoch); + + // Move time forward to after the rail's end epoch + vm.roll(endEpoch + 1); + + console.log("\nAfter time advance:"); + console.log(" Current block:", block.number); + + // Settle the rail completely - this will trigger finalizeTerminatedRail + vm.startPrank(USER2); // Payee can settle + (uint256 settledAmount,,,, uint256 finalEpoch,) = payments.settleRail(railId, endEpoch); + vm.stopPrank(); + + console.log("\nAfter settlement:"); + console.log(" Settled amount:", settledAmount); + console.log(" Final epoch:", finalEpoch); + + // Check operator lockup usage after finalization + (,,, uint256 rateUsageAfter, uint256 lockupUsageAfter,) = payments.operatorApprovals(testToken, USER1, OPERATOR); + + console.log("\nFinal operator usage:"); + console.log(" Rate usage:", rateUsageAfter); + console.log(" Lockup usage:", lockupUsageAfter); + + // Assert the correct behavior: lockup usage should be 0 after finalization + assertEq(lockupUsageAfter, 0, "Lockup usage should be 0 after rail finalization"); + assertEq(rateUsageAfter, 0, "Rate usage should be 0 after rail finalization"); + } + + function testMultipleRailsShowCumulativeLeak() public { + // Setup operator approval with higher allowances + helper.setupOperatorApproval(USER1, OPERATOR, RATE_ALLOWANCE * 5, LOCKUP_ALLOWANCE * 5, MAX_LOCKUP_PERIOD); + + uint256 totalLeakedUsage = 0; + + // Create and terminate multiple rails to show cumulative effect + for 
(uint256 i = 1; i <= 3; i++) { + console.log("\n=== Rail", i, "==="); + + // Create rail + uint256 railId = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + // Set payment rate and lockup + uint256 paymentRate = 10 ether * i; + uint256 lockupPeriod = 5 * i; + uint256 lockupFixed = 50 ether * i; + + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, paymentRate, 0); + payments.modifyRailLockup(railId, lockupPeriod, lockupFixed); + vm.stopPrank(); + + // Terminate the rail + vm.startPrank(USER1); + payments.terminateRail(railId); + vm.stopPrank(); + + // Get end epoch + (,,, uint256 lockupLastSettledAt) = payments.accounts(testToken, USER1); + uint256 endEpoch = lockupLastSettledAt + lockupPeriod; + + // Move time forward + vm.roll(endEpoch + 1); + + // Settle to trigger finalization + vm.startPrank(USER2); + payments.settleRail(railId, endEpoch); + vm.stopPrank(); + + // Track leaked usage + uint256 leakedForThisRail = paymentRate * lockupPeriod; + totalLeakedUsage += leakedForThisRail; + + console.log(" Leaked usage from this rail:", leakedForThisRail); + } + + // Check final operator lockup usage + (,,,, uint256 finalLockupUsage,) = payments.operatorApprovals(testToken, USER1, OPERATOR); + + console.log("\n=== FINAL OPERATOR USAGE ==="); + console.log("Final operator lockup usage:", finalLockupUsage); + console.log("Expected (correct) lockup usage: 0"); + + // Assert the correct behavior: all lockup usage should be cleared after all rails are finalized + assertEq(finalLockupUsage, 0, "All lockup usage should be cleared after finalizing all rails"); + } +} diff --git a/service_contracts/test/payments/PayeeFaultArbitrationBug.t.sol b/service_contracts/test/payments/PayeeFaultArbitrationBug.t.sol new file mode 100644 index 00000000..e7752820 --- /dev/null +++ b/service_contracts/test/payments/PayeeFaultArbitrationBug.t.sol @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT + +pragma solidity ^0.8.27; + +import {Test} from "forge-std/Test.sol"; +import {Payments} from "@payments/Payments.sol"; +import {MockERC20} from "./mocks/MockERC20.sol"; +import {MockValidator} from "./mocks/MockValidator.sol"; +import {PaymentsTestHelpers} from "./helpers/PaymentsTestHelpers.sol"; +import {BaseTestHelper} from "./helpers/BaseTestHelper.sol"; +import {console} from "forge-std/console.sol"; + +contract PayeeFaultArbitrationBugTest is Test, BaseTestHelper { + PaymentsTestHelpers helper; + Payments payments; + MockERC20 token; + MockValidator validator; + + uint256 constant 
DEPOSIT_AMOUNT = 200 ether; + + function setUp() public { + helper = new PaymentsTestHelpers(); + helper.setupStandardTestEnvironment(); + payments = helper.payments(); + token = MockERC20(address(helper.testToken())); + + // Create a validator that will reduce payment when the payee fails + validator = new MockValidator(MockValidator.ValidatorMode.REDUCE_AMOUNT); + validator.configure(20); // Only approve 20% of requested payment (simulating payee fault) + + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + } + + function testLockupReturnedWithFaultTermination() public { + uint256 paymentRate = 5 ether; + uint256 lockupPeriod = 12; + uint256 fixedLockup = 10 ether; + + uint256 railId = helper.setupRailWithParameters( + USER1, USER2, OPERATOR, paymentRate, lockupPeriod, fixedLockup, address(validator), SERVICE_FEE_RECIPIENT + ); + + uint256 expectedTotalLockup = fixedLockup + (paymentRate * lockupPeriod); + + console.log("\n=== FIXED LOCKUP TEST ==="); + console.log("Fixed 
lockup:", fixedLockup); + console.log("Rate-based lockup:", paymentRate * lockupPeriod); + console.log("Expected total lockup:", expectedTotalLockup); + + // SP fails immediately, terminate + vm.prank(OPERATOR); + payments.terminateRail(railId); + + // Verify that railTerminated was called on the validator with correct parameters + assertTrue(validator.railTerminatedCalled(), "railTerminated should have been called"); + assertEq(validator.lastTerminatedRailId(), railId, "Incorrect railId passed to validator"); + assertEq(validator.lastTerminator(), OPERATOR, "Incorrect terminator passed to validator"); + + // Get the rail to verify the endEpoch matches + Payments.RailView memory rail = payments.getRail(railId); + assertEq(validator.lastEndEpoch(), rail.endEpoch, "Incorrect endEpoch passed to validator"); + + helper.advanceBlocks(15); + + vm.prank(USER1); + payments.settleRail(railId, block.number); + + Payments.Account memory payerFinal = helper.getAccountData(USER1); + + console.log("Lockup after:", payerFinal.lockupCurrent); + console.log("Expected lockup:", expectedTotalLockup); + + require(payerFinal.lockupCurrent == 0, "Payee fault bug: Fixed lockup not fully returned"); + } + + function testLockupReturnedWithFault() public { + uint256 paymentRate = 5 ether; + uint256 lockupPeriod = 12; + uint256 fixedLockup = 10 ether; + + uint256 railId = helper.setupRailWithParameters( + USER1, USER2, OPERATOR, paymentRate, lockupPeriod, fixedLockup, address(validator), SERVICE_FEE_RECIPIENT + ); + + uint256 expectedTotalLockup = fixedLockup + (paymentRate * lockupPeriod); + + console.log("\n=== FIXED LOCKUP TEST ==="); + console.log("Fixed lockup:", fixedLockup); + console.log("Rate-based lockup:", paymentRate * lockupPeriod); + console.log("Expected total lockup:", expectedTotalLockup); + + vm.prank(OPERATOR); + helper.advanceBlocks(15); + + vm.prank(USER1); + payments.settleRail(railId, block.number); + + Payments.Account memory payerFinal = helper.getAccountData(USER1); + + console.log("Lockup after:", payerFinal.lockupCurrent); + console.log("Expected lockup:", expectedTotalLockup); + + require(payerFinal.lockupCurrent == expectedTotalLockup, "Rail not terminated: full lockup should remain in place"); + } + + function testLockupReturnedWithFaultReducedDuration() public { + uint256 paymentRate = 5 ether; + uint256 lockupPeriod = 12; + uint256 fixedLockup = 10 ether; + + MockValidator dv = new MockValidator(MockValidator.ValidatorMode.REDUCE_DURATION); + dv.configure(20); // Only approve 20% of requested duration + + uint256 railId = helper.setupRailWithParameters( + USER1, USER2, OPERATOR, paymentRate, lockupPeriod, fixedLockup, address(dv), SERVICE_FEE_RECIPIENT + ); + + // We will try to settle for 15 epochs, but the validator will only approve 20% of the duration, i.e. 3 epochs; + // this means that funds for the remaining 12 epochs will still be locked up. 
+ uint256 expectedTotalLockup = fixedLockup + (paymentRate * lockupPeriod) + (12 * paymentRate); + + console.log("\n=== FIXED LOCKUP TEST ==="); + console.log("Fixed lockup:", fixedLockup); + console.log("Rate-based lockup:", paymentRate * lockupPeriod); + console.log("Expected total lockup:", expectedTotalLockup); + + vm.prank(OPERATOR); + helper.advanceBlocks(15); + + vm.prank(USER1); + payments.settleRail(railId, block.number); + + Payments.Account memory payerFinal = helper.getAccountData(USER1); + + console.log("Lockup after:", payerFinal.lockupCurrent); + console.log("Expected lockup:", expectedTotalLockup); + + require(payerFinal.lockupCurrent == expectedTotalLockup, "Unsettled epochs should remain locked after reduced-duration validation"); + } +} diff --git a/service_contracts/test/payments/PaymentsAccessControl.t.sol b/service_contracts/test/payments/PaymentsAccessControl.t.sol new file mode 100644 index 00000000..19aad7bf --- /dev/null +++ b/service_contracts/test/payments/PaymentsAccessControl.t.sol @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.27; + +import {Test} from "forge-std/Test.sol"; +import {Payments} from "@payments/Payments.sol"; +import {PaymentsTestHelpers} from "./helpers/PaymentsTestHelpers.sol"; +import {BaseTestHelper} from "./helpers/BaseTestHelper.sol"; +import {Errors} from "@payments/Errors.sol"; + +contract AccessControlTest is Test, BaseTestHelper { + Payments payments; + PaymentsTestHelpers helper; + + uint256 constant DEPOSIT_AMOUNT = 100 ether; + uint256 constant MAX_LOCKUP_PERIOD = 100; + uint256 railId; + + function setUp() public { + helper = new PaymentsTestHelpers(); + helper.setupStandardTestEnvironment(); + payments = helper.payments(); + + // Setup operator approval + helper.setupOperatorApproval( + USER1, + OPERATOR, + 10 ether, // rateAllowance + 100 ether, // lockupAllowance + MAX_LOCKUP_PERIOD // maxLockupPeriod + ); + + // Deposit funds for client + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + + // Create a rail for testing + railId = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + // Set up rail parameters + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, 1 ether, 0); // 1 ether per block + payments.modifyRailLockup(railId, 10, 10 ether); // 10 block lockup period, 10 ether fixed + vm.stopPrank(); + } + + function testTerminateRail_SucceedsWhenCalledByClient() public { + vm.startPrank(USER1); + payments.terminateRail(railId); + vm.stopPrank(); + } + + function testTerminateRail_SucceedsWhenCalledByOperator() public { + vm.startPrank(OPERATOR); + payments.terminateRail(railId); + vm.stopPrank(); + } + + function testTerminateRail_RevertsWhenCalledByRecipient() public { + vm.startPrank(USER2); + vm.expectRevert( + abi.encodeWithSelector(Errors.NotAuthorizedToTerminateRail.selector, railId, USER1, OPERATOR, USER2) + ); + payments.terminateRail(railId); + vm.stopPrank(); + } + + function testTerminateRail_RevertsWhenCalledByUnauthorized() public { + vm.startPrank(address(0x99)); + vm.expectRevert( + abi.encodeWithSelector(Errors.NotAuthorizedToTerminateRail.selector, railId, USER1, OPERATOR, address(0x99)) + ); + payments.terminateRail(railId); + vm.stopPrank(); + } + + function testModifyRailLockup_SucceedsWhenCalledByOperator() public { + vm.startPrank(OPERATOR); + payments.modifyRailLockup(railId, 20, 20 ether); + vm.stopPrank(); + } + + function testModifyRailLockup_RevertsWhenCalledByClient() public { + vm.startPrank(USER1); + 
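+ // (Error argument convention, as assumed from its use throughout these tests: + // OnlyRailOperatorAllowed(expected, actual) carries the authorized operator and the actual caller.)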
vm.expectRevert(abi.encodeWithSelector(Errors.OnlyRailOperatorAllowed.selector, OPERATOR, USER1)); + payments.modifyRailLockup(railId, 20, 20 ether); + vm.stopPrank(); + } + + function testModifyRailLockup_RevertsWhenCalledByRecipient() public { + vm.startPrank(USER2); + vm.expectRevert(abi.encodeWithSelector(Errors.OnlyRailOperatorAllowed.selector, OPERATOR, USER2)); + payments.modifyRailLockup(railId, 20, 20 ether); + vm.stopPrank(); + } + + function testModifyRailLockup_RevertsWhenCalledByUnauthorized() public { + vm.startPrank(address(0x99)); + vm.expectRevert(abi.encodeWithSelector(Errors.OnlyRailOperatorAllowed.selector, OPERATOR, address(0x99))); + payments.modifyRailLockup(railId, 20, 20 ether); + vm.stopPrank(); + } + + function testModifyRailPayment_SucceedsWhenCalledByOperator() public { + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, 2 ether, 0); + vm.stopPrank(); + } + + function testModifyRailPayment_RevertsWhenCalledByClient() public { + vm.startPrank(USER1); + vm.expectRevert(abi.encodeWithSelector(Errors.OnlyRailOperatorAllowed.selector, OPERATOR, USER1)); + payments.modifyRailPayment(railId, 2 ether, 0); + vm.stopPrank(); + } + + function testModifyRailPayment_RevertsWhenCalledByRecipient() public { + vm.startPrank(USER2); + vm.expectRevert(abi.encodeWithSelector(Errors.OnlyRailOperatorAllowed.selector, OPERATOR, USER2)); + payments.modifyRailPayment(railId, 2 ether, 0); + vm.stopPrank(); + } + + function testModifyRailPayment_RevertsWhenCalledByUnauthorized() public { + vm.startPrank(address(0x99)); + vm.expectRevert(abi.encodeWithSelector(Errors.OnlyRailOperatorAllowed.selector, OPERATOR, address(0x99))); + payments.modifyRailPayment(railId, 2 ether, 0); + vm.stopPrank(); + } + + function testSettleTerminatedRailWithoutValidation_RevertsWhenCalledByOperator() public { + // 2. 
Add more funds + helper.makeDeposit( + USER1, + USER1, + 100 ether // Plenty of funds + ); + + // Terminate the rail + vm.startPrank(USER1); + payments.terminateRail(railId); + vm.stopPrank(); + + // Attempt to settle from operator account + vm.startPrank(OPERATOR); + vm.expectRevert(abi.encodeWithSelector(Errors.OnlyRailClientAllowed.selector, USER1, OPERATOR)); + payments.settleTerminatedRailWithoutValidation(railId); + vm.stopPrank(); + } + + function testTerminateRail_OnlyOperatorCanTerminateWhenLockupNotFullySettled() public { + // Advance blocks to create an unsettled state + helper.advanceBlocks(500); + + // Client should not be able to terminate because lockup is not fully settled + vm.startPrank(USER1); + vm.expectRevert( + abi.encodeWithSelector(Errors.NotAuthorizedToTerminateRail.selector, railId, USER1, OPERATOR, USER1) + ); + payments.terminateRail(railId); + vm.stopPrank(); + + // Operator should be able to terminate even when lockup is not fully settled + vm.startPrank(OPERATOR); + payments.terminateRail(railId); + vm.stopPrank(); + + // Verify the rail was terminated by checking its end epoch is set + Payments.RailView memory railView = payments.getRail(railId); + assertTrue(railView.endEpoch > 0, "Rail was not terminated properly"); + } +} diff --git a/service_contracts/test/payments/PaymentsEvents.t.sol b/service_contracts/test/payments/PaymentsEvents.t.sol new file mode 100644 index 00000000..77fbc4a2 --- /dev/null +++ b/service_contracts/test/payments/PaymentsEvents.t.sol @@ -0,0 +1,368 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.27; + +import {Test} from "forge-std/Test.sol"; +import {Payments} from "@payments/Payments.sol"; +import {PaymentsTestHelpers} from "./helpers/PaymentsTestHelpers.sol"; +import {BaseTestHelper} from "./helpers/BaseTestHelper.sol"; +import {MockERC20} from "./mocks/MockERC20.sol"; + +/** + * @title PaymentsEventsTest + * @dev Test contract for verifying all events emitted by the Payments contract + */ +contract PaymentsEventsTest is Test, BaseTestHelper { + Payments public payments; + PaymentsTestHelpers public helper; + MockERC20 public testToken; + + uint256 constant DEPOSIT_AMOUNT = 100 ether; + uint256 constant MAX_LOCKUP_PERIOD = 100; + uint256 railId; + + function setUp() public { + helper = new PaymentsTestHelpers(); + helper.setupStandardTestEnvironment(); + payments = helper.payments(); + testToken = helper.testToken(); + + // Setup operator approval + helper.setupOperatorApproval( + USER1, + OPERATOR, + 10 ether, // rateAllowance + 100 ether, // lockupAllowance + MAX_LOCKUP_PERIOD // maxLockupPeriod + ); + + // Deposit funds for client + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + } + + /** + * @dev Test for AccountLockupSettled event + */ + function testAccountLockupSettledEvent() public { + // Create a rail to trigger account lockup changes + railId = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + // Set up rail parameters which will trigger account settlement + vm.startPrank(OPERATOR); + + payments.modifyRailLockup(railId, 5, 0 ether); + + // This will trigger account lockup settlement + // account.lockupCurrent = rate * period = 25 ether + payments.modifyRailPayment(railId, 5 ether, 0); // 5 ether per block + + vm.stopPrank(); + + helper.advanceBlocks(5); + + vm.startPrank(OPERATOR); + + // Expect both events to be emitted + // lockupCurrent = 25 ether (from modifyRailPayment) + 5 * 5 ether (elapsedTime * lockupRate) + vm.expectEmit(true, true, true, true); + 
emit Payments.AccountLockupSettled(testToken, USER1, 50 ether, 5 ether, block.number); + vm.expectEmit(true, true, true, true); + emit Payments.RailLockupModified(railId, 5, 10, 0, 0); + + payments.modifyRailLockup(railId, 10, 0 ether); + + vm.stopPrank(); + } + + /** + * @dev Test for OperatorApprovalUpdated event + */ + function testOperatorApprovalUpdatedEvent() public { + vm.startPrank(USER1); + + // Expect the event to be emitted + vm.expectEmit(true, true, true, true); + emit Payments.OperatorApprovalUpdated(testToken, USER1, OPERATOR2, true, 5 ether, 50 ether, MAX_LOCKUP_PERIOD); + + // Set operator approval + payments.setOperatorApproval( + testToken, + OPERATOR2, + true, + 5 ether, // rateAllowance + 50 ether, // lockupAllowance + MAX_LOCKUP_PERIOD // maxLockupPeriod + ); + + vm.stopPrank(); + } + + /** + * @dev Test for RailCreated event + */ + function testRailCreatedEvent() public { + vm.startPrank(OPERATOR); + + // Expect the event to be emitted + vm.expectEmit(true, true, true, true); + emit Payments.RailCreated( + 1, // railId (assuming this is the first rail) + USER1, // payer + USER2, // payee + testToken, // token + OPERATOR, // operator + address(0), // validator + SERVICE_FEE_RECIPIENT, // serviceFeeRecipient + 0 // commissionRateBps + ); + + // Create rail + payments.createRail( + testToken, + USER1, + USER2, + address(0), // validator + 0, // commissionRateBps + SERVICE_FEE_RECIPIENT // serviceFeeRecipient + ); + + vm.stopPrank(); + } + + /** + * @dev Test for RailLockupModified event + */ + function testRailLockupModifiedEvent() public { + // Create a rail first + railId = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + vm.startPrank(OPERATOR); + + // Expect the event to be emitted + vm.expectEmit(true, false, false, true); + emit Payments.RailLockupModified(railId, 0, 10, 0, 10 ether); + + // Modify rail lockup + payments.modifyRailLockup(railId, 10, 10 ether); + + vm.stopPrank(); + } + + /** + * @dev Test for RailOneTimePaymentProcessed event + */ + function testRailOneTimePaymentEvent() public { + // Create a rail first + railId = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + // Set up rail parameters + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, 1 ether, 0); + payments.modifyRailLockup(railId, 10, 10 ether); + + // Calculate expected values + Payments.RailView memory rail = payments.getRail(railId); + uint256 oneTimeAmount = 5 ether; + uint256 expectedNetworkFee = + oneTimeAmount * payments.NETWORK_FEE_NUMERATOR() / payments.NETWORK_FEE_DENOMINATOR(); + uint256 expectedOperatorCommission = + ((oneTimeAmount - expectedNetworkFee) * rail.commissionRateBps) / payments.COMMISSION_MAX_BPS(); + uint256 expectedNetPayeeAmount = oneTimeAmount - expectedOperatorCommission - expectedNetworkFee; + + // Expect the event to be emitted + vm.expectEmit(true, false, false, true); + emit Payments.RailOneTimePaymentProcessed( + railId, expectedNetPayeeAmount, expectedOperatorCommission, expectedNetworkFee + ); + + // Execute one-time payment by calling modifyRailPayment with the current rate and a one-time payment amount + payments.modifyRailPayment(railId, 1 ether, oneTimeAmount); + + vm.stopPrank(); + } + + /** + * @dev Test for RailRateModified event + */ + function testRailPaymentRateModifiedEvent() public { + // Create a rail first + railId = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + vm.startPrank(OPERATOR); + + // Expect the event to be emitted + vm.expectEmit(true, false, false, true); + 
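+ // (Flag semantics per Foundry's expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData): + // here topic1 — assumed to be the indexed railId — and the event data (old/new rate) are verified.)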
emit Payments.RailRateModified(railId, 0, 1 ether); + + // Modify rail payment rate + payments.modifyRailPayment(railId, 1 ether, 0); + + vm.stopPrank(); + } + + /** + * @dev Test for RailSettled event + */ + function testRailSettledEvent() public { + // Create and set up a rail + railId = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, 1 ether, 0); + payments.modifyRailLockup(railId, 10, 10 ether); + vm.stopPrank(); + + // Advance blocks to accumulate payment + helper.advanceBlocks(5); + + vm.startPrank(USER1); + + // Expected values + Payments.RailView memory rail = payments.getRail(railId); + uint256 totalSettledAmount = 5 * rail.paymentRate; + uint256 totalNetworkFee = + 5 * rail.paymentRate * payments.NETWORK_FEE_NUMERATOR() / payments.NETWORK_FEE_DENOMINATOR(); + uint256 totalOperatorCommission = + ((totalSettledAmount - totalNetworkFee) * rail.commissionRateBps) / payments.COMMISSION_MAX_BPS(); + uint256 totalNetPayeeAmount = totalSettledAmount - totalNetworkFee - totalOperatorCommission; + + // Expect the event to be emitted + vm.expectEmit(true, true, false, true); + emit Payments.RailSettled( + railId, totalSettledAmount, totalNetPayeeAmount, totalOperatorCommission, totalNetworkFee, block.number + ); + + // Settle rail + payments.settleRail(railId, block.number); + + vm.stopPrank(); + } + + /** + * @dev Test for RailTerminated event + */ + function testRailTerminatedEvent() public { + // Create and set up a rail + railId = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, 1 ether, 0); + payments.modifyRailLockup(railId, 10, 10 ether); + vm.stopPrank(); + + vm.startPrank(USER1); + + // Expected end epoch + Payments.RailView memory rail = payments.getRail(railId); + uint256 expectedEndEpoch = block.number + rail.lockupPeriod; + // Expect the event to be emitted + vm.expectEmit(true, true, false, true); + emit Payments.RailTerminated(railId, USER1, expectedEndEpoch); + + // Terminate rail + payments.terminateRail(railId); + + vm.stopPrank(); + } + + /** + * @dev Test for RailFinalized event + */ + function testRailFinalizedEvent() public { + // Create and set up a rail + railId = helper.createRail(USER1, USER2, OPERATOR, address(0), SERVICE_FEE_RECIPIENT); + + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, 1 ether, 0); + payments.modifyRailLockup(railId, 10, 10 ether); + vm.stopPrank(); + + // Terminate the rail + vm.startPrank(USER1); + payments.terminateRail(railId); + vm.stopPrank(); + + // Get the rail to check its end epoch + Payments.RailView memory rail = payments.getRail(railId); + + // Advance blocks past the end epoch + helper.advanceBlocks(rail.lockupPeriod + 1); + + vm.startPrank(USER1); + + // Expect the event to be emitted + vm.expectEmit(true, false, false, true); + emit Payments.RailFinalized(railId); + + // Settle terminated rail to trigger finalization + payments.settleTerminatedRailWithoutValidation(railId); + + vm.stopPrank(); + } + + /** + * @dev Test for DepositRecorded event + */ + function testDepositRecordedEvent() public { + vm.startPrank(USER1); + + // Make sure we have approval + testToken.approve(address(payments), 10 ether); + + // Expect both events, with all topics and data checked + vm.expectEmit(true, true, true, true); + emit Payments.AccountLockupSettled(testToken, USER2, 0, 0, block.number); + vm.expectEmit(true, true, true, true); + emit 
Payments.DepositRecorded(testToken, USER1, USER2, 10 ether); + + // Deposit tokens + payments.deposit(testToken, USER2, 10 ether); + + vm.stopPrank(); + + // Test event in DepositWithPermit + // Use a private key for signing + uint256 privateKey = 1; + address signer = vm.addr(privateKey); + + // Mint tokens to the signer + MockERC20(testToken).mint(signer, 50 ether); + + uint256 depositAmount = 10 ether; + uint256 deadline = block.timestamp + 1 hours; + + // Get signature components + (uint8 v, bytes32 r, bytes32 s) = + helper.getPermitSignature(privateKey, signer, address(payments), depositAmount, deadline); + + vm.startPrank(signer); + + // Expect both events to be emitted + vm.expectEmit(true, true, false, true); + emit Payments.AccountLockupSettled(testToken, signer, 0, 0, block.number); + vm.expectEmit(true, true, false, true); + emit Payments.DepositRecorded(testToken, signer, signer, depositAmount); + + // Deposit with permit + payments.depositWithPermit(testToken, signer, depositAmount, deadline, v, r, s); + + vm.stopPrank(); + } + + /** + * @dev Test for WithdrawRecorded event + */ + function testWithdrawRecordedEvent() public { + // First make a deposit to USER2 + helper.makeDeposit(USER1, USER2, 10 ether); + + vm.startPrank(USER2); + + // Expect the event to be emitted + vm.expectEmit(true, true, true, true); + emit Payments.WithdrawRecorded(testToken, USER2, USER2, 5 ether); + + // Withdraw tokens + payments.withdraw(testToken, 5 ether); + + vm.stopPrank(); + } +} diff --git a/service_contracts/test/payments/RailGetters.t.sol b/service_contracts/test/payments/RailGetters.t.sol new file mode 100644 index 00000000..bbeda149 --- /dev/null +++ b/service_contracts/test/payments/RailGetters.t.sol @@ -0,0 +1,378 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT + +pragma solidity ^0.8.27; + +import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; + +import {Test} from "forge-std/Test.sol"; +import {Payments} from "@payments/Payments.sol"; +import {MockERC20} from "./mocks/MockERC20.sol"; +import {PaymentsTestHelpers} from "./helpers/PaymentsTestHelpers.sol"; +import {RailSettlementHelpers} from "./helpers/RailSettlementHelpers.sol"; +import {BaseTestHelper} from "./helpers/BaseTestHelper.sol"; + +contract PayeeRailsTest is Test, BaseTestHelper { + PaymentsTestHelpers helper; + RailSettlementHelpers settlementHelper; + Payments payments; + MockERC20 token; + + // Secondary token for multi-token testing + MockERC20 token2; + + uint256 constant INITIAL_BALANCE = 5000 ether; + uint256 constant DEPOSIT_AMOUNT = 200 ether; + uint256 constant MAX_LOCKUP_PERIOD = 100; + + // Rail IDs for tests + uint256 rail1Id; + uint256 rail2Id; + uint256 rail3Id; + uint256 rail4Id; // Different token + uint256 rail5Id; // Different payee + + function setUp() public { + helper = new PaymentsTestHelpers(); + helper.setupStandardTestEnvironment(); + payments = helper.payments(); + token = MockERC20(address(helper.testToken())); + + // Create settlement helper + settlementHelper = new RailSettlementHelpers(); + settlementHelper.initialize(payments, helper); + + // Create a second token for multi-token tests + token2 = new MockERC20("Token 2", "TK2"); + token2.mint(USER1, INITIAL_BALANCE); + + // Make deposits to test accounts + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + + // For token2 + vm.startPrank(USER1); + token2.approve(address(payments), type(uint256).max); + payments.deposit(token2, USER1, DEPOSIT_AMOUNT); + vm.stopPrank(); + + // Setup operator approvals + helper.setupOperatorApproval( + USER1, // 
from + OPERATOR, // operator + 15 ether, // rate allowance (token1 rail rates sum to 5+3+2+1 = 11 ether, with headroom) + 200 ether, // lockup allowance + MAX_LOCKUP_PERIOD // maximum lockup period + ); + + // Setup approval for token2 + vm.startPrank(USER1); + payments.setOperatorApproval( + token2, + OPERATOR, + true, // approved + 10 ether, // rate allowance + 100 ether, // lockup allowance + MAX_LOCKUP_PERIOD // maximum lockup period + ); + vm.stopPrank(); + + // Create different rails for testing + createTestRails(); + } + + function createTestRails() internal { + // Rail 1: Standard rail with token1 and USER2 as payee + rail1Id = helper.setupRailWithParameters( + USER1, // from + USER2, // to (payee) + OPERATOR, // operator + 5 ether, // rate + 10, // lockupPeriod + 0, // No fixed lockup + address(0), // No validator + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Rail 2: Another rail with token1 and USER2 as payee + rail2Id = helper.setupRailWithParameters( + USER1, // from + USER2, // to (payee) + OPERATOR, // operator + 3 ether, // rate + 10, // lockupPeriod + 0, // No fixed lockup + address(0), // No validator + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Rail 3: Will be terminated + rail3Id = helper.setupRailWithParameters( + USER1, // from + USER2, // to (payee) + OPERATOR, // operator + 2 ether, // rate + 5, // lockupPeriod + 0, // No fixed lockup + address(0), // No validator + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Rail 4: With token2 and USER2 as payee + vm.startPrank(OPERATOR); + rail4Id = payments.createRail( + token2, + USER1, // from + USER2, // to (payee) + address(0), // no validator + 0, // no commission + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + payments.modifyRailPayment(rail4Id, 4 ether, 0); + payments.modifyRailLockup(rail4Id, 10, 0); + vm.stopPrank(); + + // Rail 5: With token1 but USER3 as payee + rail5Id = helper.setupRailWithParameters( + USER1, // from + USER3, // to (payee) + OPERATOR, // operator + 1 ether, // rate + 10, // lockupPeriod + 0, // No fixed lockup + address(0), // No validator + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Terminate Rail 3 + vm.prank(OPERATOR); + payments.terminateRail(rail3Id); + } + + function testGetRailsForPayeeAndToken() public view { + // Test getting all rails for USER2 and token1 (should include terminated) + (Payments.RailInfo[] memory rails,,) = payments.getRailsForPayeeAndToken(USER2, token, 0, 3); + + // Should include 3 rails: rail1Id, rail2Id, and rail3Id (terminated) + assertEq(rails.length, 3, "Should have 3 rails for USER2 with token1"); + + // Verify the rail IDs and their termination status + bool foundRail1 = false; + bool foundRail2 = false; + bool foundRail3 = false; + + for (uint256 i = 0; i < rails.length; i++) { + if (rails[i].railId == rail1Id) { + foundRail1 = true; + assertFalse(rails[i].isTerminated, "Rail 1 should not be terminated"); + assertEq(rails[i].endEpoch, 0, "Rail 1 should have 0 endEpoch"); + } else if (rails[i].railId == rail2Id) { + foundRail2 = true; + assertFalse(rails[i].isTerminated, "Rail 2 should not be terminated"); + assertEq(rails[i].endEpoch, 0, "Rail 2 should have 0 endEpoch"); + } else if (rails[i].railId == rail3Id) { + foundRail3 = true; + assertTrue(rails[i].isTerminated, "Rail 3 should be terminated"); + assertTrue(rails[i].endEpoch > 0, "Rail 3 should have non-zero endEpoch"); + } + } + + assertTrue(foundRail1, "Rail 1 not found"); + assertTrue(foundRail2, "Rail 2 not found"); + 
assertTrue(foundRail3, "Rail 3 not found"); + + // Test different token (should only return rails for that token) + (Payments.RailInfo[] memory token2Rail,,) = payments.getRailsForPayeeAndToken(USER2, token2, 0, 0); + + // Should include only 1 rail with token2: rail4Id + assertEq(token2Rail.length, 1, "Should have 1 rail for USER2 with token2"); + assertEq(token2Rail[0].railId, rail4Id, "Rail ID should match rail4Id"); + + // Test different payee (should only return rails for that payee) + (Payments.RailInfo[] memory user3Rails,,) = payments.getRailsForPayeeAndToken(USER3, token, 0, 0); + + // Should include only 1 rail for USER3: rail5Id + assertEq(user3Rails.length, 1, "Should have 1 rail for USER3 with token1"); + assertEq(user3Rails[0].railId, rail5Id, "Rail ID should match rail5Id"); + } + + function testGetRailsForPayerAndToken() public view { + // Test getting all rails for USER1 (payer) and token1 (should include terminated) + (Payments.RailInfo[] memory rails,,) = payments.getRailsForPayerAndToken(USER1, token, 0, 4); + + // Should include 4 rails: rail1Id, rail2Id, rail3Id (terminated), and rail5Id + assertEq(rails.length, 4, "Should have 4 rails for USER1 with token1"); + + // Verify the rail IDs and their termination status + bool foundRail1 = false; + bool foundRail2 = false; + bool foundRail3 = false; + bool foundRail5 = false; + + for (uint256 i = 0; i < rails.length; i++) { + if (rails[i].railId == rail1Id) { + foundRail1 = true; + assertFalse(rails[i].isTerminated, "Rail 1 should not be terminated"); + assertEq(rails[i].endEpoch, 0, "Rail 1 should have 0 endEpoch"); + } else if (rails[i].railId == rail2Id) { + foundRail2 = true; + assertFalse(rails[i].isTerminated, "Rail 2 should not be terminated"); + assertEq(rails[i].endEpoch, 0, "Rail 2 should have 0 endEpoch"); + } else if (rails[i].railId == rail3Id) { + foundRail3 = true; + assertTrue(rails[i].isTerminated, "Rail 3 should be terminated"); + assertTrue(rails[i].endEpoch > 0, "Rail 3 should have non-zero endEpoch"); + } else if (rails[i].railId == rail5Id) { + foundRail5 = true; + assertFalse(rails[i].isTerminated, "Rail 5 should not be terminated"); + assertEq(rails[i].endEpoch, 0, "Rail 5 should have 0 endEpoch"); + } + } + + assertTrue(foundRail1, "Rail 1 not found"); + assertTrue(foundRail2, "Rail 2 not found"); + assertTrue(foundRail3, "Rail 3 not found"); + assertTrue(foundRail5, "Rail 5 not found"); + + // Test different token (should only return rails for that token) + (Payments.RailInfo[] memory token2Rails,,) = payments.getRailsForPayerAndToken(USER1, token2, 0, 0); + + // Should include only 1 rail with token2: rail4Id + assertEq(token2Rails.length, 1, "Should have 1 rail for USER1 with token2"); + assertEq(token2Rails[0].railId, rail4Id, "Rail ID should match rail4Id"); + } + + function testRailsBeyondEndEpoch() public { + // Get the initial rails when Rail 3 is terminated but not beyond its end epoch + (Payments.RailInfo[] memory initialPayeeRails,,) = payments.getRailsForPayeeAndToken(USER2, token, 0, 3); + (Payments.RailInfo[] memory initialPayerRails,,) = payments.getRailsForPayerAndToken(USER1, token, 0, 4); + + // Should include all 3 rails for payee + assertEq(initialPayeeRails.length, 3, "Should have 3 rails initially for payee"); + // Should include all 4 rails for payer + assertEq(initialPayerRails.length, 4, "Should have 4 rails initially for payer"); + + // Get the endEpoch for Rail 3 + uint256 endEpoch; + for (uint256 i = 0; i < initialPayeeRails.length; i++) { + if 
(initialPayeeRails[i].railId == rail3Id) { + endEpoch = initialPayeeRails[i].endEpoch; + break; + } + } + + // Advance blocks beyond the end epoch of Rail 3 + uint256 blocksToAdvance = endEpoch - block.number + 1; + helper.advanceBlocks(blocksToAdvance); + + // IMPORTANT: Settle the rail now that we're beyond its end epoch + // This will finalize the rail (set rail.from = address(0)) + vm.prank(USER1); // Settle as the client + payments.settleRail(rail3Id, endEpoch); + + // Get rails again for both payee and payer + (Payments.RailInfo[] memory finalPayeeRails,,) = payments.getRailsForPayeeAndToken(USER2, token, 0, 3); + (Payments.RailInfo[] memory finalPayerRails,,) = payments.getRailsForPayerAndToken(USER1, token, 0, 4); + + // Should include only 2 rails now for payee, as Rail 3 is beyond its end epoch + assertEq(finalPayeeRails.length, 2, "Should have 2 rails for payee after advancing beyond end epoch"); + + // Should include only 3 rails now for payer, as Rail 3 is beyond its end epoch + assertEq(finalPayerRails.length, 3, "Should have 3 rails for payer after advancing beyond end epoch"); + + // Verify Rail 3 is no longer included in payee rails + bool railFoundInPayeeRails = false; + for (uint256 i = 0; i < finalPayeeRails.length; i++) { + if (finalPayeeRails[i].railId == rail3Id) { + railFoundInPayeeRails = true; + break; + } + } + + // Verify Rail 3 is no longer included in payer rails + bool railFoundInPayerRails = false; + for (uint256 i = 0; i < finalPayerRails.length; i++) { + if (finalPayerRails[i].railId == rail3Id) { + railFoundInPayerRails = true; + break; + } + } + + assertFalse(railFoundInPayeeRails, "Rail 3 should not be included in payee rails after its end epoch"); + + assertFalse(railFoundInPayerRails, "Rail 3 should not be included in payer rails after its end epoch"); + } + + function testEmptyResult() public view { + // Test non-existent payee + (Payments.RailInfo[] memory nonExistentPayee,,) = payments.getRailsForPayeeAndToken(address(0x123), token, 0, 0); + assertEq(nonExistentPayee.length, 0, "Should return empty array for non-existent payee"); + + // Test non-existent payer + (Payments.RailInfo[] memory nonExistentPayer,,) = payments.getRailsForPayerAndToken(address(0x123), token, 0, 0); + assertEq(nonExistentPayer.length, 0, "Should return empty array for non-existent payer"); + + // Test non-existent token for payee + (Payments.RailInfo[] memory nonExistentTokenForPayee,,) = + payments.getRailsForPayeeAndToken(USER2, IERC20(address(0x456)), 0, 0); + assertEq(nonExistentTokenForPayee.length, 0, "Should return empty array for non-existent token with payee"); + + // Test non-existent token for payer + (Payments.RailInfo[] memory nonExistentTokenForPayer,,) = + payments.getRailsForPayerAndToken(USER1, IERC20(address(0x456)), 0, 0); + assertEq(nonExistentTokenForPayer.length, 0, "Should return empty array for non-existent token with payer"); + } + + function testPagination() public view { + // Test pagination for payee rails (USER2 has 3 rails with token1) + + // Test getting first 2 rails + (Payments.RailInfo[] memory page1, uint256 nextOffset1, uint256 total1) = + payments.getRailsForPayeeAndToken(USER2, token, 0, 2); + + assertEq(page1.length, 2, "First page should have 2 rails"); + assertEq(nextOffset1, 2, "Next offset should be 2"); + assertEq(total1, 3, "Total should be 3"); + + // Test getting remaining rail + (Payments.RailInfo[] memory page2, uint256 nextOffset2, uint256 total2) = + payments.getRailsForPayeeAndToken(USER2, token, nextOffset1, 2); + + 
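+ // (Pagination contract, as exercised by this test: each getter returns (rails, nextOffset, total); + // callers page by passing nextOffset back in until nextOffset == total.)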
assertEq(page2.length, 1, "Second page should have 1 rail"); + assertEq(nextOffset2, 3, "Next offset should be 3 (end of array)"); + assertEq(total2, 3, "Total should still be 3"); + + // Verify no duplicate rails between pages + bool duplicateFound = false; + for (uint256 i = 0; i < page1.length; i++) { + for (uint256 j = 0; j < page2.length; j++) { + if (page1[i].railId == page2[j].railId) { + duplicateFound = true; + break; + } + } + } + assertFalse(duplicateFound, "No duplicate rails should exist between pages"); + + // Test offset beyond array length + (Payments.RailInfo[] memory emptyPage, uint256 nextOffset3, uint256 total3) = + payments.getRailsForPayeeAndToken(USER2, token, 10, 2); + + assertEq(emptyPage.length, 0, "Should return empty array for offset beyond length"); + assertEq(nextOffset3, 3, "Next offset should equal total length"); + assertEq(total3, 3, "Total should still be 3"); + + // Test pagination for payer rails (USER1 has 4 rails with token1) + (Payments.RailInfo[] memory payerPage1, uint256 payerNext1, uint256 payerTotal1) = + payments.getRailsForPayerAndToken(USER1, token, 0, 3); + + assertEq(payerPage1.length, 3, "Payer first page should have 3 rails"); + assertEq(payerNext1, 3, "Payer next offset should be 3"); + assertEq(payerTotal1, 4, "Payer total should be 4"); + + (Payments.RailInfo[] memory payerPage2, uint256 payerNext2, uint256 payerTotal2) = + payments.getRailsForPayerAndToken(USER1, token, payerNext1, 3); + + assertEq(payerPage2.length, 1, "Payer second page should have 1 rail"); + assertEq(payerNext2, 4, "Payer next offset should be 4 (end of array)"); + assertEq(payerTotal2, 4, "Payer total should still be 4"); + } +} diff --git a/service_contracts/test/payments/RailSettlement.t.sol b/service_contracts/test/payments/RailSettlement.t.sol new file mode 100644 index 00000000..2b6bdc00 --- /dev/null +++ b/service_contracts/test/payments/RailSettlement.t.sol @@ -0,0 +1,962 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT + +pragma solidity ^0.8.27; + +import {Test} from "forge-std/Test.sol"; +import {Payments} from "@payments/Payments.sol"; +import {MockERC20} from "./mocks/MockERC20.sol"; +import {MockValidator} from "./mocks/MockValidator.sol"; +import {PaymentsTestHelpers} from "./helpers/PaymentsTestHelpers.sol"; +import {RailSettlementHelpers} from "./helpers/RailSettlementHelpers.sol"; +import {console} from "forge-std/console.sol"; +import {BaseTestHelper} from "./helpers/BaseTestHelper.sol"; +import {Errors} from "@payments/Errors.sol"; + +contract RailSettlementTest is Test, BaseTestHelper { + PaymentsTestHelpers helper; + RailSettlementHelpers settlementHelper; + Payments payments; + MockERC20 token; + + uint256 constant DEPOSIT_AMOUNT = 200 ether; + uint256 constant MAX_LOCKUP_PERIOD = 100; + + function setUp() public { + helper = new PaymentsTestHelpers(); + helper.setupStandardTestEnvironment(); + payments = helper.payments(); + token = MockERC20(address(helper.testToken())); + + // Create settlement helper with the helper that has the initialized payment contract + settlementHelper = new RailSettlementHelpers(); + // Initialize the settlement helper with our Payments instance + settlementHelper.initialize(payments, helper); + + // Make deposits to test accounts for testing + helper.makeDeposit(USER1, USER1, DEPOSIT_AMOUNT); + } + + //-------------------------------- + // 1. 
Basic Settlement Flow Tests + //-------------------------------- + + function testBasicSettlement() public { + // Create a rail with a simple rate + uint256 rate = 5 ether; + uint256 railId = helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + rate, + 10, // lockupPeriod + 0, // No fixed lockup + address(0), // No validator + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Advance a few blocks + helper.advanceBlocks(5); + + // Settle for the elapsed blocks + uint256 expectedAmount = rate * 5; // 5 blocks * 5 ether + console.log("block.number", block.number); + + settlementHelper.settleRailAndVerify(railId, block.number, expectedAmount, block.number); + } + + function testSettleRailInDebt() public { + uint256 rate = 50 ether; + uint256 railId = helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + rate, + 3, // lockupPeriod - total locked: 150 ether (3 * 50) + 0, // No fixed lockup + address(0), + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Advance 7 blocks + helper.advanceBlocks(7); + + // With 200 ether deposit and 150 ether locked, we can only pay for 1 epoch (50 ether) + uint256 expectedAmount = 50 ether; + uint256 expectedEpoch = 2; // Initial epoch (1) + 1 epoch + + // First settlement + settlementHelper.settleRailAndVerify(railId, block.number, expectedAmount, expectedEpoch); + + // Settle again - should be a no-op since we're already settled to the expected epoch + settlementHelper.settleRailAndVerify(railId, block.number, 0, expectedEpoch); + + // Add more funds and settle again + uint256 additionalDeposit = 300 ether; + helper.makeDeposit(USER1, USER1, additionalDeposit); + + // Should be able to settle the remaining 6 epochs + uint256 expectedAmount2 = rate * 6; // 6 more epochs * 50 ether + + // Third settlement + settlementHelper.settleRailAndVerify(railId, block.number, expectedAmount2, block.number); + } + + function testSettleRailWithRateChange() public { + // Set up a rail + uint256 rate = 5 ether; + uint256 railId = helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + rate, + 10, // lockupPeriod + 0, // No fixed lockup + address(0), // No validator + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + uint256 newRate1 = 6 ether; + uint256 newRate2 = 7 ether; + + // Set the rate to 6 ether after 7 blocks + helper.advanceBlocks(7); + + // Increase operator allowances to allow rate modification + // We increase rate allowance to 5 + 6 + 7 ether and add buffer for lockup + uint256 rateAllowance = rate + newRate1 + newRate2; + uint256 lockupAllowance = (rate + newRate1 + newRate2) * 10; + helper.setupOperatorApproval(USER1, OPERATOR, rateAllowance, lockupAllowance, MAX_LOCKUP_PERIOD); + + // Operator increases the payment rate from 5 ETH to 6 ETH per block for epochs (9-14) + // This creates a rate change queue + vm.prank(OPERATOR); + payments.modifyRailPayment(railId, newRate1, 0); + vm.stopPrank(); + + // Advance 6 blocks + helper.advanceBlocks(6); + + // Operator increases the payment rate from 6 ETH to 7 ETH per block for epochs (15-21) + // This extends the rate change queue + vm.prank(OPERATOR); + payments.modifyRailPayment(railId, newRate2, 0); + vm.stopPrank(); + + // Advance 7 blocks + helper.advanceBlocks(7); + + // expectedAmount = 5 * 7 + 6 * 6 + 7 * 7 = 120 ether + uint256 expectedAmount = rate * 7 + newRate1 * 6 + newRate2 * 7; + + // Settle and verify + settlementHelper.settleRailAndVerify(railId, block.number, expectedAmount, block.number); + } + + 
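+ // A minimal arithmetic sketch of the segment-by-segment settlement checked above; it exercises + // no contract code, and the function name is ours: across rate changes, the settled total is the + // sum of rate_i * duration_i over each constant-rate segment. + function testRateChangeSegmentArithmeticSketch() public pure { + uint256 total = 5 ether * 7 + 6 ether * 6 + 7 ether * 7; // three segments, as in the test above + assertEq(total, 120 ether, "segment sums should add up to 120 ether"); + } + 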
//-------------------------------- + // 2. Validation Scenarios + //-------------------------------- + + function testValidationWithStandardApproval() public { + // Deploy a standard validator that approves everything + MockValidator validator = new MockValidator(MockValidator.ValidatorMode.STANDARD); + + // Create a rail with the validator + uint256 rate = 5 ether; + uint256 railId = helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + rate, + 10, // lockupPeriod + 0, // No fixed lockup + address(validator), // Standard validator + SERVICE_FEE_RECIPIENT // operator commision receiver + ); + + // Advance several blocks + helper.advanceBlocks(5); + + // Verify standard validator approves full amount + uint256 expectedAmount = rate * 5; // 5 blocks * 5 ether + + // Settle with validation + RailSettlementHelpers.SettlementResult memory result = + settlementHelper.settleRailAndVerify(railId, block.number, expectedAmount, block.number); + + // Verify validaton note + assertEq(result.note, "Standard approved payment", "Validator note should match"); + } + + function testValidationWithMultipleRateChanges() public { + // Deploy a standard validator that approves everything + MockValidator validator = new MockValidator(MockValidator.ValidatorMode.STANDARD); + + // Setup operator approval first + helper.setupOperatorApproval( + USER1, // from + OPERATOR, + 10, + 100 ether, + MAX_LOCKUP_PERIOD // lockup period + ); + + // Create a rail with the validator + uint256 rate = 1; + uint256 expectedAmount = 0; + uint256 railId = helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + rate, + 10, // lockupPeriod + 0, // No fixed lockup + address(validator), // Standard validator + SERVICE_FEE_RECIPIENT // operator commision receiver + ); + + vm.startPrank(OPERATOR); + while (rate++ < 10) { + // Advance several blocks + payments.modifyRailPayment(railId, rate, 0); + expectedAmount += rate * 5; + helper.advanceBlocks(5); + } + vm.stopPrank(); + + // Settle with validation + RailSettlementHelpers.SettlementResult memory result = + settlementHelper.settleRailAndVerify(railId, block.number, expectedAmount, block.number); + + // Verify validator note + assertEq(result.note, "Standard approved payment", "Validator note should match"); + } + + function testValidationWithReducedAmount() public { + // Deploy an validator that reduces payment amounts + MockValidator validator = new MockValidator(MockValidator.ValidatorMode.REDUCE_AMOUNT); + validator.configure(80); // 80% of the original amount + + // Create a rail with the validator + uint256 rate = 10 ether; + uint256 railId = helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + rate, + 10, // lockupPeriod + 0, // No fixed lockup + address(validator), // Reduced amount validator + SERVICE_FEE_RECIPIENT // operator commision receiver + ); + + // Advance several blocks + helper.advanceBlocks(5); + + // Verify reduced amount (80% of original) + uint256 expectedAmount = (rate * 5 * 80) / 100; // 5 blocks * 10 ether * 80% + uint256 expectedNetworkFee = + expectedAmount * payments.NETWORK_FEE_NUMERATOR() / payments.NETWORK_FEE_DENOMINATOR(); + uint256 expectedNetPayeeAmount = expectedAmount - expectedNetworkFee; + + // Settle with validation - verify against NET payee amount + RailSettlementHelpers.SettlementResult memory result = + settlementHelper.settleRailAndVerify(railId, block.number, expectedAmount, block.number); + + assertEq(result.netPayeeAmount, expectedNetPayeeAmount, "Net payee amount incorrect"); + 
assertEq(result.operatorCommission, 0, "Operator commission incorrect"); + + // Verify validator note + assertEq(result.note, "Validator reduced payment amount", "Validator note should match"); + } + + function testValidationWithReducedDuration() public { + // Deploy a validator that reduces settlement duration + MockValidator validator = new MockValidator(MockValidator.ValidatorMode.REDUCE_DURATION); + validator.configure(60); // 60% of the original duration + + // Create a rail with the validator + uint256 rate = 10 ether; + uint256 railId = helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + rate, + 10, // lockupPeriod + 0, // No fixed lockup + address(validator), // Reduced duration validator + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Advance several blocks + uint256 advanceBlocks = 5; + helper.advanceBlocks(advanceBlocks); + + // Calculate expected settlement duration (60% of 5 blocks) + uint256 expectedDuration = (advanceBlocks * 60) / 100; + uint256 expectedSettledUpto = block.number - advanceBlocks + expectedDuration; + uint256 expectedAmount = rate * expectedDuration; // expectedDuration blocks * 10 ether + + // Settle with validation + RailSettlementHelpers.SettlementResult memory result = + settlementHelper.settleRailAndVerify(railId, block.number, expectedAmount, expectedSettledUpto); + + // Verify validator note + assertEq(result.note, "Validator reduced settlement duration", "Validator note should match"); + } + + function testMaliciousValidatorHandling() public { + // Deploy a malicious validator + MockValidator validator = new MockValidator(MockValidator.ValidatorMode.MALICIOUS); + + // Create a rail with the validator + uint256 rate = 5 ether; + uint256 railId = helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + rate, + 10, // lockupPeriod + 0, // No fixed lockup + address(validator), // Malicious validator + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Advance several blocks + helper.advanceBlocks(5); + + // Attempt settlement with malicious validator - should revert + vm.prank(USER1); + vm.expectRevert( + abi.encodeWithSelector( + Errors.ValidatorSettledBeyondSegmentEnd.selector, railId, block.number, block.number + 10 + ) + ); + payments.settleRail(railId, block.number); + + // Set the validator to return invalid amount but valid settlement duration + validator.setMode(MockValidator.ValidatorMode.CUSTOM_RETURN); + uint256 proposedAmount = rate * 5; // 5 blocks * 5 ether + uint256 invalidAmount = proposedAmount * 2; // Double the correct amount + validator.setCustomValues(invalidAmount, block.number, "Attempting excessive payment"); + + // Attempt settlement with excessive amount - should also revert + vm.prank(USER1); + // error ValidatorModifiedAmountExceedsMaximum(uint256 railId, uint256 maxAllowed, uint256 attempted); + vm.expectRevert( + abi.encodeWithSelector( + Errors.ValidatorModifiedAmountExceedsMaximum.selector, railId, proposedAmount, invalidAmount + ) + ); + payments.settleRail(railId, block.number); + } + + //-------------------------------- + // 3. 
Termination and Edge Cases + //-------------------------------- + + function testRailTerminationAndSettlement() public { + uint256 rate = 10 ether; + uint256 lockupPeriod = 5; + uint256 railId = helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + rate, + lockupPeriod, // lockupPeriod + 0, // No fixed lockup + address(0), // No validator + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Advance several blocks + helper.advanceBlocks(3); + + // First settlement + uint256 expectedAmount1 = rate * 3; // 3 blocks * 10 ether + settlementHelper.settleRailAndVerify(railId, block.number, expectedAmount1, block.number); + + // Terminate the rail + vm.prank(OPERATOR); + payments.terminateRail(railId); + + // Verify rail was terminated - check endEpoch is set + Payments.RailView memory rail = payments.getRail(railId); + assertTrue(rail.endEpoch > 0, "Rail should be terminated"); + + // Verify endEpoch calculation: should be the lockupLastSettledAt (current block) + lockupPeriod + Payments.Account memory account = helper.getAccountData(USER1); + assertEq( + rail.endEpoch, + account.lockupLastSettledAt + rail.lockupPeriod, + "End epoch should be account lockup last settled at + lockup period" + ); + + // Advance more blocks + helper.advanceBlocks(10); + + // Get balances before final settlement + Payments.Account memory userBefore = helper.getAccountData(USER1); + Payments.Account memory recipientBefore = helper.getAccountData(USER2); + + // Final settlement after termination + vm.prank(USER1); + + ( + uint256 settledAmount, + uint256 netPayeeAmount, + uint256 totalOperatorCommission, + uint256 totalNetworkFee, + uint256 settledUpto, + ) = payments.settleRail(railId, block.number); + + // Verify that the total settled amount equals the sum of net payee amount, operator commission, and network fee + assertEq( + settledAmount, + netPayeeAmount + totalOperatorCommission + totalNetworkFee, + "Mismatch in settled amount breakdown" + ); + + // Should settle up to endEpoch, which is lockupPeriod blocks after the last settlement + uint256 expectedAmount2 = rate * lockupPeriod; // lockupPeriod = 5 blocks + assertEq(settledAmount, expectedAmount2, "Final settlement amount incorrect"); + assertEq(settledUpto, rail.endEpoch, "Final settled up to incorrect"); + + // Get balances after settlement + Payments.Account memory userAfter = helper.getAccountData(USER1); + Payments.Account memory recipientAfter = helper.getAccountData(USER2); + + assertEq( + userBefore.funds - userAfter.funds, expectedAmount2, "User funds not reduced correctly in final settlement" + ); + assertEq( + recipientAfter.funds - recipientBefore.funds, + netPayeeAmount, + "Recipient funds not increased correctly in final settlement" + ); + + // Verify account lockup is cleared after full settlement + assertEq(userAfter.lockupCurrent, 0, "Account lockup should be cleared after full rail settlement"); + assertEq(userAfter.lockupRate, 0, "Account lockup rate should be zero after full rail settlement"); + } + + function testSettleAlreadyFullySettledRail() public { + // Create a rail with standard rate + uint256 rate = 5 ether; + uint256 railId = helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + rate, + 10, // lockupPeriod + 0, // No fixed lockup + address(0), // No validator + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Settle immediately without advancing blocks - should be a no-op + RailSettlementHelpers.SettlementResult memory result = + settlementHelper.settleRailAndVerify(railId, block.number, 0, 
block.number); + + console.log("result.note", result.note); + + // Verify the note indicates already settled + assertTrue( + bytes(result.note).length > 0 + && stringsEqual(result.note, string.concat("already settled up to epoch ", vm.toString(block.number))), + "Note should indicate already settled" + ); + } + + function testSettleRailWithRateChangeQueueForReducedAmountValidation() public { + // Deploy a validator that reduces the payment amount by a percentage + uint256 factor = 80; // 80% of the original amount + MockValidator validator = new MockValidator(MockValidator.ValidatorMode.REDUCE_AMOUNT); + validator.configure(factor); + + // Create a rail with the validator + uint256 rate = 5 ether; + uint256 lockupPeriod = 10; + uint256 railId = helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + rate, + lockupPeriod, + 0, // No fixed lockup + address(validator), + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Simulate 5 blocks passing (blocks 1-5) + helper.advanceBlocks(5); + + // Increase operator allowances to allow rate modification + // We double the rate allowance and add buffer for lockup + (, uint256 rateAllowance, uint256 lockupAllowance,,,) = helper.getOperatorAllowanceAndUsage(USER1, OPERATOR); + helper.setupOperatorApproval(USER1, OPERATOR, rateAllowance * 2, lockupAllowance + 10 * rate, MAX_LOCKUP_PERIOD); + + // Operator doubles the payment rate from 5 ETH to 10 ETH per block + // This creates a rate change in the queue + vm.prank(OPERATOR); + payments.modifyRailPayment(railId, rate * 2, 0); + vm.stopPrank(); + + // Simulate 5 blocks passing (blocks 6-10) + helper.advanceBlocks(5); + + // Calculate expected settlement: + // Phase 1 (blocks 1-5): 5 blocks at 5 ETH/block → 25 ETH total → after validation (80%) → 20 ETH total + // Phase 2 (blocks 6-10): 5 blocks at 10 ETH/block → 50 ETH total → after validation (80%) → 40 ETH total + // Total after validation (80%) → 60 ETH total + uint256 expectedDurationOldRate = 5; // Epochs 1-5 ( rate = 5 ) + uint256 expectedDurationNewRate = 5; // Epochs 6-10 ( rate = 10 ) + uint256 expectedAmountOldRate = (rate * expectedDurationOldRate * factor) / 100; // 20 ETH (25 * 0.8) + uint256 expectedAmountNewRate = ((rate * 2) * expectedDurationNewRate * factor) / 100; // 40 ETH (50 * 0.8) + uint256 expectedAmount = expectedAmountOldRate + expectedAmountNewRate; // 60 ETH total + + // settle and verify rail + RailSettlementHelpers.SettlementResult memory result = + settlementHelper.settleRailAndVerify(railId, block.number, expectedAmount, block.number); + + console.log("result.note", result.note); + } + + function testSettleRailWithRateChangeQueueForReducedDurationValidation() public { + // Deploy a validator that reduces the duration by a percentage + uint256 factor = 60; // 60% of the original duration + MockValidator validator = new MockValidator(MockValidator.ValidatorMode.REDUCE_DURATION); + validator.configure(factor); + + // Create a rail with the validator + uint256 rate = 5 ether; + uint256 lockupPeriod = 10; + uint256 railId = helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + rate, + lockupPeriod, + 0, // No fixed lockup + address(validator), + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Simulate 5 blocks passing (blocks 1-5) + helper.advanceBlocks(5); + + // Initial settlement for the first 5 blocks ( epochs 1-5 ) + // Duration reduction: 5 blocks * 60% = 3 blocks settled + // Amount: 3 blocks * 5 ETH = 15 ETH + // LastSettledUpto: 1 + (6 - 1) * 60% = 4 + 
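// (The formula in the code below mirrors the REDUCE_DURATION validator: of the + // block.number - 1 = 5 elapsed epochs only factor% settle, so settledUpto lands + // at 1 + 3 = 4.) + 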
vm.prank(USER1); + payments.settleRail(railId, block.number); + uint256 lastSettledUpto = 1 + ((block.number - 1) * factor) / 100; // mirrors the validator: only 60% of the 5 elapsed epochs settle, so lastSettledUpto = 4 + vm.stopPrank(); + + // update operator allowances for rate modification + (, uint256 rateAllowance, uint256 lockupAllowance,,,) = helper.getOperatorAllowanceAndUsage(USER1, OPERATOR); + helper.setupOperatorApproval(USER1, OPERATOR, rateAllowance * 2, lockupAllowance + 10 * rate, MAX_LOCKUP_PERIOD); + + // Operator doubles the payment rate from 5 ETH to 10 ETH per block + // This creates a rate change in the queue + vm.prank(OPERATOR); + payments.modifyRailPayment(railId, rate * 2, 0); + vm.stopPrank(); + + // Simulate 5 blocks passing (blocks 6-10) + helper.advanceBlocks(5); + + // Expected settlement calculation: + // - Rate change was at block 6, creating a boundary + // - Duration reduction applies only to the first rate segment (epochs 1-5) + // - We already settled 3 blocks (1-3) in the first settlement + // - Remaining in first segment: 2 blocks (4-5) at original rate + // - Duration reduction: 2 blocks * 60% = 1.2 blocks (truncated to 1 block) + // - Amount: 1 epoch * 5 ETH/epoch = 5 ETH + // - rail.settledUpto = 4 + 1 = 5 < segmentBoundary ( 6 ) => doesn't go to next settlement segment (epochs 6-10) + uint256 firstSegmentEndBoundary = 6; // Block where rate change occurred + uint256 expectedDuration = ((firstSegmentEndBoundary - lastSettledUpto) * factor) / 100; // (6-4)*0.6 = 1.2 → 1 block + uint256 expectedSettledUpto = lastSettledUpto + expectedDuration; // 4 + 1 = 5 + uint256 expectedAmount = rate * expectedDuration; // 5 ETH/epoch * 1 epoch = 5 ETH + + // settle and verify rail + RailSettlementHelpers.SettlementResult memory result = + settlementHelper.settleRailAndVerify(railId, block.number, expectedAmount, expectedSettledUpto); + + console.log("result.note", result.note); + } + + function testModifyRailPayment_SkipsZeroRateEnqueue() public { + uint256 initialRate = 0; + uint256 railId = helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + initialRate, + 10, // lockupPeriod + 0, // fixed lockup + address(0), // no validator + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // give the operator enough allowance to change the rate + helper.setupOperatorApproval(USER1, OPERATOR, 10 ether, 100 ether, MAX_LOCKUP_PERIOD); + + // advance a few blocks so there is "history" to mark as settled + helper.advanceBlocks(4); + uint256 beforeBlock = block.number; + + // change rate: 0 → 5 ether + vm.prank(OPERATOR); + payments.modifyRailPayment(railId, 5 ether, 0); + vm.stopPrank(); + + // queue must still be empty + assertEq(payments.getRateChangeQueueSize(railId), 0, "queue should stay empty"); + + // settledUpTo must equal the block where modification occurred + Payments.RailView memory rv = payments.getRail(railId); + assertEq(rv.settledUpTo, beforeBlock, "settledUpTo should equal current block"); + } + + //-------------------------------- + // Helper Functions + //-------------------------------- + + // Helper to compare strings + function stringsEqual(string memory a, string memory b) internal pure returns (bool) { + return keccak256(abi.encodePacked(a)) == keccak256(abi.encodePacked(b)); + } + + function testSettlementWithOperatorCommission() public { + // Setup operator approval first + helper.setupOperatorApproval( + USER1, // from + OPERATOR, + 10 ether, // rate allowance + 100 ether, // lockup allowance + MAX_LOCKUP_PERIOD // max 
lockup period + ); + + // Create rail with 2% operator commission (200 BPS) + uint256 operatorCommissionBps = 200; + uint256 railId; + vm.startPrank(OPERATOR); + railId = payments.createRail( + token, + USER1, + USER2, + address(0), // no validator + operatorCommissionBps, + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + vm.stopPrank(); + + // Set rail parameters using modify functions + uint256 rate = 10 ether; + uint256 lockupPeriod = 5; + vm.startPrank(OPERATOR); + payments.modifyRailPayment(railId, rate, 0); + payments.modifyRailLockup(railId, lockupPeriod, 0); // no fixed lockup + vm.stopPrank(); + + // Advance time + uint256 elapsedBlocks = 5; + helper.advanceBlocks(elapsedBlocks); + + // --- Balances Before --- + Payments.Account memory payerBefore = helper.getAccountData(USER1); + Payments.Account memory payeeBefore = helper.getAccountData(USER2); + Payments.Account memory operatorBefore = helper.getAccountData(OPERATOR); + Payments.Account memory serviceFeeRecipientBefore = helper.getAccountData(SERVICE_FEE_RECIPIENT); + + // --- Expected Calculations --- + uint256 expectedSettledAmount = rate * elapsedBlocks; + uint256 expectedNetworkFee = + expectedSettledAmount * payments.NETWORK_FEE_NUMERATOR() / payments.NETWORK_FEE_DENOMINATOR(); + uint256 expectedOperatorCommission = + ((expectedSettledAmount - expectedNetworkFee) * operatorCommissionBps) / payments.COMMISSION_MAX_BPS(); + uint256 expectedNetPayeeAmount = expectedSettledAmount - expectedNetworkFee - expectedOperatorCommission; + + // --- Settle Rail --- + vm.startPrank(USER1); // Any participant can settle + ( + uint256 settledAmount, + uint256 netPayeeAmount, + uint256 operatorCommission, + uint256 totalNetworkFee, + uint256 settledUpto, + ) = payments.settleRail(railId, block.number); + vm.stopPrank(); + + // --- Verification --- + + // 1. Return values from settleRail + assertEq(settledAmount, expectedSettledAmount, "Returned settledAmount incorrect"); + assertEq(netPayeeAmount, expectedNetPayeeAmount, "Returned netPayeeAmount incorrect"); + assertEq(operatorCommission, expectedOperatorCommission, "Returned operatorCommission incorrect"); + assertEq(totalNetworkFee, expectedNetworkFee, "Returned networkFee incorrect"); + assertEq(settledUpto, block.number, "Returned settledUpto incorrect"); + + // 2. 
Balances after settlement + Payments.Account memory payerAfter = helper.getAccountData(USER1); + Payments.Account memory payeeAfter = helper.getAccountData(USER2); + Payments.Account memory operatorAfter = helper.getAccountData(OPERATOR); + Payments.Account memory serviceFeeRecipientAfter = helper.getAccountData(SERVICE_FEE_RECIPIENT); + + assertEq(payerAfter.funds, payerBefore.funds - expectedSettledAmount, "Payer funds mismatch"); + assertEq(payeeAfter.funds, payeeBefore.funds + expectedNetPayeeAmount, "Payee funds mismatch"); + assertEq(operatorAfter.funds, operatorBefore.funds, "Operator funds mismatch"); + assertEq( + serviceFeeRecipientAfter.funds, + serviceFeeRecipientBefore.funds + expectedOperatorCommission, + "Service fee recipient funds mismatch" + ); + } + + function testSettleRailWithNonZeroZeroNonZeroRateSequence() public { + // Setup operator approval for rate modifications + helper.setupOperatorApproval( + USER1, + OPERATOR, + 25 ether, // rate allowance + 200 ether, // lockup allowance + MAX_LOCKUP_PERIOD + ); + + // Create a rail with initial rate + uint256 initialRate = 5 ether; + uint256 railId = helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + initialRate, + 10, // lockupPeriod + 0, // No fixed lockup + address(0), // No validator + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Advance 3 blocks at initial rate (5 ether/block) + helper.advanceBlocks(3); + + // Change rate to zero + vm.prank(OPERATOR); + payments.modifyRailPayment(railId, 0, 0); + vm.stopPrank(); + + // Advance 4 blocks at zero rate (no payment) + helper.advanceBlocks(4); + + // Change rate to new non-zero rate + uint256 finalRate = 8 ether; + vm.prank(OPERATOR); + payments.modifyRailPayment(railId, finalRate, 0); + vm.stopPrank(); + + // Advance 5 blocks at final rate (8 ether/block) + helper.advanceBlocks(5); + + // Calculate expected settlement: + // Phase 1 (blocks 1-3): 3 blocks at 5 ether/block = 15 ether + // Phase 2 (blocks 4-7): 4 blocks at 0 ether/block = 0 ether + // Phase 3 (blocks 8-12): 5 blocks at 8 ether/block = 40 ether + // Total expected: 15 + 0 + 40 = 55 ether + uint256 expectedAmount = (initialRate * 3) + (0 * 4) + (finalRate * 5); + + // Settle and verify + RailSettlementHelpers.SettlementResult memory result = + settlementHelper.settleRailAndVerify(railId, block.number, expectedAmount, block.number); + + console.log("Non-zero -> Zero -> Non-zero settlement note:", result.note); + } + + function testSettleRailWithZeroNonZeroZeroRateSequence() public { + // Setup operator approval for rate modifications + helper.setupOperatorApproval( + USER1, + OPERATOR, + 15 ether, // rate allowance + 150 ether, // lockup allowance + MAX_LOCKUP_PERIOD + ); + + // Create a rail starting with zero rate + uint256 initialRate = 0; + uint256 railId = helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + initialRate, + 10, // lockupPeriod + 0, // No fixed lockup + address(0), // No validator + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + // Advance 2 blocks at zero rate (no payment) + helper.advanceBlocks(2); + + // Change rate to non-zero + uint256 middleRate = 6 ether; + vm.prank(OPERATOR); + payments.modifyRailPayment(railId, middleRate, 0); + vm.stopPrank(); + + // Advance 4 blocks at middle rate (6 ether/block) + helper.advanceBlocks(4); + + // Change rate back to zero + vm.prank(OPERATOR); + payments.modifyRailPayment(railId, 0, 0); + vm.stopPrank(); + + // Advance 3 blocks at zero rate again (no payment) + helper.advanceBlocks(3); + + // 
Calculate expected settlement: + // Phase 1 (blocks 1-2): 2 blocks at 0 ether/block = 0 ether + // Phase 2 (blocks 3-6): 4 blocks at 6 ether/block = 24 ether + // Phase 3 (blocks 7-9): 3 blocks at 0 ether/block = 0 ether + // Total expected: 0 + 24 + 0 = 24 ether + uint256 expectedAmount = (0 * 2) + (middleRate * 4) + (0 * 3); + + // Settle and verify + RailSettlementHelpers.SettlementResult memory result = + settlementHelper.settleRailAndVerify(railId, block.number, expectedAmount, block.number); + + console.log("Zero -> Non-zero -> Zero settlement note:", result.note); + } + + function testPartialSettleOfZeroSegment() public { + uint256 rateOn = 1; + uint256 rateOff = 0; + scaffoldPartialSettleOfSegment(rateOn, rateOff); + } + + function testPartialSettleOfNonZeroSegment() public { + uint256 rateOn = 2; + uint256 rateOff = 1; + scaffoldPartialSettleOfSegment(rateOn, rateOff); + } + + function scaffoldPartialSettleOfSegment(uint256 rateOn, uint256 rateOff) public { + helper.setupOperatorApproval(USER1, OPERATOR, 1000 ether, 100000 ether, MAX_LOCKUP_PERIOD); + + uint256 railId = helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + rateOn, + 0, // No lockup period + 0, // No fixed lockup + address(0), // No validator + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + + /* + | rate == rateOn | rate == rateOff | rate == rateOn | + | 100 blocks | 100 blocks | 100 blocks | + X^ Y^ + First settle Second settle + */ + // Advance 100 blocks and turn rate off + // This adds a rate == rateOn segment covering the first 100 blocks to the queue + helper.advanceBlocks(100); + vm.prank(OPERATOR); + payments.modifyRailPayment(railId, rateOff, 0); + vm.stopPrank(); + + // Advance 100 blocks and turn rate on + // This adds a rate == rateOff segment covering the next 100 blocks to the queue + helper.advanceBlocks(100); + vm.prank(OPERATOR); + payments.modifyRailPayment(railId, rateOn, 0); + vm.stopPrank(); + + // Advance 100 blocks and turn rate off + // This adds a final rate == rateOn segment covering the last 100 blocks to the queue + helper.advanceBlocks(100); + vm.prank(OPERATOR); + payments.modifyRailPayment(railId, rateOff, 0); + vm.stopPrank(); + + // Settle partway through the second segment + settlementHelper.settleRailAndVerify(railId, 151, 100 * rateOn + 50 * rateOff, 151); + + // Settle the rest of the rail: the remaining 50 blocks of the second segment plus the full third segment + settlementHelper.settleRailAndVerify(railId, 301, 50 * rateOff + 100 * rateOn, 301); + } + + function testModifyTerminatedRailBeyondEndEpoch() public { + // Create a rail with standard parameters including fixed lockup + uint256 rate = 10 ether; + uint256 lockupPeriod = 5; + uint256 fixedLockup = 10 ether; // Add fixed lockup for one-time payment tests + uint256 railId = helper.setupRailWithParameters( + USER1, + USER2, + OPERATOR, + rate, + lockupPeriod, + fixedLockup, + address(0), // No validator + SERVICE_FEE_RECIPIENT + ); + + // Advance and settle to ensure the rail is active + helper.advanceBlocks(3); + vm.prank(USER1); + payments.settleRail(railId, block.number); + + // Terminate the rail + vm.prank(OPERATOR); + payments.terminateRail(railId); + + // Get the rail's end epoch + Payments.RailView memory rail = payments.getRail(railId); + uint256 endEpoch = rail.endEpoch; + + // Advance blocks to reach the end epoch + uint256 blocksToAdvance = endEpoch - block.number; + helper.advanceBlocks(blocksToAdvance); + + // Now we're at the end epoch - try to modify rate + vm.prank(OPERATOR); + vm.expectRevert( + abi.encodeWithSelector( + Errors.CannotModifyTerminatedRailBeyondEndEpoch.selector, 
railId, endEpoch, block.number + ) + ); + payments.modifyRailPayment(railId, 5 ether, 0); + + // Also try to make a one-time payment + vm.prank(OPERATOR); + vm.expectRevert( + abi.encodeWithSelector( + Errors.CannotModifyTerminatedRailBeyondEndEpoch.selector, railId, endEpoch, block.number + ) + ); + payments.modifyRailPayment(railId, rate, 1 ether); + + // Advance one more block to go beyond the end epoch + helper.advanceBlocks(1); + + // Try to modify rate again - should still revert + vm.prank(OPERATOR); + vm.expectRevert( + abi.encodeWithSelector( + Errors.CannotModifyTerminatedRailBeyondEndEpoch.selector, railId, endEpoch, block.number + ) + ); + payments.modifyRailPayment(railId, 5 ether, 0); + + // Try to make both rate change and one-time payment + vm.prank(OPERATOR); + vm.expectRevert( + abi.encodeWithSelector( + Errors.CannotModifyTerminatedRailBeyondEndEpoch.selector, railId, endEpoch, block.number + ) + ); + payments.modifyRailPayment(railId, 5 ether, 1 ether); + } +} diff --git a/service_contracts/test/payments/RateChangeQueue.t.sol b/service_contracts/test/payments/RateChangeQueue.t.sol new file mode 100644 index 00000000..f273f3f0 --- /dev/null +++ b/service_contracts/test/payments/RateChangeQueue.t.sol @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.27; + +import {Test} from "forge-std/Test.sol"; +import {RateChangeQueue} from "@payments/RateChangeQueue.sol"; + +contract RateChangeQueueTest is Test { + using RateChangeQueue for RateChangeQueue.Queue; + + struct TestQueueContainer { + RateChangeQueue.Queue queue; + } + + TestQueueContainer private queueContainer; + + function queue() internal view returns (RateChangeQueue.Queue storage) { + return queueContainer.queue; + } + + function createEmptyQueue() internal { + // Clear any existing data + RateChangeQueue.Queue storage q = queue(); + while (!q.isEmpty()) { + q.dequeue(); + } + } + + function createSingleItemQueue(uint256 rate, uint256 untilEpoch) + internal + returns (RateChangeQueue.RateChange memory) + { + createEmptyQueue(); + RateChangeQueue.enqueue(queue(), rate, untilEpoch); + assertEq(RateChangeQueue.size(queue()), 1); + return RateChangeQueue.RateChange(rate, untilEpoch); + } + + function createMultiItemQueue(uint256[] memory rates, uint256[] memory untilEpochs) + internal + returns (RateChangeQueue.RateChange[] memory) + { + require(rates.length == untilEpochs.length, "Input arrays must have same length"); + + createEmptyQueue(); + + RateChangeQueue.RateChange[] memory items = new RateChangeQueue.RateChange[](rates.length); + + for (uint256 i = 0; i < rates.length; i++) { + RateChangeQueue.enqueue(queue(), rates[i], untilEpochs[i]); + items[i] = RateChangeQueue.RateChange(rates[i], untilEpochs[i]); + } + + assertEq(RateChangeQueue.size(queue()), rates.length); + return items; + } + + function createQueueWithAdvancedIndices(uint256 cycles) internal { + createEmptyQueue(); + + // Create cycles of filling and emptying + for (uint256 i = 0; i < cycles; i++) { + // Fill with 3 items + RateChangeQueue.enqueue(queue(), 100 + i, 5 + i); + RateChangeQueue.enqueue(queue(), 200 + i, 6 + i); + RateChangeQueue.enqueue(queue(), 300 + i, 7 + i); + + // Empty + RateChangeQueue.dequeue(queue()); + RateChangeQueue.dequeue(queue()); + RateChangeQueue.dequeue(queue()); + } + + // Queue should be empty now but with advanced indices + assertTrue(RateChangeQueue.isEmpty(queue())); + } + + function assertRateChangeEq( + RateChangeQueue.RateChange memory actual, + RateChangeQueue.RateChange 
memory expected, + string memory message + ) internal pure { + assertEq(actual.rate, expected.rate, string.concat(message, " - rate mismatch")); + assertEq(actual.untilEpoch, expected.untilEpoch, string.concat(message, " - untilEpoch mismatch")); + } + + function testBasicQueueOperations() public { + createEmptyQueue(); + + RateChangeQueue.enqueue(queue(), 100, 5); + assertEq(RateChangeQueue.size(queue()), 1); + RateChangeQueue.enqueue(queue(), 200, 10); + RateChangeQueue.enqueue(queue(), 300, 15); + assertEq(RateChangeQueue.size(queue()), 3); + + // Verify peek (head) and peekTail operations + RateChangeQueue.RateChange memory head = RateChangeQueue.peek(queue()); + assertRateChangeEq(head, RateChangeQueue.RateChange(100, 5), "Head should match first enqueued item"); + + RateChangeQueue.RateChange memory tail = RateChangeQueue.peekTail(queue()); + assertRateChangeEq(tail, RateChangeQueue.RateChange(300, 15), "Tail should match last enqueued item"); + + // Size should remain unchanged after peek operations + assertEq(RateChangeQueue.size(queue()), 3); + + // Dequeue and verify FIFO order + RateChangeQueue.RateChange memory first = RateChangeQueue.dequeue(queue()); + assertRateChangeEq(first, RateChangeQueue.RateChange(100, 5), "First dequeued item mismatch"); + assertEq(RateChangeQueue.size(queue()), 2); + + RateChangeQueue.RateChange memory second = RateChangeQueue.dequeue(queue()); + assertRateChangeEq(second, RateChangeQueue.RateChange(200, 10), "Second dequeued item mismatch"); + assertEq(RateChangeQueue.size(queue()), 1); + + RateChangeQueue.RateChange memory third = RateChangeQueue.dequeue(queue()); + assertRateChangeEq(third, RateChangeQueue.RateChange(300, 15), "Third dequeued item mismatch"); + + // Queue should now be empty + assertTrue(RateChangeQueue.isEmpty(queue())); + assertEq(RateChangeQueue.size(queue()), 0); + } + + /// forge-config: default.allow_internal_expect_revert = true + function testEmptyQueueDequeue() public { + createEmptyQueue(); + + // Test dequeue on empty queue + vm.expectRevert("Queue is empty"); + RateChangeQueue.dequeue(queue()); + } + + /// forge-config: default.allow_internal_expect_revert = true + function testEmptyQueuePeek() public { + createEmptyQueue(); + + // Test peek on empty queue + vm.expectRevert("Queue is empty"); + RateChangeQueue.peek(queue()); + } + + /// forge-config: default.allow_internal_expect_revert = true + function testEmptyQueuePeekTail() public { + createEmptyQueue(); + + // Test peekTail on empty queue + vm.expectRevert("Queue is empty"); + RateChangeQueue.peekTail(queue()); + } + + function testBoundaryValues() public { + // Test with zero values + RateChangeQueue.RateChange memory zeroItem = createSingleItemQueue(0, 0); + RateChangeQueue.RateChange memory peekedZero = RateChangeQueue.peek(queue()); + assertRateChangeEq(peekedZero, zeroItem, "Zero values not stored correctly"); + RateChangeQueue.dequeue(queue()); + + // Test with max uint values + uint256 maxUint = type(uint256).max; + RateChangeQueue.RateChange memory maxItem = createSingleItemQueue(maxUint, maxUint); + RateChangeQueue.RateChange memory peekedMax = RateChangeQueue.peek(queue()); + assertRateChangeEq(peekedMax, maxItem, "Max values not stored correctly"); + } + + function testQueueReusability() public { + // Test emptying and reusing a queue + createSingleItemQueue(100, 5); + RateChangeQueue.dequeue(queue()); + assertTrue(RateChangeQueue.isEmpty(queue())); + + // Reuse after emptying + RateChangeQueue.enqueue(queue(), 200, 10); + 
assertEq(RateChangeQueue.size(queue()), 1); + + RateChangeQueue.RateChange memory peeked = RateChangeQueue.peek(queue()); + assertRateChangeEq(peeked, RateChangeQueue.RateChange(200, 10), "Queue reuse failed"); + + // Test with advanced indices + RateChangeQueue.dequeue(queue()); + createQueueWithAdvancedIndices(10); + + // Verify queue still functions correctly after index cycling + RateChangeQueue.enqueue(queue(), 999, 999); + assertEq(RateChangeQueue.size(queue()), 1); + + peeked = RateChangeQueue.peek(queue()); + assertRateChangeEq(peeked, RateChangeQueue.RateChange(999, 999), "Queue with advanced indices failed"); + } + + function testMixedOperations() public { + createEmptyQueue(); + + // Series of mixed enqueue/dequeue operations + RateChangeQueue.enqueue(queue(), 100, 5); + RateChangeQueue.enqueue(queue(), 200, 10); + + RateChangeQueue.RateChange memory first = RateChangeQueue.dequeue(queue()); + assertRateChangeEq(first, RateChangeQueue.RateChange(100, 5), "First dequeue failed"); + + RateChangeQueue.enqueue(queue(), 300, 15); + RateChangeQueue.enqueue(queue(), 400, 20); + + assertEq(RateChangeQueue.size(queue()), 3, "Queue size incorrect after mixed operations"); + + // Verify peek at both ends + RateChangeQueue.RateChange memory head = RateChangeQueue.peek(queue()); + assertRateChangeEq(head, RateChangeQueue.RateChange(200, 10), "Head incorrect after mixed operations"); + + RateChangeQueue.RateChange memory tail = RateChangeQueue.peekTail(queue()); + assertRateChangeEq(tail, RateChangeQueue.RateChange(400, 20), "Tail incorrect after mixed operations"); + + // Empty the queue + RateChangeQueue.dequeue(queue()); + RateChangeQueue.dequeue(queue()); + RateChangeQueue.dequeue(queue()); + + assertTrue(RateChangeQueue.isEmpty(queue()), "Queue should be empty after all dequeues"); + } +} diff --git a/service_contracts/test/payments/WithdrawExtraFeeToken.t.sol b/service_contracts/test/payments/WithdrawExtraFeeToken.t.sol new file mode 100644 index 00000000..a6f51cac --- /dev/null +++ b/service_contracts/test/payments/WithdrawExtraFeeToken.t.sol @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.27; + +import {ExtraFeeToken} from "./mocks/ExtraFeeToken.sol"; +import {Errors} from "@payments/Errors.sol"; +import {Payments} from "@payments/Payments.sol"; +import {Test} from "forge-std/Test.sol"; + +contract WithdrawExtraFeeTokenTest is Test { + function testWithdrawFeeToken() public { + Payments payments = new Payments(); + uint256 transferFee = 10 ** 18; + ExtraFeeToken feeToken = new ExtraFeeToken(transferFee); + address user1 = vm.addr(0x1111); + address user2 = vm.addr(0x2222); + feeToken.mint(user1, 10 ** 24); + feeToken.mint(user2, 10 ** 24); + + vm.prank(user1); + feeToken.approve(address(payments), 10 ** 24); + + vm.prank(user2); + feeToken.approve(address(payments), 10 ** 24); + + vm.prank(user1); + vm.expectRevert(); + payments.deposit(feeToken, user1, 10 ** 24); + + vm.prank(user1); + payments.deposit(feeToken, user1, 10 ** 23); + + assertEq(feeToken.balanceOf(address(payments)), 10 ** 23); + (uint256 deposit,,,) = payments.accounts(feeToken, user1); + assertEq(deposit, 10 ** 23); + + vm.prank(user1); + vm.expectRevert(); + payments.withdraw(feeToken, 10 ** 23); + + vm.prank(user2); + payments.deposit(feeToken, user2, 10 ** 23); + (deposit,,,) = payments.accounts(feeToken, user2); + assertEq(deposit, 10 ** 23); + + assertEq(feeToken.balanceOf(address(payments)), 2 * 10 ** 23); + + // the other user's deposit should not allow the withdrawal + 
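// (Informally, and as the assertions in this file suggest: the mock charges its fee + // on top of the transferred amount, so a withdrawal of X debits X + transferFee from + // the account; withdraw(10**23) therefore exceeds user1's 10**23 of credited funds + // regardless of how large the contract's own token balance is.) + 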
vm.prank(user1); + vm.expectRevert(); + payments.withdraw(feeToken, 10 ** 23); + + // users can still withdraw their balance + (deposit,,,) = payments.accounts(feeToken, user1); + assertEq(deposit, 10 ** 23); + vm.prank(user1); + payments.withdraw(feeToken, deposit - transferFee); + (deposit,,,) = payments.accounts(feeToken, user1); + assertEq(deposit, 0); + + (deposit,,,) = payments.accounts(feeToken, user2); + assertEq(deposit, 10 ** 23); + vm.prank(user2); + payments.withdraw(feeToken, deposit - transferFee); + (deposit,,,) = payments.accounts(feeToken, user2); + assertEq(deposit, 0); + + assertEq(feeToken.balanceOf(address(payments)), 0); + } + + function testWithdrawLockup() public { + Payments payments = new Payments(); + uint256 transferFee = 10 ** 18; + ExtraFeeToken feeToken = new ExtraFeeToken(transferFee); + address user1 = vm.addr(0x1111); + address user2 = vm.addr(0x1112); + feeToken.mint(user1, 10 ** 24); + feeToken.mint(user2, 10 ** 24); + + vm.prank(user1); + feeToken.approve(address(payments), 10 ** 24); + vm.prank(user1); + payments.deposit(feeToken, user1, 10 ** 24 - transferFee); + + vm.prank(user2); + feeToken.approve(address(payments), 10 ** 24); + vm.prank(user2); + payments.deposit(feeToken, user2, 10 ** 24 - transferFee); + + (uint256 deposit,,,) = payments.accounts(feeToken, user1); + assertEq(deposit, 10 ** 24 - transferFee); + + address operator = vm.addr(0x2222); + + vm.prank(user1); + payments.setOperatorApproval(feeToken, operator, true, deposit, deposit, deposit); + vm.prank(operator); + uint256 railId = payments.createRail(feeToken, user1, operator, address(0), 0, address(0)); + + uint256 lockup = 10 ** 17; + vm.prank(operator); + payments.modifyRailLockup(railId, 0, lockup); + + vm.prank(user1); + vm.expectRevert(abi.encodeWithSelector(Errors.InsufficientUnlockedFunds.selector, deposit - lockup, deposit)); + payments.withdraw(feeToken, deposit); + + vm.prank(user1); + vm.expectRevert( + abi.encodeWithSelector( + Errors.InsufficientUnlockedFunds.selector, deposit - lockup, deposit - lockup + transferFee + ) + ); + payments.withdraw(feeToken, deposit - lockup); + + vm.prank(user1); + vm.expectRevert(abi.encodeWithSelector(Errors.InsufficientUnlockedFunds.selector, deposit - lockup, deposit)); + payments.withdraw(feeToken, deposit - transferFee); + + vm.prank(user1); + payments.withdraw(feeToken, deposit - transferFee - lockup); + } +} diff --git a/service_contracts/test/payments/helpers/BaseTestHelper.sol b/service_contracts/test/payments/helpers/BaseTestHelper.sol new file mode 100644 index 00000000..c8449b61 --- /dev/null +++ b/service_contracts/test/payments/helpers/BaseTestHelper.sol @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.27; + +import {Test} from "forge-std/Test.sol"; + +contract BaseTestHelper is Test { + uint256 internal ownerSk = 0x01; + uint256 internal user1Sk = 0x11; + uint256 internal user2Sk = 0x12; + uint256 internal user3Sk = 0x13; + uint256 internal operatorSk = 0x21; + uint256 internal operator2Sk = 0x22; + uint256 internal validatorSk = 0x31; + uint256 internal serviceFeeRecipientSk = 0x41; + uint256 internal relayerSk = 0x51; + + address public immutable OWNER = vm.addr(ownerSk); + address public immutable USER1 = vm.addr(user1Sk); + address public immutable USER2 = vm.addr(user2Sk); + address public immutable USER3 = vm.addr(user3Sk); + address public immutable OPERATOR = vm.addr(operatorSk); + address public immutable OPERATOR2 = vm.addr(operator2Sk); + address public immutable VALIDATOR = 
vm.addr(validatorSk); + address public immutable SERVICE_FEE_RECIPIENT = vm.addr(serviceFeeRecipientSk); + address public immutable RELAYER = vm.addr(relayerSk); +} diff --git a/service_contracts/test/payments/helpers/PaymentsTestHelpers.sol b/service_contracts/test/payments/helpers/PaymentsTestHelpers.sol new file mode 100644 index 00000000..b34387ab --- /dev/null +++ b/service_contracts/test/payments/helpers/PaymentsTestHelpers.sol @@ -0,0 +1,956 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.27; + +import {Test} from "forge-std/Test.sol"; +import {Payments} from "@payments/Payments.sol"; +import {MockERC20} from "../mocks/MockERC20.sol"; +import {BaseTestHelper} from "./BaseTestHelper.sol"; +import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; +import {console} from "forge-std/console.sol"; +import {MessageHashUtils} from "@openzeppelin/contracts/utils/cryptography/MessageHashUtils.sol"; +import {Errors} from "@payments/Errors.sol"; + +contract PaymentsTestHelpers is Test, BaseTestHelper { + // Common constants + uint256 public constant INITIAL_BALANCE = 1000 ether; + uint256 public constant DEPOSIT_AMOUNT = 100 ether; + uint256 internal constant MAX_LOCKUP_PERIOD = 100; + + Payments public payments; + MockERC20 public testToken; + IERC20 private constant NATIVE_TOKEN = IERC20(address(0)); + + // Standard test environment setup with common addresses and token + function setupStandardTestEnvironment() public { + vm.startPrank(OWNER); + payments = new Payments(); + vm.stopPrank(); + + // Setup test token and assign to common users + address[] memory users = new address[](6); + users[0] = OWNER; + users[1] = USER1; + users[2] = USER2; + users[3] = OPERATOR; + users[4] = OPERATOR2; + users[5] = VALIDATOR; + + vm.deal(USER1, INITIAL_BALANCE); + vm.deal(USER2, INITIAL_BALANCE); + + testToken = setupTestToken("Test Token", "TEST", users, INITIAL_BALANCE, address(payments)); + } + + function setupTestToken( + string memory name, + string memory symbol, + address[] memory users, + uint256 initialBalance, + address paymentsContract + ) public returns (MockERC20) { + MockERC20 newToken = new MockERC20(name, symbol); + + // Mint tokens to users + for (uint256 i = 0; i < users.length; i++) { + newToken.mint(users[i], initialBalance); + + // Approve payments contract to spend tokens (i.e. 
allowance) + vm.startPrank(users[i]); + newToken.approve(paymentsContract, type(uint256).max); + vm.stopPrank(); + } + + return newToken; + } + + function getPermitSignature(uint256 privateKey, address owner, address spender, uint256 value, uint256 deadline) + public + view + returns (uint8 v, bytes32 r, bytes32 s) + { + uint256 nonce = MockERC20(testToken).nonces(owner); + bytes32 domainSeparator = MockERC20(testToken).DOMAIN_SEPARATOR(); + + bytes32 structHash = keccak256( + abi.encode( + keccak256("Permit(address owner,address spender,uint256 value,uint256 nonce,uint256 deadline)"), + owner, + spender, + value, + nonce, + deadline + ) + ); + + bytes32 digest = MessageHashUtils.toTypedDataHash(domainSeparator, structHash); + + // Sign the exact digest that `permit` expects using the provided private key + (v, r, s) = vm.sign(privateKey, digest); + } + + function makeDepositWithPermit(uint256 fromPrivateKey, address to, uint256 amount) public { + address from = vm.addr(fromPrivateKey); + uint256 deadline = block.timestamp + 1 hours; + + // Capture pre-deposit balances and state + uint256 fromBalanceBefore = _balanceOf(from, false); + uint256 paymentsBalanceBefore = _balanceOf(address(payments), false); + Payments.Account memory toAccountBefore = _getAccountData(to, false); + + // get signature for permit + (uint8 v, bytes32 r, bytes32 s) = getPermitSignature(fromPrivateKey, from, address(payments), amount, deadline); + + // Execute deposit with permit + vm.startPrank(from); + + payments.depositWithPermit(testToken, to, amount, deadline, v, r, s); + + vm.stopPrank(); + + // Capture post-deposit balances and state + uint256 fromBalanceAfter = _balanceOf(from, false); + uint256 paymentsBalanceAfter = _balanceOf(address(payments), false); + Payments.Account memory toAccountAfter = _getAccountData(to, false); + + // Asserts / Checks + _assertDepositBalances( + fromBalanceBefore, + fromBalanceAfter, + paymentsBalanceBefore, + paymentsBalanceAfter, + toAccountBefore, + toAccountAfter, + amount + ); + } + + function _assertDepositBalances( + uint256 fromBalanceBefore, + uint256 fromBalanceAfter, + uint256 paymentsBalanceBefore, + uint256 paymentsBalanceAfter, + Payments.Account memory toAccountBefore, + Payments.Account memory toAccountAfter, + uint256 amount + ) public pure { + assertEq(fromBalanceAfter, fromBalanceBefore - amount, "Sender's balance not reduced correctly"); + assertEq( + paymentsBalanceAfter, paymentsBalanceBefore + amount, "Payments contract balance not increased correctly" + ); + + assertEq( + toAccountAfter.funds, toAccountBefore.funds + amount, "Recipient's account balance not increased correctly" + ); + } + + function getAccountData(address user) public view returns (Payments.Account memory) { + return _getAccountData(user, false); + } + + function getNativeAccountData(address user) public view returns (Payments.Account memory) { + return _getAccountData(user, true); + } + + function _getAccountData(address user, bool useNativeToken) public view returns (Payments.Account memory) { + IERC20 token = useNativeToken ? 
NATIVE_TOKEN : testToken; + (uint256 funds, uint256 lockupCurrent, uint256 lockupRate, uint256 lockupLastSettledAt) = + payments.accounts(token, user); + + return Payments.Account({ + funds: funds, + lockupCurrent: lockupCurrent, + lockupRate: lockupRate, + lockupLastSettledAt: lockupLastSettledAt + }); + } + + function makeDeposit(address from, address to, uint256 amount) public { + _performDeposit(from, to, amount, false); + } + + function makeNativeDeposit(address from, address to, uint256 amount) public { + _performDeposit(from, to, amount, true); + } + + function _performDeposit(address from, address to, uint256 amount, bool useNativeToken) public { + // Capture pre-deposit balances + uint256 fromBalanceBefore = _balanceOf(from, useNativeToken); + uint256 paymentsBalanceBefore = _balanceOf(address(payments), useNativeToken); + Payments.Account memory toAccountBefore = _getAccountData(to, useNativeToken); + + // Make the deposit + vm.startPrank(from); + + uint256 value = 0; + IERC20 token = testToken; + if (useNativeToken) { + value = amount; + token = NATIVE_TOKEN; + } + + payments.deposit{value: value}(token, to, amount); + vm.stopPrank(); + + // Verify token balances + uint256 fromBalanceAfter = _balanceOf(from, useNativeToken); + uint256 paymentsBalanceAfter = _balanceOf(address(payments), useNativeToken); + Payments.Account memory toAccountAfter = _getAccountData(to, useNativeToken); + + // Verify balances + assertEq(fromBalanceAfter, fromBalanceBefore - amount, "Sender's balance not reduced correctly"); + assertEq( + paymentsBalanceAfter, paymentsBalanceBefore + amount, "Payments contract balance not increased correctly" + ); + assertEq( + toAccountAfter.funds, toAccountBefore.funds + amount, "Recipient's account balance not increased correctly" + ); + console.log("toAccountAfter.funds", toAccountAfter.funds); + } + + function makeWithdrawal(address from, uint256 amount) public { + _performWithdrawal( + from, + from, // recipient is the same as sender + amount, + true, // use the standard withdraw function + false // use ERC20 token + ); + } + + function makeNativeWithdrawal(address from, uint256 amount) public { + _performWithdrawal( + from, + from, // recipient is the same as sender + amount, + true, // use the standard withdraw function + true // use native token + ); + } + + function expectWithdrawalToFail(address from, uint256 available, uint256 requested) public { + vm.startPrank(from); + vm.expectRevert(abi.encodeWithSelector(Errors.InsufficientUnlockedFunds.selector, available, requested)); + payments.withdraw(testToken, requested); + vm.stopPrank(); + } + + function makeWithdrawalTo(address from, address to, uint256 amount) public { + _performWithdrawal( + from, + to, + amount, + false, // use the withdrawTo function + false // use erc20 token + ); + } + + function makeNativeWithdrawalTo(address from, address to, uint256 amount) public { + _performWithdrawal( + from, + to, + amount, + false, // use the withdrawTo function + true // use native token + ); + } + + function _balanceOf(address addr, bool useNativeToken) public view returns (uint256) { + if (useNativeToken) { + return addr.balance; + } else { + return testToken.balanceOf(addr); + } + } + + function _performWithdrawal( + address from, + address to, + uint256 amount, + bool isStandardWithdrawal, + bool useNativeToken + ) private { + IERC20 token = useNativeToken ? 
NATIVE_TOKEN : testToken; + + // Capture pre-withdrawal balances + uint256 fromAccountBalanceBefore = _getAccountData(from, useNativeToken).funds; + uint256 recipientBalanceBefore = _balanceOf(to, useNativeToken); + uint256 paymentsBalanceBefore = _balanceOf(address(payments), useNativeToken); + + // Make the withdrawal + vm.startPrank(from); + if (isStandardWithdrawal) { + payments.withdraw(token, amount); + } else { + payments.withdrawTo(token, to, amount); + } + vm.stopPrank(); + + // Verify balances + uint256 fromAccountBalanceAfter = _getAccountData(from, useNativeToken).funds; + uint256 recipientBalanceAfter = _balanceOf(to, useNativeToken); + uint256 paymentsBalanceAfter = _balanceOf(address(payments), useNativeToken); + + // Assert balances changed correctly + assertEq( + fromAccountBalanceAfter, + fromAccountBalanceBefore - amount, + "Sender's account balance not decreased correctly" + ); + assertEq(recipientBalanceAfter, recipientBalanceBefore + amount, "Recipient's balance not increased correctly"); + assertEq( + paymentsBalanceAfter, paymentsBalanceBefore - amount, "Payments contract balance not decreased correctly" + ); + } + + function createRail(address from, address to, address railOperator, address validator, address serviceFeeRecipient) + public + returns (uint256) + { + vm.startPrank(railOperator); + uint256 railId = payments.createRail( + testToken, + from, + to, + validator, + 0, // commissionRateBps + serviceFeeRecipient // serviceFeeRecipient + ); + vm.stopPrank(); + + // Verify rail was created with the correct parameters + Payments.RailView memory rail = payments.getRail(railId); + assertEq(address(rail.token), address(testToken), "Rail token address mismatch"); + assertEq(rail.from, from, "Rail sender address mismatch"); + assertEq(rail.to, to, "Rail recipient address mismatch"); + assertEq(rail.validator, validator, "Rail validator address mismatch"); + assertEq(rail.operator, railOperator, "Rail operator address mismatch"); + assertEq(rail.serviceFeeRecipient, serviceFeeRecipient, "Rail service fee recipient address mismatch"); + + return railId; + } + + function setupRailWithParameters( + address from, + address to, + address railOperator, + uint256 paymentRate, + uint256 lockupPeriod, + uint256 lockupFixed, + address validator, + address serviceFeeRecipient + ) public returns (uint256 railId) { + // Calculate required allowances for the rail + uint256 requiredRateAllowance = paymentRate; + uint256 requiredLockupAllowance = lockupFixed + (paymentRate * lockupPeriod); + + // Get current operator allowances + (bool isApproved, uint256 rateAllowance, uint256 lockupAllowance,,,) = + payments.operatorApprovals(testToken, from, railOperator); + + // Ensure operator has sufficient allowances before creating the rail + if (!isApproved || rateAllowance < requiredRateAllowance || lockupAllowance < requiredLockupAllowance) { + vm.startPrank(from); + payments.setOperatorApproval( + testToken, + railOperator, + true, + requiredRateAllowance > rateAllowance ? requiredRateAllowance : rateAllowance, + requiredLockupAllowance > lockupAllowance ? 
requiredLockupAllowance : lockupAllowance, + MAX_LOCKUP_PERIOD + ); + vm.stopPrank(); + } + + railId = createRail(from, to, railOperator, validator, serviceFeeRecipient); + + // Get operator usage before modifications + (,,, uint256 rateUsageBefore, uint256 lockupUsageBefore,) = + payments.operatorApprovals(testToken, from, railOperator); + + // Get rail parameters before modifications to accurately calculate expected usage changes + Payments.RailView memory railBefore; + try payments.getRail(railId) returns (Payments.RailView memory railData) { + railBefore = railData; + } catch { + // If this is a new rail, all values will be zero + railBefore.paymentRate = 0; + railBefore.lockupPeriod = 0; + railBefore.lockupFixed = 0; + } + + // Set payment rate and lockup parameters + vm.startPrank(railOperator); + payments.modifyRailPayment(railId, paymentRate, 0); + payments.modifyRailLockup(railId, lockupPeriod, lockupFixed); + vm.stopPrank(); + + // Verify rail parameters were set correctly + Payments.RailView memory rail = payments.getRail(railId); + assertEq(rail.paymentRate, paymentRate, "Rail payment rate mismatch"); + assertEq(rail.lockupPeriod, lockupPeriod, "Rail lockup period mismatch"); + assertEq(rail.lockupFixed, lockupFixed, "Rail fixed lockup mismatch"); + assertEq(rail.validator, validator, "Rail validator address mismatch"); + + // Get operator usage after modifications + (,,, uint256 rateUsageAfter, uint256 lockupUsageAfter,) = + payments.operatorApprovals(testToken, from, railOperator); + + // Calculate expected change in rate usage + int256 expectedRateChange; + if (paymentRate > railBefore.paymentRate) { + expectedRateChange = int256(paymentRate - railBefore.paymentRate); + } else { + expectedRateChange = -int256(railBefore.paymentRate - paymentRate); + } + + // Calculate old and new lockup values to determine the change + uint256 oldLockupTotal = railBefore.lockupFixed + (railBefore.paymentRate * railBefore.lockupPeriod); + uint256 newLockupTotal = lockupFixed + (paymentRate * lockupPeriod); + int256 expectedLockupChange; + + if (newLockupTotal > oldLockupTotal) { + expectedLockupChange = int256(newLockupTotal - oldLockupTotal); + } else { + expectedLockupChange = -int256(oldLockupTotal - newLockupTotal); + } + + // Verify operator usage has been updated correctly + if (expectedRateChange > 0) { + assertEq( + rateUsageAfter, + rateUsageBefore + uint256(expectedRateChange), + "Operator rate usage not increased correctly" + ); + } else { + assertEq( + rateUsageBefore, + rateUsageAfter + uint256(-expectedRateChange), + "Operator rate usage not decreased correctly" + ); + } + + if (expectedLockupChange > 0) { + assertEq( + lockupUsageAfter, + lockupUsageBefore + uint256(expectedLockupChange), + "Operator lockup usage not increased correctly" + ); + } else { + assertEq( + lockupUsageBefore, + lockupUsageAfter + uint256(-expectedLockupChange), + "Operator lockup usage not decreased correctly" + ); + } + + return railId; + } + + function setupOperatorApproval( + address from, + address operator, + uint256 rateAllowance, + uint256 lockupAllowance, + uint256 maxLockupPeriod + ) public { + // Get initial usage values for verification + (,,, uint256 initialRateUsage, uint256 initialLockupUsage,) = + payments.operatorApprovals(testToken, from, operator); + + // Set approval + vm.startPrank(from); + payments.setOperatorApproval(testToken, operator, true, rateAllowance, lockupAllowance, maxLockupPeriod); + vm.stopPrank(); + + // Verify operator allowances after setting them + 
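// (setOperatorApproval overwrites the allowance caps but never touches the recorded + // usage counters, which is why the initial*Usage values are asserted unchanged below.) + 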
verifyOperatorAllowances( + from, + operator, + true, // isApproved + rateAllowance, // rateAllowance + lockupAllowance, // lockupAllowance + initialRateUsage, // rateUsage shouldn't change + initialLockupUsage, // lockupUsage shouldn't change + maxLockupPeriod // maxLockupPeriod + ); + } + + function revokeOperatorApprovalAndVerify(address from, address operator) public { + // Get current values for verification + ( + , + uint256 rateAllowance, + uint256 lockupAllowance, + uint256 rateUsage, + uint256 lockupUsage, + uint256 maxLockupPeriod + ) = payments.operatorApprovals(testToken, from, operator); + + // Revoke approval + vm.startPrank(from); + payments.setOperatorApproval(testToken, operator, false, rateAllowance, lockupAllowance, maxLockupPeriod); + vm.stopPrank(); + + // Verify operator allowances after revoking + verifyOperatorAllowances( + from, + operator, + false, // isApproved should be false + rateAllowance, // rateAllowance should remain the same + lockupAllowance, // lockupAllowance should remain the same + rateUsage, // rateUsage shouldn't change + lockupUsage, // lockupUsage shouldn't change, + maxLockupPeriod // maxLockupPeriod should remain the same + ); + } + + function advanceBlocks(uint256 blocks) public { + vm.roll(block.number + blocks); + } + + function assertAccountState( + address user, + uint256 expectedFunds, + uint256 expectedLockup, + uint256 expectedRate, + uint256 expectedLastSettled + ) public view { + Payments.Account memory account = getAccountData(user); + assertEq(account.funds, expectedFunds, "Account funds incorrect"); + assertEq(account.lockupCurrent, expectedLockup, "Account lockup incorrect"); + assertEq(account.lockupRate, expectedRate, "Account lockup rate incorrect"); + assertEq(account.lockupLastSettledAt, expectedLastSettled, "Account last settled at incorrect"); + } + + function verifyOperatorAllowances( + address client, + address operator, + bool expectedIsApproved, + uint256 expectedRateAllowance, + uint256 expectedLockupAllowance, + uint256 expectedRateUsage, + uint256 expectedLockupUsage, + uint256 expectedMaxLockupPeriod + ) public view { + ( + bool isApproved, + uint256 rateAllowance, + uint256 lockupAllowance, + uint256 rateUsage, + uint256 lockupUsage, + uint256 maxLockupPeriod + ) = payments.operatorApprovals(testToken, client, operator); + + assertEq(isApproved, expectedIsApproved, "Operator approval status mismatch"); + assertEq(rateAllowance, expectedRateAllowance, "Rate allowance mismatch"); + assertEq(lockupAllowance, expectedLockupAllowance, "Lockup allowance mismatch"); + assertEq(rateUsage, expectedRateUsage, "Rate usage mismatch"); + assertEq(lockupUsage, expectedLockupUsage, "Lockup usage mismatch"); + assertEq(maxLockupPeriod, expectedMaxLockupPeriod, "Max lockup period mismatch"); + } + + // Get current operator allowance and usage + function getOperatorAllowanceAndUsage(address client, address operator) + public + view + returns ( + bool isApproved, + uint256 rateAllowance, + uint256 lockupAllowance, + uint256 rateUsage, + uint256 lockupUsage, + uint256 maxLockupPeriod + ) + { + return payments.operatorApprovals(testToken, client, operator); + } + + function executeOneTimePayment(uint256 railId, address operatorAddress, uint256 oneTimeAmount) public { + Payments.RailView memory railBefore = payments.getRail(railId); + address railClient = railBefore.from; + address railRecipient = railBefore.to; + + // Get initial balances + Payments.Account memory clientBefore = getAccountData(railClient); + Payments.Account memory 
recipientBefore = getAccountData(railRecipient); + Payments.Account memory operatorBefore = getAccountData(operatorAddress); + + // Get operator allowance and usage before payment + (,, uint256 lockupAllowanceBefore,, uint256 lockupUsageBefore,) = + payments.operatorApprovals(testToken, railClient, operatorAddress); + + // Make one-time payment + vm.startPrank(operatorAddress); + payments.modifyRailPayment(railId, railBefore.paymentRate, oneTimeAmount); + vm.stopPrank(); + + // Verify balance changes + Payments.Account memory clientAfter = getAccountData(railClient); + Payments.Account memory recipientAfter = getAccountData(railRecipient); + Payments.Account memory operatorAfter = getAccountData(operatorAddress); + + assertEq( + clientAfter.funds, + clientBefore.funds - oneTimeAmount, + "Client funds not reduced correctly after one-time payment" + ); + + uint256 networkFee = oneTimeAmount * payments.NETWORK_FEE_NUMERATOR() / payments.NETWORK_FEE_DENOMINATOR(); + // Get commission rate from rail + uint256 commissionRate = railBefore.commissionRateBps; + uint256 operatorCommission = 0; + + if (commissionRate > 0) { + operatorCommission = ((oneTimeAmount - networkFee) * commissionRate) / payments.COMMISSION_MAX_BPS(); + // Verify operator commission is non-zero when commission rate is non-zero + assertGt(operatorCommission, 0, "Operator commission should be non-zero when commission rate is non-zero"); + } + + uint256 netPayeeAmount = oneTimeAmount - networkFee - operatorCommission; + + assertEq( + recipientAfter.funds, + recipientBefore.funds + netPayeeAmount, + "Recipient funds not increased correctly after one-time payment" + ); + + // Verify fixed lockup was reduced + Payments.RailView memory railAfter = payments.getRail(railId); + assertEq( + railAfter.lockupFixed, + railBefore.lockupFixed - oneTimeAmount, + "Fixed lockup not reduced by one-time payment amount" + ); + + // Verify operator account is credited with commission + if (operatorCommission > 0) { + assertEq( + operatorAfter.funds, + operatorBefore.funds + operatorCommission, + "Operator funds not increased correctly with commission amount" + ); + } + + // Verify account lockup is also reduced + assertEq( + clientAfter.lockupCurrent, + clientBefore.lockupCurrent - oneTimeAmount, + "Client lockup not reduced correctly after one-time payment" + ); + + // Verify operator lockup allowance and usage are both reduced + (,, uint256 lockupAllowanceAfter,, uint256 lockupUsageAfter,) = + payments.operatorApprovals(testToken, railClient, operatorAddress); + + assertEq( + lockupAllowanceBefore - oneTimeAmount, + lockupAllowanceAfter, + "Operator lockup allowance not reduced correctly after one-time payment" + ); + + assertEq( + lockupUsageBefore - oneTimeAmount, + lockupUsageAfter, + "Operator lockup usage not reduced correctly after one-time payment" + ); + } + + function expectcreateRailToRevertWithoutOperatorApproval() public { + vm.startPrank(OPERATOR); + vm.expectRevert(abi.encodeWithSelector(Errors.OperatorNotApproved.selector, USER1, OPERATOR)); + payments.createRail( + testToken, + USER1, + USER2, + address(0), + 0, + SERVICE_FEE_RECIPIENT // operator commission receiver + ); + } + + function expectExpiredPermitToRevert(uint256 senderSk, address to, uint256 amount) public { + address from = vm.addr(senderSk); + uint256 futureDeadline = block.timestamp + 1 hours; + (uint8 v, bytes32 r, bytes32 s) = getPermitSignature(senderSk, from, address(payments), amount, futureDeadline); + vm.warp(futureDeadline + 10); + vm.startPrank(from); + 
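// The permit was signed for futureDeadline and vm.warp above moved time past it, + // so OpenZeppelin's ERC20Permit rejects it with ERC2612ExpiredSignature(deadline). + 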
vm.expectRevert(abi.encodeWithSignature("ERC2612ExpiredSignature(uint256)", futureDeadline)); + payments.depositWithPermit(testToken, to, amount, futureDeadline, v, r, s); + vm.stopPrank(); + } + + function expectNativeTokenDepositWithPermitToRevert(uint256 senderSk, address to, uint256 amount) public { + uint256 deadline = block.timestamp + 1 hours; + address from = vm.addr(senderSk); + vm.startPrank(from); + vm.expectRevert(Errors.NativeTokenNotSupported.selector); + payments.depositWithPermit( + NATIVE_TOKEN, // Native token is not allowed + to, + amount, + deadline, + 0, // v + bytes32(0), // r + bytes32(0) // s + ); + vm.stopPrank(); + } + + function expectInvalidPermitToRevert(uint256 senderSk, address to, uint256 amount) public { + uint256 deadline = block.timestamp + 1 hours; + + uint256 notSenderSk = senderSk == user1Sk ? user2Sk : user1Sk; + address from = vm.addr(senderSk); + + // Make permit signature with notSenderSk, but call from 'from' + (uint8 v, bytes32 r, bytes32 s) = getPermitSignature(notSenderSk, from, address(payments), amount, deadline); + + vm.startPrank(from); + + // Expect custom error: ERC2612InvalidSigner(wrongRecovered, expectedOwner) + vm.expectRevert(abi.encodeWithSignature("ERC2612InvalidSigner(address,address)", vm.addr(notSenderSk), from)); + payments.depositWithPermit(testToken, to, amount, deadline, v, r, s); + vm.stopPrank(); + } + + function makeDepositWithPermitAndOperatorApproval( + uint256 fromPrivateKey, + uint256 amount, + address operator, + uint256 rateAllowance, + uint256 lockupAllowance, + uint256 maxLockupPeriod + ) public { + address from = vm.addr(fromPrivateKey); + address to = from; + uint256 deadline = block.timestamp + 1 hours; + + // Capture pre-deposit balances and state + uint256 fromBalanceBefore = _balanceOf(from, false); + uint256 paymentsBalanceBefore = _balanceOf(address(payments), false); + Payments.Account memory toAccountBefore = _getAccountData(to, false); + + // Get signature for permit + (uint8 v, bytes32 r, bytes32 s) = getPermitSignature(fromPrivateKey, from, address(payments), amount, deadline); + + // Execute deposit with permit + vm.startPrank(from); + + payments.depositWithPermitAndApproveOperator( + testToken, from, amount, deadline, v, r, s, operator, rateAllowance, lockupAllowance, maxLockupPeriod + ); + + vm.stopPrank(); + + // Capture post-deposit balances and state + uint256 fromBalanceAfter = _balanceOf(from, false); + uint256 paymentsBalanceAfter = _balanceOf(address(payments), false); + Payments.Account memory toAccountAfter = _getAccountData(to, false); + + // Asserts / Checks + _assertDepositBalances( + fromBalanceBefore, + fromBalanceAfter, + paymentsBalanceBefore, + paymentsBalanceAfter, + toAccountBefore, + toAccountAfter, + amount + ); + + verifyOperatorAllowances(from, operator, true, rateAllowance, lockupAllowance, 0, 0, maxLockupPeriod); + } + + function expectInvalidPermitAndOperatorApprovalToRevert( + uint256 senderSk, + uint256 amount, + address operator, + uint256 rateAllowance, + uint256 lockupAllowance, + uint256 maxLockupPeriod + ) public { + uint256 deadline = block.timestamp + 1 hours; + address to = vm.addr(senderSk); // Use the sender's address as recipient + + uint256 notSenderSk = senderSk == user1Sk ? 
user2Sk : user1Sk; + address from = vm.addr(senderSk); + + // Make permit signature with notSenderSk, but call from 'from' + (uint8 v, bytes32 r, bytes32 s) = getPermitSignature(notSenderSk, from, address(payments), amount, deadline); + + // Capture pre-deposit balances and state + uint256 fromBalanceBefore = _balanceOf(from, false); + uint256 paymentsBalanceBefore = _balanceOf(address(payments), false); + Payments.Account memory toAccountBefore = _getAccountData(to, false); + + vm.startPrank(from); + + // Expect custom error: ERC2612InvalidSigner(wrongRecovered, expectedOwner) + vm.expectRevert(abi.encodeWithSignature("ERC2612InvalidSigner(address,address)", vm.addr(notSenderSk), from)); + payments.depositWithPermitAndApproveOperator( + testToken, from, amount, deadline, v, r, s, operator, rateAllowance, lockupAllowance, maxLockupPeriod + ); + vm.stopPrank(); + + // Capture post-deposit balances and state + uint256 fromBalanceAfter = _balanceOf(from, false); + uint256 paymentsBalanceAfter = _balanceOf(address(payments), false); + Payments.Account memory toAccountAfter = _getAccountData(to, false); + + // Asserts / Checks + _assertDepositBalances( + fromBalanceBefore, + fromBalanceAfter, + paymentsBalanceBefore, + paymentsBalanceAfter, + toAccountBefore, + toAccountAfter, + 0 // No funds should have been transferred due to revert + ); + + verifyOperatorAllowances(from, operator, false, 0, 0, 0, 0, 0); // No values should have been set due to revert - expect defaults + } + + function makeDepositWithPermitToAnotherUser(uint256 senderSk, address depositor, uint256 amount) public { + address to = vm.addr(senderSk); + uint256 deadline = block.timestamp + 1 hours; + + // Get permit signature for 'to' address + (uint8 v, bytes32 r, bytes32 s) = getPermitSignature(senderSk, to, address(payments), amount, deadline); + + vm.startPrank(depositor); + payments.depositWithPermit(testToken, to, amount, deadline, v, r, s); + vm.stopPrank(); + } + + // keccak256("ReceiveWithAuthorization(address from,address to,uint256 value,uint256 validAfter,uint256 validBefore,bytes32 nonce)") + bytes32 private constant RECEIVE_WITH_AUTHORIZATION_TYPEHASH = keccak256( + "ReceiveWithAuthorization(address from,address to,uint256 value,uint256 validAfter,uint256 validBefore,bytes32 nonce)" + ); // as per EIP-3009 + + function getReceiveWithAuthorizationSignature( + uint256 privateKey, + IERC20 token, + address from, + address to, + uint256 value, + uint256 validAfter, + uint256 validBefore, + bytes32 nonce + ) public view returns (uint8 v, bytes32 r, bytes32 s) { + // EIP-712 domain for ERC-3009 (MockERC20 defines its own domainSeparator unrelated to ERC2612) + bytes32 domainSeparator = MockERC20(address(token)).domainSeparator(); + + bytes32 structHash = + keccak256(abi.encode(RECEIVE_WITH_AUTHORIZATION_TYPEHASH, from, to, value, validAfter, validBefore, nonce)); + + bytes32 digest = MessageHashUtils.toTypedDataHash(domainSeparator, structHash); + + (v, r, s) = vm.sign(privateKey, digest); + } + + function depositWithAuthorizationInsufficientBalance(uint256 fromPrivateKey) public { + address from = vm.addr(fromPrivateKey); + address to = from; + uint256 validAfter = 0; + uint256 validBefore = block.timestamp + 300; + uint256 amount = INITIAL_BALANCE + 1; + bytes32 nonce = keccak256(abi.encodePacked("auth-nonce", from, to, amount, block.number)); + + (uint8 v, bytes32 r, bytes32 s) = getReceiveWithAuthorizationSignature( + fromPrivateKey, testToken, from, address(payments), amount, validAfter, validBefore, nonce + ); + + 
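+ // The EIP-3009 authorization names the Payments contract as payee, since receiveWithAuthorization requires the recipient to be the caller.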
vm.startPrank(from); + // Since signature is valid but balance is insufficient, MockERC20 will revert with ERC20InsufficientBalance + vm.expectRevert( + abi.encodeWithSignature("ERC20InsufficientBalance(address,uint256,uint256)", from, INITIAL_BALANCE, amount) + ); + payments.depositWithAuthorization(testToken, to, amount, validAfter, validBefore, nonce, v, r, s); + vm.stopPrank(); + } + + function depositWithAuthorizationAndOperatorApproval( + uint256 fromPrivateKey, + uint256 amount, + uint256 validForSeconds, + address operator, + uint256 rateAllowance, + uint256 lockupAllowance, + uint256 maxLockupPeriod + ) public returns (bytes32 nonce) { + address from = vm.addr(fromPrivateKey); + address to = from; + + // Windows + uint256 validAfter = 0; // valid immediately + uint256 validBefore = block.timestamp + validForSeconds; + + // Unique nonce + nonce = keccak256(abi.encodePacked("auth-nonce", from, to, amount, block.number)); + + // Pre-state capture + uint256 fromBalanceBefore = _balanceOf(from, false); + uint256 paymentsBalanceBefore = _balanceOf(address(payments), false); + Payments.Account memory toAccountBefore = _getAccountData(to, false); + + // Build signature + (uint8 v, bytes32 r, bytes32 s) = getReceiveWithAuthorizationSignature( + fromPrivateKey, + testToken, + from, + address(payments), // pay to Payments contract + amount, + validAfter, + validBefore, + nonce + ); + + // Execute deposit via authorization + vm.startPrank(from); + + payments.depositWithAuthorizationAndApproveOperator( + testToken, + to, + amount, + validAfter, + validBefore, + nonce, + v, + r, + s, + operator, + rateAllowance, + lockupAllowance, + maxLockupPeriod + ); + + vm.stopPrank(); + + // Post-state capture + uint256 fromBalanceAfter = _balanceOf(from, false); + uint256 paymentsBalanceAfter = _balanceOf(address(payments), false); + Payments.Account memory toAccountAfter = _getAccountData(from, false); + + // Assertions + _assertDepositBalances( + fromBalanceBefore, + fromBalanceAfter, + paymentsBalanceBefore, + paymentsBalanceAfter, + toAccountBefore, + toAccountAfter, + amount + ); + + // Verify authorization is consumed on the token + bool used = testToken.authorizationState(from, nonce); + assertTrue(used); + + verifyOperatorAllowances(from, operator, true, rateAllowance, lockupAllowance, 0, 0, maxLockupPeriod); + } +} diff --git a/service_contracts/test/payments/helpers/RailSettlementHelpers.sol b/service_contracts/test/payments/helpers/RailSettlementHelpers.sol new file mode 100644 index 00000000..819f6f29 --- /dev/null +++ b/service_contracts/test/payments/helpers/RailSettlementHelpers.sol @@ -0,0 +1,299 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.27; + +import {Test} from "forge-std/Test.sol"; +import {Payments} from "@payments/Payments.sol"; +import {MockValidator} from "../mocks/MockValidator.sol"; +import {PaymentsTestHelpers} from "./PaymentsTestHelpers.sol"; +import {console} from "forge-std/console.sol"; + +contract RailSettlementHelpers is Test { + PaymentsTestHelpers public baseHelper; + Payments public payments; + + constructor() { + baseHelper = new PaymentsTestHelpers(); + } + + function initialize(Payments _payments, PaymentsTestHelpers _baseHelper) public { + payments = _payments; + baseHelper = _baseHelper; + } + + struct SettlementResult { + uint256 totalAmount; + uint256 netPayeeAmount; + uint256 operatorCommission; + uint256 settledUpto; + string note; + } + + function setupRailWithValidatorAndRateChangeQueue( + address from, + address to, + address 
operator, + address validator, + uint256[] memory rates, + uint256 lockupPeriod, + uint256 lockupFixed, + uint256 maxLockupPeriod, + address serviceFeeRecipient + ) public returns (uint256) { + require(validator != address(0), "RailSettlementHelpers: validator cannot be zero address"); + + // Setup operator approval with sufficient allowances + uint256 maxRate = 0; + for (uint256 i = 0; i < rates.length; i++) { + if (rates[i] > maxRate) { + maxRate = rates[i]; + } + } + + // Calculate total lockup needed + uint256 totalLockupAllowance = lockupFixed + (maxRate * lockupPeriod); + + // Setup operator approval with the necessary allowances + baseHelper.setupOperatorApproval( + from, + operator, + maxRate, // Rate allowance + totalLockupAllowance, // Lockup allowance + maxLockupPeriod // Max lockup period + ); + + // Create rail with parameters + uint256 railId = baseHelper.setupRailWithParameters( + from, + to, + operator, + rates[0], // Initial rate + lockupPeriod, + lockupFixed, + validator, + serviceFeeRecipient + ); + + // Apply rate changes for the rest of the rates + vm.startPrank(operator); + for (uint256 i = 1; i < rates.length; i++) { + // Each change will enqueue the previous rate + payments.modifyRailPayment(railId, rates[i], 0); + + // Advance one block to ensure the changes are at different epochs + baseHelper.advanceBlocks(1); + } + vm.stopPrank(); + + return railId; + } + + function createInDebtRail( + address from, + address to, + address operator, + uint256 paymentRate, + uint256 lockupPeriod, + uint256 fundAmount, + uint256 fixedLockup, + address serviceFeeRecipient + ) public returns (uint256) { + baseHelper.makeDeposit(from, from, fundAmount); + + // Create a rail with specified parameters + uint256 railId = baseHelper.setupRailWithParameters( + from, to, operator, paymentRate, lockupPeriod, fixedLockup, address(0), serviceFeeRecipient + ); + + // Advance blocks past the lockup period to force the rail into debt + baseHelper.advanceBlocks(lockupPeriod + 1); + + return railId; + } + + function deployMockValidator(MockValidator.ValidatorMode mode) public returns (MockValidator) { + return new MockValidator(mode); + } + + function settleRailAndVerify(uint256 railId, uint256 untilEpoch, uint256 expectedAmount, uint256 expectedUpto) + public + returns (SettlementResult memory result) + { + console.log("settleRailAndVerify"); + // Get the rail details to identify payer and payee + Payments.RailView memory rail = payments.getRail(railId); + address payer = rail.from; + address payee = rail.to; + + // Get balances before settlement + Payments.Account memory payerAccountBefore = baseHelper.getAccountData(payer); + Payments.Account memory payeeAccountBefore = baseHelper.getAccountData(payee); + + console.log("payerFundsBefore", payerAccountBefore.funds); + console.log("payerLockupBefore", payerAccountBefore.lockupCurrent); + console.log("payeeFundsBefore", payeeAccountBefore.funds); + console.log("payeeLockupBefore", payeeAccountBefore.lockupCurrent); + + uint256 settlementAmount; + uint256 netPayeeAmount; + uint256 operatorCommission; + uint256 networkFee; + uint256 settledUpto; + string memory note; + + vm.startPrank(payer); + (settlementAmount, netPayeeAmount, operatorCommission, networkFee, settledUpto, note) = + payments.settleRail(railId, untilEpoch); + vm.stopPrank(); + + console.log("settlementAmount", settlementAmount); + console.log("netPayeeAmount", netPayeeAmount); + console.log("operatorCommission", operatorCommission); + console.log("networkFee", networkFee); + 
console.log("settledUpto", settledUpto); + console.log("note", note); + + // Verify results + assertEq(settlementAmount, expectedAmount, "Settlement amount doesn't match expected"); + assertEq(settledUpto, expectedUpto, "Settled upto doesn't match expected"); + + // Verify payer and payee balance changes + Payments.Account memory payerAccountAfter = baseHelper.getAccountData(payer); + Payments.Account memory payeeAccountAfter = baseHelper.getAccountData(payee); + console.log("payerFundsAfter", payerAccountAfter.funds); + console.log("payeeFundsAfter", payeeAccountAfter.funds); + + assertEq( + payerAccountBefore.funds - payerAccountAfter.funds, + settlementAmount, + "Payer's balance reduction doesn't match settlement amount" + ); + assertEq( + payeeAccountAfter.funds - payeeAccountBefore.funds, + netPayeeAmount, + "Payee's balance increase doesn't match net payee amount" + ); + + rail = payments.getRail(railId); + assertEq(rail.settledUpTo, expectedUpto, "Rail settled upto incorrect"); + + return SettlementResult(settlementAmount, netPayeeAmount, operatorCommission, settledUpto, note); + } + + function terminateAndSettleRail(uint256 railId, uint256 expectedAmount, uint256 expectedUpto) + public + returns (SettlementResult memory result) + { + // Get rail details to extract client and operator addresses + Payments.RailView memory rail = payments.getRail(railId); + address client = rail.from; + address operator = rail.operator; + + // Terminate the rail as operator + vm.prank(operator); + payments.terminateRail(railId); + + // Verify rail was properly terminated + rail = payments.getRail(railId); + (,,, uint256 lockupLastSettledAt) = payments.accounts(baseHelper.testToken(), client); + assertTrue(rail.endEpoch > 0, "Rail should be terminated"); + assertEq( + rail.endEpoch, + lockupLastSettledAt + rail.lockupPeriod, + "Rail end epoch should be account lockup last settled at + rail lockup period" + ); + + return settleRailAndVerify(railId, block.number, expectedAmount, expectedUpto); + } + + function modifyRailSettingsAndVerify( + Payments paymentsContract, + uint256 railId, + address operator, + uint256 newRate, + uint256 newLockupPeriod, + uint256 newFixedLockup + ) public { + Payments.RailView memory railBefore = paymentsContract.getRail(railId); + address client = railBefore.from; + + // Get operator allowance usage before modifications + (,,, uint256 rateUsageBefore, uint256 lockupUsageBefore,) = + paymentsContract.operatorApprovals(baseHelper.testToken(), client, operator); + + // Calculate current lockup total + uint256 oldLockupTotal = railBefore.lockupFixed + (railBefore.paymentRate * railBefore.lockupPeriod); + + // Calculate new lockup total + uint256 newLockupTotal = newFixedLockup + (newRate * newLockupPeriod); + + // Modify rail settings + vm.startPrank(operator); + + // First modify rate if needed + if (newRate != railBefore.paymentRate) { + paymentsContract.modifyRailPayment(railId, newRate, 0); + } + + // Then modify lockup parameters + if (newLockupPeriod != railBefore.lockupPeriod || newFixedLockup != railBefore.lockupFixed) { + paymentsContract.modifyRailLockup(railId, newLockupPeriod, newFixedLockup); + } + + vm.stopPrank(); + + // Verify changes + Payments.RailView memory railAfter = paymentsContract.getRail(railId); + + assertEq(railAfter.paymentRate, newRate, "Rail payment rate not updated correctly"); + + assertEq(railAfter.lockupPeriod, newLockupPeriod, "Rail lockup period not updated correctly"); + + assertEq(railAfter.lockupFixed, newFixedLockup, "Rail fixed lockup 
not updated correctly"); + + // Get operator allowance usage after modifications + (,,, uint256 rateUsageAfter, uint256 lockupUsageAfter,) = + paymentsContract.operatorApprovals(baseHelper.testToken(), client, operator); + + // Verify rate usage changes correctly + if (newRate > railBefore.paymentRate) { + // Rate increased + assertEq( + rateUsageAfter, + rateUsageBefore + (newRate - railBefore.paymentRate), + "Rate usage not increased correctly after rate increase" + ); + } else if (newRate < railBefore.paymentRate) { + // Rate decreased + assertEq( + rateUsageBefore, + rateUsageAfter + (railBefore.paymentRate - newRate), + "Rate usage not decreased correctly after rate decrease" + ); + } else { + // Rate unchanged + assertEq(rateUsageBefore, rateUsageAfter, "Rate usage changed unexpectedly when rate was not modified"); + } + + // Verify lockup usage changes correctly + if (newLockupTotal > oldLockupTotal) { + // Lockup increased + assertEq( + lockupUsageAfter, + lockupUsageBefore + (newLockupTotal - oldLockupTotal), + "Lockup usage not increased correctly after lockup increase" + ); + } else if (newLockupTotal < oldLockupTotal) { + // Lockup decreased + assertEq( + lockupUsageBefore, + lockupUsageAfter + (oldLockupTotal - newLockupTotal), + "Lockup usage not decreased correctly after lockup decrease" + ); + } else { + // Lockup unchanged + assertEq( + lockupUsageBefore, lockupUsageAfter, "Lockup usage changed unexpectedly when lockup was not modified" + ); + } + } +} diff --git a/service_contracts/test/payments/mocks/ExtraFeeToken.sol b/service_contracts/test/payments/mocks/ExtraFeeToken.sol new file mode 100644 index 00000000..7b80f3cb --- /dev/null +++ b/service_contracts/test/payments/mocks/ExtraFeeToken.sol @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.27; + +import {ERC20} from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; + +/** + * This token decreases the sender balance by more than the value parameter + */ +contract ExtraFeeToken is ERC20 { + address private constant FEE_RECIPIENT = 0x0FeefeefeEFeeFeefeEFEEFEEfEeFEeFeeFeEfEe; + uint256 public transferFee; + + constructor(uint256 _transferFee) ERC20("FeeToken", "FEE") { + transferFee = _transferFee; + } + + function setFeeBips(uint256 bips) public { + transferFee = bips; + } + + function mint(address to, uint256 value) public { + _mint(to, value); + } + + function transfer(address to, uint256 value) public override returns (bool) { + _transfer(msg.sender, to, value); + _transfer(msg.sender, FEE_RECIPIENT, transferFee); + return true; + } + + function transferFrom(address from, address to, uint256 value) public override returns (bool) { + _spendAllowance(from, msg.sender, value); + _transfer(from, to, value); + _transfer(from, FEE_RECIPIENT, transferFee); + return true; + } +} diff --git a/service_contracts/test/payments/mocks/MockERC20.sol b/service_contracts/test/payments/mocks/MockERC20.sol new file mode 100644 index 00000000..7266c9d2 --- /dev/null +++ b/service_contracts/test/payments/mocks/MockERC20.sol @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.27; + +import {ERC20Permit} from "@openzeppelin/contracts/token/ERC20/extensions/ERC20Permit.sol"; +import {ECDSA} from "@openzeppelin/contracts/utils/cryptography/ECDSA.sol"; +import {IERC3009} from "@payments/interfaces/IERC3009.sol"; +import {ERC20} from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; + +/** + * @title MockERC20 + * @dev A mock ERC20 token with permit (ERC-2612) and 
transferWithAuthorization (ERC-3009) functionality for testing purposes. + */ +contract MockERC20 is ERC20, ERC20Permit, IERC3009 { + // --- ERC-3009 State and Constants --- + mapping(address => mapping(bytes32 => bool)) private _authorizationStates; + + bytes32 private constant _TRANSFER_WITH_AUTHORIZATION_TYPEHASH = keccak256( + "TransferWithAuthorization(address from,address to,uint256 value,uint256 validAfter,uint256 validBefore,bytes32 nonce)" + ); + bytes32 private constant _RECEIVE_WITH_AUTHORIZATION_TYPEHASH = keccak256( + "ReceiveWithAuthorization(address from,address to,uint256 value,uint256 validAfter,uint256 validBefore,bytes32 nonce)" + ); + + bytes32 private immutable _HASHED_NAME; + bytes32 private constant _HASHED_VERSION = keccak256("1"); + + // keccak256("Permit(address owner,address spender,uint256 value,uint256 nonce,uint256 deadline)"); + bytes32 private constant _PERMIT_TYPEHASH = 0x6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c9; + // keccak256("EIP712Domain(string name,string version,uint256 chainId,address verifyingContract)"); + bytes32 private constant _TYPE_HASH = 0x8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f; + + uint256 private immutable _CACHED_CHAIN_ID; + bytes32 private immutable _CACHED_DOMAIN_SEPARATOR; + + // --- ERC-3009 Event --- + event AuthorizationUsed(address indexed authorizer, bytes32 indexed nonce); + + constructor(string memory name, string memory symbol) ERC20(name, symbol) ERC20Permit(name) { + _HASHED_NAME = keccak256(abi.encode(name)); + _CACHED_CHAIN_ID = block.chainid; + _CACHED_DOMAIN_SEPARATOR = _buildDomainSeparator(_TYPE_HASH, _HASHED_NAME, _HASHED_VERSION); + } + + // Mint tokens for testing + function mint(address to, uint256 amount) public { + _mint(to, amount); + } + + // --- ERC-3009 Implementation --- + + /** + * @notice Execute a transfer with a signed authorization + * @param from Payer's address (Authorizer) + * @param to Payee's address + * @param value Amount to be transferred + * @param validAfter The time after which this is valid (unix time) + * @param validBefore The time before which this is valid (unix time) + * @param nonce Unique nonce + * @param v v of the signature + * @param r r of the signature + * @param s s of the signature + */ + function transferWithAuthorization( + address from, + address to, + uint256 value, + uint256 validAfter, + uint256 validBefore, + bytes32 nonce, + uint8 v, + bytes32 r, + bytes32 s + ) external { + require(block.timestamp > validAfter, "EIP3009: authorization not yet valid"); + require(block.timestamp < validBefore, "EIP3009: authorization expired"); + require(!_authorizationStates[from][nonce], "EIP3009: authorization already used"); + + bytes32 structHash = keccak256( + abi.encode(_TRANSFER_WITH_AUTHORIZATION_TYPEHASH, from, to, value, validAfter, validBefore, nonce) + ); + + bytes32 digest = _hashTypedDataV4(structHash); + address signer = ECDSA.recover(digest, v, r, s); + require(signer == from, "Invalid signature"); + + _authorizationStates[from][nonce] = true; + emit AuthorizationUsed(from, nonce); + + _transfer(from, to, value); + } + + /** + * @notice Receive a transfer with a signed authorization from the payer + * @dev This has an additional check to ensure that the payee's address matches + * the caller of this function to prevent front-running attacks. 
(See security + considerations) + * @param _from Payer's address (Authorizer) + * @param _to Payee's address + * @param _value Amount to be transferred + * @param _validAfter The time after which this is valid (unix time) + * @param _validBefore The time before which this is valid (unix time) + * @param _nonce Unique nonce + * @param _v v of the signature + * @param _r r of the signature + * @param _s s of the signature + */ + function receiveWithAuthorization( + address _from, + address _to, + uint256 _value, + uint256 _validAfter, + uint256 _validBefore, + bytes32 _nonce, + uint8 _v, + bytes32 _r, + bytes32 _s + ) external { + require(_to == msg.sender, "EIP3009: caller must be the recipient"); + require(block.timestamp > _validAfter, "EIP3009: authorization not yet valid"); + require(block.timestamp < _validBefore, "EIP3009: authorization expired"); + require(!_authorizationStates[_from][_nonce], "EIP3009: authorization already used"); + _requireValidRecipient(_to); + + address recoveredAddress = _recover( + _v, + _r, + _s, + abi.encode(_RECEIVE_WITH_AUTHORIZATION_TYPEHASH, _from, _to, _value, _validAfter, _validBefore, _nonce) + ); + require(recoveredAddress == _from, "EIP3009: invalid signature"); + + _authorizationStates[_from][_nonce] = true; + emit AuthorizationUsed(_from, _nonce); + + _transfer(_from, _to, _value); + } + + function authorizationState(address authorizer, bytes32 nonce) external view returns (bool) { + return _authorizationStates[authorizer][nonce]; + } + + function _requireValidRecipient(address _recipient) internal view { + require( + _recipient != address(0) && _recipient != address(this), + "MockERC20: cannot transfer tokens to the token contract or the zero address" + ); + } + + function _recover(uint8 _v, bytes32 _r, bytes32 _s, bytes memory _typeHashAndData) + internal + view + returns (address) + { + bytes32 digest = keccak256(abi.encodePacked("\x19\x01", domainSeparator(), keccak256(_typeHashAndData))); + address recovered = ecrecover(digest, _v, _r, _s); + require(recovered != address(0), "EIP712: invalid signature"); + return recovered; + } + + function domainSeparator() public view returns (bytes32) { + if (block.chainid == _CACHED_CHAIN_ID) { + return _CACHED_DOMAIN_SEPARATOR; + } else { + return _buildDomainSeparator(_TYPE_HASH, _HASHED_NAME, _HASHED_VERSION); + } + } + + function _buildDomainSeparator(bytes32 _typeHash, bytes32 _name, bytes32 _version) private view returns (bytes32) { + return keccak256(abi.encode(_typeHash, _name, _version, block.chainid, address(this))); + } +} diff --git a/service_contracts/test/payments/mocks/MockFeeOnTransferTokenWithPermit.sol b/service_contracts/test/payments/mocks/MockFeeOnTransferTokenWithPermit.sol new file mode 100644 index 00000000..0ec8e326 --- /dev/null +++ b/service_contracts/test/payments/mocks/MockFeeOnTransferTokenWithPermit.sol @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.27; + +import {ERC20, ERC20Permit} from "@openzeppelin/contracts/token/ERC20/extensions/ERC20Permit.sol"; + +contract MockFeeOnTransferTokenWithPermit is ERC20Permit { + uint256 public feePercentage; // Fee in basis points (100 = 1%) + + constructor(string memory name, string memory symbol, uint256 _feePercentage) + ERC20(name, symbol) + ERC20Permit(name) + { + feePercentage = _feePercentage; + } + + function mint(address to, uint256 amount) public { + _mint(to, amount); + } + + function setFeePercentage(uint256 _feePercentage) public { + feePercentage = _feePercentage; + } + 
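+ // Hypothetical helper (not part of the original mock): preview the fee that
+ // _transferWithFee below deducts for a given amount, using the same
+ // basis-points math. Useful when a test needs the expected net amount.
+ function previewFee(uint256 amount) public view returns (uint256 fee, uint256 netAmount) {
+ fee = (amount * feePercentage) / 10000;
+ netAmount = amount - fee;
+ }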
+ function transfer(address to, uint256 amount) public override returns (bool) { + return _transferWithFee(_msgSender(), to, amount); + } + + function transferFrom(address from, address to, uint256 amount) public override returns (bool) { + address spender = _msgSender(); + _spendAllowance(from, spender, amount); + return _transferWithFee(from, to, amount); + } + + function _transferWithFee(address from, address to, uint256 amount) internal returns (bool) { + uint256 fee = (amount * feePercentage) / 10000; + uint256 actualAmount = amount - fee; + + // Burn the fee (simulating fee-on-transfer) + _transfer(from, address(0xdead), fee); + _transfer(from, to, actualAmount); + + return true; + } +} diff --git a/service_contracts/test/payments/mocks/MockValidator.sol b/service_contracts/test/payments/mocks/MockValidator.sol new file mode 100644 index 00000000..43221d6a --- /dev/null +++ b/service_contracts/test/payments/mocks/MockValidator.sol @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.27; + +import {IValidator} from "@payments/Payments.sol"; + +contract MockValidator is IValidator { + enum ValidatorMode { + STANDARD, // Approves all payments as proposed + REDUCE_AMOUNT, // Reduces payment amount by a percentage + REDUCE_DURATION, // Settles for fewer epochs than requested + CUSTOM_RETURN, // Returns specific values set by the test + MALICIOUS // Returns invalid values + + } + + ValidatorMode public mode = ValidatorMode.STANDARD; // Default to STANDARD mode + uint256 public modificationFactor; // Percentage (0-100) for reductions + uint256 public customAmount; + uint256 public customUpto; + string public customNote; + + // Storage for railTerminated calls + uint256 public lastTerminatedRailId; + address public lastTerminator; + uint256 public lastEndEpoch; + bool public railTerminatedCalled; + + constructor(ValidatorMode _mode) { + mode = _mode; + modificationFactor = 100; // 100% = no modification by default + } + + function configure(uint256 _modificationFactor) external { + require(_modificationFactor <= 100, "Factor must be between 0-100"); + modificationFactor = _modificationFactor; + } + + // Set custom return values for CUSTOM_RETURN mode + function setCustomValues(uint256 _amount, uint256 _upto, string calldata _note) external { + customAmount = _amount; + customUpto = _upto; + customNote = _note; + } + + // Change the validator's mode + function setMode(ValidatorMode _mode) external { + mode = _mode; + } + + function validatePayment( + uint256, /* railId */ + uint256 proposedAmount, + uint256 fromEpoch, + uint256 toEpoch, + uint256 /* rate */ + ) external view override returns (ValidationResult memory result) { + if (mode == ValidatorMode.STANDARD) { + return ValidationResult({ + modifiedAmount: proposedAmount, + settleUpto: toEpoch, + note: "Standard approved payment" + }); + } else if (mode == ValidatorMode.REDUCE_AMOUNT) { + uint256 reducedAmount = (proposedAmount * modificationFactor) / 100; + return ValidationResult({ + modifiedAmount: reducedAmount, + settleUpto: toEpoch, + note: "Validator reduced payment amount" + }); + } else if (mode == ValidatorMode.REDUCE_DURATION) { + uint256 totalEpochs = toEpoch - fromEpoch; + uint256 reducedEpochs = (totalEpochs * modificationFactor) / 100; + uint256 reducedEndEpoch = fromEpoch + reducedEpochs; + + // Calculate reduced amount proportionally + uint256 reducedAmount = (proposedAmount * reducedEpochs) / totalEpochs; + + return ValidationResult({ + modifiedAmount: reducedAmount, + settleUpto: 
reducedEndEpoch, + note: "Validator reduced settlement duration" + }); + } else if (mode == ValidatorMode.CUSTOM_RETURN) { + return ValidationResult({modifiedAmount: customAmount, settleUpto: customUpto, note: customNote}); + } else { + // Malicious mode attempts to return invalid values + return ValidationResult({ + modifiedAmount: proposedAmount * 2, // Try to double the payment + settleUpto: toEpoch + 10, // Try to settle beyond the requested range + note: "Malicious validator attempting to manipulate payment" + }); + } + } + + function railTerminated(uint256 railId, address terminator, uint256 endEpoch) external override { + lastTerminatedRailId = railId; + lastTerminator = terminator; + lastEndEpoch = endEpoch; + railTerminatedCalled = true; + } +} diff --git a/service_contracts/test/pdp/BitOps.t.sol b/service_contracts/test/pdp/BitOps.t.sol new file mode 100644 index 00000000..7b462b76 --- /dev/null +++ b/service_contracts/test/pdp/BitOps.t.sol @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.13; + +import {Test} from "forge-std/Test.sol"; +import {BitOps} from "@pdp/BitOps.sol"; + +contract BitOpsTest is Test { + function testClzZero() public pure { + uint256 result = BitOps.clz(0); + assertEq(result, 256, "CLZ of 0 should be 256"); + } + + function testClzOne() public pure { + uint256 result = BitOps.clz(1); + assertEq(result, 255, "CLZ of 1 should be 255"); + } + + function testClzMaxUint256() public pure { + uint256 result = BitOps.clz(type(uint256).max); + assertEq(result, 0, "CLZ of max uint256 should be 0"); + } + + function testClzPowersOfTwo() public pure { + for (uint16 i = 0; i < 256; i++) { + uint256 input = 1 << i; + uint256 result = BitOps.clz(input); + assertEq( + result, + 255 - i, + string(abi.encodePacked("CLZ of 2^", vm.toString(i), " should be ", vm.toString(255 - i))) + ); + } + } + + function testClzSelectValues() public pure { + assertEq(BitOps.clz(0x000F), 252, "CLZ of 0x000F should be 252"); + assertEq(BitOps.clz(0x00FF), 248, "CLZ of 0x00FF should be 248"); + assertEq(BitOps.clz(0x0100), 247, "CLZ of 0x0100 should be 247"); + assertEq(BitOps.clz(0xFFFF), 240, "CLZ of 0xFFFF should be 240"); + assertEq(BitOps.clz(0x8000), 240, "CLZ of 0x8000 should be 240"); + assertEq(BitOps.clz(0x80000000), 56 * 4, "CLZ of 0x80000000 should be 56*4"); + assertEq(BitOps.clz(0x8FFFFFFF), 56 * 4, "CLZ of 0x8FFFFFFF should be 56*4"); + assertEq(BitOps.clz(0x8000000000000000), 48 * 4, "CLZ of 0x8000000000000000 should be 48*4"); + } + + function testCtzZero() public pure { + uint256 result = BitOps.ctz(0); + assertEq(result, 256, "CTZ of 0 should be 256"); + } + + function testCtz1LShift254() public pure { + uint256 result = BitOps.ctz(1 << 254); + assertEq(result, 254, "CTZ of 2^254 should be 254"); + } + + /// forge-config: default.allow_internal_expect_revert = true + function testCtzInputExceedsMaxInt256() public { + // Setup + uint256 maxInt256 = uint256(type(int256).max); + uint256 exceedingValue = maxInt256 + 1; + + // Expect the call to revert + vm.expectRevert("Input exceeds maximum int256 value"); + + // Call ctz with a value exceeding max int256 + BitOps.ctz(exceedingValue); + } + + function testCtzSelectValues() public pure { + assertEq(BitOps.ctz(0x000F), 0, "CTZ of 0x000F should be 0"); + assertEq(BitOps.ctz(0xFF00), 8, "CTZ of 0xFF00 should be 8"); + assertEq(BitOps.ctz(0x8000), 15, "CTZ of 0x8000 should be 15"); + assertEq(BitOps.ctz(0x80000000), 31, "CTZ of 0x80000000 should be 31"); + } +} diff --git 
a/service_contracts/test/pdp/Cids.t.sol b/service_contracts/test/pdp/Cids.t.sol new file mode 100644 index 00000000..46fcd25a --- /dev/null +++ b/service_contracts/test/pdp/Cids.t.sol @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.13; + +import {Test} from "forge-std/Test.sol"; +import {Cids} from "@pdp/Cids.sol"; + +contract CidsTest is Test { + function testDigestRoundTrip() public pure { + bytes32 digest = 0xbeadcafefacedeedfeedbabedeadbeefbeadcafefacedeedfeedbabedeadbeef; + Cids.Cid memory c = Cids.CommPv2FromDigest(0, 10, digest); + assertEq(c.data.length, 39); + bytes32 foundDigest = Cids.digestFromCid(c); + assertEq(foundDigest, digest, "digest equal"); + + (uint256 padding, uint8 height, uint256 digestOffset) = Cids.validateCommPv2(c); + assertEq(padding, 0, "padding"); + assertEq(height, 10, "height"); + + // assert that digest is same at digestOffset + for (uint256 i = 0; i < 32; i++) { + assertEq(bytes1(digest[i]), c.data[digestOffset + i], "bytes"); + } + } + + function testPieceSize() public pure { + assertEq(Cids.pieceSize(0, 30), 1 << (30 + 5)); + assertEq(Cids.pieceSize(127, 30), (1 << (30 + 5)) - 128); + assertEq(Cids.pieceSize(128, 30), (1 << (30 + 5)) - 129); + } + + function testLeafCount() public pure { + assertEq(Cids.leafCount(0, 30), 1 << 30); + assertEq(Cids.leafCount(127, 30), (1 << 30) - 4); + assertEq(Cids.leafCount(128, 30), (1 << 30) - 4); + } + + /// forge-config: default.allow_internal_expect_revert = true + function testDigestTooShort() public { + bytes memory byteArray = new bytes(31); + for (uint256 i = 0; i < 31; i++) { + byteArray[i] = bytes1(uint8(i)); + } + Cids.Cid memory c = Cids.Cid(byteArray); + vm.expectRevert("Cid data is too short"); + Cids.digestFromCid(c); + } + + function testUvarintLength() public pure { + assertEq(Cids._uvarintLength(0), 1); + assertEq(Cids._uvarintLength(1), 1); + assertEq(Cids._uvarintLength(127), 1); + assertEq(Cids._uvarintLength(128), 2); + assertEq(Cids._uvarintLength(16383), 2); + assertEq(Cids._uvarintLength(16384), 3); + assertEq(Cids._uvarintLength(2097151), 3); + assertEq(Cids._uvarintLength(2097152), 4); + assertEq(Cids._uvarintLength(type(uint256).max), 37); + } + + function testUvarintRoundTrip() public pure { + uint256[] memory values = new uint256[](7); + values[0] = 0; + values[1] = 1; + values[2] = 127; + values[3] = 128; + values[4] = 16384; + values[5] = 2097152; + values[6] = type(uint256).max; + + uint256 totalLength = 0; + for (uint256 i = 0; i < values.length; i++) { + totalLength += Cids._uvarintLength(values[i]); + } + bytes memory buffer = new bytes(totalLength); + uint256 offset = 0; + + // Write all values + for (uint256 i = 0; i < values.length; i++) { + offset = Cids._writeUvarint(buffer, offset, values[i]); + } + + // Read all values and verify + uint256 currentOffset = 0; + for (uint256 i = 0; i < values.length; i++) { + (uint256 readValue, uint256 newOffset) = Cids._readUvarint(buffer, currentOffset); + assertEq(readValue, values[i], "Uvarint round trip failed"); + currentOffset = newOffset; + } + } + + /// forge-config: default.allow_internal_expect_revert = true + function testReadUvarintIncomplete() public { + // Test reading an incomplete uvarint that should revert + bytes memory incompleteUvarint = hex"80"; // A single byte indicating more to come, but nothing follows + vm.expectRevert(); // Expect any revert, specifically index out of bounds + Cids._readUvarint(incompleteUvarint, 0); + } + + /// forge-config: 
default.allow_internal_expect_revert = true + function testReadUvarintMSBSetOnLastByte() public { + bytes memory incompleteUvarint2 = hex"ff81"; // MSB set on last byte. + vm.expectRevert(); + Cids._readUvarint(incompleteUvarint2, 0); + } + + function testReadUvarintWithOffset() public pure { + // Test reading with an offset + bytes memory bufferWithOffset = hex"00010203040506078001"; // Value 128 (8001) at offset 8 + (uint256 readValue, uint256 newOffset) = Cids._readUvarint(bufferWithOffset, 8); + assertEq(readValue, 128, "Read uvarint with offset failed"); + assertEq(newOffset, 10, "Offset after reading with offset incorrect"); + } + + function testValidateCommPv2FRC0069() public pure { + // The values are taken from FRC-0069 specification + // Test vector 1: height=4, padding=0 + bytes memory cidData1 = hex"01559120220004496dae0cc9e265efe5a006e80626a5dc5c409e5d3155c13984caf6c8d5cfd605"; + Cids.Cid memory cid1 = Cids.Cid(cidData1); + (uint256 padding1, uint8 height1, uint256 digestOffset1) = Cids.validateCommPv2(cid1); + assertEq(padding1, 0, "CID 1 padding"); + assertEq(height1, 4, "CID 1 height"); + + // Test vector 2: height=2, padding=0 + bytes memory cidData2 = hex"015591202200023731bb99ac689f66eef5973e4a94da188f4ddcae580724fc6f3fd60dfd488333"; + Cids.Cid memory cid2 = Cids.Cid(cidData2); + (uint256 padding2, uint8 height2, uint256 digestOffset2) = Cids.validateCommPv2(cid2); + assertEq(padding2, 0, "CID 2 padding"); + assertEq(height2, 2, "CID 2 height"); + + // Test vector 3: height=5, padding=504 + bytes memory cidData3 = hex"0155912023f80305de6815dcb348843215a94de532954b60be550a4bec6e74555665e9a5ec4e0f3c"; + Cids.Cid memory cid3 = Cids.Cid(cidData3); + (uint256 padding3, uint8 height3, uint256 digestOffset3) = Cids.validateCommPv2(cid3); + assertEq(padding3, 504, "CID 3 padding"); + assertEq(height3, 5, "CID 3 height"); + + // Verify that digestOffset points to valid data by checking a few bytes from the digest + // For CID 1 + assertEq(cid1.data[digestOffset1], bytes1(0x49), "CID 1 digest first byte"); + // For CID 2 + assertEq(cid2.data[digestOffset2], bytes1(0x37), "CID 2 digest first byte"); + // For CID 3 + assertEq(cid3.data[digestOffset3], bytes1(0xde), "CID 3 digest first byte"); + } +} diff --git a/service_contracts/test/pdp/ERC1967Proxy.t.sol b/service_contracts/test/pdp/ERC1967Proxy.t.sol new file mode 100644 index 00000000..1f603ad6 --- /dev/null +++ b/service_contracts/test/pdp/ERC1967Proxy.t.sol @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.20; + +import {Test} from "forge-std/Test.sol"; +import {PDPVerifier} from "@pdp/PDPVerifier.sol"; +import {ERC1967Proxy} from "@openzeppelin/contracts/proxy/ERC1967/ERC1967Proxy.sol"; +import {MyERC1967Proxy} from "@pdp/ERC1967Proxy.sol"; + +contract ERC1967ProxyTest is Test { + PDPVerifier public implementation; + PDPVerifier public proxy; + address owner = address(0x123); + + function setUp() public { + // Set owner for testing + vm.startPrank(owner); + // Deploy implementation contract + implementation = new PDPVerifier(); + + // Deploy proxy pointing to implementation + bytes memory initData = abi.encodeWithSelector( + PDPVerifier.initialize.selector, + uint256(150) // challengeFinality + ); + + ERC1967Proxy proxyContract = new MyERC1967Proxy(address(implementation), initData); + + // Get PDPVerifier interface on proxy address + proxy = PDPVerifier(address(proxyContract)); + } + + function testInitialSetup() public view { + assertEq(proxy.getChallengeFinality(), 150); + 
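+ // Both reads go through the proxy address; ERC-1967 delegatecall routes them to the implementation while state stays in the proxy.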
assertEq(proxy.owner(), owner); + } + + function assertImplementationEquals(address checkImpl) public view { + bytes32 implementationSlot = 0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc; + assertEq(address(uint160(uint256(vm.load(address(proxy), implementationSlot)))), address(checkImpl)); + } + + function testUpgradeImplementation() public { + assertImplementationEquals(address(implementation)); + + // Deploy new implementation + PDPVerifier newImplementation = new PDPVerifier(); + + // Upgrade proxy to new implementation + proxy.upgradeToAndCall(address(newImplementation), ""); + + // Verify upgrade was successful + assertImplementationEquals(address(newImplementation)); + assertEq(proxy.getChallengeFinality(), 150); // State is preserved + assertEq(proxy.owner(), owner); // Owner is preserved + } + + function testUpgradeFromNonOwnerNoGood() public { + PDPVerifier newImplementation = new PDPVerifier(); + + vm.stopPrank(); + vm.startPrank(address(0xdead)); + + vm.expectRevert(); + proxy.upgradeToAndCall(address(newImplementation), ""); + assertEq(proxy.getChallengeFinality(), 150); // State is preserved + assertEq(proxy.owner(), owner); // Owner is preserved + } + + function testOwnershipTransfer() public { + vm.stopPrank(); + vm.startPrank(owner); + // Verify initial owner + assertEq(proxy.owner(), owner); + + // Use an address distinct from the current owner so the transfer is observable + address newOwner = address(0x456); + + // Transfer ownership + proxy.transferOwnership(newOwner); + + // Verify ownership changed + assertEq(proxy.owner(), newOwner); + } + + function testTransferFromNonOwnerNoGood() public { + // Switch to non-owner account + vm.stopPrank(); + vm.startPrank(address(0xdead)); + + address newOwner = address(0x456); + + // Attempt transfer should fail + vm.expectRevert(); + proxy.transferOwnership(newOwner); + + // Verify owner unchanged + assertEq(proxy.owner(), owner); + } +} diff --git a/service_contracts/test/pdp/Fees.t.sol b/service_contracts/test/pdp/Fees.t.sol new file mode 100644 index 00000000..dae75b32 --- /dev/null +++ b/service_contracts/test/pdp/Fees.t.sol @@ -0,0 +1,200 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.13; + +import {Test} from "forge-std/Test.sol"; +import {PDPFees} from "@pdp/Fees.sol"; + +contract PDPFeesTest is Test { + uint256 constant EPOCHS_PER_DAY = 2880; + + function computeRewardPerPeriod(uint64 filUsdPrice, int32 filUsdPriceExpo, uint256 rawSize) + internal + pure + returns (uint256) + { + uint256 rewardPerEpochPerByte; + if (filUsdPriceExpo >= 0) { + rewardPerEpochPerByte = (PDPFees.ESTIMATED_MONTHLY_TIB_STORAGE_REWARD_USD * PDPFees.FIL_TO_ATTO_FIL) + / (PDPFees.TIB_IN_BYTES * PDPFees.EPOCHS_PER_MONTH * filUsdPrice * (10 ** uint32(filUsdPriceExpo))); + } else { + rewardPerEpochPerByte = ( + PDPFees.ESTIMATED_MONTHLY_TIB_STORAGE_REWARD_USD * PDPFees.FIL_TO_ATTO_FIL + * (10 ** uint32(-filUsdPriceExpo)) + ) / (PDPFees.TIB_IN_BYTES * PDPFees.EPOCHS_PER_MONTH * filUsdPrice); + } + uint256 rewardPerPeriod = rewardPerEpochPerByte * EPOCHS_PER_DAY * rawSize; + return rewardPerPeriod; + } + + /// forge-config: default.allow_internal_expect_revert = true + function testProofFeeWithGasFeeBoundZeroGasFee() public { + vm.expectRevert("failed to validate: estimated gas fee must be greater than 0"); + vm.fee(1000); + PDPFees.proofFeeWithGasFeeBound(0, 5, 0, 1e18, EPOCHS_PER_DAY); + } + + /// forge-config: default.allow_internal_expect_revert = true + function testProofFeeWithGasFeeBoundZeroAttoFilUsdPrice() public { + vm.expectRevert("failed to validate: AttoFIL price must be 
greater than 0"); + PDPFees.proofFeeWithGasFeeBound(1, 0, 0, 1e18, EPOCHS_PER_DAY); + } + + /// forge-config: default.allow_internal_expect_revert = true + function testProofFeeWithGasFeeBoundZeroRawSize() public { + vm.expectRevert("failed to validate: raw size must be greater than 0"); + PDPFees.proofFeeWithGasFeeBound(1, 5, 0, 0, EPOCHS_PER_DAY); + } + + function testProofFeeWithGasFeeBoundHighGasFee() public view { + uint64 filUsdPrice = 5; + int32 filUsdPriceExpo = 0; + uint256 rawSize = 1e18; + + uint256 rewardPerPeriod = computeRewardPerPeriod(filUsdPrice, filUsdPriceExpo, rawSize); + + uint256 gasLimitRight = (rewardPerPeriod * PDPFees.GAS_LIMIT_RIGHT_PERCENTAGE) / 100; + + uint256 estimatedGasFee = gasLimitRight; + + uint256 fee = + PDPFees.proofFeeWithGasFeeBound(estimatedGasFee, filUsdPrice, filUsdPriceExpo, rawSize, EPOCHS_PER_DAY); + + assertEq(fee, 0, "Fee should be 0 when gas fee is high"); + } + + function testProofFeeWithGasFeeBoundMediumGasFee() public view { + uint64 filUsdPrice = 5; + int32 filUsdPriceExpo = 0; + uint256 rawSize = 1e18; + + uint256 rewardPerPeriod = computeRewardPerPeriod(filUsdPrice, filUsdPriceExpo, rawSize); + + uint256 gasLimitLeft = (rewardPerPeriod * PDPFees.GAS_LIMIT_LEFT_PERCENTAGE) / 100; + uint256 gasLimitRight = (rewardPerPeriod * PDPFees.GAS_LIMIT_RIGHT_PERCENTAGE) / 100; + + uint256 estimatedGasFee = (gasLimitLeft + gasLimitRight) / 2; + + uint256 fee = + PDPFees.proofFeeWithGasFeeBound(estimatedGasFee, filUsdPrice, filUsdPriceExpo, rawSize, EPOCHS_PER_DAY); + + uint256 expectedFee = gasLimitRight - estimatedGasFee; + + assertEq(fee, expectedFee, "Fee should be partially discounted"); + } + + function testProofFeeWithGasFeeBoundLowGasFee() public view { + uint64 filUsdPrice = 5; + int32 filUsdPriceExpo = 0; + uint256 rawSize = 1e18; + + uint256 rewardPerPeriod = computeRewardPerPeriod(filUsdPrice, filUsdPriceExpo, rawSize); + + uint256 gasLimitLeft = (rewardPerPeriod * PDPFees.GAS_LIMIT_LEFT_PERCENTAGE) / 100; + + uint256 estimatedGasFee = gasLimitLeft / 2; + + uint256 fee = + PDPFees.proofFeeWithGasFeeBound(estimatedGasFee, filUsdPrice, filUsdPriceExpo, rawSize, EPOCHS_PER_DAY); + + uint256 expectedFee = (rewardPerPeriod * PDPFees.PROOF_FEE_PERCENTAGE) / 100; + + assertEq(fee, expectedFee, "Fee should be full proof fee when gas fee is low"); + } + + function testProofFeeWithGasFeeBoundNegativeExponent() public view { + uint64 filUsdPrice = 5000; + int32 filUsdPriceExpo = -3; + uint256 rawSize = 1e18; + uint256 estimatedGasFee = 1e15; + + uint256 fee = + PDPFees.proofFeeWithGasFeeBound(estimatedGasFee, filUsdPrice, filUsdPriceExpo, rawSize, EPOCHS_PER_DAY); + assertTrue(fee > 0, "Fee should be positive with negative exponent"); + } + + function testProofFeeWithGasFeeBoundLargeRawSize() public view { + uint64 filUsdPrice = 5; + int32 filUsdPriceExpo = 0; + uint256 rawSize = 1e30; + uint256 estimatedGasFee = 1e15; + + uint256 fee = + PDPFees.proofFeeWithGasFeeBound(estimatedGasFee, filUsdPrice, filUsdPriceExpo, rawSize, EPOCHS_PER_DAY); + assertTrue(fee > 0, "Fee should be positive for large raw size"); + } + + function testProofFeeWithGasFeeBoundSmallRawSize() public view { + uint64 filUsdPrice = 5; + int32 filUsdPriceExpo = 0; + uint256 rawSize = 1; + + uint256 rewardPerPeriod = computeRewardPerPeriod(filUsdPrice, filUsdPriceExpo, rawSize); + uint256 gasLimitLeft = (rewardPerPeriod * PDPFees.GAS_LIMIT_LEFT_PERCENTAGE) / 100; + + uint256 estimatedGasFee = gasLimitLeft / 2; + + uint256 fee = + PDPFees.proofFeeWithGasFeeBound(estimatedGasFee, 
filUsdPrice, filUsdPriceExpo, rawSize, EPOCHS_PER_DAY); + + uint256 expectedFee = (rewardPerPeriod * PDPFees.PROOF_FEE_PERCENTAGE) / 100; + + assertEq(fee, expectedFee, "Fee should be full proof fee when gas fee is low"); + } + + function testProofFeeWithGasFeeBoundHalfDollarFil() public view { + uint64 filUsdPrice = 5; + int32 filUsdPriceExpo = -1; // 0.5 USD per FIL + uint256 rawSize = 1e18; + uint256 estimatedGasFee = 1e15; + + uint256 fee = + PDPFees.proofFeeWithGasFeeBound(estimatedGasFee, filUsdPrice, filUsdPriceExpo, rawSize, EPOCHS_PER_DAY); + assertTrue(fee > 0, "Fee should be positive with FIL price at $0.50"); + + // With lower FIL price, fee should be higher than when price is $5 + uint256 feeAt5Dollars = + PDPFees.proofFeeWithGasFeeBound(estimatedGasFee, filUsdPrice, 0, rawSize, EPOCHS_PER_DAY); + assertTrue(fee > feeAt5Dollars, "Fee should be higher with lower FIL price"); + } + + function testSybilFee() public pure { + uint256 fee = PDPFees.sybilFee(); + assertEq(fee, PDPFees.SYBIL_FEE, "Sybil fee should match the constant"); + } + + function testProofFeeWithGasFeeBoundAtLeftBoundary() public view { + uint64 filUsdPrice = 5; + int32 filUsdPriceExpo = 0; + uint256 rawSize = 1e18; + + uint256 rewardPerPeriod = computeRewardPerPeriod(filUsdPrice, filUsdPriceExpo, rawSize); + uint256 gasLimitLeft = (rewardPerPeriod * PDPFees.GAS_LIMIT_LEFT_PERCENTAGE) / 100; + + // Test exactly at gasLimitLeft + uint256 estimatedGasFee = gasLimitLeft; + + uint256 fee = + PDPFees.proofFeeWithGasFeeBound(estimatedGasFee, filUsdPrice, filUsdPriceExpo, rawSize, EPOCHS_PER_DAY); + + uint256 expectedFee = (rewardPerPeriod * PDPFees.PROOF_FEE_PERCENTAGE) / 100; + assertEq(fee, expectedFee, "Fee should be full proof fee at left boundary"); + } + + function testProofFeeWithGasFeeBoundNearRightBoundary() public view { + uint64 filUsdPrice = 5; + int32 filUsdPriceExpo = 0; + uint256 rawSize = 1e18; + + uint256 rewardPerPeriod = computeRewardPerPeriod(filUsdPrice, filUsdPriceExpo, rawSize); + uint256 gasLimitRight = (rewardPerPeriod * PDPFees.GAS_LIMIT_RIGHT_PERCENTAGE) / 100; + + // Test at gasLimitRight - 1 + uint256 estimatedGasFee = gasLimitRight - 1; + + uint256 fee = + PDPFees.proofFeeWithGasFeeBound(estimatedGasFee, filUsdPrice, filUsdPriceExpo, rawSize, EPOCHS_PER_DAY); + + uint256 expectedFee = 1; // Should be gasLimitRight - estimatedGasFee = 1 + assertEq(fee, expectedFee, "Fee should be 1 when estimatedGasFee is just below right boundary"); + } +} diff --git a/service_contracts/test/pdp/PDPVerifier.t.sol b/service_contracts/test/pdp/PDPVerifier.t.sol new file mode 100644 index 00000000..f6e48568 --- /dev/null +++ b/service_contracts/test/pdp/PDPVerifier.t.sol @@ -0,0 +1,1971 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.13; + +import {UUPSUpgradeable} from "@openzeppelin/contracts-upgradeable/proxy/utils/UUPSUpgradeable.sol"; +import {Test} from "forge-std/Test.sol"; +import {Cids} from "@pdp/Cids.sol"; +import {PDPVerifier, PDPListener} from "@pdp/PDPVerifier.sol"; +import {MyERC1967Proxy} from "@pdp/ERC1967Proxy.sol"; +import {ProofUtil} from "./ProofUtil.sol"; +import {PDPFees} from "@pdp/Fees.sol"; +import {PDPRecordKeeper} from "@pdp/SimplePDPService.sol"; +import {IPDPTypes} from "@pdp/interfaces/IPDPTypes.sol"; +import {IPDPEvents} from "@pdp/interfaces/IPDPEvents.sol"; +import {PieceHelper} from "./PieceHelper.t.sol"; +import {ProofBuilderHelper} from "./ProofBuilderHelper.t.sol"; +import {PythStructs} from "@pythnetwork/pyth-sdk-solidity/PythStructs.sol"; 
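+// The Pyth imports below are assumed to back PDPVerifier's FIL/USD-priced proof fees (the same pricing exercised in Fees.t.sol above).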
+import {IPyth} from "@pythnetwork/pyth-sdk-solidity/IPyth.sol"; +import {NEW_DATA_SET_SENTINEL} from "@pdp/PDPVerifier.sol"; + +contract PDPVerifierDataSetCreateDeleteTest is Test, PieceHelper { + TestingRecordKeeperService listener; + PDPVerifier pdpVerifier; + bytes empty = new bytes(0); + + function setUp() public { + PDPVerifier pdpVerifierImpl = new PDPVerifier(); + uint256 challengeFinality = 2; + bytes memory initializeData = abi.encodeWithSelector(PDPVerifier.initialize.selector, challengeFinality); + MyERC1967Proxy proxy = new MyERC1967Proxy(address(pdpVerifierImpl), initializeData); + pdpVerifier = PDPVerifier(address(proxy)); + listener = new TestingRecordKeeperService(); + } + + function testCreateDataSet() public { + Cids.Cid memory zeroPiece; + + vm.expectEmit(true, true, false, false); + emit IPDPEvents.DataSetCreated(1, address(this)); + + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + assertEq(setId, 1, "First data set ID should be 1"); + assertEq(pdpVerifier.getDataSetLeafCount(setId), 0, "Data set leaf count should be 0"); + + (address currentStorageProvider, address proposedStorageProvider) = pdpVerifier.getDataSetStorageProvider(setId); + assertEq(currentStorageProvider, address(this), "Data set storage provider should be the constructor sender"); + assertEq( + proposedStorageProvider, + address(0), + "Data set proposed storage provider should be initialized to zero address" + ); + + assertEq(pdpVerifier.getNextChallengeEpoch(setId), 0, "Data set challenge epoch should be zero"); + assertEq(pdpVerifier.pieceLive(setId, 0), false, "Data set piece should not be live"); + assertEq(pdpVerifier.getPieceCid(setId, 0).data, zeroPiece.data, "Uninitialized piece should be empty"); + assertEq(pdpVerifier.getPieceLeafCount(setId, 0), 0, "Uninitialized piece should have zero leaves"); + assertEq( + pdpVerifier.getDataSetListener(setId), + address(listener), + "Data set listener should be the constructor listener" + ); + } + + function testDeleteDataSet() public { + vm.expectEmit(true, true, false, false); + emit IPDPEvents.DataSetCreated(1, address(this)); + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + vm.expectEmit(true, true, false, false); + emit IPDPEvents.DataSetDeleted(setId, 0); + pdpVerifier.deleteDataSet(setId, empty); + vm.expectRevert("Data set not live"); + pdpVerifier.getDataSetLeafCount(setId); + } + + function testOnlyStorageProviderCanDeleteDataSet() public { + vm.expectEmit(true, true, false, false); + emit IPDPEvents.DataSetCreated(1, address(this)); + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + // Create a new address to act as a non-storage-provider + address nonStorageProvider = address(0x1234); + // Expect revert when non-storage-provider tries to delete the data set + vm.prank(nonStorageProvider); + vm.expectRevert("Only the storage provider can delete data sets"); + pdpVerifier.deleteDataSet(setId, empty); + + // Now verify the storage provider can delete the data set + vm.expectEmit(true, true, false, false); + emit IPDPEvents.DataSetDeleted(setId, 0); + pdpVerifier.deleteDataSet(setId, empty); + 
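+ // Once deleted, the data set is no longer live, so state accessors revert.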
vm.expectRevert("Data set not live"); + pdpVerifier.getDataSetStorageProvider(setId); + } + + // TODO: once we have addPieces we should test deletion of a non empty data set + function testCannotDeleteNonExistentDataSet() public { + // Test with data set ID 0 (which is never valid since IDs start from 1) + vm.expectRevert("Only the storage provider can delete data sets"); + pdpVerifier.deleteDataSet(0, empty); + + // Test with a data set ID that hasn't been created yet + vm.expectRevert("data set id out of bounds"); + pdpVerifier.deleteDataSet(999, empty); + } + + function testMethodsOnDeletedDataSetFails() public { + vm.expectEmit(true, true, false, false); + emit IPDPEvents.DataSetCreated(1, address(this)); + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + + vm.expectEmit(true, true, false, false); + emit IPDPEvents.DataSetDeleted(setId, 0); + pdpVerifier.deleteDataSet(setId, empty); + vm.expectRevert("Only the storage provider can delete data sets"); + pdpVerifier.deleteDataSet(setId, empty); + vm.expectRevert("Data set not live"); + pdpVerifier.getDataSetStorageProvider(setId); + vm.expectRevert("Data set not live"); + pdpVerifier.getDataSetLeafCount(setId); + vm.expectRevert("Data set not live"); + pdpVerifier.getDataSetListener(setId); + vm.expectRevert("Data set not live"); + pdpVerifier.getPieceCid(setId, 0); + vm.expectRevert("Data set not live"); + pdpVerifier.getPieceLeafCount(setId, 0); + vm.expectRevert("Data set not live"); + pdpVerifier.getNextChallengeEpoch(setId); + vm.expectRevert("Data set not live"); + pdpVerifier.addPieces(setId, address(0), new Cids.Cid[](1), empty); + } + + function testGetDataSetID() public { + vm.expectEmit(true, true, false, false); + emit IPDPEvents.DataSetCreated(1, address(this)); + pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + vm.expectEmit(true, true, false, false); + emit IPDPEvents.DataSetCreated(2, address(this)); + pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + assertEq(3, pdpVerifier.getNextDataSetId(), "Next data set ID should be 3"); + assertEq(3, pdpVerifier.getNextDataSetId(), "Next data set ID should be 3"); + } + + receive() external payable {} + + function testDataSetIdsStartFromOne() public { + // Test that data set IDs start from 1, not 0 + assertEq(pdpVerifier.getNextDataSetId(), 1, "Next data set ID should start at 1"); + + uint256 firstSetId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + assertEq(firstSetId, 1, "First data set ID should be 1, not 0"); + + uint256 secondSetId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + assertEq(secondSetId, 2, "Second data set ID should be 2"); + + assertEq(pdpVerifier.getNextDataSetId(), 3, "Next data set ID should be 3 after creating two data sets"); + } + + function testCreateDataSetFeeHandling() public { + uint256 sybilFee = PDPFees.sybilFee(); + + // Test 1: Fails when sending not enough for sybil fee + vm.expectRevert("sybil fee not met"); + pdpVerifier.addPieces{value: sybilFee - 1}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + + // 
Test 2: Returns funds over the sybil fee back to the sender + uint256 excessAmount = 1 ether; + uint256 initialBalance = address(this).balance; + + uint256 setId = pdpVerifier.addPieces{value: sybilFee + excessAmount}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + + uint256 finalBalance = address(this).balance; + uint256 refundedAmount = finalBalance - (initialBalance - sybilFee - excessAmount); + assertEq(refundedAmount, excessAmount, "Excess amount should be refunded"); + + // Additional checks to ensure the data set was created correctly + assertEq(pdpVerifier.getDataSetLeafCount(setId), 0, "Data set leaf count should be 0"); + (address currentStorageProvider, address proposedStorageProvider) = pdpVerifier.getDataSetStorageProvider(setId); + assertEq(currentStorageProvider, address(this), "Data set storage provider should be the constructor sender"); + assertEq( + proposedStorageProvider, + address(0), + "Data set proposed storage provider should be initialized to zero address" + ); + } + + function testCombinedCreateDataSetAndAddPieces() public { + uint256 sybilFee = PDPFees.sybilFee(); + bytes memory combinedExtraData = abi.encode(empty, empty); + + Cids.Cid[] memory pieces = new Cids.Cid[](2); + pieces[0] = makeSamplePiece(64); + pieces[1] = makeSamplePiece(128); + + vm.expectEmit(true, true, false, false); + emit IPDPEvents.DataSetCreated(1, address(this)); + + vm.expectEmit(true, true, false, false); + uint256[] memory expectedPieceIds = new uint256[](2); + expectedPieceIds[0] = 0; + expectedPieceIds[1] = 1; + emit IPDPEvents.PiecesAdded(1, expectedPieceIds, pieces); + + uint256 firstAdded = + pdpVerifier.addPieces{value: sybilFee}(NEW_DATA_SET_SENTINEL, address(listener), pieces, combinedExtraData); + + // Verify the data set was created correctly + assertEq(firstAdded, 1, "Returned data set ID should be 1"); + assertEq(pdpVerifier.getDataSetLeafCount(firstAdded), 192, "Data set leaf count should be 64 + 128"); + assertEq(pdpVerifier.getNextPieceId(firstAdded), 2, "Next piece ID should be 2"); + assertEq(pdpVerifier.getDataSetListener(firstAdded), address(listener), "Listener should be set correctly"); + + // Verify pieces were added correctly + assertTrue(pdpVerifier.pieceLive(firstAdded, 0), "First piece should be live"); + assertTrue(pdpVerifier.pieceLive(firstAdded, 1), "Second piece should be live"); + assertEq(pdpVerifier.getPieceLeafCount(firstAdded, 0), 64, "First piece leaf count should be 64"); + assertEq(pdpVerifier.getPieceLeafCount(firstAdded, 1), 128, "Second piece leaf count should be 128"); + } + + function testNewDataSetSentinelValue() public { + assertEq(NEW_DATA_SET_SENTINEL, 0, "Sentinel value should be 0"); + + uint256 sybilFee = PDPFees.sybilFee(); + bytes memory combinedExtraData = abi.encode(empty, empty); + Cids.Cid[] memory pieces = new Cids.Cid[](0); + + uint256 firstAdded = + pdpVerifier.addPieces{value: sybilFee}(NEW_DATA_SET_SENTINEL, address(listener), pieces, combinedExtraData); + + assertEq(firstAdded, 1, "Returned data set ID should be 1"); + assertEq(pdpVerifier.getDataSetLeafCount(firstAdded), 0, "Data set leaf count should be 0"); + } +} + +contract PDPVerifierStorageProviderTest is Test, PieceHelper { + PDPVerifier pdpVerifier; + TestingRecordKeeperService listener; + address public storageProvider; + address public nextStorageProvider; + address public nonStorageProvider; + bytes empty = new bytes(0); + + function setUp() public { + PDPVerifier pdpVerifierImpl = new PDPVerifier(); + bytes memory 
initializeData = abi.encodeWithSelector(PDPVerifier.initialize.selector, 2); + MyERC1967Proxy proxy = new MyERC1967Proxy(address(pdpVerifierImpl), initializeData); + pdpVerifier = PDPVerifier(address(proxy)); + listener = new TestingRecordKeeperService(); + + storageProvider = address(this); + nextStorageProvider = address(0x1234); + nonStorageProvider = address(0xffff); + } + + function testStorageProviderTransfer() public { + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + pdpVerifier.proposeDataSetStorageProvider(setId, nextStorageProvider); + (address currentStorageProviderStart, address proposedStorageProviderStart) = + pdpVerifier.getDataSetStorageProvider(setId); + assertEq( + currentStorageProviderStart, storageProvider, "Data set storage provider should be the constructor sender" + ); + assertEq( + proposedStorageProviderStart, + nextStorageProvider, + "Data set proposed storage provider should be the one proposed" + ); + vm.prank(nextStorageProvider); + + vm.expectEmit(true, true, false, false); + emit IPDPEvents.StorageProviderChanged(setId, storageProvider, nextStorageProvider); + pdpVerifier.claimDataSetStorageProvider(setId, empty); + (address currentStorageProviderEnd, address proposedStorageProviderEnd) = + pdpVerifier.getDataSetStorageProvider(setId); + assertEq( + currentStorageProviderEnd, nextStorageProvider, "Data set storage provider should be the next provider" + ); + assertEq(proposedStorageProviderEnd, address(0), "Data set proposed storage provider should be zero address"); + } + + function testStorageProviderProposalReset() public { + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + pdpVerifier.proposeDataSetStorageProvider(setId, nextStorageProvider); + pdpVerifier.proposeDataSetStorageProvider(setId, storageProvider); + (address currentStorageProviderEnd, address proposedStorageProviderEnd) = + pdpVerifier.getDataSetStorageProvider(setId); + assertEq( + currentStorageProviderEnd, storageProvider, "Data set storage provider should be the constructor sender" + ); + assertEq(proposedStorageProviderEnd, address(0), "Data set proposed storage provider should be zero address"); + } + + function testStorageProviderPermissionsRequired() public { + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + vm.prank(nonStorageProvider); + vm.expectRevert("Only the current storage provider can propose a new storage provider"); + pdpVerifier.proposeDataSetStorageProvider(setId, nextStorageProvider); + + // Now send proposal from actual storage provider + pdpVerifier.proposeDataSetStorageProvider(setId, nextStorageProvider); + + // Proposed storage provider has no extra permissions + vm.prank(nextStorageProvider); + vm.expectRevert("Only the current storage provider can propose a new storage provider"); + pdpVerifier.proposeDataSetStorageProvider(setId, nonStorageProvider); + + vm.prank(nonStorageProvider); + vm.expectRevert("Only the proposed storage provider can claim storage provider role"); + pdpVerifier.claimDataSetStorageProvider(setId, empty); + } + + function testScheduleRemovePiecesOnlyStorageProvider() public { + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), 
abi.encode(empty, empty) + ); + Cids.Cid[] memory pieceDataArray = new Cids.Cid[](1); + pieceDataArray[0] = makeSamplePiece(100); + pdpVerifier.addPieces(setId, address(0), pieceDataArray, empty); + + uint256[] memory pieceIdsToRemove = new uint256[](1); + pieceIdsToRemove[0] = 0; + + vm.prank(nonStorageProvider); + vm.expectRevert("Only the storage provider can schedule removal of pieces"); + pdpVerifier.schedulePieceDeletions(setId, pieceIdsToRemove, empty); + } +} + +contract PDPVerifierDataSetMutateTest is Test, PieceHelper { + uint256 constant CHALLENGE_FINALITY_DELAY = 2; + + PDPVerifier pdpVerifier; + TestingRecordKeeperService listener; + bytes empty = new bytes(0); + + function setUp() public { + PDPVerifier pdpVerifierImpl = new PDPVerifier(); + bytes memory initializeData = abi.encodeWithSelector(PDPVerifier.initialize.selector, CHALLENGE_FINALITY_DELAY); + MyERC1967Proxy proxy = new MyERC1967Proxy(address(pdpVerifierImpl), initializeData); + pdpVerifier = PDPVerifier(address(proxy)); + listener = new TestingRecordKeeperService(); + } + + function testAddPiece() public { + vm.expectEmit(true, true, false, false); + emit IPDPEvents.DataSetCreated(1, address(this)); + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + + Cids.Cid[] memory pieces = new Cids.Cid[](1); + uint256 leafCount = 64; + pieces[0] = makeSamplePiece(leafCount); + + vm.expectEmit(true, true, false, false); + emit IPDPEvents.PiecesAdded(setId, new uint256[](0), new Cids.Cid[](0)); + uint256 pieceId = pdpVerifier.addPieces(setId, address(0), pieces, empty); + assertEq(pdpVerifier.getChallengeRange(setId), 0); + + // flush add + vm.expectEmit(true, true, false, false); + emit IPDPEvents.NextProvingPeriod(setId, vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY, 2); + pdpVerifier.nextProvingPeriod(setId, vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY, empty); + + assertEq(pdpVerifier.getDataSetLeafCount(setId), leafCount); + assertEq(pdpVerifier.getNextChallengeEpoch(setId), vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY); + assertEq(pdpVerifier.getChallengeRange(setId), leafCount); + + assertTrue(pdpVerifier.pieceLive(setId, pieceId)); + assertEq(pdpVerifier.getPieceCid(setId, pieceId).data, pieces[0].data); + assertEq(pdpVerifier.getPieceLeafCount(setId, pieceId), leafCount); + + assertEq(pdpVerifier.getNextPieceId(setId), 1); + } + + function testAddPiecesToExistingDataSetWithFee() public { + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + + Cids.Cid[] memory pieces = new Cids.Cid[](1); + pieces[0] = makeSamplePiece(64); + bytes memory addPayload = abi.encode("add", "data"); + + vm.expectRevert("no fee on add to existing dataset"); + pdpVerifier.addPieces{value: 1 ether}(setId, address(0), pieces, addPayload); + } + + function testAddPiecesToNonExistentDataSet() public { + Cids.Cid[] memory pieces = new Cids.Cid[](1); + pieces[0] = makeSamplePiece(64); + bytes memory addPayload = abi.encode("add", "data"); + + vm.expectRevert("Data set not live"); + pdpVerifier.addPieces( + 999, // Non-existent data set ID + address(0), + pieces, + addPayload + ); + } + + function testAddPiecesToExistingDataSetWrongStorageProvider() public { + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + + 
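// Build a piece and payload; a non-storage-provider will attempt the add below +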
Cids.Cid[] memory pieces = new Cids.Cid[](1); + pieces[0] = makeSamplePiece(64); + bytes memory addPayload = abi.encode("add", "data"); + + // Try to add pieces as a different address + address otherAddress = address(0x1234); + vm.prank(otherAddress); + vm.expectRevert("Only the storage provider can add pieces"); + pdpVerifier.addPieces(setId, address(0), pieces, addPayload); + } + + function testAddMultiplePieces() public { + vm.expectEmit(true, true, false, false); + emit IPDPEvents.DataSetCreated(1, address(this)); + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + Cids.Cid[] memory pieces = new Cids.Cid[](2); + pieces[0] = makeSamplePiece(64); + pieces[1] = makeSamplePiece(128); + + vm.expectEmit(true, true, false, false); + uint256[] memory pieceIds = new uint256[](2); + pieceIds[0] = 0; + pieceIds[1] = 1; + Cids.Cid[] memory pieceCids = new Cids.Cid[](2); + pieceCids[0] = pieces[0]; + pieceCids[1] = pieces[1]; + emit IPDPEvents.PiecesAdded(setId, pieceIds, pieceCids); + uint256 firstId = pdpVerifier.addPieces(setId, address(0), pieces, empty); + assertEq(firstId, 0); + // flush add + vm.expectEmit(true, true, true, false); + emit IPDPEvents.NextProvingPeriod(setId, vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY, 6); + pdpVerifier.nextProvingPeriod(setId, vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY, empty); + + uint256 expectedLeafCount = 64 + 128; + assertEq(pdpVerifier.getDataSetLeafCount(setId), expectedLeafCount); + assertEq(pdpVerifier.getNextChallengeEpoch(setId), vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY); + + assertTrue(pdpVerifier.pieceLive(setId, firstId)); + assertTrue(pdpVerifier.pieceLive(setId, firstId + 1)); + assertEq(pdpVerifier.getPieceCid(setId, firstId).data, pieces[0].data); + assertEq(pdpVerifier.getPieceCid(setId, firstId + 1).data, pieces[1].data); + + assertEq(pdpVerifier.getPieceLeafCount(setId, firstId), 64); + assertEq(pdpVerifier.getPieceLeafCount(setId, firstId + 1), 128); + assertEq(pdpVerifier.getNextPieceId(setId), 2); + } + + function expectIndexedError(uint256 index, string memory expectedMessage) internal { + vm.expectRevert(abi.encodeWithSelector(PDPVerifier.IndexedError.selector, index, expectedMessage)); + } + + function testAddBadPiece() public { + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + Cids.Cid[] memory pieces = new Cids.Cid[](1); + + pieces[0] = makeSamplePiece(0); + expectIndexedError(0, "Padding is too large"); + pdpVerifier.addPieces(setId, address(0), pieces, empty); + + // Fail when piece size is too large + pieces[0] = makeSamplePiece(1 << pdpVerifier.MAX_PIECE_SIZE_LOG2() + 1); + expectIndexedError(0, "Piece size must be less than 2^50"); + pdpVerifier.addPieces(setId, address(0), pieces, empty); + + // Fail when not adding any pieces; + Cids.Cid[] memory emptyPieces = new Cids.Cid[](0); + vm.expectRevert("Must add at least one piece"); + pdpVerifier.addPieces(setId, address(0), emptyPieces, empty); + + // Fail when data set is no longer live + pieces[0] = makeSamplePiece(1); + pdpVerifier.deleteDataSet(setId, empty); + vm.expectRevert("Data set not live"); + pdpVerifier.addPieces(setId, address(0), pieces, empty); + } + + function testAddBadPiecesBatched() public { + // Add one bad piece, message fails on bad index + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + 
NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + Cids.Cid[] memory pieces = new Cids.Cid[](4); + pieces[0] = makeSamplePiece(1); + pieces[1] = makeSamplePiece(1); + pieces[2] = makeSamplePiece(1); + pieces[3] = makeSamplePiece(0); + + expectIndexedError(3, "Padding is too large"); + pdpVerifier.addPieces(setId, address(0), pieces, empty); + + // Add multiple bad pieces, message fails on first bad index + pieces[0] = makeSamplePiece(0); + expectIndexedError(0, "Padding is too large"); + pdpVerifier.addPieces(setId, address(0), pieces, empty); + } + + function testRemovePiece() public { + // Add one piece + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + Cids.Cid[] memory pieces = new Cids.Cid[](1); + pieces[0] = makeSamplePiece(2); + pdpVerifier.addPieces(setId, address(0), pieces, empty); + assertEq(pdpVerifier.getNextChallengeEpoch(setId), pdpVerifier.NO_CHALLENGE_SCHEDULED()); // Not updated on first add anymore + pdpVerifier.nextProvingPeriod(setId, vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY, empty); + assertEq(pdpVerifier.getNextChallengeEpoch(setId), vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY); + + // Remove piece + uint256[] memory toRemove = new uint256[](1); + toRemove[0] = 0; + pdpVerifier.schedulePieceDeletions(setId, toRemove, empty); + + vm.expectEmit(true, true, false, false); + emit IPDPEvents.PiecesRemoved(setId, toRemove); + pdpVerifier.nextProvingPeriod(setId, vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY, empty); // flush + + assertEq(pdpVerifier.getNextChallengeEpoch(setId), pdpVerifier.NO_CHALLENGE_SCHEDULED()); + assertEq(pdpVerifier.pieceLive(setId, 0), false); + assertEq(pdpVerifier.getNextPieceId(setId), 1); + assertEq(pdpVerifier.getDataSetLeafCount(setId), 0); + bytes memory emptyCidData = new bytes(0); + assertEq(pdpVerifier.getPieceCid(setId, 0).data, emptyCidData); + assertEq(pdpVerifier.getPieceLeafCount(setId, 0), 0); + } + + function testCannotScheduleRemovalOnNonLiveDataSet() public { + // Create a data set + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + + // Add a piece to the data set + Cids.Cid[] memory pieces = new Cids.Cid[](1); + pieces[0] = makeSamplePiece(2); + pdpVerifier.addPieces(setId, address(0), pieces, empty); + + // Delete the data set + pdpVerifier.deleteDataSet(setId, empty); + + // Attempt to schedule removal of the piece, which should fail + uint256[] memory pieceIds = new uint256[](1); + pieceIds[0] = 0; + vm.expectRevert("Data set not live"); + pdpVerifier.schedulePieceDeletions(setId, pieceIds, empty); + } + + function testRemovePieceBatch() public { + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + Cids.Cid[] memory pieces = new Cids.Cid[](3); + pieces[0] = makeSamplePiece(2); + pieces[1] = makeSamplePiece(2); + pieces[2] = makeSamplePiece(2); + pdpVerifier.addPieces(setId, address(0), pieces, empty); + uint256[] memory toRemove = new uint256[](2); + toRemove[0] = 0; + toRemove[1] = 2; + pdpVerifier.schedulePieceDeletions(setId, toRemove, empty); + + vm.expectEmit(true, true, false, false); + emit IPDPEvents.PiecesRemoved(setId, toRemove); + pdpVerifier.nextProvingPeriod(setId, vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY, empty); // flush + + 
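// After the flush, scheduled pieces 0 and 2 are gone; only piece 1 should remain live +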
assertEq(pdpVerifier.pieceLive(setId, 0), false); + assertEq(pdpVerifier.pieceLive(setId, 1), true); + assertEq(pdpVerifier.pieceLive(setId, 2), false); + + assertEq(pdpVerifier.getNextPieceId(setId), 3); + assertEq(pdpVerifier.getDataSetLeafCount(setId), 64 / 32); + assertEq(pdpVerifier.getNextChallengeEpoch(setId), vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY); + + bytes memory emptyCidData = new bytes(0); + assertEq(pdpVerifier.getPieceCid(setId, 0).data, emptyCidData); + assertEq(pdpVerifier.getPieceCid(setId, 1).data, pieces[1].data); + assertEq(pdpVerifier.getPieceCid(setId, 2).data, emptyCidData); + + assertEq(pdpVerifier.getPieceLeafCount(setId, 0), 0); + assertEq(pdpVerifier.getPieceLeafCount(setId, 1), 64 / 32); + assertEq(pdpVerifier.getPieceLeafCount(setId, 2), 0); + } + + function testRemoveFuturePieces() public { + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + Cids.Cid[] memory pieces = new Cids.Cid[](1); + pieces[0] = makeSamplePiece(2); + pdpVerifier.addPieces(setId, address(0), pieces, empty); + assertEq(true, pdpVerifier.pieceLive(setId, 0)); + assertEq(false, pdpVerifier.pieceLive(setId, 1)); + uint256[] memory toRemove = new uint256[](2); + + // Scheduling an un-added piece for removal should fail + toRemove[0] = 0; // current piece + toRemove[1] = 1; // future piece + vm.expectRevert("Can only schedule removal of existing pieces"); + pdpVerifier.schedulePieceDeletions(setId, toRemove, empty); + // Actual removal does not fail + pdpVerifier.nextProvingPeriod(setId, vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY, empty); + + // Scheduling both unchallengeable and challengeable pieces for removal succeeds + // scheduling duplicate ids in both cases succeeds + uint256[] memory toRemove2 = new uint256[](4); + pdpVerifier.addPieces(setId, address(0), pieces, empty); + toRemove2[0] = 0; // current challengeable piece + toRemove2[1] = 1; // current unchallengeable piece + toRemove2[2] = 0; // duplicate challengeable + toRemove2[3] = 1; // duplicate unchallengeable + // state exists for both pieces + assertEq(true, pdpVerifier.pieceLive(setId, 0)); + assertEq(true, pdpVerifier.pieceLive(setId, 1)); + // only piece 0 is challengeable + assertEq(true, pdpVerifier.pieceChallengable(setId, 0)); + assertEq(false, pdpVerifier.pieceChallengable(setId, 1)); + pdpVerifier.schedulePieceDeletions(setId, toRemove2, empty); + pdpVerifier.nextProvingPeriod(setId, vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY, empty); + + assertEq(false, pdpVerifier.pieceLive(setId, 0)); + assertEq(false, pdpVerifier.pieceLive(setId, 1)); + } + + function testExtraDataMaxSizeLimit() public { + // Generate extra data that exceeds the max size (2KB) + bytes memory tooLargeExtraData = new bytes(2049); // 2KB + 1 byte + for (uint256 i = 0; i < tooLargeExtraData.length; i++) { + tooLargeExtraData[i] = 0x41; // ASCII 'A' + } + + // First test createDataSet with too large extra data + vm.expectRevert("Extra data too large"); + pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(tooLargeExtraData, empty) + ); + + // Now create data set + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + Cids.Cid[] memory pieces = new Cids.Cid[](1); + + // Test addPieces with too large extra data + pieces[0] = makeSamplePiece(2); + 
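// Every mutating entry point enforces the same 2KB extraData cap, starting with addPieces +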
vm.expectRevert("Extra data too large"); + pdpVerifier.addPieces(setId, address(0), pieces, tooLargeExtraData); + + // Now actually add piece id 0 + pdpVerifier.addPieces(setId, address(0), pieces, empty); + + // Test schedulePieceDeletions with too large extra data + uint256[] memory pieceIds = new uint256[](1); + pieceIds[0] = 0; + vm.expectRevert("Extra data too large"); + pdpVerifier.schedulePieceDeletions(setId, pieceIds, tooLargeExtraData); + + // Test nextProvingPeriod with too large extra data + vm.expectRevert("Extra data too large"); + pdpVerifier.nextProvingPeriod(setId, vm.getBlockNumber() + 10, tooLargeExtraData); + + // Test deleteDataSet with too large extra data + vm.expectRevert("Extra data too large"); + pdpVerifier.deleteDataSet(setId, tooLargeExtraData); + } + + function testOnlyStorageProviderCanModifyDataSet() public { + // Setup a piece we can add + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + Cids.Cid[] memory pieces = new Cids.Cid[](1); + pieces[0] = makeSamplePiece(2); + + // First add a piece as the storage provider so we can test removal + pdpVerifier.addPieces(setId, address(0), pieces, empty); + + address nonStorageProvider = address(0xC0FFEE); + // Try to add pieces as non-storage-provider + vm.prank(nonStorageProvider); + vm.expectRevert("Only the storage provider can add pieces"); + pdpVerifier.addPieces(setId, address(0), pieces, empty); + + // Try to delete data set as non-storage-provider + vm.prank(nonStorageProvider); + vm.expectRevert("Only the storage provider can delete data sets"); + pdpVerifier.deleteDataSet(setId, empty); + + // Try to schedule removals as non-storage-provider + uint256[] memory pieceIds = new uint256[](1); + pieceIds[0] = 0; + vm.prank(nonStorageProvider); + vm.expectRevert("Only the storage provider can schedule removal of pieces"); + pdpVerifier.schedulePieceDeletions(setId, pieceIds, empty); + + // Try to provePossession as non-storage-provider + vm.prank(nonStorageProvider); + IPDPTypes.Proof[] memory proofs = new IPDPTypes.Proof[](1); + proofs[0] = IPDPTypes.Proof(bytes32(abi.encodePacked("test")), new bytes32[](0)); + vm.expectRevert("Only the storage provider can prove possession"); + pdpVerifier.provePossession(setId, proofs); + + // Try to call nextProvingPeriod as non-storage-provider + vm.prank(nonStorageProvider); + vm.expectRevert("only the storage provider can move to next proving period"); + pdpVerifier.nextProvingPeriod(setId, vm.getBlockNumber() + 10, empty); + } + + function testNextProvingPeriodChallengeEpochTooSoon() public { + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + // Add a piece to the data set (otherwise nextProvingPeriod fails waiting for leaves) + Cids.Cid[] memory pieces = new Cids.Cid[](1); + pieces[0] = makeSamplePiece(2); + pdpVerifier.addPieces(setId, address(0), pieces, empty); + + // Current block number + uint256 currentBlock = vm.getBlockNumber(); + + // Try to call nextProvingPeriod with a challenge epoch that is not at least + // challengeFinality epochs in the future + uint256 tooSoonEpoch = currentBlock + CHALLENGE_FINALITY_DELAY - 1; + + // Expect revert with the specific error message + vm.expectRevert("challenge epoch must be at least challengeFinality epochs in the future"); + pdpVerifier.nextProvingPeriod(setId, tooSoonEpoch, ""); + + // Set challenge 
epoch to exactly challengeFinality epochs in the future + // This should work (not revert) + uint256 validEpoch = currentBlock + CHALLENGE_FINALITY_DELAY; + + // This call should succeed + pdpVerifier.nextProvingPeriod(setId, validEpoch, ""); + + // Verify the challenge epoch was set correctly + assertEq(pdpVerifier.getNextChallengeEpoch(setId), validEpoch); + } + + function testNextProvingPeriodWithNoData() public { + // Get the NO_CHALLENGE_SCHEDULED constant value for clarity + uint256 noChallenge = pdpVerifier.NO_CHALLENGE_SCHEDULED(); + + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + + // Initial state should be NO_CHALLENGE + assertEq( + pdpVerifier.getNextChallengeEpoch(setId), noChallenge, "Initial state should be NO_CHALLENGE_SCHEDULED" + ); + + // Try to set next proving period with various values + vm.expectRevert("can only start proving once leaves are added"); + pdpVerifier.nextProvingPeriod(setId, vm.getBlockNumber() + 100, empty); + + vm.expectRevert("can only start proving once leaves are added"); + pdpVerifier.nextProvingPeriod(setId, vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY, empty); + + vm.expectRevert("can only start proving once leaves are added"); + pdpVerifier.nextProvingPeriod(setId, type(uint256).max, empty); + } + + function testNextProvingPeriodRevertsOnEmptyDataSet() public { + // Create a new data set + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + + // Try to call nextProvingPeriod on the empty data set + // Should revert because no leaves have been added yet + vm.expectRevert("can only start proving once leaves are added"); + pdpVerifier.nextProvingPeriod(setId, vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY, empty); + } + + function testEmitDataSetEmptyEvent() public { + // Create a data set with one piece + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + + Cids.Cid[] memory pieces = new Cids.Cid[](1); + pieces[0] = makeSamplePiece(2); + pdpVerifier.addPieces(setId, address(0), pieces, empty); + + // Schedule piece for removal + uint256[] memory toRemove = new uint256[](1); + toRemove[0] = 0; + pdpVerifier.schedulePieceDeletions(setId, toRemove, empty); + + // Expect DataSetEmpty event when calling nextProvingPeriod + vm.expectEmit(true, false, false, false); + emit IPDPEvents.DataSetEmpty(setId); + + // Call nextProvingPeriod which should remove the piece and emit the event + pdpVerifier.nextProvingPeriod(setId, vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY, empty); + + // Verify the data set is indeed empty + assertEq(pdpVerifier.getDataSetLeafCount(setId), 0); + assertEq(pdpVerifier.getNextChallengeEpoch(setId), 0); + assertEq(pdpVerifier.getDataSetLastProvenEpoch(setId), 0); + } +} + +contract PDPVerifierPaginationTest is Test, PieceHelper { + PDPVerifier pdpVerifier; + TestingRecordKeeperService listener; + bytes empty = new bytes(0); + + function setUp() public { + PDPVerifier pdpVerifierImpl = new PDPVerifier(); + uint256 challengeFinality = 2; + bytes memory initializeData = abi.encodeWithSelector(PDPVerifier.initialize.selector, challengeFinality); + MyERC1967Proxy proxy = new MyERC1967Proxy(address(pdpVerifierImpl), initializeData); + pdpVerifier = PDPVerifier(address(proxy)); + listener = new 
TestingRecordKeeperService(); + } + + function testGetActivePiecesEmpty() public { + // Create empty data set and test + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + + (Cids.Cid[] memory pieces, uint256[] memory ids, uint256[] memory sizes, bool hasMore) = + pdpVerifier.getActivePieces(setId, 0, 10); + + assertEq(pieces.length, 0, "Should return empty array for empty data set"); + assertEq(ids.length, 0, "Should return empty IDs array"); + assertEq(sizes.length, 0, "Should return empty sizes array"); + assertEq(hasMore, false, "Should not have more items"); + + // Also verify with getActivePieceCount + assertEq(pdpVerifier.getActivePieceCount(setId), 0, "Empty data set should have 0 active pieces"); + } + + function testGetActivePiecesPagination() public { + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + + // Add 15 pieces + Cids.Cid[] memory testPieces = new Cids.Cid[](15); + for (uint256 i = 0; i < 15; i++) { + testPieces[i] = makeSamplePiece(1024 / 32 * (i + 1)); + } + + uint256 firstPieceId = pdpVerifier.addPieces(setId, address(0), testPieces, empty); + assertEq(firstPieceId, 0, "First piece ID should be 0"); + + // Verify total count + assertEq(pdpVerifier.getActivePieceCount(setId), 15, "Should have 15 active pieces"); + + // Test first page + (Cids.Cid[] memory pieces1, uint256[] memory ids1, uint256[] memory sizes1, bool hasMore1) = + pdpVerifier.getActivePieces(setId, 0, 5); + assertEq(pieces1.length, 5, "First page should have 5 pieces"); + assertEq(ids1.length, 5, "First page should have 5 IDs"); + assertEq(sizes1.length, 5, "First page should have 5 sizes"); + assertEq(hasMore1, true, "Should have more items after first page"); + assertEq(sizes1[0], 1024, "First piece size should be 1024"); + assertEq(ids1[0], 0, "First piece ID should be 0"); + + // Test second page + (Cids.Cid[] memory pieces2, uint256[] memory ids2, uint256[] memory sizes2, bool hasMore2) = + pdpVerifier.getActivePieces(setId, 5, 5); + assertEq(pieces2.length, 5, "Second page should have 5 pieces"); + assertEq(hasMore2, true, "Should have more items after second page"); + assertEq(ids2[0], 5, "First piece ID on second page should be 5"); + assertEq(sizes2[0], 6144, "First piece size on second page should be 6144 (1024 * 6)"); + + // Test last page + (Cids.Cid[] memory pieces3, uint256[] memory ids3, uint256[] memory sizes3, bool hasMore3) = + pdpVerifier.getActivePieces(setId, 10, 5); + assertEq(pieces3.length, 5, "Last page should have 5 pieces"); + assertEq(hasMore3, false, "Should not have more items after last page"); + assertEq(ids3[0], 10, "First piece ID on last page should be 10"); + } + + function testGetActivePiecesWithDeleted() public { + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + + // Add pieces + Cids.Cid[] memory testPieces = new Cids.Cid[](10); + for (uint256 i = 0; i < 10; i++) { + testPieces[i] = makeSamplePiece(1024 / 32); + } + uint256 firstPieceId = pdpVerifier.addPieces(setId, address(0), testPieces, empty); + + // Schedule removal of pieces 2, 4, 6 (indices 1, 3, 5) + uint256[] memory toRemove = new uint256[](3); + toRemove[0] = firstPieceId + 1; // Piece at index 1 + toRemove[1] = firstPieceId + 3; // Piece at index 3 + toRemove[2] = 
firstPieceId + 5; // Piece at index 5 + pdpVerifier.schedulePieceDeletions(setId, toRemove, empty); + + // Move to next proving period to make removals effective + uint256 challengeFinality = pdpVerifier.getChallengeFinality(); + pdpVerifier.nextProvingPeriod(setId, vm.getBlockNumber() + challengeFinality, empty); + + // Should return only 7 active pieces + (Cids.Cid[] memory pieces, uint256[] memory ids, uint256[] memory sizes, bool hasMore) = + pdpVerifier.getActivePieces(setId, 0, 10); + assertEq(pieces.length, 7, "Should have 7 active pieces after deletions"); + assertEq(hasMore, false, "Should not have more items"); + + // Verify count matches + assertEq(pdpVerifier.getActivePieceCount(setId), 7, "Should have 7 active pieces count"); + + // Verify the correct pieces are returned (0, 2, 4, 6, 7, 8, 9) + assertEq(ids[0], 0, "First active piece should be 0"); + assertEq(ids[1], 2, "Second active piece should be 2"); + assertEq(ids[2], 4, "Third active piece should be 4"); + assertEq(ids[3], 6, "Fourth active piece should be 6"); + assertEq(ids[4], 7, "Fifth active piece should be 7"); + } + + function testGetActivePiecesEdgeCases() public { + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + + // Add 5 pieces + Cids.Cid[] memory testPieces = new Cids.Cid[](5); + for (uint256 i = 0; i < 5; i++) { + testPieces[i] = makeSamplePiece(1024 / 32); + } + pdpVerifier.addPieces(setId, address(0), testPieces, empty); + + // Verify count + assertEq(pdpVerifier.getActivePieceCount(setId), 5, "Should have 5 active pieces"); + + // Test offset beyond range + (Cids.Cid[] memory pieces1, uint256[] memory ids1, uint256[] memory sizes1, bool hasMore1) = + pdpVerifier.getActivePieces(setId, 10, 5); + assertEq(pieces1.length, 0, "Should return empty when offset beyond range"); + assertEq(hasMore1, false, "Should not have more items"); + + // Test limit 0 - should revert now + vm.expectRevert("Limit must be greater than 0"); + pdpVerifier.getActivePieces(setId, 0, 0); + + // Test limit exceeding available + (Cids.Cid[] memory pieces3, uint256[] memory ids3, uint256[] memory sizes3, bool hasMore3) = + pdpVerifier.getActivePieces(setId, 3, 10); + assertEq(pieces3.length, 2, "Should return only 2 pieces from offset 3"); + assertEq(hasMore3, false, "Should not have more items"); + assertEq(ids3[0], 3, "First ID should be 3"); + assertEq(ids3[1], 4, "Second ID should be 4"); + } + + function testGetActivePiecesNotLive() public { + // Test with invalid data set ID + vm.expectRevert("Data set not live"); + pdpVerifier.getActivePieces(999, 0, 10); + + // Also test getActivePieceCount + vm.expectRevert("Data set not live"); + pdpVerifier.getActivePieceCount(999); + } + + function testGetActivePiecesHasMore() public { + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + + // Add exactly 10 pieces + Cids.Cid[] memory testPieces = new Cids.Cid[](10); + for (uint256 i = 0; i < 10; i++) { + testPieces[i] = makeSamplePiece(1024 / 32); + } + pdpVerifier.addPieces(setId, address(0), testPieces, empty); + + // Test exact boundary - requesting exactly all items + (,,, bool hasMore1) = pdpVerifier.getActivePieces(setId, 0, 10); + assertEq(hasMore1, false, "Should not have more when requesting exactly all items"); + + // Test one less than total - should have more + (,,, bool hasMore2) = 
pdpVerifier.getActivePieces(setId, 0, 9); + assertEq(hasMore2, true, "Should have more when requesting less than total"); + + // Test at offset with remaining items + (,,, bool hasMore3) = pdpVerifier.getActivePieces(setId, 5, 4); + assertEq(hasMore3, true, "Should have more when 1 item remains"); + + // Test at offset with no remaining items + (,,, bool hasMore4) = pdpVerifier.getActivePieces(setId, 5, 5); + assertEq(hasMore4, false, "Should not have more when requesting exactly remaining items"); + } + + function testGetActivePiecesLargeSet() public { + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + + // Add 100 pieces + Cids.Cid[] memory testPieces = new Cids.Cid[](100); + for (uint256 i = 0; i < 100; i++) { + testPieces[i] = makeSamplePiece(1024 / 32 * (i + 1)); + } + pdpVerifier.addPieces(setId, address(0), testPieces, empty); + + // Verify total count + assertEq(pdpVerifier.getActivePieceCount(setId), 100, "Should have 100 active pieces"); + + // Test pagination through the entire set + uint256 totalRetrieved = 0; + uint256 offset = 0; + uint256 pageSize = 20; + + while (offset < 100) { + (Cids.Cid[] memory pieces, uint256[] memory ids, uint256[] memory sizes, bool hasMore) = + pdpVerifier.getActivePieces(setId, offset, pageSize); + + if (offset + pageSize < 100) { + assertEq(hasMore, true, "Should have more pages"); + assertEq(pieces.length, pageSize, "Should return full page"); + } else { + assertEq(hasMore, false, "Should not have more pages"); + assertEq(pieces.length, 100 - offset, "Should return remaining pieces"); + } + + // Verify IDs are sequential + for (uint256 i = 0; i < pieces.length; i++) { + assertEq(ids[i], offset + i, "IDs should be sequential"); + assertEq(sizes[i], 1024 * (offset + i + 1), "Sizes should match pattern"); + } + + totalRetrieved += pieces.length; + offset += pageSize; + } + + assertEq(totalRetrieved, 100, "Should have retrieved all 100 pieces"); + } +} + +// TestingRecordKeeperService is a PDPListener that allows any amount of proof challenges +// to help with more flexible testing. +contract TestingRecordKeeperService is PDPListener, PDPRecordKeeper { + // Implement the new storageProviderChanged hook + /// @notice Called when data set storage provider role is changed in PDPVerifier. 
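+ /// This test listener keeps no record of storage provider changes, so the hook is a no-op.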
+ function storageProviderChanged(uint256, address, address, bytes calldata) external override {} + + function dataSetCreated(uint256 dataSetId, address creator, bytes calldata) external override { + receiveDataSetEvent(dataSetId, PDPRecordKeeper.OperationType.CREATE, abi.encode(creator)); + } + + function dataSetDeleted(uint256 dataSetId, uint256 deletedLeafCount, bytes calldata) external override { + receiveDataSetEvent(dataSetId, PDPRecordKeeper.OperationType.DELETE, abi.encode(deletedLeafCount)); + } + + function piecesAdded(uint256 dataSetId, uint256 firstAdded, Cids.Cid[] calldata pieceData, bytes calldata) + external + override + { + receiveDataSetEvent(dataSetId, PDPRecordKeeper.OperationType.ADD, abi.encode(firstAdded, pieceData)); + } + + function piecesScheduledRemove(uint256 dataSetId, uint256[] calldata pieceIds, bytes calldata) external override { + receiveDataSetEvent(dataSetId, PDPRecordKeeper.OperationType.REMOVE_SCHEDULED, abi.encode(pieceIds)); + } + + function possessionProven(uint256 dataSetId, uint256 challengedLeafCount, uint256 seed, uint256 challengeCount) + external + override + { + receiveDataSetEvent( + dataSetId, + PDPRecordKeeper.OperationType.PROVE_POSSESSION, + abi.encode(challengedLeafCount, seed, challengeCount) + ); + } + + function nextProvingPeriod(uint256 dataSetId, uint256 challengeEpoch, uint256 leafCount, bytes calldata) + external + override + { + receiveDataSetEvent( + dataSetId, PDPRecordKeeper.OperationType.NEXT_PROVING_PERIOD, abi.encode(challengeEpoch, leafCount) + ); + } +} + +contract SumTreeInternalTestPDPVerifier is PDPVerifier { + constructor() {} + + function getTestHeightFromIndex(uint256 index) public pure returns (uint256) { + return heightFromIndex(index); + } + + function getSumTreeCounts(uint256 setId, uint256 pieceId) public view returns (uint256) { + return sumTreeCounts[setId][pieceId]; + } +} + +contract SumTreeHeightTest is Test { + SumTreeInternalTestPDPVerifier pdpVerifier; + + function setUp() public { + PDPVerifier pdpVerifierImpl = new SumTreeInternalTestPDPVerifier(); + bytes memory initializeData = abi.encodeWithSelector(PDPVerifier.initialize.selector, 2); + MyERC1967Proxy proxy = new MyERC1967Proxy(address(pdpVerifierImpl), initializeData); + pdpVerifier = SumTreeInternalTestPDPVerifier(address(proxy)); + } + + function testHeightFromIndex() public view { + // https://oeis.org/A001511 + uint8[105] memory oeisA001511 = [ + 1, + 2, + 1, + 3, + 1, + 2, + 1, + 4, + 1, + 2, + 1, + 3, + 1, + 2, + 1, + 5, + 1, + 2, + 1, + 3, + 1, + 2, + 1, + 4, + 1, + 2, + 1, + 3, + 1, + 2, + 1, + 6, + 1, + 2, + 1, + 3, + 1, + 2, + 1, + 4, + 1, + 2, + 1, + 3, + 1, + 2, + 1, + 5, + 1, + 2, + 1, + 3, + 1, + 2, + 1, + 4, + 1, + 2, + 1, + 3, + 1, + 2, + 1, + 7, + 1, + 2, + 1, + 3, + 1, + 2, + 1, + 4, + 1, + 2, + 1, + 3, + 1, + 2, + 1, + 5, + 1, + 2, + 1, + 3, + 1, + 2, + 1, + 4, + 1, + 2, + 1, + 3, + 1, + 2, + 1, + 6, + 1, + 2, + 1, + 3, + 1, + 2, + 1, + 4, + 1 + ]; + for (uint256 i = 0; i < 105; i++) { + assertEq( + uint256(oeisA001511[i]), + pdpVerifier.getTestHeightFromIndex(i) + 1, + "Heights from index 0 to 104 should match OEIS A001511" + ); + } + } +} + +contract SumTreeAddTest is Test, PieceHelper { + SumTreeInternalTestPDPVerifier pdpVerifier; + TestingRecordKeeperService listener; + uint256 testSetId; + uint256 constant CHALLENGE_FINALITY_DELAY = 100; + bytes empty = new bytes(0); + + function setUp() public { + PDPVerifier pdpVerifierImpl = new SumTreeInternalTestPDPVerifier(); + bytes memory initializeData = 
abi.encodeWithSelector(PDPVerifier.initialize.selector, CHALLENGE_FINALITY_DELAY); + MyERC1967Proxy proxy = new MyERC1967Proxy(address(pdpVerifierImpl), initializeData); + pdpVerifier = SumTreeInternalTestPDPVerifier(address(proxy)); + listener = new TestingRecordKeeperService(); + testSetId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + } + + function testMultiAdd() public { + uint256[] memory counts = new uint256[](8); + counts[0] = 1; + counts[1] = 2; + counts[2] = 3; + counts[3] = 5; + counts[4] = 8; + counts[5] = 13; + counts[6] = 21; + counts[7] = 34; + + Cids.Cid[] memory pieceDataArray = new Cids.Cid[](8); + + for (uint256 i = 0; i < counts.length; i++) { + pieceDataArray[i] = makeSamplePiece(counts[i]); + } + pdpVerifier.addPieces(testSetId, address(0), pieceDataArray, empty); + assertEq(pdpVerifier.getDataSetLeafCount(testSetId), 87, "Incorrect final data set leaf count"); + assertEq(pdpVerifier.getNextPieceId(testSetId), 8, "Incorrect next piece ID"); + assertEq(pdpVerifier.getSumTreeCounts(testSetId, 7), 87, "Incorrect sum tree count"); + assertEq(pdpVerifier.getPieceLeafCount(testSetId, 7), 34, "Incorrect piece leaf count"); + Cids.Cid memory expectedCid = pieceDataArray[3]; + Cids.Cid memory actualCid = pdpVerifier.getPieceCid(testSetId, 3); + assertEq(actualCid.data, expectedCid.data, "Incorrect piece CID"); + } + + function setUpTestingArray() public returns (uint256[] memory counts, uint256[] memory expectedSumTreeCounts) { + counts = new uint256[](8); + counts[0] = 200; + counts[1] = 100; + counts[2] = 1; // Remove + counts[3] = 30; + counts[4] = 50; + counts[5] = 1; // Remove + counts[6] = 400; + counts[7] = 40; + + // Correct sum tree values assuming that pieceIdsToRemove are deleted + expectedSumTreeCounts = new uint256[](8); + expectedSumTreeCounts[0] = 200; + expectedSumTreeCounts[1] = 300; + expectedSumTreeCounts[2] = 0; + expectedSumTreeCounts[3] = 330; + expectedSumTreeCounts[4] = 50; + expectedSumTreeCounts[5] = 50; + expectedSumTreeCounts[6] = 400; + expectedSumTreeCounts[7] = 820; + + uint256[] memory pieceIdsToRemove = new uint256[](2); + pieceIdsToRemove[0] = 2; + pieceIdsToRemove[1] = 5; + + // Add all + for (uint256 i = 0; i < counts.length; i++) { + Cids.Cid[] memory pieceDataArray = new Cids.Cid[](1); + pieceDataArray[0] = makeSamplePiece(counts[i]); + pdpVerifier.addPieces(testSetId, address(0), pieceDataArray, empty); + // Assert the piece was added correctly + assertEq(pdpVerifier.getPieceCid(testSetId, i).data, pieceDataArray[0].data, "Piece not added correctly"); + } + + // Delete some + // Remove pieces in batch + pdpVerifier.schedulePieceDeletions(testSetId, pieceIdsToRemove, empty); + // flush adds and removals + pdpVerifier.nextProvingPeriod(testSetId, vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY, empty); + for (uint256 i = 0; i < pieceIdsToRemove.length; i++) { + bytes memory zeroBytes; + assertEq(pdpVerifier.getPieceCid(testSetId, pieceIdsToRemove[i]).data, zeroBytes); + assertEq(pdpVerifier.getPieceLeafCount(testSetId, pieceIdsToRemove[i]), 0, "Piece size should be 0"); + } + } + + function testSumTree() public { + (uint256[] memory counts, uint256[] memory expectedSumTreeCounts) = setUpTestingArray(); + // Assert that the sum tree count is correct + for (uint256 i = 0; i < counts.length; i++) { + assertEq(pdpVerifier.getSumTreeCounts(testSetId, i), expectedSumTreeCounts[i], "Incorrect sum tree size"); + } + + // Assert final data set leaf 
count + assertEq(pdpVerifier.getDataSetLeafCount(testSetId), 820, "Incorrect final data set leaf count"); + } + + function testFindPieceId() public { + setUpTestingArray(); + + // Test findPieceId for various positions + assertFindPieceAndOffset(testSetId, 0, 0, 0); + assertFindPieceAndOffset(testSetId, 199, 0, 199); + assertFindPieceAndOffset(testSetId, 200, 1, 0); + assertFindPieceAndOffset(testSetId, 299, 1, 99); + assertFindPieceAndOffset(testSetId, 300, 3, 0); + assertFindPieceAndOffset(testSetId, 329, 3, 29); + assertFindPieceAndOffset(testSetId, 330, 4, 0); + assertFindPieceAndOffset(testSetId, 379, 4, 49); + assertFindPieceAndOffset(testSetId, 380, 6, 0); + assertFindPieceAndOffset(testSetId, 779, 6, 399); + assertFindPieceAndOffset(testSetId, 780, 7, 0); + assertFindPieceAndOffset(testSetId, 819, 7, 39); + + // Test edge cases + vm.expectRevert("Leaf index out of bounds"); + uint256[] memory outOfBounds = new uint256[](1); + outOfBounds[0] = 820; + pdpVerifier.findPieceIds(testSetId, outOfBounds); + + vm.expectRevert("Leaf index out of bounds"); + outOfBounds[0] = 1000; + pdpVerifier.findPieceIds(testSetId, outOfBounds); + } + + function testBatchFindPieceId() public { + setUpTestingArray(); + uint256[] memory searchIndexes = new uint256[](12); + searchIndexes[0] = 0; + searchIndexes[1] = 199; + searchIndexes[2] = 200; + searchIndexes[3] = 299; + searchIndexes[4] = 300; + searchIndexes[5] = 329; + searchIndexes[6] = 330; + searchIndexes[7] = 379; + searchIndexes[8] = 380; + searchIndexes[9] = 779; + searchIndexes[10] = 780; + searchIndexes[11] = 819; + + uint256[] memory expectedPieces = new uint256[](12); + expectedPieces[0] = 0; + expectedPieces[1] = 0; + expectedPieces[2] = 1; + expectedPieces[3] = 1; + expectedPieces[4] = 3; + expectedPieces[5] = 3; + expectedPieces[6] = 4; + expectedPieces[7] = 4; + expectedPieces[8] = 6; + expectedPieces[9] = 6; + expectedPieces[10] = 7; + expectedPieces[11] = 7; + + uint256[] memory expectedOffsets = new uint256[](12); + expectedOffsets[0] = 0; + expectedOffsets[1] = 199; + expectedOffsets[2] = 0; + expectedOffsets[3] = 99; + expectedOffsets[4] = 0; + expectedOffsets[5] = 29; + expectedOffsets[6] = 0; + expectedOffsets[7] = 49; + expectedOffsets[8] = 0; + expectedOffsets[9] = 399; + expectedOffsets[10] = 0; + expectedOffsets[11] = 39; + + assertFindPiecesAndOffsets(testSetId, searchIndexes, expectedPieces, expectedOffsets); + } + + error TestingFindError(uint256 expected, uint256 actual, string msg); + + function assertFindPieceAndOffset(uint256 setId, uint256 searchIndex, uint256 expectPieceId, uint256 expectOffset) + internal + view + { + uint256[] memory searchIndices = new uint256[](1); + searchIndices[0] = searchIndex; + IPDPTypes.PieceIdAndOffset[] memory result = pdpVerifier.findPieceIds(setId, searchIndices); + if (result[0].pieceId != expectPieceId) { + revert TestingFindError(expectPieceId, result[0].pieceId, "unexpected piece"); + } + if (result[0].offset != expectOffset) { + revert TestingFindError(expectOffset, result[0].offset, "unexpected offset"); + } + } + + // The batched version of assertFindPieceAndOffset + function assertFindPiecesAndOffsets( + uint256 setId, + uint256[] memory searchIndices, + uint256[] memory expectPieceIds, + uint256[] memory expectOffsets + ) internal view { + IPDPTypes.PieceIdAndOffset[] memory result = pdpVerifier.findPieceIds(setId, searchIndices); + for (uint256 i = 0; i < searchIndices.length; i++) { + assertEq(result[i].pieceId, expectPieceIds[i], "unexpected piece"); + 
assertEq(result[i].offset, expectOffsets[i], "unexpected offset"); + } + } + + function testFindPieceIdTraverseOffTheEdgeAndBack() public { + uint256[] memory sizes = new uint256[](5); + sizes[0] = 1; // Remove + sizes[1] = 1; // Remove + sizes[2] = 1; // Remove + sizes[3] = 1; + sizes[4] = 1; + + uint256[] memory pieceIdsToRemove = new uint256[](3); + pieceIdsToRemove[0] = 0; + pieceIdsToRemove[1] = 1; + pieceIdsToRemove[2] = 2; + + for (uint256 i = 0; i < sizes.length; i++) { + Cids.Cid[] memory pieceDataArray = new Cids.Cid[](1); + pieceDataArray[0] = makeSamplePiece(sizes[i]); + pdpVerifier.addPieces(testSetId, address(0), pieceDataArray, empty); + } + pdpVerifier.schedulePieceDeletions(testSetId, pieceIdsToRemove, empty); + pdpVerifier.nextProvingPeriod(testSetId, vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY, empty); //flush removals + + assertFindPieceAndOffset(testSetId, 0, 3, 0); + assertFindPieceAndOffset(testSetId, 1, 4, 0); + } +} + +contract BadListener is PDPListener { + PDPRecordKeeper.OperationType public badOperation; + + function setBadOperation(PDPRecordKeeper.OperationType operationType) external { + badOperation = operationType; + } + + function storageProviderChanged(uint256, address, address, bytes calldata) external override {} + + function dataSetCreated(uint256 dataSetId, address creator, bytes calldata) external view override { + receiveDataSetEvent(dataSetId, PDPRecordKeeper.OperationType.CREATE, abi.encode(creator)); + } + + function dataSetDeleted(uint256 dataSetId, uint256 deletedLeafCount, bytes calldata) external view override { + receiveDataSetEvent(dataSetId, PDPRecordKeeper.OperationType.DELETE, abi.encode(deletedLeafCount)); + } + + function piecesAdded(uint256 dataSetId, uint256 firstAdded, Cids.Cid[] calldata pieceData, bytes calldata) + external + view + override + { + receiveDataSetEvent(dataSetId, PDPRecordKeeper.OperationType.ADD, abi.encode(firstAdded, pieceData)); + } + + function piecesScheduledRemove(uint256 dataSetId, uint256[] calldata pieceIds, bytes calldata) + external + view + override + { + receiveDataSetEvent(dataSetId, PDPRecordKeeper.OperationType.REMOVE_SCHEDULED, abi.encode(pieceIds)); + } + + function possessionProven(uint256 dataSetId, uint256 challengedLeafCount, uint256 seed, uint256 challengeCount) + external + view + override + { + receiveDataSetEvent( + dataSetId, + PDPRecordKeeper.OperationType.PROVE_POSSESSION, + abi.encode(challengedLeafCount, seed, challengeCount) + ); + } + + function nextProvingPeriod(uint256 dataSetId, uint256 challengeEpoch, uint256 leafCount, bytes calldata) + external + view + override + { + receiveDataSetEvent( + dataSetId, PDPRecordKeeper.OperationType.NEXT_PROVING_PERIOD, abi.encode(challengeEpoch, leafCount) + ); + } + + function receiveDataSetEvent(uint256, PDPRecordKeeper.OperationType operationType, bytes memory) internal view { + if (operationType == badOperation) { + revert("Failing operation"); + } + } +} + +contract PDPListenerIntegrationTest is Test, PieceHelper { + PDPVerifier pdpVerifier; + BadListener badListener; + uint256 constant CHALLENGE_FINALITY_DELAY = 2; + bytes empty = new bytes(0); + + function setUp() public { + PDPVerifier pdpVerifierImpl = new PDPVerifier(); + bytes memory initializeData = abi.encodeWithSelector(PDPVerifier.initialize.selector, CHALLENGE_FINALITY_DELAY); + MyERC1967Proxy proxy = new MyERC1967Proxy(address(pdpVerifierImpl), initializeData); + pdpVerifier = PDPVerifier(address(proxy)); + badListener = new BadListener(); + } + + function 
testListenerPropagatesErrors() public { + badListener.setBadOperation(PDPRecordKeeper.OperationType.CREATE); + vm.expectRevert("Failing operation"); + pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(badListener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + + badListener.setBadOperation(PDPRecordKeeper.OperationType.NONE); + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(badListener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + + badListener.setBadOperation(PDPRecordKeeper.OperationType.ADD); + Cids.Cid[] memory pieces = new Cids.Cid[](1); + pieces[0] = makeSamplePiece(1); + vm.expectRevert("Failing operation"); + pdpVerifier.addPieces(setId, address(0), pieces, empty); + + badListener.setBadOperation(PDPRecordKeeper.OperationType.NONE); + pdpVerifier.addPieces(setId, address(0), pieces, empty); + + badListener.setBadOperation(PDPRecordKeeper.OperationType.REMOVE_SCHEDULED); + uint256[] memory pieceIds = new uint256[](1); + pieceIds[0] = 0; + vm.expectRevert("Failing operation"); + pdpVerifier.schedulePieceDeletions(setId, pieceIds, empty); + + badListener.setBadOperation(PDPRecordKeeper.OperationType.NONE); + pdpVerifier.schedulePieceDeletions(setId, pieceIds, empty); + + badListener.setBadOperation(PDPRecordKeeper.OperationType.NEXT_PROVING_PERIOD); + vm.expectRevert("Failing operation"); + pdpVerifier.nextProvingPeriod(setId, vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY, empty); + + badListener.setBadOperation(PDPRecordKeeper.OperationType.NONE); + pdpVerifier.nextProvingPeriod(setId, vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY, empty); + } +} + +contract ExtraDataListener is PDPListener { + mapping(uint256 => mapping(PDPRecordKeeper.OperationType => bytes)) public extraDataBySetId; + + function storageProviderChanged(uint256, address, address, bytes calldata) external override {} + + function dataSetCreated(uint256 dataSetId, address, bytes calldata extraData) external override { + extraDataBySetId[dataSetId][PDPRecordKeeper.OperationType.CREATE] = extraData; + } + + function dataSetDeleted(uint256 dataSetId, uint256, bytes calldata extraData) external override { + extraDataBySetId[dataSetId][PDPRecordKeeper.OperationType.DELETE] = extraData; + } + + function piecesAdded(uint256 dataSetId, uint256, Cids.Cid[] calldata, bytes calldata extraData) external override { + extraDataBySetId[dataSetId][PDPRecordKeeper.OperationType.ADD] = extraData; + } + + function piecesScheduledRemove(uint256 dataSetId, uint256[] calldata, bytes calldata extraData) external override { + extraDataBySetId[dataSetId][PDPRecordKeeper.OperationType.REMOVE_SCHEDULED] = extraData; + } + + function possessionProven(uint256, uint256, uint256, uint256) external override {} + + function nextProvingPeriod(uint256 dataSetId, uint256, uint256, bytes calldata extraData) external override { + extraDataBySetId[dataSetId][PDPRecordKeeper.OperationType.NEXT_PROVING_PERIOD] = extraData; + } + + function getExtraData(uint256 dataSetId, PDPRecordKeeper.OperationType opType) + external + view + returns (bytes memory) + { + return extraDataBySetId[dataSetId][opType]; + } +} + +contract PDPVerifierExtraDataTest is Test, PieceHelper { + PDPVerifier pdpVerifier; + ExtraDataListener extraDataListener; + uint256 constant CHALLENGE_FINALITY_DELAY = 2; + bytes empty = new bytes(0); + + function setUp() public { + PDPVerifier pdpVerifierImpl = new PDPVerifier(); + bytes memory initializeData = 
abi.encodeWithSelector(PDPVerifier.initialize.selector, CHALLENGE_FINALITY_DELAY); + MyERC1967Proxy proxy = new MyERC1967Proxy(address(pdpVerifierImpl), initializeData); + pdpVerifier = PDPVerifier(address(proxy)); + extraDataListener = new ExtraDataListener(); + } + + function testExtraDataPropagation() public { + // Test CREATE operation + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(extraDataListener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + assertEq( + extraDataListener.getExtraData(setId, PDPRecordKeeper.OperationType.CREATE), + empty, + "Extra data not propagated for CREATE" + ); + + // Test ADD operation + Cids.Cid[] memory pieces = new Cids.Cid[](1); + pieces[0] = makeSamplePiece(1); + pdpVerifier.addPieces(setId, address(0), pieces, empty); + assertEq( + extraDataListener.getExtraData(setId, PDPRecordKeeper.OperationType.ADD), + empty, + "Extra data not propagated for ADD" + ); + + // Test REMOVE_SCHEDULED operation + uint256[] memory pieceIds = new uint256[](1); + pieceIds[0] = 0; + pdpVerifier.schedulePieceDeletions(setId, pieceIds, empty); + assertEq( + extraDataListener.getExtraData(setId, PDPRecordKeeper.OperationType.REMOVE_SCHEDULED), + empty, + "Extra data not propagated for REMOVE_SCHEDULED" + ); + + // Test NEXT_PROVING_PERIOD operation + pdpVerifier.nextProvingPeriod(setId, vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY, empty); + assertEq( + extraDataListener.getExtraData(setId, PDPRecordKeeper.OperationType.NEXT_PROVING_PERIOD), + empty, + "Extra data not propagated for NEXT_PROVING_PERIOD" + ); + } +} + +contract PDPVerifierE2ETest is Test, ProofBuilderHelper, PieceHelper { + PDPVerifier pdpVerifier; + TestingRecordKeeperService listener; + uint256 constant CHALLENGE_FINALITY_DELAY = 2; + bytes empty = new bytes(0); + + function setUp() public { + PDPVerifier pdpVerifierImpl = new PDPVerifier(); + bytes memory initializeData = abi.encodeWithSelector(PDPVerifier.initialize.selector, CHALLENGE_FINALITY_DELAY); + MyERC1967Proxy proxy = new MyERC1967Proxy(address(pdpVerifierImpl), initializeData); + pdpVerifier = PDPVerifier(address(proxy)); + listener = new TestingRecordKeeperService(); + vm.fee(1 gwei); + vm.deal(address(pdpVerifierImpl), 100 ether); + } + + receive() external payable {} + + function createPythCallData() internal view returns (bytes memory, PythStructs.Price memory) { + bytes memory pythCallData = + abi.encodeWithSelector(IPyth.getPriceUnsafe.selector, pdpVerifier.FIL_USD_PRICE_FEED_ID()); + + PythStructs.Price memory price = PythStructs.Price({price: 5, conf: 0, expo: 0, publishTime: 0}); + + return (pythCallData, price); + } + + function createPythAncientCallData() internal view returns (bytes memory, PythStructs.Price memory) { + bytes memory callData = + abi.encodeWithSelector(IPyth.getPriceUnsafe.selector, pdpVerifier.FIL_USD_PRICE_FEED_ID()); + + PythStructs.Price memory price = PythStructs.Price({price: 6, conf: 0, expo: 0, publishTime: 0}); + + return (callData, price); + } + + function testGetOldPrice() public { + (bytes memory pythFallbackCallData, PythStructs.Price memory price) = createPythAncientCallData(); + vm.mockCall(address(pdpVerifier.PYTH()), pythFallbackCallData, abi.encode(price)); + + (uint64 priceOut, int32 expoOut) = pdpVerifier.getFILUSDPrice(); + assertEq(priceOut, uint64(6), "Price should be 6"); + assertEq(expoOut, int32(0), "Expo should be 0"); + } + + function testCompleteProvingPeriodE2E() public { + // Mock Pyth oracle call to return $5 USD/FIL + (bytes 
memory pythCallData, PythStructs.Price memory price) = createPythCallData(); + vm.mockCall(address(pdpVerifier.PYTH()), pythCallData, abi.encode(price)); + + // Step 1: Create a data set + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + + // Step 2: Add data `A` in scope for the first proving period + // Note that the data in the first addPieces call is added to the first proving period + uint256[] memory leafCountsA = new uint256[](2); + leafCountsA[0] = 2; + leafCountsA[1] = 3; + bytes32[][][] memory treesA = new bytes32[][][](2); + for (uint256 i = 0; i < leafCountsA.length; i++) { + treesA[i] = ProofUtil.makeTree(leafCountsA[i]); + } + + Cids.Cid[] memory piecesProofPeriod1 = new Cids.Cid[](2); + piecesProofPeriod1[0] = makePiece(treesA[0], leafCountsA[0]); + piecesProofPeriod1[1] = makePiece(treesA[1], leafCountsA[1]); + pdpVerifier.addPieces(setId, address(0), piecesProofPeriod1, empty); + // flush the original addPieces call + pdpVerifier.nextProvingPeriod(setId, vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY, empty); + + uint256 challengeRangeProofPeriod1 = pdpVerifier.getChallengeRange(setId); + assertEq( + challengeRangeProofPeriod1, + pdpVerifier.getDataSetLeafCount(setId), + "Challenge range should equal the total leaf count" + ); + + // Step 3: Now that the first challenge is set for sampling, add more data `B`, in scope only for the second proving period + uint256[] memory leafCountsB = new uint256[](2); + leafCountsB[0] = 4; + leafCountsB[1] = 5; + bytes32[][][] memory treesB = new bytes32[][][](2); + for (uint256 i = 0; i < leafCountsB.length; i++) { + treesB[i] = ProofUtil.makeTree(leafCountsB[i]); + } + + Cids.Cid[] memory piecesProvingPeriod2 = new Cids.Cid[](2); + piecesProvingPeriod2[0] = makePiece(treesB[0], leafCountsB[0]); + piecesProvingPeriod2[1] = makePiece(treesB[1], leafCountsB[1]); + pdpVerifier.addPieces(setId, address(0), piecesProvingPeriod2, empty); + + assertEq( + pdpVerifier.getPieceLeafCount(setId, 0), + leafCountsA[0], + "sanity check: First piece leaf count should be correct" + ); + assertEq(pdpVerifier.getPieceLeafCount(setId, 1), leafCountsA[1], "Second piece leaf count should be correct"); + assertEq(pdpVerifier.getPieceLeafCount(setId, 2), leafCountsB[0], "Third piece leaf count should be correct"); + assertEq(pdpVerifier.getPieceLeafCount(setId, 3), leafCountsB[1], "Fourth piece leaf count should be correct"); + + // CHECK: last challenged leaf doesn't move + assertEq( + pdpVerifier.getChallengeRange(setId), challengeRangeProofPeriod1, "Last challenged leaf should not move" + ); + assertEq( + pdpVerifier.getDataSetLeafCount(setId), + leafCountsA[0] + leafCountsA[1] + leafCountsB[0] + leafCountsB[1], + "Leaf count should include all added pieces" + ); + + // Step 4: schedule removal of first + second proving period data + uint256[] memory piecesToRemove = new uint256[](2); + piecesToRemove[0] = 1; // Remove the second piece from first proving period + piecesToRemove[1] = 3; // Remove the second piece from second proving period + pdpVerifier.schedulePieceDeletions(setId, piecesToRemove, empty); + assertEq( + pdpVerifier.getScheduledRemovals(setId), piecesToRemove, "Scheduled removals should match piecesToRemove" + ); + + // Step 5: complete proving period 1. + // Advance chain until challenge epoch. + vm.roll(pdpVerifier.getNextChallengeEpoch(setId)); + // Prepare proofs.
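+ // The randomness precompile is mocked below to echo back the requested epoch, so the challenge seed is deterministic and equal to the challenge epoch (the seed ProofBuilderHelper.buildProofs assumes).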
+ // Proving trees for ProofPeriod1 are just treesA + IPDPTypes.Proof[] memory proofsProofPeriod1 = buildProofs(pdpVerifier, setId, 5, treesA, leafCountsA); + + vm.mockCall( + pdpVerifier.RANDOMNESS_PRECOMPILE(), + abi.encode(pdpVerifier.getNextChallengeEpoch(setId)), + abi.encode(pdpVerifier.getNextChallengeEpoch(setId)) + ); + + pdpVerifier.provePossession{value: 1e18}(setId, proofsProofPeriod1); + + pdpVerifier.nextProvingPeriod(setId, vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY, empty); + // CHECK: leaf counts + assertEq( + pdpVerifier.getPieceLeafCount(setId, 0), + leafCountsA[0], + "First piece leaf count should be the set leaf count" + ); + assertEq(pdpVerifier.getPieceLeafCount(setId, 1), 0, "Second piece leaf count should be zeroed after removal"); + assertEq( + pdpVerifier.getPieceLeafCount(setId, 2), + leafCountsB[0], + "Third piece leaf count should be the set leaf count" + ); + assertEq(pdpVerifier.getPieceLeafCount(setId, 3), 0, "Fourth piece leaf count should be zeroed after removal"); + assertEq( + pdpVerifier.getDataSetLeafCount(setId), + leafCountsA[0] + leafCountsB[0], + "Leaf count should == size of non-removed pieces" + ); + assertEq( + pdpVerifier.getChallengeRange(setId), + leafCountsA[0] + leafCountsB[0], + "Last challenged leaf should be total leaf count" + ); + + // CHECK: scheduled removals are processed + assertEq(pdpVerifier.getScheduledRemovals(setId), new uint256[](0), "Scheduled removals should be processed"); + + // CHECK: the next challenge epoch has been updated + assertEq( + pdpVerifier.getNextChallengeEpoch(setId), + vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY, + "Next challenge epoch should be updated" + ); + } +} + +contract PDPVerifierMigrateTest is Test { + PDPVerifier implementation; + PDPVerifier newImplementation; + MyERC1967Proxy proxy; + + function setUp() public { + bytes memory initializeData = abi.encodeWithSelector(PDPVerifier.initialize.selector, 2); + implementation = new PDPVerifier(); + newImplementation = new PDPVerifier(); + proxy = new MyERC1967Proxy(address(implementation), initializeData); + } + + function testMigrate() public { + vm.expectEmit(true, true, true, true); + emit IPDPEvents.ContractUpgraded(newImplementation.VERSION(), address(newImplementation)); + bytes memory migrationCall = abi.encodeWithSelector(PDPVerifier.migrate.selector); + UUPSUpgradeable(address(proxy)).upgradeToAndCall(address(newImplementation), migrationCall); + // Second call should fail because reinitializer(2) can only be called once + vm.expectRevert("InvalidInitialization()"); + UUPSUpgradeable(address(proxy)).upgradeToAndCall(address(newImplementation), migrationCall); + } +} + +contract MockStorageProviderChangedListener is PDPListener { + uint256 public lastDataSetId; + address public lastOldStorageProvider; + address public lastNewStorageProvider; + bytes public lastExtraData; + bool public shouldRevert; + + function setShouldRevert(bool value) external { + shouldRevert = value; + } + + function storageProviderChanged( + uint256 dataSetId, + address oldStorageProvider, + address newStorageProvider, + bytes calldata extraData + ) external override { + if (shouldRevert) revert("MockStorageProviderChangedListener: forced revert"); + lastDataSetId = dataSetId; + lastOldStorageProvider = oldStorageProvider; + lastNewStorageProvider = newStorageProvider; + lastExtraData = extraData; + } + + function dataSetCreated(uint256, address, bytes calldata) external override {} + function dataSetDeleted(uint256, uint256, bytes calldata) external override 
{} + function piecesAdded(uint256, uint256, Cids.Cid[] calldata, bytes calldata) external override {} + function piecesScheduledRemove(uint256, uint256[] calldata, bytes calldata) external override {} + function possessionProven(uint256, uint256, uint256, uint256) external override {} + function nextProvingPeriod(uint256, uint256, uint256, bytes calldata) external override {} +} + +contract PDPVerifierStorageProviderListenerTest is Test { + PDPVerifier pdpVerifier; + MockStorageProviderChangedListener listener; + address public storageProvider; + address public nextStorageProvider; + address public nonStorageProvider; + bytes empty = new bytes(0); + + function setUp() public { + PDPVerifier pdpVerifierImpl = new PDPVerifier(); + bytes memory initializeData = abi.encodeWithSelector(PDPVerifier.initialize.selector, 2); + MyERC1967Proxy proxy = new MyERC1967Proxy(address(pdpVerifierImpl), initializeData); + pdpVerifier = PDPVerifier(address(proxy)); + listener = new MockStorageProviderChangedListener(); + storageProvider = address(this); + nextStorageProvider = address(0x1234); + nonStorageProvider = address(0xffff); + } + + function testStorageProviderChangedCalledOnStorageProviderTransfer() public { + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + pdpVerifier.proposeDataSetStorageProvider(setId, nextStorageProvider); + vm.prank(nextStorageProvider); + pdpVerifier.claimDataSetStorageProvider(setId, empty); + assertEq(listener.lastDataSetId(), setId, "Data set ID mismatch"); + assertEq(listener.lastOldStorageProvider(), storageProvider, "Old storage provider mismatch"); + assertEq(listener.lastNewStorageProvider(), nextStorageProvider, "New storage provider mismatch"); + } + + function testListenerRevertRevertsMainTx() public { + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + pdpVerifier.proposeDataSetStorageProvider(setId, nextStorageProvider); + listener.setShouldRevert(true); + vm.prank(nextStorageProvider); + vm.expectRevert("MockStorageProviderChangedListener: forced revert"); + pdpVerifier.claimDataSetStorageProvider(setId, empty); + } +} diff --git a/service_contracts/test/pdp/PDPVerifierProofTest.t.sol b/service_contracts/test/pdp/PDPVerifierProofTest.t.sol new file mode 100644 index 00000000..7e96e08b --- /dev/null +++ b/service_contracts/test/pdp/PDPVerifierProofTest.t.sol @@ -0,0 +1,499 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.13; + +import {IPyth} from "@pythnetwork/pyth-sdk-solidity/IPyth.sol"; +import {PythStructs} from "@pythnetwork/pyth-sdk-solidity/PythStructs.sol"; +import {Test} from "forge-std/Test.sol"; +import {Cids} from "@pdp/Cids.sol"; +import {PDPVerifier} from "@pdp/PDPVerifier.sol"; +import {MyERC1967Proxy} from "@pdp/ERC1967Proxy.sol"; +import {ProofUtil} from "./ProofUtil.sol"; +import {PDPFees} from "@pdp/Fees.sol"; +import {IPDPTypes} from "@pdp/interfaces/IPDPTypes.sol"; +import {IPDPEvents} from "@pdp/interfaces/IPDPEvents.sol"; +import {PieceHelper} from "./PieceHelper.t.sol"; +import {ProofBuilderHelper} from "./ProofBuilderHelper.t.sol"; +import {TestingRecordKeeperService} from "./PDPVerifier.t.sol"; +import {NEW_DATA_SET_SENTINEL} from "@pdp/PDPVerifier.sol"; + +contract PDPVerifierProofTest is Test, ProofBuilderHelper, PieceHelper { + uint256 constant CHALLENGE_FINALITY_DELAY = 2; + bytes
empty = new bytes(0); + PDPVerifier pdpVerifier; + TestingRecordKeeperService listener; + + function setUp() public { + PDPVerifier pdpVerifierImpl = new PDPVerifier(); + bytes memory initializeData = abi.encodeWithSelector(PDPVerifier.initialize.selector, CHALLENGE_FINALITY_DELAY); + MyERC1967Proxy proxy = new MyERC1967Proxy(address(pdpVerifierImpl), initializeData); + pdpVerifier = PDPVerifier(address(proxy)); + listener = new TestingRecordKeeperService(); + vm.fee(1 wei); + vm.deal(address(pdpVerifierImpl), 100 ether); + } + + function createPythCallData() internal view returns (bytes memory, PythStructs.Price memory) { + bytes memory pythCallData = + abi.encodeWithSelector(IPyth.getPriceUnsafe.selector, pdpVerifier.FIL_USD_PRICE_FEED_ID()); + + PythStructs.Price memory price = PythStructs.Price({price: 5, conf: 0, expo: 0, publishTime: 0}); + + return (pythCallData, price); + } + + function testProveSinglePiece() public { + // Mock Pyth oracle call to return $5 USD/FIL + (bytes memory pythCallData, PythStructs.Price memory price) = createPythCallData(); + vm.mockCall(address(pdpVerifier.PYTH()), pythCallData, abi.encode(price)); + + uint256 leafCount = 10; + (uint256 setId, bytes32[][] memory tree) = makeDataSetWithOnePiece(leafCount); + + // Advance chain until challenge epoch. + uint256 challengeEpoch = pdpVerifier.getNextChallengeEpoch(setId); + vm.roll(challengeEpoch); + + // Build a proof with multiple challenges to single tree. + uint256 challengeCount = 3; + IPDPTypes.Proof[] memory proofs = buildProofsForSingleton(setId, challengeCount, tree, leafCount); + + // Submit proof. + vm.mockCall(pdpVerifier.RANDOMNESS_PRECOMPILE(), abi.encode(challengeEpoch), abi.encode(challengeEpoch)); + vm.expectEmit(true, true, false, false); + IPDPTypes.PieceIdAndOffset[] memory challenges = new IPDPTypes.PieceIdAndOffset[](challengeCount); + for (uint256 i = 0; i < challengeCount; i++) { + challenges[i] = IPDPTypes.PieceIdAndOffset(0, 0); + } + emit IPDPEvents.PossessionProven(setId, challenges); + pdpVerifier.provePossession{value: 1e18}(setId, proofs); + + // The next challenge epoch is unchanged by provePossession. + assertEq(pdpVerifier.getNextChallengeEpoch(setId), challengeEpoch); + + // Verify the next challenge is in a subsequent epoch after nextProvingPeriod + pdpVerifier.nextProvingPeriod(setId, vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY, empty); + + assertEq(pdpVerifier.getNextChallengeEpoch(setId), vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY); + } + + receive() external payable {} + + event Debug(string message, uint256 value); + + function testProveWithDifferentFeeAmounts() public { + vm.fee(0 gwei); + // Mock Pyth oracle call to return $5 USD/FIL + (bytes memory pythCallData, PythStructs.Price memory price) = createPythCallData(); + price.price = 1; + vm.mockCall(address(pdpVerifier.PYTH()), pythCallData, abi.encode(price)); + + address sender = makeAddr("sender"); + vm.deal(sender, 1000 ether); + vm.startPrank(sender); + + uint256 leafCount = 10; + (uint256 setId, bytes32[][] memory tree) = makeDataSetWithOnePiece(leafCount); + + // Advance chain until challenge epoch. + uint256 challengeEpoch = pdpVerifier.getNextChallengeEpoch(setId); + vm.roll(challengeEpoch); + + vm.mockCall(pdpVerifier.RANDOMNESS_PRECOMPILE(), abi.encode(challengeEpoch), abi.encode(challengeEpoch)); + + // Build a proof with multiple challenges to single tree.
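+ // The exact proof fee depends on the gas fee and the mocked FIL/USD price, so rather than hard-coding it the test measures the fee empirically below via a state snapshot, then checks under- and over-payment against it.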
+ uint256 challengeCount = 3; + IPDPTypes.Proof[] memory proofs = buildProofsForSingleton(setId, challengeCount, tree, leafCount); + + // Mock block.number to 2881 + vm.roll(2881); + + // Determine the correct fee. + uint256 correctFee; + { + uint256 snapshotId = vm.snapshotState(); + uint256 balanceBefore = sender.balance; + pdpVerifier.provePossession{value: sender.balance}(setId, proofs); + uint256 balanceAfter = sender.balance; + correctFee = balanceBefore - balanceAfter; + vm.revertToStateAndDelete(snapshotId); + } + + // Test 1: Sending less than the required fee + vm.expectRevert("Incorrect fee amount"); + pdpVerifier.provePossession{value: correctFee - 1}(setId, proofs); + + // Test 2: Sending more than the required fee + vm.mockCall(pdpVerifier.RANDOMNESS_PRECOMPILE(), abi.encode(challengeEpoch), abi.encode(challengeEpoch)); + pdpVerifier.provePossession{value: correctFee + 1}(setId, proofs); + + // Verify that the proof was accepted + assertEq( + pdpVerifier.getNextChallengeEpoch(setId), + challengeEpoch, + "Next challenge epoch should remain unchanged after prove" + ); + } + + function testDataSetLastProvenEpochOnPieceRemoval() public { + // Create a data set and verify initial lastProvenEpoch is 0 + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + assertEq(pdpVerifier.getDataSetLastProvenEpoch(setId), 0, "Initial lastProvenEpoch should be 0"); + + // Mock block.number to 2881 + uint256 blockNumber = 2881; + vm.roll(blockNumber); + // Add a piece and verify lastProvenEpoch is set to current block number + Cids.Cid[] memory pieces = new Cids.Cid[](1); + pieces[0] = makeSamplePiece(2); + + pdpVerifier.addPieces(setId, address(0), pieces, empty); + pdpVerifier.nextProvingPeriod(setId, blockNumber + CHALLENGE_FINALITY_DELAY, empty); + assertEq( + pdpVerifier.getDataSetLastProvenEpoch(setId), + blockNumber, + "lastProvenEpoch should be set to block.number when the first proving period starts" + ); + + // Schedule piece removal + uint256[] memory piecesToRemove = new uint256[](1); + piecesToRemove[0] = 0; + pdpVerifier.schedulePieceDeletions(setId, piecesToRemove, empty); + + // Call nextProvingPeriod and verify lastProvenEpoch is reset to 0 + pdpVerifier.nextProvingPeriod(setId, blockNumber + CHALLENGE_FINALITY_DELAY, empty); + assertEq( + pdpVerifier.getDataSetLastProvenEpoch(setId), + 0, + "lastProvenEpoch should be reset to 0 after removing last piece" + ); + } + + function testLateProofAccepted() public { + // Mock Pyth oracle call to return $5 USD/FIL + (bytes memory pythCallData, PythStructs.Price memory price) = createPythCallData(); + vm.mockCall(address(pdpVerifier.PYTH()), pythCallData, abi.encode(price)); + + uint256 leafCount = 10; + (uint256 setId, bytes32[][] memory tree) = makeDataSetWithOnePiece(leafCount); + + // Advance chain well past the challenge epoch + uint256 challengeEpoch = pdpVerifier.getNextChallengeEpoch(setId); + vm.roll(challengeEpoch + 100); + + // Build a proof. + IPDPTypes.Proof[] memory proofs = buildProofsForSingleton(setId, 3, tree, leafCount); + + // Submit proof.
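+ // A proof landing 100 epochs after the challenge epoch is still accepted; lateness alone does not invalidate it.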
+ vm.mockCall(pdpVerifier.RANDOMNESS_PRECOMPILE(), abi.encode(challengeEpoch), abi.encode(challengeEpoch)); + pdpVerifier.provePossession{value: 1e18}(setId, proofs); + } + + function testProvePossessionSmall() public { + // Mock Pyth oracle call to return $5 USD/FIL + (bytes memory pythCallData, PythStructs.Price memory price) = createPythCallData(); + vm.mockCall(address(pdpVerifier.PYTH()), pythCallData, abi.encode(price)); + + uint256 leafCount = 3; + (uint256 setId, bytes32[][] memory tree) = makeDataSetWithOnePiece(leafCount); + + // Advance chain to the challenge epoch + uint256 challengeEpoch = pdpVerifier.getNextChallengeEpoch(setId); + vm.roll(challengeEpoch); + + // Build a proof. + IPDPTypes.Proof[] memory proofs = buildProofsForSingleton(setId, 3, tree, leafCount); + + // Submit proof. + vm.mockCall(pdpVerifier.RANDOMNESS_PRECOMPILE(), abi.encode(challengeEpoch), abi.encode(challengeEpoch)); + pdpVerifier.provePossession{value: 1e18}(setId, proofs); + } + + function testEarlyProofRejected() public { + uint256 leafCount = 10; + (uint256 setId, bytes32[][] memory tree) = makeDataSetWithOnePiece(leafCount); + + // Advance chain short of challenge epoch + uint256 challengeEpoch = pdpVerifier.getNextChallengeEpoch(setId); + vm.roll(challengeEpoch - 1); + + // Build a proof. + IPDPTypes.Proof[] memory proofs = buildProofsForSingleton(setId, 3, tree, leafCount); + + // Submit proof. + vm.expectRevert(); + pdpVerifier.provePossession{value: 1e18}(setId, proofs); + } + + function testProvePossessionFailsWithNoScheduledChallenge() public { + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + Cids.Cid[] memory pieces = new Cids.Cid[](1); + pieces[0] = makeSamplePiece(2); + pdpVerifier.addPieces(setId, address(0), pieces, empty); + + // Don't sample a challenge (i.e. don't call nextProvingPeriod) + + // Create a dummy proof + IPDPTypes.Proof[] memory proofs = new IPDPTypes.Proof[](1); + proofs[0].leaf = bytes32(0); + proofs[0].proof = new bytes32[](1); + proofs[0].proof[0] = bytes32(0); + + // Try to prove possession without scheduling a challenge + // This should fail because nextChallengeEpoch is still NO_CHALLENGE_SCHEDULED (0) + vm.expectRevert("no challenge scheduled"); + pdpVerifier.provePossession{value: 1 ether}(setId, proofs); + } + + function testEmptyProofRejected() public { + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + IPDPTypes.Proof[] memory emptyProof = new IPDPTypes.Proof[](0); + + // Rejected with no pieces + vm.expectRevert(); + pdpVerifier.provePossession{value: 1e18}(setId, emptyProof); + + addOnePiece(setId, 10); + + // Rejected with a piece + vm.expectRevert(); + pdpVerifier.provePossession{value: 1e18}(setId, emptyProof); + } + + function testBadChallengeRejected() public { + uint256 leafCount = 10; + (uint256 setId, bytes32[][] memory tree) = makeDataSetWithOnePiece(leafCount); + + // Make a proof that's good for this challenge epoch. + uint256 challengeEpoch = pdpVerifier.getNextChallengeEpoch(setId); + vm.roll(challengeEpoch); + IPDPTypes.Proof[] memory proofs = buildProofsForSingleton(setId, 3, tree, leafCount); + + // Submit proof successfully, advancing the data set to a new challenge epoch.
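+ // After proving, nextProvingPeriod resamples the challenge epoch, invalidating proofs built for the old seed.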
+ vm.mockCall(pdpVerifier.RANDOMNESS_PRECOMPILE(), abi.encode(challengeEpoch), abi.encode(challengeEpoch)); + // Mock Pyth oracle call to return $5 USD/FIL + (bytes memory pythCallData, PythStructs.Price memory price) = createPythCallData(); + vm.mockCall(address(pdpVerifier.PYTH()), pythCallData, abi.encode(price)); + + pdpVerifier.provePossession{value: 1e18}(setId, proofs); + pdpVerifier.nextProvingPeriod(setId, vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY, empty); // resample + + uint256 nextChallengeEpoch = pdpVerifier.getNextChallengeEpoch(setId); + assertNotEq(nextChallengeEpoch, challengeEpoch); + vm.roll(nextChallengeEpoch); + + // The proof for the old challenge epoch should no longer be valid. + vm.expectRevert(); + pdpVerifier.provePossession{value: 1e18}(setId, proofs); + } + + function testBadPiecesRejected() public { + // Mock Pyth oracle call to return $5 USD/FIL + (bytes memory pythCallData, PythStructs.Price memory price) = createPythCallData(); + vm.mockCall(address(pdpVerifier.PYTH()), pythCallData, abi.encode(price)); + + uint256[] memory leafCounts = new uint256[](2); + // Note: either co-prime leaf counts or a challenge count > 1 is required for this test to demonstrate the failing proof. + // With a challenge count == 1 and leaf counts e.g. 10 and 20 it just so happens that the first computed challenge index is the same + // (lying in the first piece) whether the tree has one or two pieces. + // This could be prevented if the challenge index calculation included some marker of data set contents, like + // a hash of all the pieces or an edit sequence number. + leafCounts[0] = 7; + leafCounts[1] = 13; + bytes32[][][] memory trees = new bytes32[][][](2); + // Make data set initially with one piece. + (uint256 setId, bytes32[][] memory tree) = makeDataSetWithOnePiece(leafCounts[0]); + trees[0] = tree; + // Add another piece before submitting the proof. + uint256 newPieceId; + (trees[1], newPieceId) = addOnePiece(setId, leafCounts[1]); + + // Make a proof that's good for the single piece. + uint256 challengeEpoch = pdpVerifier.getNextChallengeEpoch(setId); + vm.roll(challengeEpoch); + IPDPTypes.Proof[] memory proofsOneRoot = buildProofsForSingleton(setId, 3, trees[0], leafCounts[0]); + + // The proof for one piece should be invalid against the set with two. + vm.mockCall(pdpVerifier.RANDOMNESS_PRECOMPILE(), abi.encode(challengeEpoch), abi.encode(challengeEpoch)); + vm.expectRevert(); + pdpVerifier.provePossession{value: 1e18}(setId, proofsOneRoot); + + // Remove a piece and resample + uint256[] memory removePieces = new uint256[](1); + removePieces[0] = newPieceId; + pdpVerifier.schedulePieceDeletions(setId, removePieces, empty); + // flush removes + pdpVerifier.nextProvingPeriod(setId, vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY, empty); + + // Build a proof as if both pieces were still present (removal forced resampling of the challenge seed). + challengeEpoch = pdpVerifier.getNextChallengeEpoch(setId); + vm.roll(challengeEpoch); + IPDPTypes.Proof[] memory proofsTwoRoots = buildProofs(pdpVerifier, setId, 10, trees, leafCounts); + + // A proof for two pieces should be invalid against the set with one. + vm.mockCall(pdpVerifier.RANDOMNESS_PRECOMPILE(), abi.encode(challengeEpoch), abi.encode(challengeEpoch)); + vm.expectRevert(); + pdpVerifier.provePossession{value: 1e18}(setId, proofsTwoRoots); + + // But the single piece proof is now good again.
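+ // Once the removal is flushed the set again holds only the first piece, so a freshly built singleton proof verifies.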
+ proofsOneRoot = buildProofsForSingleton(setId, 1, trees[0], leafCounts[0]); // regen as removal forced resampling challenge seed + vm.mockCall(pdpVerifier.RANDOMNESS_PRECOMPILE(), abi.encode(challengeEpoch), abi.encode(challengeEpoch)); + pdpVerifier.provePossession{value: 1e18}(setId, proofsOneRoot); + } + + function testProveManyPieces() public { + // Mock Pyth oracle call to return $5 USD/FIL + (bytes memory pythCallData, PythStructs.Price memory price) = createPythCallData(); + vm.mockCall(address(pdpVerifier.PYTH()), pythCallData, abi.encode(price)); + + uint256[] memory leafCounts = new uint256[](3); + // Pick a distinct size for each tree (up to some small maximum size). + for (uint256 i = 0; i < leafCounts.length; i++) { + leafCounts[i] = uint256(sha256(abi.encode(i))) % 64; + } + + (uint256 setId, bytes32[][][] memory trees) = makeDataSetWithPieces(leafCounts); + + // Advance chain until challenge epoch. + uint256 challengeEpoch = pdpVerifier.getNextChallengeEpoch(setId); + vm.roll(challengeEpoch); + + // Build a proof with multiple challenges to span the pieces. + uint256 challengeCount = 11; + IPDPTypes.Proof[] memory proofs = buildProofs(pdpVerifier, setId, challengeCount, trees, leafCounts); + // Submit proof. + vm.mockCall(pdpVerifier.RANDOMNESS_PRECOMPILE(), abi.encode(challengeEpoch), abi.encode(challengeEpoch)); + pdpVerifier.provePossession{value: 1e18}(setId, proofs); + } + + function testNextProvingPeriodFlexibleScheduling() public { + // Mock Pyth oracle call to return $5 USD/FIL + (bytes memory pythCallData, PythStructs.Price memory price) = createPythCallData(); + vm.mockCall(address(pdpVerifier.PYTH()), pythCallData, abi.encode(price)); + + // Create data set and add initial piece + uint256 leafCount = 10; + (uint256 setId, bytes32[][] memory tree) = makeDataSetWithOnePiece(leafCount); + + // Set challenge sampling far in the future + uint256 farFutureBlock = vm.getBlockNumber() + 1000; + pdpVerifier.nextProvingPeriod(setId, farFutureBlock, empty); + assertEq( + pdpVerifier.getNextChallengeEpoch(setId), farFutureBlock, "Challenge epoch should be set to far future" + ); + + // Reset to a closer block + uint256 nearerBlock = vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY; + pdpVerifier.nextProvingPeriod(setId, nearerBlock, empty); + assertEq( + pdpVerifier.getNextChallengeEpoch(setId), nearerBlock, "Challenge epoch should be reset to nearer block" + ); + + // Verify we can still prove possession at the new block + vm.roll(nearerBlock); + + IPDPTypes.Proof[] memory proofs = buildProofsForSingleton(setId, 5, tree, 10); + vm.mockCall( + pdpVerifier.RANDOMNESS_PRECOMPILE(), + abi.encode(pdpVerifier.getNextChallengeEpoch(setId)), + abi.encode(pdpVerifier.getNextChallengeEpoch(setId)) + ); + pdpVerifier.provePossession{value: 1e18}(setId, proofs); + } + + function testProveSingleFake() public { + // Mock Pyth oracle call to return $5 USD/FIL + (bytes memory pythCallData, PythStructs.Price memory price) = createPythCallData(); + vm.mockCall(address(pdpVerifier.PYTH()), pythCallData, abi.encode(price)); + + uint256 leafCount = 10; + (uint256 setId, bytes32[][] memory tree) = makeDataSetWithOnePiece(leafCount); + + // Advance chain until challenge epoch. 
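+ // The fake proofs built below reuse the tree root as the claimed leaf with an empty Merkle path, so verification fails the tree-height check.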
+ uint256 challengeEpoch = pdpVerifier.getNextChallengeEpoch(setId); + vm.roll(challengeEpoch); + + // build fake proofs + IPDPTypes.Proof[] memory proofs = new IPDPTypes.Proof[](5); + for (uint256 i = 0; i < 5; i++) { + proofs[i] = IPDPTypes.Proof(tree[0][0], new bytes32[](0)); + } + + // Submit proof. + vm.mockCall(pdpVerifier.RANDOMNESS_PRECOMPILE(), abi.encode(challengeEpoch), abi.encode(challengeEpoch)); + vm.expectRevert("proof length does not match tree height"); + pdpVerifier.provePossession{value: 1e18}(setId, proofs); + } + + ///// Helpers ///// + + // Initializes a new data set, generates trees of specified sizes, and adds pieces to the set. + function makeDataSetWithPieces(uint256[] memory leafCounts) internal returns (uint256, bytes32[][][] memory) { + // Create trees and their pieces. + bytes32[][][] memory trees = new bytes32[][][](leafCounts.length); + Cids.Cid[] memory pieces = new Cids.Cid[](leafCounts.length); + for (uint256 i = 0; i < leafCounts.length; i++) { + // Generate a tree for each piece; counts below 4 get a minimum tree of 4 leaves. + if (leafCounts[i] < 4) { + trees[i] = ProofUtil.makeTree(4); + pieces[i] = makePieceBytes(trees[i], leafCounts[i] * 32); + } else { + trees[i] = ProofUtil.makeTree(leafCounts[i]); + pieces[i] = makePiece(trees[i], leafCounts[i]); + } + } + + // Create new data set and add pieces. + uint256 setId = pdpVerifier.addPieces{value: PDPFees.sybilFee()}( + NEW_DATA_SET_SENTINEL, address(listener), new Cids.Cid[](0), abi.encode(empty, empty) + ); + pdpVerifier.addPieces(setId, address(0), pieces, empty); + pdpVerifier.nextProvingPeriod(setId, vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY, empty); // flush adds + return (setId, trees); + } + + // Initializes a new data set and adds a single generated tree. + function makeDataSetWithOnePiece(uint256 leafCount) internal returns (uint256, bytes32[][] memory) { + uint256[] memory leafCounts = new uint256[](1); + leafCounts[0] = leafCount; + (uint256 setId, bytes32[][][] memory trees) = makeDataSetWithPieces(leafCounts); + return (setId, trees[0]); + } + + // Creates a tree and adds it to a data set. + // Returns the Merkle tree and piece ID. + function addOnePiece(uint256 setId, uint256 leafCount) internal returns (bytes32[][] memory, uint256) { + bytes32[][] memory tree = ProofUtil.makeTree(leafCount); + Cids.Cid[] memory pieces = new Cids.Cid[](1); + pieces[0] = makePiece(tree, leafCount); + uint256 pieceId = pdpVerifier.addPieces(setId, address(0), pieces, empty); + pdpVerifier.nextProvingPeriod(setId, vm.getBlockNumber() + CHALLENGE_FINALITY_DELAY, empty); // flush adds + return (tree, pieceId); + } + + // Builds a proof of possession for a data set with a single piece.
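+ // Wraps the single tree and its leaf count in singleton arrays so the general buildProofs helper can be reused.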
+ function buildProofsForSingleton(uint256 setId, uint256 challengeCount, bytes32[][] memory tree, uint256 leafCount) + internal + view + returns (IPDPTypes.Proof[] memory) + { + bytes32[][][] memory trees = new bytes32[][][](1); + trees[0] = tree; + uint256[] memory leafCounts = new uint256[](1); + leafCounts[0] = leafCount; + IPDPTypes.Proof[] memory proofs = buildProofs(pdpVerifier, setId, challengeCount, trees, leafCounts); + return proofs; + } +} diff --git a/service_contracts/test/pdp/PieceHelper.t.sol b/service_contracts/test/pdp/PieceHelper.t.sol new file mode 100644 index 00000000..96a942fb --- /dev/null +++ b/service_contracts/test/pdp/PieceHelper.t.sol @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.13; + +import {Test, console} from "forge-std/Test.sol"; +import {Cids} from "@pdp/Cids.sol"; +import {BitOps} from "@pdp/BitOps.sol"; + +contract PieceHelper is Test { + // Constructs a CommPv2 piece CID committing to a Merkle tree. + function makePiece(bytes32[][] memory tree, uint256 leafCount) internal pure returns (Cids.Cid memory) { + if (leafCount == 0) { + return Cids.CommPv2FromDigest(127, 2, tree[0][0]); + } + uint8 height = uint8(256 - BitOps.clz(leafCount - 1)); + require(1 << height >= leafCount, "makePiece: height not enough to hold leaf count"); + uint256 paddingLeaves = (1 << height) - leafCount; + uint256 padding = (paddingLeaves * 32 * 127 + 127) / 128; + + console.log("leafCount", leafCount); + console.log("height", height); + console.log("paddingLeaves", paddingLeaves); + console.log("padding", padding); + assertEq(Cids.leafCount(padding, height), leafCount, "makePiece: leaf count mismatch"); + return Cids.CommPv2FromDigest(padding, height, tree[0][0]); + } + + function makePieceBytes(bytes32[][] memory tree, uint256 count) internal pure returns (Cids.Cid memory) { + if (count == 0) { + return Cids.CommPv2FromDigest(127, 2, tree[0][0]); + } + if (count == 1) { + // a piece with a single data byte doesn't exist: + // it holds either 0 data bytes or at least 2 + count = 2; + } + + uint256 leafCount = (count + 31) / 32; + uint8 height = uint8(256 - BitOps.clz(leafCount - 1)); + if (height < 2) { + height = 2; + } + + require(1 << (height + 5) >= count, "makePieceBytes: height not enough to hold count"); + uint256 padding = (1 << (height + 5)) - count; + padding = (padding * 127 + 127) / 128; + + console.log("count", count); + console.log("leafCount", leafCount); + console.log("height", height); + console.log("padding", padding); + assertEq(Cids.leafCount(padding, height), leafCount, "makePieceBytes: leaf count mismatch"); + assertEq(Cids.pieceSize(padding, height), count, "makePieceBytes: piece size mismatch"); + return Cids.CommPv2FromDigest(padding, height, tree[0][0]); + } + + function makeSamplePiece(uint256 leafCount) internal pure returns (Cids.Cid memory) { + bytes32[][] memory tree = new bytes32[][](1); + tree[0] = new bytes32[](1); + tree[0][0] = bytes32(abi.encodePacked(leafCount)); + return makePiece(tree, leafCount); + } + + // count here is bytes after Fr32 padding + function makeSamplePieceBytes(uint256 count) internal pure returns (Cids.Cid memory) { + bytes32[][] memory tree = new bytes32[][](1); + tree[0] = new bytes32[](1); + tree[0][0] = bytes32(abi.encodePacked(count)); + return makePieceBytes(tree, count); + } +} + +contract PieceHelperTest is Test, PieceHelper { + function testMakePiece() public pure { + bytes32[][] memory tree = new bytes32[][](1); + tree[0] = new bytes32[](10); + Cids.Cid memory piece =
makePiece(tree, 10); + Cids.validateCommPv2(piece); + } + + function testMakeSamplePiece() public pure { + makeSamplePiece(0); + Cids.Cid memory piece = makeSamplePiece(1); + Cids.validateCommPv2(piece); + piece = makeSamplePiece(2); + Cids.validateCommPv2(piece); + piece = makeSamplePiece(3); + Cids.validateCommPv2(piece); + piece = makeSamplePiece(4); + Cids.validateCommPv2(piece); + piece = makeSamplePiece(10); + Cids.validateCommPv2(piece); + piece = makeSamplePiece(127); + Cids.validateCommPv2(piece); + piece = makeSamplePiece(128); + Cids.validateCommPv2(piece); + piece = makeSamplePiece(1024); + Cids.validateCommPv2(piece); + } + + function testMakeSamplePieceBytes() public pure { + Cids.Cid memory piece = makeSamplePieceBytes(0); + piece = makeSamplePieceBytes(1); + Cids.validateCommPv2(piece); + piece = makeSamplePieceBytes(2); + Cids.validateCommPv2(piece); + piece = makeSamplePieceBytes(32); + Cids.validateCommPv2(piece); + piece = makeSamplePieceBytes(31); + Cids.validateCommPv2(piece); + piece = makeSamplePieceBytes(127); + Cids.validateCommPv2(piece); + piece = makeSamplePieceBytes(128); + Cids.validateCommPv2(piece); + } +} diff --git a/service_contracts/test/pdp/ProofBuilderHelper.t.sol b/service_contracts/test/pdp/ProofBuilderHelper.t.sol new file mode 100644 index 00000000..1ccfe655 --- /dev/null +++ b/service_contracts/test/pdp/ProofBuilderHelper.t.sol @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.13; + +import {Test} from "forge-std/Test.sol"; +import {PDPVerifier} from "@pdp/PDPVerifier.sol"; +import {MerkleProve} from "@pdp/Proofs.sol"; +import {IPDPTypes} from "@pdp/interfaces/IPDPTypes.sol"; + +contract ProofBuilderHelper is Test { + // Builds a proof of possession for a data set + function buildProofs( + PDPVerifier pdpVerifier, + uint256 setId, + uint256 challengeCount, + bytes32[][][] memory trees, + uint256[] memory leafCounts + ) internal view returns (IPDPTypes.Proof[] memory) { + uint256 challengeEpoch = pdpVerifier.getNextChallengeEpoch(setId); + uint256 seed = challengeEpoch; // Seed is (temporarily) the challenge epoch + uint256 totalLeafCount = 0; + for (uint256 i = 0; i < leafCounts.length; ++i) { + totalLeafCount += leafCounts[i]; + } + + IPDPTypes.Proof[] memory proofs = new IPDPTypes.Proof[](challengeCount); + for (uint256 challengeIdx = 0; challengeIdx < challengeCount; challengeIdx++) { + // Compute challenge index + bytes memory payload = abi.encodePacked(seed, setId, uint64(challengeIdx)); + uint256 challengeOffset = uint256(keccak256(payload)) % totalLeafCount; + + uint256 treeIdx = 0; + uint256 treeOffset = 0; + for (uint256 i = 0; i < leafCounts.length; ++i) { + if (leafCounts[i] > challengeOffset) { + treeIdx = i; + treeOffset = challengeOffset; + break; + } else { + challengeOffset -= leafCounts[i]; + } + } + + bytes32[][] memory tree = trees[treeIdx]; + bytes32[] memory path = MerkleProve.buildProof(tree, treeOffset); + proofs[challengeIdx] = IPDPTypes.Proof(tree[tree.length - 1][treeOffset], path); + + // console.log("Leaf", vm.toString(proofs[0].leaf)); + // console.log("Proof"); + // for (uint j = 0; j < proofs[0].proof.length; j++) { + // console.log(vm.toString(j), vm.toString(proofs[0].proof[j])); + // } + } + + return proofs; + } +} diff --git a/service_contracts/test/pdp/ProofUtil.sol b/service_contracts/test/pdp/ProofUtil.sol new file mode 100644 index 00000000..d8c641cb --- /dev/null +++ b/service_contracts/test/pdp/ProofUtil.sol @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT 
+pragma solidity ^0.8.20; + +import {MerkleProve} from "@pdp/Proofs.sol"; + +// Methods for committing to data and generating proofs. +// These are only used in tests (which verify proofs). +// These functions provide a spec for the operations which providers should perform off-chain. +library ProofUtil { + /** + * Builds a Merkle tree over data that is a sequence of distinct leaf values. + */ + function makeTree(uint256 leafCount) internal view returns (bytes32[][] memory) { + bytes32[] memory data = generateLeaves(leafCount); + bytes32[][] memory tree = MerkleProve.buildTree(data); + return tree; + } + + /** + * Generates an array of leaves with distinct values. + */ + function generateLeaves(uint256 count) internal pure returns (bytes32[] memory) { + bytes32[] memory result = new bytes32[](count); + for (uint256 i = 0; i < count; i++) { + result[i] = bytes32(i); + } + return result; + } +} diff --git a/service_contracts/test/pdp/Proofs.t.sol b/service_contracts/test/pdp/Proofs.t.sol new file mode 100644 index 00000000..bb436f21 --- /dev/null +++ b/service_contracts/test/pdp/Proofs.t.sol @@ -0,0 +1,432 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.13; + +import {Test, console} from "forge-std/Test.sol"; +import {BitOps} from "@pdp/BitOps.sol"; +import {Hashes, MerkleProve, MerkleVerify} from "@pdp/Proofs.sol"; +import {ProofUtil} from "./ProofUtil.sol"; + +contract MerkleProveTest is Test { + function testVerifyEmptyProof() public view { + bytes32 root = sha256("hello"); + bytes32[] memory proof = new bytes32[](0); + bool result = MerkleVerify.verify(proof, root, root, 0, 1); + assertEq(result, true, "Verify should return true"); + } + + function testVerifyTreeTwoLeaves() public view { + bytes32[] memory leaves = ProofUtil.generateLeaves(2); + bytes32[][] memory tree = MerkleProve.buildTree(leaves); + bytes32 root = tree[0][0]; + + for (uint256 i = 0; i < leaves.length; i++) { + bytes32[] memory proof = MerkleProve.buildProof(tree, i); + assertTrue( + MerkleVerify.verify(proof, root, leaves[i], i, tree.length), + string.concat("Invalid proof ", vm.toString(i)) + ); + assertFalse( + MerkleVerify.verify(proof, root, leaves[i], i + 1, tree.length), + string.concat("False proof ", vm.toString(i)) + ); + } + } + + function testVerifyTreeThreeLeaves() public view { + bytes32[] memory leaves = ProofUtil.generateLeaves(3); + bytes32[][] memory tree = MerkleProve.buildTree(leaves); + bytes32 root = tree[0][0]; + + for (uint256 i = 0; i < leaves.length; i++) { + bytes32[] memory proof = MerkleProve.buildProof(tree, i); + assertTrue( + MerkleVerify.verify(proof, root, leaves[i], i, tree.length), + string.concat("Invalid proof ", vm.toString(i)) + ); + // Ensure the proof is invalid for every other index within range + for (uint256 j = 0; j < leaves.length; j++) { + if (j != i) { + assertFalse( + MerkleVerify.verify(proof, root, leaves[i], j, tree.length), + string.concat("False proof ", vm.toString(i)) + ); + } + } + } + } + + function testVerifyTreesManyLeaves() public { + bytes32[] memory leaves; + bytes32[][] memory tree; + bytes32[] memory proof; + vm.pauseGasMetering(); + for (uint256 width = 4; width < 60; width++) { + leaves = ProofUtil.generateLeaves(width); + tree = MerkleProve.buildTree(leaves); + bytes32 root = tree[0][0]; + + // Verify proof for each leaf + for (uint256 i = 0; i < leaves.length; i++) { + proof = MerkleProve.buildProof(tree, i); + assertTrue( + MerkleVerify.verify(proof, root, leaves[i], i, tree.length), + string.concat("Invalid proof ", 
vm.toString(i)) + ); + // Ensure the proof is invalid for every other index within range + for (uint256 j = 0; j < leaves.length; j++) { + if (j != i) { + assertFalse( + MerkleVerify.verify(proof, root, leaves[i], j, tree.length), + string.concat("False proof ", vm.toString(i)) + ); + } + } + } + } + vm.resumeGasMetering(); + } + + // Tests that the merkle root of a tree committing to known data (all zeros) matches the + // externally-known Filecoin piece commitment for the same data. + // Note that this is only testing a balanced tree (power-of-two payload). + function testFilecoinCommPEquivalence() public view { + // Known value for CommP of a 2KiB zero payload copied from built-in actors code. + uint8[32] memory zeroCommP2KiB = [ + 252, + 126, + 146, + 130, + 150, + 229, + 22, + 250, + 173, + 233, + 134, + 178, + 143, + 146, + 212, + 74, + 79, + 36, + 185, + 53, + 72, + 82, + 35, + 55, + 106, + 121, + 144, + 39, + 188, + 24, + 248, + 51 + ]; + + bytes32 expected = loadDigest(zeroCommP2KiB); + + // Build payload of 2KiB of zeros, packed into bytes32 words + bytes32[] memory payload = new bytes32[](2048 / 32); + + bytes32[][] memory tree = MerkleProve.buildTree(payload); + assertEq(tree[0][0], expected); + } + + // Tests that the zero roots returned by the merkle library match the values computed for them here. + function testZeroRootsComputed() public view { + bytes32[] memory expected = buildZeroPaddingStack(51); + // console.log("Zero roots:"); + // for (uint i = 0; i < zeroRoots.length; i++) { + // console.log(vm.toString(i), vm.toString(zeroRoots[i])); + // } + for (uint256 height = 0; height <= 50; height++) { + assertEq(MerkleVerify.zeroRoot(height), expected[height]); + } + } + + // Tests some zero roots against known values for Filecoin sector sizes. + // The target digests are copied directly from built-in actors code.
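+ // A zero root at height h commits to 2^h zero leaves of 32 bytes each, i.e. a 2^(h+5)-byte zero payload (h = 6 -> 2 KiB).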
+ function testZeroRootFilecoinEquivalence() public pure { + assertEq(MerkleVerify.zeroRoot(0), 0); + // 2 KiB / 32 = 64 leaves = 2^6 + assertEq( + MerkleVerify.zeroRoot(6), + loadDigest( + [ + 252, + 126, + 146, + 130, + 150, + 229, + 22, + 250, + 173, + 233, + 134, + 178, + 143, + 146, + 212, + 74, + 79, + 36, + 185, + 53, + 72, + 82, + 35, + 55, + 106, + 121, + 144, + 39, + 188, + 24, + 248, + 51 + ] + ) + ); + // 8 MiB = 256Ki leaves = 2^8 * 2^10 + assertEq( + MerkleVerify.zeroRoot(18), + loadDigest( + [ + 101, + 242, + 158, + 93, + 152, + 210, + 70, + 195, + 139, + 56, + 140, + 252, + 6, + 219, + 31, + 107, + 2, + 19, + 3, + 197, + 162, + 137, + 0, + 11, + 220, + 232, + 50, + 169, + 195, + 236, + 66, + 28 + ] + ) + ); + // 512 MiB = 16Mi leaves = 2^4 * 2^20 + assertEq( + MerkleVerify.zeroRoot(24), + loadDigest( + [ + 57, + 86, + 14, + 123, + 19, + 169, + 59, + 7, + 162, + 67, + 253, + 39, + 32, + 255, + 167, + 203, + 62, + 29, + 46, + 80, + 90, + 179, + 98, + 158, + 121, + 244, + 99, + 19, + 81, + 44, + 218, + 6 + ] + ) + ); + // 32 GiB = 1Gi leaves = 2^30 + assertEq( + MerkleVerify.zeroRoot(30), + loadDigest( + [ + 7, + 126, + 95, + 222, + 53, + 197, + 10, + 147, + 3, + 165, + 80, + 9, + 227, + 73, + 138, + 78, + 190, + 223, + 243, + 156, + 66, + 183, + 16, + 183, + 48, + 216, + 236, + 122, + 199, + 175, + 166, + 62 + ] + ) + ); + // 64 GiB = 2 * 1Gi leaves = 2^1 * 2^30 + assertEq( + MerkleVerify.zeroRoot(31), + loadDigest( + [ + 230, + 64, + 5, + 166, + 191, + 227, + 119, + 121, + 83, + 184, + 173, + 110, + 249, + 63, + 15, + 202, + 16, + 73, + 178, + 4, + 22, + 84, + 242, + 164, + 17, + 247, + 112, + 39, + 153, + 206, + 206, + 2 + ] + ) + ); + } + + // Tests that trees with explicit zero leaves produce known values for the root of the all-zero tree. + function testZeroTreeFilecoinEquivalence() public view { + for (uint256 i = 1; i <= 16; i++) { + bytes32[] memory leaves = new bytes32[](i); + bytes32[][] memory tree = MerkleProve.buildTree(leaves); + uint256 height = 256 - BitOps.clz(i - 1); + assertEq(tree[0][0], MerkleVerify.zeroRoot(height)); + } + } + + ///// Helper functions ///// + + // Returns an array of Merkle tree roots committing to all-zero data of increasing tree heights. + // The first entry is zero. + // The second entry is a node with two zero leaves. + // The third entry is a node with four zero leaves, etc. + function buildZeroPaddingStack(uint256 levels) public view returns (bytes32[] memory) { + bytes32[] memory result = new bytes32[](levels); + for (uint256 i = 1; i < levels; i++) { + result[i] = Hashes.orderedHash(result[i - 1], result[i - 1]); + } + + return result; + } + + // Loads a bytes32 hash digest from an array of 32 1-byte values. 
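+ // Byte 0 of the input becomes the most significant byte of the result (big-endian packing).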
+ function loadDigest(uint8[32] memory b) public pure returns (bytes32) { + bytes32 result; + for (uint256 i = 0; i < 32; i++) { + result |= bytes32(uint256(b[i]) << (8 * (31 - i))); + } + return result; + } + + function printTree(bytes32[][] memory tree) internal pure { + console.log("Tree:"); + for (uint256 i = 0; i < tree.length; i++) { + console.log("Level ", i, ":"); + for (uint256 j = 0; j < tree[i].length; j++) { + console.log(vm.toString(j), vm.toString(tree[i][j])); + } + } + console.log(); + } + + function printProof(bytes32[] memory proof) internal pure { + console.log("Proof: "); + for (uint256 j = 0; j < proof.length; j++) { + console.log(vm.toString(j), vm.toString(proof[j])); + } + } +} + +contract HashesTest is Test { + // Tests that the efficient hash function returns the same result as the expected hash function. + function testHash() public view { + bytes32 a = bytes32(0x0000000000000000000000000000000000000000000000000000000000000000); + bytes32 b = bytes32(0x0000000000000000000000000000000000000000000000000000000000000001); + verifyHash(a, a); + verifyHash(a, b); + verifyHash(b, a); + } + + function verifyHash(bytes32 a, bytes32 b) internal view { + bytes32 expected = expectedHash(a, b); + bytes32 result = Hashes.orderedHash(a, b); + assertEq(result, expected, "Hashes.orderedHash should return the expected hash"); + } + + // Implements SHA254 hash of pairs via the standard sha256(abi.encodePacked(a, b)), masked to 254 bits. + function expectedHash(bytes32 a, bytes32 b) internal pure returns (bytes32) { + bytes memory payload = abi.encodePacked(a, b); + bytes32 digest = sha256(payload); + digest = bytes32((uint256(digest) & Hashes.SHA254_MASK)); + return digest; + } +} diff --git a/service_contracts/test/pdp/SimplePDPService.t.sol b/service_contracts/test/pdp/SimplePDPService.t.sol new file mode 100644 index 00000000..e9d5ab4a --- /dev/null +++ b/service_contracts/test/pdp/SimplePDPService.t.sol @@ -0,0 +1,428 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity ^0.8.13; + +import {Test} from "forge-std/Test.sol"; +import {SimplePDPService} from "@pdp/SimplePDPService.sol"; +import {MyERC1967Proxy} from "@pdp/ERC1967Proxy.sol"; +import {Cids} from "@pdp/Cids.sol"; + +contract SimplePDPServiceTest is Test { + SimplePDPService public pdpService; + address public pdpVerifierAddress; + bytes empty = new bytes(0); + uint256 public dataSetId; + uint256 public leafCount; + uint256 public seed; + + function setUp() public { + pdpVerifierAddress = address(this); + SimplePDPService pdpServiceImpl = new SimplePDPService(); + bytes memory initializeData = + abi.encodeWithSelector(SimplePDPService.initialize.selector, address(pdpVerifierAddress)); + MyERC1967Proxy pdpServiceProxy = new MyERC1967Proxy(address(pdpServiceImpl), initializeData); + pdpService = SimplePDPService(address(pdpServiceProxy)); + dataSetId = 1; + leafCount = 100; + seed = 12345; + } + + function testInitialState() public view { + assertEq(pdpService.pdpVerifierAddress(), pdpVerifierAddress, "PDP verifier address should be set correctly"); + } + + function testOnlyPDPVerifierCanAddRecord() public { + vm.prank(address(0xdead)); + vm.expectRevert("Caller is not the PDP verifier"); + pdpService.dataSetCreated(dataSetId, address(this), empty); + } + + function testGetMaxProvingPeriod() public view { + uint64 maxPeriod = pdpService.getMaxProvingPeriod(); + assertEq(maxPeriod, 2880, "Max proving period should be 2880"); + } + + function testGetChallengesPerProof() public view { + uint64 challenges =
pdpService.getChallengesPerProof(); + assertEq(challenges, 5, "Challenges per proof should be 5"); + } + + function testInitialProvingPeriodHappyPath() public { + pdpService.piecesAdded(dataSetId, 0, new Cids.Cid[](0), empty); + uint256 challengeEpoch = pdpService.initChallengeWindowStart(); + + pdpService.nextProvingPeriod(dataSetId, challengeEpoch, leafCount, empty); + + assertEq( + pdpService.provingDeadlines(dataSetId), + block.number + pdpService.getMaxProvingPeriod(), + "Deadline should be set to current block + max period" + ); + assertFalse(pdpService.provenThisPeriod(dataSetId)); + } + + function testInitialProvingPeriodInvalidChallengeEpoch() public { + pdpService.piecesAdded(dataSetId, 0, new Cids.Cid[](0), empty); + uint256 firstDeadline = block.number + pdpService.getMaxProvingPeriod(); + + // Test too early + uint256 tooEarly = firstDeadline - pdpService.challengeWindow() - 1; + vm.expectRevert("Next challenge epoch must fall within the next challenge window"); + pdpService.nextProvingPeriod(dataSetId, tooEarly, leafCount, empty); + + // Test too late + uint256 tooLate = firstDeadline + 1; + vm.expectRevert("Next challenge epoch must fall within the next challenge window"); + pdpService.nextProvingPeriod(dataSetId, tooLate, leafCount, empty); + } + + function testProveBeforeInitialization() public { + // Proving before any proving period has been initialized must revert + vm.expectRevert("Proving not yet started"); + pdpService.possessionProven(dataSetId, leafCount, seed, 5); + } + + function testInactivateDataSetHappyPath() public { + // Setup initial state + pdpService.piecesAdded(dataSetId, 0, new Cids.Cid[](0), empty); + pdpService.nextProvingPeriod(dataSetId, pdpService.initChallengeWindowStart(), leafCount, empty); + + // Prove possession in first period + vm.roll(block.number + pdpService.getMaxProvingPeriod() - pdpService.challengeWindow()); + pdpService.possessionProven(dataSetId, leafCount, seed, 5); + + // Inactivate the data set + pdpService.nextProvingPeriod(dataSetId, pdpService.NO_CHALLENGE_SCHEDULED(), leafCount, empty); + + assertEq( + pdpService.provingDeadlines(dataSetId), + pdpService.NO_PROVING_DEADLINE(), + "Proving deadline should be set to NO_PROVING_DEADLINE" + ); + assertEq(pdpService.provenThisPeriod(dataSetId), false, "Proven this period should now be false"); + } +} + +contract SimplePDPServiceFaultsTest is Test { + SimplePDPService public pdpService; + address public pdpVerifierAddress; + uint256 public dataSetId; + uint256 public leafCount; + uint256 public seed; + uint256 public challengeCount; + bytes empty = new bytes(0); + + function setUp() public { + pdpVerifierAddress = address(this); + SimplePDPService pdpServiceImpl = new SimplePDPService(); + bytes memory initializeData = + abi.encodeWithSelector(SimplePDPService.initialize.selector, address(pdpVerifierAddress)); + MyERC1967Proxy pdpServiceProxy = new MyERC1967Proxy(address(pdpServiceImpl), initializeData); + pdpService = SimplePDPService(address(pdpServiceProxy)); + dataSetId = 1; + leafCount = 100; + seed = 12345; + challengeCount = 5; + } + + function testPossessionProvenOnTime() public { + // Set up the proving deadline + pdpService.piecesAdded(dataSetId, 0, new Cids.Cid[](0), empty); + pdpService.nextProvingPeriod(dataSetId, pdpService.initChallengeWindowStart(), leafCount, empty); + vm.roll(block.number + pdpService.getMaxProvingPeriod() - pdpService.challengeWindow()); + pdpService.possessionProven(dataSetId, leafCount, seed, challengeCount); + assertTrue(pdpService.provenThisPeriod(dataSetId));
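+ // Open the next proving period, then prove again at the following deadline. +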
pdpService.nextProvingPeriod(dataSetId, pdpService.nextChallengeWindowStart(dataSetId), leafCount, empty); + vm.roll(block.number + pdpService.getMaxProvingPeriod()); + pdpService.possessionProven(dataSetId, leafCount, seed, challengeCount); + } + + function testNextProvingPeriodCalledLastMinuteOK() public { + pdpService.piecesAdded(dataSetId, 0, new Cids.Cid[](0), empty); + pdpService.nextProvingPeriod(dataSetId, pdpService.initChallengeWindowStart(), leafCount, empty); + vm.roll(block.number + pdpService.getMaxProvingPeriod()); + pdpService.possessionProven(dataSetId, leafCount, seed, challengeCount); + + // wait until almost the end of proving period 2 + // this should all work fine + vm.roll(block.number + pdpService.getMaxProvingPeriod()); + pdpService.nextProvingPeriod(dataSetId, pdpService.nextChallengeWindowStart(dataSetId), leafCount, empty); + pdpService.possessionProven(dataSetId, leafCount, seed, challengeCount); + } + + function testFirstEpochLateToProve() public { + pdpService.piecesAdded(dataSetId, 0, new Cids.Cid[](0), empty); + pdpService.nextProvingPeriod(dataSetId, pdpService.initChallengeWindowStart(), leafCount, empty); + vm.roll(block.number + pdpService.getMaxProvingPeriod() + 1); + vm.expectRevert("Current proving period passed. Open a new proving period."); + pdpService.possessionProven(dataSetId, leafCount, seed, challengeCount); + } + + function testNextProvingPeriodTwiceFails() public { + // Set up the proving deadline + pdpService.piecesAdded(dataSetId, 0, new Cids.Cid[](0), empty); + pdpService.nextProvingPeriod(dataSetId, pdpService.initChallengeWindowStart(), leafCount, empty); + vm.roll(block.number + pdpService.getMaxProvingPeriod() - pdpService.challengeWindow()); + pdpService.possessionProven(dataSetId, leafCount, seed, challengeCount); + uint256 deadline1 = pdpService.provingDeadlines(dataSetId); + assertTrue(pdpService.provenThisPeriod(dataSetId)); + + assertEq( + pdpService.provingDeadlines(dataSetId), + deadline1, + "Proving deadline should not change until nextProvingPeriod." 
+ ); + uint256 challengeEpoch = pdpService.nextChallengeWindowStart(dataSetId); + pdpService.nextProvingPeriod(dataSetId, challengeEpoch, leafCount, empty); + assertEq( + pdpService.provingDeadlines(dataSetId), + deadline1 + pdpService.getMaxProvingPeriod(), + "Proving deadline should be updated" + ); + assertFalse(pdpService.provenThisPeriod(dataSetId)); + + vm.expectRevert("One call to nextProvingPeriod allowed per proving period"); + pdpService.nextProvingPeriod(dataSetId, challengeEpoch, leafCount, empty); + } + + function testFaultWithinOpenPeriod() public { + pdpService.piecesAdded(dataSetId, 0, new Cids.Cid[](0), empty); + pdpService.nextProvingPeriod(dataSetId, pdpService.initChallengeWindowStart(), leafCount, empty); + + // Move to open proving period + vm.roll(block.number + pdpService.getMaxProvingPeriod() - 100); + + // Expect fault event when calling nextProvingPeriod without proof + vm.expectEmit(true, true, true, true); + emit SimplePDPService.FaultRecord(dataSetId, 1, pdpService.provingDeadlines(dataSetId)); + pdpService.nextProvingPeriod(dataSetId, pdpService.nextChallengeWindowStart(dataSetId), leafCount, empty); + } + + function testFaultAfterPeriodOver() public { + pdpService.piecesAdded(dataSetId, 0, new Cids.Cid[](0), empty); + pdpService.nextProvingPeriod(dataSetId, pdpService.initChallengeWindowStart(), leafCount, empty); + + // Move past proving period + vm.roll(block.number + pdpService.getMaxProvingPeriod() + 1); + + // Expect fault event when calling nextProvingPeriod without proof + vm.expectEmit(true, true, true, true); + emit SimplePDPService.FaultRecord(dataSetId, 1, pdpService.provingDeadlines(dataSetId)); + pdpService.nextProvingPeriod(dataSetId, pdpService.nextChallengeWindowStart(dataSetId), leafCount, empty); + } + + function testNextProvingPeriodWithoutProof() public { + // Set up the proving deadline without marking as proven + pdpService.piecesAdded(dataSetId, 0, new Cids.Cid[](0), empty); + pdpService.nextProvingPeriod(dataSetId, pdpService.initChallengeWindowStart(), leafCount, empty); + // Move to the next period + vm.roll(block.number + pdpService.getMaxProvingPeriod() + 1); + // Expect a fault event + vm.expectEmit(); + emit SimplePDPService.FaultRecord(dataSetId, 1, pdpService.provingDeadlines(dataSetId)); + pdpService.nextProvingPeriod(dataSetId, pdpService.nextChallengeWindowStart(dataSetId), leafCount, empty); + assertFalse(pdpService.provenThisPeriod(dataSetId)); + } + + function testInvalidChallengeCount() public { + uint256 invalidChallengeCount = 4; // Less than required + + pdpService.piecesAdded(dataSetId, 0, new Cids.Cid[](0), empty); + pdpService.nextProvingPeriod(dataSetId, pdpService.initChallengeWindowStart(), leafCount, empty); + vm.expectRevert("Invalid challenge count < 5"); + pdpService.possessionProven(dataSetId, leafCount, seed, invalidChallengeCount); + } + + function testMultiplePeriodsLate() public { + // Set up the proving deadline + pdpService.piecesAdded(dataSetId, 0, new Cids.Cid[](0), empty); + pdpService.nextProvingPeriod(dataSetId, pdpService.initChallengeWindowStart(), leafCount, empty); + // Warp to 3 periods after the deadline + vm.roll(block.number + pdpService.getMaxProvingPeriod() * 3 + 1); + // unable to prove possession + vm.expectRevert("Current proving period passed. 
Open a new proving period."); + pdpService.possessionProven(dataSetId, leafCount, seed, challengeCount); + + vm.expectEmit(true, true, true, true); + emit SimplePDPService.FaultRecord(dataSetId, 3, pdpService.provingDeadlines(dataSetId)); + pdpService.nextProvingPeriod(dataSetId, pdpService.nextChallengeWindowStart(dataSetId), leafCount, empty); + } + + function testMultiplePeriodsLateWithInitialProof() public { + // Set up the proving deadline + pdpService.piecesAdded(dataSetId, 0, new Cids.Cid[](0), empty); + + pdpService.nextProvingPeriod(dataSetId, pdpService.initChallengeWindowStart(), leafCount, empty); + // Move to first open proving period + vm.roll(block.number + pdpService.getMaxProvingPeriod() - pdpService.challengeWindow()); + + // Submit valid proof in first period + pdpService.possessionProven(dataSetId, leafCount, seed, challengeCount); + assertTrue(pdpService.provenThisPeriod(dataSetId)); + + // Warp to 3 periods after the deadline + vm.roll(block.number + pdpService.getMaxProvingPeriod() * 3 + 1); + + // Should emit fault record for 2 periods (current period not counted since not yet expired) + vm.expectEmit(true, true, true, true); + emit SimplePDPService.FaultRecord(dataSetId, 2, pdpService.provingDeadlines(dataSetId)); + pdpService.nextProvingPeriod(dataSetId, pdpService.nextChallengeWindowStart(dataSetId), leafCount, empty); + } + + function testCanOnlyProveOncePerPeriod() public { + pdpService.piecesAdded(dataSetId, 0, new Cids.Cid[](0), empty); + pdpService.nextProvingPeriod(dataSetId, pdpService.initChallengeWindowStart(), leafCount, empty); + + // We're in the previous deadline so we fail to prove until we roll forward into challenge window + vm.expectRevert("Too early. Wait for challenge window to open"); + pdpService.possessionProven(dataSetId, leafCount, seed, 5); + vm.roll(block.number + pdpService.getMaxProvingPeriod() - pdpService.challengeWindow() - 1); + // We're one before the challenge window so we should still fail + vm.expectRevert("Too early. Wait for challenge window to open"); + pdpService.possessionProven(dataSetId, leafCount, seed, 5); + // now we succeed + vm.roll(block.number + 1); + pdpService.possessionProven(dataSetId, leafCount, seed, 5); + vm.expectRevert("Only one proof of possession allowed per proving period. Open a new proving period."); + pdpService.possessionProven(dataSetId, leafCount, seed, 5); + } + + function testCantProveBeforePeriodIsOpen() public { + pdpService.piecesAdded(dataSetId, 0, new Cids.Cid[](0), empty); + pdpService.nextProvingPeriod(dataSetId, pdpService.initChallengeWindowStart(), leafCount, empty); + vm.roll(block.number + pdpService.getMaxProvingPeriod() - pdpService.challengeWindow()); + pdpService.possessionProven(dataSetId, leafCount, seed, 5); + pdpService.nextProvingPeriod(dataSetId, pdpService.nextChallengeWindowStart(dataSetId), leafCount, empty); + vm.expectRevert("Too early. 
Wait for challenge window to open"); + pdpService.possessionProven(dataSetId, leafCount, seed, 5); + } + + function testMissChallengeWindow() public { + pdpService.piecesAdded(dataSetId, 0, new Cids.Cid[](0), empty); + pdpService.nextProvingPeriod(dataSetId, pdpService.initChallengeWindowStart(), leafCount, empty); + vm.roll(block.number + pdpService.getMaxProvingPeriod() - 100); + // Too early + uint256 tooEarly = pdpService.nextChallengeWindowStart(dataSetId) - 1; + vm.expectRevert("Next challenge epoch must fall within the next challenge window"); + pdpService.nextProvingPeriod(dataSetId, tooEarly, leafCount, empty); + // Too late + uint256 tooLate = pdpService.nextChallengeWindowStart(dataSetId) + pdpService.challengeWindow() + 1; + vm.expectRevert("Next challenge epoch must fall within the next challenge window"); + pdpService.nextProvingPeriod(dataSetId, tooLate, leafCount, empty); + + // Works right on the deadline + pdpService.nextProvingPeriod( + dataSetId, pdpService.nextChallengeWindowStart(dataSetId) + pdpService.challengeWindow(), leafCount, empty + ); + } + + function testMissChallengeWindowAfterFaults() public { + pdpService.piecesAdded(dataSetId, 0, new Cids.Cid[](0), empty); + pdpService.nextProvingPeriod(dataSetId, pdpService.initChallengeWindowStart(), leafCount, empty); + // Skip 2 proving periods + vm.roll(block.number + pdpService.getMaxProvingPeriod() * 3 - 100); + + // Too early + uint256 tooEarly = pdpService.nextChallengeWindowStart(dataSetId) - 1; + vm.expectRevert("Next challenge epoch must fall within the next challenge window"); + pdpService.nextProvingPeriod(dataSetId, tooEarly, leafCount, empty); + + // Too late + uint256 tooLate = pdpService.nextChallengeWindowStart(dataSetId) + pdpService.challengeWindow() + 1; + vm.expectRevert("Next challenge epoch must fall within the next challenge window"); + pdpService.nextProvingPeriod(dataSetId, tooLate, leafCount, empty); + + // Should emit fault record for 2 periods + vm.expectEmit(true, true, true, true); + emit SimplePDPService.FaultRecord(dataSetId, 2, pdpService.provingDeadlines(dataSetId)); + // Works right on the deadline + pdpService.nextProvingPeriod( + dataSetId, pdpService.nextChallengeWindowStart(dataSetId) + pdpService.challengeWindow(), leafCount, empty + ); + } + + function testInactivateWithCurrentPeriodFault() public { + // Setup initial state + pdpService.piecesAdded(dataSetId, 0, new Cids.Cid[](0), empty); + pdpService.nextProvingPeriod(dataSetId, pdpService.initChallengeWindowStart(), leafCount, empty); + + // Move to end of period without proving + vm.roll(block.number + pdpService.getMaxProvingPeriod()); + + // Expect fault event for the unproven period + vm.expectEmit(true, true, true, true); + emit SimplePDPService.FaultRecord(dataSetId, 1, pdpService.provingDeadlines(dataSetId)); + + pdpService.nextProvingPeriod(dataSetId, pdpService.NO_CHALLENGE_SCHEDULED(), leafCount, empty); + + assertEq( + pdpService.provingDeadlines(dataSetId), + pdpService.NO_PROVING_DEADLINE(), + "Proving deadline should be set to NO_PROVING_DEADLINE" + ); + } + + function testInactivateWithMultiplePeriodFaults() public { + // Setup initial state + pdpService.piecesAdded(dataSetId, 0, new Cids.Cid[](0), empty); + pdpService.nextProvingPeriod(dataSetId, pdpService.initChallengeWindowStart(), leafCount, empty); + + // Skip 3 proving periods without proving + vm.roll(block.number + pdpService.getMaxProvingPeriod() * 3 + 1); + + // Expect fault event for all missed periods + vm.expectEmit(true, true, true, true); + emit 
SimplePDPService.FaultRecord(dataSetId, 3, pdpService.provingDeadlines(dataSetId)); + + pdpService.nextProvingPeriod(dataSetId, pdpService.NO_CHALLENGE_SCHEDULED(), leafCount, empty); + + assertEq( + pdpService.provingDeadlines(dataSetId), + pdpService.NO_PROVING_DEADLINE(), + "Proving deadline should be set to NO_PROVING_DEADLINE" + ); + } + + function testGetPDPConfig() public view { + (uint64 maxProvingPeriod, uint256 challengeWindow, uint256 challengesPerProof, uint256 initChallengeWindowStart) + = pdpService.getPDPConfig(); + + assertEq(maxProvingPeriod, 2880, "Max proving period should be 2880"); + assertEq(challengeWindow, 60, "Challenge window should be 60"); + assertEq(challengesPerProof, 5, "Challenges per proof should be 5"); + assertEq( + initChallengeWindowStart, + block.number + 2880 - 60, + "Init challenge window start should be calculated correctly" + ); + } + + function testNextPDPChallengeWindowStart() public { + // Setup initial state + pdpService.piecesAdded(dataSetId, 0, new Cids.Cid[](0), empty); + pdpService.nextProvingPeriod(dataSetId, pdpService.initChallengeWindowStart(), leafCount, empty); + + // Test that nextPDPChallengeWindowStart returns the same as nextChallengeWindowStart + uint256 expected = pdpService.nextChallengeWindowStart(dataSetId); + uint256 actual = pdpService.nextPDPChallengeWindowStart(dataSetId); + assertEq(actual, expected, "nextPDPChallengeWindowStart should match nextChallengeWindowStart"); + + // Move to challenge window and prove + vm.roll(block.number + pdpService.getMaxProvingPeriod() - pdpService.challengeWindow()); + pdpService.possessionProven(dataSetId, leafCount, seed, 5); + + // Open next period + pdpService.nextProvingPeriod(dataSetId, pdpService.nextChallengeWindowStart(dataSetId), leafCount, empty); + + // Test again in new period + expected = pdpService.nextChallengeWindowStart(dataSetId); + actual = pdpService.nextPDPChallengeWindowStart(dataSetId); + assertEq(actual, expected, "nextPDPChallengeWindowStart should match nextChallengeWindowStart in new period"); + } + + function testNextPDPChallengeWindowStartNotInitialized() public { + // Test that it reverts when proving period not initialized + vm.expectRevert("Proving period not yet initialized"); + pdpService.nextPDPChallengeWindowStart(dataSetId); + } +} diff --git a/service_contracts/test/Extsload.t.sol b/service_contracts/test/service-provider/Extsload.t.sol similarity index 96% rename from service_contracts/test/Extsload.t.sol rename to service_contracts/test/service-provider/Extsload.t.sol index a62887ed..09c5785c 100644 --- a/service_contracts/test/Extsload.t.sol +++ b/service_contracts/test/service-provider/Extsload.t.sol @@ -2,7 +2,7 @@ pragma solidity ^0.8.20; import {Test} from "forge-std/Test.sol"; -import {Extsload} from "../src/Extsload.sol"; +import {Extsload} from "@service-provider/Extsload.sol"; contract Extsstore is Extsload { function extsstore(bytes32 slot, bytes32 value) external { diff --git a/service_contracts/test/FilecoinWarmStorageService.t.sol b/service_contracts/test/service-provider/FilecoinWarmStorageService.t.sol similarity index 99% rename from service_contracts/test/FilecoinWarmStorageService.t.sol rename to service_contracts/test/service-provider/FilecoinWarmStorageService.t.sol index 8a537b43..467cee76 100644 --- a/service_contracts/test/FilecoinWarmStorageService.t.sol +++ b/service_contracts/test/service-provider/FilecoinWarmStorageService.t.sol @@ -9,15 +9,15 @@ import {Cids} from "@pdp/Cids.sol"; import {MyERC1967Proxy} from 
"@pdp/ERC1967Proxy.sol"; import {SessionKeyRegistry} from "@session-key-registry/SessionKeyRegistry.sol"; -import {FilecoinWarmStorageService} from "../src/FilecoinWarmStorageService.sol"; -import {FilecoinWarmStorageServiceStateView} from "../src/FilecoinWarmStorageServiceStateView.sol"; -import {Payments} from "@fws-payments/Payments.sol"; +import {FilecoinWarmStorageService} from "@service-provider/FilecoinWarmStorageService.sol"; +import {FilecoinWarmStorageServiceStateView} from "@service-provider/FilecoinWarmStorageServiceStateView.sol"; +import {Payments} from "@payments/Payments.sol"; import {MockERC20, MockPDPVerifier} from "./mocks/SharedMocks.sol"; import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; -import {Errors} from "../src/Errors.sol"; +import {Errors} from "@service-provider/Errors.sol"; -import {ServiceProviderRegistryStorage} from "../src/ServiceProviderRegistryStorage.sol"; -import {ServiceProviderRegistry} from "../src/ServiceProviderRegistry.sol"; +import {ServiceProviderRegistryStorage} from "@service-provider/ServiceProviderRegistryStorage.sol"; +import {ServiceProviderRegistry} from "@service-provider/ServiceProviderRegistry.sol"; contract FilecoinWarmStorageServiceTest is Test { using SafeERC20 for MockERC20; diff --git a/service_contracts/test/FilecoinWarmStorageServiceOwner.t.sol b/service_contracts/test/service-provider/FilecoinWarmStorageServiceOwner.t.sol similarity index 96% rename from service_contracts/test/FilecoinWarmStorageServiceOwner.t.sol rename to service_contracts/test/service-provider/FilecoinWarmStorageServiceOwner.t.sol index aaba29d3..b5fc6849 100644 --- a/service_contracts/test/FilecoinWarmStorageServiceOwner.t.sol +++ b/service_contracts/test/service-provider/FilecoinWarmStorageServiceOwner.t.sol @@ -2,16 +2,16 @@ pragma solidity ^0.8.13; import {Test, console} from "forge-std/Test.sol"; -import {FilecoinWarmStorageService} from "../src/FilecoinWarmStorageService.sol"; -import {FilecoinWarmStorageServiceStateView} from "../src/FilecoinWarmStorageServiceStateView.sol"; -import {ServiceProviderRegistry} from "../src/ServiceProviderRegistry.sol"; -import {ServiceProviderRegistryStorage} from "../src/ServiceProviderRegistryStorage.sol"; +import {FilecoinWarmStorageService} from "@service-provider/FilecoinWarmStorageService.sol"; +import {FilecoinWarmStorageServiceStateView} from "@service-provider/FilecoinWarmStorageServiceStateView.sol"; +import {ServiceProviderRegistry} from "@service-provider/ServiceProviderRegistry.sol"; +import {ServiceProviderRegistryStorage} from "@service-provider/ServiceProviderRegistryStorage.sol"; import {SafeERC20} from "@openzeppelin/contracts/token/ERC20/utils/SafeERC20.sol"; import {SessionKeyRegistry} from "@session-key-registry/SessionKeyRegistry.sol"; import {PDPListener} from "@pdp/PDPVerifier.sol"; import {MyERC1967Proxy} from "@pdp/ERC1967Proxy.sol"; -import {Payments} from "@fws-payments/Payments.sol"; -import {Errors} from "../src/Errors.sol"; +import {Payments} from "@payments/Payments.sol"; +import {Errors} from "@service-provider/Errors.sol"; import {MockERC20, MockPDPVerifier} from "./mocks/SharedMocks.sol"; import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; diff --git a/service_contracts/test/ProviderValidation.t.sol b/service_contracts/test/service-provider/ProviderValidation.t.sol similarity index 97% rename from service_contracts/test/ProviderValidation.t.sol rename to service_contracts/test/service-provider/ProviderValidation.t.sol index 1452b277..44cdc1fb 
100644 --- a/service_contracts/test/ProviderValidation.t.sol +++ b/service_contracts/test/service-provider/ProviderValidation.t.sol @@ -2,19 +2,19 @@ pragma solidity ^0.8.13; import {Test} from "forge-std/Test.sol"; -import {Payments} from "@fws-payments/Payments.sol"; +import {Payments} from "@payments/Payments.sol"; import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; import {SafeERC20} from "@openzeppelin/contracts/token/ERC20/utils/SafeERC20.sol"; import {MyERC1967Proxy} from "@pdp/ERC1967Proxy.sol"; import {PDPListener} from "@pdp/PDPVerifier.sol"; import {SessionKeyRegistry} from "@session-key-registry/SessionKeyRegistry.sol"; -import {FilecoinWarmStorageService} from "../src/FilecoinWarmStorageService.sol"; -import {FilecoinWarmStorageServiceStateView} from "../src/FilecoinWarmStorageServiceStateView.sol"; -import {ServiceProviderRegistry} from "../src/ServiceProviderRegistry.sol"; -import {ServiceProviderRegistryStorage} from "../src/ServiceProviderRegistryStorage.sol"; +import {FilecoinWarmStorageService} from "@service-provider/FilecoinWarmStorageService.sol"; +import {FilecoinWarmStorageServiceStateView} from "@service-provider/FilecoinWarmStorageServiceStateView.sol"; +import {ServiceProviderRegistry} from "@service-provider/ServiceProviderRegistry.sol"; +import {ServiceProviderRegistryStorage} from "@service-provider/ServiceProviderRegistryStorage.sol"; import {MockERC20, MockPDPVerifier} from "./mocks/SharedMocks.sol"; -import {Errors} from "../src/Errors.sol"; +import {Errors} from "@service-provider/Errors.sol"; contract ProviderValidationTest is Test { using SafeERC20 for MockERC20; diff --git a/service_contracts/test/ServiceProviderRegistry.t.sol b/service_contracts/test/service-provider/ServiceProviderRegistry.t.sol similarity index 98% rename from service_contracts/test/ServiceProviderRegistry.t.sol rename to service_contracts/test/service-provider/ServiceProviderRegistry.t.sol index fe4f24b6..2aace619 100644 --- a/service_contracts/test/ServiceProviderRegistry.t.sol +++ b/service_contracts/test/service-provider/ServiceProviderRegistry.t.sol @@ -5,8 +5,8 @@ import {Test} from "forge-std/Test.sol"; import {ERC1967Proxy} from "@openzeppelin/contracts/proxy/ERC1967/ERC1967Proxy.sol"; import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; -import {ServiceProviderRegistry} from "../src/ServiceProviderRegistry.sol"; -import {ServiceProviderRegistryStorage} from "../src/ServiceProviderRegistryStorage.sol"; +import {ServiceProviderRegistry} from "@service-provider/ServiceProviderRegistry.sol"; +import {ServiceProviderRegistryStorage} from "@service-provider/ServiceProviderRegistryStorage.sol"; contract ServiceProviderRegistryTest is Test { ServiceProviderRegistry public implementation; @@ -464,8 +464,7 @@ contract ServiceProviderRegistryTest is Test { assertEq(providerInfos.length, 3, "Should return three results"); assertEq(validIds.length, 3, "Should return three validity flags"); - // All should be invalid - assertFalse(validIds[0], "ID 0 should be invalid"); + assertFalse(validIds[0], "Zero ID should be invalid"); assertFalse(validIds[1], "Non-existent ID should be invalid"); assertFalse(validIds[2], "Unregistered ID should be invalid"); diff --git a/service_contracts/test/ServiceProviderRegistryFull.t.sol b/service_contracts/test/service-provider/ServiceProviderRegistryFull.t.sol similarity index 99% rename from service_contracts/test/ServiceProviderRegistryFull.t.sol rename to 
service_contracts/test/service-provider/ServiceProviderRegistryFull.t.sol index 188f6e8d..53bc1c0e 100644 --- a/service_contracts/test/ServiceProviderRegistryFull.t.sol +++ b/service_contracts/test/service-provider/ServiceProviderRegistryFull.t.sol @@ -2,8 +2,8 @@ pragma solidity ^0.8.20; import {Test} from "forge-std/Test.sol"; -import {ServiceProviderRegistry} from "../src/ServiceProviderRegistry.sol"; -import {ServiceProviderRegistryStorage} from "../src/ServiceProviderRegistryStorage.sol"; +import {ServiceProviderRegistry} from "@service-provider/ServiceProviderRegistry.sol"; +import {ServiceProviderRegistryStorage} from "@service-provider/ServiceProviderRegistryStorage.sol"; import {ERC1967Proxy} from "@openzeppelin/contracts/proxy/ERC1967/ERC1967Proxy.sol"; import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; diff --git a/service_contracts/test/ServiceProviderRegistryPagination.t.sol b/service_contracts/test/service-provider/ServiceProviderRegistryPagination.t.sol similarity index 98% rename from service_contracts/test/ServiceProviderRegistryPagination.t.sol rename to service_contracts/test/service-provider/ServiceProviderRegistryPagination.t.sol index 645e0ad2..4aa4d818 100644 --- a/service_contracts/test/ServiceProviderRegistryPagination.t.sol +++ b/service_contracts/test/service-provider/ServiceProviderRegistryPagination.t.sol @@ -4,8 +4,8 @@ pragma solidity ^0.8.20; import {Test} from "forge-std/Test.sol"; import {ERC1967Proxy} from "@openzeppelin/contracts/proxy/ERC1967/ERC1967Proxy.sol"; import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; -import {ServiceProviderRegistry} from "../src/ServiceProviderRegistry.sol"; -import {ServiceProviderRegistryStorage} from "../src/ServiceProviderRegistryStorage.sol"; +import {ServiceProviderRegistry} from "@service-provider/ServiceProviderRegistry.sol"; +import {ServiceProviderRegistryStorage} from "@service-provider/ServiceProviderRegistryStorage.sol"; contract ServiceProviderRegistryPaginationTest is Test { ServiceProviderRegistry public registry; diff --git a/service_contracts/test/SignatureFixtureTest.t.sol b/service_contracts/test/service-provider/SignatureFixtureTest.t.sol similarity index 99% rename from service_contracts/test/SignatureFixtureTest.t.sol rename to service_contracts/test/service-provider/SignatureFixtureTest.t.sol index e9cb08bd..35b99b33 100644 --- a/service_contracts/test/SignatureFixtureTest.t.sol +++ b/service_contracts/test/service-provider/SignatureFixtureTest.t.sol @@ -358,7 +358,7 @@ contract MetadataSignatureFixturesTest is Test { * @dev Test external signatures against contract verification */ function testExternalSignatures() public view { - string memory json = vm.readFile("./test/external_signatures.json"); + string memory json = vm.readFile("./test/service-provider/external_signatures.json"); address signer = vm.parseJsonAddress(json, ".signer"); console.log("Testing external signatures for signer:", signer); diff --git a/service_contracts/test/external_signatures.json b/service_contracts/test/service-provider/external_signatures.json similarity index 100% rename from service_contracts/test/external_signatures.json rename to service_contracts/test/service-provider/external_signatures.json diff --git a/service_contracts/test/mocks/SharedMocks.sol b/service_contracts/test/service-provider/mocks/SharedMocks.sol similarity index 100% rename from service_contracts/test/mocks/SharedMocks.sol rename to service_contracts/test/service-provider/mocks/SharedMocks.sol diff --git 
a/service_contracts/test/session-key-registry/SessionKeyRegistry.t.sol b/service_contracts/test/session-key-registry/SessionKeyRegistry.t.sol new file mode 100644 index 00000000..df302732 --- /dev/null +++ b/service_contracts/test/session-key-registry/SessionKeyRegistry.t.sol @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: Apache-2.0 OR MIT +pragma solidity^0.8.30; + +import {Test} from "forge-std/Test.sol"; +import {SessionKeyRegistry} from "@session-key-registry/SessionKeyRegistry.sol"; + +contract SessionKeyRegistryTest is Test { + SessionKeyRegistry registry = new SessionKeyRegistry(); + + address payable constant SIGNER_ONE = payable(0x1111111111111111111111111111111111111111); + address payable constant SIGNER_TWO = payable(0x2222222222222222222222222222222222222222); + bytes32 private constant permission1 = 0x1111111111111111111111111111111111111111111111111111111111111111; + bytes32 private constant permission2 = 0x2222222222222222222222222222222222222222222222222222222222222222; + bytes32 private constant permission3 = 0x3333333333333333333333333333333333333333333333333333333333333333; + + uint256 DAY_SECONDS = 24 * 60 * 60; + + function test_loginAndFund() public { + bytes32[] memory permissions = new bytes32[](3); + permissions[0] = permission1; + permissions[1] = permission2; + permissions[2] = permission3; + + assertEq(SIGNER_ONE.balance, 0); + assertEq(registry.authorizationExpiry(address(this), SIGNER_ONE, permission1), 0); + assertEq(registry.authorizationExpiry(address(this), SIGNER_ONE, permission2), 0); + assertEq(registry.authorizationExpiry(address(this), SIGNER_ONE, permission3), 0); + + uint256 expiry = block.timestamp + DAY_SECONDS; + registry.loginAndFund{value: 1 ether}(SIGNER_ONE, expiry, permissions); + + assertEq(SIGNER_ONE.balance, 1 ether); + assertEq(registry.authorizationExpiry(address(this), SIGNER_ONE, permission1), expiry); + assertEq(registry.authorizationExpiry(address(this), SIGNER_ONE, permission2), expiry); + assertEq(registry.authorizationExpiry(address(this), SIGNER_ONE, permission3), expiry); + + registry.revoke(SIGNER_ONE, permissions); + assertEq(registry.authorizationExpiry(address(this), SIGNER_ONE, permission1), 0); + assertEq(registry.authorizationExpiry(address(this), SIGNER_ONE, permission2), 0); + assertEq(registry.authorizationExpiry(address(this), SIGNER_ONE, permission3), 0); + } + + function test_login() public { + bytes32[] memory permissions = new bytes32[](2); + permissions[0] = permission3; + permissions[1] = permission1; + + assertEq(registry.authorizationExpiry(address(this), SIGNER_TWO, permission1), 0); + assertEq(registry.authorizationExpiry(address(this), SIGNER_TWO, permission2), 0); + assertEq(registry.authorizationExpiry(address(this), SIGNER_TWO, permission3), 0); + + uint256 expiry = block.timestamp + 4 * DAY_SECONDS; + + registry.login(SIGNER_TWO, expiry, permissions); + + assertEq(registry.authorizationExpiry(address(this), SIGNER_TWO, permission1), expiry); + assertEq(registry.authorizationExpiry(address(this), SIGNER_TWO, permission2), 0); + assertEq(registry.authorizationExpiry(address(this), SIGNER_TWO, permission3), expiry); + + registry.revoke(SIGNER_TWO, permissions); + assertEq(registry.authorizationExpiry(address(this), SIGNER_TWO, permission1), 0); + assertEq(registry.authorizationExpiry(address(this), SIGNER_TWO, permission2), 0); + assertEq(registry.authorizationExpiry(address(this), SIGNER_TWO, permission3), 0); + } +} diff --git a/service_contracts/tools/common/check-contract-size-payments.sh 
b/service_contracts/tools/common/check-contract-size-payments.sh
new file mode 100755
index 00000000..81268fd5
--- /dev/null
+++ b/service_contracts/tools/common/check-contract-size-payments.sh
@@ -0,0 +1,94 @@
+#!/usr/bin/env bash
+#
+# This script checks if any Solidity contract/library in the `src/` folder
+# exceeds the EIP-170 contract runtime size limit (24,576 bytes)
+# and the EIP-3860 init code size limit (49,152 bytes).
+# Intended for use in CI (e.g., GitHub Actions) with Foundry.
+# Exits 1 and prints the list of exceeding contracts if violations are found.
+# NOTE: This script requires Bash (not sh or dash) due to use of mapfile and [[ ... ]].
+
+set -euo pipefail
+
+command -v jq >/dev/null 2>&1 || { echo >&2 "jq is required but not installed."; exit 1; }
+command -v forge >/dev/null 2>&1 || { echo >&2 "forge is required but not installed."; exit 1; }
+
+# Gather contract and library names from src/
+# Only matches [A-Za-z0-9_] in contract/library names (no special characters)
+if [[ -d src/ ]]; then
+    mapfile -t contracts < <(grep -rE '^(contract|library) ' src/ 2>/dev/null | sed -E 's/.*(contract|library) ([A-Za-z0-9_]+).*/\2/')
+else
+    contracts=()
+fi
+
+# Exit early if none found (common in empty/new projects)
+if [[ ${#contracts[@]} -eq 0 ]]; then
+    echo "No contracts or libraries found in src/."
+    exit 0
+fi
+
+# Build the contracts, get size info as JSON (ignore non-zero exit to always parse output)
+forge build --sizes --json | jq . > contract_sizes.json || true
+
+# Validate JSON output
+if ! jq empty contract_sizes.json 2>/dev/null; then
+    echo "forge build did not return valid JSON. Output:"
+    cat contract_sizes.json
+    exit 1
+fi
+
+if jq -e '. == {}' contract_sizes.json >/dev/null; then
+    echo "forge did not find any contracts. forge build:"
+    # This usually means build failure
+    forge build
+    exit 1
+fi
+
+json=$(cat contract_sizes.json)
+
+# Filter JSON: keep only contracts/libraries from src/
+json=$(echo "$json" | jq --argjson keys "$(printf '%s\n' "${contracts[@]}" | jq -R . | jq -s .)" '
+  to_entries
+  | map(select(.key as $k | $keys | index($k)))
+  | from_entries
+')
+
+# Find all that violate the EIP-170 runtime size limit (24,576 bytes)
+exceeding_runtime=$(echo "$json" | jq -r '
+  to_entries
+  | map(select(.value.runtime_size > 24576))
+  | .[]
+  | "\(.key): \(.value.runtime_size) bytes (runtime size)"'
+)
+
+# Find all that violate the EIP-3860 init code size limit (49,152 bytes)
+exceeding_initcode=$(echo "$json" | jq -r '
+  to_entries
+  | map(select(.value.init_size > 49152))
+  | .[]
+  | "\(.key): \(.value.init_size) bytes (init code size)"'
+)
+
+# Initialize status
+status=0
+
+if [[ -n "$exceeding_runtime" ]]; then
+    echo "ERROR: The following contracts exceed EIP-170 runtime size (24,576 bytes):"
+    echo "$exceeding_runtime"
+    status=1
+fi
+
+if [[ -n "$exceeding_initcode" ]]; then
+    echo "ERROR: The following contracts exceed EIP-3860 init code size (49,152 bytes):"
+    echo "$exceeding_initcode"
+    status=1
+fi
+
+if [[ $status -eq 0 ]]; then
+    echo "All contracts are within the EIP-170 runtime and EIP-3860 init code size limits."
+fi
+
+# Clean up temporary file
+rm -f contract_sizes.json
+
+# Exit with appropriate status
+exit $status
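For reviewers unfamiliar with the data this script consumes: `forge build --sizes --json` emits a map from contract name to size info, which the jq filters above reduce to the offending entries. A minimal sketch of that flow, using a hand-written stand-in for the Foundry output (the `runtime_size`/`init_size` keys are the ones the script assumes, not a schema guaranteed here):

```bash
# Illustrative stand-in for `forge build --sizes --json` output.
cat > contract_sizes.json <<'EOF'
{
  "Payments": { "runtime_size": 21000, "init_size": 23000 },
  "BigContract": { "runtime_size": 25000, "init_size": 51000 }
}
EOF

# The same EIP-170 filter the script applies:
jq -r 'to_entries
  | map(select(.value.runtime_size > 24576))
  | .[]
  | "\(.key): \(.value.runtime_size) bytes (runtime size)"' contract_sizes.json
# Prints: BigContract: 25000 bytes (runtime size)

rm -f contract_sizes.json
```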
diff --git a/service_contracts/tools/check-contract-size.sh b/service_contracts/tools/common/check-contract-size.sh
similarity index 100%
rename from service_contracts/tools/check-contract-size.sh
rename to service_contracts/tools/common/check-contract-size.sh
diff --git a/service_contracts/tools/payments/README.md b/service_contracts/tools/payments/README.md
new file mode 100644
index 00000000..00706e6a
--- /dev/null
+++ b/service_contracts/tools/payments/README.md
@@ -0,0 +1,70 @@
+# Filecoin Payment Services Tools
+
+A place for all tools related to deploying, upgrading, and managing the Payments contract.
+
+## Tools
+
+### Available Tools
+
+- **Deployment Script**: `deploy.sh` (all networks)
+
+### Deployment Script
+
+#### deploy.sh
+This script deploys the Payments contract to the specified network. Usage:
+
+```bash
+./tools/deploy.sh <CHAIN_ID>
+# Example: 314159 (calibnet), 314 (mainnet), 12345 (devnet)
+```
+- Uses `PAYMENTS_PATH` if set, otherwise defaults to `src/Payments.sol:Payments`.
+- Sets a default `RPC_URL` if not provided, based on `CHAIN_ID`.
+- Outputs the deployed Payments contract (implementation) address.
+
+### Environment Variables
+
+To use these scripts, set the following environment variables:
+- `RPC_URL` - The RPC URL for the network. For Calibration Testnet (314159) and Mainnet (314), a default is set if not provided. For devnet or any custom CHAIN_ID, you must set `RPC_URL` explicitly.
+- `KEYSTORE` - Path to the keystore file
+- `PASSWORD` - Password for the keystore
+- `PAYMENTS_PATH` - Path to the implementation contract (e.g., "src/Payments.sol:Payments")
+
+### Make Targets
+
+```bash
+# Deployment
+make deploy-devnet    # Deploy to local devnet
+make deploy-calibnet  # Deploy to Calibration Testnet
+make deploy-mainnet   # Deploy to Mainnet
+```
+
+---
+
+### Direct Script Usage (without Make)
+
+You can run all scripts directly from the `tools/` directory without using Makefile targets.
+Set the required environment variables as shown below, then invoke the scripts with the appropriate arguments.
+
+**Note:**
+- For Calibration Testnet (314159) and Mainnet (314), the script sets a default `RPC_URL` if not provided.
+- For devnet or any custom `CHAIN_ID`, you must set `RPC_URL` explicitly or the script will exit with an error.
+- You can always inspect each script for more details on required and optional environment variables.
+
+#### Deploy
+
+```bash
+export KEYSTORE="/path/to/keystore"
+export PASSWORD="your-password"
+# Optionally set PAYMENTS_PATH and RPC_URL
+./tools/deploy.sh <CHAIN_ID>
+# Example: ./tools/deploy.sh 314159
+```
+
+### Example Usage
+
+```bash
+# Deploy to calibnet
+export KEYSTORE="/path/to/keystore"
+export PASSWORD="your-password"
+make deploy-calibnet
+```
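A concrete run of the workflow this README describes might look as follows; the keystore path and password are placeholders, not values from this repository:

```bash
# Deploy the Payments contract to calibnet without Make.
export KEYSTORE="$HOME/.foundry/keystores/deployer"   # placeholder path
export PASSWORD="example-password"                    # placeholder
# RPC_URL can be omitted for chain IDs 314159/314; deploy.sh falls back
# to the default Glif endpoints for those networks.
./tools/deploy.sh 314159
# On success the script ends with a summary line such as:
#   Payments Contract Address: 0x...
```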
diff --git a/service_contracts/tools/payments/deploy.sh b/service_contracts/tools/payments/deploy.sh
new file mode 100755
index 00000000..c43c8370
--- /dev/null
+++ b/service_contracts/tools/payments/deploy.sh
@@ -0,0 +1,56 @@
+#! /bin/bash
+# deploy.sh deploys the Payments contract to the specified network
+# Usage: ./tools/deploy.sh <CHAIN_ID>
+# Example: ./tools/deploy.sh 314159 (calibnet)
+#          ./tools/deploy.sh 314 (mainnet)
+#          ./tools/deploy.sh 31415926 (devnet)
+#
+if [ -f ".env" ]; then
+    export $(grep -v '^#' .env | xargs)
+fi
+set -euo pipefail
+
+CHAIN_ID=${1:-314159} # Default to calibnet
+
+# Set default RPC_URL if not set
+if [ -z "${RPC_URL:-}" ]; then
+    if [ "$CHAIN_ID" = "314159" ]; then
+        export RPC_URL="https://api.calibration.node.glif.io/rpc/v1"
+    elif [ "$CHAIN_ID" = "314" ]; then
+        export RPC_URL="https://api.node.glif.io/rpc/v1"
+    else
+        echo "Error: RPC_URL must be set for CHAIN_ID $CHAIN_ID"
+        exit 1
+    fi
+fi
+
+if [ -z "${KEYSTORE:-}" ]; then
+    echo "Error: KEYSTORE is not set"
+    exit 1
+fi
+if [ -z "${PASSWORD:-}" ]; then
+    echo "Error: PASSWORD is not set"
+    exit 1
+fi
+
+ADDR=$(cast wallet address --keystore "$KEYSTORE" --password "$PASSWORD")
+echo "Deploying Payments from address $ADDR to chain $CHAIN_ID"
+NONCE="$(cast nonce --rpc-url "$RPC_URL" "$ADDR")"
+
+# Use PAYMENTS_PATH if set, otherwise default
+if [ -z "${PAYMENTS_PATH:-}" ]; then
+    PAYMENTS_PATH="src/Payments.sol:Payments"
+fi
+
+echo "Deploying Payments implementation ($PAYMENTS_PATH)"
+export PAYMENTS_CONTRACT_ADDRESS=$(forge create --rpc-url "$RPC_URL" --keystore "$KEYSTORE" --password "$PASSWORD" --broadcast --nonce $NONCE --chain-id $CHAIN_ID $PAYMENTS_PATH | grep "Deployed to" | awk '{print $3}')
+if [ -z "$PAYMENTS_CONTRACT_ADDRESS" ]; then
+    echo "Error: Failed to extract Payments implementation contract address"
+    exit 1
+fi
+echo "Payments Address: $PAYMENTS_CONTRACT_ADDRESS"
+
+echo ""
+echo "=== DEPLOYMENT SUMMARY ==="
+echo "Payments Contract Address: $PAYMENTS_CONTRACT_ADDRESS"
+echo "=========================="
diff --git a/service_contracts/tools/pdp/README.md b/service_contracts/tools/pdp/README.md
new file mode 100644
index 00000000..146b4fa2
--- /dev/null
+++ b/service_contracts/tools/pdp/README.md
@@ -0,0 +1,40 @@
+A place for all tools related to running and developing the PDP contracts. When adding a tool, please fill in a description.
+
+# Tools
+
+## Deployment Scripts
+
+### deploy-devnet.sh
+Deploys PDPVerifier to a local filecoin devnet. Assumes the lotus binary is in PATH and a local devnet is running with the eth API enabled. The keystore will be funded automatically from the lotus default address.
+
+### deploy-calibnet.sh
+Deploys PDPVerifier to the Filecoin Calibration testnet.
+
+### deploy-mainnet.sh
+Deploys PDPVerifier to Filecoin mainnet.
+
+### deploy-simple-pdp-service.sh ⚠️ DEPRECATED
+**As of v2.0.0, SimplePDPService is deprecated.** This optional script allows deployment of SimplePDPService for reference/community use only. Requires an existing PDPVerifier deployment. See `DEPRECATION.md` for details.
+
+## Upgrade Scripts
+
+### upgrade-contract-calibnet.sh
+Generic script for upgrading proxy contracts on the Calibration testnet.
+
+### deploy-transfer-ownership-upgrade-calibnet.sh
+Deploys, upgrades, and transfers ownership of PDPVerifier on the Calibration testnet.
+
+## PDP Interaction Scripts
+We have some scripts for interacting with the PDP service contract through the ETH RPC API:
+- add.sh
+- remove.sh
+- create_data_set.sh
+- find.sh
+- size.sh
+
+To use these scripts, set the following environment variables:
+- KEYSTORE
+- PASSWORD
+- RPC_URL
+
+with values corresponding to the local geth keystore path, the password for that keystore, and the RPC URL of the network where the PDP service contract is deployed.
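Tying the README together, a minimal session with the interaction scripts might look like the sketch below. The contract address is the example one used in the script comments; the record keeper address and the meaning of create_data_set.sh's second argument are assumptions for illustration:

```bash
export KEYSTORE="$HOME/.foundry/keystores/deployer"       # placeholder path
export PASSWORD="example-password"                        # placeholder
export RPC_URL="https://api.calibration.node.glif.io/rpc/v1"

PDP_VERIFIER=0x067fd08940ba732C25c44423005D662BF95e6763   # example address from the script docs
RECORD_KEEPER=0x2222222222222222222222222222222222222222  # placeholder listener address

# Create a data set, then query how many piece ids it has seen so far.
./create_data_set.sh "$PDP_VERIFIER" "$RECORD_KEEPER"
./size.sh "$PDP_VERIFIER" 0
```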
diff --git a/service_contracts/tools/pdp/add.sh b/service_contracts/tools/pdp/add.sh new file mode 100755 index 00000000..aa97f2a1 --- /dev/null +++ b/service_contracts/tools/pdp/add.sh @@ -0,0 +1,7 @@ +#! /bin/bash +# Usage: ./add.sh +# add-input-list is a comma separated list of tuples of the form ((bytes),uint256) +# Example: ./add.sh 0x067fd08940ba732C25c44423005D662BF95e6763 0 '[((0x000181E20392202070FB4C14254CE86AB762E0280E469AF4E01B34A1B4B08F75C258F197798EE33C),256)]' +addCallData=$(cast calldata "addPieces(uint256,((bytes),uint256)[])(uint256)" $2 $3) + +cast send --keystore $KEYSTORE --password "$PASSWORD" --rpc-url $RPC_URL $1 $addCallData diff --git a/service_contracts/tools/pdp/check-contract-size.sh b/service_contracts/tools/pdp/check-contract-size.sh new file mode 100644 index 00000000..32bebd5e --- /dev/null +++ b/service_contracts/tools/pdp/check-contract-size.sh @@ -0,0 +1,96 @@ +#!/usr/bin/env bash +# +# This script checks if any Solidity contract/library in the `src/` folder +# exceeds the EIP-170 contract runtime size limit (24,576 bytes) +# and the EIP-3860 init code size limit (49,152 bytes). +# Intended for use in CI (e.g., GitHub Actions) with Foundry. +# Exits 1 and prints the list of exceeding contracts if violations are found. +# NOTE: This script requires Bash (not sh or dash) due to use of mapfile and [[ ... ]]. + +set -euo pipefail + +command -v jq >/dev/null 2>&1 || { echo >&2 "jq is required but not installed."; exit 1; } +command -v forge >/dev/null 2>&1 || { echo >&2 "forge is required but not installed."; exit 1; } + +# Gather contract and library names from src/ +# Only matches [A-Za-z0-9_] in contract/library names (no special characters) +if [[ -d src/ ]]; then + mapfile -t contracts < <(grep -rE '^(contract|library) ' src/ 2>/dev/null | sed -E 's/.*(contract|library) ([A-Za-z0-9_]+).*/\2/') +else + contracts=() +fi + +# Exit early if none found +if [[ ${#contracts[@]} -eq 0 ]]; then + echo "No contracts or libraries found in src/." + exit 0 +fi + +# Build the contracts, get size info as JSON (ignore non-zero exit to always parse output) +forge clean || true +forge build --sizes --json | jq . > contract_sizes.json || true + +# Validate JSON output +if ! jq empty contract_sizes.json 2>/dev/null; then + echo "forge build did not return valid JSON. Output:" + cat contract_sizes.json + exit 1 +fi + +if jq -e '. == {}' contract_sizes.json >/dev/null; then + echo "forge did not find any contracts. forge build:" + # This usually means build failure + forge build + exit 1 +fi + +json=$(cat contract_sizes.json) + +# Filter JSON: keep only contracts/libraries from src/ +json=$(echo "$json" | jq --argjson keys "$(printf '%s\n' "${contracts[@]}" | jq -R . 
| jq -s .)" '
+  to_entries
+  | map(select(.key as $k | $keys | index($k)))
+  | from_entries
+')
+
+# Find all that violate the EIP-170 runtime size limit (24,576 bytes)
+exceeding_runtime=$(echo "$json" | jq -r '
+  to_entries
+  | map(select(.value.runtime_size > 24576))
+  | .[]
+  | "\(.key): \(.value.runtime_size) bytes (runtime size)"'
+)
+
+# Find all that violate the EIP-3860 init code size limit (49,152 bytes)
+exceeding_initcode=$(echo "$json" | jq -r '
+  to_entries
+  | map(select(.value.init_size > 49152))
+  | .[]
+  | "\(.key): \(.value.init_size) bytes (init code size)"'
+)
+
+# Initialize status
+status=0
+
+if [[ -n "$exceeding_runtime" ]]; then
+    echo "ERROR: The following contracts exceed EIP-170 runtime size (24,576 bytes):"
+    echo "$exceeding_runtime"
+    status=1
+fi
+
+if [[ -n "$exceeding_initcode" ]]; then
+    echo "ERROR: The following contracts exceed EIP-3860 init code size (49,152 bytes):"
+    echo "$exceeding_initcode"
+    status=1
+fi
+
+if [[ $status -eq 0 ]]; then
+    echo "All contracts are within the EIP-170 runtime and EIP-3860 init code size limits."
+fi
+
+# Clean up temporary file
+rm -f contract_sizes.json
+
+# Exit with appropriate status
+exit $status
+
diff --git a/service_contracts/tools/pdp/claim-owner.sh b/service_contracts/tools/pdp/claim-owner.sh
new file mode 100755
index 00000000..d20528c2
--- /dev/null
+++ b/service_contracts/tools/pdp/claim-owner.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+# claim-owner.sh - Script for claiming ownership of a data set
+
+# Check if correct number of arguments provided
+if [ "$#" -ne 1 ]; then
+    echo "Usage: $0 <data-set-id>"
+    exit 1
+fi
+
+# Get argument
+DATA_SET_ID=$1
+
+# Check required environment variables
+if [ -z "$PASSWORD" ] || [ -z "$KEYSTORE" ] || [ -z "$RPC_URL" ] || [ -z "$CONTRACT_ADDRESS" ]; then
+    echo "Error: Missing required environment variables."
+    echo "Please set PASSWORD, KEYSTORE, RPC_URL, and CONTRACT_ADDRESS."
+    exit 1
+fi
+
+echo "Claiming ownership of data set ID: $DATA_SET_ID"
+
+# Get claimer's address from keystore
+CLAIMER_ADDRESS=$(cast wallet address --keystore "$KEYSTORE" --password "$PASSWORD")
+echo "New owner address (claiming ownership): $CLAIMER_ADDRESS"
+
+# Construct calldata using cast calldata
+CALLDATA=$(cast calldata "claimDataSetStorageProvider(uint256,bytes)" "$DATA_SET_ID" "0x")
+
+echo "Sending transaction..."
+
+# Send transaction
+TX_HASH=$(cast send --rpc-url "$RPC_URL" \
+    --keystore "$KEYSTORE" \
+    --password "$PASSWORD" \
+    "$CONTRACT_ADDRESS" \
+    "$CALLDATA")
+
+echo "Transaction sent! Hash: $TX_HASH"
+echo "Successfully claimed ownership of data set $DATA_SET_ID"
\ No newline at end of file
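claim-owner.sh is the second half of a two-step handover: the current storage provider first runs propose-owner.sh (added further down in this diff), and the proposed owner then claims with their own keystore. A sketch of the full sequence, with placeholder values throughout:

```bash
export RPC_URL="https://api.calibration.node.glif.io/rpc/v1"
export CONTRACT_ADDRESS=0x067fd08940ba732C25c44423005D662BF95e6763   # placeholder proxy address

# Step 1: the current owner proposes the new owner for data set 7.
export KEYSTORE=/path/to/current-owner.keystore PASSWORD=pw-current  # placeholders
./propose-owner.sh 7 0x2222222222222222222222222222222222222222

# Step 2: the proposed owner claims, signing with their own keystore.
export KEYSTORE=/path/to/new-owner.keystore PASSWORD=pw-new          # placeholders
./claim-owner.sh 7
```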
diff --git a/service_contracts/tools/pdp/create_data_set.sh b/service_contracts/tools/pdp/create_data_set.sh
new file mode 100755
index 00000000..44f5cdb5
--- /dev/null
+++ b/service_contracts/tools/pdp/create_data_set.sh
@@ -0,0 +1,22 @@
+#! /bin/bash
+# Usage: ./create_data_set.sh <contract-address> <record-keeper-address>
+
+# Check if required environment variables are set
+if [ -z "$RPC_URL" ] || [ -z "$KEYSTORE" ] || [ -z "$PASSWORD" ]; then
+    echo "Error: Please set RPC_URL, KEYSTORE, and PASSWORD environment variables."
+    exit 1
+fi
+
+# Get the contract address and record keeper address from the command line arguments
+if [ -z "$1" ] || [ -z "$2" ]; then
+    echo "Usage: $0 <contract-address> <record-keeper-address>"
+    exit 1
+fi
+
+CONTRACT_ADDRESS=$1
+
+# Create the calldata for createDataSet(address)
+CALLDATA=$(cast calldata "createDataSet(address)(uint256)" $2)
+
+# Send the transaction
+cast send --keystore $KEYSTORE --password "$PASSWORD" --rpc-url $RPC_URL $CONTRACT_ADDRESS $CALLDATA
\ No newline at end of file
diff --git a/service_contracts/tools/pdp/deploy-calibnet.sh b/service_contracts/tools/pdp/deploy-calibnet.sh
new file mode 100755
index 00000000..1c8ff416
--- /dev/null
+++ b/service_contracts/tools/pdp/deploy-calibnet.sh
@@ -0,0 +1,53 @@
+#! /bin/bash
+# deploy-calibnet.sh deploys the PDP verifier contract to the Filecoin Calibration testnet
+# Assumption: KEYSTORE, PASSWORD, RPC_URL env vars are set to an appropriate eth keystore path and password
+# and to a valid RPC_URL for calibnet.
+# Assumption: forge, cast, jq are in the PATH
+# Assumption: called from contracts directory so forge paths work out
+#
+echo "Deploying to calibnet"
+
+if [ -z "$RPC_URL" ]; then
+    echo "Error: RPC_URL is not set"
+    exit 1
+fi
+
+if [ -z "$KEYSTORE" ]; then
+    echo "Error: KEYSTORE is not set"
+    exit 1
+fi
+
+if [ -z "$CHALLENGE_FINALITY" ]; then
+    echo "Error: CHALLENGE_FINALITY is not set"
+    exit 1
+fi
+
+ADDR=$(cast wallet address --keystore "$KEYSTORE" --password "$PASSWORD")
+echo "Deploying PDP verifier from address $ADDR"
+# Parse the output of forge create to extract the contract address
+
+NONCE="$(cast nonce --rpc-url "$RPC_URL" "$ADDR")"
+VERIFIER_IMPLEMENTATION_ADDRESS=$(forge create --rpc-url "$RPC_URL" --keystore "$KEYSTORE" --password "$PASSWORD" --broadcast --nonce $NONCE --chain-id 314159 src/PDPVerifier.sol:PDPVerifier | grep "Deployed to" | awk '{print $3}')
+if [ -z "$VERIFIER_IMPLEMENTATION_ADDRESS" ]; then
+    echo "Error: Failed to extract PDP verifier contract address"
+    exit 1
+fi
+echo "PDP verifier implementation deployed at: $VERIFIER_IMPLEMENTATION_ADDRESS"
+echo "Deploying PDP verifier proxy"
+NONCE=$(expr $NONCE + "1")
+
+INIT_DATA=$(cast calldata "initialize(uint256)" $CHALLENGE_FINALITY)
+PDP_VERIFIER_ADDRESS=$(forge create --rpc-url "$RPC_URL" --keystore "$KEYSTORE" --password "$PASSWORD" --broadcast --nonce $NONCE --chain-id 314159 src/ERC1967Proxy.sol:MyERC1967Proxy --constructor-args $VERIFIER_IMPLEMENTATION_ADDRESS $INIT_DATA | grep "Deployed to" | awk '{print $3}')
+echo "PDP verifier deployed at: $PDP_VERIFIER_ADDRESS"
+
+echo ""
+echo "================================================="
+echo "DEPLOYMENT COMPLETE"
+echo "================================================="
+echo "PDPVerifier Implementation: $VERIFIER_IMPLEMENTATION_ADDRESS"
+echo "PDPVerifier Proxy: $PDP_VERIFIER_ADDRESS"
+echo ""
+echo "NOTE: SimplePDPService is no longer deployed by default as of v2.0.0."
+echo "      It remains available as a reference implementation in src/SimplePDPService.sol"
+echo "      for community use and learning purposes."
+echo ""
diff --git a/service_contracts/tools/pdp/deploy-devnet.sh b/service_contracts/tools/pdp/deploy-devnet.sh
new file mode 100755
index 00000000..3c0519d9
--- /dev/null
+++ b/service_contracts/tools/pdp/deploy-devnet.sh
@@ -0,0 +1,55 @@
+#! /bin/bash
+# deploy-devnet.sh deploys the PDP verifier and auxiliary contracts to a filecoin devnet
+# Assumption: KEYSTORE, PASSWORD, RPC_URL env vars are set to an appropriate eth keystore path and password
+# and to a valid RPC_URL for the devnet.
+# Assumption: forge, cast, lotus, jq are in the PATH
+# Assumption: called from contracts directory so forge paths work out
+#
+echo "Deploying to devnet"
+
+if [ -z "$RPC_URL" ]; then
+    echo "Error: RPC_URL is not set"
+    exit 1
+fi
+
+if [ -z "$KEYSTORE" ]; then
+    echo "Error: KEYSTORE is not set"
+    exit 1
+fi
+
+# Send funds from default to keystore address
+# assumes lotus binary in path
+clientAddr=$(cat $KEYSTORE | jq '.address' | sed -e 's/\"//g')
+echo "Sending funds to $clientAddr"
+lotus send $clientAddr 10000
+sleep 5 ## Sleep for 5 seconds so funds are available and the actor is registered
+
+NONCE="$(cast nonce --rpc-url "$RPC_URL" "$clientAddr")"
+
+echo "Deploying PDP verifier"
+# Parse the output of forge create to extract the contract address
+VERIFIER_IMPLEMENTATION_ADDRESS=$(forge create --rpc-url "$RPC_URL" --keystore "$KEYSTORE" --password "$PASSWORD" --nonce $NONCE --broadcast src/PDPVerifier.sol:PDPVerifier | grep "Deployed to" | awk '{print $3}')
+if [ -z "$VERIFIER_IMPLEMENTATION_ADDRESS" ]; then
+    echo "Error: Failed to extract PDP verifier contract address"
+    exit 1
+fi
+echo "PDP verifier implementation deployed at: $VERIFIER_IMPLEMENTATION_ADDRESS"
+
+NONCE=$(expr $NONCE + "1")
+
+echo "Deploying PDP verifier proxy"
+INIT_DATA=$(cast calldata "initialize(uint256)" 150)
+PDP_VERIFIER_ADDRESS=$(forge create --rpc-url "$RPC_URL" --keystore "$KEYSTORE" --password "$PASSWORD" --nonce $NONCE --broadcast src/ERC1967Proxy.sol:MyERC1967Proxy --constructor-args $VERIFIER_IMPLEMENTATION_ADDRESS $INIT_DATA | grep "Deployed to" | awk '{print $3}')
+echo "PDP verifier deployed at: $PDP_VERIFIER_ADDRESS"
+
+echo ""
+echo "================================================="
+echo "DEPLOYMENT COMPLETE"
+echo "================================================="
+echo "PDPVerifier Implementation: $VERIFIER_IMPLEMENTATION_ADDRESS"
+echo "PDPVerifier Proxy: $PDP_VERIFIER_ADDRESS"
+echo ""
+echo "NOTE: SimplePDPService is no longer deployed by default as of v2.0.0."
+echo "      It remains available as a reference implementation in src/SimplePDPService.sol"
+echo "      for community use and learning purposes."
+echo ""
diff --git a/service_contracts/tools/pdp/deploy-mainnet.sh b/service_contracts/tools/pdp/deploy-mainnet.sh
new file mode 100755
index 00000000..1a543820
--- /dev/null
+++ b/service_contracts/tools/pdp/deploy-mainnet.sh
@@ -0,0 +1,51 @@
+#! /bin/bash
+# deploy-mainnet.sh deploys the PDP verifier contract to Filecoin mainnet
+# Assumption: KEYSTORE, PASSWORD, RPC_URL env vars are set to an appropriate eth keystore path and password
+# and to a valid RPC_URL for mainnet.
+# Assumption: forge, cast, jq are in the PATH +# Assumption: called from contracts directory so forge paths work out +# +echo "Deploying to mainnet" + +if [ -z "$RPC_URL" ]; then + echo "Error: RPC_URL is not set" + exit 1 +fi + +if [ -z "$KEYSTORE" ]; then + echo "Error: KEYSTORE is not set" + exit 1 +fi + +# CHALLENGE_FINALITY should always be 150 in production +CHALLENGE_FINALITY=150 + +ADDR=$(cast wallet address --keystore "$KEYSTORE" --password "$PASSWORD") +echo "Deploying PDP verifier from address $ADDR" +# Parse the output of forge create to extract the contract address + +NONCE="$(cast nonce --rpc-url "$RPC_URL" "$ADDR")" +VERIFIER_IMPLEMENTATION_ADDRESS=$(forge create --rpc-url "$RPC_URL" --keystore "$KEYSTORE" --password "$PASSWORD" --broadcast --nonce $NONCE --chain-id 314 src/PDPVerifier.sol:PDPVerifier | grep "Deployed to" | awk '{print $3}') +if [ -z "$VERIFIER_IMPLEMENTATION_ADDRESS" ]; then + echo "Error: Failed to extract PDP verifier contract address" + exit 1 +fi +echo "PDP verifier implementation deployed at: $VERIFIER_IMPLEMENTATION_ADDRESS" +echo "Deploying PDP verifier proxy" +NONCE=$(expr $NONCE + "1") + +INIT_DATA=$(cast calldata "initialize(uint256)" $CHALLENGE_FINALITY) +PDP_VERIFIER_ADDRESS=$(forge create --rpc-url "$RPC_URL" --keystore "$KEYSTORE" --password "$PASSWORD" --broadcast --nonce $NONCE --chain-id 314 src/ERC1967Proxy.sol:MyERC1967Proxy --constructor-args $VERIFIER_IMPLEMENTATION_ADDRESS $INIT_DATA | grep "Deployed to" | awk '{print $3}') +echo "PDP verifier deployed at: $PDP_VERIFIER_ADDRESS" + +echo "" +echo "=================================================" +echo "DEPLOYMENT COMPLETE" +echo "=================================================" +echo "PDPVerifier Implementation: $VERIFIER_IMPLEMENTATION_ADDRESS" +echo "PDPVerifier Proxy: $PDP_VERIFIER_ADDRESS" +echo "" +echo "NOTE: SimplePDPService is no longer deployed by default as of v2.0.0." +echo " It remains available as a reference implementation in src/SimplePDPService.sol" +echo " For community use and learning purposes." +echo "" diff --git a/service_contracts/tools/pdp/deploy-simple-pdp-service.sh b/service_contracts/tools/pdp/deploy-simple-pdp-service.sh new file mode 100755 index 00000000..eb81ae6d --- /dev/null +++ b/service_contracts/tools/pdp/deploy-simple-pdp-service.sh @@ -0,0 +1,102 @@ +#!/bin/bash +# deploy-simple-pdp-service.sh - Optional deployment script for SimplePDPService +# +# ⚠️ DEPRECATED as of v2.0.0 ⚠️ +# SimplePDPService is no longer actively maintained but remains available +# as a reference implementation for the community. +# +# This script deploys SimplePDPService to work with an existing PDPVerifier. +# +# Prerequisites: +# - PDPVerifier must already be deployed +# - Set PDP_VERIFIER_ADDRESS environment variable to the PDPVerifier proxy address +# - Set RPC_URL, KEYSTORE, PASSWORD environment variables +# +# Usage: +# export PDP_VERIFIER_ADDRESS=0x... +# export RPC_URL=https://... +# export KEYSTORE=/path/to/keystore +# export PASSWORD=your_password +# ./deploy-simple-pdp-service.sh + +echo "=================================================" +echo "⚠️ DEPRECATED: SimplePDPService Deployment ⚠️" +echo "=================================================" +echo "" +echo "SimplePDPService is no longer actively maintained as of v2.0.0." +echo "This script is provided for reference and community use only." +echo "" +echo "Consider implementing your own service layer using PDPVerifier directly." +echo "See src/SimplePDPService.sol as a reference implementation." 
+echo "" +read -p "Do you want to continue with SimplePDPService deployment? (y/N): " -n 1 -r +echo +if [[ ! $REPLY =~ ^[Yy]$ ]]; then + echo "Deployment cancelled." + exit 0 +fi + +echo "" +echo "Proceeding with SimplePDPService deployment..." + +# Validate required environment variables +if [ -z "$PDP_VERIFIER_ADDRESS" ]; then + echo "Error: PDP_VERIFIER_ADDRESS is not set" + echo "Please set it to your deployed PDPVerifier proxy address" + exit 1 +fi + +if [ -z "$RPC_URL" ]; then + echo "Error: RPC_URL is not set" + exit 1 +fi + +if [ -z "$KEYSTORE" ]; then + echo "Error: KEYSTORE is not set" + exit 1 +fi + +# Determine chain ID based on RPC URL +CHAIN_ID=314 # Default to mainnet +if [[ "$RPC_URL" == *"calibration"* ]]; then + CHAIN_ID=314159 +fi + +ADDR=$(cast wallet address --keystore "$KEYSTORE" --password "$PASSWORD") +echo "Deploying SimplePDPService from address $ADDR" +echo "Using PDPVerifier at: $PDP_VERIFIER_ADDRESS" + +NONCE="$(cast nonce --rpc-url "$RPC_URL" "$ADDR")" + +echo "Deploying SimplePDPService implementation..." +SERVICE_IMPLEMENTATION_ADDRESS=$(forge create --rpc-url "$RPC_URL" --keystore "$KEYSTORE" --password "$PASSWORD" --broadcast --nonce $NONCE --chain-id $CHAIN_ID src/SimplePDPService.sol:SimplePDPService | grep "Deployed to" | awk '{print $3}') + +if [ -z "$SERVICE_IMPLEMENTATION_ADDRESS" ]; then + echo "Error: Failed to extract SimplePDPService contract address" + exit 1 +fi + +echo "SimplePDPService implementation deployed at: $SERVICE_IMPLEMENTATION_ADDRESS" + +NONCE=$(expr $NONCE + "1") + +echo "Deploying SimplePDPService proxy..." +INIT_DATA=$(cast calldata "initialize(address)" $PDP_VERIFIER_ADDRESS) +PDP_SERVICE_ADDRESS=$(forge create --rpc-url "$RPC_URL" --keystore "$KEYSTORE" --password "$PASSWORD" --broadcast --nonce $NONCE --chain-id $CHAIN_ID src/ERC1967Proxy.sol:MyERC1967Proxy --constructor-args $SERVICE_IMPLEMENTATION_ADDRESS $INIT_DATA | grep "Deployed to" | awk '{print $3}') + +if [ -z "$PDP_SERVICE_ADDRESS" ]; then + echo "Error: Failed to deploy SimplePDPService proxy" + exit 1 +fi + +echo "" +echo "=================================================" +echo "SimplePDPService DEPLOYMENT COMPLETE" +echo "=================================================" +echo "SimplePDPService Implementation: $SERVICE_IMPLEMENTATION_ADDRESS" +echo "SimplePDPService Proxy: $PDP_SERVICE_ADDRESS" +echo "Connected to PDPVerifier: $PDP_VERIFIER_ADDRESS" +echo "" +echo "⚠️ Remember: SimplePDPService is deprecated and not actively maintained." +echo " Consider migrating to a custom service implementation." +echo "" \ No newline at end of file diff --git a/service_contracts/tools/pdp/deploy-transfer-ownership-upgrade-calibnet.sh b/service_contracts/tools/pdp/deploy-transfer-ownership-upgrade-calibnet.sh new file mode 100755 index 00000000..26cee5d9 --- /dev/null +++ b/service_contracts/tools/pdp/deploy-transfer-ownership-upgrade-calibnet.sh @@ -0,0 +1,176 @@ +#!/usr/bin/env bash +set -euo pipefail + +##################################### +# Environment variables & defaults # +##################################### + +: "${FIL_CALIBNET_RPC_URL:?FIL_CALIBNET_RPC_URL not set. Please export it and rerun.}" +: "${FIL_CALIBNET_PRIVATE_KEY:?FIL_CALIBNET_PRIVATE_KEY not set. Please export it and rerun.}" +: "${NEW_OWNER:?NEW_OWNER not set. Please export it and rerun.}" + + +CHAIN_ID="${CHAIN_ID:-314159}" +COMPILER_VERSION="${COMPILER_VERSION:-0.8.22}" + +##################################### +# 1. 
Create INIT_DATA # +##################################### +echo "Generating calldata for initialize(uint256) with argument 150 ..." +INIT_DATA=$(cast calldata "initialize(uint256)" 150) +echo "INIT_DATA = $INIT_DATA" +echo + +##################################### +# 1. Get deployer address # +##################################### +echo "Deriving deployer address from private key ..." +DEPLOYER_ADDRESS=$(cast wallet address "$FIL_CALIBNET_PRIVATE_KEY") +NONCE="$(cast nonce --rpc-url "$FIL_CALIBNET_RPC_URL" "$DEPLOYER_ADDRESS")" +echo "Deployer address: $DEPLOYER_ADDRESS" +echo + +##################################### +# 2. Deploy PDPVerifier contract # +##################################### +echo "Deploying PDPVerifier contract ..." +DEPLOY_OUTPUT_VERIFIER=$( + forge create \ + --rpc-url "$FIL_CALIBNET_RPC_URL" \ + --private-key "$FIL_CALIBNET_PRIVATE_KEY" \ + --chain-id "$CHAIN_ID" \ + --broadcast \ + --nonce $NONCE \ + src/PDPVerifier.sol:PDPVerifier +) +NONCE=$(expr $NONCE + "1") + + +# Extract the deployed address from JSON output +PDP_VERIFIER_ADDRESS=$(echo "$DEPLOY_OUTPUT_VERIFIER" | grep "Deployed to" | awk '{print $3}') +echo "PDPVerifier deployed at: $PDP_VERIFIER_ADDRESS" +echo + +##################################### +# 3. Deploy Proxy contract # +##################################### +echo "Deploying Proxy contract (MyERC1967Proxy) ..." +DEPLOY_OUTPUT_PROXY=$(forge create --rpc-url "$FIL_CALIBNET_RPC_URL" --private-key "$FIL_CALIBNET_PRIVATE_KEY" --chain-id "$CHAIN_ID" --broadcast --nonce $NONCE src/ERC1967Proxy.sol:MyERC1967Proxy --constructor-args "$PDP_VERIFIER_ADDRESS" "$INIT_DATA") +NONCE=$(expr $NONCE + "1") + + +# Extract the deployed proxy address +PROXY_ADDRESS=$(echo "$DEPLOY_OUTPUT_PROXY" | grep "Deployed to" | awk '{print $3}') +echo "Proxy deployed at: $PROXY_ADDRESS" +echo + +##################################### +# 4. Check owner of proxy # +##################################### +echo "Querying the proxy's owner ..." +OWNER_ADDRESS=$( + cast call \ + --rpc-url "$FIL_CALIBNET_RPC_URL" \ + "$PROXY_ADDRESS" \ + "owner()(address)" +) +echo "Proxy owner: $OWNER_ADDRESS" + +# Add validation check +if [ "${OWNER_ADDRESS,,}" != "${DEPLOYER_ADDRESS,,}" ]; then + echo "failed to validate owner address" + echo "Expected owner to be: ${DEPLOYER_ADDRESS}" + echo "Got: ${OWNER_ADDRESS}" + exit 1 +fi +echo "✓ Owner address validated successfully" +echo + +##################################### +# 5. Check implementation address # +##################################### +# The storage slot for ERC1967 implementation: +IMPLEMENTATION_SLOT="0x360894A13BA1A3210667C828492DB98DCA3E2076CC3735A920A3CA505D382BBC" + +echo "Checking proxy's implementation address from storage slot $IMPLEMENTATION_SLOT ..." +sleep 35 +IMPLEMENTATION_ADDRESS=$(cast storage --rpc-url "$FIL_CALIBNET_RPC_URL" "$PROXY_ADDRESS" "$IMPLEMENTATION_SLOT") + +echo "Implementation address in Proxy: $IMPLEMENTATION_ADDRESS" +echo + + +##################################### +# Summary # +##################################### +echo "========== DEPLOYMENT SUMMARY ==========" +echo "PDPVerifier Address: $PDP_VERIFIER_ADDRESS" +echo "Proxy Address: $PROXY_ADDRESS" +echo "Proxy Owner (should match deployer): $OWNER_ADDRESS" +echo "PDPVerifier Implementation (via Proxy): $IMPLEMENTATION_ADDRESS" +echo "========================================" + + +##################################### +# 6. Upgrade proxy # +##################################### + +echo "Deploying a new PDPVerifier contract ..." 
+DEPLOY_OUTPUT_VERIFIER_2=$(forge create --nonce $NONCE --broadcast --rpc-url "$FIL_CALIBNET_RPC_URL" --private-key "$FIL_CALIBNET_PRIVATE_KEY" --chain-id "$CHAIN_ID" src/PDPVerifier.sol:PDPVerifier) +NONCE=$(expr $NONCE + "1") +PDP_VERIFIER_ADDRESS_2=$(echo "$DEPLOY_OUTPUT_VERIFIER_2" | grep "Deployed to" | awk '{print $3}') +echo "PDPVerifier deployed at: $PDP_VERIFIER_ADDRESS_2" +echo + +echo +echo "Upgrading proxy to new implementation..." + +cast send --rpc-url "$FIL_CALIBNET_RPC_URL" --private-key "$FIL_CALIBNET_PRIVATE_KEY" --nonce $NONCE --chain-id "$CHAIN_ID" "$PROXY_ADDRESS" "upgradeToAndCall(address,bytes)" "$PDP_VERIFIER_ADDRESS_2" "0x" +NONCE=$(expr $NONCE + "1") + +echo "✓ Upgrade transaction submitted" + +# Verify the upgrade +echo "Verifying new implementation..." +sleep 35 +NEW_IMPLEMENTATION_ADDRESS=$(cast storage --rpc-url "$FIL_CALIBNET_RPC_URL" "$PROXY_ADDRESS" "$IMPLEMENTATION_SLOT") + +if [ "${NEW_IMPLEMENTATION_ADDRESS,,}" != "${PDP_VERIFIER_ADDRESS_2,,}" ]; then + echo "failed to upgrade implementation" + echo "Expected new implementation to be: ${PDP_VERIFIER_ADDRESS_2}" + echo "Got: ${NEW_IMPLEMENTATION_ADDRESS}" + exit 1 +fi + +echo "✓ Proxy upgraded successfully to ${PDP_VERIFIER_ADDRESS_2}" +echo + +##################################### +# 7. Transfer ownership # +##################################### +echo +echo "Transferring ownership to new owner..." + +cast send --rpc-url "$FIL_CALIBNET_RPC_URL" --private-key "$FIL_CALIBNET_PRIVATE_KEY" --nonce $NONCE --chain-id "$CHAIN_ID" "$PROXY_ADDRESS" "transferOwnership(address)" "$NEW_OWNER" +NONCE=$(expr $NONCE + "1") + +echo "✓ Ownership transfer transaction submitted" + +# Verify the ownership transfer +echo "Verifying new owner..." +NEW_OWNER_ADDRESS=$( + cast call \ + --rpc-url "$FIL_CALIBNET_RPC_URL" \ + "$PROXY_ADDRESS" \ + "owner()(address)" +) + +if [ "${NEW_OWNER_ADDRESS,,}" != "${NEW_OWNER,,}" ]; then + echo "failed to transfer ownership" + echo "Expected new owner to be: ${NEW_OWNER}" + echo "Got: ${NEW_OWNER_ADDRESS}" + exit 1 +fi + +echo "✓ Ownership transferred successfully to ${NEW_OWNER}" +echo diff --git a/service_contracts/tools/pdp/find.sh b/service_contracts/tools/pdp/find.sh new file mode 100755 index 00000000..9895499e --- /dev/null +++ b/service_contracts/tools/pdp/find.sh @@ -0,0 +1,6 @@ +#! /bin/bash +# Usage: ./find.sh +# input-list is a comma separated list of uint256s representing leaf indices to search for +# Example: ./find.sh 0x067fd08940ba732C25c44423005D662BF95e6763 0 '[100,200]' +findCallData=$(cast calldata "findPieceIds(uint256,uint256[])((uint256,uint256)[])" $2 $3) +cast send --keystore $KEYSTORE --password "$PASSWORD" --rpc-url $RPC_URL $1 $findCallData diff --git a/service_contracts/tools/pdp/propose-owner.sh b/service_contracts/tools/pdp/propose-owner.sh new file mode 100755 index 00000000..6a13838f --- /dev/null +++ b/service_contracts/tools/pdp/propose-owner.sh @@ -0,0 +1,41 @@ +#!/bin/bash +# propose_owner.sh - Script for proposing a new owner for a data set + +# Check if correct number of arguments provided +if [ "$#" -ne 2 ]; then + echo "Usage: $0 " + exit 1 +fi + +# Get arguments +DATA_SET_ID=$1 +NEW_OWNER_ADDRESS=$2 + +# Check required environment variables +if [ -z "$PASSWORD" ] || [ -z "$KEYSTORE" ] || [ -z "$RPC_URL" ] || [ -z "$CONTRACT_ADDRESS" ]; then + echo "Error: Missing required environment variables." + echo "Please set PASSWORD, KEYSTORE, RPC_URL, and CONTRACT_ADDRESS." 
+    exit 1
+fi
+
+echo "Proposing new owner for data set ID: $DATA_SET_ID"
+echo "New owner address: $NEW_OWNER_ADDRESS"
+
+# Get sender's address from keystore
+SENDER_ADDRESS=$(cast wallet address --keystore "$KEYSTORE")
+echo "Current owner address: $SENDER_ADDRESS"
+
+# Construct calldata using cast calldata
+CALLDATA=$(cast calldata "proposeDataSetStorageProvider(uint256,address)" "$DATA_SET_ID" "$NEW_OWNER_ADDRESS")
+
+echo "Sending transaction..."
+
+# Send transaction
+TX_HASH=$(cast send --rpc-url "$RPC_URL" \
+    --keystore "$KEYSTORE" \
+    --password "$PASSWORD" \
+    "$CONTRACT_ADDRESS" \
+    "$CALLDATA")
+
+echo "Transaction sent! Hash: $TX_HASH"
+echo "Successfully proposed $NEW_OWNER_ADDRESS as new owner for data set $DATA_SET_ID"
\ No newline at end of file
diff --git a/service_contracts/tools/pdp/remove.sh b/service_contracts/tools/pdp/remove.sh
new file mode 100755
index 00000000..7f1e83a3
--- /dev/null
+++ b/service_contracts/tools/pdp/remove.sh
@@ -0,0 +1,5 @@
+#! /bin/bash
+# Usage: ./remove.sh <contract-address> <data-set-id> <input-list>
+# input-list is a comma separated list of uint256s representing piece ids to remove
+removeCallData=$(cast calldata "removePieces(uint256,uint256[])(uint256)" $2 $3)
+cast send --keystore $KEYSTORE --password "$PASSWORD" --rpc-url $RPC_URL $1 $removeCallData
diff --git a/service_contracts/tools/pdp/size.sh b/service_contracts/tools/pdp/size.sh
new file mode 100755
index 00000000..222880d3
--- /dev/null
+++ b/service_contracts/tools/pdp/size.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Usage: ./size.sh <contract-address> <data-set-id>
+# Returns the total number of piece ids ever added to the data set
+
+# Check if required environment variables are set
+if [ -z "$RPC_URL" ] || [ -z "$KEYSTORE" ] || [ -z "$PASSWORD" ]; then
+    echo "Error: Please set RPC_URL, KEYSTORE, and PASSWORD environment variables."
+    exit 1
+fi
+
+# Check if data set ID is provided
+if [ -z "$1" ] || [ -z "$2" ]; then
+    echo "Usage: $0 <contract-address> <data-set-id>"
+    exit 1
+fi
+
+CONTRACT_ADDRESS=$1
+DATA_SET_ID=$2
+
+# Create the calldata for getNextPieceId(uint256)
+CALLDATA=$(cast calldata "getNextPieceId(uint256)" $DATA_SET_ID)
+
+# Call the contract and get the data set size
+DATA_SET_SIZE=$(cast call --keystore $KEYSTORE --password "$PASSWORD" --rpc-url $RPC_URL $CONTRACT_ADDRESS $CALLDATA)
+# Convert the hexadecimal output to a decimal integer (printf handles the "0x" prefix)
+DATA_SET_SIZE=$(echo $DATA_SET_SIZE | xargs printf "%d\n")
+
+echo "Data set size: $DATA_SET_SIZE"
\ No newline at end of file
diff --git a/service_contracts/tools/pdp/testBurnFee.sh b/service_contracts/tools/pdp/testBurnFee.sh
new file mode 100644
index 00000000..9a1c538e
--- /dev/null
+++ b/service_contracts/tools/pdp/testBurnFee.sh
@@ -0,0 +1,45 @@
+#! /bin/bash
+# testBurnFee.sh deploys the PDP service contract and all auxiliary contracts to a filecoin devnet
+# Assumption: KEYSTORE, PASSWORD, RPC_URL env vars are set to an appropriate eth keystore path and password
+# and to a valid RPC_URL for the devnet.
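+# Example invocation (hypothetical keystore path and password; port 1234 is the default lotus API port):
+#   KEYSTORE=~/keystore.json PASSWORD=... RPC_URL=http://127.0.0.1:1234/rpc/v1 ./testBurnFee.sh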
+# Assumption: forge, cast, lotus, jq are in the PATH
+#
+echo "Deploying To Test Burn Fee"
+
+if [ -z "$RPC_URL" ]; then
+    echo "Error: RPC_URL is not set"
+    exit 1
+fi
+
+if [ -z "$KEYSTORE" ]; then
+    echo "Error: KEYSTORE is not set"
+    exit 1
+fi
+
+# Send funds from default to keystore address
+# assumes lotus binary in path
+clientAddr=$(jq -r '.address' "$KEYSTORE")
+echo "Sending funds to $clientAddr"
+lotus send $clientAddr 10000
+
+# Deploy PDP service contract
+echo "Deploying PDP service"
+# Parse the output of forge create to extract the contract address
+PDP_SERVICE_ADDRESS=$(forge create --rpc-url "$RPC_URL" --keystore "$KEYSTORE" --password "$PASSWORD" --compiler-version 0.8.23 --chain-id 31415926 contracts/src/PDPService.sol:PDPService --constructor-args 3 | grep "Deployed to" | awk '{print $3}')
+
+if [ -z "$PDP_SERVICE_ADDRESS" ]; then
+    echo "Error: Failed to extract PDP service contract address"
+    exit 1
+fi
+
+echo "PDP service deployed at: $PDP_SERVICE_ADDRESS"
+
+echo "Executing burnFee function"
+
+# Create the calldata for burnFee(uint256)
+CALLDATA=$(cast calldata "burnFee(uint256)" 1)
+
+# Send the transaction
+cast send --keystore $KEYSTORE --password "$PASSWORD" --rpc-url $RPC_URL $PDP_SERVICE_ADDRESS $CALLDATA --value 1
diff --git a/service_contracts/tools/pdp/transfer-owner.sh b/service_contracts/tools/pdp/transfer-owner.sh
new file mode 100755
index 00000000..bebe3cd0
--- /dev/null
+++ b/service_contracts/tools/pdp/transfer-owner.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+set -euo pipefail
+
+#####################################
+# Environment variables & defaults  #
+#####################################
+
+# Use ${VAR:-} so the friendly error fires instead of set -u aborting on unset vars
+if [ -z "${RPC_URL:-}" ]; then
+    echo "Error: RPC_URL is not set"
+    exit 1
+fi
+
+if [ -z "${KEYSTORE:-}" ]; then
+    echo "Error: KEYSTORE is not set"
+    exit 1
+fi
+
+if [ -z "${CONTRACT_ADDRESS:-}" ]; then
+    echo "Error: CONTRACT_ADDRESS is not set"
+    exit 1
+fi
+
+if [ -z "${NEW_OWNER:-}" ]; then
+    echo "Error: NEW_OWNER is not set"
+    exit 1
+fi
+
+#####################################
+# Setup                             #
+#####################################
+echo "Using keystore for authentication..."
+ADDR=$(cast wallet address --keystore "$KEYSTORE" --password "$PASSWORD")
+NONCE="$(cast nonce --rpc-url "$RPC_URL" "$ADDR")"
+echo "Deployer address: $ADDR"
+echo
+
+#####################################
+# Transfer ownership                #
+#####################################
+echo "Transferring ownership to new owner..."
+echo "Proxy address: $CONTRACT_ADDRESS"
+echo "New owner: $NEW_OWNER"
+
+cast send --rpc-url "$RPC_URL" --keystore "$KEYSTORE" --password "$PASSWORD" --nonce "$NONCE" "$CONTRACT_ADDRESS" "transferOwnership(address)" "$NEW_OWNER"
+
+echo "✓ Ownership transfer transaction submitted"
+
+# Verify the ownership transfer
+echo "Verifying new owner..."
+NEW_OWNER_ADDRESS=$(
+    cast call \
+        --rpc-url "$RPC_URL" \
+        "$CONTRACT_ADDRESS" \
+        "owner()(address)"
+)
+
+if [ "${NEW_OWNER_ADDRESS,,}" != "${NEW_OWNER,,}" ]; then
+    echo "Failed to transfer ownership"
+    echo "Expected new owner to be: ${NEW_OWNER}"
+    echo "Got: ${NEW_OWNER_ADDRESS}"
+    exit 1
+fi
+
+echo "✓ Ownership transferred successfully to ${NEW_OWNER}"
+echo
\ No newline at end of file
diff --git a/service_contracts/tools/pdp/upgrade-contract.sh b/service_contracts/tools/pdp/upgrade-contract.sh
new file mode 100755
index 00000000..df1b33d5
--- /dev/null
+++ b/service_contracts/tools/pdp/upgrade-contract.sh
@@ -0,0 +1,94 @@
+#! /bin/bash
+# upgrade-contract upgrades the proxy at $PROXY_ADDRESS to a new deployment of the implementation
+# of the contract at $IMPLEMENTATION_PATH (e.g. src/PDPService.sol:PDPService or src/PDPRecordKeeper.sol:PDPRecordKeeper)
+# Assumption: KEYSTORE, PASSWORD, RPC_URL env vars are set to an appropriate eth keystore path and password
+# and to a valid RPC_URL for the target network.
+# Assumption: forge, cast, jq are in the PATH
+#
+# Set DRY_RUN=false to actually deploy and broadcast transactions (default is dry-run for safety)
+DRY_RUN=${DRY_RUN:-true}
+
+if [ "$DRY_RUN" = "true" ]; then
+    echo "🧪 Running in DRY-RUN mode - simulation only, no actual deployment"
+else
+    echo "🚀 Running in DEPLOYMENT mode - will actually deploy and upgrade contracts"
+fi
+
+echo "Upgrading contract"
+
+if [ -z "$RPC_URL" ]; then
+    echo "Error: RPC_URL is not set"
+    exit 1
+fi
+
+if [ -z "$CHAIN_ID" ]; then
+    CHAIN_ID=$(cast chain-id --rpc-url "$RPC_URL")
+    if [ -z "$CHAIN_ID" ]; then
+        echo "Error: Failed to detect chain ID from RPC"
+        exit 1
+    fi
+fi
+
+if [ -z "$KEYSTORE" ]; then
+    echo "Error: KEYSTORE is not set"
+    exit 1
+fi
+
+if [ -z "$PROXY_ADDRESS" ]; then
+    echo "Error: PROXY_ADDRESS is not set"
+    exit 1
+fi
+
+if [ -z "$UPGRADE_DATA" ]; then
+    echo "Error: UPGRADE_DATA is not set"
+    exit 1
+fi
+
+if [ -z "$IMPLEMENTATION_PATH" ]; then
+    echo "Error: IMPLEMENTATION_PATH is not set (e.g. src/PDPService.sol:PDPService)"
+    exit 1
+fi
+
+if [ "$DRY_RUN" = "true" ]; then
+    echo "🔍 Simulating deployment of new $IMPLEMENTATION_PATH implementation contract"
+    # Without --broadcast, forge create only compiles and simulates the deployment
+    forge create --rpc-url "$RPC_URL" --keystore "$KEYSTORE" --password "$PASSWORD" --compiler-version 0.8.23 --chain-id "$CHAIN_ID" "$IMPLEMENTATION_PATH"
+
+    if [ $? -eq 0 ]; then
+        echo "✅ Contract compilation and simulation successful!"
+        echo "🔍 Simulating proxy upgrade at $PROXY_ADDRESS"
+        echo "   - Would call: upgradeToAndCall(address,bytes)"
+        echo "   - With upgrade data: $UPGRADE_DATA"
+        echo "✅ Dry run completed successfully!"
+        echo ""
+        echo "To perform actual deployment, run with: DRY_RUN=false ./tools/pdp/upgrade-contract.sh"
+    else
+        echo "❌ Contract compilation failed during simulation"
+        exit 1
+    fi
+else
+    echo "🚀 Deploying new $IMPLEMENTATION_PATH implementation contract"
+    # Parse the output of forge create to extract the contract address
+    IMPLEMENTATION_ADDRESS=$(forge create --rpc-url "$RPC_URL" --keystore "$KEYSTORE" --password "$PASSWORD" --broadcast --compiler-version 0.8.23 --chain-id "$CHAIN_ID" "$IMPLEMENTATION_PATH" | grep "Deployed to" | awk '{print $3}')
+
+    if [ -z "$IMPLEMENTATION_ADDRESS" ]; then
+        echo "❌ Error: Failed to extract the implementation contract address"
+        exit 1
+    fi
+    echo "✅ $IMPLEMENTATION_PATH implementation deployed at: $IMPLEMENTATION_ADDRESS"
+
+    echo "🔄 Upgrading proxy at $PROXY_ADDRESS"
+    cast send --rpc-url "$RPC_URL" --keystore "$KEYSTORE" --password "$PASSWORD" --chain-id "$CHAIN_ID" "$PROXY_ADDRESS" "upgradeToAndCall(address,bytes)" "$IMPLEMENTATION_ADDRESS" "$UPGRADE_DATA"
+
+    if [ $? -eq 0 ]; then
+        echo "✅ Contract upgrade completed successfully!"
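+        # Hedged sketch: the new implementation can also be read back directly from
+        # the ERC1967 slot (keccak256("eip1967.proxy.implementation") - 1):
+        #   cast storage "$PROXY_ADDRESS" 0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc --rpc-url "$RPC_URL"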
+        echo "📄 You can verify the upgrade by checking the VERSION:"
+        echo "   cast call $PROXY_ADDRESS \"VERSION()(string)\" --rpc-url $RPC_URL"
+    else
+        echo "❌ Contract upgrade failed"
+        exit 1
+    fi
+fi
diff --git a/service_contracts/tools/create_data_set_with_payments.sh b/service_contracts/tools/service-provider/create_data_set_with_payments.sh
similarity index 100%
rename from service_contracts/tools/create_data_set_with_payments.sh
rename to service_contracts/tools/service-provider/create_data_set_with_payments.sh
diff --git a/service_contracts/tools/deploy-all-warm-storage.sh b/service_contracts/tools/service-provider/deploy-all-warm-storage.sh
similarity index 99%
rename from service_contracts/tools/deploy-all-warm-storage.sh
rename to service_contracts/tools/service-provider/deploy-all-warm-storage.sh
index d416a8f3..a82b2408 100755
--- a/service_contracts/tools/deploy-all-warm-storage.sh
+++ b/service_contracts/tools/service-provider/deploy-all-warm-storage.sh
@@ -39,7 +39,8 @@ if [ -z "$CHAIN_ID" ]; then
 fi
 
 # Set network-specific configuration based on chain ID
-# See service_contracts/tools/README.md for deployment parameter documentation
+# NOTE: CHALLENGE_FINALITY should always be 150 in production for security.
+# Calibnet uses lower values for faster testing and development.
 case "$CHAIN_ID" in
     "314159")
         NETWORK_NAME="calibnet"
@@ -127,7 +128,6 @@ if [ "$MAX_PROVING_PERIOD" -lt "$MIN_REQUIRED" ]; then
     echo "Error: MAX_PROVING_PERIOD ($MAX_PROVING_PERIOD) is too small for CHALLENGE_FINALITY ($CHALLENGE_FINALITY)"
     echo "       MAX_PROVING_PERIOD must be at least $MIN_REQUIRED (CHALLENGE_FINALITY + CHALLENGE_WINDOW_SIZE/2)"
     echo "       Either increase MAX_PROVING_PERIOD or decrease CHALLENGE_FINALITY"
-    echo "       See service_contracts/tools/README.md for deployment parameter guidelines."
     exit 1
 fi
 
diff --git a/service_contracts/tools/deploy-registry-calibnet.sh b/service_contracts/tools/service-provider/deploy-registry-calibnet.sh
similarity index 100%
rename from service_contracts/tools/deploy-registry-calibnet.sh
rename to service_contracts/tools/service-provider/deploy-registry-calibnet.sh
diff --git a/service_contracts/tools/deploy-session-key-registry.sh b/service_contracts/tools/service-provider/deploy-session-key-registry.sh
similarity index 100%
rename from service_contracts/tools/deploy-session-key-registry.sh
rename to service_contracts/tools/service-provider/deploy-session-key-registry.sh
diff --git a/service_contracts/tools/deploy-warm-storage-calibnet.sh b/service_contracts/tools/service-provider/deploy-warm-storage-calibnet.sh
similarity index 100%
rename from service_contracts/tools/deploy-warm-storage-calibnet.sh
rename to service_contracts/tools/service-provider/deploy-warm-storage-calibnet.sh
diff --git a/service_contracts/tools/deploy-warm-storage-implementation-only.sh b/service_contracts/tools/service-provider/deploy-warm-storage-implementation-only.sh
similarity index 100%
rename from service_contracts/tools/deploy-warm-storage-implementation-only.sh
rename to service_contracts/tools/service-provider/deploy-warm-storage-implementation-only.sh
diff --git a/service_contracts/tools/deploy-warm-storage-view.sh b/service_contracts/tools/service-provider/deploy-warm-storage-view.sh
similarity index 100%
rename from service_contracts/tools/deploy-warm-storage-view.sh
rename to service_contracts/tools/service-provider/deploy-warm-storage-view.sh
diff --git a/service_contracts/tools/generate_storage_layout.sh b/service_contracts/tools/service-provider/generate_storage_layout.sh
similarity index 100%
rename from service_contracts/tools/generate_storage_layout.sh
rename to service_contracts/tools/service-provider/generate_storage_layout.sh
diff --git a/service_contracts/tools/generate_view_contract.sh b/service_contracts/tools/service-provider/generate_view_contract.sh
similarity index 100%
rename from service_contracts/tools/generate_view_contract.sh
rename to service_contracts/tools/service-provider/generate_view_contract.sh
diff --git a/service_contracts/tools/set-warm-storage-view.sh b/service_contracts/tools/service-provider/set-warm-storage-view.sh
similarity index 100%
rename from service_contracts/tools/set-warm-storage-view.sh
rename to service_contracts/tools/service-provider/set-warm-storage-view.sh