diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 8f8d20609f..6350fb009d 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,7 +1,3 @@ # CODEOWNERS: https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners - -# Any PR to `master` branch with changes to production contracts notifies the protocol team -/contracts/ @lidofinance/lido-eth-protocol - -# Any PR to `master` branch with changes to GitHub workflows notifies the workflow review team -/.github/workflows/ @lidofinance/review-gh-workflows +* @lidofinance/lido-eth-protocol +.github @lidofinance/review-gh-workflows diff --git a/.github/workflows/tests-integration-mainnet.yml b/.github/workflows/tests-integration-mainnet.yml index 508b95efe0..1bad570d1f 100644 --- a/.github/workflows/tests-integration-mainnet.yml +++ b/.github/workflows/tests-integration-mainnet.yml @@ -1,30 +1,34 @@ name: Integration Tests -#on: [push] -# -#jobs: -# test_hardhat_integration_fork: -# name: Hardhat / Mainnet -# runs-on: ubuntu-latest -# timeout-minutes: 120 -# -# services: -# hardhat-node: -# image: ghcr.io/lidofinance/hardhat-node:2.22.18 -# ports: -# - 8545:8545 -# env: -# ETH_RPC_URL: "${{ secrets.ETH_RPC_URL }}" -# -# steps: -# - uses: actions/checkout@v4 -# -# - name: Common setup -# uses: ./.github/workflows/setup -# -# - name: Set env -# run: cp .env.example .env -# -# - name: Run integration tests -# run: yarn test:integration:fork:mainnet -# env: -# LOG_LEVEL: debug + +on: + push: + schedule: + - cron: "0 10 */2 * *" + +jobs: + test_hardhat_integration_fork: + name: Hardhat / Mainnet + runs-on: ubuntu-latest + timeout-minutes: 120 + + services: + hardhat-node: + image: ghcr.io/lidofinance/hardhat-node:2.22.17 + ports: + - 8545:8545 + env: + ETH_RPC_URL: "${{ secrets.ETH_RPC_URL }}" + + steps: + - uses: actions/checkout@v4 + + - name: Common setup + uses: ./.github/workflows/setup + + - name: Set env + run: cp .env.example .env + 
+ - name: Run integration tests + run: yarn test:integration:fork:mainnet + env: + LOG_LEVEL: debug diff --git a/.gitignore b/.gitignore index e2d3e4f663..42b79ba602 100644 --- a/.gitignore +++ b/.gitignore @@ -25,6 +25,7 @@ lib/abi/*.json accounts.json deployed-local.json deployed-hardhat.json +deployed-local-devnet.json # MacOS .DS_Store diff --git a/deployed-holesky.json b/deployed-holesky.json index 861c4e7055..61174a4e0a 100644 --- a/deployed-holesky.json +++ b/deployed-holesky.json @@ -175,7 +175,7 @@ "app:simple-dvt": { "stakingRouterModuleParams": { "moduleName": "SimpleDVT", - "moduleType": "simple-dvt-onchain-v1", + "moduleType": "curated-onchain-v1", "targetShare": 50, "moduleFee": 800, "treasuryFee": 200, diff --git a/foundry.toml b/foundry.toml index 3ddeddae8e..3798d585bb 100644 --- a/foundry.toml +++ b/foundry.toml @@ -15,7 +15,7 @@ test = 'test' cache = true # The cache directory if enabled -cache_path = 'foundry/cache' +cache_path = 'foundry/cache' # Only run tests in contracts matching the specified glob pattern match_path = '**/test/**/*.t.sol' diff --git a/foundry/lib/forge-std b/foundry/lib/forge-std index 8f24d6b04c..ffa2ee0d92 160000 --- a/foundry/lib/forge-std +++ b/foundry/lib/forge-std @@ -1 +1 @@ -Subproject commit 8f24d6b04c92975e0795b5868aa0d783251cdeaa +Subproject commit ffa2ee0d921b4163b7abd0f1122df93ead205805 diff --git a/globals.d.ts b/globals.d.ts index 5860e7122e..f58fe5b101 100644 --- a/globals.d.ts +++ b/globals.d.ts @@ -39,7 +39,6 @@ declare namespace NodeJS { LOCAL_KERNEL_ADDRESS?: string; LOCAL_LEGACY_ORACLE_ADDRESS?: string; LOCAL_LIDO_ADDRESS?: string; - LOCAL_WSTETH_ADDRESS?: string; LOCAL_NOR_ADDRESS?: string; LOCAL_ORACLE_DAEMON_CONFIG_ADDRESS?: string; LOCAL_ORACLE_REPORT_SANITY_CHECKER_ADDRESS?: string; @@ -48,7 +47,6 @@ declare namespace NodeJS { LOCAL_VALIDATORS_EXIT_BUS_ORACLE_ADDRESS?: string; LOCAL_WITHDRAWAL_QUEUE_ADDRESS?: string; LOCAL_WITHDRAWAL_VAULT_ADDRESS?: string; - LOCAL_STAKING_VAULT_FACTORY_ADDRESS?: 
string; /* for mainnet fork testing */ MAINNET_RPC_URL: string; @@ -65,7 +63,6 @@ declare namespace NodeJS { MAINNET_KERNEL_ADDRESS?: string; MAINNET_LEGACY_ORACLE_ADDRESS?: string; MAINNET_LIDO_ADDRESS?: string; - MAINNET_WSTETH_ADDRESS?: string; MAINNET_NOR_ADDRESS?: string; MAINNET_ORACLE_DAEMON_CONFIG_ADDRESS?: string; MAINNET_ORACLE_REPORT_SANITY_CHECKER_ADDRESS?: string; @@ -74,17 +71,14 @@ declare namespace NodeJS { MAINNET_VALIDATORS_EXIT_BUS_ORACLE_ADDRESS?: string; MAINNET_WITHDRAWAL_QUEUE_ADDRESS?: string; MAINNET_WITHDRAWAL_VAULT_ADDRESS?: string; - MAINNET_STAKING_VAULT_FACTORY_ADDRESS?: string; - - HOLESKY_RPC_URL?: string; - SEPOLIA_RPC_URL?: string; - MEKONG_RPC_URL?: string; /* for contract sourcecode verification with `hardhat-verify` */ ETHERSCAN_API_KEY?: string; - BLOCKSCOUT_API_KEY?: string; - /* Scratch deploy environment variables */ - NETWORK_STATE_FILE?: string; + /* for local devnet */ + LOCAL_DEVNET_PK?: string; + LOCAL_DEVNET_CHAIN_ID?: string; + LOCAL_DEVNET_EXPLORER_API_URL?: string; + LOCAL_DEVNET_EXPLORER_URL?: string; } } diff --git a/hardhat.config.ts b/hardhat.config.ts index 460b268f5a..df23c8a245 100644 --- a/hardhat.config.ts +++ b/hardhat.config.ts @@ -11,7 +11,6 @@ import "hardhat-tracer"; import "hardhat-watcher"; import "hardhat-ignore-warnings"; import "hardhat-contract-sizer"; -import "hardhat-gas-reporter"; import { HardhatUserConfig } from "hardhat/config"; import { mochaRootHooks } from "test/hooks"; @@ -22,12 +21,22 @@ import { getHardhatForkingConfig, loadAccounts } from "./hardhat.helpers"; const RPC_URL: string = process.env.RPC_URL || ""; +export const ZERO_PK = "0x0000000000000000000000000000000000000000000000000000000000000000"; + const config: HardhatUserConfig = { defaultNetwork: "hardhat", - gasReporter: { - enabled: process.env.SKIP_GAS_REPORT ? 
false : true, - }, networks: { + "local": { + url: process.env.LOCAL_RPC_URL || RPC_URL, + }, + "local-devnet": { + url: process.env.LOCAL_RPC_URL || RPC_URL, + accounts: [process.env.LOCAL_DEVNET_PK || ZERO_PK], + }, + "mainnet-fork": { + url: process.env.MAINNET_RPC_URL || RPC_URL, + timeout: 20 * 60 * 1000, // 20 minutes + }, "hardhat": { // setting base fee to 0 to avoid extra calculations doesn't work :( // minimal base fee is 1 for EIP-1559 @@ -43,16 +52,8 @@ const config: HardhatUserConfig = { }, forking: getHardhatForkingConfig(), }, - "local": { - url: process.env.LOCAL_RPC_URL || RPC_URL, - }, - "holesky": { - url: process.env.HOLESKY_RPC_URL || RPC_URL, - chainId: 17000, - accounts: loadAccounts("holesky"), - }, "sepolia": { - url: process.env.SEPOLIA_RPC_URL || RPC_URL, + url: RPC_URL, chainId: 11155111, accounts: loadAccounts("sepolia"), }, @@ -60,13 +61,23 @@ const config: HardhatUserConfig = { url: process.env.SEPOLIA_RPC_URL || RPC_URL, chainId: 11155111, }, - "mainnet-fork": { - url: process.env.MAINNET_RPC_URL || RPC_URL, - timeout: 20 * 60 * 1000, // 20 minutes - }, }, etherscan: { - apiKey: process.env.ETHERSCAN_API_KEY || "", + customChains: [ + { + network: "local-devnet", + chainId: parseInt(process.env.LOCAL_DEVNET_CHAIN_ID ?? "32382", 10), + urls: { + apiURL: process.env.LOCAL_DEVNET_EXPLORER_API_URL ?? "", + browserURL: process.env.LOCAL_DEVNET_EXPLORER_URL ?? "", + }, + }, + ], + apiKey: process.env.LOCAL_DEVNET_EXPLORER_API_URL + ? 
{ + "local-devnet": "local-devnet", + } + : process.env.ETHERSCAN_API_KEY || "", }, solidity: { compilers: [ @@ -120,16 +131,6 @@ const config: HardhatUserConfig = { evmVersion: "istanbul", }, }, - { - version: "0.8.25", - settings: { - optimizer: { - enabled: true, - runs: 200, - }, - evmVersion: "cancun", - }, - }, ], }, tracer: { @@ -144,10 +145,7 @@ const config: HardhatUserConfig = { }, watcher: { test: { - tasks: [ - { command: "compile", params: { quiet: true } }, - { command: "test", params: { noCompile: true, testFiles: ["{path}"] } }, - ], + tasks: [{ command: "test", params: { testFiles: ["{path}"] } }], files: ["./test/**/*"], clearOnStart: true, start: "echo Running tests...", @@ -174,7 +172,7 @@ const config: HardhatUserConfig = { contractSizer: { alphaSort: false, disambiguatePaths: false, - runOnCompile: process.env.SKIP_CONTRACT_SIZE ? false : true, + runOnCompile: true, strict: true, except: ["template", "mocks", "@aragon", "openzeppelin", "test"], }, diff --git a/lib/deploy.ts b/lib/deploy.ts index 1f0931f15f..e753f0091e 100644 --- a/lib/deploy.ts +++ b/lib/deploy.ts @@ -5,7 +5,7 @@ import { FactoryOptions } from "hardhat/types"; import { LidoLocator } from "typechain-types"; import { addContractHelperFields, DeployedContract, getContractPath, loadContract, LoadedContract } from "lib/contract"; -import { ConvertibleToString, cy, gr, log, yl } from "lib/log"; +import { ConvertibleToString, cy, log, yl } from "lib/log"; import { incrementGasUsed, Sk, updateObjectInState } from "lib/state-file"; const GAS_PRIORITY_FEE = process.env.GAS_PRIORITY_FEE || null; @@ -36,15 +36,11 @@ export async function makeTx( log.withArguments(`Call: ${yl(contract.name)}[${cy(contract.address)}].${yl(funcName)}`, args); const tx = await contract.getFunction(funcName)(...args, txParams); - log(` Transaction: ${tx.hash} (nonce ${yl(tx.nonce)})...`); const receipt = await tx.wait(); const gasUsed = receipt.gasUsed; incrementGasUsed(gasUsed, withStateFile); - log(` 
Executed (gas used: ${yl(gasUsed)})`); - log.emptyLine(); - return receipt; } @@ -80,8 +76,6 @@ async function deployContractType2( throw new Error(`Failed to send the deployment transaction for ${artifactName}`); } - log(` Transaction: ${tx.hash} (nonce ${yl(tx.nonce)})`); - const receipt = await tx.wait(); if (!receipt) { throw new Error(`Failed to wait till the transaction ${tx.hash} execution!`); @@ -92,9 +86,6 @@ async function deployContractType2( (contract as DeployedContract).deploymentGasUsed = gasUsed; (contract as DeployedContract).deploymentTx = tx.hash; - log(` Deployed: ${gr(receipt.contractAddress!)} (gas used: ${yl(gasUsed)})`); - log.emptyLine(); - await addContractHelperFields(contract, artifactName); return contract as DeployedContract; diff --git a/lib/index.ts b/lib/index.ts index 6c2aa00a1f..5c9dc14fff 100644 --- a/lib/index.ts +++ b/lib/index.ts @@ -21,7 +21,5 @@ export * from "./signing-keys"; export * from "./state-file"; export * from "./string"; export * from "./time"; -export * from "./transaction"; -export * from "./type"; export * from "./units"; export * from "./deposit"; diff --git a/lib/log.ts b/lib/log.ts index 1291fafba2..7e053e632f 100644 --- a/lib/log.ts +++ b/lib/log.ts @@ -1,8 +1,6 @@ import chalk from "chalk"; import path from "path"; -import { TraceableTransaction } from "./type"; - // @ts-expect-error TS2339: Property 'toJSON' does not exist on type 'BigInt'. BigInt.prototype.toJSON = function () { return this.toString(); @@ -127,24 +125,3 @@ log.debug = (title: string, records: Record) => { Object.keys(records).forEach((label) => _record(` ${label}`, records[label])); log.emptyLine(); }; - -log.traceTransaction = (name: string, tx: TraceableTransaction) => { - const value = tx.value === "0.0" ? 
"" : `Value: ${yl(tx.value)} ETH`; - const from = `From: ${yl(tx.from)}`; - const to = `To: ${yl(tx.to)}`; - const gasPrice = `Gas price: ${yl(tx.gasPrice)} gwei`; - const gasLimit = `Gas limit: ${yl(tx.gasLimit)}`; - const gasUsed = `Gas used: ${yl(tx.gasUsed)} (${yl(tx.gasUsedPercent)})`; - const block = `Block: ${yl(tx.blockNumber)}`; - const nonce = `Nonce: ${yl(tx.nonce)}`; - - const color = tx.status ? gr : rd; - const status = `${color(name)} ${color(tx.status ? "confirmed" : "failed")}`; - - log(`Transaction sent:`, yl(tx.hash)); - log(` ${from} ${to} ${value}`); - log(` ${gasPrice} ${gasLimit} ${gasUsed}`); - log(` ${block} ${nonce}`); - log(` ${status}`); - log.emptyLine(); -}; diff --git a/lib/protocol/helpers/accounting.ts b/lib/protocol/helpers/accounting.ts index 4280ed0d71..e8a497a779 100644 --- a/lib/protocol/helpers/accounting.ts +++ b/lib/protocol/helpers/accounting.ts @@ -17,47 +17,57 @@ import { impersonate, log, ONE_GWEI, - trace, + streccak, } from "lib"; import { ProtocolContext } from "../types"; -const ZERO_HASH = new Uint8Array(32).fill(0); -const ZERO_BYTES32 = "0x" + Buffer.from(ZERO_HASH).toString("hex"); -const SHARE_RATE_PRECISION = 10n ** 27n; -const MIN_MEMBERS_COUNT = 3n; +export type OracleReportOptions = { + clDiff: bigint; + clAppearedValidators: bigint; + elRewardsVaultBalance: bigint | null; + withdrawalVaultBalance: bigint | null; + sharesRequestedToBurn: bigint | null; + withdrawalFinalizationBatches: bigint[]; + simulatedShareRate: bigint | null; + refSlot: bigint | null; + dryRun: boolean; + excludeVaultsBalances: boolean; + skipWithdrawals: boolean; + waitNextReportTime: boolean; + extraDataFormat: bigint; + extraDataHash: string; + extraDataItemsCount: bigint; + extraDataList: Uint8Array; + stakingModuleIdsWithNewlyExitedValidators: bigint[]; + numExitedValidatorsByStakingModule: bigint[]; + reportElVault: boolean; + reportWithdrawalsVault: boolean; + silent: boolean; +}; -export type OracleReportParams = { - clDiff?: 
bigint; - clAppearedValidators?: bigint; - elRewardsVaultBalance?: bigint | null; - withdrawalVaultBalance?: bigint | null; - sharesRequestedToBurn?: bigint | null; +export type OracleReportPushOptions = { + refSlot: bigint; + clBalance: bigint; + numValidators: bigint; + withdrawalVaultBalance: bigint; + elRewardsVaultBalance: bigint; + sharesRequestedToBurn: bigint; + simulatedShareRate: bigint; + stakingModuleIdsWithNewlyExitedValidators?: bigint[]; + numExitedValidatorsByStakingModule?: bigint[]; withdrawalFinalizationBatches?: bigint[]; - simulatedShareRate?: bigint | null; - refSlot?: bigint | null; - dryRun?: boolean; - excludeVaultsBalances?: boolean; - skipWithdrawals?: boolean; - waitNextReportTime?: boolean; + isBunkerMode?: boolean; extraDataFormat?: bigint; extraDataHash?: string; extraDataItemsCount?: bigint; extraDataList?: Uint8Array; - stakingModuleIdsWithNewlyExitedValidators?: bigint[]; - numExitedValidatorsByStakingModule?: bigint[]; - reportElVault?: boolean; - reportWithdrawalsVault?: boolean; - vaultValues?: bigint[]; - inOutDeltas?: bigint[]; - silent?: boolean; }; -type OracleReportResults = { - data: AccountingOracle.ReportDataStruct; - reportTx: ContractTransactionResponse | undefined; - extraDataTx: ContractTransactionResponse | undefined; -}; +const ZERO_HASH = new Uint8Array(32).fill(0); +const ZERO_BYTES32 = "0x" + Buffer.from(ZERO_HASH).toString("hex"); +const SHARE_RATE_PRECISION = 10n ** 27n; +const MIN_MEMBERS_COUNT = 3n; /** * Prepare and push oracle report. 
@@ -71,6 +81,7 @@ export const report = async ( withdrawalVaultBalance = null, sharesRequestedToBurn = null, withdrawalFinalizationBatches = [], + simulatedShareRate = null, refSlot = null, dryRun = false, excludeVaultsBalances = false, @@ -84,17 +95,23 @@ export const report = async ( numExitedValidatorsByStakingModule = [], reportElVault = true, reportWithdrawalsVault = true, - vaultValues = [], - inOutDeltas = [], - }: OracleReportParams = {}, -): Promise => { + } = {} as Partial, +): Promise<{ + data: AccountingOracle.ReportDataStruct; + reportTx: ContractTransactionResponse | undefined; + extraDataTx: ContractTransactionResponse | undefined; +}> => { const { hashConsensus, lido, elRewardsVault, withdrawalVault, burner, accountingOracle } = ctx.contracts; + // Fast-forward to next report time if (waitNextReportTime) { await waitNextAvailableReportTime(ctx); } - refSlot = refSlot ?? (await hashConsensus.getCurrentFrame()).refSlot; + // Get report slot from the protocol + if (!refSlot) { + ({ refSlot } = await hashConsensus.getCurrentFrame()); + } const { beaconValidators, beaconBalance } = await lido.getBeaconStat(); const postCLBalance = beaconBalance + clDiff; @@ -113,6 +130,9 @@ export const report = async ( "ElRewards vault": formatEther(elRewardsVaultBalance), }); + // excludeVaultsBalance safely forces LIDO to see vault balances as empty allowing zero/negative rebase + // simulateReports needs proper withdrawal and elRewards vaults balances + if (excludeVaultsBalances) { if (!reportWithdrawalsVault || !reportElVault) { log.warning("excludeVaultsBalances overrides reportWithdrawalsVault and reportElVault"); @@ -138,21 +158,19 @@ export const report = async ( let isBunkerMode = false; if (!skipWithdrawals) { - const simulatedReport = await simulateReport(ctx, { + const params = { refSlot, beaconValidators: postBeaconValidators, clBalance: postCLBalance, withdrawalVaultBalance, elRewardsVaultBalance, - vaultValues, - inOutDeltas, - }); + }; - if 
(!simulatedReport) { - throw new Error("Failed to simulate report"); - } + const simulatedReport = await simulateReport(ctx, params); + + expect(simulatedReport).to.not.be.undefined; - const { postTotalPooledEther, postTotalShares, withdrawals, elRewards } = simulatedReport; + const { postTotalPooledEther, postTotalShares, withdrawals, elRewards } = simulatedReport!; log.debug("Simulated report", { "Post Total Pooled Ether": formatEther(postTotalPooledEther), @@ -161,7 +179,9 @@ export const report = async ( "El Rewards": formatEther(elRewards), }); - const simulatedShareRate = (postTotalPooledEther * SHARE_RATE_PRECISION) / postTotalShares; + if (simulatedShareRate === null) { + simulatedShareRate = (postTotalPooledEther * SHARE_RATE_PRECISION) / postTotalShares; + } if (withdrawalFinalizationBatches.length === 0) { withdrawalFinalizationBatches = await getFinalizationBatches(ctx, { @@ -174,37 +194,67 @@ export const report = async ( isBunkerMode = (await lido.getTotalPooledEther()) > postTotalPooledEther; log.debug("Bunker Mode", { "Is Active": isBunkerMode }); + } else if (simulatedShareRate === null) { + simulatedShareRate = 0n; } - const reportData = { - consensusVersion: await accountingOracle.getConsensusVersion(), + if (dryRun) { + const data = { + consensusVersion: await accountingOracle.getConsensusVersion(), + refSlot, + numValidators: postBeaconValidators, + clBalanceGwei: postCLBalance / ONE_GWEI, + stakingModuleIdsWithNewlyExitedValidators, + numExitedValidatorsByStakingModule, + withdrawalVaultBalance, + elRewardsVaultBalance, + sharesRequestedToBurn, + withdrawalFinalizationBatches, + simulatedShareRate, + isBunkerMode, + extraDataFormat, + extraDataHash, + extraDataItemsCount, + } as AccountingOracle.ReportDataStruct; + + log.debug("Final Report (Dry Run)", { + "Consensus version": data.consensusVersion, + "Ref slot": data.refSlot, + "CL balance": data.clBalanceGwei, + "Num validators": data.numValidators, + "Withdrawal vault balance": 
data.withdrawalVaultBalance, + "EL rewards vault balance": data.elRewardsVaultBalance, + "Shares requested to burn": data.sharesRequestedToBurn, + "Withdrawal finalization batches": data.withdrawalFinalizationBatches, + "Simulated share rate": data.simulatedShareRate, + "Is bunker mode": data.isBunkerMode, + "Extra data format": data.extraDataFormat, + "Extra data hash": data.extraDataHash, + "Extra data items count": data.extraDataItemsCount, + }); + + return { data, reportTx: undefined, extraDataTx: undefined }; + } + + const reportParams = { refSlot, + clBalance: postCLBalance, numValidators: postBeaconValidators, - clBalanceGwei: postCLBalance / ONE_GWEI, - stakingModuleIdsWithNewlyExitedValidators, - numExitedValidatorsByStakingModule, withdrawalVaultBalance, elRewardsVaultBalance, sharesRequestedToBurn, + simulatedShareRate, + stakingModuleIdsWithNewlyExitedValidators, + numExitedValidatorsByStakingModule, withdrawalFinalizationBatches, isBunkerMode, - vaultsValues: vaultValues, - vaultsInOutDeltas: inOutDeltas, extraDataFormat, extraDataHash, extraDataItemsCount, - } satisfies AccountingOracle.ReportDataStruct; - - if (dryRun) { - log.debug("Final Report (Dry Run)", reportData); - return { data: reportData, reportTx: undefined, extraDataTx: undefined }; - } - - return submitReport(ctx, { - ...reportData, - clBalance: postCLBalance, extraDataList, - }); + }; + + return submitReport(ctx, reportParams); }; export const getReportTimeElapsed = async (ctx: ProtocolContext) => { @@ -271,43 +321,29 @@ export const waitNextAvailableReportTime = async (ctx: ProtocolContext): Promise expect(nextFrame.refSlot).to.equal(refSlot + slotsPerFrame, "Next frame refSlot is incorrect"); }; -type SimulateReportParams = { - refSlot: bigint; - beaconValidators: bigint; - clBalance: bigint; - withdrawalVaultBalance: bigint; - elRewardsVaultBalance: bigint; - vaultValues: bigint[]; - inOutDeltas: bigint[]; -}; - -type SimulateReportResult = { - postTotalPooledEther: bigint; - 
postTotalShares: bigint; - withdrawals: bigint; - elRewards: bigint; -}; - /** * Simulate oracle report to get the expected result. */ const simulateReport = async ( ctx: ProtocolContext, - { - refSlot, - beaconValidators, - clBalance, - withdrawalVaultBalance, - elRewardsVaultBalance, - vaultValues, - inOutDeltas, - }: SimulateReportParams, -): Promise => { - const { hashConsensus, accounting } = ctx.contracts; + params: { + refSlot: bigint; + beaconValidators: bigint; + clBalance: bigint; + withdrawalVaultBalance: bigint; + elRewardsVaultBalance: bigint; + }, +): Promise< + { postTotalPooledEther: bigint; postTotalShares: bigint; withdrawals: bigint; elRewards: bigint } | undefined +> => { + const { hashConsensus, accountingOracle, lido } = ctx.contracts; + const { refSlot, beaconValidators, clBalance, withdrawalVaultBalance, elRewardsVaultBalance } = params; const { genesisTime, secondsPerSlot } = await hashConsensus.getChainConfig(); const reportTimestamp = genesisTime + refSlot * secondsPerSlot; + const accountingOracleAccount = await impersonate(accountingOracle.address, ether("100")); + log.debug("Simulating oracle report", { "Ref Slot": refSlot, "Beacon Validators": beaconValidators, @@ -316,61 +352,84 @@ const simulateReport = async ( "El Rewards Vault Balance": formatEther(elRewardsVaultBalance), }); - const { timeElapsed } = await getReportTimeElapsed(ctx); - const update = await accounting.simulateOracleReport( - { - timestamp: reportTimestamp, - timeElapsed, - clValidators: beaconValidators, - clBalance, - withdrawalVaultBalance, - elRewardsVaultBalance, - sharesRequestedToBurn: 0n, - withdrawalFinalizationBatches: [], - vaultValues, - inOutDeltas, + // NOTE: To enable negative rebase sanity checker, the static call below + // replaced with advanced eth_call with stateDiff. 
+ // const [postTotalPooledEther1, postTotalShares1, withdrawals1, elRewards1] = await lido + // .connect(accountingOracleAccount) + // .handleOracleReport.staticCall( + // reportTimestamp, + // 1n * 24n * 60n * 60n, // 1 day + // beaconValidators, + // clBalance, + // withdrawalVaultBalance, + // elRewardsVaultBalance, + // 0n, + // [], + // 0n, + // ); + + // Step 1: Encode the function call data + const data = lido.interface.encodeFunctionData("handleOracleReport", [ + reportTimestamp, + BigInt(24 * 60 * 60), // 1 day in seconds + beaconValidators, + clBalance, + withdrawalVaultBalance, + elRewardsVaultBalance, + BigInt(0), + [], + BigInt(0), + ]); + + // Step 2: Prepare the transaction object + const transactionObject = { + to: lido.address, + from: accountingOracleAccount.address, + data: data, + }; + + // Step 3: Prepare call parameters, state diff and perform eth_call + const accountingOracleAddr = await accountingOracle.getAddress(); + const callParams = [transactionObject, "latest"]; + const LAST_PROCESSING_REF_SLOT_POSITION = streccak("lido.BaseOracle.lastProcessingRefSlot"); + const stateDiff = { + [accountingOracleAddr]: { + stateDiff: { + [LAST_PROCESSING_REF_SLOT_POSITION]: refSlot, // setting the processing refslot for the sanity checker + }, }, - 0n, + }; + + const returnData = await ethers.provider.send("eth_call", [...callParams, stateDiff]); + + // Step 4: Decode the returned data + const [[postTotalPooledEther, postTotalShares, withdrawals, elRewards]] = lido.interface.decodeFunctionResult( + "handleOracleReport", + returnData, ); log.debug("Simulation result", { - "Post Total Pooled Ether": formatEther(update.postTotalPooledEther), - "Post Total Shares": update.postTotalShares, - "Withdrawals": formatEther(update.withdrawals), - "El Rewards": formatEther(update.elRewards), + "Post Total Pooled Ether": formatEther(postTotalPooledEther), + "Post Total Shares": postTotalShares, + "Withdrawals": formatEther(withdrawals), + "El Rewards": 
formatEther(elRewards), }); - return { - postTotalPooledEther: update.postTotalPooledEther, - postTotalShares: update.postTotalShares, - withdrawals: update.withdrawals, - elRewards: update.elRewards, - }; -}; - -type HandleOracleReportParams = { - beaconValidators: bigint; - clBalance: bigint; - sharesRequestedToBurn: bigint; - withdrawalVaultBalance: bigint; - elRewardsVaultBalance: bigint; - vaultValues?: bigint[]; - inOutDeltas?: bigint[]; + return { postTotalPooledEther, postTotalShares, withdrawals, elRewards }; }; export const handleOracleReport = async ( ctx: ProtocolContext, - { - beaconValidators, - clBalance, - sharesRequestedToBurn, - withdrawalVaultBalance, - elRewardsVaultBalance, - vaultValues = [], - inOutDeltas = [], - }: HandleOracleReportParams, + params: { + beaconValidators: bigint; + clBalance: bigint; + sharesRequestedToBurn: bigint; + withdrawalVaultBalance: bigint; + elRewardsVaultBalance: bigint; + }, ): Promise => { - const { hashConsensus, accountingOracle, accounting } = ctx.contracts; + const { hashConsensus, accountingOracle, lido } = ctx.contracts; + const { beaconValidators, clBalance, sharesRequestedToBurn, withdrawalVaultBalance, elRewardsVaultBalance } = params; const { refSlot } = await hashConsensus.getCurrentFrame(); const { genesisTime, secondsPerSlot } = await hashConsensus.getChainConfig(); @@ -387,42 +446,36 @@ export const handleOracleReport = async ( "El Rewards Vault Balance": formatEther(elRewardsVaultBalance), }); - const { timeElapsed } = await getReportTimeElapsed(ctx); - - const handleReportTx = await accounting.connect(accountingOracleAccount).handleOracleReport({ - timestamp: reportTimestamp, - timeElapsed, // 1 day - clValidators: beaconValidators, + await lido.connect(accountingOracleAccount).handleOracleReport( + reportTimestamp, + 1n * 24n * 60n * 60n, // 1 day + beaconValidators, clBalance, withdrawalVaultBalance, elRewardsVaultBalance, sharesRequestedToBurn, - withdrawalFinalizationBatches: [], - 
vaultValues, - inOutDeltas, - }); - - await trace("accounting.handleOracleReport", handleReportTx); + [], + 0n, + ); } catch (error) { log.error("Error", (error as Error).message ?? "Unknown error during oracle report simulation"); expect(error).to.be.undefined; } }; -type FinalizationBatchesParams = { - shareRate: bigint; - limitedWithdrawalVaultBalance: bigint; - limitedElRewardsVaultBalance: bigint; -}; - /** * Get finalization batches to finalize withdrawals. */ const getFinalizationBatches = async ( ctx: ProtocolContext, - { shareRate, limitedWithdrawalVaultBalance, limitedElRewardsVaultBalance }: FinalizationBatchesParams, + params: { + shareRate: bigint; + limitedWithdrawalVaultBalance: bigint; + limitedElRewardsVaultBalance: bigint; + }, ): Promise => { const { oracleReportSanityChecker, lido, withdrawalQueue } = ctx.contracts; + const { shareRate, limitedWithdrawalVaultBalance, limitedElRewardsVaultBalance } = params; const { requestTimestampMargin } = await oracleReportSanityChecker.getOracleReportLimits(); @@ -437,7 +490,13 @@ const getFinalizationBatches = async ( const MAX_REQUESTS_PER_CALL = 1000n; if (availableEth === 0n) { - log.warning("No available ether to request withdrawals"); + log.debug("No available ether to request withdrawals", { + "Share rate": shareRate, + "Available eth": formatEther(availableEth), + "Limited withdrawal vault balance": formatEther(limitedWithdrawalVaultBalance), + "Limited el rewards vault balance": formatEther(limitedElRewardsVaultBalance), + "Reserved buffer": formatEther(reservedBuffer), + }); return []; } @@ -492,35 +551,10 @@ const getFinalizationBatches = async ( return (batchesState.batches as Result).toArray().filter((x) => x > 0n); }; -export type OracleReportSubmitParams = { - refSlot: bigint; - clBalance: bigint; - numValidators: bigint; - withdrawalVaultBalance: bigint; - elRewardsVaultBalance: bigint; - sharesRequestedToBurn: bigint; - stakingModuleIdsWithNewlyExitedValidators?: bigint[]; - 
numExitedValidatorsByStakingModule?: bigint[]; - withdrawalFinalizationBatches?: bigint[]; - isBunkerMode?: boolean; - vaultsValues: bigint[]; - vaultsInOutDeltas: bigint[]; - extraDataFormat?: bigint; - extraDataHash?: string; - extraDataItemsCount?: bigint; - extraDataList?: Uint8Array; -}; - -type OracleReportSubmitResult = { - data: AccountingOracle.ReportDataStruct; - reportTx: ContractTransactionResponse; - extraDataTx: ContractTransactionResponse; -}; - /** * Main function to push oracle report to the protocol. */ -const submitReport = async ( +export const submitReport = async ( ctx: ProtocolContext, { refSlot, @@ -529,18 +563,21 @@ const submitReport = async ( withdrawalVaultBalance, elRewardsVaultBalance, sharesRequestedToBurn, + simulatedShareRate, stakingModuleIdsWithNewlyExitedValidators = [], numExitedValidatorsByStakingModule = [], withdrawalFinalizationBatches = [], isBunkerMode = false, - vaultsValues = [], - vaultsInOutDeltas = [], extraDataFormat = 0n, extraDataHash = ZERO_BYTES32, extraDataItemsCount = 0n, extraDataList = new Uint8Array(), - }: OracleReportSubmitParams, -): Promise => { + } = {} as OracleReportPushOptions, +): Promise<{ + data: AccountingOracle.ReportDataStruct; + reportTx: ContractTransactionResponse; + extraDataTx: ContractTransactionResponse; +}> => { const { accountingOracle } = ctx.contracts; log.debug("Pushing oracle report", { @@ -550,12 +587,11 @@ const submitReport = async ( "Withdrawal vault": formatEther(withdrawalVaultBalance), "El rewards vault": formatEther(elRewardsVaultBalance), "Shares requested to burn": sharesRequestedToBurn, + "Simulated share rate": simulatedShareRate, "Staking module ids with newly exited validators": stakingModuleIdsWithNewlyExitedValidators, "Num exited validators by staking module": numExitedValidatorsByStakingModule, "Withdrawal finalization batches": withdrawalFinalizationBatches, "Is bunker mode": isBunkerMode, - "Vaults values": vaultsValues, - "Vaults in-out deltas": 
vaultsInOutDeltas, "Extra data format": extraDataFormat, "Extra data hash": extraDataHash, "Extra data items count": extraDataItemsCount, @@ -573,12 +609,11 @@ const submitReport = async ( withdrawalVaultBalance, elRewardsVaultBalance, sharesRequestedToBurn, + simulatedShareRate, stakingModuleIdsWithNewlyExitedValidators, numExitedValidatorsByStakingModule, withdrawalFinalizationBatches, isBunkerMode, - vaultsValues, - vaultsInOutDeltas, extraDataFormat, extraDataHash, extraDataItemsCount, @@ -597,8 +632,6 @@ const submitReport = async ( const reportTx = await accountingOracle.connect(submitter).submitReportData(data, oracleVersion); - await trace("accountingOracle.submitReportData", reportTx); - log.debug("Pushed oracle report main data", { "Ref slot": refSlot, "Consensus version": consensusVersion, @@ -608,10 +641,8 @@ const submitReport = async ( let extraDataTx: ContractTransactionResponse; if (extraDataFormat) { extraDataTx = await accountingOracle.connect(submitter).submitReportExtraDataList(extraDataList); - await trace("accountingOracle.submitReportExtraDataList", extraDataTx); } else { extraDataTx = await accountingOracle.connect(submitter).submitReportExtraDataEmpty(); - await trace("accountingOracle.submitReportExtraDataEmpty", extraDataTx); } const state = await accountingOracle.getProcessingState(); @@ -651,10 +682,77 @@ const submitReport = async ( return { data, reportTx, extraDataTx }; }; -type ReachConsensusParams = { - refSlot: bigint; - reportHash: string; - consensusVersion: bigint; +/** + * Ensure that the oracle committee has the required number of members. 
+ */ +export const ensureOracleCommitteeMembers = async (ctx: ProtocolContext, minMembersCount = MIN_MEMBERS_COUNT) => { + const { hashConsensus } = ctx.contracts; + + const members = await hashConsensus.getFastLaneMembers(); + const addresses = members.addresses.map((address) => address.toLowerCase()); + + const agentSigner = await ctx.getSigner("agent"); + + if (addresses.length >= minMembersCount) { + log.debug("Oracle committee members count is sufficient", { + "Min members count": minMembersCount, + "Members count": addresses.length, + "Members": addresses.join(", "), + }); + + return; + } + + const managementRole = await hashConsensus.MANAGE_MEMBERS_AND_QUORUM_ROLE(); + await hashConsensus.connect(agentSigner).grantRole(managementRole, agentSigner); + + let count = addresses.length; + while (addresses.length < minMembersCount) { + const address = getOracleCommitteeMemberAddress(count); + + log.debug(`Adding oracle committee member ${count}`, { + "Min members count": minMembersCount, + "Address": address, + }); + + await hashConsensus.connect(agentSigner).addMember(address, minMembersCount); + + addresses.push(address); + + log.success(`Added oracle committee member ${count}`); + + count++; + } + + await hashConsensus.connect(agentSigner).renounceRole(managementRole, agentSigner); + + log.debug("Checked oracle committee members count", { + "Min members count": minMembersCount, + "Members count": addresses.length, + "Members": addresses.join(", "), + }); + + expect(addresses.length).to.be.gte(minMembersCount); +}; + +export const ensureHashConsensusInitialEpoch = async (ctx: ProtocolContext) => { + const { hashConsensus } = ctx.contracts; + + const { initialEpoch } = await hashConsensus.getFrameConfig(); + if (initialEpoch === HASH_CONSENSUS_FAR_FUTURE_EPOCH) { + log.debug("Initializing hash consensus epoch...", { + "Initial epoch": initialEpoch, + }); + + const latestBlockTimestamp = await getCurrentBlockTimestamp(); + const { genesisTime, secondsPerSlot, 
slotsPerEpoch } = await hashConsensus.getChainConfig(); + const updatedInitialEpoch = (latestBlockTimestamp - genesisTime) / (slotsPerEpoch * secondsPerSlot); + + const agentSigner = await ctx.getSigner("agent"); + await hashConsensus.connect(agentSigner).updateInitialEpoch(updatedInitialEpoch); + + log.success("Hash consensus epoch initialized"); + } }; /** @@ -662,9 +760,14 @@ type ReachConsensusParams = { */ const reachConsensus = async ( ctx: ProtocolContext, - { refSlot, reportHash, consensusVersion }: ReachConsensusParams, + params: { + refSlot: bigint; + reportHash: string; + consensusVersion: bigint; + }, ) => { const { hashConsensus } = ctx.contracts; + const { refSlot, reportHash, consensusVersion } = params; const { addresses } = await hashConsensus.getFastLaneMembers(); @@ -683,8 +786,7 @@ const reachConsensus = async ( submitter = member; } - const tx = await hashConsensus.connect(member).submitReport(refSlot, reportHash, consensusVersion); - await trace("hashConsensus.submitReport", tx); + await hashConsensus.connect(member).submitReport(refSlot, reportHash, consensusVersion); } const { consensusReport } = await hashConsensus.getConsensusState(); @@ -708,9 +810,8 @@ const getReportDataItems = (data: AccountingOracle.ReportDataStruct) => [ data.elRewardsVaultBalance, data.sharesRequestedToBurn, data.withdrawalFinalizationBatches, + data.simulatedShareRate, data.isBunkerMode, - data.vaultsValues, - data.vaultsInOutDeltas, data.extraDataFormat, data.extraDataHash, data.extraDataItemsCount, @@ -731,9 +832,8 @@ const calcReportDataHash = (items: ReturnType) => { "uint256", // elRewardsVaultBalance "uint256", // sharesRequestedToBurn "uint256[]", // withdrawalFinalizationBatches + "uint256", // simulatedShareRate "bool", // isBunkerMode - "uint256[]", // vaultsValues - "int256[]", // vaultsInOutDeltas "uint256", // extraDataFormat "bytes32", // extraDataHash "uint256", // extraDataItemsCount @@ -747,76 +847,3 @@ const calcReportDataHash = (items: 
ReturnType) => { * Helper function to get oracle committee member address by id. */ const getOracleCommitteeMemberAddress = (id: number) => certainAddress(`AO:HC:OC:${id}`); - -/** - * Ensure that the oracle committee has the required number of members. - */ -export const ensureOracleCommitteeMembers = async (ctx: ProtocolContext, minMembersCount = MIN_MEMBERS_COUNT) => { - const { hashConsensus } = ctx.contracts; - - const members = await hashConsensus.getFastLaneMembers(); - const addresses = members.addresses.map((address) => address.toLowerCase()); - - const agentSigner = await ctx.getSigner("agent"); - - if (addresses.length >= minMembersCount) { - log.debug("Oracle committee members count is sufficient", { - "Min members count": minMembersCount, - "Members count": addresses.length, - "Members": addresses.join(", "), - }); - - return; - } - - const managementRole = await hashConsensus.MANAGE_MEMBERS_AND_QUORUM_ROLE(); - await hashConsensus.connect(agentSigner).grantRole(managementRole, agentSigner); - - let count = addresses.length; - while (addresses.length < minMembersCount) { - log.warning(`Adding oracle committee member ${count}`); - - const address = getOracleCommitteeMemberAddress(count); - const addTx = await hashConsensus.connect(agentSigner).addMember(address, minMembersCount); - await trace("hashConsensus.addMember", addTx); - - addresses.push(address); - - log.success(`Added oracle committee member ${count}`); - - count++; - } - - await hashConsensus.connect(agentSigner).renounceRole(managementRole, agentSigner); - - log.debug("Checked oracle committee members count", { - "Min members count": minMembersCount, - "Members count": addresses.length, - "Members": addresses.join(", "), - }); - - expect(addresses.length).to.be.gte(minMembersCount); -}; - -/** - * Ensure that the oracle committee members have consensus on the initial epoch. 
- */ -export const ensureHashConsensusInitialEpoch = async (ctx: ProtocolContext) => { - const { hashConsensus } = ctx.contracts; - - const { initialEpoch } = await hashConsensus.getFrameConfig(); - if (initialEpoch === HASH_CONSENSUS_FAR_FUTURE_EPOCH) { - log.warning("Initializing hash consensus epoch..."); - - const latestBlockTimestamp = await getCurrentBlockTimestamp(); - const { genesisTime, secondsPerSlot, slotsPerEpoch } = await hashConsensus.getChainConfig(); - const updatedInitialEpoch = (latestBlockTimestamp - genesisTime) / (slotsPerEpoch * secondsPerSlot); - - const agentSigner = await ctx.getSigner("agent"); - - const tx = await hashConsensus.connect(agentSigner).updateInitialEpoch(updatedInitialEpoch); - await trace("hashConsensus.updateInitialEpoch", tx); - - log.success("Hash consensus epoch initialized"); - } -}; diff --git a/lib/protocol/helpers/index.ts b/lib/protocol/helpers/index.ts index 174778f9cd..599fcb9c39 100644 --- a/lib/protocol/helpers/index.ts +++ b/lib/protocol/helpers/index.ts @@ -1,4 +1,4 @@ -export { unpauseStaking, ensureStakeLimit } from "./staking"; +export { unpauseStaking, ensureStakeLimit, depositAndReportValidators } from "./staking"; export { unpauseWithdrawalQueue, finalizeWithdrawalQueue } from "./withdrawal"; diff --git a/lib/protocol/helpers/nor.ts b/lib/protocol/helpers/nor.ts index c37cf5efa9..c350d7011c 100644 --- a/lib/protocol/helpers/nor.ts +++ b/lib/protocol/helpers/nor.ts @@ -1,10 +1,13 @@ import { expect } from "chai"; -import { randomBytes } from "ethers"; +import { ethers, randomBytes } from "ethers"; -import { certainAddress, log, trace } from "lib"; +import { certainAddress, log } from "lib"; import { ProtocolContext, StakingModuleName } from "../types"; +import { depositAndReportValidators } from "./staking"; + +const NOR_MODULE_ID = 1n; const MIN_OPS_COUNT = 3n; const MIN_OP_KEYS_COUNT = 10n; @@ -16,10 +19,9 @@ export const norEnsureOperators = async ( minOperatorsCount = MIN_OPS_COUNT, 
minOperatorKeysCount = MIN_OP_KEYS_COUNT, ) => { - await norEnsureOperatorsHaveMinKeys(ctx, minOperatorsCount, minOperatorKeysCount); - const { nor } = ctx.contracts; + const newOperatorsCount = await norEnsureOperatorsHaveMinKeys(ctx, minOperatorsCount, minOperatorKeysCount); for (let operatorId = 0n; operatorId < minOperatorsCount; operatorId++) { const nodeOperatorBefore = await nor.getNodeOperator(operatorId, false); @@ -39,6 +41,10 @@ export const norEnsureOperators = async ( "Min operators count": minOperatorsCount, "Min keys count": minOperatorKeysCount, }); + + if (newOperatorsCount > 0) { + await depositAndReportValidators(ctx, NOR_MODULE_ID, newOperatorsCount); + } }; /** @@ -48,8 +54,8 @@ const norEnsureOperatorsHaveMinKeys = async ( ctx: ProtocolContext, minOperatorsCount = MIN_OPS_COUNT, minKeysCount = MIN_OP_KEYS_COUNT, -) => { - await norEnsureMinOperators(ctx, minOperatorsCount); +): Promise => { + const newOperatorsCount = await norEnsureMinOperators(ctx, minOperatorsCount); const { nor } = ctx.contracts; @@ -67,12 +73,14 @@ const norEnsureOperatorsHaveMinKeys = async ( expect(keysCountAfter).to.be.gte(minKeysCount); } + + return newOperatorsCount; }; /** * Fills the NOR with some operators in case there are not enough of them. 
*/ -const norEnsureMinOperators = async (ctx: ProtocolContext, minOperatorsCount = MIN_OPS_COUNT) => { +const norEnsureMinOperators = async (ctx: ProtocolContext, minOperatorsCount = MIN_OPS_COUNT): Promise => { const { nor } = ctx.contracts; const before = await nor.getNodeOperatorsCount(); @@ -96,6 +104,8 @@ const norEnsureMinOperators = async (ctx: ProtocolContext, minOperatorsCount = M expect(after).to.equal(before + count); expect(after).to.be.gte(minOperatorsCount); + + return count; }; /** @@ -113,12 +123,15 @@ export const norAddNodeOperator = async ( const { nor } = ctx.contracts; const { operatorId, name, rewardAddress, managerAddress } = params; - log.warning(`Adding fake NOR operator ${operatorId}`); + log.debug(`Adding fake NOR operator ${operatorId}`, { + "Operator ID": operatorId, + "Name": name, + "Reward address": rewardAddress, + "Manager address": managerAddress, + }); const agentSigner = await ctx.getSigner("agent"); - - const addTx = await nor.connect(agentSigner).addNodeOperator(name, rewardAddress); - await trace("nodeOperatorRegistry.addNodeOperator", addTx); + await nor.connect(agentSigner).addNodeOperator(name, rewardAddress); log.debug("Added NOR fake operator", { "Operator ID": operatorId, @@ -143,14 +156,17 @@ export const norAddOperatorKeys = async ( const { nor } = ctx.contracts; const { operatorId, keysToAdd } = params; - log.warning(`Adding fake keys to NOR operator ${operatorId}`); + log.debug(`Adding fake keys to NOR operator ${operatorId}`, { + "Operator ID": operatorId, + "Keys to add": keysToAdd, + }); const totalKeysBefore = await nor.getTotalSigningKeyCount(operatorId); const unusedKeysBefore = await nor.getUnusedSigningKeyCount(operatorId); const votingSigner = await ctx.getSigner("voting"); - const addKeysTx = await nor + await nor .connect(votingSigner) .addSigningKeys( operatorId, @@ -158,7 +174,6 @@ export const norAddOperatorKeys = async ( randomBytes(Number(keysToAdd * PUBKEY_LENGTH)), randomBytes(Number(keysToAdd * 
SIGNATURE_LENGTH)), ); - await trace("nodeOperatorRegistry.addSigningKeys", addKeysTx); const totalKeysAfter = await nor.getTotalSigningKeyCount(operatorId); const unusedKeysAfter = await nor.getUnusedSigningKeyCount(operatorId); @@ -191,12 +206,13 @@ const norSetOperatorStakingLimit = async ( const { nor } = ctx.contracts; const { operatorId, limit } = params; - log.warning(`Setting NOR operator ${operatorId} staking limit`); + log.debug(`Setting NOR operator ${operatorId} staking limit`, { + "Operator ID": operatorId, + "Limit": ethers.formatEther(limit), + }); const votingSigner = await ctx.getSigner("voting"); - - const setLimitTx = await nor.connect(votingSigner).setNodeOperatorStakingLimit(operatorId, limit); - await trace("nodeOperatorRegistry.setNodeOperatorStakingLimit", setLimitTx); + await nor.connect(votingSigner).setNodeOperatorStakingLimit(operatorId, limit); log.success(`Set NOR operator ${operatorId} staking limit`); }; diff --git a/lib/protocol/helpers/sdvt.ts b/lib/protocol/helpers/sdvt.ts index 85b1981acb..7dc99dcd49 100644 --- a/lib/protocol/helpers/sdvt.ts +++ b/lib/protocol/helpers/sdvt.ts @@ -1,13 +1,14 @@ import { expect } from "chai"; import { randomBytes } from "ethers"; -import { impersonate, log, streccak, trace } from "lib"; +import { ether, impersonate, log, streccak } from "lib"; -import { ether } from "../../units"; import { ProtocolContext } from "../types"; import { getOperatorManagerAddress, getOperatorName, getOperatorRewardAddress } from "./nor"; +import { depositAndReportValidators } from "./staking"; +const SDVT_MODULE_ID = 2n; const MIN_OPS_COUNT = 3n; const MIN_OP_KEYS_COUNT = 10n; @@ -21,7 +22,7 @@ export const sdvtEnsureOperators = async ( minOperatorsCount = MIN_OPS_COUNT, minOperatorKeysCount = MIN_OP_KEYS_COUNT, ) => { - await sdvtEnsureOperatorsHaveMinKeys(ctx, minOperatorsCount, minOperatorKeysCount); + const newOperatorsCount = await sdvtEnsureOperatorsHaveMinKeys(ctx, minOperatorsCount, minOperatorKeysCount); const 
{ sdvt } = ctx.contracts; @@ -39,6 +40,10 @@ export const sdvtEnsureOperators = async ( expect(nodeOperatorAfter.totalVettedValidators).to.equal(nodeOperatorBefore.totalAddedValidators); } + + if (newOperatorsCount > 0) { + await depositAndReportValidators(ctx, SDVT_MODULE_ID, newOperatorsCount); + } }; /** @@ -48,8 +53,8 @@ const sdvtEnsureOperatorsHaveMinKeys = async ( ctx: ProtocolContext, minOperatorsCount = MIN_OPS_COUNT, minKeysCount = MIN_OP_KEYS_COUNT, -) => { - await sdvtEnsureMinOperators(ctx, minOperatorsCount); +): Promise => { + const newOperatorsCount = await sdvtEnsureMinOperators(ctx, minOperatorsCount); const { sdvt } = ctx.contracts; @@ -57,7 +62,10 @@ const sdvtEnsureOperatorsHaveMinKeys = async ( const unusedKeysCount = await sdvt.getUnusedSigningKeyCount(operatorId); if (unusedKeysCount < minKeysCount) { - log.warning(`Adding SDVT fake keys to operator ${operatorId}`); + log.debug(`Adding SDVT fake keys to operator ${operatorId}`, { + "Unused keys count": unusedKeysCount, + "Min keys count": minKeysCount, + }); await sdvtAddNodeOperatorKeys(ctx, { operatorId, @@ -74,12 +82,14 @@ const sdvtEnsureOperatorsHaveMinKeys = async ( "Min operators count": minOperatorsCount, "Min keys count": minKeysCount, }); + + return newOperatorsCount; }; /** * Fills the Simple DVT with some operators in case there are not enough of them. 
*/ -const sdvtEnsureMinOperators = async (ctx: ProtocolContext, minOperatorsCount = MIN_OPS_COUNT) => { +const sdvtEnsureMinOperators = async (ctx: ProtocolContext, minOperatorsCount = MIN_OPS_COUNT): Promise => { const { sdvt } = ctx.contracts; const before = await sdvt.getNodeOperatorsCount(); @@ -95,7 +105,12 @@ const sdvtEnsureMinOperators = async (ctx: ProtocolContext, minOperatorsCount = managerAddress: getOperatorManagerAddress("sdvt", operatorId), }; - log.warning(`Adding SDVT fake operator ${operatorId}`); + log.debug(`Adding SDVT fake operator ${operatorId}`, { + "Operator ID": operatorId, + "Name": operator.name, + "Reward address": operator.rewardAddress, + "Manager address": operator.managerAddress, + }); await sdvtAddNodeOperator(ctx, operator); count++; @@ -110,6 +125,8 @@ const sdvtEnsureMinOperators = async (ctx: ProtocolContext, minOperatorsCount = "Min operators count": minOperatorsCount, "Operators count": after, }); + + return count; }; /** @@ -129,24 +146,16 @@ const sdvtAddNodeOperator = async ( const easyTrackExecutor = await ctx.getSigner("easyTrack"); - const addTx = await sdvt.connect(easyTrackExecutor).addNodeOperator(name, rewardAddress); - await trace("simpleDVT.addNodeOperator", addTx); - - const grantPermissionTx = await acl.connect(easyTrackExecutor).grantPermissionP( + await sdvt.connect(easyTrackExecutor).addNodeOperator(name, rewardAddress); + await acl.connect(easyTrackExecutor).grantPermissionP( managerAddress, sdvt.address, MANAGE_SIGNING_KEYS_ROLE, // See https://legacy-docs.aragon.org/developers/tools/aragonos/reference-aragonos-3#parameter-interpretation for details [1 << (240 + Number(operatorId))], ); - await trace("acl.grantPermissionP", grantPermissionTx); - log.debug("Added SDVT fake operator", { - "Operator ID": operatorId, - "Name": name, - "Reward address": rewardAddress, - "Manager address": managerAddress, - }); + log.success(`Added fake SDVT operator ${operatorId}`); }; /** @@ -167,8 +176,7 @@ const 
sdvtAddNodeOperatorKeys = async ( const { rewardAddress } = await sdvt.getNodeOperator(operatorId, false); const actor = await impersonate(rewardAddress, ether("100")); - - const addKeysTx = await sdvt + await sdvt .connect(actor) .addSigningKeys( operatorId, @@ -176,7 +184,6 @@ const sdvtAddNodeOperatorKeys = async ( randomBytes(Number(keysToAdd * PUBKEY_LENGTH)), randomBytes(Number(keysToAdd * SIGNATURE_LENGTH)), ); - await trace("simpleDVT.addSigningKeys", addKeysTx); const totalKeysAfter = await sdvt.getTotalSigningKeyCount(operatorId); const unusedKeysAfter = await sdvt.getUnusedSigningKeyCount(operatorId); @@ -184,14 +191,7 @@ const sdvtAddNodeOperatorKeys = async ( expect(totalKeysAfter).to.equal(totalKeysBefore + keysToAdd); expect(unusedKeysAfter).to.equal(unusedKeysBefore + keysToAdd); - log.debug("Added SDVT fake signing keys", { - "Operator ID": operatorId, - "Keys to add": keysToAdd, - "Total keys before": totalKeysBefore, - "Total keys after": totalKeysAfter, - "Unused keys before": unusedKeysBefore, - "Unused keys after": unusedKeysAfter, - }); + log.success(`Added fake keys to SDVT operator ${operatorId}`); }; /** @@ -208,7 +208,7 @@ const sdvtSetOperatorStakingLimit = async ( const { operatorId, limit } = params; const easyTrackExecutor = await ctx.getSigner("easyTrack"); + await sdvt.connect(easyTrackExecutor).setNodeOperatorStakingLimit(operatorId, limit); - const setLimitTx = await sdvt.connect(easyTrackExecutor).setNodeOperatorStakingLimit(operatorId, limit); - await trace("simpleDVT.setNodeOperatorStakingLimit", setLimitTx); + log.success(`Set SDVT operator ${operatorId} staking limit`); }; diff --git a/lib/protocol/helpers/staking.ts b/lib/protocol/helpers/staking.ts index e482f3b545..03422bef48 100644 --- a/lib/protocol/helpers/staking.ts +++ b/lib/protocol/helpers/staking.ts @@ -1,18 +1,21 @@ -import { ether, log, trace } from "lib"; +import { ethers, ZeroAddress } from "ethers"; + +import { certainAddress, ether, impersonate, log } from 
"lib"; + +import { ZERO_HASH } from "test/deploy"; import { ProtocolContext } from "../types"; +import { report } from "./accounting"; + /** * Unpauses the staking contract. */ export const unpauseStaking = async (ctx: ProtocolContext) => { const { lido } = ctx.contracts; if (await lido.isStakingPaused()) { - log.warning("Unpausing staking contract"); - const votingSigner = await ctx.getSigner("voting"); - const tx = await lido.connect(votingSigner).resume(); - await trace("lido.resume", tx); + await lido.connect(votingSigner).resume(); log.success("Staking contract unpaused"); } @@ -23,15 +26,52 @@ export const ensureStakeLimit = async (ctx: ProtocolContext) => { const stakeLimitInfo = await lido.getStakeLimitFullInfo(); if (!stakeLimitInfo.isStakingLimitSet) { - log.warning("Setting staking limit"); - const maxStakeLimit = ether("150000"); const stakeLimitIncreasePerBlock = ether("20"); // this is an arbitrary value + log.debug("Setting staking limit", { + "Max stake limit": ethers.formatEther(maxStakeLimit), + "Stake limit increase per block": ethers.formatEther(stakeLimitIncreasePerBlock), + }); + const votingSigner = await ctx.getSigner("voting"); - const tx = await lido.connect(votingSigner).setStakingLimit(maxStakeLimit, stakeLimitIncreasePerBlock); - await trace("lido.setStakingLimit", tx); + await lido.connect(votingSigner).setStakingLimit(maxStakeLimit, stakeLimitIncreasePerBlock); log.success("Staking limit set"); } }; + +export const depositAndReportValidators = async (ctx: ProtocolContext, moduleId: bigint, depositsCount: bigint) => { + const { lido, depositSecurityModule } = ctx.contracts; + const ethHolder = await impersonate(certainAddress("provision:eth:whale"), ether("100000")); + + await lido.connect(ethHolder).submit(ZeroAddress, { value: ether("10000") }); + + // Deposit validators + const dsmSigner = await impersonate(depositSecurityModule.address, ether("100000")); + await lido.connect(dsmSigner).deposit(depositsCount, moduleId, ZERO_HASH); + 
+ const before = await lido.getBeaconStat(); + + log.debug("Validators on beacon chain before provisioning", { + "Module ID to deposit": moduleId, + "Deposited": before.depositedValidators, + "Total": before.beaconValidators, + "Balance": before.beaconBalance, + }); + + // Add new validators to beacon chain + await report(ctx, { + clDiff: ether("32") * depositsCount, + clAppearedValidators: depositsCount, + }); + + const after = await lido.getBeaconStat(); + + log.debug("Validators on beacon chain after depositing", { + "Module ID deposited": moduleId, + "Deposited": after.depositedValidators, + "Total": after.beaconValidators, + "Balance": after.beaconBalance, + }); +}; diff --git a/lib/protocol/helpers/withdrawal.ts b/lib/protocol/helpers/withdrawal.ts index 43315298b5..eb10e630ba 100644 --- a/lib/protocol/helpers/withdrawal.ts +++ b/lib/protocol/helpers/withdrawal.ts @@ -1,9 +1,6 @@ -import { expect } from "chai"; import { ZeroAddress } from "ethers"; -import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; - -import { ether, log, trace, updateBalance } from "lib"; +import { certainAddress, ether, impersonate, log } from "lib"; import { ProtocolContext } from "../types"; @@ -15,41 +12,27 @@ import { report } from "./accounting"; export const unpauseWithdrawalQueue = async (ctx: ProtocolContext) => { const { withdrawalQueue } = ctx.contracts; if (await withdrawalQueue.isPaused()) { - log.warning("Unpausing withdrawal queue contract"); - const resumeRole = await withdrawalQueue.RESUME_ROLE(); const agentSigner = await ctx.getSigner("agent"); const agentSignerAddress = await agentSigner.getAddress(); await withdrawalQueue.connect(agentSigner).grantRole(resumeRole, agentSignerAddress); - - const tx = await withdrawalQueue.connect(agentSigner).resume(); - await trace("withdrawalQueue.resume", tx); - + await withdrawalQueue.connect(agentSigner).resume(); await withdrawalQueue.connect(agentSigner).revokeRole(resumeRole, agentSignerAddress); 
log.success("Unpaused withdrawal queue contract"); } }; -export const finalizeWithdrawalQueue = async ( - ctx: ProtocolContext, - stEthHolder: HardhatEthersSigner, - ethHolder: HardhatEthersSigner, -) => { +export const finalizeWithdrawalQueue = async (ctx: ProtocolContext) => { const { lido, withdrawalQueue } = ctx.contracts; - await updateBalance(ethHolder.address, ether("1000000")); - await updateBalance(stEthHolder.address, ether("1000000")); - + const ethHolder = await impersonate(certainAddress("withdrawalQueue:eth:whale"), ether("100000")); + const stEthHolder = await impersonate(certainAddress("withdrawalQueue:stEth:whale"), ether("100000")); const stEthHolderAmount = ether("10000"); // Here sendTransaction is used to validate native way of submitting ETH for stETH - const tx = await stEthHolder.sendTransaction({ to: lido.address, value: stEthHolderAmount }); - await trace("stEthHolder.sendTransaction", tx); - - const stEthHolderBalance = await lido.balanceOf(stEthHolder.address); - expect(stEthHolderBalance).to.approximately(stEthHolderAmount, 10n, "stETH balance increased"); + await stEthHolder.sendTransaction({ to: lido.address, value: stEthHolderAmount }); let lastFinalizedRequestId = await withdrawalQueue.getLastFinalizedRequestId(); let lastRequestId = await withdrawalQueue.getLastRequestId(); @@ -65,14 +48,10 @@ export const finalizeWithdrawalQueue = async ( "Last request ID": lastRequestId, }); - const submitTx = await ctx.contracts.lido.connect(ethHolder).submit(ZeroAddress, { value: ether("10000") }); - - await trace("lido.submit", submitTx); + await ctx.contracts.lido.connect(ethHolder).submit(ZeroAddress, { value: ether("10000") }); } - const submitTx = await ctx.contracts.lido.connect(ethHolder).submit(ZeroAddress, { value: ether("10000") }); - - await trace("lido.submit", submitTx); + await ctx.contracts.lido.connect(ethHolder).submit(ZeroAddress, { value: ether("10000") }); log.success("Finalized withdrawal queue"); }; diff --git 
a/lib/protocol/provision.ts b/lib/protocol/provision.ts index 29a0d76816..9457ba39a1 100644 --- a/lib/protocol/provision.ts +++ b/lib/protocol/provision.ts @@ -1,7 +1,10 @@ +import { log } from "lib"; + import { ensureHashConsensusInitialEpoch, ensureOracleCommitteeMembers, ensureStakeLimit, + finalizeWithdrawalQueue, norEnsureOperators, sdvtEnsureOperators, unpauseStaking, @@ -9,20 +12,32 @@ import { } from "./helpers"; import { ProtocolContext } from "./types"; +let alreadyProvisioned = false; + /** * In order to make the protocol fully operational from scratch deploy, the additional steps are required: */ export const provision = async (ctx: ProtocolContext) => { + if (alreadyProvisioned) { + log.success("Already provisioned"); + return; + } + await ensureHashConsensusInitialEpoch(ctx); await ensureOracleCommitteeMembers(ctx, 5n); await unpauseStaking(ctx); - await unpauseWithdrawalQueue(ctx); await norEnsureOperators(ctx, 3n, 5n); await sdvtEnsureOperators(ctx, 3n, 5n); + await finalizeWithdrawalQueue(ctx); + await ensureStakeLimit(ctx); + + alreadyProvisioned = true; + + log.success("Provisioned"); }; diff --git a/lib/scratch.ts b/lib/scratch.ts index e5d9751f6f..4613f9d4ad 100644 --- a/lib/scratch.ts +++ b/lib/scratch.ts @@ -6,6 +6,27 @@ import { ethers } from "hardhat"; import { log } from "./log"; import { resetStateFile } from "./state-file"; +class StepsFileNotFoundError extends Error { + constructor(filePath: string) { + super(`Steps file ${filePath} not found!`); + this.name = "StepsFileNotFoundError"; + } +} + +class MigrationFileNotFoundError extends Error { + constructor(filePath: string) { + super(`Migration file ${filePath} not found!`); + this.name = "MigrationFileNotFoundError"; + } +} + +class MigrationMainFunctionError extends Error { + constructor(filePath: string) { + super(`Migration file ${filePath} does not export a 'main' function!`); + this.name = "MigrationMainFunctionError"; + } +} + const deployedSteps: string[] = []; async function 
applySteps(steps: string[]) { @@ -35,8 +56,11 @@ export async function deployUpgrade(networkName: string): Promise { await applySteps(steps); } catch (error) { - log.error("Upgrade failed:", (error as Error).message); - log.warning("Upgrade steps not found, assuming the protocol is already deployed"); + if (error instanceof StepsFileNotFoundError) { + log.warning("Upgrade steps not found, assuming the protocol is already deployed"); + } else { + log.error("Upgrade failed:", (error as Error).message); + } } } @@ -55,7 +79,7 @@ type StepsFile = { export const loadSteps = (stepsFile: string): string[] => { const stepsPath = path.resolve(process.cwd(), `scripts/${stepsFile}`); if (!fs.existsSync(stepsPath)) { - throw new Error(`Steps file ${stepsPath} not found!`); + throw new StepsFileNotFoundError(stepsPath); } return (JSON.parse(fs.readFileSync(stepsPath, "utf8")) as StepsFile).steps; @@ -64,7 +88,7 @@ export const loadSteps = (stepsFile: string): string[] => { export const resolveMigrationFile = (step: string): string => { const migrationFile = path.resolve(process.cwd(), `scripts/${step}.ts`); if (!fs.existsSync(migrationFile)) { - throw new Error(`Migration file ${migrationFile} not found!`); + throw new MigrationFileNotFoundError(migrationFile); } return migrationFile; @@ -80,7 +104,7 @@ export async function applyMigrationScript(migrationFile: string): Promise const { main } = await import(fullPath); if (typeof main !== "function") { - throw new Error(`Migration file ${migrationFile} does not export a 'main' function!`); + throw new MigrationMainFunctionError(migrationFile); } try { diff --git a/lib/transaction.ts b/lib/transaction.ts deleted file mode 100644 index 0160a7f398..0000000000 --- a/lib/transaction.ts +++ /dev/null @@ -1,42 +0,0 @@ -import { - ContractTransactionReceipt, - ContractTransactionResponse, - TransactionReceipt, - TransactionResponse, -} from "ethers"; -import hre, { ethers } from "hardhat"; - -import { log } from "lib"; - -type 
Transaction = TransactionResponse | ContractTransactionResponse; -type Receipt = TransactionReceipt | ContractTransactionReceipt; - -export const trace = async (name: string, tx: Transaction) => { - const receipt = await tx.wait(); - - if (!receipt) { - log.error("Failed to trace transaction: no receipt!"); - throw new Error(`Failed to trace transaction for ${name}: no receipt!`); - } - - const network = await tx.provider.getNetwork(); - const config = hre.config.networks[network.name]; - const blockGasLimit = "blockGasLimit" in config ? config.blockGasLimit : 30_000_000; - const gasUsedPercent = (Number(receipt.gasUsed) / blockGasLimit) * 100; - - log.traceTransaction(name, { - from: tx.from, - to: tx.to ?? `New contract @ ${receipt.contractAddress}`, - value: ethers.formatEther(tx.value), - gasUsed: ethers.formatUnits(receipt.gasUsed, "wei"), - gasPrice: ethers.formatUnits(receipt.gasPrice, "gwei"), - gasUsedPercent: `${gasUsedPercent.toFixed(2)}%`, - gasLimit: blockGasLimit.toString(), - nonce: tx.nonce, - blockNumber: receipt.blockNumber, - hash: receipt.hash, - status: !!receipt.status, - }); - - return receipt as T; -}; diff --git a/lib/type.ts b/lib/type.ts deleted file mode 100644 index 1660da4eaf..0000000000 --- a/lib/type.ts +++ /dev/null @@ -1,15 +0,0 @@ -export type ArrayToUnion = A[number]; - -export type TraceableTransaction = { - from: string; - to: string; - value: string; - gasUsed: string; - gasPrice: string; - gasLimit: string; - gasUsedPercent: string; - nonce: number; - blockNumber: number; - hash: string; - status: boolean; -}; diff --git a/package.json b/package.json index 7b926cb78d..23057b7c80 100644 --- a/package.json +++ b/package.json @@ -4,12 +4,11 @@ "description": "Lido on Ethereum is a liquid-staking protocol allowing anyone to earn staking rewards without locking ether or maintaining infrastructure", "license": "GPL-3.0-only", "engines": { - "node": ">=22" + "node": ">=20" }, - "packageManager": "yarn@4.6.0", + "packageManager": 
"yarn@4.5.0", "scripts": { "compile": "hardhat compile", - "cleanup": "hardhat clean", "lint:sol": "solhint 'contracts/**/*.sol'", "lint:sol:fix": "yarn lint:sol --fix", "lint:ts": "eslint . --max-warnings=0", @@ -22,25 +21,24 @@ "test:sequential": "hardhat test test/**/*.test.ts", "test:trace": "hardhat test test/**/*.test.ts --trace --disabletracer", "test:fulltrace": "hardhat test test/**/*.test.ts --fulltrace --disabletracer", - "test:watch": "SKIP_GAS_REPORT=true SKIP_CONTRACT_SIZE=true hardhat watch test", + "test:watch": "hardhat watch", "test:integration": "hardhat test test/integration/**/*.ts", "test:integration:trace": "hardhat test test/integration/**/*.ts --trace --disabletracer", "test:integration:fulltrace": "hardhat test test/integration/**/*.ts --fulltrace --disabletracer", - "test:integration:scratch": "INTEGRATION_WITH_CSM=off INTEGRATION_WITH_SCRATCH_DEPLOY=on hardhat test test/integration/**/*.ts", - "test:integration:scratch:trace": "INTEGRATION_WITH_CSM=off INTEGRATION_WITH_SCRATCH_DEPLOY=on hardhat test test/integration/**/*.ts --trace --disabletracer", - "test:integration:scratch:fulltrace": "INTEGRATION_WITH_CSM=off INTEGRATION_WITH_SCRATCH_DEPLOY=on hardhat test test/integration/**/*.ts --fulltrace --disabletracer", + "test:integration:scratch": "HARDHAT_FORKING_URL= INTEGRATION_WITH_CSM=off INTEGRATION_WITH_SCRATCH_DEPLOY=on hardhat test test/integration/**/*.ts", + "test:integration:scratch:trace": "HARDHAT_FORKING_URL= INTEGRATION_WITH_CSM=off INTEGRATION_WITH_SCRATCH_DEPLOY=on hardhat test test/integration/**/*.ts --trace --disabletracer", + "test:integration:scratch:fulltrace": "HARDHAT_FORKING_URL= INTEGRATION_WITH_CSM=off INTEGRATION_WITH_SCRATCH_DEPLOY=on hardhat test test/integration/**/*.ts --fulltrace --disabletracer", "test:integration:fork:local": "hardhat test test/integration/**/*.ts --network local", "test:integration:fork:mainnet": "hardhat test test/integration/**/*.ts --network mainnet-fork", 
"test:integration:fork:mainnet:custom": "hardhat test --network mainnet-fork", "typecheck": "tsc --noEmit", "prepare": "husky", "abis:extract": "hardhat abis:extract", - "verify:deployed": "hardhat verify:deployed", - "postinstall": "husky" + "verify:deployed": "hardhat verify:deployed" }, "lint-staged": { "./**/*.ts": [ - "eslint --max-warnings=0 --fix" + "eslint --max-warnings=0" ], "./**/*.{ts,md,json}": [ "prettier --write" @@ -50,56 +48,56 @@ ] }, "devDependencies": { - "@commitlint/cli": "19.6.1", - "@commitlint/config-conventional": "19.6.0", - "@eslint/compat": "1.2.5", - "@eslint/js": "9.19.0", - "@nomicfoundation/hardhat-chai-matchers": "2.0.8", - "@nomicfoundation/hardhat-ethers": "3.0.8", - "@nomicfoundation/hardhat-ignition": "0.15.9", - "@nomicfoundation/hardhat-ignition-ethers": "0.15.9", - "@nomicfoundation/hardhat-network-helpers": "1.0.12", - "@nomicfoundation/hardhat-toolbox": "5.0.0", - "@nomicfoundation/hardhat-verify": "2.0.12", - "@nomicfoundation/ignition-core": "0.15.9", - "@typechain/ethers-v6": "0.5.1", - "@typechain/hardhat": "9.1.0", - "@types/chai": "4.3.20", - "@types/eslint": "9.6.1", - "@types/eslint__js": "8.42.3", - "@types/mocha": "10.0.10", - "@types/node": "22.10.10", - "bigint-conversion": "2.4.3", - "chai": "4.5.0", - "chalk": "4.1.2", - "dotenv": "16.4.7", - "eslint": "9.19.0", - "eslint-config-prettier": "9.1.0", - "eslint-plugin-no-only-tests": "3.3.0", - "eslint-plugin-prettier": "5.2.3", + "@commitlint/cli": "^19.6.0", + "@commitlint/config-conventional": "^19.6.0", + "@eslint/compat": "^1.2.3", + "@eslint/js": "^9.15.0", + "@nomicfoundation/hardhat-chai-matchers": "^2.0.8", + "@nomicfoundation/hardhat-ethers": "^3.0.8", + "@nomicfoundation/hardhat-ignition": "^0.15.5", + "@nomicfoundation/hardhat-ignition-ethers": "^0.15.5", + "@nomicfoundation/hardhat-network-helpers": "^1.0.12", + "@nomicfoundation/hardhat-toolbox": "^5.0.0", + "@nomicfoundation/hardhat-verify": "^2.0.11", + "@nomicfoundation/ignition-core": 
"^0.15.5", + "@typechain/ethers-v6": "^0.5.1", + "@typechain/hardhat": "^9.1.0", + "@types/chai": "^4.3.19", + "@types/eslint": "^9.6.1", + "@types/eslint__js": "^8.42.3", + "@types/mocha": "10.0.8", + "@types/node": "20.16.6", + "bigint-conversion": "^2.4.3", + "chai": "^4.5.0", + "chalk": "^4.1.2", + "dotenv": "^16.4.5", + "eslint": "^9.11.1", + "eslint-config-prettier": "^9.1.0", + "eslint-plugin-no-only-tests": "^3.3.0", + "eslint-plugin-prettier": "^5.2.1", "eslint-plugin-simple-import-sort": "12.1.1", - "ethereumjs-util": "7.1.5", - "ethers": "6.13.5", - "glob": "11.0.1", - "globals": "15.14.0", - "hardhat": "2.22.18", - "hardhat-contract-sizer": "2.10.0", - "hardhat-gas-reporter": "1.0.10", - "hardhat-ignore-warnings": "0.2.12", + "ethereumjs-util": "^7.1.5", + "ethers": "^6.13.4", + "glob": "^11.0.0", + "globals": "^15.9.0", + "hardhat": "^2.22.17", + "hardhat-contract-sizer": "^2.10.0", + "hardhat-gas-reporter": "^1.0.10", + "hardhat-ignore-warnings": "^0.2.12", "hardhat-tracer": "3.1.0", "hardhat-watcher": "2.5.0", - "husky": "9.1.7", - "lint-staged": "15.4.3", - "prettier": "3.4.2", - "prettier-plugin-solidity": "1.4.2", - "solhint": "5.0.5", - "solhint-plugin-lido": "0.0.4", - "solidity-coverage": "0.8.14", - "ts-node": "10.9.2", - "tsconfig-paths": "4.2.0", - "typechain": "8.3.2", - "typescript": "5.7.3", - "typescript-eslint": "8.21.0" + "husky": "^9.1.6", + "lint-staged": "^15.2.10", + "prettier": "^3.3.3", + "prettier-plugin-solidity": "^1.4.1", + "solhint": "^5.0.3", + "solhint-plugin-lido": "^0.0.4", + "solidity-coverage": "^0.8.13", + "ts-node": "^10.9.2", + "tsconfig-paths": "^4.2.0", + "typechain": "^8.3.2", + "typescript": "^5.6.2", + "typescript-eslint": "^8.7.0" }, "dependencies": { "@aragon/apps-agent": "2.1.0", @@ -111,7 +109,6 @@ "@aragon/os": "4.4.0", "@openzeppelin/contracts": "3.4.0", "@openzeppelin/contracts-v4.4": "npm:@openzeppelin/contracts@4.4.1", - "@openzeppelin/contracts-v5.2": "npm:@openzeppelin/contracts@5.2.0", 
"openzeppelin-solidity": "2.0.0" } } diff --git a/scripts/defaults/testnet-defaults.json b/scripts/defaults/testnet-defaults.json index 1a2e0426b2..562cb7f723 100644 --- a/scripts/defaults/testnet-defaults.json +++ b/scripts/defaults/testnet-defaults.json @@ -90,7 +90,7 @@ }, "validatorsExitBusOracle": { "deployParameters": { - "consensusVersion": 1 + "consensusVersion": 2 } }, "depositSecurityModule": { @@ -138,7 +138,7 @@ }, "simpleDvt": { "deployParameters": { - "stakingModuleTypeId": "simple-dvt-onchain-v1", + "stakingModuleTypeId": "curated-onchain-v1", "stuckPenaltyDelay": 432000 } }, diff --git a/scripts/scratch/steps/0000-populate-deploy-artifact-from-env.ts b/scripts/scratch/steps/0000-populate-deploy-artifact-from-env.ts index c73a896818..50b4e03463 100644 --- a/scripts/scratch/steps/0000-populate-deploy-artifact-from-env.ts +++ b/scripts/scratch/steps/0000-populate-deploy-artifact-from-env.ts @@ -17,6 +17,7 @@ export async function main() { const deployer = ethers.getAddress(getEnvVariable("DEPLOYER")); const gateSealFactoryAddress = getEnvVariable("GATE_SEAL_FACTORY", ""); const genesisTime = parseInt(getEnvVariable("GENESIS_TIME")); + const slotsPerEpoch = parseInt(getEnvVariable("SLOTS_PER_EPOCH", "32"), 10); const depositContractAddress = getEnvVariable("DEPOSIT_CONTRACT", ""); const withdrawalQueueBaseUri = getEnvVariable("WITHDRAWAL_QUEUE_BASE_URI", ""); const dsmPredefinedAddress = getEnvVariable("DSM_PREDEFINED_ADDRESS", ""); @@ -29,7 +30,7 @@ export async function main() { state.deployer = deployer; // Update state with new values from environment variables - state.chainSpec = { ...state.chainSpec, genesisTime }; + state.chainSpec = { ...state.chainSpec, genesisTime, slotsPerEpoch }; if (depositContractAddress) { state.chainSpec.depositContract = ethers.getAddress(depositContractAddress); diff --git a/scripts/scratch/steps/0120-initialize-non-aragon-contracts.ts b/scripts/scratch/steps/0120-initialize-non-aragon-contracts.ts index 
f16e93c5fd..d1cb11e0e6 100644 --- a/scripts/scratch/steps/0120-initialize-non-aragon-contracts.ts +++ b/scripts/scratch/steps/0120-initialize-non-aragon-contracts.ts @@ -110,6 +110,10 @@ export async function main() { { from: deployer }, ); + // Initialize WithdrawalVault + const withdrawalVault = await loadContract("WithdrawalVault", withdrawalVaultAddress); + await makeTx(withdrawalVault, "initialize", [], { from: deployer }); + // Initialize WithdrawalQueue const withdrawalQueue = await loadContract("WithdrawalQueueERC721", withdrawalQueueAddress); await makeTx(withdrawalQueue, "initialize", [withdrawalQueueAdmin], { from: deployer }); diff --git a/tasks/index.ts b/tasks/index.ts index 04b17d7c9b..570db57d5a 100644 --- a/tasks/index.ts +++ b/tasks/index.ts @@ -1,3 +1,4 @@ -export * from "./verify-contracts"; -export * from "./extract-abis"; -export * from "./solidity-get-source"; +import "./logger"; +import "./solidity-get-source"; +import "./extract-abis"; +import "./verify-contracts"; diff --git a/tasks/logger.ts b/tasks/logger.ts new file mode 100644 index 0000000000..ecd0c75e88 --- /dev/null +++ b/tasks/logger.ts @@ -0,0 +1,158 @@ +import "hardhat/types/runtime"; +import chalk from "chalk"; +import { formatUnits, Interface, TransactionReceipt, TransactionResponse } from "ethers"; +import { extendEnvironment } from "hardhat/config"; +import { HardhatNetworkConfig, HardhatRuntimeEnvironment } from "hardhat/types"; + +const LOG_LEVEL = process.env.LOG_LEVEL || "info"; +const DEFAULT_BLOCK_GAS_LIMIT = 30_000_000; +const FUNCTION_SIGNATURE_LENGTH = 10; + +const interfaceCache = new Map(); +const callCache = new Map(); + +enum TransactionType { + CONTRACT_DEPLOYMENT = "Contract deployment", + ETH_TRANSFER = "ETH transfer", + CONTRACT_CALL = "Contract call", +} + +type Call = { + contract: string; + function: string; +}; + +function outputTransaction( + tx: TransactionResponse, + txType: TransactionType, + receipt: TransactionReceipt, + call: Call, + gasLimit: 
number, + gasPrice: string, +): void { + const gasUsedPercent = (Number(receipt.gasUsed) * 100) / gasLimit; + + const txHash = chalk.yellow(receipt.hash); + const txFrom = chalk.cyan(tx.from); + const txTo = chalk.cyan(tx.to || receipt.contractAddress); + const txGasPrice = chalk.yellow(gasPrice); + const txGasLimit = chalk.yellow(gasLimit); + const txGasUsed = chalk.yellow(`${receipt.gasUsed} (${gasUsedPercent.toFixed(2)}%)`); + const txBlock = chalk.yellow(receipt.blockNumber); + const txNonce = chalk.yellow(tx.nonce); + const txStatus = receipt.status ? chalk.green("confirmed") : chalk.red("failed"); + const txContract = chalk.cyan(call.contract || "Contract deployment"); + const txFunction = chalk.cyan(call.function || ""); + const txCall = `${txContract}.${txFunction}`; + + console.log(`Transaction sent: ${txHash}`); + console.log(` From: ${txFrom} To: ${txTo}`); + console.log(` Gas price: ${txGasPrice} gwei Gas limit: ${txGasLimit} Gas used: ${txGasUsed}`); + console.log(` Block: ${txBlock} Nonce: ${txNonce}`); + + if (txType === TransactionType.CONTRACT_DEPLOYMENT) { + console.log(` Contract deployed: ${chalk.cyan(receipt.contractAddress)}`); + } else if (txType === TransactionType.ETH_TRANSFER) { + console.log(` ETH transfer: ${chalk.yellow(tx.value)}`); + } else { + console.log(` ${txCall} ${txStatus}`); + } + console.log(); +} + +// Transaction Processing +async function getCall(tx: TransactionResponse, hre: HardhatRuntimeEnvironment): Promise { + if (!tx.data || tx.data === "0x" || !tx.to) return { contract: "", function: "" }; + + const cacheKey = `${tx.to}-${tx.data.slice(0, FUNCTION_SIGNATURE_LENGTH)}`; + if (callCache.has(cacheKey)) { + return callCache.get(cacheKey)!; + } + + try { + const call = await extractCallDetails(tx, hre); + callCache.set(cacheKey, call); + return call; + } catch (error) { + console.warn("Error getting call details:", error); + const fallbackCall = { contract: tx.data.slice(0, FUNCTION_SIGNATURE_LENGTH), function: "" }; + 
callCache.set(cacheKey, fallbackCall); + return fallbackCall; + } +} + +async function extractCallDetails(tx: TransactionResponse, hre: HardhatRuntimeEnvironment): Promise { + try { + const artifacts = await hre.artifacts.getAllFullyQualifiedNames(); + for (const name of artifacts) { + const iface = await getOrCreateInterface(name, hre); + const result = iface.parseTransaction({ data: tx.data }); + if (result) { + return { + contract: name.split(":").pop() || "", + function: result.name || "", + }; + } + } + } catch { + // Ignore errors and return empty call + } + + return { contract: "", function: "" }; +} + +async function getOrCreateInterface(artifactName: string, hre: HardhatRuntimeEnvironment) { + if (interfaceCache.has(artifactName)) { + return interfaceCache.get(artifactName)!; + } + + const artifact = await hre.artifacts.readArtifact(artifactName); + const iface = new Interface(artifact.abi); + interfaceCache.set(artifactName, iface); + return iface; +} + +async function getTxType(tx: TransactionResponse, receipt: TransactionReceipt): Promise { + if (receipt.contractAddress) return TransactionType.CONTRACT_DEPLOYMENT; + if (!tx.data || tx.data === "0x") return TransactionType.ETH_TRANSFER; + return TransactionType.CONTRACT_CALL; +} + +async function logTransaction(tx: TransactionResponse, hre: HardhatRuntimeEnvironment) { + const receipt = await tx.wait(); + if (!receipt) throw new Error("Transaction receipt not found"); + + try { + const network = await tx.provider.getNetwork(); + const config = hre.config.networks[network.name] as HardhatNetworkConfig; + const gasLimit = config.blockGasLimit ?? 
DEFAULT_BLOCK_GAS_LIMIT; + + const txType = await getTxType(tx, receipt); + const call = await getCall(tx, hre); + const gasPrice = formatUnits(receipt.gasPrice || 0n, "gwei"); + + outputTransaction(tx, txType, receipt, call, gasLimit, gasPrice); + + return receipt; + } catch (error) { + console.error("Error logging transaction:", error); + return receipt; + } +} + +extendEnvironment((hre: HardhatRuntimeEnvironment) => { + if (LOG_LEVEL != "debug" && LOG_LEVEL != "all") return; + + const originalSendTransaction = hre.ethers.provider.send; + + hre.ethers.provider.send = async function (method: string, params: unknown[]) { + const result = await originalSendTransaction.apply(this, [method, params]); + + if (method === "eth_sendTransaction" || method === "eth_sendRawTransaction") { + const tx = (await this.getTransaction(result)) as TransactionResponse; + await logTransaction(tx, hre); + } + + return result; + }; +}); diff --git a/tasks/verify-contracts.ts b/tasks/verify-contracts.ts index 1169170841..c039406656 100644 --- a/tasks/verify-contracts.ts +++ b/tasks/verify-contracts.ts @@ -2,13 +2,12 @@ import fs from "node:fs/promises"; import path from "node:path"; import { task } from "hardhat/config"; -import { HardhatRuntimeEnvironment, TaskArguments } from "hardhat/types"; +import { HardhatRuntimeEnvironment } from "hardhat/types"; import { cy, log, yl } from "lib/log"; type DeployedContract = { contract: string; - contractName?: string; address: string; constructorArgs: unknown[]; }; @@ -27,16 +26,13 @@ type NetworkState = { const errors = [] as string[]; -task("verify:deployed", "Verifies deployed contracts based on state file") - .addOptionalParam("file", "Path to network state file") - .setAction(async (taskArgs: TaskArguments, hre: HardhatRuntimeEnvironment) => { +task("verify:deployed", "Verifies deployed contracts based on state file").setAction( + async (_: unknown, hre: HardhatRuntimeEnvironment) => { try { const network = hre.network.name; log("Verifying 
contracts for network:", network); - const networkStateFile = taskArgs.file ?? `deployed-${network}.json`; - log("Using network state file:", networkStateFile); - + const networkStateFile = `deployed-${network}.json`; const networkStateFilePath = path.resolve("./", networkStateFile); const data = await fs.readFile(networkStateFilePath, "utf8"); const networkState = JSON.parse(data) as NetworkState; @@ -47,12 +43,6 @@ task("verify:deployed", "Verifies deployed contracts based on state file") // Not using Promise.all to avoid logging messages out of order for (const contract of deployedContracts) { - if (!contract.contract || !contract.address) { - log.error("Invalid contract:", contract); - log.emptyLine(); - continue; - } - await verifyContract(contract, hre); } } catch (error) { @@ -64,15 +54,18 @@ task("verify:deployed", "Verifies deployed contracts based on state file") log.error(`Failed to verify ${errors.length} contract(s):`, errors as string[]); process.exitCode = errors.length; } - }); + }, +); async function verifyContract(contract: DeployedContract, hre: HardhatRuntimeEnvironment) { - log.splitter(); - - const contractName = contract.contractName ?? contract.contract.split("/").pop()?.split(".")[0]; + if (!contract.contract) { + // TODO: In the case of state processing on the local devnet there are skips, we need to find the cause + return; + } + const contractName = contract.contract.split("/").pop()?.split(".")[0]; const verificationParams = { address: contract.address, - constructorArguments: contract.constructorArgs ?? 
[], + constructorArguments: contract.constructorArgs, contract: `${contract.contract}:${contractName}`, }; diff --git a/test/0.8.25/Accounting.t.sol b/test/0.8.25/Accounting.t.sol new file mode 100644 index 0000000000..c54bb9734f --- /dev/null +++ b/test/0.8.25/Accounting.t.sol @@ -0,0 +1,503 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity ^0.8.0; + +import {Vm} from "forge-std/Vm.sol"; +import {CommonBase} from "forge-std/Base.sol"; +import {StdCheats} from "forge-std/StdCheats.sol"; +import {StdUtils} from "forge-std/StdUtils.sol"; +import {console2} from "forge-std/console2.sol"; + +import {BaseProtocolTest} from "./Protocol__Deployment.t.sol"; +import {LimitsList} from "contracts/0.8.9/sanity_checks/OracleReportSanityChecker.sol"; +import {ReportValues} from "contracts/common/interfaces/ReportValues.sol"; + +interface IStakingRouter { + function getRecipients() external view returns (address[] memory); +} + +interface IAccounting { + function handleOracleReport(ReportValues memory _report) external; + + function simulateOracleReport(ReportValues memory _report, uint256 _withdrawalShareRate) external; +} + +interface ILido { + function getTotalShares() external view returns (uint256); + + function getTotalPooledEther() external view returns (uint256); + + function getBufferedEther() external view returns (uint256); + + function getExternalShares() external view returns (uint256); + + function getPooledEthByShares(uint256 _sharesAmount) external view returns (uint256); + + function resume() external; + + function getBeaconStat() + external + view + returns (uint256 depositedValidators, uint256 beaconValidators, uint256 beaconBalance); +} + +interface ISecondOpinionOracleMock { + function mock__setReportValues( + bool _success, + uint256 _clBalanceGwei, + uint256 _withdrawalVaultBalanceWei, + uint256 _totalDepositedValidators, + uint256 _totalExitedValidators + ) external; +} + +// 0.002792 * 10^18 +// 0.0073 * 10^18 
+uint256 constant maxYieldPerOperatorWei = 2_792_000_000_000_000; // which % of slashing could be? +uint256 constant maxLossPerOperatorWei = 7_300_000_000_000_000; +uint256 constant stableBalanceWei = 32 * 1 ether; + +struct FuzzValues { + uint256 _preClValidators; + uint256 _preClBalanceWei; + uint256 _clValidators; + uint256 _clBalanceWei; + uint256 _withdrawalVaultBalance; + uint256 _elRewardsVaultBalanceWei; + uint256 _sharesRequestedToBurn; + uint256 _lidoExecutionLayerRewardVaultWei; +} + +struct LidoTransfer { + address from; + address to; +} + +contract AccountingHandler is CommonBase, StdCheats, StdUtils { + struct Ghost { + int256 clValidators; + int256 depositedValidators; + int256 sharesMintAsFees; + int256 transferShares; + int256 totalRewardsWei; + int256 principalClBalanceWei; + int256 unifiedClBalanceWei; + } + + struct BoundaryValues { + uint256 minPreClValidators; + uint256 maxPreClValidators; + uint256 minClValidators; + uint256 maxClValidators; + uint256 minClBalanceWei; + uint256 maxClBalanceWei; + uint256 minDepositedValidators; + uint256 maxDepositedValidators; + uint256 minElRewardsVaultBalanceWei; + uint256 maxElRewardsVaultBalanceWei; + } + + IAccounting private accounting; + ILido private lido; + ISecondOpinionOracleMock private secondOpinionOracle; + IStakingRouter public stakingRouter; + + Ghost public ghost; + LidoTransfer[] public ghost_lidoTransfers; + BoundaryValues public boundaryValues; + + address private accountingOracle; + address private lidoExecutionLayerRewardVault; + address private burner; + LimitsList public limitList; + + constructor( + address _accounting, + address _lido, + address _accountingOracle, + LimitsList memory _limitList, + address _lidoExecutionLayerRewardVault, + address _secondOpinionOracle, + address _burnerAddress, + address _stakingRouter + ) { + accounting = IAccounting(_accounting); + lido = ILido(_lido); + accountingOracle = _accountingOracle; + limitList = _limitList; + lidoExecutionLayerRewardVault 
= _lidoExecutionLayerRewardVault; + + ghost = Ghost(0, 0, 0, 0, 0, 0, 0); + secondOpinionOracle = ISecondOpinionOracleMock(_secondOpinionOracle); + burner = _burnerAddress; + stakingRouter = IStakingRouter(_stakingRouter); + + // Initialize boundary values with extreme values + boundaryValues = BoundaryValues({ + minPreClValidators: type(uint256).max, + maxPreClValidators: 0, + minClValidators: type(uint256).max, + maxClValidators: 0, + minClBalanceWei: type(uint256).max, + maxClBalanceWei: 0, + minDepositedValidators: type(uint256).max, + maxDepositedValidators: 0, + minElRewardsVaultBalanceWei: type(uint256).max, + maxElRewardsVaultBalanceWei: 0 + }); + } + + function cutGwei(uint256 value) public pure returns (uint256) { + return (value / 1 gwei) * 1 gwei; + } + + function handleOracleReport(FuzzValues memory fuzz) external { + uint256 _timeElapsed = 86_400; + uint256 _timestamp = block.timestamp + _timeElapsed; + + // cheatCode for + // if (_report.timestamp >= block.timestamp) revert IncorrectReportTimestamp(_report.timestamp, block.timestamp); + vm.warp(_timestamp + 1); + + fuzz._lidoExecutionLayerRewardVaultWei = bound(fuzz._lidoExecutionLayerRewardVaultWei, 0, 1_000) * 1 ether; + fuzz._elRewardsVaultBalanceWei = bound( + fuzz._elRewardsVaultBalanceWei, + 0, + fuzz._lidoExecutionLayerRewardVaultWei + ); + + // Update boundary values for elRewardsVaultBalanceWei + if (fuzz._elRewardsVaultBalanceWei < boundaryValues.minElRewardsVaultBalanceWei) { + boundaryValues.minElRewardsVaultBalanceWei = fuzz._elRewardsVaultBalanceWei; + } + if (fuzz._elRewardsVaultBalanceWei > boundaryValues.maxElRewardsVaultBalanceWei) { + boundaryValues.maxElRewardsVaultBalanceWei = fuzz._elRewardsVaultBalanceWei; + } + + fuzz._preClValidators = bound(fuzz._preClValidators, 250_000, 100_000_000_000); + fuzz._preClBalanceWei = cutGwei(fuzz._preClValidators * stableBalanceWei); + + // Update boundary values for preClValidators + if (fuzz._preClValidators < 
boundaryValues.minPreClValidators) { + boundaryValues.minPreClValidators = fuzz._preClValidators; + } + if (fuzz._preClValidators > boundaryValues.maxPreClValidators) { + boundaryValues.maxPreClValidators = fuzz._preClValidators; + } + + ghost.clValidators = int256(fuzz._preClValidators); + + fuzz._clValidators = bound( + fuzz._clValidators, + fuzz._preClValidators, + fuzz._preClValidators + limitList.appearedValidatorsPerDayLimit + ); + + // Update boundary values for clValidators + if (fuzz._clValidators < boundaryValues.minClValidators) { + boundaryValues.minClValidators = fuzz._clValidators; + } + if (fuzz._clValidators > boundaryValues.maxClValidators) { + boundaryValues.maxClValidators = fuzz._clValidators; + } + + uint256 minBalancePerValidatorWei = fuzz._clValidators * (stableBalanceWei - maxLossPerOperatorWei); + uint256 maxBalancePerValidatorWei = fuzz._clValidators * (stableBalanceWei + maxYieldPerOperatorWei); + fuzz._clBalanceWei = bound(fuzz._clBalanceWei, minBalancePerValidatorWei, maxBalancePerValidatorWei); + + // Update boundary values for clBalanceWei + if (fuzz._clBalanceWei < boundaryValues.minClBalanceWei) { + boundaryValues.minClBalanceWei = fuzz._clBalanceWei; + } + if (fuzz._clBalanceWei > boundaryValues.maxClBalanceWei) { + boundaryValues.maxClBalanceWei = fuzz._clBalanceWei; + } + + // depositedValidators is always greater or equal to beaconValidators + // Todo: Upper extremum ? 
+ uint256 depositedValidators = bound( + fuzz._preClValidators, + fuzz._clValidators + 1, + fuzz._clValidators + limitList.appearedValidatorsPerDayLimit + ); + + // Update boundary values for depositedValidators + if (depositedValidators < boundaryValues.minDepositedValidators) { + boundaryValues.minDepositedValidators = depositedValidators; + } + if (depositedValidators > boundaryValues.maxDepositedValidators) { + boundaryValues.maxDepositedValidators = depositedValidators; + } + + ghost.depositedValidators = int256(depositedValidators); + + vm.store(address(lido), keccak256("lido.Lido.depositedValidators"), bytes32(depositedValidators)); + vm.store(address(lido), keccak256("lido.Lido.beaconValidators"), bytes32(fuzz._preClValidators)); + vm.store(address(lido), keccak256("lido.Lido.beaconBalance"), bytes32(fuzz._preClBalanceWei)); + + vm.deal(lidoExecutionLayerRewardVault, fuzz._lidoExecutionLayerRewardVaultWei); + + ReportValues memory currentReport = ReportValues({ + timestamp: _timestamp, + timeElapsed: _timeElapsed, + clValidators: fuzz._clValidators, + clBalance: (fuzz._clBalanceWei / 1e9) * 1e9, + elRewardsVaultBalance: fuzz._elRewardsVaultBalanceWei, + withdrawalVaultBalance: 0, + sharesRequestedToBurn: 0, + withdrawalFinalizationBatches: new uint256[](0), + vaultValues: new uint256[](0), + inOutDeltas: new int256[](0) + }); + + ghost.unifiedClBalanceWei = int256(fuzz._clBalanceWei + currentReport.withdrawalVaultBalance); // ? 
+ ghost.principalClBalanceWei = int256( + fuzz._preClBalanceWei + (currentReport.clValidators - fuzz._preClValidators) * stableBalanceWei + ); + + ghost.totalRewardsWei = + ghost.unifiedClBalanceWei - + ghost.principalClBalanceWei + + int256(fuzz._elRewardsVaultBalanceWei); + + secondOpinionOracle.mock__setReportValues( + true, + fuzz._clBalanceWei / 1e9, + currentReport.withdrawalVaultBalance, + uint256(ghost.depositedValidators), + 0 + ); + + vm.prank(accountingOracle); + + delete ghost_lidoTransfers; + vm.recordLogs(); + accounting.handleOracleReport(currentReport); + Vm.Log[] memory entries = vm.getRecordedLogs(); + + bytes32 totalSharesSignature = keccak256("Mock__MintedTotalShares(uint256)"); + bytes32 transferSharesSignature = keccak256("TransferShares(address,address,uint256)"); + bytes32 lidoTransferSignature = keccak256("Transfer(address,address,uint256)"); + + for (uint256 i = 0; i < entries.length; i++) { + if (entries[i].topics[0] == totalSharesSignature) { + ghost.sharesMintAsFees = int256(abi.decode(abi.encodePacked(entries[i].topics[1]), (uint256))); + } + + if (entries[i].topics[0] == transferSharesSignature) { + ghost.transferShares = int256(abi.decode(entries[i].data, (uint256))); + } + + if (entries[i].topics[0] == lidoTransferSignature) { + if (entries[i].emitter == address(lido)) { + address from = abi.decode(abi.encodePacked(entries[i].topics[1]), (address)); + address to = abi.decode(abi.encodePacked(entries[i].topics[2]), (address)); + + ghost_lidoTransfers.push(LidoTransfer({from: from, to: to})); + } + } + } + } + + function getGhost() public view returns (Ghost memory) { + return ghost; + } + + function getLidoTransfers() public view returns (LidoTransfer[] memory) { + return ghost_lidoTransfers; + } + + function getBoundaryValues() public view returns (BoundaryValues memory) { + return boundaryValues; + } +} + +contract AccountingTest is BaseProtocolTest { + AccountingHandler private accountingHandler; + + uint256 private 
protocolStartBalance = 1 ether; + + address private rootAccount = address(0x123); + address private userAccount = address(0x321); + + mapping(address => bool) public possibleLidoRecipients; + + function setUp() public { + BaseProtocolTest.setUpProtocol(protocolStartBalance, rootAccount, userAccount); + + accountingHandler = new AccountingHandler( + lidoLocator.accounting(), + lidoLocator.lido(), + lidoLocator.accountingOracle(), + limitList, + lidoLocator.elRewardsVault(), + address(secondOpinionOracleMock), + lidoLocator.burner(), + lidoLocator.stakingRouter() + ); + + // Set target contract to the accounting handler + targetContract(address(accountingHandler)); + + vm.prank(userAccount); + lidoContract.resume(); + + possibleLidoRecipients[lidoLocator.burner()] = true; + possibleLidoRecipients[lidoLocator.treasury()] = true; + + for (uint256 i = 0; i < accountingHandler.stakingRouter().getRecipients().length; i++) { + possibleLidoRecipients[accountingHandler.stakingRouter().getRecipients()[i]] = true; + } + + // Set target selectors to the accounting handler + bytes4[] memory selectors = new bytes4[](1); + selectors[0] = accountingHandler.handleOracleReport.selector; + + targetSelector(FuzzSelector({addr: address(accountingHandler), selectors: selectors})); + } + + function logBoundaryValues() internal view { + AccountingHandler.BoundaryValues memory bounds = accountingHandler.getBoundaryValues(); + console2.log("Boundary Values:"); + console2.log("PreClValidators min:", bounds.minPreClValidators); + console2.log("PreClValidators max:", bounds.maxPreClValidators); + console2.log("ClValidators min:", bounds.minClValidators); + console2.log("ClValidators max:", bounds.maxClValidators); + console2.log("ClBalanceWei min:", bounds.minClBalanceWei); + console2.log("ClBalanceWei max:", bounds.maxClBalanceWei); + console2.log("DepositedValidators min:", bounds.minDepositedValidators); + console2.log("DepositedValidators max:", bounds.maxDepositedValidators); + 
console2.log("ElRewardsVaultBalanceWei min:", bounds.minElRewardsVaultBalanceWei); + console2.log("ElRewardsVaultBalanceWei max:", bounds.maxElRewardsVaultBalanceWei); + } + + /** + * https://book.getfoundry.sh/reference/config/inline-test-config#in-line-invariant-configs + * forge-config: default.invariant.runs = 256 + * forge-config: default.invariant.depth = 256 + * forge-config: default.invariant.fail-on-revert = true + */ + function invariant_clValidatorNotDecreased() public view { + ILido lido = ILido(lidoLocator.lido()); + + (uint256 depositedValidators, uint256 clValidators, ) = lido.getBeaconStat(); + + // Should not be able to decrease validator number + assertGe(clValidators, uint256(accountingHandler.getGhost().clValidators)); + assertEq(depositedValidators, uint256(accountingHandler.getGhost().depositedValidators)); + + logBoundaryValues(); + } + + /** + * 0 OR 10% OF PROTOCOL FEES SHOULD BE REPORTED (Collect total fees from reports in handler) + * CLb + ELr <= 10% + * + * https://book.getfoundry.sh/reference/config/inline-test-config#in-line-invariant-configs + * forge-config: default.invariant.runs = 256 + * forge-config: default.invariant.depth = 256 + * forge-config: default.invariant.fail-on-revert = true + */ + function invariant_NonNegativeRebase() public view { + ILido lido = ILido(lidoLocator.lido()); + + AccountingHandler.Ghost memory ghost = accountingHandler.getGhost(); + + bool isRebasePositive = ghost.unifiedClBalanceWei > ghost.principalClBalanceWei; + if (isRebasePositive) { + if (ghost.sharesMintAsFees < 0) { + revert("sharesMintAsFees < 0"); + } + + if (ghost.transferShares < 0) { + revert("transferShares < 0"); + } + + int256 treasuryFeesETH = int256(lido.getPooledEthByShares(uint256(ghost.sharesMintAsFees))); + int256 reportRewardsMintedETH = int256(lido.getPooledEthByShares(uint256(ghost.transferShares))); + int256 totalFees = int256(treasuryFeesETH + reportRewardsMintedETH); + int256 totalRewards = ghost.totalRewardsWei; + + if 
(totalRewards != 0) { + int256 percents = (totalFees * 100) / totalRewards; + + assertTrue(percents <= 10, "all distributed rewards > 10%"); + assertTrue(percents >= 0, "all distributed rewards < 0%"); + } + } else { + console2.log("Negative rebase. Skipping report", ghost.totalRewardsWei / 1 ether); + } + + logBoundaryValues(); + } + + /** + * Lido.Transfer from (0x00, to treasure or burner. Other -> collect and check what is it) + * https://book.getfoundry.sh/reference/config/inline-test-config#in-line-invariant-configs + * forge-config: default.invariant.runs = 256 + * forge-config: default.invariant.depth = 256 + * forge-config: default.invariant.fail-on-revert = true + */ + function invariant_LidoTransfers() public view { + LidoTransfer[] memory lidoTransfers = accountingHandler.getLidoTransfers(); + + for (uint256 i = 0; i < lidoTransfers.length; i++) { + assertEq(lidoTransfers[i].from, address(0), "Lido.Transfer sender is not zero"); + assertTrue( + possibleLidoRecipients[lidoTransfers[i].to], + "Lido.Transfer recipient is not possibleLidoRecipients" + ); + } + + logBoundaryValues(); + } + + /** + * solvency - stETH <> ETH = 1:1 - internal and total share rates are equal + * vault params do not affect protocol share rate + * + * https://book.getfoundry.sh/reference/config/inline-test-config#in-line-invariant-configs + * forge-config: default.invariant.runs = 256 + * forge-config: default.invariant.depth = 256 + * forge-config: default.invariant.fail-on-revert = true + */ + function invariant_vaultsDonAffectSharesRate() public view { + ILido lido = ILido(lidoLocator.lido()); + + uint256 totalPooledEther = lido.getTotalPooledEther(); + uint256 bufferedEther = lido.getBufferedEther(); + uint256 totalShares = lido.getTotalShares(); + uint256 externalShares = lido.getExternalShares(); + + uint256 totalShareRate = totalPooledEther / totalShares; + + console2.log("bufferedEther", bufferedEther); + console2.log("totalPooledEther", totalPooledEther); + 
console2.log("totalShares", totalShares); + console2.log("totalShareRate", totalShareRate); + + // Get transient ether + (uint256 depositedValidators, uint256 clValidators, uint256 clBalance) = lido.getBeaconStat(); + // clValidators can never be less than deposited ones. + uint256 transientEther = (depositedValidators - clValidators) * 32 ether; + console2.log("transientEther", transientEther); + + // Calculate internal ether + uint256 internalEther = bufferedEther + clBalance + transientEther; + console2.log("internalEther", internalEther); + + // Calculate internal shares + uint256 internalShares = totalShares - externalShares; + console2.log("internalShares", internalShares); + console2.log("getExternalShares", externalShares); + + uint256 internalShareRate = internalEther / internalShares; + + console2.log("internalShareRate", internalShareRate); + + assertEq(totalShareRate, internalShareRate); + + logBoundaryValues(); + } +} diff --git a/test/0.8.25/Protocol__Deployment.t.sol b/test/0.8.25/Protocol__Deployment.t.sol new file mode 100644 index 0000000000..6a56f98c28 --- /dev/null +++ b/test/0.8.25/Protocol__Deployment.t.sol @@ -0,0 +1,303 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity ^0.8.0; + +import "forge-std/Test.sol"; +import {CommonBase} from "forge-std/Base.sol"; +import {StdUtils} from "forge-std/StdUtils.sol"; +import {Vm} from "forge-std/Vm.sol"; +import {console2} from "forge-std/console2.sol"; +import {StdCheats} from "forge-std/StdCheats.sol"; + +import {ILidoLocator} from "contracts/common/interfaces/ILidoLocator.sol"; +import {LimitsList} from "contracts/0.8.9/sanity_checks/OracleReportSanityChecker.sol"; + +import {StakingRouter__MockForLidoAccountingFuzzing} from "./contracts/StakingRouter__MockForLidoAccountingFuzzing.sol"; +import {SecondOpinionOracle__MockForAccountingFuzzing} from "./contracts/SecondOpinionOracle__MockForAccountingFuzzing.sol"; + +interface IAccounting { + function 
initialize(address _admin) external; +} + +interface ILido { + function getTotalShares() external view returns (uint256); + + function getExternalShares() external view returns (uint256); + + function mintExternalShares(address _recipient, uint256 _amountOfShares) external; + + function burnExternalShares(uint256 _amountOfShares) external; + + function setMaxExternalRatioBP(uint256 _maxExternalRatioBP) external; + + function initialize(address _lidoLocator, address _eip712StETH) external payable; + + function resumeStaking() external; + + function resume() external; + + function setStakingLimit(uint256 _maxStakeLimit, uint256 _stakeLimitIncreasePerBlock) external; +} + +interface IKernel { + function acl() external view returns (IACL); + + function newAppInstance( + bytes32 _appId, + address _appBase, + bytes calldata _initializePayload, + bool _setDefault + ) external; +} + +interface IACL { + function initialize(address _permissionsCreator) external; + + function createPermission(address _entity, address _app, bytes32 _role, address _manager) external; + + function hasPermission(address _who, address _where, bytes32 _what) external view returns (bool); +} + +interface IDaoFactory { + function newDAO(address _root) external returns (IKernel); +} + +struct LidoLocatorConfig { + address accountingOracle; + address depositSecurityModule; + address elRewardsVault; + address legacyOracle; + address lido; + address oracleReportSanityChecker; + address postTokenRebaseReceiver; + address burner; + address stakingRouter; + address treasury; + address validatorsExitBusOracle; + address withdrawalQueue; + address withdrawalVault; + address oracleDaemonConfig; + address accounting; + address wstETH; +} + +contract BaseProtocolTest is Test { + ILido public lidoContract; + ILidoLocator public lidoLocator; + IACL public acl; + SecondOpinionOracle__MockForAccountingFuzzing public secondOpinionOracleMock; + IKernel private dao; + + address private rootAccount; + address private 
userAccount; + + address public kernelBase; + address public aclBase; + address public evmScriptRegistryFactory; + address public daoFactoryAdr; + + uint256 public genesisTimestamp = 1_695_902_400; + address private depositContract = address(0x4242424242424242424242424242424242424242); + address public lidoTreasury = makeAddr("dummy-lido:treasury"); + + LimitsList public limitList = + LimitsList({ + exitedValidatorsPerDayLimit: 9000, + appearedValidatorsPerDayLimit: 43200, + annualBalanceIncreaseBPLimit: 10_00, + maxValidatorExitRequestsPerReport: 600, + maxItemsPerExtraDataTransaction: 8, + maxNodeOperatorsPerExtraDataItem: 24, + requestTimestampMargin: 7680, + maxPositiveTokenRebase: 750000, + initialSlashingAmountPWei: 1000, + inactivityPenaltiesAmountPWei: 101, + clBalanceOraclesErrorUpperBPLimit: 50 + }); + + function setUpProtocol(uint256 _startBalance, address _rootAccount, address _userAccount) public { + rootAccount = _rootAccount; + userAccount = _userAccount; + + address impl = deployCode("Lido.sol:Lido"); + + vm.startPrank(rootAccount); + (dao, acl) = createAragonDao(); + address lidoProxyAddress = addAragonApp(dao, impl); + + lidoContract = ILido(lidoProxyAddress); + + /// @dev deal lido contract with start balance + vm.deal(lidoProxyAddress, _startBalance); + + acl.createPermission(userAccount, lidoProxyAddress, keccak256("STAKING_CONTROL_ROLE"), rootAccount); + acl.createPermission(userAccount, lidoProxyAddress, keccak256("STAKING_PAUSE_ROLE"), rootAccount); + acl.createPermission(userAccount, lidoProxyAddress, keccak256("RESUME_ROLE"), rootAccount); + acl.createPermission(userAccount, lidoProxyAddress, keccak256("PAUSE_ROLE"), rootAccount); + + StakingRouter__MockForLidoAccountingFuzzing stakingRouter = new StakingRouter__MockForLidoAccountingFuzzing(); + + uint256[] memory stakingModuleIds = new uint256[](3); + stakingModuleIds[0] = 1; + stakingModuleIds[1] = 2; + stakingModuleIds[2] = 3; + + uint96[] memory stakingModuleFees = new uint96[](3); + 
stakingModuleFees[0] = 4876942047684326532; + stakingModuleFees[1] = 145875332634464962; + stakingModuleFees[2] = 38263043302959438; + + address[] memory recipients = new address[](3); + recipients[0] = 0x55032650b14df07b85bF18A3a3eC8E0Af2e028d5; + recipients[1] = 0xaE7B191A31f627b4eB1d4DaC64eaB9976995b433; + recipients[2] = 0xdA7dE2ECdDfccC6c3AF10108Db212ACBBf9EA83F; + + stakingRouter.mock__getStakingRewardsDistribution( + recipients, + stakingModuleIds, + stakingModuleFees, + 9999999999999999996, + 100000000000000000000 + ); + + /// @dev deploy lido locator with dummy default values + lidoLocator = _deployLidoLocator(lidoProxyAddress, address(stakingRouter)); + + // Add accounting contract with handler to the protocol + address accountingImpl = deployCode( + "Accounting.sol:Accounting", + abi.encode([address(lidoLocator), lidoProxyAddress]) + ); + + deployCodeTo( + "OssifiableProxy.sol:OssifiableProxy", + abi.encode(accountingImpl, rootAccount, new bytes(0)), + lidoLocator.accounting() + ); + + deployCodeTo( + "AccountingOracle.sol:AccountingOracle", + abi.encode( + address(lidoLocator), + lidoLocator.legacyOracle(), + 12, // secondsPerSlot + genesisTimestamp + ), + lidoLocator.accountingOracle() + ); + + // Add burner contract to the protocol + deployCodeTo( + "Burner.sol:Burner", + abi.encode(rootAccount, address(lidoLocator), lidoProxyAddress, 0, 0), + lidoLocator.burner() + ); + + // Add burner contract to the protocol + deployCodeTo( + "LidoExecutionLayerRewardsVault.sol:LidoExecutionLayerRewardsVault", + abi.encode(lidoProxyAddress, lidoTreasury), + lidoLocator.elRewardsVault() + ); + + // Add oracle report sanity checker contract to the protocol + deployCodeTo( + "OracleReportSanityChecker.sol:OracleReportSanityChecker", + abi.encode( + address(lidoLocator), + rootAccount, + [ + limitList.exitedValidatorsPerDayLimit, + limitList.appearedValidatorsPerDayLimit, + limitList.annualBalanceIncreaseBPLimit, + limitList.maxValidatorExitRequestsPerReport, + 
limitList.maxItemsPerExtraDataTransaction, + limitList.maxNodeOperatorsPerExtraDataItem, + limitList.requestTimestampMargin, + limitList.maxPositiveTokenRebase, + limitList.initialSlashingAmountPWei, + limitList.inactivityPenaltiesAmountPWei, + limitList.clBalanceOraclesErrorUpperBPLimit + ] + ), + lidoLocator.oracleReportSanityChecker() + ); + + secondOpinionOracleMock = new SecondOpinionOracle__MockForAccountingFuzzing(); + vm.store( + lidoLocator.oracleReportSanityChecker(), + bytes32(uint256(2)), + bytes32(uint256(uint160(address(secondOpinionOracleMock)))) + ); + + IAccounting(lidoLocator.accounting()).initialize(rootAccount); + + /// @dev deploy eip712steth + address eip712steth = deployCode("EIP712StETH.sol:EIP712StETH", abi.encode(lidoProxyAddress)); + + lidoContract.initialize(address(lidoLocator), address(eip712steth)); + + vm.stopPrank(); + } + + /// @dev create aragon dao and return kernel and acl + function createAragonDao() private returns (IKernel, IACL) { + kernelBase = deployCode("Kernel.sol:Kernel", abi.encode(true)); + aclBase = deployCode("ACL.sol:ACL"); + evmScriptRegistryFactory = deployCode("EVMScriptRegistryFactory.sol:EVMScriptRegistryFactory"); + daoFactoryAdr = deployCode( + "DAOFactory.sol:DAOFactory", + abi.encode(kernelBase, aclBase, evmScriptRegistryFactory) + ); + + IDaoFactory daoFactory = IDaoFactory(daoFactoryAdr); + + vm.recordLogs(); + daoFactory.newDAO(rootAccount); + Vm.Log[] memory logs = vm.getRecordedLogs(); + address daoAddress = abi.decode(logs[logs.length - 1].data, (address)); + + IKernel _dao = IKernel(address(daoAddress)); + IACL _acl = IACL(address(_dao.acl())); + + _acl.createPermission(rootAccount, daoAddress, keccak256("APP_MANAGER_ROLE"), rootAccount); + + return (_dao, _acl); + } + + /// @dev add aragon app to dao and return proxy address + function addAragonApp(IKernel _dao, address _impl) private returns (address) { + vm.recordLogs(); + _dao.newAppInstance(keccak256(bytes("lido.aragonpm.test")), _impl, "", 
false); + Vm.Log[] memory logs = vm.getRecordedLogs(); + + address proxyAddress = abi.decode(logs[logs.length - 1].data, (address)); + + return proxyAddress; + } + + /// @dev deploy lido locator with dummy default values + function _deployLidoLocator(address lido, address stakingRouterAddress) internal returns (ILidoLocator) { + LidoLocatorConfig memory config = LidoLocatorConfig({ + accountingOracle: makeAddr("dummy-locator:accountingOracle"), + depositSecurityModule: makeAddr("dummy-locator:depositSecurityModule"), + elRewardsVault: makeAddr("dummy-locator:elRewardsVault"), + legacyOracle: makeAddr("dummy-locator:legacyOracle"), + lido: lido, + oracleReportSanityChecker: makeAddr("dummy-locator:oracleReportSanityChecker"), + postTokenRebaseReceiver: address(0), + burner: makeAddr("dummy-locator:burner"), + stakingRouter: stakingRouterAddress, + treasury: makeAddr("dummy-locator:treasury"), + validatorsExitBusOracle: makeAddr("dummy-locator:validatorsExitBusOracle"), + withdrawalQueue: makeAddr("dummy-locator:withdrawalQueue"), + withdrawalVault: makeAddr("dummy-locator:withdrawalVault"), + oracleDaemonConfig: makeAddr("dummy-locator:oracleDaemonConfig"), + accounting: makeAddr("dummy-locator:accounting"), + wstETH: makeAddr("dummy-locator:wstETH") + }); + + return ILidoLocator(deployCode("LidoLocator.sol:LidoLocator", abi.encode(config))); + } +} diff --git a/test/0.8.25/ShareRate.t.sol b/test/0.8.25/ShareRate.t.sol new file mode 100644 index 0000000000..45af94b67e --- /dev/null +++ b/test/0.8.25/ShareRate.t.sol @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only +pragma solidity ^0.8.0; + +import "contracts/0.8.9/EIP712StETH.sol"; + +import {CommonBase} from "forge-std/Base.sol"; +import {LidoLocator} from "contracts/0.8.9/LidoLocator.sol"; +import {StdCheats} from "forge-std/StdCheats.sol"; +import {StdUtils} from "forge-std/StdUtils.sol"; +import {Vm} from "forge-std/Vm.sol"; +import {console2} from "forge-std/console2.sol"; 
+ +import {BaseProtocolTest, ILido} from "./Protocol__Deployment.t.sol"; + +contract ShareRateHandler is CommonBase, StdCheats, StdUtils { + ILido public lidoContract; + address public accounting; + address public userAccount; + + uint256 public maxAmountOfShares; + + constructor(ILido _lido, address _accounting, address _userAccount, uint256 _maxAmountOfShares) { + lidoContract = _lido; + accounting = _accounting; + userAccount = _userAccount; + maxAmountOfShares = _maxAmountOfShares; + } + + function mintExternalShares(address _recipient, uint256 _amountOfShares) external { + // we don't want to test the zero address case, as it would revert + vm.assume(_recipient != address(0)); + + _amountOfShares = bound(_amountOfShares, 1, maxAmountOfShares); + // TODO: We need to make this condition work + // _amountOfShares = bound(_amountOfShares, 1, _amountOfShares); + + vm.prank(userAccount); + lidoContract.resumeStaking(); + + vm.prank(accounting); + lidoContract.mintExternalShares(_recipient, _amountOfShares); + } + + function burnExternalShares(uint256 _amountOfShares) external { + uint256 totalShares = lidoContract.getExternalShares(); + if (totalShares != 0) { + _amountOfShares = bound(_amountOfShares, 2, maxAmountOfShares); + } else { + _amountOfShares = 1; + } + + vm.prank(userAccount); + lidoContract.resumeStaking(); + + vm.prank(accounting); + lidoContract.burnExternalShares(_amountOfShares); + } + + function getTotalShares() external view returns (uint256) { + return lidoContract.getTotalShares(); + } +} + +contract ShareRateTest is BaseProtocolTest { + ShareRateHandler public shareRateHandler; + + uint256 private _maxExternalRatioBP = 10_000; + uint256 private _maxStakeLimit = 15_000 ether; + uint256 private _stakeLimitIncreasePerBlock = 20 ether; + uint256 private _maxAmountOfShares = 100; + + uint256 private protocolStartBalance = 15_000 ether; + uint256 private protocolStartExternalShares = 10_000; + + address private rootAccount = address(0x123); + address 
private userAccount = address(0x321); + + function setUp() public { + BaseProtocolTest.setUpProtocol(protocolStartBalance, rootAccount, userAccount); + + address accountingContract = lidoLocator.accounting(); + + vm.startPrank(userAccount); + lidoContract.setMaxExternalRatioBP(_maxExternalRatioBP); + lidoContract.setStakingLimit(_maxStakeLimit, _stakeLimitIncreasePerBlock); + lidoContract.resume(); + vm.stopPrank(); + + shareRateHandler = new ShareRateHandler(lidoContract, accountingContract, userAccount, _maxAmountOfShares); + targetContract(address(shareRateHandler)); + + bytes4[] memory selectors = new bytes4[](2); + selectors[0] = shareRateHandler.mintExternalShares.selector; + selectors[1] = shareRateHandler.burnExternalShares.selector; + // TODO: transfers + // TODO: submit + // TODO: withdrawals request + // TODO: claim + + targetSelector(FuzzSelector({addr: address(shareRateHandler), selectors: selectors})); + + // @dev mint 10000 external shares to simulate some shares already minted, so + // burnExternalShares will be able to actually burn some shares + vm.prank(accountingContract); + lidoContract.mintExternalShares(accountingContract, protocolStartExternalShares); + } + + /** + * https://book.getfoundry.sh/reference/config/inline-test-config#in-line-invariant-configs + * forge-config: default.invariant.runs = 256 + * forge-config: default.invariant.depth = 256 + * forge-config: default.invariant.fail-on-revert = true + * + * TODO: Maybe add an invariant that lido.getExternalShares = startExternalBalance + mintedExternal - burnedExternal? + * So we'll know it something is odd inside a math for external shares? 
+ */ + function invariant_totalShares() public view { + assertEq(lidoContract.getTotalShares(), shareRateHandler.getTotalShares()); + } +} diff --git a/test/0.8.25/contracts/SecondOpinionOracle__MockForAccountingFuzzing.sol b/test/0.8.25/contracts/SecondOpinionOracle__MockForAccountingFuzzing.sol new file mode 100644 index 0000000000..519d67e190 --- /dev/null +++ b/test/0.8.25/contracts/SecondOpinionOracle__MockForAccountingFuzzing.sol @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.9; + +contract SecondOpinionOracle__MockForAccountingFuzzing { + bool private success; + uint256 private clBalanceGwei; + uint256 private withdrawalVaultBalanceWei; + uint256 private totalDepositedValidators; + uint256 private totalExitedValidators; + + function getReport(uint256) external view returns (bool, uint256, uint256, uint256, uint256) { + return (success, clBalanceGwei, withdrawalVaultBalanceWei, totalDepositedValidators, totalExitedValidators); + } + + function mock__setReportValues( + bool _success, + uint256 _clBalanceGwei, + uint256 _withdrawalVaultBalanceWei, + uint256 _totalDepositedValidators, + uint256 _totalExitedValidators + ) external { + success = _success; + clBalanceGwei = _clBalanceGwei; + withdrawalVaultBalanceWei = _withdrawalVaultBalanceWei; + totalDepositedValidators = _totalDepositedValidators; + totalExitedValidators = _totalExitedValidators; + } +} diff --git a/test/0.8.25/contracts/StakingRouter__MockForLidoAccountingFuzzing.sol b/test/0.8.25/contracts/StakingRouter__MockForLidoAccountingFuzzing.sol new file mode 100644 index 0000000000..e861b7f6e9 --- /dev/null +++ b/test/0.8.25/contracts/StakingRouter__MockForLidoAccountingFuzzing.sol @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.9; + +interface IStakingRouter { + struct StakingModule { + uint256 id; + address stakingModuleAddress; + uint96 stakingModuleFee; + uint96 
treasuryFee; + uint256 stakeShareLimit; + uint256 status; + string name; + uint256 lastDepositAt; + uint256 lastDepositBlock; + uint256 exitedValidatorsCount; + uint256 priorityExitShareThreshold; + uint256 maxDepositsPerBlock; + uint256 minDepositBlockDistance; + } +} + +contract StakingRouter__MockForLidoAccountingFuzzing { + event Mock__MintedRewardsReported(); + event Mock__MintedTotalShares(uint256 indexed _totalShares); + + address[] private recipients__mocked; + uint256[] private stakingModuleIds__mocked; + uint96[] private stakingModuleFees__mocked; + uint96 private totalFee__mocked; + uint256 private precisionPoint__mocked; + + function getStakingRewardsDistribution() + public + view + returns ( + address[] memory recipients, + uint256[] memory stakingModuleIds, + uint96[] memory stakingModuleFees, + uint96 totalFee, + uint256 precisionPoints + ) + { + recipients = recipients__mocked; + stakingModuleIds = stakingModuleIds__mocked; + stakingModuleFees = stakingModuleFees__mocked; + totalFee = totalFee__mocked; + precisionPoints = precisionPoint__mocked; + } + + function reportRewardsMinted(uint256[] calldata, uint256[] calldata _totalShares) external { + emit Mock__MintedRewardsReported(); + + uint256 totalShares = 0; + for (uint256 i = 0; i < _totalShares.length; i++) { + totalShares += _totalShares[i]; + } + + emit Mock__MintedTotalShares(totalShares); + } + + function mock__getStakingRewardsDistribution( + address[] calldata _recipients, + uint256[] calldata _stakingModuleIds, + uint96[] calldata _stakingModuleFees, + uint96 _totalFee, + uint256 _precisionPoints + ) external { + recipients__mocked = _recipients; + stakingModuleIds__mocked = _stakingModuleIds; + stakingModuleFees__mocked = _stakingModuleFees; + totalFee__mocked = _totalFee; + precisionPoint__mocked = _precisionPoints; + } + + function getStakingModuleIds() public view returns (uint256[] memory) { + return stakingModuleIds__mocked; + } + + function getRecipients() public view returns 
(address[] memory) { + return recipients__mocked; + } + + function getStakingModule( + uint256 _stakingModuleId + ) public pure returns (IStakingRouter.StakingModule memory stakingModule) { + if (_stakingModuleId >= 4) { + revert("Staking module does not exist"); + } + + if (_stakingModuleId == 1) { + stakingModule = IStakingRouter.StakingModule({ + id: 1, + stakingModuleAddress: 0x55032650b14df07b85bF18A3a3eC8E0Af2e028d5, + stakingModuleFee: 500, + treasuryFee: 500, + stakeShareLimit: 10000, + status: 0, + name: "curated-onchain-v1", + lastDepositAt: 1732694279, + lastDepositBlock: 21277744, + exitedValidatorsCount: 88207, + priorityExitShareThreshold: 10000, + maxDepositsPerBlock: 150, + minDepositBlockDistance: 25 + }); + } + + if (_stakingModuleId == 2) { + stakingModule = IStakingRouter.StakingModule({ + id: 2, + stakingModuleAddress: 0xaE7B191A31f627b4eB1d4DaC64eaB9976995b433, + stakingModuleFee: 800, + treasuryFee: 200, + stakeShareLimit: 400, + status: 0, + name: "SimpleDVT", + lastDepositAt: 1735217831, + lastDepositBlock: 21486781, + exitedValidatorsCount: 5, + priorityExitShareThreshold: 444, + maxDepositsPerBlock: 150, + minDepositBlockDistance: 25 + }); + } + + if (_stakingModuleId == 3) { + stakingModule = IStakingRouter.StakingModule({ + id: 3, + stakingModuleAddress: 0xdA7dE2ECdDfccC6c3AF10108Db212ACBBf9EA83F, + stakingModuleFee: 600, + treasuryFee: 400, + stakeShareLimit: 100, + status: 0, + name: "Community Staking", + lastDepositAt: 1735217387, + lastDepositBlock: 21486745, + exitedValidatorsCount: 104, + priorityExitShareThreshold: 125, + maxDepositsPerBlock: 30, + minDepositBlockDistance: 25 + }); + } + } +} diff --git a/test/0.8.9/lidoLocator.test.ts b/test/0.8.9/lidoLocator.test.ts index 72a2347e34..85f7824329 100644 --- a/test/0.8.9/lidoLocator.test.ts +++ b/test/0.8.9/lidoLocator.test.ts @@ -4,7 +4,7 @@ import { ethers } from "hardhat"; import { LidoLocator } from "typechain-types"; -import { ArrayToUnion, randomAddress } from "lib"; 
+import { randomAddress } from "lib"; const services = [ "accountingOracle", @@ -24,6 +24,7 @@ const services = [ "wstETH", ] as const; +type ArrayToUnion = A[number]; type Service = ArrayToUnion; type Config = Record & { postTokenRebaseReceiver: string; // can be ZeroAddress diff --git a/test/0.8.9/oracle/validator-exit-bus-oracle.accessControl.test.ts b/test/0.8.9/oracle/validator-exit-bus-oracle.accessControl.test.ts new file mode 100644 index 0000000000..53c0e1e297 --- /dev/null +++ b/test/0.8.9/oracle/validator-exit-bus-oracle.accessControl.test.ts @@ -0,0 +1,205 @@ +import { expect } from "chai"; +import { ContractTransactionResponse, ZeroAddress } from "ethers"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { HashConsensus__Harness, ValidatorsExitBus__Harness } from "typechain-types"; + +import { CONSENSUS_VERSION, de0x, numberToHex } from "lib"; + +import { DATA_FORMAT_LIST, deployVEBO, initVEBO } from "test/deploy"; +import { Snapshot } from "test/suite"; + +const PUBKEYS = [ + "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + "0xcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", +]; + +describe("ValidatorsExitBusOracle.sol:accessControl", () => { + let consensus: HashConsensus__Harness; + let oracle: ValidatorsExitBus__Harness; + let admin: HardhatEthersSigner; + let originalState: string; + + let initTx: ContractTransactionResponse; + let oracleVersion: bigint; + let exitRequests: ExitRequest[]; + let reportFields: ReportFields; + let reportItems: ReturnType; + let reportHash: string; + + let member1: HardhatEthersSigner; + let member2: HardhatEthersSigner; + let member3: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + let account1: HardhatEthersSigner; + + 
interface ExitRequest { + moduleId: number; + nodeOpId: number; + valIndex: number; + valPubkey: string; + } + + interface ReportFields { + consensusVersion: bigint; + refSlot: bigint; + requestsCount: number; + dataFormat: number; + data: string; + } + + const calcValidatorsExitBusReportDataHash = (items: ReturnType) => { + const data = ethers.AbiCoder.defaultAbiCoder().encode(["(uint256,uint256,uint256,uint256,bytes)"], [items]); + return ethers.keccak256(data); + }; + + const getValidatorsExitBusReportDataItems = (r: ReportFields) => { + return [r.consensusVersion, r.refSlot, r.requestsCount, r.dataFormat, r.data]; + }; + + const encodeExitRequestHex = ({ moduleId, nodeOpId, valIndex, valPubkey }: ExitRequest) => { + const pubkeyHex = de0x(valPubkey); + expect(pubkeyHex.length).to.equal(48 * 2); + return numberToHex(moduleId, 3) + numberToHex(nodeOpId, 5) + numberToHex(valIndex, 8) + pubkeyHex; + }; + + const encodeExitRequestsDataList = (requests: ExitRequest[]) => { + return "0x" + requests.map(encodeExitRequestHex).join(""); + }; + + const deploy = async () => { + const deployed = await deployVEBO(admin.address); + oracle = deployed.oracle; + consensus = deployed.consensus; + + initTx = await initVEBO({ admin: admin.address, oracle, consensus, resumeAfterDeploy: true }); + + oracleVersion = await oracle.getContractVersion(); + + await consensus.addMember(member1, 1); + await consensus.addMember(member2, 2); + await consensus.addMember(member3, 2); + + const { refSlot } = await consensus.getCurrentFrame(); + exitRequests = [ + { moduleId: 1, nodeOpId: 0, valIndex: 0, valPubkey: PUBKEYS[0] }, + { moduleId: 1, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, + { moduleId: 2, nodeOpId: 0, valIndex: 1, valPubkey: PUBKEYS[2] }, + ]; + + reportFields = { + consensusVersion: CONSENSUS_VERSION, + dataFormat: DATA_FORMAT_LIST, + refSlot: refSlot, + requestsCount: exitRequests.length, + data: encodeExitRequestsDataList(exitRequests), + }; + + reportItems = 
getValidatorsExitBusReportDataItems(reportFields); + reportHash = calcValidatorsExitBusReportDataHash(reportItems); + + await consensus.connect(member1).submitReport(refSlot, reportHash, CONSENSUS_VERSION); + await consensus.connect(member3).submitReport(refSlot, reportHash, CONSENSUS_VERSION); + }; + + before(async () => { + [admin, member1, member2, member3, stranger, account1] = await ethers.getSigners(); + + await deploy(); + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + + afterEach(async () => await Snapshot.restore(originalState)); + + context("deploying", () => { + it("deploying accounting oracle", async () => { + expect(oracle).to.be.not.null; + expect(consensus).to.be.not.null; + expect(initTx).to.be.not.null; + expect(oracleVersion).to.be.not.null; + expect(exitRequests).to.be.not.null; + expect(reportFields).to.be.not.null; + expect(reportItems).to.be.not.null; + expect(reportHash).to.be.not.null; + }); + }); + + context("DEFAULT_ADMIN_ROLE", () => { + context("Admin is set at initialize", () => { + it("should set admin at initialize", async () => { + const DEFAULT_ADMIN_ROLE = await oracle.DEFAULT_ADMIN_ROLE(); + await expect(initTx).to.emit(oracle, "RoleGranted").withArgs(DEFAULT_ADMIN_ROLE, admin, admin); + }); + it("should revert without admin address", async () => { + await expect( + oracle.initialize(ZeroAddress, await consensus.getAddress(), CONSENSUS_VERSION, 0), + ).to.be.revertedWithCustomError(oracle, "AdminCannotBeZero"); + }); + }); + }); + + context("PAUSE_ROLE", () => { + it("should revert without PAUSE_ROLE role", async () => { + await expect(oracle.connect(stranger).pauseFor(0)).to.be.revertedWithOZAccessControlError( + await stranger.getAddress(), + await oracle.PAUSE_ROLE(), + ); + }); + + it("should allow calling from a possessor of PAUSE_ROLE role", async () => { + await oracle.grantRole(await oracle.PAUSE_ROLE(), account1); + + const tx = await oracle.connect(account1).pauseFor(9999); + await 
expect(tx).to.emit(oracle, "Paused").withArgs(9999); + }); + }); + + context("RESUME_ROLE", () => { + it("should revert without RESUME_ROLE role", async () => { + await oracle.connect(admin).pauseFor(9999); + + await expect(oracle.connect(stranger).resume()).to.be.revertedWithOZAccessControlError( + await stranger.getAddress(), + await oracle.RESUME_ROLE(), + ); + }); + + it("should allow calling from a possessor of RESUME_ROLE role", async () => { + await oracle.pauseFor(9999, { from: admin }); + await oracle.grantRole(await oracle.RESUME_ROLE(), account1); + + const tx = await oracle.connect(account1).resume(); + await expect(tx).to.emit(oracle, "Resumed").withArgs(); + }); + }); + + context("SUBMIT_DATA_ROLE", () => { + context("_checkMsgSenderIsAllowedToSubmitData", () => { + it("should revert from not consensus member without SUBMIT_DATA_ROLE role", async () => { + await expect( + oracle.connect(stranger).submitReportData(reportFields, oracleVersion), + ).to.be.revertedWithCustomError(oracle, "SenderNotAllowed"); + }); + + it("should allow calling from a possessor of SUBMIT_DATA_ROLE role", async () => { + await oracle.grantRole(await oracle.SUBMIT_DATA_ROLE(), account1); + const deadline = (await oracle.getConsensusReport()).processingDeadlineTime; + await consensus.setTime(deadline); + + const tx = await oracle.connect(account1).submitReportData(reportFields, oracleVersion); + + await expect(tx).to.emit(oracle, "ProcessingStarted").withArgs(reportFields.refSlot, reportHash); + }); + + it("should allow calling from a member", async () => { + const tx = await oracle.connect(member2).submitReportData(reportFields, oracleVersion); + + await expect(tx).to.emit(oracle, "ProcessingStarted").withArgs(reportFields.refSlot, reportHash); + }); + }); + }); +}); diff --git a/test/0.8.9/oracle/validator-exit-bus-oracle.deploy.test.ts b/test/0.8.9/oracle/validator-exit-bus-oracle.deploy.test.ts new file mode 100644 index 0000000000..48dee32afc --- /dev/null +++ 
b/test/0.8.9/oracle/validator-exit-bus-oracle.deploy.test.ts @@ -0,0 +1,81 @@ +import { expect } from "chai"; +import { ZeroAddress } from "ethers"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { HashConsensus__Harness, ValidatorsExitBus__Harness, ValidatorsExitBusOracle } from "typechain-types"; + +import { CONSENSUS_VERSION, SECONDS_PER_SLOT } from "lib"; + +import { deployVEBO, initVEBO } from "test/deploy"; + +describe("ValidatorsExitBusOracle.sol:deploy", () => { + context("Deployment and initial configuration", () => { + let admin: HardhatEthersSigner; + let defaultOracle: ValidatorsExitBusOracle; + + before(async () => { + [admin] = await ethers.getSigners(); + defaultOracle = (await deployVEBO(admin.address)).oracle; + }); + + it("initialize reverts if admin address is zero", async () => { + const deployed = await deployVEBO(admin.address); + + await expect( + deployed.oracle.initialize(ZeroAddress, await deployed.consensus.getAddress(), CONSENSUS_VERSION, 0), + ).to.be.revertedWithCustomError(defaultOracle, "AdminCannotBeZero"); + }); + + it("reverts when slotsPerSecond is zero", async () => { + await expect(deployVEBO(admin.address, { secondsPerSlot: 0n })).to.be.revertedWithCustomError( + defaultOracle, + "SecondsPerSlotCannotBeZero", + ); + }); + + context("deployment and init finishes successfully (default setup)", async () => { + let consensus: HashConsensus__Harness; + let oracle: ValidatorsExitBus__Harness; + + before(async () => { + const deployed = await deployVEBO(admin.address); + await initVEBO({ + admin: admin.address, + oracle: deployed.oracle, + consensus: deployed.consensus, + }); + + consensus = deployed.consensus; + oracle = deployed.oracle; + }); + + it("mock time-travellable setup is correct", async () => { + const time1 = await consensus.getTime(); + expect(await oracle.getTime()).to.equal(time1); + + await consensus.advanceTimeBy(SECONDS_PER_SLOT); + + 
const time2 = await consensus.getTime(); + expect(time2).to.equal(time1 + SECONDS_PER_SLOT); + expect(await oracle.getTime()).to.equal(time2); + }); + + it("initial configuration is correct", async () => { + expect(await oracle.getConsensusContract()).to.equal(await consensus.getAddress()); + expect(await oracle.getConsensusVersion()).to.equal(CONSENSUS_VERSION); + expect(await oracle.SECONDS_PER_SLOT()).to.equal(SECONDS_PER_SLOT); + expect(await oracle.isPaused()).to.equal(true); + }); + + it("pause/resume operations work", async () => { + expect(await oracle.isPaused()).to.equal(true); + await oracle.resume(); + expect(await oracle.isPaused()).to.equal(false); + await oracle.pauseFor(123); + expect(await oracle.isPaused()).to.equal(true); + }); + }); + }); +}); diff --git a/test/0.8.9/oracle/validator-exit-bus-oracle.gas.test.ts b/test/0.8.9/oracle/validator-exit-bus-oracle.gas.test.ts new file mode 100644 index 0000000000..c92fc799c1 --- /dev/null +++ b/test/0.8.9/oracle/validator-exit-bus-oracle.gas.test.ts @@ -0,0 +1,247 @@ +import { expect } from "chai"; +import { ContractTransactionReceipt, ZeroHash } from "ethers"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { HashConsensus__Harness, ValidatorsExitBus__Harness } from "typechain-types"; + +import { CONSENSUS_VERSION, de0x, numberToHex } from "lib"; + +import { + computeTimestampAtSlot, + DATA_FORMAT_LIST, + deployVEBO, + initVEBO, + SECONDS_PER_FRAME, + SLOTS_PER_FRAME, +} from "test/deploy"; +import { Snapshot } from "test/suite"; + +const PUBKEYS = [ + "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + "0xcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", + 
"0xdddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd", + "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", +]; + +describe("ValidatorsExitBusOracle.sol:gas", () => { + let consensus: HashConsensus__Harness; + let oracle: ValidatorsExitBus__Harness; + let admin: HardhatEthersSigner; + + let oracleVersion: bigint; + + let member1: HardhatEthersSigner; + let member2: HardhatEthersSigner; + let member3: HardhatEthersSigner; + + const NUM_MODULES = 5; + const NODE_OPS_PER_MODULE = 100; + + let nextValIndex = 1; + + interface ExitRequest { + moduleId: number; + nodeOpId: number; + valIndex: number; + valPubkey: string; + } + + interface ReportFields { + consensusVersion: bigint; + refSlot: bigint; + requestsCount: number; + dataFormat: number; + data: string; + } + + const calcValidatorsExitBusReportDataHash = (items: ReturnType) => { + const data = ethers.AbiCoder.defaultAbiCoder().encode(["(uint256,uint256,uint256,uint256,bytes)"], [items]); + return ethers.keccak256(data); + }; + + const getValidatorsExitBusReportDataItems = (r: ReportFields) => { + return [r.consensusVersion, r.refSlot, r.requestsCount, r.dataFormat, r.data]; + }; + + const encodeExitRequestHex = ({ moduleId, nodeOpId, valIndex, valPubkey }: ExitRequest) => { + const pubkeyHex = de0x(valPubkey); + expect(pubkeyHex.length).to.equal(48 * 2); + return numberToHex(moduleId, 3) + numberToHex(nodeOpId, 5) + numberToHex(valIndex, 8) + pubkeyHex; + }; + + const encodeExitRequestsDataList = (requests: ExitRequest[]) => { + return "0x" + requests.map(encodeExitRequestHex).join(""); + }; + + const deploy = async () => { + const deployed = await deployVEBO(admin.address); + oracle = deployed.oracle; + consensus = deployed.consensus; + + await initVEBO({ + admin: admin.address, + oracle, + consensus, + resumeAfterDeploy: true, + }); + + oracleVersion = await oracle.getContractVersion(); + + await 
consensus.addMember(member1, 1); + await consensus.addMember(member2, 2); + await consensus.addMember(member3, 2); + }; + + const triggerConsensusOnHash = async (hash: string) => { + const { refSlot } = await consensus.getCurrentFrame(); + await consensus.connect(member1).submitReport(refSlot, hash, CONSENSUS_VERSION); + await consensus.connect(member3).submitReport(refSlot, hash, CONSENSUS_VERSION); + expect((await consensus.getConsensusState()).consensusReport).to.equal(hash); + }; + + const generateExitRequests = (totalRequests: number) => { + const requestsPerModule = Math.max(1, Math.floor(totalRequests / NUM_MODULES)); + const requestsPerNodeOp = Math.max(1, Math.floor(requestsPerModule / NODE_OPS_PER_MODULE)); + + const requests = []; + + for (let i = 0; i < totalRequests; ++i) { + const moduleId = Math.floor(i / requestsPerModule); + const nodeOpId = Math.floor((i - moduleId * requestsPerModule) / requestsPerNodeOp); + const valIndex = nextValIndex++; + const valPubkey = PUBKEYS[valIndex % PUBKEYS.length]; + requests.push({ moduleId: moduleId + 1, nodeOpId, valIndex, valPubkey }); + } + + return { requests, requestsPerModule, requestsPerNodeOp }; + }; + + const gasUsages: { totalRequests: number; requestsPerModule: number; requestsPerNodeOp: number; gasUsed: number }[] = + []; + + before(async () => { + [admin, member1, member2, member3] = await ethers.getSigners(); + await deploy(); + }); + + after(async () => { + gasUsages.forEach(({ totalRequests, requestsPerModule, requestsPerNodeOp, gasUsed }) => + console.log( + `${totalRequests} requests (per module ${requestsPerModule}, ` + + `per node op ${requestsPerNodeOp}): total gas ${gasUsed}, ` + + `gas per request: ${Math.round(gasUsed / totalRequests)}`, + ), + ); + }); + + for (const totalRequests of [10, 50, 100, 1000, 2000]) { + context(`Total requests: ${totalRequests}`, () => { + let exitRequests: { requests: ExitRequest[]; requestsPerModule: number; requestsPerNodeOp: number }; + let reportFields: 
ReportFields; + let reportItems: ReturnType; + let reportHash: string; + let originalState: string; + + before(async () => (originalState = await Snapshot.take())); + + after(async () => await Snapshot.restore(originalState)); + + it("initially, consensus report is not being processed", async () => { + const { refSlot } = await consensus.getCurrentFrame(); + + const report = await oracle.getConsensusReport(); + expect(refSlot).to.above(report.refSlot); + + const procState = await oracle.getProcessingState(); + expect(procState.dataHash, ZeroHash); + expect(procState.dataSubmitted).to.equal(false); + }); + + it("committee reaches consensus on a report hash", async () => { + const { refSlot } = await consensus.getCurrentFrame(); + + exitRequests = generateExitRequests(totalRequests); + + reportFields = { + consensusVersion: CONSENSUS_VERSION, + refSlot: refSlot, + requestsCount: exitRequests.requests.length, + dataFormat: DATA_FORMAT_LIST, + data: encodeExitRequestsDataList(exitRequests.requests), + }; + + reportItems = getValidatorsExitBusReportDataItems(reportFields); + reportHash = calcValidatorsExitBusReportDataHash(reportItems); + + await triggerConsensusOnHash(reportHash); + }); + + it("oracle gets the report hash", async () => { + const report = await oracle.getConsensusReport(); + expect(report.hash).to.equal(reportHash); + expect(report.refSlot).to.equal(reportFields.refSlot); + expect(report.processingDeadlineTime).to.equal(computeTimestampAtSlot(report.refSlot + SLOTS_PER_FRAME)); + expect(report.processingStarted).to.equal(false); + + const procState = await oracle.getProcessingState(); + expect(procState.dataHash).to.equal(reportHash); + expect(procState.dataSubmitted).to.equal(false); + expect(procState.dataFormat).to.equal(0); + expect(procState.requestsCount).to.equal(0); + expect(procState.requestsSubmitted).to.equal(0); + }); + + it("some time passes", async () => { + await consensus.advanceTimeBy(SECONDS_PER_FRAME / 3n); + }); + + it(`a committee 
member submits the report data, exit requests are emitted`, async () => { + const tx = await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + const receipt = (await tx.wait()) as ContractTransactionReceipt; + await expect(tx).to.emit(oracle, "ProcessingStarted").withArgs(reportFields.refSlot, reportHash); + expect((await oracle.getConsensusReport()).processingStarted).to.equal(true); + + const timestamp = await oracle.getTime(); + + const evFirst = exitRequests.requests[0]; + const evLast = exitRequests.requests[exitRequests.requests.length - 1]; + + await expect(tx) + .to.emit(oracle, "ValidatorExitRequest") + .withArgs(evFirst.moduleId, evFirst.nodeOpId, evFirst.valIndex, evFirst.valPubkey, timestamp); + + await expect(tx) + .to.emit(oracle, "ValidatorExitRequest") + .withArgs(evLast.moduleId, evLast.nodeOpId, evLast.valIndex, evLast.valPubkey, timestamp); + + const { gasUsed } = receipt; + + gasUsages.push({ + totalRequests, + requestsPerModule: exitRequests.requestsPerModule, + requestsPerNodeOp: exitRequests.requestsPerNodeOp, + gasUsed: Number(gasUsed), + }); + }); + + it(`reports are marked as processed`, async () => { + const procState = await oracle.getProcessingState(); + expect(procState.dataHash).to.equal(reportHash); + expect(procState.dataSubmitted).to.equal(true); + expect(procState.dataFormat).to.equal(DATA_FORMAT_LIST); + expect(procState.requestsCount).to.equal(exitRequests.requests.length); + expect(procState.requestsSubmitted).to.equal(exitRequests.requests.length); + }); + + it("some time passes", async () => { + const prevFrame = await consensus.getCurrentFrame(); + await consensus.advanceTimeBy(SECONDS_PER_FRAME - SECONDS_PER_FRAME / 3n); + const newFrame = await consensus.getCurrentFrame(); + expect(newFrame.refSlot).to.above(prevFrame.refSlot); + }); + }); + } +}); diff --git a/test/0.8.9/oracle/validator-exit-bus-oracle.happyPath.test.ts b/test/0.8.9/oracle/validator-exit-bus-oracle.happyPath.test.ts new file mode 
100644 index 0000000000..615050cf49 --- /dev/null +++ b/test/0.8.9/oracle/validator-exit-bus-oracle.happyPath.test.ts @@ -0,0 +1,256 @@ +import { expect } from "chai"; +import { ZeroHash } from "ethers"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { HashConsensus__Harness, ValidatorsExitBus__Harness } from "typechain-types"; + +import { CONSENSUS_VERSION, de0x, numberToHex } from "lib"; + +import { + computeTimestampAtSlot, + DATA_FORMAT_LIST, + deployVEBO, + initVEBO, + SECONDS_PER_FRAME, + SLOTS_PER_FRAME, +} from "test/deploy"; + +const PUBKEYS = [ + "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + "0xcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", + "0xdddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd", + "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", +]; + +describe("ValidatorsExitBusOracle.sol:happyPath", () => { + let consensus: HashConsensus__Harness; + let oracle: ValidatorsExitBus__Harness; + let admin: HardhatEthersSigner; + + let oracleVersion: bigint; + let exitRequests: ExitRequest[]; + let reportFields: ReportFields; + let reportItems: ReturnType; + let reportHash: string; + + let member1: HardhatEthersSigner; + let member2: HardhatEthersSigner; + let member3: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + const LAST_PROCESSING_REF_SLOT = 1; + + interface ExitRequest { + moduleId: number; + nodeOpId: number; + valIndex: number; + valPubkey: string; + } + + interface ReportFields { + consensusVersion: bigint; + refSlot: bigint; + requestsCount: number; + dataFormat: number; + data: string; + } + + const calcValidatorsExitBusReportDataHash = (items: 
ReturnType<typeof getValidatorsExitBusReportDataItems>) => {
expect(report.processingStarted).to.equal(false); + + const frame = await consensus.getCurrentFrame(); + const procState = await oracle.getProcessingState(); + + expect(procState.currentFrameRefSlot).to.equal(frame.refSlot); + expect(procState.dataHash).to.equal(ZeroHash); + expect(procState.processingDeadlineTime).to.equal(0); + expect(procState.dataSubmitted).to.equal(false); + expect(procState.dataFormat).to.equal(0); + expect(procState.requestsCount).to.equal(0); + expect(procState.requestsSubmitted).to.equal(0); + }); + + it("reference slot of the empty initial consensus report is set to the last processing slot passed to the initialize function", async () => { + const report = await oracle.getConsensusReport(); + expect(report.refSlot).to.equal(LAST_PROCESSING_REF_SLOT); + }); + + it("committee reaches consensus on a report hash", async () => { + const { refSlot } = await consensus.getCurrentFrame(); + + exitRequests = [ + { moduleId: 1, nodeOpId: 0, valIndex: 0, valPubkey: PUBKEYS[0] }, + { moduleId: 1, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, + { moduleId: 2, nodeOpId: 0, valIndex: 1, valPubkey: PUBKEYS[2] }, + ]; + + reportFields = { + consensusVersion: CONSENSUS_VERSION, + refSlot: refSlot, + requestsCount: exitRequests.length, + dataFormat: DATA_FORMAT_LIST, + data: encodeExitRequestsDataList(exitRequests), + }; + + reportItems = getValidatorsExitBusReportDataItems(reportFields); + reportHash = calcValidatorsExitBusReportDataHash(reportItems); + + await triggerConsensusOnHash(reportHash); + }); + + it("oracle gets the report hash", async () => { + const report = await oracle.getConsensusReport(); + expect(report.hash).to.equal(reportHash); + expect(report.refSlot).to.equal(reportFields.refSlot); + expect(report.processingDeadlineTime).to.equal(computeTimestampAtSlot(report.refSlot + SLOTS_PER_FRAME)); + + expect(report.processingStarted).to.equal(false); + + const frame = await consensus.getCurrentFrame(); + const procState = await 
oracle.getProcessingState(); + + expect(procState.currentFrameRefSlot).to.equal(frame.refSlot); + expect(procState.dataHash).to.equal(reportHash); + expect(procState.processingDeadlineTime).to.equal(computeTimestampAtSlot(frame.reportProcessingDeadlineSlot)); + expect(procState.dataSubmitted).to.equal(false); + expect(procState.dataFormat).to.equal(0); + expect(procState.requestsCount).to.equal(0); + expect(procState.requestsSubmitted).to.equal(0); + }); + + it("some time passes", async () => { + await consensus.advanceTimeBy(SECONDS_PER_FRAME / 3n); + }); + + it("non-member cannot submit the data", async () => { + await expect(oracle.connect(stranger).submitReportData(reportFields, oracleVersion)).to.be.revertedWithCustomError( + oracle, + "SenderNotAllowed", + ); + }); + + it("the data cannot be submitted passing a different contract version", async () => { + await expect(oracle.connect(member1).submitReportData(reportFields, oracleVersion - 1n)) + .to.be.revertedWithCustomError(oracle, "UnexpectedContractVersion") + .withArgs(oracleVersion, oracleVersion - 1n); + }); + + it("the data cannot be submitted passing a different consensus version", async () => { + const invalidReport = { ...reportFields, consensusVersion: CONSENSUS_VERSION + 1n }; + await expect(oracle.connect(member1).submitReportData(invalidReport, oracleVersion)) + .to.be.revertedWithCustomError(oracle, "UnexpectedConsensusVersion") + .withArgs(CONSENSUS_VERSION, CONSENSUS_VERSION + 1n); + }); + + it("a data not matching the consensus hash cannot be submitted", async () => { + const invalidReport = { ...reportFields, requestsCount: reportFields.requestsCount + 1 }; + const invalidReportItems = getValidatorsExitBusReportDataItems(invalidReport); + const invalidReportHash = calcValidatorsExitBusReportDataHash(invalidReportItems); + + await expect(oracle.connect(member1).submitReportData(invalidReport, oracleVersion)) + .to.be.revertedWithCustomError(oracle, "UnexpectedDataHash") + 
.withArgs(reportHash, invalidReportHash); + }); + + it("a committee member submits the report data, exit requests are emitted", async () => { + const tx = await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + + await expect(tx).to.emit(oracle, "ProcessingStarted").withArgs(reportFields.refSlot, reportHash); + expect((await oracle.getConsensusReport()).processingStarted).to.equal(true); + + const timestamp = await oracle.getTime(); + + for (const request of exitRequests) { + await expect(tx) + .to.emit(oracle, "ValidatorExitRequest") + .withArgs(request.moduleId, request.nodeOpId, request.valIndex, request.valPubkey, timestamp); + } + }); + + it("reports are marked as processed", async () => { + const frame = await consensus.getCurrentFrame(); + const procState = await oracle.getProcessingState(); + + expect(procState.currentFrameRefSlot).to.equal(frame.refSlot); + expect(procState.dataHash).to.equal(reportHash); + expect(procState.processingDeadlineTime).to.equal(computeTimestampAtSlot(frame.reportProcessingDeadlineSlot)); + expect(procState.dataSubmitted).to.equal(true); + expect(procState.dataFormat).to.equal(DATA_FORMAT_LIST); + expect(procState.requestsCount).to.equal(exitRequests.length); + expect(procState.requestsSubmitted).to.equal(exitRequests.length); + }); + + it("last requested validator indices are updated", async () => { + const indices1 = await oracle.getLastRequestedValidatorIndices(1n, [0n, 1n, 2n]); + const indices2 = await oracle.getLastRequestedValidatorIndices(2n, [0n, 1n, 2n]); + + expect([...indices1]).to.have.ordered.members([2n, -1n, -1n]); + expect([...indices2]).to.have.ordered.members([1n, -1n, -1n]); + }); + + it("no data can be submitted for the same reference slot again", async () => { + await expect(oracle.connect(member2).submitReportData(reportFields, oracleVersion)).to.be.revertedWithCustomError( + oracle, + "RefSlotAlreadyProcessing", + ); + }); +}); diff --git 
a/test/0.8.9/oracle/validator-exit-bus-oracle.submitReportData.test.ts b/test/0.8.9/oracle/validator-exit-bus-oracle.submitReportData.test.ts new file mode 100644 index 0000000000..a5f7fd6283 --- /dev/null +++ b/test/0.8.9/oracle/validator-exit-bus-oracle.submitReportData.test.ts @@ -0,0 +1,697 @@ +import { expect } from "chai"; +import { ZeroHash } from "ethers"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { HashConsensus__Harness, OracleReportSanityChecker, ValidatorsExitBus__Harness } from "typechain-types"; + +import { CONSENSUS_VERSION, de0x, numberToHex } from "lib"; + +import { computeTimestampAtSlot, DATA_FORMAT_LIST, deployVEBO, initVEBO } from "test/deploy"; +import { Snapshot } from "test/suite"; + +const PUBKEYS = [ + "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + "0xcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", + "0xdddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd", + "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", +]; +const HASH_1 = "0x1111111111111111111111111111111111111111111111111111111111111111"; + +describe("ValidatorsExitBusOracle.sol:submitReportData", () => { + let consensus: HashConsensus__Harness; + let oracle: ValidatorsExitBus__Harness; + let admin: HardhatEthersSigner; + let oracleReportSanityChecker: OracleReportSanityChecker; + + let oracleVersion: bigint; + + let member1: HardhatEthersSigner; + let member2: HardhatEthersSigner; + let member3: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + const LAST_PROCESSING_REF_SLOT = 1; + + interface ExitRequest { + moduleId: number; + nodeOpId: number; + valIndex: number; + valPubkey: 
string; + } + + interface ReportFields { + consensusVersion: bigint; + refSlot: bigint; + requestsCount: number; + dataFormat: number; + data: string; + } + + const calcValidatorsExitBusReportDataHash = (items: ReturnType) => { + const data = ethers.AbiCoder.defaultAbiCoder().encode(["(uint256,uint256,uint256,uint256,bytes)"], [items]); + return ethers.keccak256(data); + }; + + const getValidatorsExitBusReportDataItems = (r: ReportFields) => { + return [r.consensusVersion, r.refSlot, r.requestsCount, r.dataFormat, r.data]; + }; + + const encodeExitRequestHex = ({ moduleId, nodeOpId, valIndex, valPubkey }: ExitRequest) => { + const pubkeyHex = de0x(valPubkey); + expect(pubkeyHex.length).to.equal(48 * 2); + return numberToHex(moduleId, 3) + numberToHex(nodeOpId, 5) + numberToHex(valIndex, 8) + pubkeyHex; + }; + + const encodeExitRequestsDataList = (requests: ExitRequest[]) => { + return "0x" + requests.map(encodeExitRequestHex).join(""); + }; + + const triggerConsensusOnHash = async (hash: string) => { + const { refSlot } = await consensus.getCurrentFrame(); + await consensus.connect(member1).submitReport(refSlot, hash, CONSENSUS_VERSION); + await consensus.connect(member3).submitReport(refSlot, hash, CONSENSUS_VERSION); + expect((await consensus.getConsensusState()).consensusReport).to.equal(hash); + }; + + const prepareReportAndSubmitHash = async ( + requests = [{ moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[2] }], + options = { reportFields: {} }, + ) => { + const { refSlot } = await consensus.getCurrentFrame(); + + const reportData = { + consensusVersion: CONSENSUS_VERSION, + dataFormat: DATA_FORMAT_LIST, + refSlot, + requestsCount: requests.length, + data: encodeExitRequestsDataList(requests), + ...options.reportFields, + }; + + const reportItems = getValidatorsExitBusReportDataItems(reportData); + const reportHash = calcValidatorsExitBusReportDataHash(reportItems); + + await triggerConsensusOnHash(reportHash); + + return { reportData, reportHash, 
reportItems }; + }; + + async function getLastRequestedValidatorIndex(moduleId: number, nodeOpId: number) { + return (await oracle.getLastRequestedValidatorIndices(moduleId, [nodeOpId]))[0]; + } + + const deploy = async () => { + const deployed = await deployVEBO(admin.address); + oracle = deployed.oracle; + consensus = deployed.consensus; + oracleReportSanityChecker = deployed.oracleReportSanityChecker; + + await initVEBO({ + admin: admin.address, + oracle, + consensus, + resumeAfterDeploy: true, + lastProcessingRefSlot: LAST_PROCESSING_REF_SLOT, + }); + + oracleVersion = await oracle.getContractVersion(); + + await consensus.addMember(member1, 1); + await consensus.addMember(member2, 2); + await consensus.addMember(member3, 2); + }; + + before(async () => { + [admin, member1, member2, member3, stranger] = await ethers.getSigners(); + + await deploy(); + }); + + context("discarded report prevents data submit", () => { + let reportData: ReportFields; + let reportHash: string; + let originalState: string; + + before(async () => { + originalState = await Snapshot.take(); + }); + + after(async () => await Snapshot.restore(originalState)); + + it("report is discarded", async () => { + ({ reportData, reportHash } = await prepareReportAndSubmitHash()); + const { refSlot } = await consensus.getCurrentFrame(); + + // change of mind + const tx = await consensus.connect(member3).submitReport(refSlot, HASH_1, CONSENSUS_VERSION); + + await expect(tx).to.emit(oracle, "ReportDiscarded").withArgs(refSlot, reportHash); + }); + + it("processing state reverts to pre-report state ", async () => { + const state = await oracle.getProcessingState(); + expect(state.dataHash).to.equal(ZeroHash); + expect(state.dataSubmitted).to.equal(false); + expect(state.dataFormat).to.equal(0); + expect(state.requestsCount).to.equal(0); + expect(state.requestsSubmitted).to.equal(0); + }); + + it("reverts on trying to submit the discarded report", async () => { + await 
expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)) + .to.be.revertedWithCustomError(oracle, "UnexpectedDataHash") + .withArgs(ZeroHash, reportHash); + }); + }); + + context("_handleConsensusReportData", () => { + let originalState: string; + + beforeEach(async () => { + originalState = await Snapshot.take(); + await consensus.advanceTimeToNextFrameStart(); + }); + + afterEach(async () => await Snapshot.restore(originalState)); + + context("enforces data format", () => { + it("dataFormat = 0 reverts", async () => { + const dataFormatUnsupported = 0; + const { reportData } = await prepareReportAndSubmitHash( + [{ moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }], + { reportFields: { dataFormat: dataFormatUnsupported } }, + ); + + await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)) + .to.be.revertedWithCustomError(oracle, "UnsupportedRequestsDataFormat") + .withArgs(dataFormatUnsupported); + }); + + it("dataFormat = 2 reverts", async () => { + const dataFormatUnsupported = 2; + const { reportData } = await prepareReportAndSubmitHash( + [{ moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }], + { reportFields: { dataFormat: dataFormatUnsupported } }, + ); + + await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)) + .to.be.revertedWithCustomError(oracle, "UnsupportedRequestsDataFormat") + .withArgs(dataFormatUnsupported); + }); + + it("dataFormat = 1 pass", async () => { + const { reportData } = await prepareReportAndSubmitHash([ + { moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }, + ]); + await oracle.connect(member1).submitReportData(reportData, oracleVersion); + }); + }); + + context("enforces data length", () => { + it("reverts if there is more data than expected", async () => { + const { refSlot } = await consensus.getCurrentFrame(); + const exitRequests = [{ moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }]; + const { reportData } 
= await prepareReportAndSubmitHash(exitRequests, { + reportFields: { data: encodeExitRequestsDataList(exitRequests) + "aaaaaaaaaaaaaaaaaa", refSlot }, + }); + + await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)).to.be.revertedWithCustomError( + oracle, + "InvalidRequestsDataLength", + ); + }); + + it("reverts if there is less data than expected", async () => { + const { refSlot } = await consensus.getCurrentFrame(); + const exitRequests = [{ moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }]; + const data = encodeExitRequestsDataList(exitRequests); + + const { reportData } = await prepareReportAndSubmitHash(exitRequests, { + reportFields: { + data: data.slice(0, data.length - 18), + refSlot, + }, + }); + + await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)).to.be.revertedWithCustomError( + oracle, + "InvalidRequestsDataLength", + ); + }); + + it("pass if there is exact amount of data", async () => { + const { reportData } = await prepareReportAndSubmitHash([ + { moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }, + ]); + await oracle.connect(member1).submitReportData(reportData, oracleVersion); + }); + }); + + context("invokes sanity check", () => { + before(async () => { + await oracleReportSanityChecker.grantRole( + await oracleReportSanityChecker.MAX_VALIDATOR_EXIT_REQUESTS_PER_REPORT_ROLE(), + admin.address, + ); + }); + + it("reverts if request limit is reached", async () => { + const exitRequestsLimit = 1; + await oracleReportSanityChecker.connect(admin).setMaxExitRequestsPerOracleReport(exitRequestsLimit); + const { reportData } = await prepareReportAndSubmitHash([ + { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[2] }, + { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[3] }, + ]); + await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)) + .to.be.revertedWithCustomError(oracleReportSanityChecker, 
"IncorrectNumberOfExitRequestsPerReport") + .withArgs(exitRequestsLimit); + }); + it("pass if requests amount equals to limit", async () => { + const exitRequestsLimit = 1; + await oracleReportSanityChecker.connect(admin).setMaxExitRequestsPerOracleReport(exitRequestsLimit); + const { reportData } = await prepareReportAndSubmitHash([ + { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[2] }, + ]); + await oracle.connect(member1).submitReportData(reportData, oracleVersion); + }); + }); + + context("validates data.requestsCount field with given data", () => { + it("reverts if requestsCount does not match with encoded data size", async () => { + const { reportData } = await prepareReportAndSubmitHash( + [{ moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }], + { reportFields: { requestsCount: 2 } }, + ); + + await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)).to.be.revertedWithCustomError( + oracle, + "UnexpectedRequestsDataLength", + ); + }); + }); + + it("reverts if moduleId equals zero", async () => { + const { reportData } = await prepareReportAndSubmitHash([ + { moduleId: 0, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }, + ]); + + await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)).to.be.revertedWithCustomError( + oracle, + "InvalidRequestsData", + ); + }); + + it("emits ValidatorExitRequest events", async () => { + const requests = [ + { moduleId: 4, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[2] }, + { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[3] }, + ]; + const { reportData } = await prepareReportAndSubmitHash(requests); + const tx = await oracle.connect(member1).submitReportData(reportData, oracleVersion); + const timestamp = await consensus.getTime(); + + await expect(tx) + .to.emit(oracle, "ValidatorExitRequest") + .withArgs(requests[0].moduleId, requests[0].nodeOpId, requests[0].valIndex, requests[0].valPubkey, timestamp); + + await expect(tx) + 
.to.emit(oracle, "ValidatorExitRequest") + .withArgs(requests[1].moduleId, requests[1].nodeOpId, requests[1].valIndex, requests[1].valPubkey, timestamp); + }); + + it("updates processing state", async () => { + const storageBefore = await oracle.getDataProcessingState(); + expect(storageBefore.refSlot).to.equal(0); + expect(storageBefore.requestsCount).to.equal(0); + + expect(storageBefore.requestsProcessed).to.equal(0); + expect(storageBefore.dataFormat).to.equal(0); + + const { refSlot } = await consensus.getCurrentFrame(); + const requests = [ + { moduleId: 4, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[2] }, + { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[3] }, + ]; + const { reportData } = await prepareReportAndSubmitHash(requests); + await oracle.connect(member1).submitReportData(reportData, oracleVersion); + + const storageAfter = await oracle.getDataProcessingState(); + expect(storageAfter.refSlot).to.equal(refSlot); + expect(storageAfter.requestsCount).to.equal(requests.length); + expect(storageAfter.requestsProcessed).to.equal(requests.length); + expect(storageAfter.dataFormat).to.equal(DATA_FORMAT_LIST); + }); + + it("updates total requests processed count", async () => { + let currentCount = 0; + const countStep0 = await oracle.getTotalRequestsProcessed(); + expect(countStep0).to.equal(currentCount); + + // Step 1 — process 1 item + const requestsStep1 = [{ moduleId: 3, nodeOpId: 1, valIndex: 2, valPubkey: PUBKEYS[1] }]; + const { reportData: reportStep1 } = await prepareReportAndSubmitHash(requestsStep1); + await oracle.connect(member1).submitReportData(reportStep1, oracleVersion); + const countStep1 = await oracle.getTotalRequestsProcessed(); + currentCount += requestsStep1.length; + expect(countStep1).to.equal(currentCount); + + // Step 2 — process 2 items + await consensus.advanceTimeToNextFrameStart(); + const requestsStep2 = [ + { moduleId: 4, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[2] }, + { moduleId: 5, nodeOpId: 3, valIndex: 
2, valPubkey: PUBKEYS[3] }, + ]; + const { reportData: reportStep2 } = await prepareReportAndSubmitHash(requestsStep2); + await oracle.connect(member1).submitReportData(reportStep2, oracleVersion); + const countStep2 = await oracle.getTotalRequestsProcessed(); + currentCount += requestsStep2.length; + expect(countStep2).to.equal(currentCount); + + // // Step 3 — process no items + await consensus.advanceTimeToNextFrameStart(); + const requestsStep3: ExitRequest[] = []; + const { reportData: reportStep3 } = await prepareReportAndSubmitHash(requestsStep3); + await oracle.connect(member1).submitReportData(reportStep3, oracleVersion); + const countStep3 = await oracle.getTotalRequestsProcessed(); + currentCount += requestsStep3.length; + expect(countStep3).to.equal(currentCount); + }); + }); + + context(`requires validator indices for the same node operator to increase`, () => { + let originalState: string; + + before(async () => { + originalState = await Snapshot.take(); + await consensus.advanceTimeToNextFrameStart(); + }); + + after(async () => await Snapshot.restore(originalState)); + + it(`requesting NO 5-3 to exit validator 0`, async () => { + await consensus.advanceTimeToNextFrameStart(); + const { reportData } = await prepareReportAndSubmitHash([ + { moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }, + ]); + await oracle.connect(member1).submitReportData(reportData, oracleVersion); + expect(await getLastRequestedValidatorIndex(5, 3)).to.equal(0); + }); + + it(`cannot request NO 5-3 to exit validator 0 again`, async () => { + await consensus.advanceTimeToNextFrameStart(); + const { reportData } = await prepareReportAndSubmitHash([ + { moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }, + ]); + + await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)) + .to.be.revertedWithCustomError(oracle, "NodeOpValidatorIndexMustIncrease") + .withArgs(5, 3, 0, 0); + }); + + it(`requesting NO 5-3 to exit validator 1`, async () 
=> { + await consensus.advanceTimeToNextFrameStart(); + const { reportData } = await prepareReportAndSubmitHash([ + { moduleId: 5, nodeOpId: 3, valIndex: 1, valPubkey: PUBKEYS[1] }, + ]); + await oracle.connect(member1).submitReportData(reportData, oracleVersion, { from: member1 }); + expect(await getLastRequestedValidatorIndex(5, 3)).to.equal(1); + }); + + it(`cannot request NO 5-3 to exit validator 1 again`, async () => { + await consensus.advanceTimeToNextFrameStart(); + const { reportData } = await prepareReportAndSubmitHash([ + { moduleId: 5, nodeOpId: 3, valIndex: 1, valPubkey: PUBKEYS[1] }, + ]); + + await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)) + .to.be.revertedWithCustomError(oracle, "NodeOpValidatorIndexMustIncrease") + .withArgs(5, 3, 1, 1); + }); + + it(`cannot request NO 5-3 to exit validator 0 again`, async () => { + await consensus.advanceTimeToNextFrameStart(); + const { reportData } = await prepareReportAndSubmitHash([ + { moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }, + ]); + + await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)) + .to.be.revertedWithCustomError(oracle, "NodeOpValidatorIndexMustIncrease") + .withArgs(5, 3, 1, 0); + }); + + it(`cannot request NO 5-3 to exit validator 1 again (multiple requests)`, async () => { + await consensus.advanceTimeToNextFrameStart(); + const { reportData } = await prepareReportAndSubmitHash([ + { moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[0] }, + { moduleId: 5, nodeOpId: 3, valIndex: 1, valPubkey: PUBKEYS[0] }, + ]); + + await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)) + .to.be.revertedWithCustomError(oracle, "NodeOpValidatorIndexMustIncrease") + .withArgs(5, 3, 1, 1); + }); + + it(`cannot request NO 5-3 to exit validator 1 again (multiple requests, case 2)`, async () => { + await consensus.advanceTimeToNextFrameStart(); + const { reportData } = await 
prepareReportAndSubmitHash([ + { moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[2] }, + { moduleId: 5, nodeOpId: 3, valIndex: 1, valPubkey: PUBKEYS[3] }, + { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[4] }, + ]); + + await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)) + .to.be.revertedWithCustomError(oracle, "NodeOpValidatorIndexMustIncrease") + .withArgs(5, 3, 1, 1); + }); + + it(`cannot request NO 5-3 to exit validator 2 two times per request`, async () => { + await consensus.advanceTimeToNextFrameStart(); + const { reportData } = await prepareReportAndSubmitHash([ + { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[2] }, + { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[3] }, + ]); + + await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)).to.be.revertedWithCustomError( + oracle, + "InvalidRequestsDataSortOrder", + ); + }); + }); + + context(`only consensus member or SUBMIT_DATA_ROLE can submit report on unpaused contract`, () => { + let originalState: string; + + beforeEach(async () => { + originalState = await Snapshot.take(); + await consensus.advanceTimeToNextFrameStart(); + }); + + afterEach(async () => await Snapshot.restore(originalState)); + + it("reverts on stranger", async () => { + const { reportData } = await prepareReportAndSubmitHash(); + + await expect(oracle.connect(stranger).submitReportData(reportData, oracleVersion)).to.be.revertedWithCustomError( + oracle, + "SenderNotAllowed", + ); + }); + + it("SUBMIT_DATA_ROLE is allowed", async () => { + await oracle.grantRole(await oracle.SUBMIT_DATA_ROLE(), stranger, { from: admin }); + await consensus.advanceTimeToNextFrameStart(); + const { reportData } = await prepareReportAndSubmitHash(); + await oracle.connect(stranger).submitReportData(reportData, oracleVersion); + }); + + it("consensus member is allowed", async () => { + expect(await consensus.getIsMember(member1)).to.equal(true); + await 
consensus.advanceTimeToNextFrameStart(); + const { reportData } = await prepareReportAndSubmitHash(); + await oracle.connect(member1).submitReportData(reportData, oracleVersion); + }); + + it("reverts on paused contract", async () => { + await consensus.advanceTimeToNextFrameStart(); + const PAUSE_INFINITELY = await oracle.PAUSE_INFINITELY(); + await oracle.pauseFor(PAUSE_INFINITELY, { from: admin }); + const { reportData } = await prepareReportAndSubmitHash(); + + await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)).to.be.revertedWithCustomError( + oracle, + "ResumedExpected", + ); + }); + }); + + context("invokes internal baseOracle checks", () => { + let originalState: string; + + beforeEach(async () => { + originalState = await Snapshot.take(); + await consensus.advanceTimeToNextFrameStart(); + }); + + afterEach(async () => await Snapshot.restore(originalState)); + + it(`reverts on contract version mismatch`, async () => { + const { reportData } = await prepareReportAndSubmitHash(); + + await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion + 1n)) + .to.be.revertedWithCustomError(oracle, "UnexpectedContractVersion") + .withArgs(oracleVersion, oracleVersion + 1n); + }); + + it("reverts on hash mismatch", async () => { + const requests = [{ moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[2] }]; + const { reportHash: actualReportHash } = await prepareReportAndSubmitHash(requests); + const newRequests = [{ moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[1] }]; + + const { refSlot } = await consensus.getCurrentFrame(); + // change pubkey + const reportData = { + consensusVersion: CONSENSUS_VERSION, + dataFormat: DATA_FORMAT_LIST, + refSlot, + requestsCount: newRequests.length, + data: encodeExitRequestsDataList(newRequests), + }; + + const reportItems = getValidatorsExitBusReportDataItems(reportData); + const changedReportHash = calcValidatorsExitBusReportDataHash(reportItems); + + await 
expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)) + .to.be.revertedWithCustomError(oracle, "UnexpectedDataHash") + .withArgs(actualReportHash, changedReportHash); + }); + + it("reverts on processing deadline miss", async () => { + const { reportData } = await prepareReportAndSubmitHash(); + const deadline = (await oracle.getConsensusReport()).processingDeadlineTime.toString(10); + await consensus.advanceTimeToNextFrameStart(); + + await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)) + .to.be.revertedWithCustomError(oracle, "ProcessingDeadlineMissed") + .withArgs(deadline); + }); + }); + + context("getTotalRequestsProcessed reflects report history", () => { + let originalState: string; + + before(async () => { + originalState = await Snapshot.take(); + await consensus.advanceTimeToNextFrameStart(); + }); + + after(async () => await Snapshot.restore(originalState)); + + let requestCount = 0; + + it("should be zero at init", async () => { + requestCount = 0; + expect(await oracle.getTotalRequestsProcessed()).to.equal(requestCount); + }); + + it("should increase after report", async () => { + const { reportData } = await prepareReportAndSubmitHash([ + { moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }, + ]); + await oracle.connect(member1).submitReportData(reportData, oracleVersion, { from: member1 }); + requestCount += 1; + expect(await oracle.getTotalRequestsProcessed()).to.equal(requestCount); + }); + + it("should double increase for two exits", async () => { + await consensus.advanceTimeToNextFrameStart(); + const { reportData } = await prepareReportAndSubmitHash([ + { moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[0] }, + { moduleId: 5, nodeOpId: 3, valIndex: 1, valPubkey: PUBKEYS[0] }, + ]); + await oracle.connect(member1).submitReportData(reportData, oracleVersion); + requestCount += 2; + expect(await oracle.getTotalRequestsProcessed()).to.equal(requestCount); + }); + + it("should 
not change on empty report", async () => { + await consensus.advanceTimeToNextFrameStart(); + const { reportData } = await prepareReportAndSubmitHash([]); + await oracle.connect(member1).submitReportData(reportData, oracleVersion); + expect(await oracle.getTotalRequestsProcessed()).to.equal(requestCount); + }); + }); + + context("getProcessingState reflects state change", () => { + let originalState: string; + before(async () => { + originalState = await Snapshot.take(); + await consensus.advanceTimeToNextFrameStart(); + }); + after(async () => await Snapshot.restore(originalState)); + + let report: ReportFields; + let hash: string; + + it("has correct defaults on init", async () => { + const state = await oracle.getProcessingState(); + expect(Object.values(state)).to.deep.equal([ + (await consensus.getCurrentFrame()).refSlot, + 0, + ZeroHash, + false, + 0, + 0, + 0, + ]); + }); + + it("consensus report submitted", async () => { + ({ reportData: report, reportHash: hash } = await prepareReportAndSubmitHash([ + { moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[2] }, + { moduleId: 5, nodeOpId: 3, valIndex: 1, valPubkey: PUBKEYS[3] }, + ])); + const state = await oracle.getProcessingState(); + + expect(Object.values(state)).to.deep.equal([ + (await consensus.getCurrentFrame()).refSlot, + computeTimestampAtSlot((await consensus.getCurrentFrame()).reportProcessingDeadlineSlot), + hash, + false, + 0, + 0, + 0, + ]); + }); + + it("report is processed", async () => { + await oracle.connect(member1).submitReportData(report, oracleVersion); + const state = await oracle.getProcessingState(); + expect(Object.values(state)).to.deep.equal([ + (await consensus.getCurrentFrame()).refSlot, + computeTimestampAtSlot((await consensus.getCurrentFrame()).reportProcessingDeadlineSlot), + hash, + true, + DATA_FORMAT_LIST, + 2, + 2, + ]); + }); + + it("at next frame state resets", async () => { + await consensus.advanceTimeToNextFrameStart(); + const state = await 
oracle.getProcessingState(); + expect(Object.values(state)).to.deep.equal([ + (await consensus.getCurrentFrame()).refSlot, + 0, + ZeroHash, + false, + 0, + 0, + 0, + ]); + }); + }); +}); diff --git a/test/common/memUtils.t.sol b/test/common/memUtils.t.sol index 1e10db0577..7fd2c916e7 100644 --- a/test/common/memUtils.t.sol +++ b/test/common/memUtils.t.sol @@ -483,6 +483,7 @@ contract MemUtilsTest is Test, MemUtilsTestHelper { assertEq(dst, abi.encodePacked(bytes32(0x2211111111111111111111111111111111111111111111111111111111111111))); } + /// forge-config: default.allow_internal_expect_revert = true function test_copyBytes_RevertsWhenSrcArrayIsOutOfBounds() external { bytes memory src = abi.encodePacked( bytes32(0x1111111111111111111111111111111111111111111111111111111111111111) diff --git a/test/deploy/index.ts b/test/deploy/index.ts index d7afaf8589..281dd47aba 100644 --- a/test/deploy/index.ts +++ b/test/deploy/index.ts @@ -4,3 +4,4 @@ export * from "./locator"; export * from "./dao"; export * from "./hashConsensus"; export * from "./withdrawalQueue"; +export * from "./validatorExitBusOracle"; diff --git a/test/deploy/validatorExitBusOracle.ts b/test/deploy/validatorExitBusOracle.ts new file mode 100644 index 0000000000..1b5e0e2805 --- /dev/null +++ b/test/deploy/validatorExitBusOracle.ts @@ -0,0 +1,120 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HashConsensus__Harness, ReportProcessor__Mock, ValidatorsExitBusOracle } from "typechain-types"; + +import { + CONSENSUS_VERSION, + EPOCHS_PER_FRAME, + GENESIS_TIME, + INITIAL_EPOCH, + SECONDS_PER_SLOT, + SLOTS_PER_EPOCH, +} from "lib"; + +import { deployHashConsensus } from "./hashConsensus"; +import { deployLidoLocator, updateLidoLocatorImplementation } from "./locator"; + +export const DATA_FORMAT_LIST = 1; + +async function deployMockAccountingOracle(secondsPerSlot = SECONDS_PER_SLOT, genesisTime = GENESIS_TIME) { + const lido = await 
ethers.deployContract("Lido__MockForAccountingOracle"); + const ao = await ethers.deployContract("AccountingOracle__MockForSanityChecker", [ + await lido.getAddress(), + secondsPerSlot, + genesisTime, + ]); + return { ao, lido }; +} + +async function deployOracleReportSanityCheckerForExitBus(lidoLocator: string, admin: string) { + const maxValidatorExitRequestsPerReport = 2000; + const limitsList = [0, 0, 0, 0, maxValidatorExitRequestsPerReport, 0, 0, 0, 0, 0, 0, 0]; + + return await ethers.deployContract("OracleReportSanityChecker", [lidoLocator, admin, limitsList]); +} + +export async function deployVEBO( + admin: string, + { + epochsPerFrame = EPOCHS_PER_FRAME, + secondsPerSlot = SECONDS_PER_SLOT, + slotsPerEpoch = SLOTS_PER_EPOCH, + genesisTime = GENESIS_TIME, + initialEpoch = INITIAL_EPOCH, + } = {}, +) { + const locator = await deployLidoLocator(); + const locatorAddr = await locator.getAddress(); + + const oracle = await ethers.deployContract("ValidatorsExitBus__Harness", [secondsPerSlot, genesisTime, locatorAddr]); + + const { consensus } = await deployHashConsensus(admin, { + reportProcessor: oracle as unknown as ReportProcessor__Mock, + epochsPerFrame, + secondsPerSlot, + genesisTime, + }); + + const { ao, lido } = await deployMockAccountingOracle(secondsPerSlot, genesisTime); + + await updateLidoLocatorImplementation(locatorAddr, { + lido: await lido.getAddress(), + accountingOracle: await ao.getAddress(), + }); + + const oracleReportSanityChecker = await deployOracleReportSanityCheckerForExitBus(locatorAddr, admin); + + await updateLidoLocatorImplementation(locatorAddr, { + validatorsExitBusOracle: await oracle.getAddress(), + oracleReportSanityChecker: await oracleReportSanityChecker.getAddress(), + }); + + await consensus.setTime(genesisTime + initialEpoch * slotsPerEpoch * secondsPerSlot); + + return { + locatorAddr, + oracle, + consensus, + oracleReportSanityChecker, + }; +} + +interface VEBOConfig { + admin: string; + oracle: 
ValidatorsExitBusOracle; + consensus: HashConsensus__Harness; + dataSubmitter?: string; + consensusVersion?: bigint; + lastProcessingRefSlot?: number; + resumeAfterDeploy?: boolean; +} + +export async function initVEBO({ + admin, + oracle, + consensus, + dataSubmitter = undefined, + consensusVersion = CONSENSUS_VERSION, + lastProcessingRefSlot = 0, + resumeAfterDeploy = false, +}: VEBOConfig) { + const initTx = await oracle.initialize(admin, await consensus.getAddress(), consensusVersion, lastProcessingRefSlot); + + await oracle.grantRole(await oracle.MANAGE_CONSENSUS_CONTRACT_ROLE(), admin); + await oracle.grantRole(await oracle.MANAGE_CONSENSUS_VERSION_ROLE(), admin); + await oracle.grantRole(await oracle.PAUSE_ROLE(), admin); + await oracle.grantRole(await oracle.RESUME_ROLE(), admin); + + if (dataSubmitter) { + await oracle.grantRole(await oracle.SUBMIT_DATA_ROLE(), dataSubmitter); + } + + expect(await oracle.DATA_FORMAT_LIST()).to.equal(DATA_FORMAT_LIST); + + if (resumeAfterDeploy) { + await oracle.resume(); + } + + return initTx; +} diff --git a/test/integration/accounting.integration.ts b/test/integration/accounting.integration.ts index 94b4bc714a..03d22a5f47 100644 --- a/test/integration/accounting.integration.ts +++ b/test/integration/accounting.integration.ts @@ -5,35 +5,22 @@ import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; import { setBalance } from "@nomicfoundation/hardhat-network-helpers"; -import { ether, impersonate, log, ONE_GWEI, trace, updateBalance } from "lib"; +import { ether, impersonate, log, ONE_GWEI, updateBalance } from "lib"; import { getProtocolContext, ProtocolContext } from "lib/protocol"; -import { - finalizeWithdrawalQueue, - getReportTimeElapsed, - norEnsureOperators, - report, - sdvtEnsureOperators, -} from "lib/protocol/helpers"; +import { getReportTimeElapsed, report } from "lib/protocol/helpers"; import { Snapshot } from "test/suite"; -import { - 
CURATED_MODULE_ID, - LIMITER_PRECISION_BASE, - MAX_BASIS_POINTS, - MAX_DEPOSIT, - ONE_DAY, - SHARE_RATE_PRECISION, - SIMPLE_DVT_MODULE_ID, - ZERO_HASH, -} from "test/suite/constants"; - -const AMOUNT = ether("100"); - -describe("Integration: Accounting", () => { + +const LIMITER_PRECISION_BASE = BigInt(10 ** 9); + +const SHARE_RATE_PRECISION = BigInt(10 ** 27); +const ONE_DAY = 86400n; +const MAX_BASIS_POINTS = 10000n; + +describe("Accounting", () => { let ctx: ProtocolContext; let ethHolder: HardhatEthersSigner; - let stEthHolder: HardhatEthersSigner; let snapshot: string; let originalState: string; @@ -41,27 +28,9 @@ describe("Integration: Accounting", () => { before(async () => { ctx = await getProtocolContext(); - [stEthHolder, ethHolder] = await ethers.getSigners(); + [ethHolder] = await ethers.getSigners(); snapshot = await Snapshot.take(); - - const { lido, depositSecurityModule } = ctx.contracts; - - await finalizeWithdrawalQueue(ctx, stEthHolder, ethHolder); - - await norEnsureOperators(ctx, 3n, 5n); - await sdvtEnsureOperators(ctx, 3n, 5n); - - // Deposit node operators - const dsmSigner = await impersonate(depositSecurityModule.address, AMOUNT); - await lido.connect(dsmSigner).deposit(MAX_DEPOSIT, CURATED_MODULE_ID, ZERO_HASH); - await lido.connect(dsmSigner).deposit(MAX_DEPOSIT, SIMPLE_DVT_MODULE_ID, ZERO_HASH); - - await report(ctx, { - clDiff: ether("32") * 6n, // 32 ETH * (3 + 3) validators - clAppearedValidators: 6n, - excludeVaultsBalances: true, - }); }); beforeEach(async () => (originalState = await Snapshot.take())); @@ -134,8 +103,7 @@ describe("Integration: Accounting", () => { const { lido, wstETH } = ctx.contracts; if (!(await lido.sharesOf(wstETH.address))) { const wstEthSigner = await impersonate(wstETH.address, ether("10001")); - const submitTx = await lido.connect(wstEthSigner).submit(ZeroAddress, { value: ether("10000") }); - await trace("lido.submit", submitTx); + await lido.connect(wstEthSigner).submit(ZeroAddress, { value: 
ether("10000") }); } } @@ -147,8 +115,7 @@ describe("Integration: Accounting", () => { while ((await withdrawalQueue.getLastRequestId()) != (await withdrawalQueue.getLastFinalizedRequestId())) { await report(ctx); - const submitTx = await lido.connect(ethHolder).submit(ZeroAddress, { value: ether("10000") }); - await trace("lido.submit", submitTx); + await lido.connect(ethHolder).submit(ZeroAddress, { value: ether("10000") }); } } @@ -249,7 +216,7 @@ describe("Integration: Accounting", () => { expect(sharesRateAfter).to.be.lessThan(sharesRateBefore); const ethDistributedEvent = ctx.getEvents(reportTxReceipt, "ETHDistributed"); - expect(ethDistributedEvent[0].args.preClBalance + REBASE_AMOUNT).to.equal( + expect(ethDistributedEvent[0].args.preCLBalance + REBASE_AMOUNT).to.equal( ethDistributedEvent[0].args.postCLBalance, "ETHDistributed: CL balance differs from expected", ); @@ -351,7 +318,7 @@ describe("Integration: Accounting", () => { expect(sharesRateAfter).to.be.greaterThan(sharesRateBefore, "Shares rate has not increased"); const ethDistributedEvent = ctx.getEvents(reportTxReceipt, "ETHDistributed"); - expect(ethDistributedEvent[0].args.preClBalance + rebaseAmount).to.equal( + expect(ethDistributedEvent[0].args.preCLBalance + rebaseAmount).to.equal( ethDistributedEvent[0].args.postCLBalance, "ETHDistributed: CL balance has not increased", ); @@ -773,8 +740,7 @@ describe("Integration: Accounting", () => { const stethOfShares = await lido.getPooledEthByShares(sharesLimit); const wstEthSigner = await impersonate(wstETH.address, ether("1")); - const approveTx = await lido.connect(wstEthSigner).approve(burner.address, stethOfShares); - await trace("lido.approve", approveTx); + await lido.connect(wstEthSigner).approve(burner.address, stethOfShares); const coverShares = sharesLimit / 3n; const noCoverShares = sharesLimit - sharesLimit / 3n; @@ -782,7 +748,7 @@ describe("Integration: Accounting", () => { const lidoSigner = await impersonate(lido.address); const burnTx 
= await burner.connect(lidoSigner).requestBurnShares(wstETH.address, noCoverShares); - const burnTxReceipt = await trace("burner.requestBurnShares", burnTx); + const burnTxReceipt = (await burnTx.wait()) as ContractTransactionReceipt; const sharesBurntEvent = getFirstEvent(burnTxReceipt, "StETHBurnRequested"); expect(sharesBurntEvent.args.amountOfShares).to.equal(noCoverShares, "StETHBurnRequested: amountOfShares mismatch"); @@ -793,10 +759,7 @@ describe("Integration: Accounting", () => { ); const burnForCoverTx = await burner.connect(lidoSigner).requestBurnSharesForCover(wstETH.address, coverShares); - const burnForCoverTxReceipt = await trace( - "burner.requestBurnSharesForCover", - burnForCoverTx, - ); + const burnForCoverTxReceipt = (await burnForCoverTx.wait()) as ContractTransactionReceipt; const sharesBurntForCoverEvent = getFirstEvent(burnForCoverTxReceipt, "StETHBurnRequested"); expect(sharesBurntForCoverEvent.args.amountOfShares).to.equal(coverShares); @@ -850,8 +813,7 @@ describe("Integration: Accounting", () => { const stethOfShares = await lido.getPooledEthByShares(limitWithExcess); const wstEthSigner = await impersonate(wstETH.address, ether("1")); - const approveTx = await lido.connect(wstEthSigner).approve(burner.address, stethOfShares); - await trace("lido.approve", approveTx); + await lido.connect(wstEthSigner).approve(burner.address, stethOfShares); const coverShares = limit / 3n; const noCoverShares = limit - limit / 3n + excess; @@ -859,7 +821,7 @@ describe("Integration: Accounting", () => { const lidoSigner = await impersonate(lido.address); const burnTx = await burner.connect(lidoSigner).requestBurnShares(wstETH.address, noCoverShares); - const burnTxReceipt = await trace("burner.requestBurnShares", burnTx); + const burnTxReceipt = (await burnTx.wait()) as ContractTransactionReceipt; const sharesBurntEvent = getFirstEvent(burnTxReceipt, "StETHBurnRequested"); expect(sharesBurntEvent.args.amountOfShares).to.equal(noCoverShares, 
"StETHBurnRequested: amountOfShares mismatch"); diff --git a/test/integration/burn-shares.integration.ts b/test/integration/burn-shares.integration.ts index 6b43657a9c..e4268a9ddc 100644 --- a/test/integration/burn-shares.integration.ts +++ b/test/integration/burn-shares.integration.ts @@ -4,9 +4,9 @@ import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; -import { ether, impersonate, log, trace } from "lib"; +import { ether, impersonate, log } from "lib"; import { getProtocolContext, ProtocolContext } from "lib/protocol"; -import { finalizeWithdrawalQueue, handleOracleReport } from "lib/protocol/helpers"; +import { handleOracleReport } from "lib/protocol/helpers"; import { bailOnFailure, Snapshot } from "test/suite"; @@ -14,8 +14,6 @@ describe("Scenario: Burn Shares", () => { let ctx: ProtocolContext; let snapshot: string; - let ethHolder: HardhatEthersSigner; - let stEthHolder: HardhatEthersSigner; let stranger: HardhatEthersSigner; const amount = ether("1"); @@ -26,7 +24,7 @@ describe("Scenario: Burn Shares", () => { before(async () => { ctx = await getProtocolContext(); - [stEthHolder, ethHolder, stranger] = await ethers.getSigners(); + [stranger] = await ethers.getSigners(); snapshot = await Snapshot.take(); }); @@ -35,22 +33,10 @@ describe("Scenario: Burn Shares", () => { after(async () => await Snapshot.restore(snapshot)); - it("Should finalize withdrawal queue", async () => { - const { withdrawalQueue } = ctx.contracts; - - await finalizeWithdrawalQueue(ctx, stEthHolder, ethHolder); - - const lastFinalizedRequestId = await withdrawalQueue.getLastFinalizedRequestId(); - const lastRequestId = await withdrawalQueue.getLastRequestId(); - - expect(lastFinalizedRequestId).to.equal(lastRequestId); - }); - it("Should allow stranger to submit ETH", async () => { const { lido } = ctx.contracts; - const submitTx = await lido.connect(stranger).submit(ZeroAddress, { value: amount }); - await trace("lido.submit", 
submitTx); + await lido.connect(stranger).submit(ZeroAddress, { value: amount }); const stEthBefore = await lido.balanceOf(stranger.address); expect(stEthBefore).to.be.approximately(amount, 10n, "Incorrect stETH balance after submit"); @@ -76,12 +62,10 @@ describe("Scenario: Burn Shares", () => { it("Should burn shares after report", async () => { const { lido, burner } = ctx.contracts; - const approveTx = await lido.connect(stranger).approve(burner.address, ether("1000000")); - await trace("lido.approve", approveTx); + await lido.connect(stranger).approve(burner.address, ether("1000000")); const lidoSigner = await impersonate(lido.address); - const burnTx = await burner.connect(lidoSigner).requestBurnSharesForCover(stranger, sharesToBurn); - await trace("burner.requestBurnSharesForCover", burnTx); + await burner.connect(lidoSigner).requestBurnSharesForCover(stranger, sharesToBurn); const { beaconValidators, beaconBalance } = await lido.getBeaconStat(); diff --git a/test/integration/negative-rebase.integration.ts b/test/integration/negative-rebase.integration.ts index af1dbedb18..1dfc4c61c0 100644 --- a/test/integration/negative-rebase.integration.ts +++ b/test/integration/negative-rebase.integration.ts @@ -4,11 +4,9 @@ import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; import { setBalance } from "@nomicfoundation/hardhat-network-helpers"; -import { ether, impersonate } from "lib"; +import { ether } from "lib"; import { getProtocolContext, ProtocolContext } from "lib/protocol"; -import { report } from "lib/protocol/helpers/accounting"; -import { norEnsureOperators } from "lib/protocol/helpers/nor"; -import { finalizeWithdrawalQueue } from "lib/protocol/helpers/withdrawal"; +import { report } from "lib/protocol/helpers"; import { Snapshot } from "test/suite"; @@ -18,16 +16,17 @@ describe.skip("Negative rebase", () => { let ctx: ProtocolContext; let beforeSnapshot: string; let beforeEachSnapshot: string; - 
let ethHolder, stEthHolder: HardhatEthersSigner; + let ethHolder: HardhatEthersSigner; before(async () => { beforeSnapshot = await Snapshot.take(); ctx = await getProtocolContext(); - [ethHolder, stEthHolder] = await ethers.getSigners(); + [ethHolder] = await ethers.getSigners(); await setBalance(ethHolder.address, ether("1000000")); const network = await ethers.provider.getNetwork(); - console.log("network", network.name); + + // In case of sepolia network, transfer some BEPOLIA tokens to the adapter contract if (network.name == "sepolia" || network.name == "sepolia-fork") { const sepoliaDepositContractAddress = "0x7f02C3E3c98b133055B8B348B2Ac625669Ed295D"; const bepoliaWhaleHolder = "0xf97e180c050e5Ab072211Ad2C213Eb5AEE4DF134"; @@ -39,35 +38,11 @@ describe.skip("Negative rebase", () => { const adapterAddr = await ctx.contracts.stakingRouter.DEPOSIT_CONTRACT(); await bepoliaToken.connect(bepiloaSigner).transfer(adapterAddr, BEPOLIA_TO_TRANSFER); } - const beaconStat = await ctx.contracts.lido.getBeaconStat(); - if (beaconStat.beaconValidators == 0n) { - const MAX_DEPOSIT = 150n; - const CURATED_MODULE_ID = 1n; - const ZERO_HASH = new Uint8Array(32).fill(0); - const { lido, depositSecurityModule } = ctx.contracts; - - await finalizeWithdrawalQueue(ctx, stEthHolder, ethHolder); - - await norEnsureOperators(ctx, 3n, 5n); - - const dsmSigner = await impersonate(depositSecurityModule.address, ether("100")); - await lido.connect(dsmSigner).deposit(MAX_DEPOSIT, CURATED_MODULE_ID, ZERO_HASH); - - await report(ctx, { - clDiff: ether("32") * 3n, - clAppearedValidators: 3n, - excludeVaultsBalances: true, - }); - } }); - after(async () => { - await Snapshot.restore(beforeSnapshot); - }); + after(async () => await Snapshot.restore(beforeSnapshot)); - beforeEach(async () => { - beforeEachSnapshot = await Snapshot.take(); - }); + beforeEach(async () => (beforeEachSnapshot = await Snapshot.take())); afterEach(async () => await Snapshot.restore(beforeEachSnapshot)); diff --git 
a/test/integration/protocol-happy-path.integration.ts b/test/integration/protocol-happy-path.integration.ts index e13d3bf135..5b32f87839 100644 --- a/test/integration/protocol-happy-path.integration.ts +++ b/test/integration/protocol-happy-path.integration.ts @@ -4,36 +4,38 @@ import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; -import { batch, ether, impersonate, log, trace, updateBalance } from "lib"; +import { batch, ether, impersonate, log, updateBalance } from "lib"; import { getProtocolContext, ProtocolContext } from "lib/protocol"; import { finalizeWithdrawalQueue, norEnsureOperators, - OracleReportParams, + OracleReportOptions, report, sdvtEnsureOperators, } from "lib/protocol/helpers"; import { bailOnFailure, Snapshot } from "test/suite"; -import { MAX_DEPOSIT, ZERO_HASH } from "test/suite/constants"; const AMOUNT = ether("100"); +const MAX_DEPOSIT = 150n; -describe("Scenario: Protocol Happy Path", () => { +const ZERO_HASH = new Uint8Array(32).fill(0); + +describe("Protocol Happy Path", () => { let ctx: ProtocolContext; let snapshot: string; - let ethHolder: HardhatEthersSigner; let stEthHolder: HardhatEthersSigner; let stranger: HardhatEthersSigner; let uncountedStETHShares: bigint; let amountWithRewards: bigint; + let depositCount: bigint; before(async () => { ctx = await getProtocolContext(); - [stEthHolder, ethHolder, stranger] = await ethers.getSigners(); + [stEthHolder, stranger] = await ethers.getSigners(); snapshot = await Snapshot.take(); }); @@ -53,7 +55,15 @@ describe("Scenario: Protocol Happy Path", () => { it("Should finalize withdrawal queue", async () => { const { lido, withdrawalQueue } = ctx.contracts; - await finalizeWithdrawalQueue(ctx, stEthHolder, ethHolder); + const stEthHolderAmount = ether("1000"); + + // Deposit some eth + await lido.connect(stEthHolder).submit(ZeroAddress, { value: stEthHolderAmount }); + + const stEthHolderBalance = await 
lido.balanceOf(stEthHolder.address); + expect(stEthHolderBalance).to.approximately(stEthHolderAmount, 10n, "stETH balance increased"); + + await finalizeWithdrawalQueue(ctx); const lastFinalizedRequestId = await withdrawalQueue.getLastFinalizedRequestId(); const lastRequestId = await withdrawalQueue.getLastRequestId(); @@ -62,11 +72,8 @@ describe("Scenario: Protocol Happy Path", () => { uncountedStETHShares = await lido.sharesOf(withdrawalQueue.address); // Added to facilitate the burner transfers - const approveTx = await lido.connect(stEthHolder).approve(withdrawalQueue.address, 1000n); - await trace("lido.approve", approveTx); - - const requestWithdrawalsTx = await withdrawalQueue.connect(stEthHolder).requestWithdrawals([1000n], stEthHolder); - await trace("withdrawalQueue.requestWithdrawals", requestWithdrawalsTx); + await lido.connect(stEthHolder).approve(withdrawalQueue.address, 1000n); + await withdrawalQueue.connect(stEthHolder).requestWithdrawals([1000n], stEthHolder); expect(lastFinalizedRequestId).to.equal(lastRequestId); }); @@ -118,7 +125,7 @@ describe("Scenario: Protocol Happy Path", () => { }); const tx = await lido.connect(stranger).submit(ZeroAddress, { value: AMOUNT }); - const receipt = await trace("lido.submit", tx); + const receipt = (await tx.wait()) as ContractTransactionReceipt; expect(receipt).not.to.be.null; @@ -182,14 +189,16 @@ describe("Scenario: Protocol Happy Path", () => { ); } else { expect(stakingLimitAfterSubmit).to.equal( - stakingLimitBeforeSubmit - AMOUNT + BigInt(growthPerBlock), + stakingLimitBeforeSubmit - AMOUNT + growthPerBlock, "Staking limit after submit", ); } }); it("Should deposit to staking modules", async () => { - const { lido, withdrawalQueue, stakingRouter, depositSecurityModule } = ctx.contracts; + const { lido, withdrawalQueue, stakingRouter } = ctx.contracts; + + const { depositSecurityModule } = ctx.contracts; const withdrawalsUninitializedStETH = await withdrawalQueue.unfinalizedStETH(); const 
depositableEther = await lido.getDepositableEther(); @@ -208,11 +217,11 @@ describe("Scenario: Protocol Happy Path", () => { const dsmSigner = await impersonate(depositSecurityModule.address, ether("100")); const stakingModules = await stakingRouter.getStakingModules(); - let depositCount = 0n; + depositCount = 0n; let expectedBufferedEtherAfterDeposit = bufferedEtherBeforeDeposit; for (const module of stakingModules) { const depositTx = await lido.connect(dsmSigner).deposit(MAX_DEPOSIT, module.id, ZERO_HASH); - const depositReceipt = await trace(`lido.deposit (${module.name})`, depositTx); + const depositReceipt = (await depositTx.wait()) as ContractTransactionReceipt; const unbufferedEvent = ctx.getEvents(depositReceipt, "Unbuffered")[0]; const unbufferedAmount = unbufferedEvent?.args[0] || 0n; const deposits = unbufferedAmount / ether("32"); @@ -282,11 +291,10 @@ describe("Scenario: Protocol Happy Path", () => { const treasuryBalanceBeforeRebase = await lido.sharesOf(treasuryAddress); - // Stranger deposited 100 ETH, enough to deposit 3 validators, need to reflect this in the report - // 0.01 ETH is added to the clDiff to simulate some rewards - const reportData: Partial = { - clDiff: ether("96.01"), - clAppearedValidators: 3n, + // 0.001 – to simulate rewards + const reportData: Partial = { + clDiff: ether("32") * depositCount + ether("0.001"), + clAppearedValidators: depositCount, }; const { reportTx, extraDataTx } = (await report(ctx, reportData)) as { @@ -416,7 +424,7 @@ describe("Scenario: Protocol Happy Path", () => { amountWithRewards = balanceBeforeRequest.stETH; const approveTx = await lido.connect(stranger).approve(withdrawalQueue.address, amountWithRewards); - const approveTxReceipt = await trace("lido.approve", approveTx); + const approveTxReceipt = (await approveTx.wait()) as ContractTransactionReceipt; const approveEvent = ctx.getEvents(approveTxReceipt, "Approval")[0]; @@ -432,11 +440,7 @@ describe("Scenario: Protocol Happy Path", () => { const 
lastRequestIdBefore = await withdrawalQueue.getLastRequestId(); const withdrawalTx = await withdrawalQueue.connect(stranger).requestWithdrawals([amountWithRewards], stranger); - const withdrawalTxReceipt = await trace( - "withdrawalQueue.requestWithdrawals", - withdrawalTx, - ); - + const withdrawalTxReceipt = (await withdrawalTx.wait()) as ContractTransactionReceipt; const withdrawalEvent = ctx.getEvents(withdrawalTxReceipt, "WithdrawalRequested")[0]; expect(withdrawalEvent?.args.toObject()).to.deep.include( @@ -582,12 +586,11 @@ describe("Scenario: Protocol Happy Path", () => { expect(claimableEtherBeforeClaim).to.equal(amountWithRewards, "Claimable ether before claim"); const claimTx = await withdrawalQueue.connect(stranger).claimWithdrawals([requestId], hints); - const claimTxReceipt = await trace("withdrawalQueue.claimWithdrawals", claimTx); + const claimTxReceipt = (await claimTx.wait()) as ContractTransactionReceipt; + const claimEvent = ctx.getEvents(claimTxReceipt, "WithdrawalClaimed")[0]; const spentGas = claimTxReceipt.gasUsed * claimTxReceipt.gasPrice; - const claimEvent = ctx.getEvents(claimTxReceipt, "WithdrawalClaimed")[0]; - expect(claimEvent?.args.toObject()).to.deep.include( { requestId, diff --git a/test/integration/second-opinion.integration.ts b/test/integration/second-opinion.integration.ts index 673097ed91..b795feeed9 100644 --- a/test/integration/second-opinion.integration.ts +++ b/test/integration/second-opinion.integration.ts @@ -1,13 +1,11 @@ import { expect } from "chai"; import { ethers } from "hardhat"; -import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; - import { SecondOpinionOracle__Mock } from "typechain-types"; import { ether, impersonate, log, ONE_GWEI } from "lib"; import { getProtocolContext, ProtocolContext } from "lib/protocol"; -import { finalizeWithdrawalQueue, norEnsureOperators, report, sdvtEnsureOperators } from "lib/protocol/helpers"; +import { report } from "lib/protocol/helpers"; import { 
bailOnFailure, Snapshot } from "test/suite"; @@ -26,9 +24,6 @@ function getDiffAmount(totalSupply: bigint): bigint { describe("Integration: Second opinion", () => { let ctx: ProtocolContext; - let ethHolder: HardhatEthersSigner; - let stEthHolder: HardhatEthersSigner; - let snapshot: string; let originalState: string; @@ -38,17 +33,10 @@ describe("Integration: Second opinion", () => { before(async () => { ctx = await getProtocolContext(); - [stEthHolder, ethHolder] = await ethers.getSigners(); - snapshot = await Snapshot.take(); const { lido, depositSecurityModule, oracleReportSanityChecker } = ctx.contracts; - await finalizeWithdrawalQueue(ctx, stEthHolder, ethHolder); - - await norEnsureOperators(ctx, 3n, 5n); - await sdvtEnsureOperators(ctx, 3n, 5n); - const { chainId } = await ethers.provider.getNetwork(); // Sepolia-specific initialization if (chainId === 11155111n) { @@ -63,6 +51,7 @@ describe("Integration: Second opinion", () => { const adapterAddr = await ctx.contracts.stakingRouter.DEPOSIT_CONTRACT(); await bepoliaToken.connect(bepiloaSigner).transfer(adapterAddr, BEPOLIA_TO_TRANSFER); } + const dsmSigner = await impersonate(depositSecurityModule.address, AMOUNT); await lido.connect(dsmSigner).deposit(MAX_DEPOSIT, CURATED_MODULE_ID, ZERO_HASH); diff --git a/yarn.lock b/yarn.lock index ec11548129..85a680e238 100644 --- a/yarn.lock +++ b/yarn.lock @@ -11834,11 +11834,11 @@ __metadata: linkType: hard "undici@npm:^5.14.0": - version: 5.28.4 - resolution: "undici@npm:5.28.4" + version: 5.28.5 + resolution: "undici@npm:5.28.5" dependencies: "@fastify/busboy": "npm:^2.0.0" - checksum: 10c0/08d0f2596553aa0a54ca6e8e9c7f45aef7d042c60918564e3a142d449eda165a80196f6ef19ea2ef2e6446959e293095d8e40af1236f0d67223b06afac5ecad7 + checksum: 10c0/4dfaa13089fe4c0758f84ec0d34b257e58608e6be3aa540f493b9864b39e3fdcd0a1ace38e434fe79db55f833aa30bcfddd8d6cbe3e0982b0dcae8ec17b65e08 languageName: node linkType: hard