diff --git a/.gitignore b/.gitignore index 93136e5d..7c357a61 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,7 @@ # VIM *.swp *.idea/ -*keystore/ \ No newline at end of file +*keystore/ + +# git worktrees +.trees/ diff --git a/CHANGELOG.md b/CHANGELOG.md index 37cafcef..b1b91a0c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,23 +9,23 @@ This is the General Availability (GA) release of the Filecoin Warm Storage Servi ## Core Contracts - Mainnet -1. Payments Contract: [0x23b1e018F08BB982348b15a86ee926eEBf7F4DAa](https://filfox.info/en/address/0x23b1e018F08BB982348b15a86ee926eEBf7F4DAa) +1. Payments Contract: [0x23b1e018F08BB982348b15a86ee926eEBf7F4DAa](https://filecoin.blockscout.com/address/0x23b1e018F08BB982348b15a86ee926eEBf7F4DAa) - From [Filecoin-Pay v1.0.0](https://github.com/FilOzone/filecoin-pay/releases/tag/v1.0.0) -2. PDPVerifier Implementation: [0xe2Dc211BffcA499761570E04e8143Be2BA66095f](https://filfox.info/en/address/0xe2Dc211BffcA499761570E04e8143Be2BA66095f) +2. PDPVerifier Implementation: [0xe2Dc211BffcA499761570E04e8143Be2BA66095f](https://filecoin.blockscout.com/address/0xe2Dc211BffcA499761570E04e8143Be2BA66095f) - From [PDP v3.1.0](https://github.com/FilOzone/pdp/releases/tag/v3.1.0) -3. PDPVerifier Proxy: [0xBADd0B92C1c71d02E7d520f64c0876538fa2557F](https://filfox.info/en/address/0xBADd0B92C1c71d02E7d520f64c0876538fa2557F) +3. PDPVerifier Proxy: [0xBADd0B92C1c71d02E7d520f64c0876538fa2557F](https://filecoin.blockscout.com/address/0xBADd0B92C1c71d02E7d520f64c0876538fa2557F) - From [PDP v3.1.0](https://github.com/FilOzone/pdp/releases/tag/v3.1.0) -4. SessionKeyRegistry: [0x74FD50525A958aF5d484601E252271f9625231aB](https://filfox.info/en/address/0x74FD50525A958aF5d484601E252271f9625231aB) -5. ServiceProviderRegistry Implementation: [0xe255D3a89D6B326b48bc0fC94a472A839471D6B0](https://filfox.info/en/address/0xe255D3a89D6B326b48bc0fC94a472A839471D6B0) -6. 
ServiceProviderRegistry Proxy: [0xf55dDbf63F1b55c3F1D4FA7e339a68AB7b64A5eB](https://filfox.info/en/address/0xf55dDbf63F1b55c3F1D4FA7e339a68AB7b64A5eB) -7. FilecoinWarmStorageService Implementation: [0xd60b90f6D3C42B26a246E141ec701a20Dde2fA61](https://filfox.info/en/address/0xd60b90f6D3C42B26a246E141ec701a20Dde2fA61) -8. FilecoinWarmStorageService Proxy: [0x8408502033C418E1bbC97cE9ac48E5528F371A9f](https://filfox.info/en/address/0x8408502033C418E1bbC97cE9ac48E5528F371A9f) -9. FilecoinWarmStorageServiceStateView: [0x9e4e6699d8F67dFc883d6b0A7344Bd56F7E80B46](https://filfox.info/en/address/0x9e4e6699d8F67dFc883d6b0A7344Bd56F7E80B46) +4. SessionKeyRegistry: [0x74FD50525A958aF5d484601E252271f9625231aB](https://filecoin.blockscout.com/address/0x74FD50525A958aF5d484601E252271f9625231aB) +5. ServiceProviderRegistry Implementation: [0xe255D3a89D6B326b48bc0fC94a472A839471D6B0](https://filecoin.blockscout.com/address/0xe255D3a89D6B326b48bc0fC94a472A839471D6B0) +6. ServiceProviderRegistry Proxy: [0xf55dDbf63F1b55c3F1D4FA7e339a68AB7b64A5eB](https://filecoin.blockscout.com/address/0xf55dDbf63F1b55c3F1D4FA7e339a68AB7b64A5eB) +7. FilecoinWarmStorageService Implementation: [0xd60b90f6D3C42B26a246E141ec701a20Dde2fA61](https://filecoin.blockscout.com/address/0xd60b90f6D3C42B26a246E141ec701a20Dde2fA61) +8. FilecoinWarmStorageService Proxy: [0x8408502033C418E1bbC97cE9ac48E5528F371A9f](https://filecoin.blockscout.com/address/0x8408502033C418E1bbC97cE9ac48E5528F371A9f) +9. 
FilecoinWarmStorageServiceStateView: [0x9e4e6699d8F67dFc883d6b0A7344Bd56F7E80B46](https://filecoin.blockscout.com/address/0x9e4e6699d8F67dFc883d6b0A7344Bd56F7E80B46) Configuration: -- USDFC Token: [0x80B98d3aa09ffff255c3ba4A241111Ff1262F045](https://filfox.info/en/address/0x80B98d3aa09ffff255c3ba4A241111Ff1262F045) -- FILBEAM_BENEFICIARY_ADDRESS: [0x1D60d2F5960Af6341e842C539985FA297E10d6eA](https://calibration.filfox.info/en/address/0x1D60d2F5960Af6341e842C539985FA297E10d6eA) -- FILBEAM_CONTROLLER_ADDRESS: [0x5f7E5E2A756430EdeE781FF6e6F7954254Ef629A](https://calibration.filfox.info/en/address/0x5f7E5E2A756430EdeE781FF6e6F7954254Ef629A) +- USDFC Token: [0x80B98d3aa09ffff255c3ba4A241111Ff1262F045](https://filecoin.blockscout.com/address/0x80B98d3aa09ffff255c3ba4A241111Ff1262F045) +- FILBEAM_BENEFICIARY_ADDRESS: [0x1D60d2F5960Af6341e842C539985FA297E10d6eA](https://filecoin-testnet.blockscout.com/address/0x1D60d2F5960Af6341e842C539985FA297E10d6eA) +- FILBEAM_CONTROLLER_ADDRESS: [0x5f7E5E2A756430EdeE781FF6e6F7954254Ef629A](https://filecoin-testnet.blockscout.com/address/0x5f7E5E2A756430EdeE781FF6e6F7954254Ef629A) - CHALLENGE_FINALITY: 150 epochs - MAX_PROVING_PERIOD: 2880 epochs - CHALLENGE_WINDOW_SIZE: 20 epochs @@ -34,23 +34,23 @@ Configuration: ## Core Contracts - Calibration Network -1. Payments Contract: [0x09a0fDc2723fAd1A7b8e3e00eE5DF73841df55a0](https://calibration.filfox.info/en/address/0x09a0fDc2723fAd1A7b8e3e00eE5DF73841df55a0) +1. Payments Contract: [0x09a0fDc2723fAd1A7b8e3e00eE5DF73841df55a0](https://filecoin-testnet.blockscout.com/address/0x09a0fDc2723fAd1A7b8e3e00eE5DF73841df55a0) - From [Filecoin-Pay v1.0.0](https://github.com/FilOzone/filecoin-pay/releases/tag/v1.0.0) -2. PDPVerifier Implementation: [0x2355Cb19BA1eFF51673562E1a5fc5eE292AF9D42](https://calibration.filfox.info/en/address/0x2355Cb19BA1eFF51673562E1a5fc5eE292AF9D42) +2. 
PDPVerifier Implementation: [0x2355Cb19BA1eFF51673562E1a5fc5eE292AF9D42](https://filecoin-testnet.blockscout.com/address/0x2355Cb19BA1eFF51673562E1a5fc5eE292AF9D42) - From [PDP v3.1.0](https://github.com/FilOzone/pdp/releases/tag/v3.1.0) -3. PDPVerifier Proxy: [0x85e366Cf9DD2c0aE37E963d9556F5f4718d6417C](https://calibration.filfox.info/en/address/0x85e366Cf9DD2c0aE37E963d9556F5f4718d6417C) +3. PDPVerifier Proxy: [0x85e366Cf9DD2c0aE37E963d9556F5f4718d6417C](https://filecoin-testnet.blockscout.com/address/0x85e366Cf9DD2c0aE37E963d9556F5f4718d6417C) - From [PDP v3.1.0](https://github.com/FilOzone/pdp/releases/tag/v3.1.0) -4. SessionKeyRegistry: [0x97Dd879F5a97A8c761B94746d7F5cfF50AAd4452](https://calibration.filfox.info/en/address/0x97Dd879F5a97A8c761B94746d7F5cfF50AAd4452) -5. ServiceProviderRegistry Implementation: [0xb32Bb530638d20f1B59B40CDD2Ce4208430f7DE3](https://calibration.filfox.info/en/address/0xb32Bb530638d20f1B59B40CDD2Ce4208430f7DE3) -6. ServiceProviderRegistry Proxy: [0x839e5c9988e4e9977d40708d0094103c0839Ac9D](https://calibration.filfox.info/en/address/0x839e5c9988e4e9977d40708d0094103c0839Ac9D) -7. FilecoinWarmStorageService Implementation: [0x4BCc752555Bf08A5Bd9a4Ce467a12607277450bA](https://calibration.filfox.info/en/address/0x4BCc752555Bf08A5Bd9a4Ce467a12607277450bA) -8. FilecoinWarmStorageService Proxy: [0x02925630df557F957f70E112bA06e50965417CA0](https://calibration.filfox.info/en/address/0x02925630df557F957f70E112bA06e50965417CA0) -9. FilecoinWarmStorageServiceStateView: [0xA5D87b04086B1d591026cCE10255351B5AA4689B](https://calibration.filfox.info/en/address/0xA5D87b04086B1d591026cCE10255351B5AA4689B) +4. SessionKeyRegistry: [0x518411c2062E119Aaf7A8B12A2eDf9a939347655](https://filecoin-testnet.blockscout.com/address/0x518411c2062E119Aaf7A8B12A2eDf9a939347655) +5. ServiceProviderRegistry Implementation: [0xb32Bb530638d20f1B59B40CDD2Ce4208430f7DE3](https://filecoin-testnet.blockscout.com/address/0xb32Bb530638d20f1B59B40CDD2Ce4208430f7DE3) +6. 
ServiceProviderRegistry Proxy: [0x839e5c9988e4e9977d40708d0094103c0839Ac9D](https://filecoin-testnet.blockscout.com/address/0x839e5c9988e4e9977d40708d0094103c0839Ac9D) +7. FilecoinWarmStorageService Implementation: [0x1cAeE5EfCfc3681C2bBF689Ccb30d70c6e45F49f](https://filecoin-testnet.blockscout.com/address/0x1cAeE5EfCfc3681C2bBF689Ccb30d70c6e45F49f) +8. FilecoinWarmStorageService Proxy: [0x02925630df557F957f70E112bA06e50965417CA0](https://filecoin-testnet.blockscout.com/address/0x02925630df557F957f70E112bA06e50965417CA0) +9. FilecoinWarmStorageServiceStateView: [0xA5D87b04086B1d591026cCE10255351B5AA4689B](https://filecoin-testnet.blockscout.com/address/0xA5D87b04086B1d591026cCE10255351B5AA4689B) Configuration: -- USDFC Token: [0xb3042734b608a1B16e9e86B374A3f3e389B4cDf0](https://calibration.filfox.info/en/address/0xb3042734b608a1B16e9e86B374A3f3e389B4cDf0) -- FILBEAM_BENEFICIARY_ADDRESS: [0x1D60d2F5960Af6341e842C539985FA297E10d6eA](https://calibration.filfox.info/en/address/0x1D60d2F5960Af6341e842C539985FA297E10d6eA) -- FILBEAM_CONTROLLER_ADDRESS: [0x5f7E5E2A756430EdeE781FF6e6F7954254Ef629A](https://calibration.filfox.info/en/address/0x5f7E5E2A756430EdeE781FF6e6F7954254Ef629A) +- USDFC Token: [0xb3042734b608a1B16e9e86B374A3f3e389B4cDf0](https://filecoin-testnet.blockscout.com/address/0xb3042734b608a1B16e9e86B374A3f3e389B4cDf0) +- FILBEAM_BENEFICIARY_ADDRESS: [0x1D60d2F5960Af6341e842C539985FA297E10d6eA](https://filecoin-testnet.blockscout.com/address/0x1D60d2F5960Af6341e842C539985FA297E10d6eA) +- FILBEAM_CONTROLLER_ADDRESS: [0x5f7E5E2A756430EdeE781FF6e6F7954254Ef629A](https://filecoin-testnet.blockscout.com/address/0x5f7E5E2A756430EdeE781FF6e6F7954254Ef629A) - CHALLENGE_FINALITY: 10 epochs - MAX_PROVING_PERIOD: 240 epochs - CHALLENGE_WINDOW_SIZE: 20 epochs @@ -129,23 +129,23 @@ Configuration: ## Core Contracts -1. Payments Contract: [0x6dB198201F900c17e86D267d7Df82567FB03df5E](https://calibration.filfox.info/en/address/0x6dB198201F900c17e86D267d7Df82567FB03df5E) +1. 
Payments Contract: [0x6dB198201F900c17e86D267d7Df82567FB03df5E](https://filecoin-testnet.blockscout.com/address/0x6dB198201F900c17e86D267d7Df82567FB03df5E) - From [Filecoin-Pay v0.6.0](https://github.com/FilOzone/filecoin-pay/releases/tag/v0.6.0) -2. PDPVerifier Implementation: [0x4EC9a8ae6e6A419056b6C332509deEA371b182EF](https://calibration.filfox.info/en/address/0x4EC9a8ae6e6A419056b6C332509deEA371b182EF) +2. PDPVerifier Implementation: [0x4EC9a8ae6e6A419056b6C332509deEA371b182EF](https://filecoin-testnet.blockscout.com/address/0x4EC9a8ae6e6A419056b6C332509deEA371b182EF) - From [PDP v2.2.1](https://github.com/FilOzone/pdp/releases/tag/v2.2.1) -3. PDPVerifier Proxy: [0x579dD9E561D4Cd1776CF3e52E598616E77D5FBcb](https://calibration.filfox.info/en/address/0x579dD9E561D4Cd1776CF3e52E598616E77D5FBcb) +3. PDPVerifier Proxy: [0x579dD9E561D4Cd1776CF3e52E598616E77D5FBcb](https://filecoin-testnet.blockscout.com/address/0x579dD9E561D4Cd1776CF3e52E598616E77D5FBcb) - From [PDP v2.2.1](https://github.com/FilOzone/pdp/releases/tag/v2.2.1) -4. SessionKeyRegistry: [0x97Dd879F5a97A8c761B94746d7F5cfF50AAd4452](https://calibration.filfox.info/en/address/0x97Dd879F5a97A8c761B94746d7F5cfF50AAd4452) -5. ServiceProviderRegistry Implementation: [0x5672fE3B5366819B4Bd2F538A2CAEA11f0b2Aff5](https://calibration.filfox.info/en/address/0x5672fE3B5366819B4Bd2F538A2CAEA11f0b2Aff5) -6. ServiceProviderRegistry Proxy: [0x1096ba1e7BB912136DA8524A22bF71091dc4FDd9](https://calibration.filfox.info/en/address/0x1096ba1e7BB912136DA8524A22bF71091dc4FDd9) -7. FilecoinWarmStorageService Implementation: [0x6B78a026309bc2659c5891559D412FA1BA6529A5](https://calibration.filfox.info/en/address/0x6B78a026309bc2659c5891559D412FA1BA6529A5) -8. FilecoinWarmStorageService Proxy: [0x468342072e0dc86AFFBe15519bc5B1A1aa86e4dc](https://calibration.filfox.info/en/address/0x468342072e0dc86AFFBe15519bc5B1A1aa86e4dc) -9. 
FilecoinWarmStorageServiceStateView: [0xE4587AAdB97d7B8197aa08E432bAD0D9Cfe3a17F](https://calibration.filfox.info/en/address/0xE4587AAdB97d7B8197aa08E432bAD0D9Cfe3a17F) +4. SessionKeyRegistry: [0x97Dd879F5a97A8c761B94746d7F5cfF50AAd4452](https://filecoin-testnet.blockscout.com/address/0x97Dd879F5a97A8c761B94746d7F5cfF50AAd4452) +5. ServiceProviderRegistry Implementation: [0x5672fE3B5366819B4Bd2F538A2CAEA11f0b2Aff5](https://filecoin-testnet.blockscout.com/address/0x5672fE3B5366819B4Bd2F538A2CAEA11f0b2Aff5) +6. ServiceProviderRegistry Proxy: [0x1096ba1e7BB912136DA8524A22bF71091dc4FDd9](https://filecoin-testnet.blockscout.com/address/0x1096ba1e7BB912136DA8524A22bF71091dc4FDd9) +7. FilecoinWarmStorageService Implementation: [0x6B78a026309bc2659c5891559D412FA1BA6529A5](https://filecoin-testnet.blockscout.com/address/0x6B78a026309bc2659c5891559D412FA1BA6529A5) +8. FilecoinWarmStorageService Proxy: [0x468342072e0dc86AFFBe15519bc5B1A1aa86e4dc](https://filecoin-testnet.blockscout.com/address/0x468342072e0dc86AFFBe15519bc5B1A1aa86e4dc) +9. 
FilecoinWarmStorageServiceStateView: [0xE4587AAdB97d7B8197aa08E432bAD0D9Cfe3a17F](https://filecoin-testnet.blockscout.com/address/0xE4587AAdB97d7B8197aa08E432bAD0D9Cfe3a17F) Configuration: -- USDFC Token: [0xb3042734b608a1B16e9e86B374A3f3e389B4cDf0](https://calibration.filfox.info/en/address/0xb3042734b608a1B16e9e86B374A3f3e389B4cDf0) -- FILBEAM_BENEFICIARY_ADDRESS: [0x1D60d2F5960Af6341e842C539985FA297E10d6eA](https://calibration.filfox.info/en/address/0x1D60d2F5960Af6341e842C539985FA297E10d6eA) -- FILBEAM_CONTROLLER_ADDRESS: [0x5f7E5E2A756430EdeE781FF6e6F7954254Ef629A](https://calibration.filfox.info/en/address/0x5f7E5E2A756430EdeE781FF6e6F7954254Ef629A) +- USDFC Token: [0xb3042734b608a1B16e9e86B374A3f3e389B4cDf0](https://filecoin-testnet.blockscout.com/address/0xb3042734b608a1B16e9e86B374A3f3e389B4cDf0) +- FILBEAM_BENEFICIARY_ADDRESS: [0x1D60d2F5960Af6341e842C539985FA297E10d6eA](https://filecoin-testnet.blockscout.com/address/0x1D60d2F5960Af6341e842C539985FA297E10d6eA) +- FILBEAM_CONTROLLER_ADDRESS: [0x5f7E5E2A756430EdeE781FF6e6F7954254Ef629A](https://filecoin-testnet.blockscout.com/address/0x5f7E5E2A756430EdeE781FF6e6F7954254Ef629A) - CHALLENGE_FINALITY: 10 epochs - MAX_PROVING_PERIOD: 240 epochs - CHALLENGE_WINDOW_SIZE: 30 epochs @@ -165,23 +165,23 @@ Configuration: ## Core Contracts ### Calibration Network: -1. Payments Contract: [0x6dB198201F900c17e86D267d7Df82567FB03df5E](https://calibration.filfox.info/en/address/0x6dB198201F900c17e86D267d7Df82567FB03df5E) +1. Payments Contract: [0x6dB198201F900c17e86D267d7Df82567FB03df5E](https://filecoin-testnet.blockscout.com/address/0x6dB198201F900c17e86D267d7Df82567FB03df5E) - From [Filecoin-Pay v0.6.0](https://github.com/FilOzone/filecoin-pay/releases/tag/v0.6.0) -2. PDPVerifier Implementation: [0xCa92b746a7af215e0AaC7D0F956d74B522b295b6](https://calibration.filfox.info/en/address/0xCa92b746a7af215e0AaC7D0F956d74B522b295b6) +2. 
PDPVerifier Implementation: [0xCa92b746a7af215e0AaC7D0F956d74B522b295b6](https://filecoin-testnet.blockscout.com/address/0xCa92b746a7af215e0AaC7D0F956d74B522b295b6) - From [PDP v2.2.0](https://github.com/FilOzone/pdp/releases/tag/v2.2.0) -3. PDPVerifier Proxy: [0x9ecb84bB617a6Fd9911553bE12502a1B091CdfD8](https://calibration.filfox.info/en/address/0x9ecb84bB617a6Fd9911553bE12502a1B091CdfD8) +3. PDPVerifier Proxy: [0x9ecb84bB617a6Fd9911553bE12502a1B091CdfD8](https://filecoin-testnet.blockscout.com/address/0x9ecb84bB617a6Fd9911553bE12502a1B091CdfD8) - From [PDP v2.2.0](https://github.com/FilOzone/pdp/releases/tag/v2.2.0) -4. SessionKeyRegistry: [0x97Dd879F5a97A8c761B94746d7F5cfF50AAd4452](https://calibration.filfox.info/en/address/0x97Dd879F5a97A8c761B94746d7F5cfF50AAd4452) -5. ServiceProviderRegistry Implementation: [0xEdc9A41371d69a736bEfBa7678007BDBA61425E5](https://calibration.filfox.info/en/address/0xEdc9A41371d69a736bEfBa7678007BDBA61425E5) -6. ServiceProviderRegistry Proxy: [0xA8a7e2130C27e4f39D1aEBb3D538D5937bCf8ddb](https://calibration.filfox.info/en/address/0xA8a7e2130C27e4f39D1aEBb3D538D5937bCf8ddb) -7. FilecoinWarmStorageService Implementation: [0x2d76e3A41fa4614D1840CEB73aa07c5d0af6a023](https://calibration.filfox.info/en/address/0x2d76e3A41fa4614D1840CEB73aa07c5d0af6a023) -8. FilecoinWarmStorageService Proxy: [0x9ef4cAb0aD0D19b8Df28791Df80b29bC784bE91b](https://calibration.filfox.info/en/address/0x9ef4cAb0aD0D19b8Df28791Df80b29bC784bE91b) -9. FilecoinWarmStorageServiceStateView: [0x7175a72479e2B0050ed310f1a49a517C03573547](https://calibration.filfox.info/en/address/0x7175a72479e2B0050ed310f1a49a517C03573547) +4. SessionKeyRegistry: [0x97Dd879F5a97A8c761B94746d7F5cfF50AAd4452](https://filecoin-testnet.blockscout.com/address/0x97Dd879F5a97A8c761B94746d7F5cfF50AAd4452) +5. ServiceProviderRegistry Implementation: [0xEdc9A41371d69a736bEfBa7678007BDBA61425E5](https://filecoin-testnet.blockscout.com/address/0xEdc9A41371d69a736bEfBa7678007BDBA61425E5) +6. 
ServiceProviderRegistry Proxy: [0xA8a7e2130C27e4f39D1aEBb3D538D5937bCf8ddb](https://filecoin-testnet.blockscout.com/address/0xA8a7e2130C27e4f39D1aEBb3D538D5937bCf8ddb) +7. FilecoinWarmStorageService Implementation: [0x2d76e3A41fa4614D1840CEB73aa07c5d0af6a023](https://filecoin-testnet.blockscout.com/address/0x2d76e3A41fa4614D1840CEB73aa07c5d0af6a023) +8. FilecoinWarmStorageService Proxy: [0x9ef4cAb0aD0D19b8Df28791Df80b29bC784bE91b](https://filecoin-testnet.blockscout.com/address/0x9ef4cAb0aD0D19b8Df28791Df80b29bC784bE91b) +9. FilecoinWarmStorageServiceStateView: [0x7175a72479e2B0050ed310f1a49a517C03573547](https://filecoin-testnet.blockscout.com/address/0x7175a72479e2B0050ed310f1a49a517C03573547) Configuration: -- USDFC Token: [0xb3042734b608a1B16e9e86B374A3f3e389B4cDf0](https://calibration.filfox.info/en/address/0xb3042734b608a1B16e9e86B374A3f3e389B4cDf0) -- FILBEAM_BENEFICIARY_ADDRESS: [0x1D60d2F5960Af6341e842C539985FA297E10d6eA](https://calibration.filfox.info/en/address/0x1D60d2F5960Af6341e842C539985FA297E10d6eA) -- FILBEAM_CONTROLLER_ADDRESS: [0x5f7E5E2A756430EdeE781FF6e6F7954254Ef629A](https://calibration.filfox.info/en/address/0x5f7E5E2A756430EdeE781FF6e6F7954254Ef629A) +- USDFC Token: [0xb3042734b608a1B16e9e86B374A3f3e389B4cDf0](https://filecoin-testnet.blockscout.com/address/0xb3042734b608a1B16e9e86B374A3f3e389B4cDf0) +- FILBEAM_BENEFICIARY_ADDRESS: [0x1D60d2F5960Af6341e842C539985FA297E10d6eA](https://filecoin-testnet.blockscout.com/address/0x1D60d2F5960Af6341e842C539985FA297E10d6eA) +- FILBEAM_CONTROLLER_ADDRESS: [0x5f7E5E2A756430EdeE781FF6e6F7954254Ef629A](https://filecoin-testnet.blockscout.com/address/0x5f7E5E2A756430EdeE781FF6e6F7954254Ef629A) - CHALLENGE_FINALITY: 10 epochs - MAX_PROVING_PERIOD: 240 epochs - CHALLENGE_WINDOW_SIZE: 30 epochs diff --git a/README.md b/README.md index bf1d2fb8..2d54ed0d 100644 --- a/README.md +++ b/README.md @@ -20,6 +20,10 @@ This repository contains smart contracts and services for the Filecoin ecosystem - **Payment 
Integration**: Built on top of the [Filecoin Services Payments](https://github.com/FilOzone/filecoin-services-payments) framework - **Data Verification**: Uses [PDP verifiers](https://github.com/FilOzone/pdp) for cryptographic proof of data possession +## Pricing + +The service uses static global pricing set by the contract owner (default: 2.5 USDFC per TiB/month). Rail payment rates are calculated based on data size with a minimum floor. See [SPEC.md](SPEC.md) for details on rate calculation, pricing updates, and top-up/renewal behavior. + +## 🚀 Quick Start ### Prerequisites diff --git a/SPEC.md b/SPEC.md new file mode 100644 index 00000000..fd7f1cdf --- /dev/null +++ b/SPEC.md @@ -0,0 +1,141 @@ +# Filecoin Services Specification + +## Pricing + +### Pricing Model + +FilecoinWarmStorageService uses **static global pricing**. All payment rails use the same price regardless of which provider stores the data. The default storage price is 2.5 USDFC per TiB/month. + +Providers may advertise their own prices in the ServiceProviderRegistry, but these are informational for other services and do not affect actual payments in FilecoinWarmStorageService. + +### Rate Calculation + +The payment rate per epoch is calculated from the total data size in bytes: + +``` +# Constants +EPOCHS_PER_MONTH = 86400 # 2880 epochs/day × 30 days +TiB = 1099511627776 # bytes + +# Default pricing (owner-adjustable) +pricePerTiBPerMonth = 2.5 USDFC +minimumStorageRatePerMonth = 0.06 USDFC + +# Per-epoch rate calculation +sizeBasedRate = totalBytes × pricePerTiBPerMonth ÷ TiB ÷ EPOCHS_PER_MONTH +minimumRate = minimumStorageRatePerMonth ÷ EPOCHS_PER_MONTH +finalRate = max(sizeBasedRate, minimumRate) +``` + +The default minimum floor ensures datasets below ~24.58 GiB still generate the minimum payment of 0.06 USDFC/month. + +**Precision note**: Integer division when computing `minimumRate` causes minor precision loss. 
The actual monthly payment (`minimumRate × EPOCHS_PER_MONTH`) is slightly less than `minimumStorageRatePerMonth`—under 0.0001% for typical floor prices. This is acceptable; see the lockup section below for how pre-flight checks handle this. + +### Pricing Updates + +Only the contract owner can update pricing by calling `updatePricing(newStoragePrice, newMinimumRate)`. Maximum allowed values are 10 USDFC for storage price and 0.24 USDFC for minimum rate. + +**Effect on existing datasets**: Pricing changes do not immediately update rates for existing datasets. New rates take effect when pieces are next added or removed. This avoids gas-expensive rate recalculations across all active datasets while ensuring new pricing applies to all future storage operations. + +### Rate Update Timing + +Rate recalculation timing differs for additions and deletions due to proving semantics: + +- **Adding pieces**: The rate updates immediately when `piecesAdded()` is called. The client begins paying for new pieces right away, even though those pieces won't be included in proof challenges until the next proving period. This fail-fast behavior protects providers: if the client lacks sufficient funds for the new lockup, the transaction fails before the provider commits resources. + +- **Removing pieces**: Deletions are scheduled and take effect at the next proving boundary (`nextProvingPeriod()`). The client continues paying the existing rate until the removal is finalized. This deferral is required because proofs may challenge any portion of the current data set during the proving period—the provider must continue storing and proving all existing data until the period ends. + +**Why the asymmetry?** + +During each proving period, proofs are generated over a fixed data set. The prover must maintain the complete data set because challenges can target any leaf: + +- **Additions expand the proof space** but don't affect existing challenges. 
New pieces simply won't be challenged until the next period. Payment starts immediately because storage resources are committed. + +- **Deletions would shrink the proof space** mid-period, potentially invalidating challenges. The data must remain intact until `nextProvingPeriod()` finalizes the removal. Only then does the rate decrease. + +This ensures proof integrity while providing fair payment semantics: you pay when you add, and continue paying for deletions until the proving period boundary. + +### Rate Changes After Termination + +When a service is terminated (by client or provider), the payment rail enters a lockup period during which funds continue flowing to the provider. Rate change behavior differs from active rails: + +- **Additions are blocked**: `piecesAdded()` reverts after termination. No new pieces can be added to a terminated dataset. + +- **Deletions are allowed**: Piece removals can still be scheduled during the lockup window via `piecesScheduledRemove()`, and take effect at the next proving boundary. + +- **Rate can only decrease or stay the same**: Since additions are blocked, the only size changes come from deletions. FilecoinPay enforces `newRate <= oldRate` on terminated rails—rate increases are rejected with `RateChangeNotAllowedOnTerminatedRail`. + +This design ensures the provider receives payment at or above the rate established before termination. The lockup period guarantees payment for the agreed service level, while still allowing the client to reduce their data footprint (and rate) through deletions. + +### Funding and Top-Up + +Clients pay for storage by depositing USDFC into the Filecoin Pay contract. These funds flow to providers over time based on the storage rate. + +**Lockup**: To protect providers from non-payment, FWSS requires clients to maintain a 30-day reserve of funds. This "lockup" guarantees the provider will be paid for at least 30 days even if the client stops adding funds. 
The lockup is not a pre-payment—funds still flow to the provider gradually—but it cannot be withdrawn while the storage agreement is active. + +``` +lockupRequired = finalRate × EPOCHS_PER_MONTH +``` + +At minimum pricing, this equals `minimumStorageRatePerMonth` (0.06 USDFC at default settings). For larger datasets, the lockup equals one month's storage cost. + +**Pre-flight check precision**: The pre-flight validation uses a multiply-first formula `(minimumStorageRatePerMonth × EPOCHS_PER_MONTH) ÷ EPOCHS_PER_MONTH` which preserves the exact monthly value. This produces cleaner error messages (the configured floor price rather than a value with precision loss artifacts) and is slightly more conservative than the actual rail lockup. The difference is under 0.0001% and always in the user's favor—they are never required to have less than needed. + +**Storage duration** extends as clients deposit additional funds: + +``` +storageDuration = availableFunds ÷ finalRate +``` + +Deposits extend the duration without changing the rate (unless adding pieces triggers an immediate rate recalculation, or scheduled deletions take effect at the next proving boundary). + +**Delinquency**: When a client's funded epoch falls below the current epoch, the payment rail can no longer be settled—no further payments flow to the provider. The provider may terminate the service to claim payment from the locked funds, guaranteeing up to 30 days of payment from the last funded epoch. + +## Settlement and Payment Validation + +### Proving Period Deadlines and Settlement + +Settlement progress (`settledUpTo`) tracks the epoch up to which payments have been processed. The validator callback `validatePayment()` determines how far settlement can advance and how much payment is due. + +**Key principle**: Settlement advancement is decoupled from payment amount. + +- **Proven periods**: Both settlement advancement and payment proceed normally. 
+- **Unproven periods with open deadline**: Settlement is blocked until the period is resolved (proven or deadline passes). +- **Unproven periods with passed deadline**: Settlement advances (the SP can never prove this period), but payment for those epochs is zero. + +This design ensures: +1. Clients are not stuck waiting indefinitely for a provider who has abandoned the service +2. Providers are not paid for periods they failed to prove +3. Settlement can complete even if the provider disappears after termination + +### Settlement During Lockup + +After termination, the payment rail enters a lockup period. Settlement continues normally during this time: + +- If the provider proves all periods, they receive full payment +- If the provider fails to prove some periods, those epochs receive zero payment +- If the provider abandons entirely, settlement advances with zero payment once all deadlines pass + +The client's locked funds are released proportionally as settlement progresses. Unproven epochs result in funds returning to the client rather than flowing to the provider. + +### Dataset Deletion Requirements + +Dataset deletion (`dataSetDeleted`) requires the payment rail to be fully settled before the dataset can be removed: + +``` +require(settledUpTo >= endEpoch, RailNotFullySettled) +``` + +**Rationale**: The `validatePayment()` callback reads dataset state (proving status, periods proven) to calculate payment amounts. If the dataset is deleted before settlement completes, `validatePayment()` cannot function, forcing clients to use `settleTerminatedRailWithoutValidation()` which pays the full amount regardless of proof status. 
+ +**Implications**: + +- Providers must wait for settlement to complete before deleting datasets +- Clients can always settle rails (with zero payment for unproven periods) once deadlines pass +- Dataset deletion timing is controlled by proving period deadlines, not just the lockup period + +**Timing**: To delete a dataset after termination: +1. Wait for `block.number > pdpEndEpoch` (lockup period elapsed) +2. Wait for all proving period deadlines within the lockup to pass +3. Call `settleRail()` to complete settlement (rail may auto-finalize) +4. Call `deleteDataSet()` to remove the dataset diff --git a/service_contracts/Makefile b/service_contracts/Makefile index 27f7d689..9649c45a 100644 --- a/service_contracts/Makefile +++ b/service_contracts/Makefile @@ -130,6 +130,7 @@ ABI_CONTRACTS := \ FilecoinWarmStorageServiceStateView \ FilecoinPayV1 \ PDPVerifier \ + ProviderIdSet \ ServiceProviderRegistry \ ServiceProviderRegistryStorage \ SessionKeyRegistry \ diff --git a/service_contracts/abi/Errors.abi.json b/service_contracts/abi/Errors.abi.json index 1ea997f4..94d6b4b4 100644 --- a/service_contracts/abi/Errors.abi.json +++ b/service_contracts/abi/Errors.abi.json @@ -866,6 +866,27 @@ } ] }, + { + "type": "error", + "name": "RailNotFullySettled", + "inputs": [ + { + "name": "railId", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "settledUpTo", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "endEpoch", + "type": "uint256", + "internalType": "uint256" + } + ] + }, { "type": "error", "name": "ServiceContractMustTerminateRail", diff --git a/service_contracts/abi/FilecoinWarmStorageService.abi.json b/service_contracts/abi/FilecoinWarmStorageService.abi.json index 30c5b94c..6ccb7320 100644 --- a/service_contracts/abi/FilecoinWarmStorageService.abi.json +++ b/service_contracts/abi/FilecoinWarmStorageService.abi.json @@ -2291,6 +2291,27 @@ } ] }, + { + "type": "error", + "name": "RailNotFullySettled", + "inputs": [ + { + "name": 
"railId", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "settledUpTo", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "endEpoch", + "type": "uint256", + "internalType": "uint256" + } + ] + }, { "type": "error", "name": "ServiceContractMustTerminateRail", diff --git a/service_contracts/abi/FilecoinWarmStorageServiceStateLibrary.abi.json b/service_contracts/abi/FilecoinWarmStorageServiceStateLibrary.abi.json index 02816c8a..8e632043 100644 --- a/service_contracts/abi/FilecoinWarmStorageServiceStateLibrary.abi.json +++ b/service_contracts/abi/FilecoinWarmStorageServiceStateLibrary.abi.json @@ -1,23 +1,4 @@ [ - { - "type": "function", - "name": "challengeWindow", - "inputs": [ - { - "name": "service", - "type": "FilecoinWarmStorageService", - "internalType": "contract FilecoinWarmStorageService" - } - ], - "outputs": [ - { - "name": "", - "type": "uint256", - "internalType": "uint256" - } - ], - "stateMutability": "view" - }, { "type": "function", "name": "clientDataSets", @@ -201,19 +182,6 @@ ], "stateMutability": "view" }, - { - "type": "function", - "name": "getChallengesPerProof", - "inputs": [], - "outputs": [ - { - "name": "", - "type": "uint64", - "internalType": "uint64" - } - ], - "stateMutability": "pure" - }, { "type": "function", "name": "getClientDataSets", @@ -477,25 +445,6 @@ ], "stateMutability": "view" }, - { - "type": "function", - "name": "getMaxProvingPeriod", - "inputs": [ - { - "name": "service", - "type": "FilecoinWarmStorageService", - "internalType": "contract FilecoinWarmStorageService" - } - ], - "outputs": [ - { - "name": "", - "type": "uint64", - "internalType": "uint64" - } - ], - "stateMutability": "view" - }, { "type": "function", "name": "getPDPConfig", diff --git a/service_contracts/abi/FilecoinWarmStorageServiceStateView.abi.json b/service_contracts/abi/FilecoinWarmStorageServiceStateView.abi.json index deeabdc0..2e97acfa 100644 --- 
a/service_contracts/abi/FilecoinWarmStorageServiceStateView.abi.json +++ b/service_contracts/abi/FilecoinWarmStorageServiceStateView.abi.json @@ -10,19 +10,6 @@ ], "stateMutability": "nonpayable" }, - { - "type": "function", - "name": "challengeWindow", - "inputs": [], - "outputs": [ - { - "name": "", - "type": "uint256", - "internalType": "uint256" - } - ], - "stateMutability": "view" - }, { "type": "function", "name": "clientDataSets", @@ -169,19 +156,6 @@ ], "stateMutability": "view" }, - { - "type": "function", - "name": "getChallengesPerProof", - "inputs": [], - "outputs": [ - { - "name": "", - "type": "uint64", - "internalType": "uint64" - } - ], - "stateMutability": "pure" - }, { "type": "function", "name": "getClientDataSets", @@ -419,19 +393,6 @@ ], "stateMutability": "view" }, - { - "type": "function", - "name": "getMaxProvingPeriod", - "inputs": [], - "outputs": [ - { - "name": "", - "type": "uint64", - "internalType": "uint64" - } - ], - "stateMutability": "view" - }, { "type": "function", "name": "getPDPConfig", diff --git a/service_contracts/abi/ProviderIdSet.abi.json b/service_contracts/abi/ProviderIdSet.abi.json new file mode 100644 index 00000000..fd4038d0 --- /dev/null +++ b/service_contracts/abi/ProviderIdSet.abi.json @@ -0,0 +1,161 @@ +[ + { + "type": "constructor", + "inputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "addProviderId", + "inputs": [ + { + "name": "providerId", + "type": "uint256", + "internalType": "uint256" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "containsProviderId", + "inputs": [ + { + "name": "providerId", + "type": "uint256", + "internalType": "uint256" + } + ], + "outputs": [ + { + "name": "", + "type": "bool", + "internalType": "bool" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "getProviderIds", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "uint256[]", + "internalType": 
"uint256[]" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "owner", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "address", + "internalType": "address" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "removeProviderId", + "inputs": [ + { + "name": "providerId", + "type": "uint256", + "internalType": "uint256" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "renounceOwnership", + "inputs": [], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "transferOwnership", + "inputs": [ + { + "name": "newOwner", + "type": "address", + "internalType": "address" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "event", + "name": "OwnershipTransferred", + "inputs": [ + { + "name": "previousOwner", + "type": "address", + "indexed": true, + "internalType": "address" + }, + { + "name": "newOwner", + "type": "address", + "indexed": true, + "internalType": "address" + } + ], + "anonymous": false + }, + { + "type": "error", + "name": "OwnableInvalidOwner", + "inputs": [ + { + "name": "owner", + "type": "address", + "internalType": "address" + } + ] + }, + { + "type": "error", + "name": "OwnableUnauthorizedAccount", + "inputs": [ + { + "name": "account", + "type": "address", + "internalType": "address" + } + ] + }, + { + "type": "error", + "name": "ProviderIdNotFound", + "inputs": [ + { + "name": "providerId", + "type": "uint256", + "internalType": "uint256" + } + ] + }, + { + "type": "error", + "name": "ProviderIdTooLarge", + "inputs": [ + { + "name": "providerId", + "type": "uint256", + "internalType": "uint256" + } + ] + } +] diff --git a/service_contracts/abi/ServiceProviderRegistry.abi.json b/service_contracts/abi/ServiceProviderRegistry.abi.json index b605b3f4..a903bf5e 100644 --- a/service_contracts/abi/ServiceProviderRegistry.abi.json +++ 
b/service_contracts/abi/ServiceProviderRegistry.abi.json @@ -156,6 +156,31 @@ ], "stateMutability": "view" }, + { + "type": "function", + "name": "announcePlannedUpgrade", + "inputs": [ + { + "name": "plannedUpgrade", + "type": "tuple", + "internalType": "struct ServiceProviderRegistry.PlannedUpgrade", + "components": [ + { + "name": "nextImplementation", + "type": "address", + "internalType": "address" + }, + { + "name": "afterEpoch", + "type": "uint96", + "internalType": "uint96" + } + ] + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, { "type": "function", "name": "eip712Domain", @@ -794,6 +819,24 @@ "outputs": [], "stateMutability": "nonpayable" }, + { + "type": "function", + "name": "nextUpgrade", + "inputs": [], + "outputs": [ + { + "name": "nextImplementation", + "type": "address", + "internalType": "address" + }, + { + "name": "afterEpoch", + "type": "uint96", + "internalType": "uint96" + } + ], + "stateMutability": "view" + }, { "type": "function", "name": "owner", @@ -1304,6 +1347,31 @@ ], "anonymous": false }, + { + "type": "event", + "name": "UpgradeAnnounced", + "inputs": [ + { + "name": "plannedUpgrade", + "type": "tuple", + "indexed": false, + "internalType": "struct ServiceProviderRegistry.PlannedUpgrade", + "components": [ + { + "name": "nextImplementation", + "type": "address", + "internalType": "address" + }, + { + "name": "afterEpoch", + "type": "uint96", + "internalType": "uint96" + } + ] + } + ], + "anonymous": false + }, { "type": "event", "name": "Upgraded", diff --git a/service_contracts/deployments.json b/service_contracts/deployments.json new file mode 100644 index 00000000..970f254a --- /dev/null +++ b/service_contracts/deployments.json @@ -0,0 +1,34 @@ +{ + "314": { + "metadata": { + "note": "Filecoin mainnet deployments" + }, + "FILECOIN_PAY_ADDRESS": "0x23b1e018F08BB982348b15a86ee926eEBf7F4DAa", + "PDP_VERIFIER_PROXY_ADDRESS": "0xBADd0B92C1c71d02E7d520f64c0876538fa2557F", + "PDP_VERIFIER_IMPLEMENTATION_ADDRESS": 
"0xe2Dc211BffcA499761570E04e8143Be2BA66095f", + "SESSION_KEY_REGISTRY_ADDRESS": "0x74FD50525A958aF5d484601E252271f9625231aB", + "SERVICE_PROVIDER_REGISTRY_PROXY_ADDRESS": "0xf55dDbf63F1b55c3F1D4FA7e339a68AB7b64A5eB", + "SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS": "0xe255D3a89D6B326b48bc0fC94a472A839471D6B0", + "SIGNATURE_VERIFICATION_LIB_ADDRESS": "0x7BdDF92aaBA8a578C3F494729BfAC5190d84acda", + "FWSS_PROXY_ADDRESS": "0x8408502033C418E1bbC97cE9ac48E5528F371A9f", + "FWSS_IMPLEMENTATION_ADDRESS": "0xd60b90f6D3C42B26a246E141ec701a20Dde2fA61", + "FWSS_VIEW_ADDRESS": "0x9e4e6699d8F67dFc883d6b0A7344Bd56F7E80B46", + "ENDORSEMENT_SET_ADDRESS": "0x59eFa2e8324E1551d46010d7B0B140eE2F5c726b" + }, + "314159": { + "metadata": { + "note": "Filecoin calibnet deployments" + }, + "FILECOIN_PAY_ADDRESS": "0x09a0fDc2723fAd1A7b8e3e00eE5DF73841df55a0", + "PDP_VERIFIER_PROXY_ADDRESS": "0x85e366Cf9DD2c0aE37E963d9556F5f4718d6417C", + "PDP_VERIFIER_IMPLEMENTATION_ADDRESS": "0x2355Cb19BA1eFF51673562E1a5fc5eE292AF9D42", + "SESSION_KEY_REGISTRY_ADDRESS": "0x518411c2062E119Aaf7A8B12A2eDf9a939347655", + "SERVICE_PROVIDER_REGISTRY_PROXY_ADDRESS": "0x839e5c9988e4e9977d40708d0094103c0839Ac9D", + "SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS": "0xb32Bb530638d20f1B59B40CDD2Ce4208430f7DE3", + "SIGNATURE_VERIFICATION_LIB_ADDRESS": "0x3fD7BFFAcAfdf35083d1424c1feC6481c85087F6", + "FWSS_PROXY_ADDRESS": "0x02925630df557F957f70E112bA06e50965417CA0", + "FWSS_IMPLEMENTATION_ADDRESS": "0x1cAeE5EfCfc3681C2bBF689Ccb30d70c6e45F49f", + "FWSS_VIEW_ADDRESS": "0xA5D87b04086B1d591026cCE10255351B5AA4689B", + "ENDORSEMENT_SET_ADDRESS": "0xAA2f7CfC7ecAc616EC9C1f6d700fAd19087FAC84" + } +} diff --git a/service_contracts/src/Errors.sol b/service_contracts/src/Errors.sol index 54568795..c9a6203a 100644 --- a/service_contracts/src/Errors.sol +++ b/service_contracts/src/Errors.sol @@ -296,6 +296,13 @@ library Errors { /// @param pdpEndEpoch The end epoch when the PDP payment rail will finalize error 
PaymentRailsNotFinalized(uint256 dataSetId, uint256 pdpEndEpoch); + /// @notice Payment rail is not fully settled, so the data set can't be deleted + /// @dev Settlement must complete before deletion to preserve validatePayment state + /// @param railId The rail ID + /// @param settledUpTo The epoch the rail is settled up to + /// @param endEpoch The end epoch of the rail (must be <= settledUpTo) + error RailNotFullySettled(uint256 railId, uint256 settledUpTo, uint256 endEpoch); + /// @notice Extra data size exceeds the maximum allowed limit /// @param actualSize The size of the provided extra data /// @param maxAllowedSize The maximum allowed size for extra data diff --git a/service_contracts/src/FilecoinWarmStorageService.sol b/service_contracts/src/FilecoinWarmStorageService.sol index ad26732e..23b768d7 100644 --- a/service_contracts/src/FilecoinWarmStorageService.sol +++ b/service_contracts/src/FilecoinWarmStorageService.sol @@ -2,6 +2,7 @@ pragma solidity ^0.8.20; import {PDPListener} from "@pdp/PDPVerifier.sol"; +import {IPDPVerifier} from "@pdp/interfaces/IPDPVerifier.sol"; import {Cids} from "@pdp/Cids.sol"; import {SessionKeyRegistry} from "@session-key-registry/SessionKeyRegistry.sol"; import {Initializable} from "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol"; @@ -298,6 +299,9 @@ contract FilecoinWarmStorageService is uint256 private storagePricePerTibPerMonth; uint256 private minimumStorageRatePerMonth; + // Piece IDs awaiting metadata cleanup; cleared each nextProvingPeriod call + mapping(uint256 dataSetId => uint256[] pieceIds) internal scheduledPieceMetadataRemovals; + event UpgradeAnnounced(PlannedUpgrade plannedUpgrade); // ========================================================================= @@ -756,6 +760,20 @@ contract FilecoinWarmStorageService is Errors.PaymentRailsNotFinalized(dataSetId, info.pdpEndEpoch) ); + // Check if the rail is fully settled before allowing deletion. 
+ // This ensures validatePayment() can still read dataset state during settlement. + // If deleted before settlement, clients would be forced to use + // settleTerminatedRailWithoutValidation() which pays full amount for unproven epochs. + FilecoinPayV1 payments = FilecoinPayV1(paymentsContractAddress); + try payments.getRail(info.pdpRailId) returns (FilecoinPayV1.RailView memory rail) { + require( + rail.settledUpTo >= rail.endEpoch, + Errors.RailNotFullySettled(info.pdpRailId, rail.settledUpTo, rail.endEpoch) + ); + } catch { + // Rail is finalized (zeroed out), meaning it was already fully settled + } + // NOTE keep clientNonces[payer][clientDataSetId] to prevent replay // Remove from client's dataset list @@ -834,6 +852,10 @@ contract FilecoinWarmStorageService is // Verify the signature verifyAddPiecesSignature(payer, info.clientDataSetId, pieceData, nonce, metadataKeys, metadataValues, signature); + // Validate lockup for the new data set size (fail-fast if client has insufficient funds) + uint256 currentLeafCount = IPDPVerifier(pdpVerifierAddress).getDataSetLeafCount(dataSetId); + updatePaymentRates(dataSetId, currentLeafCount); + // Store metadata for each new piece for (uint256 i = 0; i < pieceData.length; i++) { uint256 pieceId = firstAdded + i; @@ -897,7 +919,11 @@ contract FilecoinWarmStorageService is // Verify the signature verifySchedulePieceRemovalsSignature(payer, info.clientDataSetId, pieceIds, signature); - // Additional logic for scheduling removals can be added here + // Queue piece IDs for metadata cleanup at nextProvingPeriod + uint256[] storage scheduled = scheduledPieceMetadataRemovals[dataSetId]; + for (uint256 i = 0; i < pieceIds.length; i++) { + scheduled.push(pieceIds[i]); + } } // possession proven checks for correct challenge count and reverts if too low @@ -963,8 +989,10 @@ contract FilecoinWarmStorageService is // This marks when the data set became active for proving provingActivationEpoch[dataSetId] = block.number; - // Update 
the payment rates - updatePaymentRates(dataSetId, leafCount); + // Rate was already set in piecesAdded; only update if pieces were removed + if (processScheduledPieceMetadataRemovals(dataSetId)) { + updatePaymentRates(dataSetId, leafCount); + } return; } @@ -1010,8 +1038,10 @@ contract FilecoinWarmStorageService is provingDeadlines[dataSetId] = nextDeadline; provenThisPeriod[dataSetId] = false; - // Update the payment rates based on current data set size - updatePaymentRates(dataSetId, leafCount); + // Additions update rate immediately in piecesAdded; only update here if pieces were removed + if (processScheduledPieceMetadataRemovals(dataSetId)) { + updatePaymentRates(dataSetId, leafCount); + } } /** @@ -1181,7 +1211,12 @@ contract FilecoinWarmStorageService is internal view { - // Calculate required lockup for minimum pricing + // Calculate required lockup for minimum pricing. + // We use multiply-first here to preserve the exact monthly value for cleaner error messages + // (a round number like the configured floor price, rather than a value with many trailing digits + // from precision loss). This is slightly more conservative than the actual rail lockup (which + // uses the truncated per-epoch rate), but the difference is under 0.0001% and always in the + // user's favor - they are never required to have less than what the rail will actually lock. 
uint256 minimumLockupRequired = (minimumStorageRatePerMonth * DEFAULT_LOCKUP_PERIOD) / EPOCHS_PER_MONTH; // If CDN is enabled, include the fixed cache-miss and CDN lockup amounts @@ -1251,6 +1286,31 @@ contract FilecoinWarmStorageService is emit RailRateUpdated(dataSetId, pdpRailId, newStorageRatePerEpoch); } + function processScheduledPieceMetadataRemovals(uint256 dataSetId) internal returns (bool hadRemovals) { + uint256[] storage pieceIds = scheduledPieceMetadataRemovals[dataSetId]; + uint256 len = pieceIds.length; + if (len == 0) { + return false; + } + + mapping(uint256 => string[]) storage pieceMetadataKeys = dataSetPieceMetadataKeys[dataSetId]; + mapping(uint256 => mapping(string => string)) storage pieceMetadata = dataSetPieceMetadata[dataSetId]; + + for (uint256 i = 0; i < len; i++) { + uint256 pieceId = pieceIds[i]; + string[] storage metadataKeys = pieceMetadataKeys[pieceId]; + mapping(string => string) storage metadata = pieceMetadata[pieceId]; + uint256 keyLen = metadataKeys.length; + for (uint256 j = 0; j < keyLen; j++) { + delete metadata[metadataKeys[j]]; + } + delete pieceMetadataKeys[pieceId]; + } + + delete scheduledPieceMetadataRemovals[dataSetId]; + return true; + } + /** * @notice Determines which proving period an epoch belongs to * @dev For a given epoch, calculates the period ID based on activation time @@ -1333,7 +1393,11 @@ contract FilecoinWarmStorageService is // Calculate natural size-based rate uint256 naturalRate = calculateStorageSizeBasedRatePerEpoch(totalBytes, storagePricePerTibPerMonth); - // Calculate minimum rate (floor price converted to per-epoch) + // Calculate minimum rate (floor price converted to per-epoch). + // Integer division truncates, so (minimumRate × EPOCHS_PER_MONTH) yields slightly less than + // minimumStorageRatePerMonth. For typical floor prices this precision loss is under 0.0001%. 
+ // The pre-flight lockup check in validatePayerOperatorApprovalAndFunds uses a multiply-first + // formula that preserves the full monthly value, ensuring users always have sufficient funds. uint256 minimumRate = minimumStorageRatePerMonth / EPOCHS_PER_MONTH; // Return whichever is higher: natural rate or minimum rate @@ -1570,7 +1634,7 @@ contract FilecoinWarmStorageService is (uint256 provenEpochCount, uint256 settleUpTo) = _findProvenEpochs(dataSetId, fromEpoch, toEpoch, activationEpoch); - // If no epochs are proven, we can't settle anything + // If no epochs are proven, no payment is due (but settlement may still advance) if (provenEpochCount == 0) { return ValidationResult({ modifiedAmount: 0, @@ -1619,13 +1683,18 @@ contract FilecoinWarmStorageService is provenEpochCount += maxProvingPeriod; } } - settleUpTo = _calcPeriodDeadline(activationEpoch, endingPeriod - 1); + uint256 endingPeriodDeadline = _calcPeriodDeadline(activationEpoch, endingPeriod); + settleUpTo = endingPeriodDeadline - maxProvingPeriod; // handle the last period separately if (_isPeriodProven(dataSetId, endingPeriod)) { provenEpochCount += (toEpoch - settleUpTo); settleUpTo = toEpoch; + } else if (endingPeriodDeadline < block.number) { + // Period deadline passed but unproven - advance settlement with zero payment + settleUpTo = toEpoch; } + // else: period still open - settlement blocked at previous settleUpTo } return (provenEpochCount, settleUpTo); } diff --git a/service_contracts/src/FilecoinWarmStorageServiceStateView.sol b/service_contracts/src/FilecoinWarmStorageServiceStateView.sol index 211917b9..8059ecb8 100644 --- a/service_contracts/src/FilecoinWarmStorageServiceStateView.sol +++ b/service_contracts/src/FilecoinWarmStorageServiceStateView.sol @@ -18,10 +18,6 @@ contract FilecoinWarmStorageServiceStateView is IPDPProvingSchedule { service = _service; } - function challengeWindow() external view returns (uint256) { - return service.challengeWindow(); - } - function 
clientDataSets(address payer) external view returns (uint256[] memory dataSetIds) { return service.clientDataSets(payer); } @@ -58,10 +54,6 @@ contract FilecoinWarmStorageServiceStateView is IPDPProvingSchedule { return service.getApprovedProvidersLength(); } - function getChallengesPerProof() external pure returns (uint64) { - return FilecoinWarmStorageServiceStateInternalLibrary.getChallengesPerProof(); - } - function getClientDataSets(address client) external view @@ -102,10 +94,6 @@ contract FilecoinWarmStorageServiceStateView is IPDPProvingSchedule { return service.getDataSetStatus(dataSetId); } - function getMaxProvingPeriod() external view returns (uint64) { - return service.getMaxProvingPeriod(); - } - function getPDPConfig() external view diff --git a/service_contracts/src/ProviderIdSet.sol b/service_contracts/src/ProviderIdSet.sol new file mode 100644 index 00000000..6e41be1e --- /dev/null +++ b/service_contracts/src/ProviderIdSet.sol @@ -0,0 +1,134 @@ +pragma solidity ^0.8.30; + +import {Ownable} from "@openzeppelin/contracts/access/Ownable.sol"; + +// This linear design assumes that the set is small and sparse +// It uses much less space than an iterable mapping but performs worse when the set is sufficiently large +contract ProviderIdSet is Ownable { + constructor() Ownable(msg.sender) {} + + // compressed arrayset: 8 providerIds per item + uint256[] private list; + + error ProviderIdTooLarge(uint256 providerId); + error ProviderIdNotFound(uint256 providerId); + + function getProviderIds() public view returns (uint256[] memory) { + uint256[] memory providers = new uint256[](list.length * 8); + + unchecked { + uint256 size = 0; + for (uint256 i = 0; i < list.length; i++) { + uint256 iteration = list[i]; + while (iteration > 0) { + providers[size++] = iteration & 0xffffffff; + iteration >>= 32; + } + } + + // truncate length + assembly ("memory-safe") { + mstore(providers, size) + } + } + + return providers; + } + + function containsProviderId(uint256 
providerId) public view returns (bool) { + for (uint256 i = 0; i < list.length; i++) { + uint256 word = list[i]; + while (word != 0) { + if (word & 0xffffffff == providerId) { + return true; + } + word >>= 32; + } + } + return false; + } + + /** + * No-op if providerId is 0 or if providerId is already in the set + */ + function addProviderId(uint256 providerId) external onlyOwner { + require(providerId < 0x100000000, ProviderIdTooLarge(providerId)); + for (uint256 i = 0; i < list.length; i++) { + uint256 read = list[i]; + uint256 iteration = read; + for (uint256 j = 0; j < 8; j++) { + uint256 curr = iteration & 0xffffffff; + if (curr == 0) { + // insert + list[i] = read | providerId << j * 32; + return; + } + if (curr == providerId) { + // found + return; + } + iteration >>= 32; + } + } + // insert + list.push(providerId); + } + + /** + * Reverts if providerId is not in the set + */ + function removeProviderId(uint256 providerId) external onlyOwner { + uint256 length = list.length; + for (uint256 i = 0; i < length; i++) { + uint256 read = list[i]; + uint256 iteration = read; + for (uint256 j = 0; j < 8; j++) { + uint256 curr = iteration & 0xffffffff; + require(curr != 0, ProviderIdNotFound(providerId)); + if (curr != providerId) { + iteration >>= 32; + continue; + } + // found at i,j + + unchecked { + uint256 lastFew; + if (i == length - 1) { + // can skip sload + lastFew = read; + } else { + lastFew = list[length - 1]; + } + if (lastFew < 0x100000000) { + // special case: lastFew contains one item + read ^= (lastFew ^ providerId) << j * 32; + list[i] = read; + list.pop(); + return; + } + + // find the last item + // could binary search for k but average performance is worse + for (uint256 k = 224; k != 0; k -= 32) { + uint256 last = lastFew >> k; + if (last == 0) { + continue; + } + // move last to i,j + read ^= (last ^ providerId) << j * 32; + if (i == length - 1) { + read &= (1 << k) - 1; + } else { + // pop last + lastFew &= (1 << k) - 1; + list[length - 1] = 
lastFew; + } + list[i] = read; + return; + } + } + } + } + require(false, ProviderIdNotFound(providerId)); + } +} diff --git a/service_contracts/src/ServiceProviderRegistry.sol b/service_contracts/src/ServiceProviderRegistry.sol index a8b03baa..c52db78e 100644 --- a/service_contracts/src/ServiceProviderRegistry.sol +++ b/service_contracts/src/ServiceProviderRegistry.sol @@ -102,6 +102,18 @@ contract ServiceProviderRegistry is /// @notice Emitted when the contract is upgraded event ContractUpgraded(string version, address implementation); + // Used for announcing upgrades, packed into one slot + struct PlannedUpgrade { + // Address of the new implementation contract + address nextImplementation; + // Upgrade will not occur until at least this epoch + uint96 afterEpoch; + } + + PlannedUpgrade public nextUpgrade; + + event UpgradeAnnounced(PlannedUpgrade plannedUpgrade); + /// @notice Ensures the caller is the service provider modifier onlyServiceProvider(uint256 providerId) { require(providers[providerId].serviceProvider == msg.sender, "Only service provider can call this function"); @@ -752,18 +764,32 @@ contract ServiceProviderRegistry is } } + /// @notice Announce a planned upgrade + /// @dev Can only be called by the contract owner + /// @param plannedUpgrade The planned upgrade details + function announcePlannedUpgrade(PlannedUpgrade calldata plannedUpgrade) external onlyOwner { + require(plannedUpgrade.nextImplementation.code.length > 3000); + require(plannedUpgrade.afterEpoch > block.number); + nextUpgrade = plannedUpgrade; + emit UpgradeAnnounced(plannedUpgrade); + } + /// @notice Authorizes an upgrade to a new implementation /// @dev Can only be called by the contract owner + /// @dev Supports both one-step (legacy) and two-step (announcePlannedUpgrade) upgrade mechanisms /// @param newImplementation Address of the new implementation contract function _authorizeUpgrade(address newImplementation) internal override onlyOwner { - // Authorization logic is 
handled by the onlyOwner modifier + // zero address already checked by ERC1967Utils._setImplementation + require(newImplementation == nextUpgrade.nextImplementation); + require(block.number >= nextUpgrade.afterEpoch); + delete nextUpgrade; } /// @notice Migration function for contract upgrades /// @dev This function should be called during upgrades to emit version tracking events + /// Only callable during proxy upgrade process /// @param newVersion The version string for the new implementation - function migrate(string memory newVersion) public onlyProxy reinitializer(2) { - require(msg.sender == address(this), "Only self can call migrate"); + function migrate(string memory newVersion) public onlyProxy onlyOwner reinitializer(2) { emit ContractUpgraded(newVersion, ERC1967Utils.getImplementation()); } } diff --git a/service_contracts/src/lib/FilecoinWarmStorageServiceLayout.sol b/service_contracts/src/lib/FilecoinWarmStorageServiceLayout.sol index fb8182af..07941de4 100644 --- a/service_contracts/src/lib/FilecoinWarmStorageServiceLayout.sol +++ b/service_contracts/src/lib/FilecoinWarmStorageServiceLayout.sol @@ -27,3 +27,4 @@ bytes32 constant FIL_BEAM_CONTROLLER_ADDRESS_SLOT = bytes32(uint256(18)); bytes32 constant NEXT_UPGRADE_SLOT = bytes32(uint256(19)); bytes32 constant STORAGE_PRICE_PER_TIB_PER_MONTH_SLOT = bytes32(uint256(20)); bytes32 constant MINIMUM_STORAGE_RATE_PER_MONTH_SLOT = bytes32(uint256(21)); +bytes32 constant SCHEDULED_PIECE_METADATA_REMOVALS_SLOT = bytes32(uint256(22)); diff --git a/service_contracts/src/lib/FilecoinWarmStorageServiceStateInternalLibrary.sol b/service_contracts/src/lib/FilecoinWarmStorageServiceStateInternalLibrary.sol index 8df5e514..0f9ed64d 100644 --- a/service_contracts/src/lib/FilecoinWarmStorageServiceStateInternalLibrary.sol +++ b/service_contracts/src/lib/FilecoinWarmStorageServiceStateInternalLibrary.sol @@ -74,10 +74,6 @@ library FilecoinWarmStorageServiceStateInternalLibrary { return leafCount * BYTES_PER_LEAF; } - 
function getChallengesPerProof() internal pure returns (uint64) { - return CHALLENGES_PER_PROOF; - } - function clientNonces(FilecoinWarmStorageService service, address payer, uint256 nonce) internal view diff --git a/service_contracts/src/lib/FilecoinWarmStorageServiceStateLibrary.sol b/service_contracts/src/lib/FilecoinWarmStorageServiceStateLibrary.sol index edd6a7d7..a09740bb 100644 --- a/service_contracts/src/lib/FilecoinWarmStorageServiceStateLibrary.sol +++ b/service_contracts/src/lib/FilecoinWarmStorageServiceStateLibrary.sol @@ -70,10 +70,6 @@ library FilecoinWarmStorageServiceStateLibrary { return leafCount * BYTES_PER_LEAF; } - function getChallengesPerProof() public pure returns (uint64) { - return CHALLENGES_PER_PROOF; - } - function clientNonces(FilecoinWarmStorageService service, address payer, uint256 nonce) public view @@ -188,13 +184,13 @@ library FilecoinWarmStorageServiceStateLibrary { return uint256(service.extsload(keccak256(abi.encode(setId, StorageLayout.PROVING_DEADLINES_SLOT)))); } - function getMaxProvingPeriod(FilecoinWarmStorageService service) public view returns (uint64) { + function getMaxProvingPeriod(FilecoinWarmStorageService service) internal view returns (uint64) { return uint64(uint256(service.extsload(StorageLayout.MAX_PROVING_PERIOD_SLOT))); } // Number of epochs at the end of a proving period during which a // proof of possession can be submitted - function challengeWindow(FilecoinWarmStorageService service) public view returns (uint256) { + function challengeWindow(FilecoinWarmStorageService service) internal view returns (uint256) { return uint256(service.extsload(StorageLayout.CHALLENGE_WINDOW_SIZE_SLOT)); } diff --git a/service_contracts/test/FilecoinWarmStorageService.t.sol b/service_contracts/test/FilecoinWarmStorageService.t.sol index d76020ed..77867cf1 100644 --- a/service_contracts/test/FilecoinWarmStorageService.t.sol +++ b/service_contracts/test/FilecoinWarmStorageService.t.sol @@ -745,8 +745,11 @@ contract 
FilecoinWarmStorageServiceTest is MockFVMTest { pdpServiceWithPayments.terminateService(newDataSetId2); FilecoinWarmStorageService.DataSetInfoView memory terminatedInfo = viewContract.getDataSet(newDataSetId2); assertTrue(terminatedInfo.pdpEndEpoch > 0, "Dataset 2 should be terminated"); - // Advance block number to be greater than the end epoch to allow deletion + // Advance block number past end epoch to allow settlement and deletion vm.roll(terminatedInfo.pdpEndEpoch + 1); + // Settle the rail before deletion + FilecoinPayV1.RailView memory rail = payments.getRail(terminatedInfo.pdpRailId); + payments.settleRail(terminatedInfo.pdpRailId, rail.endEpoch); vm.prank(serviceProvider); mockPDPVerifier.deleteDataSet(pdpServiceWithPayments, newDataSetId2, ""); @@ -803,9 +806,9 @@ contract FilecoinWarmStorageServiceTest is MockFVMTest { // First batch (3 pieces) with key "meta" => metadataShort Cids.Cid[] memory pieceData1 = new Cids.Cid[](3); - pieceData1[0].data = bytes("1_0:1111"); - pieceData1[1].data = bytes("1_1:111100000"); - pieceData1[2].data = bytes("1_2:11110000000000"); + pieceData1[0] = Cids.CommPv2FromDigest(0, 4, keccak256(abi.encodePacked("1_0:1111"))); + pieceData1[1] = Cids.CommPv2FromDigest(0, 4, keccak256(abi.encodePacked("1_1:111100000"))); + pieceData1[2] = Cids.CommPv2FromDigest(0, 4, keccak256(abi.encodePacked("1_2:11110000000000"))); string[] memory keys1 = new string[](1); string[] memory values1 = new string[](1); keys1[0] = "meta"; @@ -817,8 +820,10 @@ contract FilecoinWarmStorageServiceTest is MockFVMTest { // Second batch (2 pieces) with key "meta" => metadataLong Cids.Cid[] memory pieceData2 = new Cids.Cid[](2); - pieceData2[0].data = bytes("2_0:22222222222222222222"); - pieceData2[1].data = bytes("2_1:222222222222222222220000000000000000000000000000000000000000"); + pieceData2[0] = Cids.CommPv2FromDigest(0, 4, keccak256(abi.encodePacked("2_0:22222222222222222222"))); + pieceData2[1] = Cids.CommPv2FromDigest( + 0, 4, 
keccak256(abi.encodePacked("2_1:222222222222222222220000000000000000000000000000000000000000000")) + ); string[] memory keys2 = new string[](1); string[] memory values2 = new string[](1); keys2[0] = "meta"; @@ -1174,6 +1179,123 @@ contract FilecoinWarmStorageServiceTest is MockFVMTest { assertEq(dataSetId, 1, "Dataset should be created with above-minimum funds"); } + function testInsufficientFunds_AddPiecesFailsImmediately() public { + // Test that adding pieces fails immediately when client has insufficient funds + // for the new lockup amount. This validates that updatePaymentRates is called + // in piecesAdded rather than waiting until nextProvingPeriod. + + // Setup: Client with minimal funds - just enough to create an empty dataset + address limitedClient = makeAddr("limitedClient"); + uint256 limitedAmount = 7e16; // 0.07 USDFC (just above 0.06 minimum) + + mockUSDFC.safeTransfer(limitedClient, limitedAmount); + + vm.startPrank(limitedClient); + payments.setOperatorApproval(mockUSDFC, address(pdpServiceWithPayments), true, 1000e18, 1000e18, 365 days); + mockUSDFC.approve(address(payments), limitedAmount); + payments.deposit(mockUSDFC, limitedClient, limitedAmount); + vm.stopPrank(); + + // Create dataset - should succeed with minimal funds (uses minimum floor rate) + (string[] memory dsKeys, string[] memory dsValues) = _getSingleMetadataKV("label", "Limited Funds Test"); + FilecoinWarmStorageService.DataSetCreateData memory createData = FilecoinWarmStorageService.DataSetCreateData({ + payer: limitedClient, + clientDataSetId: 1001, + metadataKeys: dsKeys, + metadataValues: dsValues, + signature: FAKE_SIGNATURE + }); + + bytes memory encodedCreateData = abi.encode( + createData.payer, + createData.clientDataSetId, + createData.metadataKeys, + createData.metadataValues, + createData.signature + ); + + makeSignaturePass(limitedClient); + vm.prank(serviceProvider); + uint256 dataSetId = mockPDPVerifier.createDataSet(pdpServiceWithPayments, encodedCreateData); + 
assertEq(dataSetId, 1, "Dataset should be created successfully"); + + // Prepare a large piece - 1 TiB would cost 2.5 USDFC/month, way more than client has + // height=35 means 2^35 leaves × 32 bytes = 1 TiB + Cids.Cid[] memory largePieceData = new Cids.Cid[](1); + largePieceData[0] = Cids.CommPv2FromDigest(0, 35, keccak256(abi.encodePacked("large_piece"))); + string[] memory keys = new string[](0); + string[] memory values = new string[](0); + + // Attempt to add piece should fail immediately due to insufficient funds + // The error comes from FilecoinPayV1's modifyRailPayment when lockup check fails + makeSignaturePass(limitedClient); + vm.expectRevert(); // Reverts with "invariant failure: insufficient funds to cover lockup after function execution" + mockPDPVerifier.addPieces(pdpServiceWithPayments, dataSetId, 0, largePieceData, 1, FAKE_SIGNATURE, keys, values); + } + + function testAddPieces_RateUpdatedImmediately() public { + // Test that payment rates are updated immediately when pieces are added, + // not deferred to nextProvingPeriod. 
+ + // Setup: Client with sufficient funds + address testClient = makeAddr("rateUpdateClient"); + uint256 depositAmount = 100e18; // 100 USDFC - plenty of funds + + mockUSDFC.safeTransfer(testClient, depositAmount); + + vm.startPrank(testClient); + payments.setOperatorApproval(mockUSDFC, address(pdpServiceWithPayments), true, 1000e18, 1000e18, 365 days); + mockUSDFC.approve(address(payments), depositAmount); + payments.deposit(mockUSDFC, testClient, depositAmount); + vm.stopPrank(); + + // Create dataset + (string[] memory dsKeys, string[] memory dsValues) = _getSingleMetadataKV("label", "Rate Update Test"); + FilecoinWarmStorageService.DataSetCreateData memory createData = FilecoinWarmStorageService.DataSetCreateData({ + payer: testClient, + clientDataSetId: 1002, + metadataKeys: dsKeys, + metadataValues: dsValues, + signature: FAKE_SIGNATURE + }); + + bytes memory encodedCreateData = abi.encode( + createData.payer, + createData.clientDataSetId, + createData.metadataKeys, + createData.metadataValues, + createData.signature + ); + + makeSignaturePass(testClient); + vm.prank(serviceProvider); + uint256 dataSetId = mockPDPVerifier.createDataSet(pdpServiceWithPayments, encodedCreateData); + + // Get initial rail info (should be at minimum rate for empty dataset) + FilecoinWarmStorageService.DataSetInfoView memory dataSetInfo = viewContract.getDataSet(dataSetId); + uint256 railId = dataSetInfo.pdpRailId; + + // Get initial rate + FilecoinPayV1.RailView memory initialRail = payments.getRail(railId); + uint256 initialRate = initialRail.paymentRate; + + // Add a large piece (1 TiB = height 35) + Cids.Cid[] memory pieceData = new Cids.Cid[](1); + pieceData[0] = Cids.CommPv2FromDigest(0, 35, keccak256(abi.encodePacked("1tib_piece"))); + string[] memory keys = new string[](0); + string[] memory values = new string[](0); + + makeSignaturePass(testClient); + mockPDPVerifier.addPieces(pdpServiceWithPayments, dataSetId, 0, pieceData, 1, FAKE_SIGNATURE, keys, values); + + // Get 
rate after adding piece - should be updated immediately, not waiting for nextProvingPeriod + FilecoinPayV1.RailView memory railAfterAdd = payments.getRail(railId); + uint256 rateAfterAdd = railAfterAdd.paymentRate; + + // Rate should have increased (1 TiB costs ~2.5 USDFC/month, much more than minimum 0.06) + assertGt(rateAfterAdd, initialRate, "Rate should increase immediately after adding piece"); + } + // Operator Approval Validation Tests function testOperatorApproval_NotApproved() public { // Setup: Client with sufficient funds but no operator approval @@ -1620,6 +1742,10 @@ contract FilecoinWarmStorageServiceTest is MockFVMTest { // Advance block number to be greater than the end epoch to allow deletion vm.roll(terminatedInfo.pdpEndEpoch + 1); + // Settle the rail before deletion (required by the settlement check) + FilecoinPayV1.RailView memory rail = payments.getRail(terminatedInfo.pdpRailId); + payments.settleRail(terminatedInfo.pdpRailId, rail.endEpoch); + // Delete the second dataset (dataSet2) - this should completely remove it deleteDataSetForClient(sp2, client, dataSet2); @@ -1959,6 +2085,14 @@ contract FilecoinWarmStorageServiceTest is MockFVMTest { ); pdpServiceWithPayments.nextProvingPeriod(dataSetId, block.number + maxProvingPeriod, 100, ""); console.log("[OK] nextProvingPeriod correctly reverted"); + + // Roll past the last period deadline to allow settlement + vm.roll(info.pdpEndEpoch + maxProvingPeriod + 1); + + // Settle the rail before deletion + FilecoinPayV1.RailView memory rail = payments.getRail(info.pdpRailId); + payments.settleRail(info.pdpRailId, rail.endEpoch); + console.log("\n7. 
Testing dataSetDeleted"); vm.prank(address(mockPDPVerifier)); pdpServiceWithPayments.dataSetDeleted(dataSetId, 10, bytes("")); @@ -4361,6 +4495,10 @@ contract FilecoinWarmStorageServiceTest is MockFVMTest { // Wait for payment end epoch to elapse vm.roll(info.pdpEndEpoch + 1); + // Settle the rail before deletion + FilecoinPayV1.RailView memory pdpRail = payments.getRail(info.pdpRailId); + payments.settleRail(info.pdpRailId, pdpRail.endEpoch); + // Call dataSetDeleted to trigger cleanup vm.prank(address(mockPDPVerifier)); pdpServiceWithPayments.dataSetDeleted(dataSetId, 10, bytes("")); @@ -4397,6 +4535,10 @@ contract FilecoinWarmStorageServiceTest is MockFVMTest { // Wait for payment end epoch to elapse vm.roll(info.pdpEndEpoch + 1); + // Settle the rail before deletion + FilecoinPayV1.RailView memory pdpRail = payments.getRail(info.pdpRailId); + payments.settleRail(info.pdpRailId, pdpRail.endEpoch); + // Call dataSetDeleted to trigger cleanup vm.prank(address(mockPDPVerifier)); pdpServiceWithPayments.dataSetDeleted(dataSetId, 10, bytes("")); @@ -4476,8 +4618,13 @@ contract FilecoinWarmStorageServiceTest is MockFVMTest { // Get updated info after termination info = viewContract.getDataSet(dataSetId); - // Wait for payment end epoch to elapse - vm.roll(info.pdpEndEpoch + 1); + // Wait for payment end epoch to elapse plus extra for proving deadline + (uint64 maxProvingPeriod,,,) = viewContract.getPDPConfig(); + vm.roll(info.pdpEndEpoch + maxProvingPeriod + 1); + + // Settle the rail before deletion + FilecoinPayV1.RailView memory pdpRail = payments.getRail(info.pdpRailId); + payments.settleRail(info.pdpRailId, pdpRail.endEpoch); // Call dataSetDeleted to trigger complete cleanup vm.prank(address(mockPDPVerifier)); @@ -4571,7 +4718,7 @@ contract FilecoinWarmStorageServiceTest is MockFVMTest { // Prepare piece data Cids.Cid[] memory pieceData = new Cids.Cid[](1); - pieceData[0].data = bytes("test_piece_1"); + pieceData[0] = Cids.CommPv2FromDigest(0, 4, 
keccak256(abi.encodePacked("test_piece_1"))); string[] memory keys = new string[](0); string[] memory values = new string[](0); @@ -4617,7 +4764,7 @@ contract FilecoinWarmStorageServiceTest is MockFVMTest { // Prepare piece data Cids.Cid[] memory pieceData = new Cids.Cid[](1); - pieceData[0].data = bytes("test_piece"); + pieceData[0] = Cids.CommPv2FromDigest(0, 4, keccak256(abi.encodePacked("test_piece_1"))); string[] memory keys = new string[](0); string[] memory values = new string[](0); @@ -4691,7 +4838,7 @@ contract FilecoinWarmStorageServiceTest is MockFVMTest { // Prepare piece data Cids.Cid[] memory pieceData = new Cids.Cid[](1); - pieceData[0].data = bytes("test"); + pieceData[0] = Cids.CommPv2FromDigest(0, 4, keccak256(abi.encodePacked("test_piece_1"))); string[] memory keys = new string[](0); string[] memory values = new string[](0); @@ -4738,7 +4885,7 @@ contract FilecoinWarmStorageServiceTest is MockFVMTest { // Prepare piece data Cids.Cid[] memory pieceData = new Cids.Cid[](1); - pieceData[0].data = bytes("test"); + pieceData[0] = Cids.CommPv2FromDigest(0, 4, keccak256(abi.encodePacked("test_piece_1"))); string[] memory keys = new string[](0); string[] memory values = new string[](0); @@ -5211,11 +5358,12 @@ contract ValidatePaymentTest is FilecoinWarmStorageServiceTest { assertEq(result.settleUpto, activationEpoch + (maxProvingPeriod * 2), "Should not settle last period"); assertEq(result.note, "No proven epochs in the requested range"); - // Never settle less than 1 proving period when that period is unproven + // For partial first period, settlement doesn't advance even if deadline passed + // (caller should request a full period or use the multi-period path) toEpoch = activationEpoch + 1; result = pdpServiceWithPayments.validatePayment(info.pdpRailId, proposedAmount, activationEpoch, toEpoch, 0); assertEq(result.modifiedAmount, 0, "Should pay nothing"); - assertEq(result.settleUpto, activationEpoch, "Should not settle"); + 
assertEq(result.settleUpto, activationEpoch, "Should not settle partial first period"); assertEq(result.note, "No proven epochs in the requested range"); // Never settle less than 1 proving period when that period is unproven @@ -5415,4 +5563,372 @@ contract ValidatePaymentTest is FilecoinWarmStorageServiceTest { vm.expectRevert(abi.encodeWithSelector(Errors.InvalidEpochRange.selector, 200, 200)); pdpServiceWithPayments.validatePayment(info.pdpRailId, 1000e6, 200, 200, 0); } + + /** + * @notice Test: Piece metadata removal is deferred until nextProvingPeriod + * @dev Verifies that: + * 1. Metadata persists after piecesScheduledRemove + * 2. Metadata is cleaned up after nextProvingPeriod + */ + function testPieceMetadataRemovalDeferredToNextProvingPeriod() public { + // Setup: Create dataset with piece metadata + uint256 pieceId = 0; + string[] memory keys = new string[](2); + string[] memory values = new string[](2); + keys[0] = "filename"; + values[0] = "test.txt"; + keys[1] = "size"; + values[1] = "1024"; + + PieceMetadataSetup memory setup = + setupDataSetWithPieceMetadata(pieceId, keys, values, FAKE_SIGNATURE, address(mockPDPVerifier)); + + // Verify metadata exists + (string[] memory storedKeys, string[] memory storedValues) = + viewContract.getAllPieceMetadata(setup.dataSetId, setup.pieceId); + assertEq(storedKeys.length, 2, "Should have 2 metadata keys"); + assertEq(storedKeys[0], keys[0], "Key 0 should match"); + assertEq(storedValues[0], values[0], "Value 0 should match"); + + // Get proving period config + (uint64 provingPeriod,,,) = viewContract.getPDPConfig(); + + // Start proving period + uint256 firstDeadline = block.number + provingPeriod; + vm.prank(address(mockPDPVerifier)); + pdpServiceWithPayments.nextProvingPeriod(setup.dataSetId, firstDeadline, 100, ""); + + // Schedule piece removal + uint256[] memory pieceIds = new uint256[](1); + pieceIds[0] = pieceId; + bytes memory scheduleRemoveData = abi.encode(FAKE_SIGNATURE); + 
makeSignaturePass(client); + mockPDPVerifier.piecesScheduledRemove( + setup.dataSetId, pieceIds, address(pdpServiceWithPayments), scheduleRemoveData + ); + + // Metadata should STILL exist (deferred cleanup) + (storedKeys, storedValues) = viewContract.getAllPieceMetadata(setup.dataSetId, setup.pieceId); + assertEq(storedKeys.length, 2, "Metadata should persist after piecesScheduledRemove"); + + // Move to next proving period + vm.roll(block.number + provingPeriod + 1); + + // Call nextProvingPeriod to trigger cleanup + uint256 nextDeadline = firstDeadline + provingPeriod; + vm.prank(address(mockPDPVerifier)); + pdpServiceWithPayments.nextProvingPeriod(setup.dataSetId, nextDeadline, 100, ""); + + // Metadata should now be cleaned up + (storedKeys, storedValues) = viewContract.getAllPieceMetadata(setup.dataSetId, setup.pieceId); + assertEq(storedKeys.length, 0, "Metadata should be removed after nextProvingPeriod"); + } + + /** + * @notice Test: Multiple pieces scheduled for removal are all cleaned up + */ + function testMultiplePieceMetadataRemovalAtNextProvingPeriod() public { + // Create dataset + (string[] memory metadataKeys, string[] memory metadataValues) = _getSingleMetadataKV("label", "Test Dataset"); + uint256 dataSetId = createDataSetForClient(sp1, client, metadataKeys, metadataValues); + + // Add 3 pieces with metadata + uint256 numPieces = 3; + Cids.Cid[] memory pieceData = new Cids.Cid[](numPieces); + string[][] memory allKeys = new string[][](numPieces); + string[][] memory allValues = new string[][](numPieces); + + for (uint256 i = 0; i < numPieces; i++) { + pieceData[i] = Cids.CommPv2FromDigest(0, 4, keccak256(abi.encodePacked("file", i))); + allKeys[i] = new string[](1); + allValues[i] = new string[](1); + allKeys[i][0] = "index"; + allValues[i][0] = vm.toString(i); + } + + uint256 nonce = 5000; + bytes memory encodedData = abi.encode(nonce, allKeys, allValues, FAKE_SIGNATURE); + vm.prank(address(mockPDPVerifier)); + 
pdpServiceWithPayments.piecesAdded(dataSetId, 0, pieceData, encodedData); + + // Get proving period config + (uint64 provingPeriod,,,) = viewContract.getPDPConfig(); + + // Start proving period + uint256 firstDeadline = block.number + provingPeriod; + vm.prank(address(mockPDPVerifier)); + pdpServiceWithPayments.nextProvingPeriod(dataSetId, firstDeadline, 100, ""); + + // Schedule removal of all pieces + uint256[] memory pieceIds = new uint256[](numPieces); + for (uint256 i = 0; i < numPieces; i++) { + pieceIds[i] = i; + } + bytes memory scheduleRemoveData = abi.encode(FAKE_SIGNATURE); + makeSignaturePass(client); + mockPDPVerifier.piecesScheduledRemove(dataSetId, pieceIds, address(pdpServiceWithPayments), scheduleRemoveData); + + // Verify all metadata still exists + for (uint256 i = 0; i < numPieces; i++) { + (string[] memory storedKeys,) = viewContract.getAllPieceMetadata(dataSetId, i); + assertEq(storedKeys.length, 1, "Metadata should persist for each piece"); + } + + // Move to next proving period and trigger cleanup + vm.roll(block.number + provingPeriod + 1); + uint256 nextDeadline = firstDeadline + provingPeriod; + vm.prank(address(mockPDPVerifier)); + pdpServiceWithPayments.nextProvingPeriod(dataSetId, nextDeadline, 0, ""); + + // Verify all metadata is now cleaned up + for (uint256 i = 0; i < numPieces; i++) { + (string[] memory storedKeys,) = viewContract.getAllPieceMetadata(dataSetId, i); + assertEq(storedKeys.length, 0, "Metadata should be removed for each piece"); + } + } + + // ===== Settlement with Passed Deadlines Tests ===== + + /** + * @notice Test: Settlement advances past unproven periods when deadlines have passed + * @dev Verifies that validatePayment advances settleUpTo for periods with passed deadlines + */ + function testValidatePayment_AdvancesPastUnprovenPeriodsWithPassedDeadlines() public { + uint256 dataSetId = createDataSetForServiceProviderTest(sp1, client, "Test"); + + // Start proving + (uint64 maxProvingPeriod, uint256 
challengeWindow,,) = viewContract.getPDPConfig(); + uint256 challengeEpoch = block.number + maxProvingPeriod - (challengeWindow / 2); + + vm.prank(address(mockPDPVerifier)); + pdpServiceWithPayments.nextProvingPeriod(dataSetId, challengeEpoch, 100, ""); + + uint256 activationEpoch = vm.getBlockNumber(); + + // Move forward 3 periods without submitting any proofs + // All 3 period deadlines will have passed + vm.roll(activationEpoch + (maxProvingPeriod * 3) + 1); + + // Validate payment - should advance settleUpTo to cover all passed periods + FilecoinWarmStorageService.DataSetInfoView memory info = viewContract.getDataSet(dataSetId); + uint256 fromEpoch = activationEpoch - 1; + uint256 toEpoch = activationEpoch + (maxProvingPeriod * 3); + uint256 proposedAmount = 1000e6; + + IValidator.ValidationResult memory result = + pdpServiceWithPayments.validatePayment(info.pdpRailId, proposedAmount, fromEpoch, toEpoch, 0); + + // With the fix, settlement should advance to toEpoch even with no proofs + // because all period deadlines have passed + assertEq(result.modifiedAmount, 0, "Should pay nothing for unproven epochs"); + assertEq(result.settleUpto, toEpoch, "Should advance settleUpTo to toEpoch since all deadlines passed"); + } + + /** + * @notice Test: Settlement blocks on current period if deadline hasn't passed + * @dev Verifies that validatePayment blocks on unproven period if deadline is still open + */ + function testValidatePayment_BlocksOnUnprovenPeriodWithOpenDeadline() public { + uint256 dataSetId = createDataSetForServiceProviderTest(sp1, client, "Test"); + + // Start proving + (uint64 maxProvingPeriod, uint256 challengeWindow,,) = viewContract.getPDPConfig(); + uint256 challengeEpoch = block.number + maxProvingPeriod - (challengeWindow / 2); + + vm.prank(address(mockPDPVerifier)); + pdpServiceWithPayments.nextProvingPeriod(dataSetId, challengeEpoch, 100, ""); + + uint256 activationEpoch = vm.getBlockNumber(); + + // Move forward only halfway through the first 
period (deadline hasn't passed) + vm.roll(activationEpoch + (maxProvingPeriod / 2)); + + // Validate payment - should NOT advance because deadline hasn't passed + FilecoinWarmStorageService.DataSetInfoView memory info = viewContract.getDataSet(dataSetId); + uint256 fromEpoch = activationEpoch - 1; + uint256 toEpoch = activationEpoch + (maxProvingPeriod / 2); + uint256 proposedAmount = 1000e6; + + IValidator.ValidationResult memory result = + pdpServiceWithPayments.validatePayment(info.pdpRailId, proposedAmount, fromEpoch, toEpoch, 0); + + // Settlement should block because the period isn't proven and deadline hasn't passed + assertEq(result.modifiedAmount, 0, "Should pay nothing"); + assertEq(result.settleUpto, fromEpoch, "Should not advance since deadline hasn't passed"); + } + + /** + * @notice Test: Mixed proven and unproven periods with passed deadlines + * @dev Verifies correct payment calculation when some periods are proven and others have passed deadlines + */ + function testValidatePayment_MixedProvenAndUnprovenWithPassedDeadlines() public { + uint256 dataSetId = createDataSetForServiceProviderTest(sp1, client, "Test"); + + // Start proving + (uint64 maxProvingPeriod, uint256 challengeWindow,,) = viewContract.getPDPConfig(); + uint256 firstChallengeEpoch = block.number + maxProvingPeriod - (challengeWindow / 2); + + vm.prank(address(mockPDPVerifier)); + pdpServiceWithPayments.nextProvingPeriod(dataSetId, firstChallengeEpoch, 100, ""); + + uint256 activationEpoch = vm.getBlockNumber(); + + // Submit proof for period 0 only + vm.roll(firstChallengeEpoch); + vm.prank(address(mockPDPVerifier)); + pdpServiceWithPayments.possessionProven(dataSetId, 100, 12345, CHALLENGES_PER_PROOF); + + // Move forward past 3 periods (only period 0 is proven) + vm.roll(activationEpoch + (maxProvingPeriod * 3) + 1); + + // Validate payment + FilecoinWarmStorageService.DataSetInfoView memory info = viewContract.getDataSet(dataSetId); + uint256 fromEpoch = activationEpoch - 1; + 
uint256 toEpoch = activationEpoch + (maxProvingPeriod * 3); + uint256 proposedAmount = 3000e6; // 1000 per period + + IValidator.ValidationResult memory result = + pdpServiceWithPayments.validatePayment(info.pdpRailId, proposedAmount, fromEpoch, toEpoch, 0); + + // Should pay for period 0 only, but advance to toEpoch since all deadlines passed + // Note: provenEpochs is maxProvingPeriod + 1 because of how the first period calculation + // includes epochs from (fromEpoch, startingPeriodDeadline] which is M + 1 epochs + uint256 totalEpochs = toEpoch - fromEpoch; + uint256 provenEpochs = maxProvingPeriod + 1; // Period 0 from (A-1, A+M] + uint256 expectedAmount = (proposedAmount * provenEpochs) / totalEpochs; + + assertEq(result.modifiedAmount, expectedAmount, "Should pay for proven period only"); + assertEq(result.settleUpto, toEpoch, "Should advance to toEpoch since all deadlines passed"); + } + + // ===== Dataset Deletion with Settlement Check Tests ===== + + /** + * @notice Test: Dataset deletion reverts if rail is not fully settled + * @dev Verifies that dataSetDeleted requires rail.settledUpTo >= rail.endEpoch + */ + function testDataSetDeleted_RevertsIfRailNotSettled() public { + uint256 dataSetId = createDataSetForServiceProviderTest(sp1, client, "Test"); + + // Terminate the dataset + vm.prank(client); + pdpServiceWithPayments.terminateService(dataSetId); + + // Get termination info + FilecoinWarmStorageService.DataSetInfoView memory info = viewContract.getDataSet(dataSetId); + assertTrue(info.pdpEndEpoch > 0, "Dataset should be terminated"); + + // Advance past the lockup period but DON'T settle the rail + vm.roll(info.pdpEndEpoch + 1); + + // Get rail info to check settlement status + FilecoinPayV1.RailView memory rail = payments.getRail(info.pdpRailId); + assertTrue(rail.settledUpTo < rail.endEpoch, "Rail should not be fully settled yet"); + + // Attempt to delete - should revert because rail is not settled + vm.expectRevert( + 
abi.encodeWithSelector(Errors.RailNotFullySettled.selector, info.pdpRailId, rail.settledUpTo, rail.endEpoch) + ); + vm.prank(sp1); + mockPDPVerifier.deleteDataSet(pdpServiceWithPayments, dataSetId, bytes("")); + } + + /** + * @notice Test: Dataset deletion succeeds after rail is fully settled + * @dev Verifies that dataSetDeleted succeeds when rail.settledUpTo >= rail.endEpoch + */ + function testDataSetDeleted_SucceedsAfterRailSettled() public { + uint256 dataSetId = createDataSetForServiceProviderTest(sp1, client, "Test"); + + // Start proving so we can settle with validated payments + (uint64 maxProvingPeriod, uint256 challengeWindow,,) = viewContract.getPDPConfig(); + uint256 challengeEpoch = block.number + maxProvingPeriod - (challengeWindow / 2); + + vm.prank(address(mockPDPVerifier)); + pdpServiceWithPayments.nextProvingPeriod(dataSetId, challengeEpoch, 100, ""); + + // Submit proof for first period + vm.roll(challengeEpoch); + vm.prank(address(mockPDPVerifier)); + pdpServiceWithPayments.possessionProven(dataSetId, 100, 12345, CHALLENGES_PER_PROOF); + + // Terminate the dataset + vm.prank(client); + pdpServiceWithPayments.terminateService(dataSetId); + + // Get termination info + FilecoinWarmStorageService.DataSetInfoView memory info = viewContract.getDataSet(dataSetId); + assertTrue(info.pdpEndEpoch > 0, "Dataset should be terminated"); + + // Advance past the lockup period AND past the last proving period deadline + // Settlement requires all period deadlines to have passed for unproven periods + vm.roll(info.pdpEndEpoch + maxProvingPeriod + 1); + + // Settle the rail to completion + // After full settlement, the rail gets finalized and zeroed out, so we can't access it via getRail() + FilecoinPayV1.RailView memory railBefore = payments.getRail(info.pdpRailId); + payments.settleRail(info.pdpRailId, railBefore.endEpoch); + + // Deletion should succeed (rail is either fully settled or finalized) + vm.prank(sp1); + 
mockPDPVerifier.deleteDataSet(pdpServiceWithPayments, dataSetId, bytes("")); + + // Verify dataset is deleted (pdpRailId == 0 indicates deleted/unregistered) + FilecoinWarmStorageService.DataSetInfoView memory deletedInfo = viewContract.getDataSet(dataSetId); + assertEq(deletedInfo.pdpRailId, 0, "Dataset should be deleted"); + } + + /** + * @notice Test: Full flow - SP abandons service, client can still settle and cleanup + * @dev Simulates the scenario from issue #375 where SP fails to prove + */ + function testFullFlow_SPAbandonsService_ClientCanSettleAndCleanup() public { + uint256 dataSetId = createDataSetForServiceProviderTest(sp1, client, "Test"); + + // Start proving + (uint64 maxProvingPeriod, uint256 challengeWindow,,) = viewContract.getPDPConfig(); + uint256 challengeEpoch = block.number + maxProvingPeriod - (challengeWindow / 2); + + vm.prank(address(mockPDPVerifier)); + pdpServiceWithPayments.nextProvingPeriod(dataSetId, challengeEpoch, 100, ""); + + uint256 activationEpoch = vm.getBlockNumber(); + + // SP abandons - no proofs submitted + // Move past the first period deadline + vm.roll(activationEpoch + maxProvingPeriod + 1); + + // Terminate the dataset (by client since SP abandoned) + vm.prank(client); + pdpServiceWithPayments.terminateService(dataSetId); + + // Get termination info + FilecoinWarmStorageService.DataSetInfoView memory info = viewContract.getDataSet(dataSetId); + + // Advance past the lockup period AND past the last proving period deadline + // Settlement requires all period deadlines to have passed for unproven periods + vm.roll(info.pdpEndEpoch + maxProvingPeriod + 1); + + // With the fix, client can now settle the rail even with no proofs + // because all proving deadlines have passed + FilecoinPayV1.RailView memory railBefore = payments.getRail(info.pdpRailId); + (, uint256 clientBalanceBefore,,) = payments.getAccountInfoIfSettled(mockUSDFC, client); + + // Settle the rail - should succeed and pay nothing (no proofs) + // After 
full settlement, the rail gets finalized and zeroed out + payments.settleRail(info.pdpRailId, railBefore.endEpoch); + + (, uint256 clientBalanceAfter,,) = payments.getAccountInfoIfSettled(mockUSDFC, client); + + // Client should not have lost money (SP got nothing because no proofs) + assertGe(clientBalanceAfter, clientBalanceBefore, "Client should not have paid for unproven service"); + + // SP can delete the dataset (rail is fully settled/finalized) + vm.prank(sp1); + mockPDPVerifier.deleteDataSet(pdpServiceWithPayments, dataSetId, bytes("")); + + // Verify dataset is deleted (pdpRailId == 0 indicates deleted/unregistered) + FilecoinWarmStorageService.DataSetInfoView memory deletedInfo = viewContract.getDataSet(dataSetId); + assertEq(deletedInfo.pdpRailId, 0, "Dataset should be deleted"); + } } diff --git a/service_contracts/test/ProviderIdSet.t.sol b/service_contracts/test/ProviderIdSet.t.sol new file mode 100644 index 00000000..bd508102 --- /dev/null +++ b/service_contracts/test/ProviderIdSet.t.sol @@ -0,0 +1,183 @@ +pragma solidity ^0.8.30; + +import {Ownable} from "@openzeppelin/contracts/access/Ownable.sol"; +import {Test} from "forge-std/Test.sol"; +import {ProviderIdSet} from "../src/ProviderIdSet.sol"; + +contract ProviderIdSetTest is Test { + ProviderIdSet set; + + function setUp() public { + set = new ProviderIdSet(); + } + + function testAddGet() public { + for (uint256 i = 0; i < 300; i++) { + uint256[] memory providerIds = set.getProviderIds(); + assertEq(providerIds.length, i); + for (uint256 j = 0; j < i; j++) { + assertEq(providerIds[j], j * j + 1); + } + assertFalse(set.containsProviderId(i * i + 1)); + set.addProviderId(i * i + 1); + assertTrue(set.containsProviderId(i * i + 1)); + } + } + + function testOwnable() public { + assertEq(set.owner(), address(this)); + + address other = makeAddr("another"); + vm.expectRevert(abi.encodeWithSelector(Ownable.OwnableUnauthorizedAccount.selector, other)); + vm.prank(other); + set.addProviderId(1); + + 
vm.expectRevert(abi.encodeWithSelector(Ownable.OwnableUnauthorizedAccount.selector, other)); + vm.prank(other); + set.removeProviderId(1); + + vm.expectRevert(abi.encodeWithSelector(Ownable.OwnableInvalidOwner.selector, address(0))); + set.transferOwnership(address(0)); + + set.transferOwnership(other); + assertEq(set.owner(), other); + + vm.expectRevert(abi.encodeWithSelector(Ownable.OwnableUnauthorizedAccount.selector, address(this))); + set.renounceOwnership(); + + vm.prank(other); + set.renounceOwnership(); + + assertEq(set.owner(), address(0)); + } + + function testAddDuplicates() public { + assertFalse(set.containsProviderId(300)); + for (uint256 i = 0; i < 10; i++) { + set.addProviderId(300); + assertTrue(set.containsProviderId(300)); + uint256[] memory providerIds = set.getProviderIds(); + assertEq(providerIds.length, 1); + assertEq(providerIds[0], 300); + } + } + + function testAddManyDuplicates() public { + for (uint256 i = 0; i < 3; i++) { + // adding zero is a no-op + for (uint256 j = 0; j <= 35; j++) { + set.addProviderId(j); + } + uint256[] memory providerIds = set.getProviderIds(); + assertEq(providerIds.length, 35); + for (uint256 j = 1; j <= 35; j++) { + assertEq(providerIds[j - 1], j); + } + } + } + + function testProviderIdTooLarge() public { + vm.expectRevert(abi.encodeWithSelector(ProviderIdSet.ProviderIdTooLarge.selector, 0x100000000)); + set.addProviderId(0x100000000); + + vm.expectRevert( + abi.encodeWithSelector( + ProviderIdSet.ProviderIdTooLarge.selector, + 0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff + ) + ); + set.addProviderId(0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff); + + set.addProviderId(0xffffffff); + assertTrue(set.containsProviderId(0xffffffff)); + } + + function testRemoveFIFO() public { + for (uint256 resets = 0; resets < 2; resets++) { + for (uint256 i = 0; i < 36; i++) { + assertEq(set.getProviderIds().length, i); + set.addProviderId(36 - i); + 
assertTrue(set.containsProviderId(36 - i)); + } + unchecked { + for (uint256 size = 36; size <= 36; size--) { + uint256[] memory providerIds = set.getProviderIds(); + assertEq(providerIds.length, size); + uint256 checkSum = 0; + for (uint256 i = 0; i < size; i++) { + checkSum += providerIds[i]; + checkSum -= i + 1; + } + assertEq(int256(checkSum), 0); + for (uint256 i = size + 1; i <= 36; i++) { + vm.expectRevert(abi.encodeWithSelector(ProviderIdSet.ProviderIdNotFound.selector, i)); + set.removeProviderId(i); + } + if (size > 0) { + assertTrue(set.containsProviderId(size)); + set.removeProviderId(size); + assertFalse(set.containsProviderId(size)); + } + } + } + } + } + + function testRemoveLIFO() public { + for (uint256 resets = 0; resets < 2; resets++) { + for (uint256 i = 0; i < 36; i++) { + assertEq(set.getProviderIds().length, i); + set.addProviderId(i + 1); + } + unchecked { + for (uint256 size = 36; size <= 36; size--) { + uint256[] memory providerIds = set.getProviderIds(); + assertEq(providerIds.length, size); + uint256 checkSum = 0; + for (uint256 i = 0; i < size; i++) { + checkSum += providerIds[i]; + checkSum -= i + 1; + } + assertEq(int256(checkSum), 0); + for (uint256 i = size + 1; i <= 36; i++) { + vm.expectRevert(abi.encodeWithSelector(ProviderIdSet.ProviderIdNotFound.selector, i)); + set.removeProviderId(i); + } + if (size > 0) { + assertTrue(set.containsProviderId(size)); + set.removeProviderId(size); + assertFalse(set.containsProviderId(size)); + } + } + } + } + } + + function testAddRemoveLargeIds() public { + for (uint256 shift = 0; shift < 32; shift++) { + assertEq(set.getProviderIds().length, shift); + assertFalse(set.containsProviderId(1 << shift)); + set.addProviderId(1 << shift); + assertTrue(set.containsProviderId(1 << shift)); + } + uint256 removed = 0; + for (uint256 shift = 0; shift < 32; shift++) { + uint256[] memory providerIds = set.getProviderIds(); + assertEq(providerIds.length, 32 - shift); + uint256 found = 0; + for (uint256 i = 
0; i < providerIds.length; i++) { + // only one bit is set in these providerIds + assertEq(providerIds[i] & (providerIds[i] - 1), 0); + + found ^= providerIds[i]; + } + assertEq(found ^ removed, 0xffffffff); + + assertTrue(set.containsProviderId(1 << shift)); + set.removeProviderId(1 << shift); + assertFalse(set.containsProviderId(1 << shift)); + removed |= 1 << shift; + } + assertEq(set.getProviderIds().length, 0); + } +} diff --git a/service_contracts/test/ServiceProviderRegistry.t.sol b/service_contracts/test/ServiceProviderRegistry.t.sol index 5db937c1..a16da2d8 100644 --- a/service_contracts/test/ServiceProviderRegistry.t.sol +++ b/service_contracts/test/ServiceProviderRegistry.t.sol @@ -2,6 +2,7 @@ pragma solidity ^0.8.20; import {MockFVMTest} from "@fvm-solidity/mocks/MockFVMTest.sol"; +import {Vm} from "forge-std/Test.sol"; import {ERC1967Proxy} from "@openzeppelin/contracts/proxy/ERC1967/ERC1967Proxy.sol"; import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; @@ -52,6 +53,99 @@ contract ServiceProviderRegistryTest is MockFVMTest { registry.initialize(); } + function testAnnouncePlannedUpgrade() public { + // Initially, no upgrade is planned + (address nextImplementation, uint96 afterEpoch) = registry.nextUpgrade(); + assertEq(nextImplementation, address(0)); + assertEq(afterEpoch, uint96(0)); + + // Deploy new implementation + ServiceProviderRegistry newImplementation = new ServiceProviderRegistry(); + + // Announce upgrade + ServiceProviderRegistry.PlannedUpgrade memory plan; + plan.nextImplementation = address(newImplementation); + plan.afterEpoch = uint96(vm.getBlockNumber()) + 2000; + + vm.expectEmit(false, false, false, true); + emit ServiceProviderRegistry.UpgradeAnnounced(plan); + registry.announcePlannedUpgrade(plan); + + // Verify upgrade plan is stored + (nextImplementation, afterEpoch) = registry.nextUpgrade(); + assertEq(nextImplementation, plan.nextImplementation); + assertEq(afterEpoch, plan.afterEpoch); + + // Cannot upgrade 
before afterEpoch + bytes memory migrateData = + abi.encodeWithSelector(ServiceProviderRegistry.migrate.selector, newImplementation.VERSION()); + vm.expectRevert(); + registry.upgradeToAndCall(plan.nextImplementation, migrateData); + + // Still cannot upgrade at afterEpoch - 1 + vm.roll(plan.afterEpoch - 1); + vm.expectRevert(); + registry.upgradeToAndCall(plan.nextImplementation, migrateData); + + // Can upgrade at afterEpoch + vm.roll(plan.afterEpoch); + // Note: reinitializer(2) emits Initialized event first, then ContractUpgraded + // We use recordLogs to capture all events and verify ContractUpgraded is present + vm.recordLogs(); + registry.upgradeToAndCall(plan.nextImplementation, migrateData); + + // Verify ContractUpgraded event was emitted + Vm.Log[] memory logs = vm.getRecordedLogs(); + bytes32 expectedTopic = keccak256("ContractUpgraded(string,address)"); + bool foundEvent = false; + for (uint256 i = 0; i < logs.length; i++) { + if (logs[i].topics[0] == expectedTopic) { + (string memory version, address impl) = abi.decode(logs[i].data, (string, address)); + assertEq(version, newImplementation.VERSION(), "Version should match"); + assertEq(impl, plan.nextImplementation, "Implementation should match"); + foundEvent = true; + break; + } + } + assertTrue(foundEvent, "ContractUpgraded event should be emitted"); + + // After upgrade, nextUpgrade should be cleared + (nextImplementation, afterEpoch) = registry.nextUpgrade(); + assertEq(nextImplementation, address(0)); + assertEq(afterEpoch, uint96(0)); + } + + function testAnnouncePlannedUpgradeOnlyOwner() public { + ServiceProviderRegistry newImplementation = new ServiceProviderRegistry(); + ServiceProviderRegistry.PlannedUpgrade memory plan; + plan.nextImplementation = address(newImplementation); + plan.afterEpoch = uint96(vm.getBlockNumber()) + 2000; + + // Non-owner cannot announce upgrade + vm.prank(user1); + vm.expectRevert(); + registry.announcePlannedUpgrade(plan); + } + + function 
testAnnouncePlannedUpgradeInvalidImplementation() public { + ServiceProviderRegistry.PlannedUpgrade memory plan; + plan.nextImplementation = address(0x123); // Invalid address with no code + plan.afterEpoch = uint96(vm.getBlockNumber()) + 2000; + + vm.expectRevert(); + registry.announcePlannedUpgrade(plan); + } + + function testAnnouncePlannedUpgradeInvalidEpoch() public { + ServiceProviderRegistry newImplementation = new ServiceProviderRegistry(); + ServiceProviderRegistry.PlannedUpgrade memory plan; + plan.nextImplementation = address(newImplementation); + plan.afterEpoch = uint96(vm.getBlockNumber()); // Must be in the future + + vm.expectRevert(); + registry.announcePlannedUpgrade(plan); + } + function testIsRegisteredProviderReturnsFalse() public view { // Should return false for unregistered addresses assertFalse(registry.isRegisteredProvider(user1), "Should return false for unregistered address"); @@ -241,13 +335,22 @@ contract ServiceProviderRegistryTest is MockFVMTest { // Deploy new implementation ServiceProviderRegistry newImplementation = new ServiceProviderRegistry(); - // Non-owner cannot upgrade + // Non-owner cannot upgrade (will fail in _authorizeUpgrade due to onlyOwner) vm.prank(user1); vm.expectRevert(); registry.upgradeToAndCall(address(newImplementation), ""); - // Owner can upgrade - registry.upgradeToAndCall(address(newImplementation), ""); + // Owner can upgrade (but needs to announce first or it will fail in _authorizeUpgrade) + // Since we're testing the onlyOwner check, we need to announce the upgrade first + ServiceProviderRegistry.PlannedUpgrade memory plan; + plan.nextImplementation = address(newImplementation); + plan.afterEpoch = uint96(vm.getBlockNumber()) + 1; + registry.announcePlannedUpgrade(plan); + + vm.roll(plan.afterEpoch); + bytes memory migrateData = + abi.encodeWithSelector(ServiceProviderRegistry.migrate.selector, newImplementation.VERSION()); + registry.upgradeToAndCall(address(newImplementation), migrateData); } 
function testTransferOwnership() public { diff --git a/service_contracts/test/mocks/SharedMocks.sol b/service_contracts/test/mocks/SharedMocks.sol index 39e0b267..594c2f5a 100644 --- a/service_contracts/test/mocks/SharedMocks.sol +++ b/service_contracts/test/mocks/SharedMocks.sol @@ -99,6 +99,8 @@ contract MockPDPVerifier { // Track data set service providers for testing mapping(uint256 => address) public dataSetServiceProviders; + // Track leaf counts per data set for tests (computed exactly from each piece CID's padding and height via Cids.leafCount) + mapping(uint256 => uint256) public dataSetLeafCount; event DataSetCreated(uint256 indexed setId, address indexed owner); event DataSetServiceProviderChanged( @@ -118,6 +120,9 @@ contract MockPDPVerifier { // Track service provider dataSetServiceProviders[setId] = msg.sender; + // initialize leaf count to 0 + dataSetLeafCount[setId] = 0; + emit DataSetCreated(setId, msg.sender); return setId; } @@ -128,6 +133,7 @@ contract MockPDPVerifier { } delete dataSetServiceProviders[setId]; + delete dataSetLeafCount[setId]; emit DataSetDeleted(setId, 0); } @@ -150,9 +156,22 @@ contract MockPDPVerifier { } bytes memory extraData = abi.encode(nonce, allKeys, allValues, signature); + + uint256 leafCount = 0; + for (uint256 i = 0; i < pieceData.length; i++) { + (uint256 padding, uint8 height,) = Cids.validateCommPv2(pieceData[i]); + leafCount += Cids.leafCount(padding, height); + } + dataSetLeafCount[dataSetId] += leafCount; + listenerAddr.piecesAdded(dataSetId, firstAdded, pieceData, extraData); } + + // Expose leaf count similar to real PDPVerifier + function getDataSetLeafCount(uint256 setId) external view returns (uint256) { + return dataSetLeafCount[setId]; + } + /** * @notice Simulates service provider change for testing purposes * @dev This function mimics the PDPVerifier's claimDataSetOwnership functionality diff --git a/service_contracts/tools/README.md b/service_contracts/tools/README.md index 8c5fc1e8..9ad4a0dd 100644 --- a/service_contracts/tools/README.md +++ 
b/service_contracts/tools/README.md @@ -49,6 +49,62 @@ The following parameters are critical for proof generation and validation. They DEFAULT_CHALLENGE_WINDOW_SIZE="20" # 20 epochs ``` +## Deployment Address Management + +Deployment scripts automatically load and update contract addresses in `deployments.json`, keyed by chain ID. This makes deployments easier and reduces mistakes when updating addresses downstream. + +### deployments.json Structure + +The `deployments.json` file stores deployment addresses organized by chain ID: + +```json +{ + "314": { + "PDP_VERIFIER_PROXY_ADDRESS": "0x...", + "FILECOIN_PAY_ADDRESS": "0x...", + "FWSS_PROXY_ADDRESS": "0x...", + "metadata": { + "commit": "abc123...", + "deployed_at": "2024-01-01T00:00:00Z" + } + }, + "314159": { + ... + } +} +``` + +### How It Works + +1. **Loading addresses**: Scripts automatically load addresses from `deployments.json` for the detected chain ID. If an address doesn't exist in the JSON, the script will use environment variables or fail if required. + +2. **Updating addresses**: When a script deploys a new contract, it automatically updates `deployments.json` with the new address. + +3. **Environment variable override**: Environment variables take precedence over values loaded from JSON, allowing you to override specific addresses when needed. + +4. **Metadata tracking**: The system automatically tracks the git commit hash and deployment timestamp for each chain. + +### Control Flags + +- `SKIP_LOAD_DEPLOYMENTS=true` - Skip loading addresses from JSON (use only environment variables) +- `SKIP_UPDATE_DEPLOYMENTS=true` - Skip updating JSON after deployment + +### Querying Addresses + +You can query addresses using `jq`: + +```bash +# Get all addresses for a chain +jq '.["314"]' deployments.json + +# Get a specific address +jq -r '.["314"].FWSS_PROXY_ADDRESS' deployments.json +``` + +### Version Control + +The `deployments.json` file should be committed to version control. 
Updates to it should be tagged as version releases. + ## Environment Variables ### Required for all scripts: @@ -62,7 +118,7 @@ These scripts now follow forge/cast's environment variable conventions. Set the - `deploy-warm-storage-calibnet.sh` requires: - `PDP_VERIFIER_PROXY_ADDRESS` - Address of deployed PDPVerifier contract - - `PAYMENTS_CONTRACT_ADDRESS` - Address of deployed FilecoinPayV1 contract + - `FILECOIN_PAY_ADDRESS` - Address of deployed FilecoinPayV1 contract - `deploy-all-warm-storage.sh` requires: @@ -96,8 +152,8 @@ export CHALLENGE_WINDOW_SIZE="20" # 20 epochs for calibnet, 60 for mainnet export ETH_KEYSTORE="/path/to/keystore.json" export PASSWORD="your-password" export ETH_RPC_URL="https://api.calibration.node.glif.io/rpc/v1" -export PDP_VERIFIER_ADDRESS="0x123..." -export PAYMENTS_CONTRACT_ADDRESS="0x456..." +export PDP_VERIFIER_PROXY_ADDRESS="0x123..." +export FILECOIN_PAY_ADDRESS="0x456..." ./deploy-warm-storage-calibnet.sh ``` diff --git a/service_contracts/tools/announce-planned-upgrade-registry.sh b/service_contracts/tools/announce-planned-upgrade-registry.sh new file mode 100755 index 00000000..00542ab1 --- /dev/null +++ b/service_contracts/tools/announce-planned-upgrade-registry.sh @@ -0,0 +1,77 @@ +#!/bin/bash + +# announce-planned-upgrade-registry.sh: Announces a planned upgrade for ServiceProviderRegistry +# Required args: ETH_RPC_URL, SERVICE_PROVIDER_REGISTRY_PROXY_ADDRESS, ETH_KEYSTORE, PASSWORD, NEW_SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS, AFTER_EPOCH + +if [ -z "$ETH_RPC_URL" ]; then + echo "Error: ETH_RPC_URL is not set" + exit 1 +fi + +if [ -z "$ETH_KEYSTORE" ]; then + echo "Error: ETH_KEYSTORE is not set" + exit 1 +fi + +if [ -z "$PASSWORD" ]; then + echo "Error: PASSWORD is not set" + exit 1 +fi + +if [ -z "$CHAIN" ]; then + CHAIN=$(cast chain-id) + if [ -z "$CHAIN" ]; then + echo "Error: Failed to detect chain ID from RPC" + exit 1 + fi +fi + +if [ -z "$NEW_SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS" ]; then + 
echo "Error: NEW_SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS is not set" + exit 1 +fi + +if [ -z "$AFTER_EPOCH" ]; then + echo "Error: AFTER_EPOCH is not set" + exit 1 +fi + +CURRENT_EPOCH=$(cast block-number 2>/dev/null); if [ -z "$CURRENT_EPOCH" ]; then echo "Error: failed to fetch current block number from RPC"; exit 1; fi + +if [ "$CURRENT_EPOCH" -ge "$AFTER_EPOCH" ]; then + echo "Already past AFTER_EPOCH ($CURRENT_EPOCH >= $AFTER_EPOCH)" + exit 1 +else + echo "Announcing planned upgrade after $(($AFTER_EPOCH - $CURRENT_EPOCH)) epochs" +fi + + +ADDR=$(cast wallet address --password "$PASSWORD") +echo "Sending announcement from owner address: $ADDR" + +# Get current nonce +NONCE=$(cast nonce "$ADDR") + +if [ -z "$SERVICE_PROVIDER_REGISTRY_PROXY_ADDRESS" ]; then + echo "Error: SERVICE_PROVIDER_REGISTRY_PROXY_ADDRESS is not set" + exit 1 +fi + +PROXY_OWNER=$(cast call -f 0x0000000000000000000000000000000000000000 "$SERVICE_PROVIDER_REGISTRY_PROXY_ADDRESS" "owner()(address)" 2>/dev/null) +if [ "$PROXY_OWNER" != "$ADDR" ]; then + echo "Supplied ETH_KEYSTORE ($ADDR) is not the proxy owner ($PROXY_OWNER)."
+ exit 1 +fi + +TX_HASH=$(cast send "$SERVICE_PROVIDER_REGISTRY_PROXY_ADDRESS" "announcePlannedUpgrade((address,uint96))" "($NEW_SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS,$AFTER_EPOCH)" \ + --password "$PASSWORD" \ + --nonce "$NONCE" \ + --json | jq -r '.transactionHash') + +if [ -z "$TX_HASH" ]; then + echo "Error: Failed to send announcePlannedUpgrade transaction" + exit 1 +fi + +echo "announcePlannedUpgrade transaction sent: $TX_HASH" + diff --git a/service_contracts/tools/announce-planned-upgrade.sh b/service_contracts/tools/announce-planned-upgrade.sh index c603fcbc..88d650b2 100755 --- a/service_contracts/tools/announce-planned-upgrade.sh +++ b/service_contracts/tools/announce-planned-upgrade.sh @@ -1,7 +1,7 @@ #!/bin/bash # announce-planned-upgrade.sh: Completes a pending upgrade -# Required args: ETH_RPC_URL, WARM_STORAGE_PROXY_ADDRESS, ETH_KEYSTORE, PASSWORD, NEW_WARM_STORAGE_IMPLEMENTATION_ADDRESS, AFTER_EPOCH +# Required args: ETH_RPC_URL, FWSS_PROXY_ADDRESS, ETH_KEYSTORE, PASSWORD, NEW_FWSS_IMPLEMENTATION_ADDRESS, AFTER_EPOCH if [ -z "$ETH_RPC_URL" ]; then echo "Error: ETH_RPC_URL is not set" @@ -26,8 +26,8 @@ if [ -z "$CHAIN" ]; then fi fi -if [ -z "$NEW_WARM_STORAGE_IMPLEMENTATION_ADDRESS" ]; then - echo "NEW_WARM_STORAGE_IMPLEMENTATION_ADDRESS is not set" +if [ -z "$NEW_FWSS_IMPLEMENTATION_ADDRESS" ]; then + echo "NEW_FWSS_IMPLEMENTATION_ADDRESS is not set" exit 1 fi @@ -52,24 +52,25 @@ echo "Sending announcement from owner address: $ADDR" # Get current nonce NONCE=$(cast nonce "$ADDR") -if [ -z "$WARM_STORAGE_PROXY_ADDRESS" ]; then - echo "Error: WARM_STORAGE_PROXY_ADDRESS is not set" +if [ -z "$FWSS_PROXY_ADDRESS" ]; then + echo "Error: FWSS_PROXY_ADDRESS is not set" exit 1 fi -PROXY_OWNER=$(cast call "$WARM_STORAGE_PROXY_ADDRESS" "owner()(address)" 2>/dev/null) +PROXY_OWNER=$(cast call -f 0x0000000000000000000000000000000000000000 "$FWSS_PROXY_ADDRESS" "owner()(address)" 2>/dev/null) if [ "$PROXY_OWNER" != "$ADDR" ]; then echo "Supplied 
ETH_KEYSTORE ($ADDR) is not the proxy owner ($PROXY_OWNER)." exit 1 fi -TX_HASH=$(cast send "$WARM_STORAGE_PROXY_ADDRESS" "announcePlannedUpgrade((address,uint96))" "($NEW_WARM_STORAGE_IMPLEMENTATION_ADDRESS,$AFTER_EPOCH)" \ +TX_HASH=$(cast send "$FWSS_PROXY_ADDRESS" "announcePlannedUpgrade((address,uint96))" "($NEW_FWSS_IMPLEMENTATION_ADDRESS,$AFTER_EPOCH)" \ --password "$PASSWORD" \ --nonce "$NONCE" \ --json | jq -r '.transactionHash') if [ -z "$TX_HASH" ]; then echo "Error: Failed to send announcePlannedUpgrade transaction" + exit 1 fi echo "announcePlannedUpgrade transaction sent: $TX_HASH" diff --git a/service_contracts/tools/deploy-all-warm-storage.sh b/service_contracts/tools/deploy-all-warm-storage.sh index 9915e00b..3c645a14 100755 --- a/service_contracts/tools/deploy-all-warm-storage.sh +++ b/service_contracts/tools/deploy-all-warm-storage.sh @@ -24,6 +24,9 @@ fi # in the same directory, regardless of where this script is executed from SCRIPT_DIR="$(dirname "${BASH_SOURCE[0]}")" +# Source the shared deployments script +source "$SCRIPT_DIR/deployments.sh" + echo "Deploying all Warm Storage contracts" if [ -z "$ETH_RPC_URL" ]; then @@ -74,6 +77,9 @@ esac echo "Detected Chain ID: $CHAIN ($NETWORK_NAME)" +# Load deployment addresses from deployments.json +load_deployment_addresses "$CHAIN" + if [ "$DRY_RUN" != "true" ] && [ -z "$ETH_KEYSTORE" ]; then echo "Error: ETH_KEYSTORE is not set (required for actual deployment)" exit 1 @@ -194,6 +200,11 @@ deploy_implementation_if_needed() { eval "$var_name='$address'" echo " ✅ Deployed at: ${!var_name}" + + # Update deployments.json if this is an actual deployment + if [ "$DRY_RUN" != "true" ]; then + update_deployment_address "$CHAIN" "$var_name" "${!var_name}" + fi fi NONCE=$(expr $NONCE + "1") @@ -237,6 +248,11 @@ deploy_proxy_if_needed() { eval "$var_name='$address'" echo " ✅ Deployed at: ${!var_name}" + + # Update deployments.json if this is an actual deployment + if [ "$DRY_RUN" != "true" ]; then + 
update_deployment_address "$CHAIN" "$var_name" "${!var_name}" + fi fi NONCE=$(expr $NONCE + "1") @@ -262,6 +278,11 @@ deploy_session_key_registry_if_needed() { source "$SCRIPT_DIR/deploy-session-key-registry.sh" NONCE=$(expr $NONCE + "1") echo " ✅ Deployed at: $SESSION_KEY_REGISTRY_ADDRESS" + + # Update deployments.json + if [ -n "$SESSION_KEY_REGISTRY_ADDRESS" ]; then + update_deployment_address "$CHAIN" "SESSION_KEY_REGISTRY_ADDRESS" "$SESSION_KEY_REGISTRY_ADDRESS" + fi fi echo } @@ -344,27 +365,27 @@ deploy_session_key_registry_if_needed # Step 1: Deploy or use existing PDPVerifier implementation deploy_implementation_if_needed \ - "VERIFIER_IMPLEMENTATION_ADDRESS" \ + "PDP_VERIFIER_IMPLEMENTATION_ADDRESS" \ "lib/pdp/src/PDPVerifier.sol:PDPVerifier" \ "PDPVerifier implementation" # Step 2: Deploy or use existing PDPVerifier proxy INIT_DATA=$(cast calldata "initialize(uint256)" $CHALLENGE_FINALITY) deploy_proxy_if_needed \ - "PDP_VERIFIER_ADDRESS" \ - "$VERIFIER_IMPLEMENTATION_ADDRESS" \ + "PDP_VERIFIER_PROXY_ADDRESS" \ + "$PDP_VERIFIER_IMPLEMENTATION_ADDRESS" \ "$INIT_DATA" \ "PDPVerifier proxy" # Step 3: Deploy or use existing FilecoinPayV1 contract deploy_implementation_if_needed \ - "PAYMENTS_CONTRACT_ADDRESS" \ + "FILECOIN_PAY_ADDRESS" \ "lib/fws-payments/src/FilecoinPayV1.sol:FilecoinPayV1" \ "FilecoinPayV1" # Step 4: Deploy or use existing ServiceProviderRegistry implementation deploy_implementation_if_needed \ - "REGISTRY_IMPLEMENTATION_ADDRESS" \ + "SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS" \ "src/ServiceProviderRegistry.sol:ServiceProviderRegistry" \ "ServiceProviderRegistry implementation" @@ -372,7 +393,7 @@ deploy_implementation_if_needed \ REGISTRY_INIT_DATA=$(cast calldata "initialize()") deploy_proxy_if_needed \ "SERVICE_PROVIDER_REGISTRY_PROXY_ADDRESS" \ - "$REGISTRY_IMPLEMENTATION_ADDRESS" \ + "$SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS" \ "$REGISTRY_INIT_DATA" \ "ServiceProviderRegistry proxy" @@ -386,11 +407,11 @@ 
deploy_implementation_if_needed \ # Set LIBRARIES variable for the deployment helper (format: path:name:address) LIBRARIES="src/lib/SignatureVerificationLib.sol:SignatureVerificationLib:$SIGNATURE_VERIFICATION_LIB_ADDRESS" deploy_implementation_if_needed \ - "FWS_IMPLEMENTATION_ADDRESS" \ + "FWSS_IMPLEMENTATION_ADDRESS" \ "src/FilecoinWarmStorageService.sol:FilecoinWarmStorageService" \ "FilecoinWarmStorageService implementation" \ - "$PDP_VERIFIER_ADDRESS" \ - "$PAYMENTS_CONTRACT_ADDRESS" \ + "$PDP_VERIFIER_PROXY_ADDRESS" \ + "$FILECOIN_PAY_ADDRESS" \ "$USDFC_TOKEN_ADDRESS" \ "$FILBEAM_BENEFICIARY_ADDRESS" \ "$SERVICE_PROVIDER_REGISTRY_PROXY_ADDRESS" \ @@ -401,8 +422,8 @@ unset LIBRARIES # Initialize with max proving period, challenge window size, FilBeam controller address, name, and description INIT_DATA=$(cast calldata "initialize(uint64,uint256,address,string,string)" $MAX_PROVING_PERIOD $CHALLENGE_WINDOW_SIZE $FILBEAM_CONTROLLER_ADDRESS "$SERVICE_NAME" "$SERVICE_DESCRIPTION") deploy_proxy_if_needed \ - "WARM_STORAGE_SERVICE_ADDRESS" \ - "$FWS_IMPLEMENTATION_ADDRESS" \ + "FWSS_PROXY_ADDRESS" \ + "$FWSS_IMPLEMENTATION_ADDRESS" \ "$INIT_DATA" \ "FilecoinWarmStorageService proxy" @@ -410,12 +431,17 @@ deploy_proxy_if_needed \ echo -e "${BOLD}FilecoinWarmStorageServiceStateView${RESET}" if [ "$DRY_RUN" = "true" ]; then echo " 🔍 Would deploy (skipping in dry-run)" - WARM_STORAGE_VIEW_ADDRESS="0x8901234567890123456789012345678901234567" # Dummy address for dry-run - echo " ✅ Deployment planned (dummy: $WARM_STORAGE_VIEW_ADDRESS)" + FWSS_VIEW_ADDRESS="0x8901234567890123456789012345678901234567" # Dummy address for dry-run + echo " ✅ Deployment planned (dummy: $FWSS_VIEW_ADDRESS)" else echo " 🔧 Using external deployment script..." 
source "$SCRIPT_DIR/deploy-warm-storage-view.sh" - echo " ✅ Deployed at: $WARM_STORAGE_VIEW_ADDRESS" + echo " ✅ Deployed at: $FWSS_VIEW_ADDRESS" + + # Update deployments.json + if [ -n "$FWSS_VIEW_ADDRESS" ]; then + update_deployment_address "$CHAIN" "FWSS_VIEW_ADDRESS" "$FWSS_VIEW_ADDRESS" + fi fi echo @@ -446,14 +472,14 @@ else echo "# DEPLOYMENT SUMMARY ($NETWORK_NAME)" fi -echo "PDPVerifier Implementation: $VERIFIER_IMPLEMENTATION_ADDRESS" -echo "PDPVerifier Proxy: $PDP_VERIFIER_ADDRESS" -echo "FilecoinPayV1 Contract: $PAYMENTS_CONTRACT_ADDRESS" -echo "ServiceProviderRegistry Implementation: $REGISTRY_IMPLEMENTATION_ADDRESS" +echo "PDPVerifier Implementation: $PDP_VERIFIER_IMPLEMENTATION_ADDRESS" +echo "PDPVerifier Proxy: $PDP_VERIFIER_PROXY_ADDRESS" +echo "FilecoinPayV1 Contract: $FILECOIN_PAY_ADDRESS" +echo "ServiceProviderRegistry Implementation: $SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS" echo "ServiceProviderRegistry Proxy: $SERVICE_PROVIDER_REGISTRY_PROXY_ADDRESS" -echo "FilecoinWarmStorageService Implementation: $FWS_IMPLEMENTATION_ADDRESS" -echo "FilecoinWarmStorageService Proxy: $WARM_STORAGE_SERVICE_ADDRESS" -echo "FilecoinWarmStorageServiceStateView: $WARM_STORAGE_VIEW_ADDRESS" +echo "FilecoinWarmStorageService Implementation: $FWSS_IMPLEMENTATION_ADDRESS" +echo "FilecoinWarmStorageService Proxy: $FWSS_PROXY_ADDRESS" +echo "FilecoinWarmStorageServiceStateView: $FWSS_VIEW_ADDRESS" echo echo "Network Configuration ($NETWORK_NAME):" echo "Challenge finality: $CHALLENGE_FINALITY epochs" @@ -474,14 +500,21 @@ if [ "$DRY_RUN" = "false" ] && [ "${AUTO_VERIFY:-true}" = "true" ]; then source tools/verify-contracts.sh verify_contracts_batch \ - "$VERIFIER_IMPLEMENTATION_ADDRESS,lib/pdp/src/PDPVerifier.sol:PDPVerifier" \ - "$PDP_VERIFIER_ADDRESS,lib/pdp/src/ERC1967Proxy.sol:MyERC1967Proxy" \ - "$PAYMENTS_CONTRACT_ADDRESS,lib/fws-payments/src/FilecoinPayV1.sol:FilecoinPayV1" \ - 
"$REGISTRY_IMPLEMENTATION_ADDRESS,src/ServiceProviderRegistry.sol:ServiceProviderRegistry" \ + "$PDP_VERIFIER_IMPLEMENTATION_ADDRESS,lib/pdp/src/PDPVerifier.sol:PDPVerifier" \ + "$PDP_VERIFIER_PROXY_ADDRESS,lib/pdp/src/ERC1967Proxy.sol:MyERC1967Proxy" \ + "$FILECOIN_PAY_ADDRESS,lib/fws-payments/src/FilecoinPayV1.sol:FilecoinPayV1" \ + "$SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS,src/ServiceProviderRegistry.sol:ServiceProviderRegistry" \ "$SERVICE_PROVIDER_REGISTRY_PROXY_ADDRESS,lib/openzeppelin-contracts/contracts/proxy/ERC1967/ERC1967Proxy.sol:ERC1967Proxy" \ - "$FWS_IMPLEMENTATION_ADDRESS,src/FilecoinWarmStorageService.sol:FilecoinWarmStorageService" \ - "$WARM_STORAGE_SERVICE_ADDRESS,lib/openzeppelin-contracts/contracts/proxy/ERC1967/ERC1967Proxy.sol:ERC1967Proxy" \ - "$WARM_STORAGE_VIEW_ADDRESS,src/FilecoinWarmStorageServiceStateView.sol:FilecoinWarmStorageServiceStateView" + "$FWSS_IMPLEMENTATION_ADDRESS,src/FilecoinWarmStorageService.sol:FilecoinWarmStorageService" \ + "$FWSS_PROXY_ADDRESS,lib/openzeppelin-contracts/contracts/proxy/ERC1967/ERC1967Proxy.sol:ERC1967Proxy" \ + "$FWSS_VIEW_ADDRESS,src/FilecoinWarmStorageServiceStateView.sol:FilecoinWarmStorageServiceStateView" popd >/dev/null fi + +# Update deployment metadata if this was an actual deployment +if [ "$DRY_RUN" != "true" ]; then + echo + echo "📝 Updating deployment metadata..." 
+ update_deployment_metadata "$CHAIN" +fi diff --git a/service_contracts/tools/deploy-provider-id-set.sh b/service_contracts/tools/deploy-provider-id-set.sh new file mode 100755 index 00000000..e78ceea7 --- /dev/null +++ b/service_contracts/tools/deploy-provider-id-set.sh @@ -0,0 +1,68 @@ +#!/bin/bash +# deploy-provider-id-set deploys a ProviderIdSet contract +# Assumption: ETH_KEYSTORE, PASSWORD, ETH_RPC_URL env vars are set to an appropriate eth keystore path and password +# Assumption: forge, cast, jq are in the PATH +# Assumption: called from contracts directory so forge paths work out +# + +# Get script directory and source deployments.sh +SCRIPT_DIR="$(dirname "${BASH_SOURCE[0]}")" +source "$SCRIPT_DIR/deployments.sh" + +echo "Deploying ProviderIdSet Contract" + +if [ -z "$ETH_RPC_URL" ]; then + echo "Error: ETH_RPC_URL is not set" + exit 1 +fi + +export CHAIN=$(cast chain-id) + +if [ -z "$ETH_KEYSTORE" ]; then + echo "Error: ETH_KEYSTORE is not set" + exit 1 +fi + +# Optional: Check if PASSWORD is set (some users might use empty password) +if [ -z "$PASSWORD" ]; then + echo "Warning: PASSWORD is not set, using empty password" +fi + +ADDR=$(cast wallet address --password "$PASSWORD") +echo "Deploying contracts from address $ADDR" + +# Get current balance and nonce (cast will use ETH_RPC_URL) +BALANCE=$(cast balance "$ADDR") +echo "Deployer balance: $BALANCE" + +NONCE="$(cast nonce "$ADDR")" +echo "Starting nonce: $NONCE" + +# Deploy ProviderIdSet +ENDORSEMENT_SET_ADDRESS=$(forge create --password "$PASSWORD" --broadcast --nonce $NONCE src/ProviderIdSet.sol:ProviderIdSet | grep "Deployed to" | awk '{print $3}') +if [ -z "$ENDORSEMENT_SET_ADDRESS" ]; then + echo "Error: Failed to extract ProviderIdSet address" + exit 1 +fi +echo "✓ ProviderIdSet deployed at: $ENDORSEMENT_SET_ADDRESS" + +# Update deployments.json +if [ -n "$ENDORSEMENT_SET_ADDRESS" ]; then + update_deployment_address "$CHAIN" "ENDORSEMENT_SET_ADDRESS" "$ENDORSEMENT_SET_ADDRESS" +fi + +# 
Automatic contract verification +if [ "${AUTO_VERIFY:-true}" = "true" ]; then + echo + echo "🔍 Starting automatic contract verification..." + + pushd "$(dirname $0)/.." >/dev/null + source tools/verify-contracts.sh + verify_contracts_batch "$ENDORSEMENT_SET_ADDRESS,src/ProviderIdSet.sol:ProviderIdSet" + popd >/dev/null +else + echo + echo "⏭️ Skipping automatic verification (export AUTO_VERIFY=true to enable)" +fi +echo "==========================================" + diff --git a/service_contracts/tools/deploy-registry-calibnet.sh b/service_contracts/tools/deploy-registry-calibnet.sh index 821b53cd..3321be8d 100755 --- a/service_contracts/tools/deploy-registry-calibnet.sh +++ b/service_contracts/tools/deploy-registry-calibnet.sh @@ -6,10 +6,17 @@ # Assumption: called from contracts directory so forge paths work out # +# Get script directory and source deployments.sh +SCRIPT_DIR="$(dirname "${BASH_SOURCE[0]}")" +source "$SCRIPT_DIR/deployments.sh" + echo "Deploying Service Provider Registry Contract" export CHAIN=314159 +# Load deployment addresses from deployments.json +load_deployment_addresses "$CHAIN" + if [ -z "$ETH_RPC_URL" ]; then echo "Error: ETH_RPC_URL is not set" exit 1 @@ -38,12 +45,12 @@ echo "Starting nonce: $NONCE" # Deploy ServiceProviderRegistry implementation echo "" echo "=== STEP 1: Deploying ServiceProviderRegistry Implementation ===" -REGISTRY_IMPLEMENTATION_ADDRESS=$(forge create --password "$PASSWORD" --broadcast --nonce $NONCE src/ServiceProviderRegistry.sol:ServiceProviderRegistry --optimizer-runs 1 --via-ir | grep "Deployed to" | awk '{print $3}') -if [ -z "$REGISTRY_IMPLEMENTATION_ADDRESS" ]; then +SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS=$(forge create --password "$PASSWORD" --broadcast --nonce $NONCE src/ServiceProviderRegistry.sol:ServiceProviderRegistry --optimizer-runs 1 --via-ir | grep "Deployed to" | awk '{print $3}') +if [ -z "$SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS" ]; then echo "Error: Failed to extract 
ServiceProviderRegistry implementation address" exit 1 fi -echo "✓ ServiceProviderRegistry implementation deployed at: $REGISTRY_IMPLEMENTATION_ADDRESS" +echo "✓ ServiceProviderRegistry implementation deployed at: $SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS" NONCE=$(expr $NONCE + "1") # Deploy ServiceProviderRegistry proxy @@ -53,7 +60,7 @@ echo "=== STEP 2: Deploying ServiceProviderRegistry Proxy ===" INIT_DATA=$(cast calldata "initialize()") echo "Initialization calldata: $INIT_DATA" -REGISTRY_PROXY_ADDRESS=$(forge create --password "$PASSWORD" --broadcast --nonce $NONCE lib/pdp/src/ERC1967Proxy.sol:MyERC1967Proxy --constructor-args $REGISTRY_IMPLEMENTATION_ADDRESS $INIT_DATA --optimizer-runs 1 --via-ir | grep "Deployed to" | awk '{print $3}') +REGISTRY_PROXY_ADDRESS=$(forge create --password "$PASSWORD" --broadcast --nonce $NONCE lib/pdp/src/ERC1967Proxy.sol:MyERC1967Proxy --constructor-args $SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS $INIT_DATA --optimizer-runs 1 --via-ir | grep "Deployed to" | awk '{print $3}') if [ -z "$REGISTRY_PROXY_ADDRESS" ]; then echo "Error: Failed to extract ServiceProviderRegistry proxy address" exit 1 @@ -107,9 +114,20 @@ echo "" echo "==========================================" echo "=== DEPLOYMENT SUMMARY ===" echo "==========================================" -echo "ServiceProviderRegistry Implementation: $REGISTRY_IMPLEMENTATION_ADDRESS" +echo "ServiceProviderRegistry Implementation: $SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS" echo "ServiceProviderRegistry Proxy: $REGISTRY_PROXY_ADDRESS" echo "==========================================" + +# Update deployments.json +if [ -n "$SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS" ]; then + update_deployment_address "$CHAIN" "SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS" "$SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS" +fi +if [ -n "$REGISTRY_PROXY_ADDRESS" ]; then + update_deployment_address "$CHAIN" "SERVICE_PROVIDER_REGISTRY_PROXY_ADDRESS" 
"$REGISTRY_PROXY_ADDRESS" +fi +if [ -n "$SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS" ] || [ -n "$REGISTRY_PROXY_ADDRESS" ]; then + update_deployment_metadata "$CHAIN" +fi echo "" echo "Contract Details:" echo " - Version: $CONTRACT_VERSION" @@ -139,7 +157,7 @@ if [ "${AUTO_VERIFY:-true}" = "true" ]; then pushd "$(dirname $0)/.." >/dev/null source tools/verify-contracts.sh - verify_contracts_batch "$REGISTRY_IMPLEMENTATION_ADDRESS,src/ServiceProviderRegistry.sol:ServiceProviderRegistry" + verify_contracts_batch "$SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS,src/ServiceProviderRegistry.sol:ServiceProviderRegistry" popd >/dev/null else echo diff --git a/service_contracts/tools/deploy-session-key-registry.sh b/service_contracts/tools/deploy-session-key-registry.sh index 3095bf9f..a97d189c 100755 --- a/service_contracts/tools/deploy-session-key-registry.sh +++ b/service_contracts/tools/deploy-session-key-registry.sh @@ -9,6 +9,10 @@ # - called from service_contracts directory # - PATH has forge and cast +# Get script directory and source deployments.sh +SCRIPT_DIR="$(dirname "${BASH_SOURCE[0]}")" +source "$SCRIPT_DIR/deployments.sh" + if [ -z "$ETH_RPC_URL" ]; then echo "Error: ETH_RPC_URL is not set" exit 1 @@ -23,6 +27,9 @@ if [ -z "$CHAIN" ]; then fi fi +# Load deployment addresses from deployments.json +load_deployment_addresses "$CHAIN" + if [ -z "$ETH_KEYSTORE" ]; then echo "Error: ETH_KEYSTORE is not set" @@ -42,6 +49,12 @@ export SESSION_KEY_REGISTRY_ADDRESS=$(forge create --password "$PASSWORD" --broa echo SessionKeyRegistry deployed at $SESSION_KEY_REGISTRY_ADDRESS +# Update deployments.json +if [ -n "$SESSION_KEY_REGISTRY_ADDRESS" ]; then + update_deployment_address "$CHAIN" "SESSION_KEY_REGISTRY_ADDRESS" "$SESSION_KEY_REGISTRY_ADDRESS" + update_deployment_metadata "$CHAIN" +fi + # Automatic contract verification if [ "${AUTO_VERIFY:-true}" = "true" ]; then echo diff --git a/service_contracts/tools/deploy-warm-storage-calibnet.sh 
b/service_contracts/tools/deploy-warm-storage-calibnet.sh index acb356e2..d155e505 100755 --- a/service_contracts/tools/deploy-warm-storage-calibnet.sh +++ b/service_contracts/tools/deploy-warm-storage-calibnet.sh @@ -6,10 +6,16 @@ # Assumption: called from contracts directory so forge paths work out # +# Get script directory and source deployments.sh +SCRIPT_DIR="$(dirname "${BASH_SOURCE[0]}")" +source "$SCRIPT_DIR/deployments.sh" + echo "Deploying Warm Storage Service Contract" export CHAIN=314159 +# Load deployment addresses from deployments.json +load_deployment_addresses "$CHAIN" if [ -z "$ETH_RPC_URL" ]; then echo "Error: ETH_RPC_URL is not set" @@ -21,8 +27,8 @@ if [ -z "$ETH_KEYSTORE" ]; then exit 1 fi -if [ -z "$PAYMENTS_CONTRACT_ADDRESS" ]; then - echo "Error: PAYMENTS_CONTRACT_ADDRESS is not set" +if [ -z "$FILECOIN_PAY_ADDRESS" ]; then + echo "Error: FILECOIN_PAY_ADDRESS is not set" exit 1 fi @@ -132,7 +138,7 @@ NONCE=$(expr $NONCE + "1") SERVICE_PAYMENTS_IMPLEMENTATION_ADDRESS=$(forge create --password "$PASSWORD" --broadcast --nonce $NONCE \ --libraries "SignatureVerificationLib:$SIGNATURE_VERIFICATION_LIB_ADDRESS" \ - src/FilecoinWarmStorageService.sol:FilecoinWarmStorageService --constructor-args $PDP_VERIFIER_PROXY_ADDRESS $PAYMENTS_CONTRACT_ADDRESS $USDFC_TOKEN_ADDRESS $FILBEAM_BENEFICIARY_ADDRESS $SERVICE_PROVIDER_REGISTRY_PROXY_ADDRESS $SESSION_KEY_REGISTRY_ADDRESS | grep "Deployed to" | awk '{print $3}') + src/FilecoinWarmStorageService.sol:FilecoinWarmStorageService --constructor-args $PDP_VERIFIER_PROXY_ADDRESS $FILECOIN_PAY_ADDRESS $USDFC_TOKEN_ADDRESS $FILBEAM_BENEFICIARY_ADDRESS $SERVICE_PROVIDER_REGISTRY_PROXY_ADDRESS $SESSION_KEY_REGISTRY_ADDRESS | grep "Deployed to" | awk '{print $3}') if [ -z "$SERVICE_PAYMENTS_IMPLEMENTATION_ADDRESS" ]; then echo "Error: Failed to extract FilecoinWarmStorageService contract address" exit 1 @@ -145,22 +151,22 @@ NONCE=$(expr $NONCE + "1") echo "Deploying FilecoinWarmStorageService proxy..." 
# Initialize with max proving period, challenge window size, FilBeam controller address, name, and description INIT_DATA=$(cast calldata "initialize(uint64,uint256,address,string,string)" $MAX_PROVING_PERIOD $CHALLENGE_WINDOW_SIZE $FILBEAM_CONTROLLER_ADDRESS "$SERVICE_NAME" "$SERVICE_DESCRIPTION") -WARM_STORAGE_SERVICE_ADDRESS=$(forge create --password "$PASSWORD" --broadcast --nonce $NONCE lib/pdp/src/ERC1967Proxy.sol:MyERC1967Proxy --constructor-args $SERVICE_PAYMENTS_IMPLEMENTATION_ADDRESS $INIT_DATA | grep "Deployed to" | awk '{print $3}') -if [ -z "$WARM_STORAGE_SERVICE_ADDRESS" ]; then +FWSS_PROXY_ADDRESS=$(forge create --password "$PASSWORD" --broadcast --nonce $NONCE lib/pdp/src/ERC1967Proxy.sol:MyERC1967Proxy --constructor-args $SERVICE_PAYMENTS_IMPLEMENTATION_ADDRESS $INIT_DATA | grep "Deployed to" | awk '{print $3}') +if [ -z "$FWSS_PROXY_ADDRESS" ]; then echo "Error: Failed to extract FilecoinWarmStorageService proxy address" exit 1 fi -echo "FilecoinWarmStorageService proxy deployed at: $WARM_STORAGE_SERVICE_ADDRESS" +echo "FilecoinWarmStorageService proxy deployed at: $FWSS_PROXY_ADDRESS" # Summary of deployed contracts echo echo "# DEPLOYMENT SUMMARY" echo "FilecoinWarmStorageService Implementation: $SERVICE_PAYMENTS_IMPLEMENTATION_ADDRESS" -echo "FilecoinWarmStorageService Proxy: $WARM_STORAGE_SERVICE_ADDRESS" +echo "FilecoinWarmStorageService Proxy: $FWSS_PROXY_ADDRESS" echo echo "USDFC token address: $USDFC_TOKEN_ADDRESS" echo "PDPVerifier address: $PDP_VERIFIER_PROXY_ADDRESS" -echo "FilecoinPayV1 contract address: $PAYMENTS_CONTRACT_ADDRESS" +echo "FilecoinPayV1 contract address: $FILECOIN_PAY_ADDRESS" echo "FilBeam controller address: $FILBEAM_CONTROLLER_ADDRESS" echo "FilBeam beneficiary address: $FILBEAM_BENEFICIARY_ADDRESS" echo "ServiceProviderRegistry address: $SERVICE_PROVIDER_REGISTRY_PROXY_ADDRESS" @@ -169,6 +175,20 @@ echo "Challenge window size: $CHALLENGE_WINDOW_SIZE epochs" echo "Service name: $SERVICE_NAME" echo "Service 
description: $SERVICE_DESCRIPTION" +# Update deployments.json +if [ -n "$SERVICE_PAYMENTS_IMPLEMENTATION_ADDRESS" ]; then + update_deployment_address "$CHAIN" "FWSS_IMPLEMENTATION_ADDRESS" "$SERVICE_PAYMENTS_IMPLEMENTATION_ADDRESS" +fi +if [ -n "$FWSS_PROXY_ADDRESS" ]; then + update_deployment_address "$CHAIN" "FWSS_PROXY_ADDRESS" "$FWSS_PROXY_ADDRESS" +fi +if [ -n "$SIGNATURE_VERIFICATION_LIB_ADDRESS" ]; then + update_deployment_address "$CHAIN" "SIGNATURE_VERIFICATION_LIB_ADDRESS" "$SIGNATURE_VERIFICATION_LIB_ADDRESS" +fi +if [ -n "$SERVICE_PAYMENTS_IMPLEMENTATION_ADDRESS" ] || [ -n "$FWSS_PROXY_ADDRESS" ]; then + update_deployment_metadata "$CHAIN" +fi + # Automatic contract verification if [ "${AUTO_VERIFY:-true}" = "true" ]; then echo diff --git a/service_contracts/tools/deploy-warm-storage-implementation-only.sh b/service_contracts/tools/deploy-warm-storage-implementation-only.sh index c5cb91db..c84d429c 100755 --- a/service_contracts/tools/deploy-warm-storage-implementation-only.sh +++ b/service_contracts/tools/deploy-warm-storage-implementation-only.sh @@ -34,21 +34,16 @@ echo "Deploying from address: $ADDR" # Get current nonce NONCE="$(cast nonce "$ADDR")" # Get required addresses from environment or use defaults -if [ -z "$PDP_VERIFIER_ADDRESS" ]; then - echo "Error: PDP_VERIFIER_ADDRESS is not set" +if [ -z "$PDP_VERIFIER_PROXY_ADDRESS" ]; then + echo "Error: PDP_VERIFIER_PROXY_ADDRESS is not set" exit 1 fi -if [ -z "$PAYMENTS_CONTRACT_ADDRESS" ]; then - echo "Error: PAYMENTS_CONTRACT_ADDRESS is not set" +if [ -z "$FILECOIN_PAY_ADDRESS" ]; then + echo "Error: FILECOIN_PAY_ADDRESS is not set" exit 1 fi -if [ -z "$FILBEAM_CONTROLLER_ADDRESS" ]; then - echo "Warning: FILBEAM_CONTROLLER_ADDRESS not set, using default" - FILBEAM_CONTROLLER_ADDRESS="0x5f7E5E2A756430EdeE781FF6e6F7954254Ef629A" -fi - if [ -z "$FILBEAM_BENEFICIARY_ADDRESS" ]; then echo "Warning: FILBEAM_BENEFICIARY_ADDRESS not set, using default" 
FILBEAM_BENEFICIARY_ADDRESS="0x1D60d2F5960Af6341e842C539985FA297E10d6eA" @@ -66,34 +61,35 @@ fi USDFC_TOKEN_ADDRESS="0xb3042734b608a1B16e9e86B374A3f3e389B4cDf0" # USDFC token address on calibnet -# Deploy SignatureVerificationLib first so we can link it into the implementation -echo "Deploying SignatureVerificationLib..." -SIGNATURE_VERIFICATION_LIB_ADDRESS=$(forge create --password "$PASSWORD" --broadcast --nonce $NONCE src/lib/SignatureVerificationLib.sol:SignatureVerificationLib | grep "Deployed to" | awk '{print $3}') - if [ -z "$SIGNATURE_VERIFICATION_LIB_ADDRESS" ]; then - echo "Error: Failed to deploy SignatureVerificationLib" - exit 1 -fi + # Deploy SignatureVerificationLib first so we can link it into the implementation + echo "Deploying SignatureVerificationLib..." + export SIGNATURE_VERIFICATION_LIB_ADDRESS=$(forge create --password "$PASSWORD" --broadcast --nonce $NONCE src/lib/SignatureVerificationLib.sol:SignatureVerificationLib | grep "Deployed to" | awk '{print $3}') -echo "SignatureVerificationLib deployed at: $SIGNATURE_VERIFICATION_LIB_ADDRESS" - -# Increment nonce for the next deployment -NONCE=$((NONCE + 1)) + if [ -z "$SIGNATURE_VERIFICATION_LIB_ADDRESS" ]; then + echo "Error: Failed to deploy SignatureVerificationLib" + exit 1 + fi + echo "SignatureVerificationLib deployed at: $SIGNATURE_VERIFICATION_LIB_ADDRESS" + # Increment nonce for the next deployment + NONCE=$((NONCE + 1)) +else + echo "Using SignatureVerificationLib at: $SIGNATURE_VERIFICATION_LIB_ADDRESS" +fi echo "" echo "Deploying FilecoinWarmStorageService implementation..." 
echo "Constructor arguments:" -echo " PDPVerifier: $PDP_VERIFIER_ADDRESS" -echo " FilecoinPayV1: $PAYMENTS_CONTRACT_ADDRESS" +echo " PDPVerifier: $PDP_VERIFIER_PROXY_ADDRESS" +echo " FilecoinPayV1: $FILECOIN_PAY_ADDRESS" echo " USDFC Token: $USDFC_TOKEN_ADDRESS" -echo " FilBeam Controller Address: $FILBEAM_CONTROLLER_ADDRESS" echo " FilBeam Beneficiary Address: $FILBEAM_BENEFICIARY_ADDRESS" echo " ServiceProviderRegistry: $SERVICE_PROVIDER_REGISTRY_PROXY_ADDRESS" echo " SessionKeyRegistry: $SESSION_KEY_REGISTRY_ADDRESS" -WARM_STORAGE_IMPLEMENTATION_ADDRESS=$(forge create --password "$PASSWORD" --broadcast --nonce $NONCE --libraries "SignatureVerificationLib:$SIGNATURE_VERIFICATION_LIB_ADDRESS" src/FilecoinWarmStorageService.sol:FilecoinWarmStorageService --constructor-args $PDP_VERIFIER_ADDRESS $PAYMENTS_CONTRACT_ADDRESS $USDFC_TOKEN_ADDRESS $FILBEAM_BENEFICIARY_ADDRESS $SERVICE_PROVIDER_REGISTRY_PROXY_ADDRESS $SESSION_KEY_REGISTRY_ADDRESS | grep "Deployed to" | awk '{print $3}') +FWSS_IMPLEMENTATION_ADDRESS=$(forge create --password "$PASSWORD" --broadcast --nonce $NONCE --libraries "src/lib/SignatureVerificationLib.sol:SignatureVerificationLib:$SIGNATURE_VERIFICATION_LIB_ADDRESS" src/FilecoinWarmStorageService.sol:FilecoinWarmStorageService --constructor-args $PDP_VERIFIER_PROXY_ADDRESS $FILECOIN_PAY_ADDRESS $USDFC_TOKEN_ADDRESS $FILBEAM_BENEFICIARY_ADDRESS $SERVICE_PROVIDER_REGISTRY_PROXY_ADDRESS $SESSION_KEY_REGISTRY_ADDRESS | grep "Deployed to" | awk '{print $3}') -if [ -z "$WARM_STORAGE_IMPLEMENTATION_ADDRESS" ]; then +if [ -z "$FWSS_IMPLEMENTATION_ADDRESS" ]; then echo "Error: Failed to deploy FilecoinWarmStorageService implementation" exit 1 fi @@ -101,7 +97,7 @@ fi echo "" echo "# DEPLOYMENT COMPLETE" echo "SignatureVerificationLib deployed at: $SIGNATURE_VERIFICATION_LIB_ADDRESS" -echo "FilecoinWarmStorageService Implementation deployed at: $WARM_STORAGE_IMPLEMENTATION_ADDRESS" +echo "FilecoinWarmStorageService Implementation deployed at: 
$FWSS_IMPLEMENTATION_ADDRESS" echo "" # Automatic contract verification @@ -111,7 +107,7 @@ if [ "${AUTO_VERIFY:-true}" = "true" ]; then pushd "$(dirname $0)/.." >/dev/null source tools/verify-contracts.sh - verify_contracts_batch "$WARM_STORAGE_IMPLEMENTATION_ADDRESS,src/FilecoinWarmStorageService.sol:FilecoinWarmStorageService" + verify_contracts_batch "$FWSS_IMPLEMENTATION_ADDRESS,src/FilecoinWarmStorageService.sol:FilecoinWarmStorageService" popd >/dev/null else echo diff --git a/service_contracts/tools/deploy-warm-storage-view.sh b/service_contracts/tools/deploy-warm-storage-view.sh index ea26d4a5..5db5b7ef 100755 --- a/service_contracts/tools/deploy-warm-storage-view.sh +++ b/service_contracts/tools/deploy-warm-storage-view.sh @@ -2,7 +2,7 @@ # env params: # ETH_RPC_URL -# WARM_STORAGE_SERVICE_ADDRESS +# FWSS_PROXY_ADDRESS # ETH_KEYSTORE # PASSWORD @@ -24,8 +24,8 @@ if [ -z "$CHAIN" ]; then fi fi -if [ -z "$WARM_STORAGE_SERVICE_ADDRESS" ]; then - echo "Error: WARM_STORAGE_SERVICE_ADDRESS is not set" +if [ -z "$FWSS_PROXY_ADDRESS" ]; then + echo "Error: FWSS_PROXY_ADDRESS is not set" exit 1 fi @@ -43,9 +43,9 @@ if [ -z "$NONCE" ]; then NONCE="$(cast nonce "$ADDR")" fi -export WARM_STORAGE_VIEW_ADDRESS=$(forge create --password "$PASSWORD" --broadcast --nonce $NONCE src/FilecoinWarmStorageServiceStateView.sol:FilecoinWarmStorageServiceStateView --constructor-args $WARM_STORAGE_SERVICE_ADDRESS | grep "Deployed to" | awk '{print $3}') +export FWSS_VIEW_ADDRESS=$(forge create --password "$PASSWORD" --broadcast --nonce $NONCE src/FilecoinWarmStorageServiceStateView.sol:FilecoinWarmStorageServiceStateView --constructor-args $FWSS_PROXY_ADDRESS | grep "Deployed to" | awk '{print $3}') -echo FilecoinWarmStorageServiceStateView deployed at $WARM_STORAGE_VIEW_ADDRESS +echo FilecoinWarmStorageServiceStateView deployed at $FWSS_VIEW_ADDRESS # Automatic contract verification if [ "${AUTO_VERIFY:-true}" = "true" ]; then @@ -54,7 +54,7 @@ if [ "${AUTO_VERIFY:-true}" = "true" 
]; then pushd "$(dirname $0)/.." >/dev/null source tools/verify-contracts.sh - verify_contracts_batch "$WARM_STORAGE_VIEW_ADDRESS,src/FilecoinWarmStorageServiceStateView.sol:FilecoinWarmStorageServiceStateView" + verify_contracts_batch "$FWSS_VIEW_ADDRESS,src/FilecoinWarmStorageServiceStateView.sol:FilecoinWarmStorageServiceStateView" popd >/dev/null else echo diff --git a/service_contracts/tools/deployments.sh b/service_contracts/tools/deployments.sh new file mode 100755 index 00000000..d57c847f --- /dev/null +++ b/service_contracts/tools/deployments.sh @@ -0,0 +1,206 @@ +#!/bin/bash +# deployments.sh - Shared functions for loading and updating deployment addresses +# +# This script provides functions to: +# - Load deployment addresses from deployments.json (keyed by chain-id) +# - Update deployment addresses in deployments.json when contracts are deployed +# - Handle missing chains gracefully +# +# Usage: +# source "$(dirname "${BASH_SOURCE[0]}")/deployments.sh" +# load_deployment_addresses "$CHAIN" +# update_deployment_address "$CHAIN" "CONTRACT_NAME" "$ADDRESS" +# +# Environment variables: +# SKIP_LOAD_DEPLOYMENTS - If set to "true", skip loading from JSON (default: false) +# SKIP_UPDATE_DEPLOYMENTS - If set to "true", skip updating JSON (default: false) +# DEPLOYMENTS_JSON_PATH - Path to deployments.json (default: service_contracts/deployments.json) + +# Get the script directory to find deployments.json relative to tools/ +SCRIPT_DIR="$(dirname "${BASH_SOURCE[0]}")" +DEPLOYMENTS_JSON_PATH="${DEPLOYMENTS_JSON_PATH:-$SCRIPT_DIR/../deployments.json}" + +# Ensure deployments.json exists with proper structure +ensure_deployments_json() { + if [ ! -f "$DEPLOYMENTS_JSON_PATH" ]; then + echo "Creating deployments.json at $DEPLOYMENTS_JSON_PATH" + echo '{}' > "$DEPLOYMENTS_JSON_PATH" + fi + + # Ensure it's valid JSON + if ! 
jq empty "$DEPLOYMENTS_JSON_PATH" 2>/dev/null; then + echo "Error: deployments.json is not valid JSON" + exit 1 + fi +} + +# Load deployment addresses from deployments.json for a given chain +# Args: $1=chain_id +# Sets environment variables for all addresses found in the JSON +load_deployment_addresses() { + local chain_id="$1" + + if [ -z "$chain_id" ]; then + echo "Error: chain_id is required for load_deployment_addresses" + return 1 + fi + + # Check if we should skip loading + if [ "${SKIP_LOAD_DEPLOYMENTS:-false}" = "true" ]; then + echo "⏭️ Skipping loading from deployments.json (SKIP_LOAD_DEPLOYMENTS=true)" + return 0 + fi + + ensure_deployments_json + + # Check if chain exists in JSON + if ! jq -e ".[\"$chain_id\"]" "$DEPLOYMENTS_JSON_PATH" > /dev/null 2>&1; then + echo "ℹ️ Chain $chain_id not found in deployments.json, will use environment variables" + return 0 + fi + + echo "📖 Loading deployment addresses from deployments.json for chain $chain_id" + + # Load all addresses from the chain's section + # Extract all keys that are not "metadata" + local addresses=$(jq -r ".[\"$chain_id\"] | to_entries | .[] | select(.key != \"metadata\") | \"\(.key)=\(.value)\"" "$DEPLOYMENTS_JSON_PATH" 2>/dev/null) + + if [ -z "$addresses" ]; then + echo "ℹ️ No addresses found for chain $chain_id in deployments.json" + return 0 + fi + + # Export each address as an environment variable + while IFS='=' read -r key value; do + if [ -n "$key" ] && [ -n "$value" ] && [ "$value" != "null" ]; then + # Only set if not already set (allow env vars to override) + if [ -z "${!key}" ]; then + export "$key=$value" + echo " ✓ Loaded $key=$value" + else + echo " ⊘ Skipped $key (already set to ${!key})" + fi + fi + done <<< "$addresses" +} + +# Update a deployment address in deployments.json +# Args: $1=chain_id, $2=contract_name (env var name), $3=address +update_deployment_address() { + local chain_id="$1" + local contract_name="$2" + local address="$3" + + if [ -z "$chain_id" ]; then + 
echo "Error: chain_id is required for update_deployment_address" + return 1 + fi + + if [ -z "$contract_name" ]; then + echo "Error: contract_name is required for update_deployment_address" + return 1 + fi + + if [ -z "$address" ]; then + echo "Error: address is required for update_deployment_address" + return 1 + fi + + # Check if we should skip updating + if [ "${SKIP_UPDATE_DEPLOYMENTS:-false}" = "true" ]; then + echo "⏭️ Skipping update to deployments.json (SKIP_UPDATE_DEPLOYMENTS=true)" + return 0 + fi + + ensure_deployments_json + + echo "💾 Updating deployments.json: chain=$chain_id, contract=$contract_name, address=$address" + + # Update the JSON file using jq + # This ensures the chain entry exists and updates the specific contract address + local temp_file=$(mktemp) + jq --arg chain "$chain_id" \ + --arg contract "$contract_name" \ + --arg addr "$address" \ + 'if .[$chain] then .[$chain][$contract] = $addr else .[$chain] = {($contract): $addr} end' \ + "$DEPLOYMENTS_JSON_PATH" > "$temp_file" + + if [ $? -ne 0 ]; then + echo "Error: Failed to update deployments.json" + rm -f "$temp_file" + return 1 + fi + + mv "$temp_file" "$DEPLOYMENTS_JSON_PATH" + echo " ✓ Updated $contract_name=$address for chain $chain_id" +} + +# Update deployment metadata (commit hash, deployment timestamp, etc.) 
+# Args: $1=chain_id, $2=commit_hash (optional), $3=deployed_at (optional, defaults to current timestamp) +update_deployment_metadata() { + local chain_id="$1" + local commit_hash="${2:-}" + local deployed_at="${3:-$(date -u +"%Y-%m-%dT%H:%M:%SZ")}" + + if [ -z "$chain_id" ]; then + echo "Error: chain_id is required for update_deployment_metadata" + return 1 + fi + + # Check if we should skip updating + if [ "${SKIP_UPDATE_DEPLOYMENTS:-false}" = "true" ]; then + return 0 + fi + + ensure_deployments_json + + # Get current commit hash if not provided + if [ -z "$commit_hash" ]; then + if command -v git >/dev/null 2>&1; then + commit_hash=$(git rev-parse HEAD 2>/dev/null || echo "") + fi + fi + + local temp_file=$(mktemp) + local jq_cmd="if .[\"$chain_id\"] then .[\"$chain_id\"].metadata = {} else .[\"$chain_id\"] = {metadata: {}} end" + + if [ -n "$commit_hash" ]; then + jq_cmd="$jq_cmd | .[\"$chain_id\"].metadata.commit = \"$commit_hash\"" + fi + + jq_cmd="$jq_cmd | .[\"$chain_id\"].metadata.deployed_at = \"$deployed_at\"" + + jq "$jq_cmd" "$DEPLOYMENTS_JSON_PATH" > "$temp_file" + + if [ $? 
-ne 0 ]; then + echo "Error: Failed to update deployment metadata" + rm -f "$temp_file" + return 1 + fi + + mv "$temp_file" "$DEPLOYMENTS_JSON_PATH" + + if [ -n "$commit_hash" ]; then + echo " ✓ Updated metadata: commit=$commit_hash, deployed_at=$deployed_at" + else + echo " ✓ Updated metadata: deployed_at=$deployed_at" + fi +} + +# Get a deployment address from JSON (useful for scripts that just need to read) +# Args: $1=chain_id, $2=contract_name +# Outputs: address or empty string if not found +get_deployment_address() { + local chain_id="$1" + local contract_name="$2" + + if [ -z "$chain_id" ] || [ -z "$contract_name" ]; then + return 1 + fi + + ensure_deployments_json + + jq -r ".[\"$chain_id\"][\"$contract_name\"] // empty" "$DEPLOYMENTS_JSON_PATH" 2>/dev/null +} + + diff --git a/service_contracts/tools/set-warm-storage-view.sh b/service_contracts/tools/set-warm-storage-view.sh index 731e4506..01c6efdf 100755 --- a/service_contracts/tools/set-warm-storage-view.sh +++ b/service_contracts/tools/set-warm-storage-view.sh @@ -5,8 +5,8 @@ # # Environment variables required: # - ETH_RPC_URL: RPC endpoint URL -# - WARM_STORAGE_SERVICE_ADDRESS: Address of the deployed FilecoinWarmStorageService proxy -# - WARM_STORAGE_VIEW_ADDRESS: Address of the deployed FilecoinWarmStorageServiceStateView +# - FWSS_PROXY_ADDRESS: Address of the deployed FilecoinWarmStorageService proxy +# - FWSS_VIEW_ADDRESS: Address of the deployed FilecoinWarmStorageServiceStateView # - ETH_KEYSTORE: Path to keystore file # - PASSWORD: Keystore password # - NONCE: Transaction nonce (optional, will fetch if not provided) @@ -25,13 +25,13 @@ if [ -z "$CHAIN" ]; then fi fi -if [ -z "$WARM_STORAGE_SERVICE_ADDRESS" ]; then - echo "Error: WARM_STORAGE_SERVICE_ADDRESS is not set" +if [ -z "$FWSS_PROXY_ADDRESS" ]; then + echo "Error: FWSS_PROXY_ADDRESS is not set" exit 1 fi -if [ -z "$WARM_STORAGE_VIEW_ADDRESS" ]; then - echo "Error: WARM_STORAGE_VIEW_ADDRESS is not set" +if [ -z "$FWSS_VIEW_ADDRESS" ]; 
then + echo "Error: FWSS_VIEW_ADDRESS is not set" exit 1 fi @@ -51,7 +51,7 @@ fi echo "Setting view contract address on FilecoinWarmStorageService..." # Execute transaction and capture output, only show errors if it fails -TX_OUTPUT=$(cast send --password "$PASSWORD" --nonce $NONCE $WARM_STORAGE_SERVICE_ADDRESS "setViewContract(address)" $WARM_STORAGE_VIEW_ADDRESS 2>&1) +TX_OUTPUT=$(cast send --password "$PASSWORD" --nonce $NONCE $FWSS_PROXY_ADDRESS "setViewContract(address)" $FWSS_VIEW_ADDRESS 2>&1) if [ $? -eq 0 ]; then echo "View contract address set successfully" diff --git a/service_contracts/tools/upgrade-registry.sh b/service_contracts/tools/upgrade-registry.sh new file mode 100755 index 00000000..fbd7f9ca --- /dev/null +++ b/service_contracts/tools/upgrade-registry.sh @@ -0,0 +1,153 @@ +#!/bin/bash + +# upgrade-registry.sh: Completes a pending upgrade for ServiceProviderRegistry +# Required args: ETH_RPC_URL, SERVICE_PROVIDER_REGISTRY_PROXY_ADDRESS, ETH_KEYSTORE, PASSWORD, NEW_SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS +# Optional args: NEW_VERSION +# Calculated if unset: CHAIN + +# Get script directory and source deployments.sh +SCRIPT_DIR="$(dirname "${BASH_SOURCE[0]}")" +source "$SCRIPT_DIR/deployments.sh" + +if [ -z "$ETH_RPC_URL" ]; then + echo "Error: ETH_RPC_URL is not set" + exit 1 +fi + +if [ -z "$ETH_KEYSTORE" ]; then + echo "Error: ETH_KEYSTORE is not set" + exit 1 +fi + +if [ -z "$PASSWORD" ]; then + echo "Error: PASSWORD is not set" + exit 1 +fi + +if [ -z "$CHAIN" ]; then + CHAIN=$(cast chain-id) + if [ -z "$CHAIN" ]; then + echo "Error: Failed to detect chain ID from RPC" + exit 1 + fi +fi + +# Load deployment addresses from deployments.json +load_deployment_addresses "$CHAIN" + +ADDR=$(cast wallet address --password "$PASSWORD") +echo "Using owner address: $ADDR" + +# Get current nonce +NONCE=$(cast nonce "$ADDR") + +if [ -z "$SERVICE_PROVIDER_REGISTRY_PROXY_ADDRESS" ]; then + echo "Error: SERVICE_PROVIDER_REGISTRY_PROXY_ADDRESS is not 
set" + exit 1 +fi + +PROXY_OWNER=$(cast call -f 0x0000000000000000000000000000000000000000 "$SERVICE_PROVIDER_REGISTRY_PROXY_ADDRESS" "owner()(address)" 2>/dev/null) +if [ "$PROXY_OWNER" != "$ADDR" ]; then + echo "Supplied ETH_KEYSTORE ($ADDR) is not the proxy owner ($PROXY_OWNER)." + exit 1 +fi + +# Get the upgrade plan (if any) +# Try to call nextUpgrade() - this will fail if the method doesn't exist (old contracts) +UPGRADE_PLAN_OUTPUT=$(cast call -f 0x0000000000000000000000000000000000000000 "$SERVICE_PROVIDER_REGISTRY_PROXY_ADDRESS" "nextUpgrade()(address,uint96)" 2>&1) +CAST_CALL_EXIT_CODE=$? + +ZERO_ADDRESS="0x0000000000000000000000000000000000000000" + +# Check if cast call succeeded (method exists) +if [ $CAST_CALL_EXIT_CODE -eq 0 ] && [ -n "$UPGRADE_PLAN_OUTPUT" ]; then + # Method exists - parse the result + UPGRADE_PLAN=($UPGRADE_PLAN_OUTPUT) + PLANNED_SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS=${UPGRADE_PLAN[0]} + AFTER_EPOCH=${UPGRADE_PLAN[1]} + + # Check if there's a planned upgrade (non-zero address) + # Zero address means either no upgrade was announced or the upgrade was already completed + if [ -n "$PLANNED_SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS" ] && [ "$PLANNED_SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS" != "$ZERO_ADDRESS" ]; then + # New two-step mechanism: validate planned upgrade + echo "Detected planned upgrade (two-step mechanism)" + + if [ "$PLANNED_SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS" != "$NEW_SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS" ]; then + echo "NEW_SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS ($NEW_SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS) != planned ($PLANNED_SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS)" + exit 1 + else + echo "Upgrade plan matches ($NEW_SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS)" + fi + + CURRENT_EPOCH=$(cast block-number 2>/dev/null) + + if [ "$CURRENT_EPOCH" -lt "$AFTER_EPOCH" ]; then + echo "Not time yet ($CURRENT_EPOCH < $AFTER_EPOCH)" + exit 1 
+ else + echo "Upgrade ready ($CURRENT_EPOCH >= $AFTER_EPOCH)" + fi + else + # Method exists but returns zero - no planned upgrade or already completed + # On new contracts, _authorizeUpgrade requires a planned upgrade, so one-step will fail + echo "No planned upgrade detected (nextUpgrade returns zero)" + echo "Error: This contract requires a planned upgrade. Please call announce-planned-upgrade-registry.sh first." + exit 1 + fi +else + # Method doesn't exist (old contract without nextUpgrade) or call failed + echo "nextUpgrade() method not found or call failed, using one-step mechanism (direct upgrade)" + echo "WARNING: This is the legacy upgrade path. For new deployments, use announce-planned-upgrade-registry.sh first." +fi + +if [ -n "$NEW_VERSION" ]; then + echo "Using provided version: $NEW_VERSION" + MIGRATE_DATA=$(cast calldata "migrate(string)" "$NEW_VERSION") +else + echo "Warning: NEW_VERSION is not set. Using empty string for version." + MIGRATE_DATA=$(cast calldata "migrate(string)" "") +fi + +# Call upgradeToAndCall on the proxy with migrate function +echo "Upgrading proxy and calling migrate..." +TX_HASH=$(cast send "$SERVICE_PROVIDER_REGISTRY_PROXY_ADDRESS" "upgradeToAndCall(address,bytes)" "$NEW_SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS" "$MIGRATE_DATA" \ + --password "$PASSWORD" \ + --nonce "$NONCE" \ + --json | jq -r '.transactionHash') + +if [ -z "$TX_HASH" ]; then + echo "Error: Failed to send upgrade transaction" + echo "The transaction may have failed due to:" + echo "- Insufficient permissions (not owner)" + echo "- Proxy is paused or locked" + echo "- Implementation address is invalid" + exit 1 +fi + +echo "Upgrade transaction sent: $TX_HASH" +echo "Waiting for confirmation..." + +# Wait for transaction receipt +cast receipt "$TX_HASH" --confirmations 1 > /dev/null + +# Verify the upgrade by checking the implementation address +echo "Verifying upgrade..." 
+NEW_IMPL=$(cast rpc eth_getStorageAt "$SERVICE_PROVIDER_REGISTRY_PROXY_ADDRESS" 0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc latest | sed 's/"//g' | sed 's/0x000000000000000000000000/0x/') + +# Compare to lowercase +export EXPECTED_IMPL=$(echo $NEW_SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS | tr '[:upper:]' '[:lower:]') + +if [ "$NEW_IMPL" = "$EXPECTED_IMPL" ]; then + echo "✅ Upgrade successful! Proxy now points to: $NEW_SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS" + + # Update deployments.json with new implementation address + if [ -n "$NEW_SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS" ]; then + update_deployment_address "$CHAIN" "SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS" "$NEW_SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS" + fi + update_deployment_metadata "$CHAIN" +else + echo "⚠️ Warning: Could not verify upgrade. Please check manually." + echo "Expected: $NEW_SERVICE_PROVIDER_REGISTRY_IMPLEMENTATION_ADDRESS" + echo "Got: $NEW_IMPL" +fi + diff --git a/service_contracts/tools/upgrade.sh b/service_contracts/tools/upgrade.sh index 02291349..bb8ea307 100755 --- a/service_contracts/tools/upgrade.sh +++ b/service_contracts/tools/upgrade.sh @@ -1,12 +1,16 @@ #!/bin/bash # upgrade.sh: Completes a pending upgrade -# Required args: ETH_RPC_URL, WARM_STORAGE_PROXY_ADDRESS, ETH_KEYSTORE, PASSWORD, NEW_WARM_STORAGE_IMPLEMENTATION_ADDRESS -# Optional args: NEW_WARM_STORAGE_VIEW_ADDRESS -# Calculated if unset: CHAIN, WARM_STORAGE_VIEW_ADDRESS +# Required args: ETH_RPC_URL, FWSS_PROXY_ADDRESS, ETH_KEYSTORE, PASSWORD, NEW_WARM_STORAGE_IMPLEMENTATION_ADDRESS +# Optional args: NEW_FWSS_VIEW_ADDRESS +# Calculated if unset: CHAIN, FWSS_VIEW_ADDRESS -if [ -z "$NEW_WARM_STORAGE_VIEW_ADDRESS" ]; then - echo "Warning: NEW_WARM_STORAGE_VIEW_ADDRESS is not set. Keeping previous view contract." 
+# Get script directory and source deployments.sh +SCRIPT_DIR="$(dirname "${BASH_SOURCE[0]}")" +source "$SCRIPT_DIR/deployments.sh" + +if [ -z "$NEW_FWSS_VIEW_ADDRESS" ]; then + echo "Warning: NEW_FWSS_VIEW_ADDRESS is not set. Keeping previous view contract." fi if [ -z "$ETH_RPC_URL" ]; then @@ -25,36 +29,39 @@ if [ -z "$PASSWORD" ]; then fi if [ -z "$CHAIN" ]; then - CHAIN=$(cast chain-id") + CHAIN=$(cast chain-id) if [ -z "$CHAIN" ]; then echo "Error: Failed to detect chain ID from RPC" exit 1 fi fi +# Load deployment addresses from deployments.json +load_deployment_addresses "$CHAIN" + ADDR=$(cast wallet address --password "$PASSWORD") echo "Using owner address: $ADDR" # Get current nonce NONCE=$(cast nonce "$ADDR") -if [ -z "$WARM_STORAGE_PROXY_ADDRESS" ]; then - echo "Error: WARM_STORAGE_PROXY_ADDRESS is not set" +if [ -z "$FWSS_PROXY_ADDRESS" ]; then + echo "Error: FWSS_PROXY_ADDRESS is not set" exit 1 fi -PROXY_OWNER=$(cast call "$WARM_STORAGE_PROXY_ADDRESS" "owner()(address)" 2>/dev/null) +PROXY_OWNER=$(cast call -f 0x0000000000000000000000000000000000000000 "$FWSS_PROXY_ADDRESS" "owner()(address)" 2>/dev/null) if [ "$PROXY_OWNER" != "$ADDR" ]; then echo "Supplied ETH_KEYSTORE ($ADDR) is not the proxy owner ($PROXY_OWNER)." 
exit 1 fi -if [ -z "$WARM_STORAGE_VIEW_ADDRESS" ]; then - WARM_STORAGE_VIEW_ADDRESS=$(cast call "$WARM_STORAGE_PROXY_ADDRESS" "viewContractAddress()(address)" 2>/dev/null) +if [ -z "$FWSS_VIEW_ADDRESS" ]; then + FWSS_VIEW_ADDRESS=$(cast call -f 0x0000000000000000000000000000000000000000 "$FWSS_PROXY_ADDRESS" "viewContractAddress()(address)" 2>/dev/null) fi # Get the upgrade plan -UPGRADE_PLAN=($(cast call "$WARM_STORAGE_VIEW_ADDRESS" "nextUpgrade()(address,uint96)" 2>/dev/null)) +UPGRADE_PLAN=($(cast call -f 0x0000000000000000000000000000000000000000 "$FWSS_VIEW_ADDRESS" "nextUpgrade()(address,uint96)" 2>/dev/null)) PLANNED_WARM_STORAGE_IMPLEMENTATION_ADDRESS=${UPGRADE_PLAN[0]} AFTER_EPOCH=${UPGRADE_PLAN[1]} @@ -75,17 +82,17 @@ else echo "Upgrade ready ($CURRENT_EPOCH > $AFTER_EPOCH)" fi -if [ -n "$NEW_WARM_STORAGE_VIEW_ADDRESS" ]; then - echo "Using provided view contract address: $NEW_WARM_STORAGE_VIEW_ADDRESS" - MIGRATE_DATA=$(cast calldata "migrate(address)" "$NEW_WARM_STORAGE_VIEW_ADDRESS") +if [ -n "$NEW_FWSS_VIEW_ADDRESS" ]; then + echo "Using provided view contract address: $NEW_FWSS_VIEW_ADDRESS" + MIGRATE_DATA=$(cast calldata "migrate(address)" "$NEW_FWSS_VIEW_ADDRESS") else - echo "Keeping previous view contract address ($WARM_STORAGE_VIEW_ADDRESS)" + echo "Keeping previous view contract address ($FWSS_VIEW_ADDRESS)" MIGRATE_DATA=$(cast calldata "migrate(address)" "0x0000000000000000000000000000000000000000") fi # Call upgradeToAndCall on the proxy with migrate function echo "Upgrading proxy and calling migrate..." 
-TX_HASH=$(cast send "$WARM_STORAGE_PROXY_ADDRESS" "upgradeToAndCall(address,bytes)" "$NEW_WARM_STORAGE_IMPLEMENTATION_ADDRESS" "$MIGRATE_DATA" \ +TX_HASH=$(cast send "$FWSS_PROXY_ADDRESS" "upgradeToAndCall(address,bytes)" "$NEW_WARM_STORAGE_IMPLEMENTATION_ADDRESS" "$MIGRATE_DATA" \ --password "$PASSWORD" \ --nonce "$NONCE" \ --json | jq -r '.transactionHash') @@ -107,13 +114,22 @@ cast receipt "$TX_HASH" --confirmations 1 > /dev/null # Verify the upgrade by checking the implementation address echo "Verifying upgrade..." -NEW_IMPL=$(cast rpc eth_getStorageAt "$WARM_STORAGE_PROXY_ADDRESS" 0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc latest | sed 's/"//g' | sed 's/0x000000000000000000000000/0x/') +NEW_IMPL=$(cast rpc eth_getStorageAt "$FWSS_PROXY_ADDRESS" 0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc latest | sed 's/"//g' | sed 's/0x000000000000000000000000/0x/') # Compare to lowercase export EXPECTED_IMPL=$(echo $NEW_WARM_STORAGE_IMPLEMENTATION_ADDRESS | tr '[:upper:]' '[:lower:]') if [ "$NEW_IMPL" = "$EXPECTED_IMPL" ]; then echo "✅ Upgrade successful! Proxy now points to: $NEW_WARM_STORAGE_IMPLEMENTATION_ADDRESS" + + # Update deployments.json with new implementation address + if [ -n "$NEW_WARM_STORAGE_IMPLEMENTATION_ADDRESS" ]; then + update_deployment_address "$CHAIN" "FWSS_IMPLEMENTATION_ADDRESS" "$NEW_WARM_STORAGE_IMPLEMENTATION_ADDRESS" + fi + if [ -n "$NEW_FWSS_VIEW_ADDRESS" ]; then + update_deployment_address "$CHAIN" "FWSS_VIEW_ADDRESS" "$NEW_FWSS_VIEW_ADDRESS" + fi + update_deployment_metadata "$CHAIN" else echo "⚠️ Warning: Could not verify upgrade. Please check manually." echo "Expected: $NEW_WARM_STORAGE_IMPLEMENTATION_ADDRESS"