diff --git a/.github/workflows/applications.yml b/.github/workflows/applications.yml
index 0737407710..30b1674189 100644
--- a/.github/workflows/applications.yml
+++ b/.github/workflows/applications.yml
@@ -371,6 +371,7 @@ jobs:
            exe:known-graphs \
            exe:standalone-pruner \
            exe:pact-diff \
+           exe:pact-replay \
            test:chainweb-tests \
            test:multi-node-network-tests \
            test:remote-tests \
@@ -417,6 +418,7 @@ jobs:
          cp $(cabal list-bin known-graphs) artifacts/chainweb
          cp $(cabal list-bin multi-node-network-tests) artifacts/chainweb
          cp $(cabal list-bin pact-diff) artifacts/chainweb
+         cp $(cabal list-bin pact-replay) artifacts/chainweb
          cp $(cabal list-bin remote-tests) artifacts/chainweb
          cp $(cabal list-bin standalone-pruner) artifacts/chainweb
          cp README.md artifacts/chainweb
diff --git a/chainweb.cabal b/chainweb.cabal
index 2529f980d5..1aa873bace 100644
--- a/chainweb.cabal
+++ b/chainweb.cabal
@@ -288,6 +288,7 @@ library
        , Chainweb.VerifierPlugin.Hyperlane.Binary
        , Chainweb.VerifierPlugin.Hyperlane.Message
        , Chainweb.VerifierPlugin.Hyperlane.Message.After225
+       , Chainweb.VerifierPlugin.Hyperlane.Message.Before225
        , Chainweb.VerifierPlugin.Hyperlane.Utils
        , Chainweb.Version
        , Chainweb.Version.Development
@@ -375,6 +376,16 @@ library
        , Chainweb.Pact4.TransactionExec
        , Chainweb.Pact4.Types
        , Chainweb.Pact4.Validations
+       , Chainweb.Pact.Transactions.Mainnet0Transactions
+       , Chainweb.Pact.Transactions.Mainnet1Transactions
+       , Chainweb.Pact.Transactions.Mainnet2Transactions
+       , Chainweb.Pact.Transactions.Mainnet3Transactions
+       , Chainweb.Pact.Transactions.Mainnet4Transactions
+       , Chainweb.Pact.Transactions.Mainnet5Transactions
+       , Chainweb.Pact.Transactions.Mainnet6Transactions
+       , Chainweb.Pact.Transactions.Mainnet7Transactions
+       , Chainweb.Pact.Transactions.Mainnet8Transactions
+       , Chainweb.Pact.Transactions.Mainnet9Transactions

        -- utils
        , Utils.Logging
diff --git a/cwtools/cwtools.cabal b/cwtools/cwtools.cabal
index 3bb9fe5f2d..6251c160f2 100644
--- a/cwtools/cwtools.cabal
+++ b/cwtools/cwtools.cabal
@@ -173,6 +173,45 @@ executable db-checksum
        , text
        , unordered-containers

+-- Replay Pact blocks, checking that they produce the correct outputs.
+executable pact-replay
+    import: warning-flags, debugging-flags
+    default-language: Haskell2010
+    ghc-options:
+        -threaded
+        -rtsopts
+        "-with-rtsopts=-N -H1G -A64M"
+        -Wno-x-partial -Wno-unrecognised-warning-flags
+    hs-source-dirs:
+        pact-replay
+    main-is:
+        PactReplay.hs
+    build-depends:
+        , chainweb
+        , chainweb:chainweb-test-utils
+
+        , aeson
+        , async
+        , base
+        , chainweb-storage
+        , constraints
+        , containers
+        , filepath
+        , lens
+        , loglevel
+        , optparse-applicative
+        , pact-json
+        , pact-tng:pact-request-api
+        , pact-tng
+        , resource-pool
+        , resourcet
+        , safe-exceptions
+        , streaming
+        , temporary
+        , unordered-containers
+        , text
+        , vector
+
 -- Generate genesis headers.
 executable ea
     import: warning-flags, debugging-flags
diff --git a/cwtools/pact-replay/PactReplay.hs b/cwtools/pact-replay/PactReplay.hs
new file mode 100644
index 0000000000..4d9eaf43ea
--- /dev/null
+++ b/cwtools/pact-replay/PactReplay.hs
@@ -0,0 +1,188 @@
+{-# LANGUAGE ApplicativeDo #-}
+{-# LANGUAGE BangPatterns #-}
+{-# LANGUAGE FlexibleContexts #-}
+{-# LANGUAGE ImportQualifiedPost #-}
+{-# LANGUAGE LambdaCase #-}
+{-# LANGUAGE NumericUnderscores #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE PartialTypeSignatures #-}
+{-# LANGUAGE RankNTypes #-}
+{-# LANGUAGE ScopedTypeVariables #-}
+{-# LANGUAGE TupleSections #-}
+{-# LANGUAGE TypeApplications #-}
+
+module Main(main) where
+
+import Chainweb.BlockHeader
+import Chainweb.BlockHeaderDB
+import Chainweb.BlockHeaderDB.PruneForks qualified as PruneForks
+import Chainweb.BlockHeight (BlockHeight (..))
+import Chainweb.Core.Brief
+import Chainweb.Cut (cutHeaders, unsafeMkCut)
+import Chainweb.Cut.Create hiding (join)
+import Chainweb.CutDB (cutHashesTable, readHighestCutHeaders)
+import Chainweb.Logger
+import Chainweb.Pact.Backend.Utils
+import Chainweb.Pact.PactService qualified as PactService
+import Chainweb.Pact.Payload.PayloadStore.RocksDB qualified as Pact.Payload.PayloadStore.RocksDB
+import Chainweb.Pact.Types
+import Chainweb.Parent
+import Chainweb.PayloadProvider (blockHeaderToEvaluationCtx)
+import Chainweb.PayloadProvider.Pact
+import Chainweb.PayloadProvider.Pact.Genesis (genesisPayload)
+import Chainweb.Storage.Table.RocksDB (modernDefaultOptions, withReadOnlyRocksDb, withRocksDb)
+import Chainweb.Time
+import Chainweb.TreeDB qualified as TreeDB
+import Chainweb.Utils
+import Chainweb.Version
+import Chainweb.Version.Registry
+import Chainweb.WebBlockHeaderDB
+import Control.Concurrent(threadDelay)
+import Control.Concurrent.Async (forConcurrently, forConcurrently_)
+import Control.Exception.Safe
+import Control.Lens
+import Control.Monad
+import Control.Monad.IO.Class
+import Control.Monad.Trans.Resource
+import Data.Constraint
+import Data.HashSet qualified as HS
+import Data.HashMap.Strict qualified as HM
+import Data.IORef
+import Data.List qualified as List
+import Data.Text qualified as T
+import Data.Text.IO qualified as T
+import GHC.Stack
+import Options.Applicative
+import Streaming qualified as S
+import Streaming.Prelude qualified as S
+import System.FilePath ((</>))
+import System.LogLevel
+import Text.Printf
+
+main :: IO ()
+main = join $
+    execParser $ info
+        (parser <**> helper)
+        (fullDesc
+            <> progDesc "Replay Pact blocks checking that we get the correct outputs"
+            <> header "pact-replay")
+
+getRocksDbDir :: HasCallStack => FilePath -> FilePath
+getRocksDbDir base = base </> "0" </> "rocksDb"
+
+getPactDbDir :: HasCallStack => FilePath -> FilePath
+getPactDbDir base = base </> "0" </> "sqlite"
+
+isPactChain :: HasVersion => ChainId -> Bool
+isPactChain cid = payloadProviderTypeForChain cid == PactProvider
+
+parser :: Parser (IO ())
+parser = do
+    version <- option (findKnownVersion =<< textReader) (long "chainweb-version" <> short 'v')
+    dbDir <- textOption (long "database-directory")
+    logLevel <- flag' Debug (long "verbose") <|> flag' Warn (long "quiet") <|> pure Info
+    maybeStart <- optional $ BlockHeight <$> textOption (long "start")
+    maybeEnd <- optional $ BlockHeight <$> textOption (long "end")
+    chains :: Dict HasVersion -> [ChainId] <-
+        fmap const (jsonOption (long "chains"))
+        <|> pure (\Dict -> filter isPactChain (HS.toList chainIds))
+    return $ withVersion version $ do
+        withRocksDb (getRocksDbDir dbDir) modernDefaultOptions $ \rdb -> do
+            let logger = genericLogger logLevel T.putStrLn
+            let cutTable = cutHashesTable rdb
+            let pdb = Pact.Payload.PayloadStore.RocksDB.newPayloadDb rdb
+            let wbhdb = mkWebBlockHeaderDb rdb (tabulateChains (mkBlockHeaderDb rdb))
+
+            initialCut <- unsafeMkCut <$> readHighestCutHeaders (logFunctionText logger) wbhdb cutTable
+            limitedCut <- maybe (return initialCut) (\end -> limitCut wbhdb end initialCut) maybeEnd
+
+            failureCount <- fmap sum $ forConcurrently (chains Dict `List.intersect` HM.keys (view cutHeaders limitedCut)) $ \cid -> runResourceT $ do
+                let chainLogger = addLabel ("chain", brief cid) logger
+                let config = defaultPactServiceConfig
+                PactPayloadProvider _ serviceEnv <- withPactPayloadProvider cid rdb Nothing chainLogger Nothing mempty pdb
+                    (getPactDbDir dbDir)
+                    config
+                    (genesisPayload cid)
+
+                failureCountRef <- liftIO $ newIORef (0 :: Word)
+                speedHeightRef <- liftIO $ newIORef (0, 0)
+                bhdb <- getWebBlockHeaderDb wbhdb cid
+                _ <- withAsyncR (logProgress chainLogger cid speedHeightRef)
+
+                let upperEndBlock = limitedCut ^?! cutHeaders . ix cid
+                let upper = HS.singleton (TreeDB.UpperBound $ view blockHash upperEndBlock)
+                liftIO $ TreeDB.branchEntries bhdb Nothing Nothing Nothing Nothing mempty upper $ \blockStream -> do
+                    blockStream
+                        & S.takeWhile (\blk -> maybe True (\start -> view blockHeight blk >= start) maybeStart)
+                        & withParent
+                        & S.mapM (\(h, ph) ->
+                            fmap (h,) $
+                            try @_ @SomeException $
+                            PactService.execReadOnlyReplay chainLogger serviceEnv
+                                (view blockPayloadHash h <$ blockHeaderToEvaluationCtx ph))
+                        & S.chunksOf 500
+                        & mapsM_ (\blkChunk -> do
+                            startTime <- getCurrentTimeIntegral
+
+                            count S.:> (Just lastHdr S.:> x) <- blkChunk
+                                & S.mapM (\case
+                                    (h, Left err) -> do
+                                        modifyIORef failureCountRef succ
+                                        logFunctionText chainLogger Error $ "Error block: " <> brief h <> ": " <> sshow err
+                                        return h
+                                    (h, Right (Just err)) -> do
+                                        modifyIORef failureCountRef succ
+                                        logFunctionText chainLogger Error $ "Invalid block " <> brief h <> ": " <> sshow err
+                                        return h
+                                    (h, Right Nothing) -> return h
+                                    )
+                                & S.copy
+                                & S.last
+                                & S.length
+
+                            endTime <- getCurrentTimeIntegral
+                            let !(TimeSpan (timeTaken :: Micros)) = (endTime `diff` startTime)
+                            let !speed :: Double = int count * 1_000_000 / int timeTaken
+
+                            writeIORef speedHeightRef (speed, view blockHeight lastHdr)
+
+                            return x
+                            )
+
+                liftIO $ logFunctionText chainLogger Info $ "finished replaying chain " <> brief cid
+
+                liftIO $ readIORef failureCountRef
+            when (failureCount > 0) $
+                error $ sshow failureCount <> " blocks failed"
+  where
+    logProgress logger cid speedHeightRef = do
+        threadDelay 20_000_000
+        (speed, height) <- readIORef speedHeightRef
+        logFunctionText logger Info $
+            "Chain " <> brief cid <>
+            " speed " <> T.pack (printf "%.2f" speed) <> "/s"
+            <> " at " <> brief height <> " (desc.)"
+
+-- requires that the input is descending
+withParent :: Monad m => S.Stream (S.Of h) m r -> S.Stream (S.Of (h, Parent h)) m r
+withParent = \strm -> do
+    S.lift (S.next strm) >>= \case
+        Left r -> return r
+        Right (bh, strm') -> go bh strm'
+  where
+    go bh strm = do
+        S.lift (S.next strm) >>= \case
+            Left r -> return r
+            Right (bh', strm') -> do
+                S.yield (bh, Parent bh')
+                go bh' strm'
+
+mapsM_ :: Monad m => (forall x. f x -> m x) -> S.Stream f m r -> m r
+mapsM_ f = go
+  where
+    go strm =
+        S.inspect strm >>= \case
+            Left r -> return r
+            Right fstrm -> do
+                strm' <- f fstrm
+                go strm'
diff --git a/node/src/ChainwebNode.hs b/node/src/ChainwebNode.hs
index e8de488124..ae32068192 100644
--- a/node/src/ChainwebNode.hs
+++ b/node/src/ChainwebNode.hs
@@ -319,13 +319,7 @@ node conf logger = do
     rocksDbDir <- getRocksDbDir conf
     pactDbDir <- getPactDbDir conf
     dbBackupsDir <- getBackupsDir conf
-    withRocksDb' <-
-        if _configReadOnlyReplay cwConf
-        then
-            withReadOnlyRocksDb <$ logFunctionText logger Info "Opening RocksDB in read-only mode"
-        else
-            return withRocksDb
-    withRocksDb' rocksDbDir modernDefaultOptions $ \rocksDb -> do
+    withRocksDb rocksDbDir modernDefaultOptions $ \rocksDb -> do
         logFunctionText logger Info $ "opened rocksdb in directory " <> sshow rocksDbDir
         logFunctionText logger Debug $ "backup config: " <> sshow (_configBackup cwConf)
         withChainweb cwConf logger rocksDb pactDbDir dbBackupsDir $ \case
@@ -370,7 +364,6 @@ withNodeLogger logCfg chainwebCfg v f = runManaged $ do
     let !txFailureHandler = if
            isJust (_cutInitialCutFile (_configCuts chainwebCfg))
            || isJust (_cutInitialBlockHeightLimit (_configCuts chainwebCfg))
-           || _configReadOnlyReplay chainwebCfg
            then [dropLogHandler (Proxy :: Proxy PactTxFailureLog)]
            else []
 
diff --git a/src/Chainweb/BlockHeaderDB.hs b/src/Chainweb/BlockHeaderDB.hs
index e52cc32862..b12b706663 100644
--- a/src/Chainweb/BlockHeaderDB.hs
+++ b/src/Chainweb/BlockHeaderDB.hs
@@ -19,8 +19,9 @@ module Chainweb.BlockHeaderDB
 , Configuration(..)
 , BlockHeaderDb
 , RankedBlockHeaderDb(..)
-, initBlockHeaderDb
 , closeBlockHeaderDb
+, initBlockHeaderDb
+, mkBlockHeaderDb
 , withBlockHeaderDb
 
 -- * Misc
diff --git a/src/Chainweb/BlockHeaderDB/Internal.hs b/src/Chainweb/BlockHeaderDB/Internal.hs
index df6f163314..8b520a3e70 100644
--- a/src/Chainweb/BlockHeaderDB/Internal.hs
+++ b/src/Chainweb/BlockHeaderDB/Internal.hs
@@ -52,6 +52,7 @@ module Chainweb.BlockHeaderDB.Internal
 , BlockHeaderDb(..)
 , RankedBlockHeaderDb(..)
 , initBlockHeaderDb
+, mkBlockHeaderDb
 , closeBlockHeaderDb
 , withBlockHeaderDb
 
@@ -274,26 +275,30 @@ dbAddChecked db e = unlessM (tableMember (_chainDbCas db) ek) dbAddCheckedIntern
 --
 initBlockHeaderDb :: HasVersion => Configuration -> IO BlockHeaderDb
 initBlockHeaderDb config = do
+    let db = mkBlockHeaderDb (_configRocksDb config) (_chainId rootEntry)
     dbAddChecked db rootEntry
     return db
   where
     rootEntry = _configRoot config
-    cid = _chainId rootEntry
-    cidNs = T.encodeUtf8 (toText cid)
+mkBlockHeaderDb :: HasVersion => RocksDb -> ChainId -> BlockHeaderDb
+mkBlockHeaderDb rdb cid = db
+  where
    headerTable = newTable
-        (_configRocksDb config)
+        rdb
        (Codec (runPutS . encodeRankedBlockHeader) (runGetS decodeRankedBlockHeader))
        (Codec (runPutS . encodeRankedBlockHash) (runGetS decodeRankedBlockHash))
        ["BlockHeader", cidNs, "header"]
    rankTable = newTable
-        (_configRocksDb config)
+        rdb
        (Codec (runPutS . encodeBlockHeight) (runGetS decodeBlockHeight))
        (Codec (runPutS . 
encodeBlockHash) (runGetS decodeBlockHash)) ["BlockHeader", cidNs, "rank"] - !db = BlockHeaderDb cid + cidNs = T.encodeUtf8 (toText cid) + db = BlockHeaderDb + cid implicitVersion headerTable rankTable diff --git a/src/Chainweb/Chainweb.hs b/src/Chainweb/Chainweb.hs index 60e8bf09b6..921accc20e 100644 --- a/src/Chainweb/Chainweb.hs +++ b/src/Chainweb/Chainweb.hs @@ -330,7 +330,7 @@ withChainwebInternal conf logger peerRes serviceSock rocksDb defaultPactDbDir ba { _cutDbParamsLogLevel = Info , _cutDbParamsTelemetryLevel = Info , _cutDbParamsInitialHeightLimit = _cutInitialBlockHeightLimit cutConf - , _cutDbParamsReadOnly = _configReadOnlyReplay conf + , _cutDbParamsReadOnly = False , _cutDbParamsInitialCutFile = _cutInitialCutFile cutConf } where @@ -394,94 +394,41 @@ withChainwebInternal conf logger peerRes serviceSock rocksDb defaultPactDbDir ba putPeerThrottler <- mkPutPeerThrottler $ _throttlingPeerRate throt mempoolThrottler <- mkMempoolThrottler $ _throttlingMempoolRate throt logg Debug "initialized throttlers" - - -- synchronize pact dbs with latest cut before we start the server - -- and clients and begin mining. - -- - -- This is a consistency check that validates the blocks in the - -- current cut. If it fails an exception is raised. Also, if it - -- takes long (for example, when doing a reset to a prior block - -- height) we want this to happen before we go online. - -- - let - -- pactSyncChains = - -- case _configSyncPactChains conf of - -- Just syncChains - -- | _configReadOnlyReplay conf - -- -> HM.filterWithKey (\k _ -> elem k syncChains) cs - -- _ -> cs - - if _configReadOnlyReplay conf - then do - -- FIXME implement replay in payload provider - error "Chainweb.Chainweb.withChainwebInternal: pact replay is not supported" - -- logFunctionJson logger Info PactReplayInProgress - -- -- note that we don't use the "initial cut" from cutdb because its height depends on initialBlockHeightLimit. - -- highestCut <- - -- unsafeMkCut v <$> readHighestCutHeaders v (logFunctionText logger) webchain (cutHashesTable rocksDb) - -- lowerBoundCut <- - -- tryLimitCut webchain (fromMaybe 0 $ _cutInitialBlockHeightLimit $ _configCuts conf) highestCut - -- upperBoundCut <- forM (_cutFastForwardBlockHeightLimit $ _configCuts conf) $ \upperBound -> - -- tryLimitCut webchain upperBound highestCut - -- let - -- replayOneChain :: (ChainResources logger, (BlockHeader, Maybe BlockHeader)) -> IO () - -- replayOneChain (cr, (l, u)) = do - -- let chainPact = _chainResPact cr - -- let logCr = logFunctionText - -- $ addLabel ("component", "pact") - -- $ addLabel ("sub-component", "init") - -- $ _chainResLogger cr - -- void $ _pactReadOnlyReplay chainPact l u - -- logCr Info "pact db synchronized" - -- let bounds = - -- HM.intersectionWith (,) - -- pactSyncChains - -- (HM.mapWithKey - -- (\cid bh -> - -- (bh, (HM.! cid) . _cutMap <$> upperBoundCut)) - -- (_cutMap lowerBoundCut) - -- ) - -- mapConcurrently_ replayOneChain bounds - -- logg Info "finished fast forward replay" - -- logFunctionJson logger Info PactReplaySuccessful - -- inner $ Replayed lowerBoundCut upperBoundCut - else do - logg Debug "start initializing miner resources" - logFunctionJson logger Info InitializingMinerResources - - withMiningCoordination mLogger mConf mCutDb $ \mc -> - - -- Miner resources are used by the test-miner when in-node - -- mining is configured or by the mempool noop-miner (which - -- keeps the mempool updated) in production setups. 
- -- - withMinerResources mLogger (_miningInNode mConf) cs mCutDb mc $ \m -> do - logFunctionJson logger Info ChainwebStarted - logg Debug "finished initializing miner resources" - let !haddr = _peerConfigAddr $ _p2pConfigPeer $ _configP2p conf - inner $ StartedChainweb Chainweb - { _chainwebHostAddress = haddr - , _chainwebChains = cs - , _chainwebCutResources = cutResources - , _chainwebMiner = m - , _chainwebCoordinator = mc - , _chainwebLogger = logger - , _chainwebPeer = peerRes - , _chainwebManager = mgr - -- , _chainwebPactData = pactData - , _chainwebThrottler = throttler - , _chainwebPutPeerThrottler = putPeerThrottler - , _chainwebMempoolThrottler = mempoolThrottler - , _chainwebConfig = conf - , _chainwebServiceSocket = serviceSock - , _chainwebBackup = BackupEnv - { _backupRocksDb = rocksDb - , _backupDir = backupDir - , _backupPactDbDir = defaultPactDbDir - , _backupChainIds = cids - , _backupLogger = backupLogger - } - } + logg Debug "start initializing miner resources" + logFunctionJson logger Info InitializingMinerResources + + withMiningCoordination mLogger mConf mCutDb $ \mc -> + + -- Miner resources are used by the test-miner when in-node + -- mining is configured or by the mempool noop-miner (which + -- keeps the mempool updated) in production setups. + -- + withMinerResources mLogger (_miningInNode mConf) cs mCutDb mc $ \m -> do + logFunctionJson logger Info ChainwebStarted + logg Debug "finished initializing miner resources" + let !haddr = _peerConfigAddr $ _p2pConfigPeer $ _configP2p conf + inner $ StartedChainweb Chainweb + { _chainwebHostAddress = haddr + , _chainwebChains = cs + , _chainwebCutResources = cutResources + , _chainwebMiner = m + , _chainwebCoordinator = mc + , _chainwebLogger = logger + , _chainwebPeer = peerRes + , _chainwebManager = mgr + , _chainwebThrottler = throttler + , _chainwebPutPeerThrottler = putPeerThrottler + , _chainwebMempoolThrottler = mempoolThrottler + , _chainwebConfig = conf + , _chainwebServiceSocket = serviceSock + , _chainwebBackup = BackupEnv + { _backupRocksDb = rocksDb + , _backupDir = backupDir + , _backupPactDbDir = defaultPactDbDir + , _backupChainIds = cids + , _backupLogger = backupLogger + } + } -- synchronizePactDb :: HM.HashMap ChainId (ChainResources logger) -> Cut -> IO () -- synchronizePactDb cs targetCut = do diff --git a/src/Chainweb/Chainweb/ChainResources.hs b/src/Chainweb/Chainweb/ChainResources.hs index 21eaa4dac1..987520c77d 100644 --- a/src/Chainweb/Chainweb/ChainResources.hs +++ b/src/Chainweb/Chainweb/ChainResources.hs @@ -378,7 +378,7 @@ withPayloadProviderResources logger cid serviceApiConfig peerStuff rdb rewindLim (view _4 <$> peerStuff) logger Nothing - mempool + mpa pdb pactDbDir pactConfig @@ -394,6 +394,7 @@ withPayloadProviderResources logger cid serviceApiConfig peerStuff rdb rewindLim (pactPayloadProviderServiceEnv pp) txs ) mempool <- Mempool.withInMemoryMempool (setComponent "mempool" logger) mempoolConfig + let mpa = pactMemPoolAccess mempool $ addLabel ("sub-component", "MempoolAccess") logger let queue = _payloadStoreQueue $ _psPdb $ pactPayloadProviderServiceEnv pp p2pRes <- liftIO $ forM peerStuff $ \(p2pConfig, myPeerInfo, peerDb, mgr) -> pactPayloadP2pResources @v' @c' logger p2pConfig myPeerInfo peerDb pdb queue mgr diff --git a/src/Chainweb/Chainweb/Configuration.hs b/src/Chainweb/Chainweb/Configuration.hs index ca4d4d4eb8..3e33575a36 100644 --- a/src/Chainweb/Chainweb/Configuration.hs +++ b/src/Chainweb/Chainweb/Configuration.hs @@ -71,8 +71,6 @@ module 
Chainweb.Chainweb.Configuration , configReorgLimit , configBackup , configServiceApi -, configReadOnlyReplay -, configSyncChains , configPayloadProviders , defaultChainwebConfiguration , pChainwebConfiguration @@ -553,16 +551,6 @@ data ChainwebConfiguration = ChainwebConfiguration , _configPayloadProviders :: PayloadProviderConfig , _configPrune :: !PruneConfig - -- The following properties are deprecated: history replay should not be - -- part of normal operation mode. It should probably use a completely - -- separate configuration. - - , _configReadOnlyReplay :: !Bool - -- ^ do a read-only replay using the cut db params for the block heights - , _configSyncChains :: !(Maybe [ChainId]) - -- ^ the only chains to be synchronized on startup to the latest cut. - -- if unset, all chains will be synchronized. - } deriving (Show, Eq, Generic) makeLenses ''ChainwebConfiguration @@ -608,9 +596,7 @@ defaultChainwebConfiguration v = ChainwebConfiguration , _configThrottling = defaultThrottlingConfig , _configReorgLimit = defaultReorgLimit , _configServiceApi = defaultServiceApiConfig - , _configReadOnlyReplay = False , _configPrune = defaultPruneConfig - , _configSyncChains = Nothing , _configBackup = defaultBackupConfig , _configPayloadProviders = defaultPayloadProviderConfig } @@ -624,8 +610,6 @@ instance ToJSON ChainwebConfiguration where , "throttling" .= _configThrottling o , "reorgLimit" .= _configReorgLimit o , "serviceApi" .= _configServiceApi o - , "readOnlyReplay" .= _configReadOnlyReplay o - , "syncChains" .= _configSyncChains o , "backup" .= _configBackup o , "payloadProviders" .= _configPayloadProviders o , "pruning" .= _configPrune o @@ -644,8 +628,6 @@ instance FromJSON (ChainwebConfiguration -> ChainwebConfiguration) where <*< configThrottling %.: "throttling" % o <*< configReorgLimit ..: "reorgLimit" % o <*< configServiceApi %.: "serviceApi" % o - <*< configReadOnlyReplay ..: "readOnlyReplay" % o - <*< configSyncChains ..: "syncChains" % o <*< configBackup %.: "backup" % o <*< configPayloadProviders %.: "payloadProviders" % o <*< configPrune %.: "pruning" % o @@ -662,13 +644,6 @@ pChainwebConfiguration = id <*< parserOptionGroup "Cut Processing" (configCuts %:: pCutConfig) <*< parserOptionGroup "Service API" (configServiceApi %:: pServiceApiConfig) <*< parserOptionGroup "Mining Coordination" (configMining %:: pMiningConfig) - <*< configReadOnlyReplay .:: boolOption_ - % long "read-only-replay" - <> help "Replay the block history non-destructively" - <*< configSyncChains .:: fmap Just % jsonOption - % long "sync-chains" - <> help "The only Pact databases to synchronize. If empty or unset, all chains will be synchronized." 
- <> metavar "JSON list of chain ids" <*< parserOptionGroup "Backup" (configBackup %:: pBackupConfig) -- FIXME support payload providers diff --git a/src/Chainweb/Pact/Backend/ChainwebPactDb.hs b/src/Chainweb/Pact/Backend/ChainwebPactDb.hs index 1d389d458c..8bb63075ae 100644 --- a/src/Chainweb/Pact/Backend/ChainwebPactDb.hs +++ b/src/Chainweb/Pact/Backend/ChainwebPactDb.hs @@ -261,7 +261,7 @@ chainwebPactBlockDb env = ChainwebPactDb , consult = \(Parent hsh) -> do throwOnDbError (lookupBlockHash (_blockHandlerDb env) hsh) <&> \case Nothing -> False - Just rootHeight -> rootHeight > currentHeight + Just rootHeight -> rootHeight < currentHeight } let spv = pactSPV headerOracle r <- liftIO $ kont pactDb spv @@ -761,57 +761,6 @@ createVersionedTable tablename db = do indexcreationstmt = "CREATE INDEX IF NOT EXISTS " <> tbl ixName <> " ON " <> tbl tablename <> "(txid DESC);" -setConsensusState :: SQ3.Database -> ConsensusState -> ExceptT LocatedSQ3Error IO () -setConsensusState db cs = do - exec' db - "INSERT INTO ConsensusState (blockheight, hash, payloadhash, safety) VALUES \ - \(?, ?, ?, ?);" - (toRow "final" $ _consensusStateFinal cs) - exec' db - "INSERT INTO ConsensusState (blockheight, hash, payloadhash, safety) VALUES \ - \(?, ?, ?, ?);" - (toRow "safe" $ _consensusStateSafe cs) - exec' db - "INSERT INTO ConsensusState (blockheight, hash, payloadhash, safety) VALUES \ - \(?, ?, ?, ?);" - (toRow "latest" $ _consensusStateLatest cs) - where - toRow safety SyncState {..} = - [ SInt $ fromIntegral @BlockHeight @Int64 _syncStateHeight - , SBlob $ runPutS (encodeBlockHash _syncStateBlockHash) - , SBlob $ runPutS (encodeBlockPayloadHash _syncStateBlockPayloadHash) - , SText safety - ] - -getConsensusState :: SQ3.Database -> ExceptT LocatedSQ3Error IO (Maybe ConsensusState) -getConsensusState db = do - maybeState <- qry db "SELECT blockheight, hash, payloadhash, safety FROM ConsensusState ORDER BY safety ASC;" - [] [RInt, RBlob, RBlob, RText] >>= \case - [final, latest, safe] -> return $ Just ConsensusState - { _consensusStateFinal = readRow "final" final - , _consensusStateLatest = readRow "latest" latest - , _consensusStateSafe = readRow "safe" safe - } - [] -> return Nothing - inv -> error $ "invalid contents of the ConsensusState table: " <> sshow inv - case maybeState of - Nothing -> do - getLatestBlock db >>= \case - Nothing -> return Nothing - Just latest -> - return $ Just $ ConsensusState latest latest latest - Just s -> return (Just s) - where - readRow expectedType [SInt height, SBlob hash, SBlob payloadHash, SText type'] - | expectedType == type' = SyncState - { _syncStateHeight = fromIntegral @Int64 @BlockHeight height - , _syncStateBlockHash = either error id $ runGetEitherS decodeBlockHash hash - , _syncStateBlockPayloadHash = either error id $ runGetEitherS decodeBlockPayloadHash payloadHash - } - | otherwise = error $ "wrong type; expected " <> sshow expectedType <> " but got " <> sshow type' - readRow expectedType invalidRow - = error $ "invalid row: expected " <> sshow expectedType <> " but got row " <> sshow invalidRow - -- | Create all tables that exist pre-genesis -- TODO: migrate this logic to the checkpointer itself? 
initSchema :: SQLiteEnv -> IO () @@ -886,81 +835,3 @@ getSerialiser = do cid <- view blockHandlerChainId blockHeight <- view blockHandlerBlockHeight return $ pact5Serialiser cid blockHeight - -getPayloadsAfter :: HasCallStack => SQLiteEnv -> Parent BlockHeight -> ExceptT LocatedSQ3Error IO [Ranked BlockPayloadHash] -getPayloadsAfter db parentHeight = do - qry db "SELECT blockheight, payloadhash FROM BlockHistory2 WHERE blockheight > ?" - [SInt (fromIntegral @BlockHeight @Int64 (unwrapParent parentHeight))] - [RInt, RBlob] >>= traverse - \case - [SInt bh, SBlob bhash] -> - return $! Ranked (fromIntegral @Int64 @BlockHeight bh) $ either error id $ runGetEitherS decodeBlockPayloadHash bhash - _ -> error "incorrect column type" - --- | Get the checkpointer's idea of the earliest block. The block height --- is the height of the block of the block hash. -getEarliestBlock :: HasCallStack => SQLiteEnv -> ExceptT LocatedSQ3Error IO (Maybe RankedBlockHash) -getEarliestBlock db = do - r <- qry db qtext [] [RInt, RBlob] >>= mapM go - case r of - [] -> return Nothing - (!o:_) -> return (Just o) - where - qtext = "SELECT blockheight, hash FROM BlockHistory2 ORDER BY blockheight ASC LIMIT 1" - - go [SInt hgt, SBlob blob] = - let hash = either error id $ runGetEitherS decodeBlockHash blob - in return (RankedBlockHash (fromIntegral hgt) hash) - go _ = fail "Chainweb.Pact.Backend.RelationalCheckpointer.doGetEarliest: impossible. This is a bug in chainweb-node." - --- | Get the checkpointer's idea of the latest block. -getLatestBlock :: HasCallStack => SQLiteEnv -> ExceptT LocatedSQ3Error IO (Maybe SyncState) -getLatestBlock db = do - r <- qry db qtext [] [RInt, RBlob, RBlob] >>= mapM go - case r of - [] -> return Nothing - (!o:_) -> return (Just o) - where - qtext = "SELECT blockheight, hash, payloadhash FROM BlockHistory2 ORDER BY blockheight DESC LIMIT 1" - - go [SInt hgt, SBlob blob, SBlob pBlob] = - let hash = either error id $ runGetEitherS decodeBlockHash blob - in let pHash = either error id $ runGetEitherS decodeBlockPayloadHash pBlob - in return $ SyncState - { _syncStateBlockHash = hash - , _syncStateBlockPayloadHash = pHash - , _syncStateHeight = int hgt - } - go r = fail $ - "Chainweb.Pact.Backend.ChainwebPactDb.getLatestBlock: impossible. This is a bug in chainweb-node. Details: " - <> sshow r - -lookupBlockWithHeight :: HasCallStack => SQ3.Database -> BlockHeight -> ExceptT LocatedSQ3Error IO (Maybe (Ranked BlockHash)) -lookupBlockWithHeight db bheight = do - qry db qtext [SInt $ fromIntegral bheight] [RBlob] >>= \case - [[SBlob hash]] -> return $! Just $! - Ranked bheight (either error id $ runGetEitherS decodeBlockHash hash) - [] -> return Nothing - res -> error $ "Invalid result, " <> sshow res - where - qtext = "SELECT hash FROM BlockHistory2 WHERE blockheight = ?;" - -lookupBlockHash :: HasCallStack => SQ3.Database -> BlockHash -> ExceptT LocatedSQ3Error IO (Maybe BlockHeight) -lookupBlockHash db hash = do - qry db qtext [SBlob (runPutS (encodeBlockHash hash))] [RInt] >>= \case - [[SInt n]] -> return $! Just $! 
int n - [] -> return $ Nothing - res -> error $ "Invalid result, " <> sshow res - where - qtext = "SELECT blockheight FROM BlockHistory2 WHERE hash = ?;" - -lookupRankedBlockHash :: HasCallStack => SQ3.Database -> RankedBlockHash -> IO Bool -lookupRankedBlockHash db rankedBHash = throwOnDbError $ do - qry db qtext - [ SInt $ fromIntegral (_rankedBlockHashHeight rankedBHash) - , SBlob $ runPutS $ encodeBlockHash $ _rankedBlockHashHash rankedBHash - ] [RInt] >>= \case - [[SInt n]] -> return $! n == 1 - res -> error $ "Invalid result, " <> sshow res - where - qtext = "SELECT COUNT(*) FROM BlockHistory2 WHERE blockheight = ? AND hash = ?;" diff --git a/src/Chainweb/Pact/Backend/Utils.hs b/src/Chainweb/Pact/Backend/Utils.hs index b4782565f3..ca83948c6c 100644 --- a/src/Chainweb/Pact/Backend/Utils.hs +++ b/src/Chainweb/Pact/Backend/Utils.hs @@ -7,6 +7,7 @@ {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE GADTs #-} +{-# LANGUAGE BlockArguments #-} {-# OPTIONS_GHC -fno-warn-orphans #-} @@ -39,6 +40,14 @@ module Chainweb.Pact.Backend.Utils , getEndTxId -- * Transactions , withTransaction + , setConsensusState + , getConsensusState + , getPayloadsAfter + , getLatestBlock + , getEarliestBlock + , lookupBlockWithHeight + , lookupBlockHash + , lookupRankedBlockHash -- * SQLite conversions and assertions , toUtf8 , fromUtf8 @@ -76,6 +85,7 @@ import Control.Monad.Trans.Resource (ResourceT, allocate) import Data.Bits import Data.Foldable +import Data.Maybe import Data.String import Data.Pool qualified as Pool import Data.Text qualified as T @@ -98,6 +108,7 @@ import Pact.Types.Util (AsString(..)) import Chainweb.Logger import Chainweb.Pact.Backend.SQLite.DirectV2 +import Chainweb.PayloadProvider import Chainweb.Version import Chainweb.Utils @@ -448,6 +459,127 @@ rewindDbToBlock db bh endingTxId = throwOnDbError $ do exec' db "DELETE FROM TransactionIndex WHERE blockheight > ?;" [ SInt (fromIntegral bh) ] +-- | Set the consensus state. Note that the "latest" parameter is ignored; the +-- latest block is always the highest block in the BlockHistory table. +setConsensusState :: SQ3.Database -> ConsensusState -> ExceptT LocatedSQ3Error IO () +setConsensusState db cs = do + exec' db + "INSERT INTO ConsensusState (blockheight, hash, payloadhash, safety) VALUES \ + \(?, ?, ?, ?);" + (toRow "final" $ _consensusStateFinal cs) + exec' db + "INSERT INTO ConsensusState (blockheight, hash, payloadhash, safety) VALUES \ + \(?, ?, ?, ?);" + (toRow "safe" $ _consensusStateSafe cs) + where + toRow safety SyncState {..} = + [ SInt $ fromIntegral @BlockHeight @Int64 _syncStateHeight + , SBlob $ runPutS (encodeBlockHash _syncStateBlockHash) + , SBlob $ runPutS (encodeBlockPayloadHash _syncStateBlockPayloadHash) + , SText safety + ] + +-- | Retrieve the latest "consensus state" including latest, safe, and final blocks. 
+getConsensusState :: SQ3.Database -> ExceptT LocatedSQ3Error IO ConsensusState +getConsensusState db = do + latestBlock <- fromMaybe (error "before genesis") <$> getLatestBlock db + qry db "SELECT blockheight, hash, payloadhash, safety FROM ConsensusState ORDER BY safety ASC;" + [] [RInt, RBlob, RBlob, RText] >>= \case + [final, safe] -> return $ ConsensusState + { _consensusStateFinal = readRow "final" final + , _consensusStateSafe = readRow "safe" safe + , _consensusStateLatest = latestBlock + } + inv -> error $ "invalid contents of the ConsensusState table: " <> sshow inv + where + readRow expectedType [SInt height, SBlob hash, SBlob payloadHash, SText type'] + | expectedType == type' = SyncState + { _syncStateHeight = fromIntegral @Int64 @BlockHeight height + , _syncStateBlockHash = either error id $ runGetEitherS decodeBlockHash hash + , _syncStateBlockPayloadHash = either error id $ runGetEitherS decodeBlockPayloadHash payloadHash + } + | otherwise = error $ "wrong type; expected " <> sshow expectedType <> " but got " <> sshow type' + readRow expectedType invalidRow + = error $ "invalid row: expected " <> sshow expectedType <> " but got row " <> sshow invalidRow + +getPayloadsAfter :: HasCallStack => SQLiteEnv -> Parent BlockHeight -> ExceptT LocatedSQ3Error IO [Ranked BlockPayloadHash] +getPayloadsAfter db parentHeight = do + qry db "SELECT blockheight, payloadhash FROM BlockHistory2 WHERE blockheight > ?" + [SInt (fromIntegral @BlockHeight @Int64 (unwrapParent parentHeight))] + [RInt, RBlob] >>= traverse + \case + [SInt bh, SBlob bhash] -> + return $! Ranked (fromIntegral @Int64 @BlockHeight bh) $ either error id $ runGetEitherS decodeBlockPayloadHash bhash + _ -> error "incorrect column type" + +-- | Get the checkpointer's idea of the earliest block. The block height +-- is the height of the block of the block hash. +getEarliestBlock :: HasCallStack => SQLiteEnv -> ExceptT LocatedSQ3Error IO (Maybe RankedBlockHash) +getEarliestBlock db = do + r <- qry db qtext [] [RInt, RBlob] >>= mapM go + case r of + [] -> return Nothing + (!o:_) -> return (Just o) + where + qtext = "SELECT blockheight, hash FROM BlockHistory2 ORDER BY blockheight ASC LIMIT 1" + + go [SInt hgt, SBlob blob] = + let hash = either error id $ runGetEitherS decodeBlockHash blob + in return (RankedBlockHash (fromIntegral hgt) hash) + go _ = fail "Chainweb.Pact.Backend.RelationalCheckpointer.doGetEarliest: impossible. This is a bug in chainweb-node." + +-- | Get the checkpointer's idea of the latest block. +getLatestBlock :: HasCallStack => SQLiteEnv -> ExceptT LocatedSQ3Error IO (Maybe SyncState) +getLatestBlock db = do + r <- qry db qtext [] [RInt, RBlob, RBlob] >>= mapM go + case r of + [] -> return Nothing + (!o:_) -> return (Just o) + where + qtext = "SELECT blockheight, hash, payloadhash FROM BlockHistory2 ORDER BY blockheight DESC LIMIT 1" + + go [SInt hgt, SBlob blob, SBlob pBlob] = + let hash = either error id $ runGetEitherS decodeBlockHash blob + in let pHash = either error id $ runGetEitherS decodeBlockPayloadHash pBlob + in return $ SyncState + { _syncStateBlockHash = hash + , _syncStateBlockPayloadHash = pHash + , _syncStateHeight = int hgt + } + go r = fail $ + "Chainweb.Pact.Backend.ChainwebPactDb.getLatestBlock: impossible. This is a bug in chainweb-node. 
Details: " + <> sshow r + +lookupBlockWithHeight :: HasCallStack => SQ3.Database -> BlockHeight -> ExceptT LocatedSQ3Error IO (Maybe (Ranked BlockHash)) +lookupBlockWithHeight db bheight = do + qry db qtext [SInt $ fromIntegral bheight] [RBlob] >>= \case + [[SBlob hash]] -> return $! Just $! + Ranked bheight (either error id $ runGetEitherS decodeBlockHash hash) + [] -> return Nothing + res -> error $ "Invalid result, " <> sshow res + where + qtext = "SELECT hash FROM BlockHistory2 WHERE blockheight = ?;" + +lookupBlockHash :: HasCallStack => SQ3.Database -> BlockHash -> ExceptT LocatedSQ3Error IO (Maybe BlockHeight) +lookupBlockHash db hash = do + qry db qtext [SBlob (runPutS (encodeBlockHash hash))] [RInt] >>= \case + [[SInt n]] -> return $! Just $! int n + [] -> return $ Nothing + res -> error $ "Invalid result, " <> sshow res + where + qtext = "SELECT blockheight FROM BlockHistory2 WHERE hash = ?;" + +lookupRankedBlockHash :: HasCallStack => SQ3.Database -> RankedBlockHash -> IO Bool +lookupRankedBlockHash db rankedBHash = throwOnDbError $ do + qry db qtext + [ SInt $ fromIntegral (_rankedBlockHashHeight rankedBHash) + , SBlob $ runPutS $ encodeBlockHash $ _rankedBlockHashHash rankedBHash + ] [RInt] >>= \case + [[SInt n]] -> return $! n == 1 + res -> error $ "Invalid result, " <> sshow res + where + qtext = "SELECT COUNT(*) FROM BlockHistory2 WHERE blockheight = ? AND hash = ?;" + data LocatedSQ3Error = LocatedSQ3Error !CallStack !SQ3.Error instance Show LocatedSQ3Error where show (LocatedSQ3Error cs e) = diff --git a/src/Chainweb/Pact/PactService.hs b/src/Chainweb/Pact/PactService.hs index 2ab7cea1aa..bde5ae0fdf 100644 --- a/src/Chainweb/Pact/PactService.hs +++ b/src/Chainweb/Pact/PactService.hs @@ -181,6 +181,8 @@ withPactService cid http memPoolAccess chainwebLogger txFailuresCounter pdb read , _psModuleInitCacheVar = moduleInitCacheVar } + liftIO $ ChainwebPactDb.initSchema readWriteSqlenv + case pactGenesis of GeneratingGenesis -> return () _ -> liftIO $ initialPayloadState chainwebLogger pse @@ -215,8 +217,8 @@ runGenesisIfNeeded -> ServiceEnv tbl -> IO () runGenesisIfNeeded logger serviceEnv = do - withTransaction (_psReadWriteSql serviceEnv) $ do - latestBlock <- fmap _consensusStateLatest <$> Checkpointer.getConsensusState (_psReadWriteSql serviceEnv) + withTransaction rwSql $ do + latestBlock <- Checkpointer.getLatestBlock rwSql when (maybe True (isGenesisBlockHeader' cid . Parent . _syncStateBlockHash) latestBlock) $ do logFunctionText logger Debug "running genesis" let genesisBlockHash = genesisBlockHeader cid ^. 
blockHash @@ -231,7 +233,7 @@ runGenesisIfNeeded logger serviceEnv = do Just p -> p maybeErr <- runExceptT - $ Checkpointer.restoreAndSave logger cid (_psReadWriteSql serviceEnv) (genesisRankedParentBlockHash cid) + $ Checkpointer.restoreAndSave logger cid rwSql (genesisRankedParentBlockHash cid) $ NEL.singleton $ ( if pact5 cid (genesisHeight cid) @@ -253,7 +255,7 @@ runGenesisIfNeeded logger serviceEnv = do (_payloadStoreTable $ _psPdb serviceEnv) (genesisHeight cid) genesisPayload - Checkpointer.setConsensusState (_psReadWriteSql serviceEnv) targetSyncState + Checkpointer.setConsensusState rwSql targetSyncState -- we can't produce pact 4 blocks anymore, so don't make -- payloads if pact 4 is on when (pact5 cid (succ $ genesisHeight cid)) $ @@ -272,6 +274,7 @@ runGenesisIfNeeded logger serviceEnv = do startPayloadRefresher logger serviceEnv emptyBlock where + rwSql = _psReadWriteSql serviceEnv cid = _chainId serviceEnv -- | only for use in generating genesis blocks in tools. @@ -336,39 +339,35 @@ execReadOnlyReplay => HasVersion => logger -> ServiceEnv tbl - -> [EvaluationCtx BlockPayloadHash] - -> IO [BlockInvalidError] -execReadOnlyReplay logger serviceEnv blocks = do + -> EvaluationCtx BlockPayloadHash + -> IO (Maybe BlockInvalidError) +execReadOnlyReplay logger serviceEnv evalCtx = do let readSqlPool = view psReadSqlPool serviceEnv let cid = view chainId serviceEnv let pdb = view psPdb serviceEnv - blocks - & mapM (\evalCtx -> do - payload <- liftIO $ fromJuste <$> - lookupPayloadWithHeight (_payloadStoreTable pdb) (Just $ _evaluationCtxCurrentHeight evalCtx) (_evaluationCtxPayload evalCtx) - let isPayloadEmpty = V.null (_payloadWithOutputsTransactions payload) - let isUpgradeBlock = isJust $ implicitVersion ^? versionUpgrades . atChain cid . ix (_evaluationCtxCurrentHeight evalCtx) - if isPayloadEmpty && not isUpgradeBlock - then Pool.withResource readSqlPool $ \sql -> withTransaction sql $ do - hist <- Checkpointer.readFrom - logger - cid - sql - (_evaluationCtxParentCreationTime evalCtx) - (_evaluationCtxRankedParentHash evalCtx) - Checkpointer.PactRead - { pact5Read = \blockEnv blockHandle -> - runExceptT $ flip evalStateT blockHandle $ - void $ Pact.execExistingBlock logger serviceEnv blockEnv (CheckablePayloadWithOutputs payload) - , pact4Read = \blockEnv -> - runExceptT $ - void $ Pact4.execBlock logger serviceEnv blockEnv (CheckablePayloadWithOutputs payload) - } - either Just (\_ -> Nothing) <$> throwIfNoHistory hist - else - return Nothing - ) - & fmap catMaybes + payload <- liftIO $ fromJuste <$> + lookupPayloadWithHeight (_payloadStoreTable pdb) (Just $ _evaluationCtxCurrentHeight evalCtx) (_evaluationCtxPayload evalCtx) + let isPayloadEmpty = V.null (_payloadWithOutputsTransactions payload) + let isUpgradeBlock = isJust $ implicitVersion ^? versionUpgrades . atChain cid . 
ix (_evaluationCtxCurrentHeight evalCtx) + if not isPayloadEmpty || isUpgradeBlock + then Pool.withResource readSqlPool $ \sql -> withTransaction sql $ do + hist <- Checkpointer.readFrom + logger + cid + sql + (_evaluationCtxParentCreationTime evalCtx) + (_evaluationCtxRankedParentHash evalCtx) + Checkpointer.PactRead + { pact5Read = \blockEnv blockHandle -> + runExceptT $ flip evalStateT blockHandle $ + void $ Pact.execExistingBlock logger serviceEnv blockEnv (CheckablePayloadWithOutputs payload) + , pact4Read = \blockEnv -> + runExceptT $ + void $ Pact4.execBlock logger serviceEnv blockEnv (CheckablePayloadWithOutputs payload) + } + either Just (\_ -> Nothing) <$> throwIfNoHistory hist + else + return Nothing execLocal :: (Logger logger, CanReadablePayloadCas tbl) @@ -527,7 +526,7 @@ syncToFork -> IO ConsensusState syncToFork logger serviceEnv hints forkInfo = do (rewoundTxs, validatedTxs, newConsensusState) <- withTransaction sql $ do - pactConsensusState <- fromJuste <$> Checkpointer.getConsensusState sql + pactConsensusState <- Checkpointer.getConsensusState sql let atTarget = _syncStateBlockHash (_consensusStateLatest pactConsensusState) == _latestBlockHash forkInfo._forkInfoTargetState diff --git a/src/Chainweb/Pact/PactService/Checkpointer.hs b/src/Chainweb/Pact/PactService/Checkpointer.hs index bd653c8589..8947e78cb8 100644 --- a/src/Chainweb/Pact/PactService/Checkpointer.hs +++ b/src/Chainweb/Pact/PactService/Checkpointer.hs @@ -43,6 +43,7 @@ module Chainweb.Pact.PactService.Checkpointer -- , findLatestValidBlockHeader -- , exitOnRewindLimitExceeded , getEarliestBlock + , getLatestBlock -- , lookupBlock , lookupRankedBlockHash , lookupBlockHash @@ -71,8 +72,7 @@ import Chainweb.Logger import Chainweb.MinerReward import Chainweb.Pact.Backend.ChainwebPactDb qualified as ChainwebPactDb import Chainweb.Pact.Backend.Types -import Chainweb.Pact.Backend.Utils -import Chainweb.Pact.Backend.Utils qualified as PactDb +import Chainweb.Pact.Backend.Utils qualified as Backend.Utils import Chainweb.Pact.Types import Chainweb.Parent import Chainweb.PayloadProvider @@ -137,8 +137,9 @@ readFromNthParent -> IO (Historical a) readFromNthParent logger cid sql parentCreationTime n doRead = do latest <- - _consensusStateLatest . fromMaybe (error "readFromNthParent is illegal to call before genesis") - <$> getConsensusState sql + fmap (fromMaybe (error "readFromNthParent is illegal to call before genesis")) + $ ChainwebPactDb.throwOnDbError + $ ChainwebPactDb.getLatestBlock sql if genesisHeight cid + fromIntegral @Word @BlockHeight n > _syncStateHeight latest then do logFunctionText logger Warn $ "readFromNthParent asked to rewind beyond genesis, to " @@ -147,12 +148,12 @@ readFromNthParent logger cid sql parentCreationTime n doRead = do else do let targetHeight = _syncStateHeight latest - fromIntegral @Word @BlockHeight n lookupBlockWithHeight sql targetHeight >>= \case - -- this case for shallow nodes without enough history - Nothing -> do - logFunctionText logger Warn "readFromNthParent asked to rewind beyond known blocks" - return NoHistory - Just nthBlock -> - readFrom logger cid sql parentCreationTime (Parent nthBlock) doRead + -- this case for shallow nodes without enough history + Nothing -> do + logFunctionText logger Warn "readFromNthParent asked to rewind beyond known blocks" + return NoHistory + Just nthBlock -> + readFrom logger cid sql parentCreationTime (Parent nthBlock) doRead -- read-only rewind to a target block. -- if that target block is missing, return Nothing. 
@@ -174,50 +175,50 @@ readFrom logger cid sql parentCreationTime parent pactRead = do , _bctxMinerReward = blockMinerReward (childBlockHeight cid parent) , _bctxChainId = cid } - liftIO $ do - !latestHeader <- maybe (genesisRankedParentBlockHash cid) (Parent . _syncStateRankedBlockHash . _consensusStateLatest) <$> - ChainwebPactDb.throwOnDbError (ChainwebPactDb.getConsensusState sql) - -- is the parent the latest header, i.e., can we get away without rewinding? - let parentIsLatestHeader = latestHeader == parent - let currentHeight = _bctxCurrentBlockHeight blockCtx - PactDb.getEndTxId cid sql parent >>= traverse \startTxId -> - if pact5 cid currentHeight then do - let - blockHandlerEnv = ChainwebPactDb.BlockHandlerEnv - { ChainwebPactDb._blockHandlerDb = sql - , ChainwebPactDb._blockHandlerLogger = logger - , ChainwebPactDb._blockHandlerChainId = cid - , ChainwebPactDb._blockHandlerBlockHeight = currentHeight - , ChainwebPactDb._blockHandlerMode = Pact.Transactional - , ChainwebPactDb._blockHandlerUpperBoundTxId = startTxId - , ChainwebPactDb._blockHandlerAtTip = parentIsLatestHeader - } - let pactDb = ChainwebPactDb.chainwebPactBlockDb blockHandlerEnv - let blockEnv = BlockEnv blockCtx pactDb - pact5Read pactRead blockEnv (emptyBlockHandle startTxId) - else do - let pact4TxId = Pact4.TxId (coerce startTxId) - let blockHandlerEnv = Pact4.mkBlockHandlerEnv cid currentHeight sql logger - newBlockDbEnv <- liftIO $ newMVar $ Pact4.BlockDbEnv - blockHandlerEnv - -- FIXME not sharing the cache - (Pact4.initBlockState defaultModuleCacheLimit pact4TxId) - let pactDb = Pact4.rewoundPactDb currentHeight pact4TxId + !latestHeader <- ChainwebPactDb.throwOnDbError + $ fmap (maybe (genesisRankedParentBlockHash cid) (Parent . _syncStateRankedBlockHash)) + $ ChainwebPactDb.getLatestBlock sql + -- is the parent the latest header, i.e., can we get away without rewinding? 
+ let parentIsLatestHeader = latestHeader == parent + let currentHeight = _bctxCurrentBlockHeight blockCtx + Backend.Utils.getEndTxId cid sql parent >>= traverse \startTxId -> + if pact5 cid currentHeight then do + let + blockHandlerEnv = ChainwebPactDb.BlockHandlerEnv + { ChainwebPactDb._blockHandlerDb = sql + , ChainwebPactDb._blockHandlerLogger = logger + , ChainwebPactDb._blockHandlerChainId = cid + , ChainwebPactDb._blockHandlerBlockHeight = currentHeight + , ChainwebPactDb._blockHandlerMode = Pact.Transactional + , ChainwebPactDb._blockHandlerUpperBoundTxId = startTxId + , ChainwebPactDb._blockHandlerAtTip = parentIsLatestHeader + } + let pactDb = ChainwebPactDb.chainwebPactBlockDb blockHandlerEnv + let blockEnv = BlockEnv blockCtx pactDb + pact5Read pactRead blockEnv (emptyBlockHandle startTxId) + else do + let pact4TxId = Pact4.TxId (coerce startTxId) + let blockHandlerEnv = Pact4.mkBlockHandlerEnv cid currentHeight sql logger + newBlockDbEnv <- liftIO $ newMVar $ Pact4.BlockDbEnv + blockHandlerEnv + -- FIXME not sharing the cache + (Pact4.initBlockState defaultModuleCacheLimit pact4TxId) + let pactDb = Pact4.rewoundPactDb currentHeight pact4TxId - let pact4DbEnv = Pact4.CurrentBlockDbEnv - { _cpPactDbEnv = Pact4.PactDbEnv pactDb newBlockDbEnv - , _cpRegisterProcessedTx = \hash -> - Pact4.runBlockDbEnv newBlockDbEnv (Pact4.indexPactTransaction $ BS.fromShort $ Pact4.unHash $ Pact4.unRequestKey hash) - , _cpLookupProcessedTx = \hs -> do - res <- doLookupSuccessful sql currentHeight (coerce hs) - return - $ HashMap.mapKeys coerce - $ HashMap.map - (\(T3 height _payloadhash bhash) -> T2 height bhash) - res - , _cpHeaderOracle = Pact4.headerOracleForBlock blockHandlerEnv - } - pact4Read pactRead (Pact4.BlockEnv blockCtx pact4DbEnv) + let pact4DbEnv = Pact4.CurrentBlockDbEnv + { _cpPactDbEnv = Pact4.PactDbEnv pactDb newBlockDbEnv + , _cpRegisterProcessedTx = \hash -> + Pact4.runBlockDbEnv newBlockDbEnv (Pact4.indexPactTransaction $ BS.fromShort $ Pact4.unHash $ Pact4.unRequestKey hash) + , _cpLookupProcessedTx = \hs -> do + res <- Backend.Utils.doLookupSuccessful sql currentHeight (coerce hs) + return + $ HashMap.mapKeys coerce + $ HashMap.map + (\(T3 height _payloadhash bhash) -> T2 height bhash) + res + , _cpHeaderOracle = Pact4.headerOracleForBlock blockHandlerEnv + } + pact4Read pactRead (Pact4.BlockEnv blockCtx pact4DbEnv) -- the special case where one doesn't want to extend the chain, just rewind it. rewindTo @@ -227,7 +228,7 @@ rewindTo -> Parent RankedBlockHash -> IO () rewindTo cid sql ancestor = do - void $ PactDb.rewindDbTo cid sql ancestor + void $ Backend.Utils.rewindDbTo cid sql ancestor data PactRead a = PactRead @@ -282,7 +283,7 @@ restoreAndSave -> m q restoreAndSave logger cid sql parent blocks = do -- TODO PP: check first if we're rewinding past "final" point? same with rewindTo above. 
- startTxId <- liftIO $ PactDb.rewindDbTo cid sql parent + startTxId <- liftIO $ Backend.Utils.rewindDbTo cid sql parent let startBlockHeight = childBlockHeight cid parent foldState1 (fmap executeBlock blocks) (T2 startBlockHeight startTxId) where @@ -304,7 +305,7 @@ restoreAndSave logger cid sql parent blocks = do , _cpRegisterProcessedTx = \hash -> Pact4.runBlockDbEnv newBlockDbEnv (Pact4.indexPactTransaction $ BS.fromShort $ Pact4.unHash $ Pact4.unRequestKey hash) , _cpLookupProcessedTx = \hs -> do - res <- doLookupSuccessful sql currentBlockHeight (coerce hs) + res <- Backend.Utils.doLookupSuccessful sql currentBlockHeight (coerce hs) return $ HashMap.mapKeys coerce $ HashMap.map @@ -367,25 +368,29 @@ getEarliestBlock :: SQLiteEnv -> IO (Maybe RankedBlockHash) getEarliestBlock sql = do ChainwebPactDb.throwOnDbError $ ChainwebPactDb.getEarliestBlock sql -getConsensusState :: SQLiteEnv -> IO (Maybe ConsensusState) +getLatestBlock :: SQLiteEnv -> IO (Maybe SyncState) +getLatestBlock sql = do + ChainwebPactDb.throwOnDbError $ Backend.Utils.getLatestBlock sql + +getConsensusState :: SQLiteEnv -> IO ConsensusState getConsensusState sql = do - ChainwebPactDb.throwOnDbError $ ChainwebPactDb.getConsensusState sql + ChainwebPactDb.throwOnDbError $ Backend.Utils.getConsensusState sql setConsensusState :: SQLiteEnv -> ConsensusState -> IO () setConsensusState sql cs = do - ChainwebPactDb.throwOnDbError $ ChainwebPactDb.setConsensusState sql cs + ChainwebPactDb.throwOnDbError $ Backend.Utils.setConsensusState sql cs lookupBlockWithHeight :: HasCallStack => SQLiteEnv -> BlockHeight -> IO (Maybe (Ranked BlockHash)) lookupBlockWithHeight sql bh = do - ChainwebPactDb.throwOnDbError $ ChainwebPactDb.lookupBlockWithHeight sql bh + ChainwebPactDb.throwOnDbError $ Backend.Utils.lookupBlockWithHeight sql bh lookupBlockHash :: HasCallStack => SQLiteEnv -> BlockHash -> IO (Maybe BlockHeight) lookupBlockHash sql pbh = do - ChainwebPactDb.throwOnDbError $ ChainwebPactDb.lookupBlockHash sql pbh + ChainwebPactDb.throwOnDbError $ Backend.Utils.lookupBlockHash sql pbh getPayloadsAfter :: SQLiteEnv -> Parent BlockHeight -> IO [Ranked BlockPayloadHash] getPayloadsAfter sql b = do - ChainwebPactDb.throwOnDbError $ ChainwebPactDb.getPayloadsAfter sql b + ChainwebPactDb.throwOnDbError $ Backend.Utils.getPayloadsAfter sql b -- -------------------------------------------------------------------------- -- -- Utils diff --git a/src/Chainweb/Pact/PactService/ExecBlock.hs b/src/Chainweb/Pact/PactService/ExecBlock.hs index 9b51d8f0d0..e67792716b 100644 --- a/src/Chainweb/Pact/PactService/ExecBlock.hs +++ b/src/Chainweb/Pact/PactService/ExecBlock.hs @@ -357,7 +357,7 @@ applyCmdInBlock logger serviceEnv blockEnv miner txIdxInBlock tx = StateT $ \(bl -- TODO: trace more info? 
let rk = Pact.RequestKey $ Pact._cmdHash cmd (resultOrError, blockHandle') <- flip runStateT blockHandle $ - trace' (logFunction logger) "applyCmdInBlock" computeTrace (\_ -> 0) $ + -- trace' (logFunction logger) "applyCmdInBlock" computeTrace (\_ -> 0) $ doChainwebPactDbTransaction dbEnv (Just rk) $ \pactDb spv -> if _bctxIsGenesis blockCtx then do diff --git a/src/Chainweb/Pact/PactService/Pact4/ExecBlock.hs b/src/Chainweb/Pact/PactService/Pact4/ExecBlock.hs index bb40644700..ef8e17ae1d 100644 --- a/src/Chainweb/Pact/PactService/Pact4/ExecBlock.hs +++ b/src/Chainweb/Pact/PactService/Pact4/ExecBlock.hs @@ -50,6 +50,7 @@ import Chainweb.Pact.Types (ServiceEnv(..), Transactions (..), bctxParentCreatio import Chainweb.Pact4.Backend.ChainwebPactDb import Chainweb.Pact4.ModuleCache import Chainweb.Pact4.NoCoinbase +import Chainweb.Pact4.SPV qualified as Pact4 import Chainweb.Pact4.Transaction qualified as Pact4 import Chainweb.Pact4.TransactionExec qualified as Pact4 import Chainweb.Pact4.Types @@ -505,9 +506,7 @@ applyPactCmd logger serviceEnv blockEnv txIdxInBlock miner cmd = StateT $ \(T2 m if _bctxIsGenesis bCtx then liftIO $! Pact4.applyGenesisCmd logger pactDb Pact4.noSPVSupport bCtx gasLimitedCmd else do - -- FIXME - -- let bhdb = view psBlockHeaderDb serviceEnv - -- let spv = Pact4.pactSPV bhdb (_parentHeader parent) + let spv = Pact4.pactSPV (_cpHeaderOracle $ _benvDbEnv blockEnv) bCtx let txTimeout io = do logFunctionText logger Debug $ "txTimeLimit was not set - defaulting to a function of the block gas limit" @@ -516,7 +515,7 @@ applyPactCmd logger serviceEnv blockEnv txIdxInBlock miner cmd = StateT $ \(T2 m liftIO $ txTimeout $ Pact4.applyCmd logger -- FIXME spv - blockEnv miner gasModel txIdxInBlock undefined gasLimitedCmd initialGas mcache + blockEnv miner gasModel txIdxInBlock spv gasLimitedCmd initialGas mcache pure $ T2 r c if _bctxIsGenesis bCtx diff --git a/src/Chainweb/Pact4/Backend/ChainwebPactDb.hs b/src/Chainweb/Pact4/Backend/ChainwebPactDb.hs index 94b26f7c62..77e7c7a535 100644 --- a/src/Chainweb/Pact4/Backend/ChainwebPactDb.hs +++ b/src/Chainweb/Pact4/Backend/ChainwebPactDb.hs @@ -10,7 +10,6 @@ {-# LANGUAGE DataKinds #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE ImportQualifiedPost #-} --- TODO pact5: fix the orphan PactDbFor instance {-# OPTIONS_GHC -Wno-orphans #-} {-# LANGUAGE ViewPatterns #-} @@ -918,6 +917,6 @@ headerOracleForBlock env = HeaderOracle { consult = \(Parent blkHash) -> do lookupBlockHash (_blockHandlerDb env) blkHash <&> \case Nothing -> False - Just rootHeight -> rootHeight > _blockHandlerBlockHeight env + Just rootHeight -> rootHeight < _blockHandlerBlockHeight env , chain = _blockHandlerChainId env } diff --git a/src/Chainweb/Pact4/TransactionExec.hs b/src/Chainweb/Pact4/TransactionExec.hs index 80d3ff2f0b..9de5482637 100644 --- a/src/Chainweb/Pact4/TransactionExec.hs +++ b/src/Chainweb/Pact4/TransactionExec.hs @@ -134,12 +134,14 @@ import Data.Set (Set) import Data.Set qualified as S import Data.Text (Text) import Data.Text qualified as T +import Pact.Core.Command.Types qualified as Pact5 +import Pact.Core.Command.RPC qualified as Pact5 import Pact.Core.Errors (VerifierError(..)) import Pact.Core.Gas qualified as Pact5 import Pact.Eval (eval, liftTerm) import Pact.Gas (freeGasEnv) import Pact.Interpreter -import Pact.JSON.Encode (toJsonViaEncode) +import Pact.JSON.Encode qualified as J import Pact.JSON.Legacy.Value import Pact.Native.Capabilities (evalCap) import Pact.Native.Internal (appToCap) @@ -426,13 +428,16 @@ applyCmd logger blockEnv 
miner gasModel txIdxInBlock spv cmd initialGas mcache0 then do gasUsed <- use txGasUsed let initGasRemaining = fromIntegral gasLimit - gasUsed + let p5Cmd = + either error id (fromJSON' @(Pact5.Command (Pact5.Payload Value Value)) + (J.toJsonViaEncode (cmd & cmdPayload . mapped %~ _pcCode))) + let p5Verifiers = p5Cmd ^. Pact5.cmdPayload . Pact5.pVerifiers verifierResult <- liftIO $ runVerifierPlugins (cid, currHeight) logger allVerifiers (convertGasFeeToPact5 initGasRemaining) - -- FIXME - [] - -- (fromMaybe [] (cmd ^. cmdPayload . pVerifiers)) + -- don't do this conversion work if there are no verifiers (the usual case) + (fromMaybe [] (fromJuste p5Verifiers <$ cmd ^. cmdPayload . pVerifiers)) case verifierResult of Left err -> do let errMsg = "Tx verifier error: " <> _verifierError err @@ -1158,11 +1163,11 @@ enrichedMsgBody cmd = case (_pPayload $ _cmdPayload cmd) of , "exec-user-data" A..= pactFriendlyUserData (_getLegacyValue userData) ] Continuation (ContMsg pid step isRollback userData proof) -> object [ "tx-type" A..= ("cont" :: Text) - , "cont-pact-id" A..= toJsonViaEncode pid - , "cont-step" A..= toJsonViaEncode (LInteger $ toInteger step) - , "cont-is-rollback" A..= toJsonViaEncode (LBool isRollback) + , "cont-pact-id" A..= J.toJsonViaEncode pid + , "cont-step" A..= J.toJsonViaEncode (LInteger $ toInteger step) + , "cont-is-rollback" A..= J.toJsonViaEncode (LBool isRollback) , "cont-user-data" A..= pactFriendlyUserData (_getLegacyValue userData) - , "cont-has-proof" A..= toJsonViaEncode (isJust proof) + , "cont-has-proof" A..= J.toJsonViaEncode (isJust proof) ] where pactFriendlyUserData Null = object [] @@ -1211,7 +1216,7 @@ redeemGas bctx cmd (Miner mid mks) = do Nothing -> fatal $! "redeemGas: no gas id in scope for gas refunds" Just g -> return g let redeemGasCmd = - ContMsg gid 1 False (toLegacyJson $ object [ "fee" A..= toJsonViaEncode fee ]) Nothing + ContMsg gid 1 False (toLegacyJson $ object [ "fee" A..= J.toJsonViaEncode fee ]) Nothing fmap _crEvents $ locally txQuirkGasFee (const Nothing) $ applyContinuation 0 (initState mcache) redeemGasCmd diff --git a/src/Chainweb/PayloadProvider/Pact.hs b/src/Chainweb/PayloadProvider/Pact.hs index 85fd285777..72d7512fdd 100644 --- a/src/Chainweb/PayloadProvider/Pact.hs +++ b/src/Chainweb/PayloadProvider/Pact.hs @@ -26,6 +26,7 @@ import Chainweb.Counter import Chainweb.Logger import Chainweb.MerkleUniverse import Chainweb.MinerReward qualified as MinerReward +import Chainweb.Pact.Backend.ChainwebPactDb qualified as ChainwebPactDb import Chainweb.Pact.Backend.Utils import Chainweb.Pact.Mempool.Mempool import Chainweb.Pact.PactService qualified as PactService @@ -124,14 +125,15 @@ withPactPayloadProvider -> Maybe HTTP.Manager -> logger -> Maybe (Counter "txFailures") - -> MempoolBackend Pact.Transaction + -> MemPoolAccess -> PayloadDb tbl -> FilePath -> PactServiceConfig -> Maybe PayloadWithOutputs -> ResourceT IO (PactPayloadProvider logger tbl) -withPactPayloadProvider cid rdb http logger txFailuresCounter mp pdb pactDbDir config maybeGenesisPayload = do +withPactPayloadProvider cid rdb http logger txFailuresCounter mpa pdb pactDbDir config maybeGenesisPayload = do readWriteSqlenv <- withSqliteDb cid logger pactDbDir False + liftIO $ ChainwebPactDb.initSchema readWriteSqlenv -- perform the database migration of the `BlockHeader` Table. 
bhdb <- withBlockHeaderDb rdb cid @@ -141,14 +143,12 @@ withPactPayloadProvider cid rdb http logger txFailuresCounter mp pdb pactDbDir c when needsMigration $ -- We cleanup potential old state and start migrating the entire database -- from scratch. - migrateBlockHistoryTable logger readWriteSqlenv bhdb True + migrateBlockHistoryTable logger cid readWriteSqlenv bhdb True readOnlySqlPool <- withReadSqlitePool cid pactDbDir PactPayloadProvider logger <$> PactService.withPactService cid http mpa logger txFailuresCounter pdb readOnlySqlPool readWriteSqlenv config (maybe GenesisNotNeeded GenesisPayload maybeGenesisPayload) - where - mpa = pactMemPoolAccess mp $ addLabel ("sub-component", "MempoolAccess") logger pactMemPoolAccess :: Logger logger diff --git a/src/Chainweb/PayloadProvider/Pact/BlockHistoryMigration.hs b/src/Chainweb/PayloadProvider/Pact/BlockHistoryMigration.hs index 97366beff3..73319e7b4c 100644 --- a/src/Chainweb/PayloadProvider/Pact/BlockHistoryMigration.hs +++ b/src/Chainweb/PayloadProvider/Pact/BlockHistoryMigration.hs @@ -12,6 +12,7 @@ module Chainweb.PayloadProvider.Pact.BlockHistoryMigration where import Chainweb.BlockHash import Chainweb.BlockHeader import Chainweb.BlockHeaderDB (BlockHeaderDb) +import Chainweb.ChainId import Chainweb.Logger import Chainweb.Pact.Backend.Types (SQLiteEnv) import Chainweb.TreeDB @@ -22,6 +23,8 @@ import System.Logger qualified as L import System.LogLevel import Chainweb.Pact.Backend.Utils import Chainweb.Pact.Backend.PactState (qryStream) +import Chainweb.Parent +import Chainweb.PayloadProvider import Streaming qualified as S import Streaming.Prelude qualified as S import Chainweb.Utils (sshow, whenM) @@ -50,12 +53,13 @@ import Data.Time.Clock (getCurrentTime, diffUTCTime) migrateBlockHistoryTable :: HasVersion => Logger logger - => logger -- ^ logger - -> SQLiteEnv -- ^ sqlite database - -> BlockHeaderDb -- ^ block header database + => logger + -> ChainId + -> SQLiteEnv + -> BlockHeaderDb -> Bool -- ^ cleanup table after migration -> IO () -migrateBlockHistoryTable logger sdb bhdb cleanup +migrateBlockHistoryTable logger cid sdb bhdb cleanup = L.withLoggerLabel ("component", "migrateBlockHistoryTable") logger $ \lf -> whenM (tableNeedsMigration lf sdb) $ do let logf serv msg = liftIO $ logFunctionText lf serv msg @@ -76,41 +80,57 @@ migrateBlockHistoryTable logger sdb bhdb cleanup remainingRowsRef <- newIORef nBlockHistory rs & S.chunksOf 10_000 - & S.mapsM_ (\chunk -> do + & S.mapsM (\chunk -> do remainingRows <- readIORef remainingRowsRef let perc = (1.0 - fromIntegral @_ @Double remainingRows / fromIntegral @_ @Double nBlockHistory) * 100.0 logf Info $ "Table migration: Process remaining rows: " <> sshow remainingRows <> " ("<> sshow perc <> ")" - r <- withTransaction sdb $ flip S.mapM_ chunk $ \case - rr@[SInt bh, SInt _, SBlob h] -> do - let rowBlockHeight = fromIntegral bh - rowBlockHash <- runGetS decodeBlockHash h - blockHeader <- lookupRanked bhdb rowBlockHeight rowBlockHash >>= \case - Nothing -> do - error $ "BlockHeader Entry missing for " - <> "blockHeight=" - <> sshow rowBlockHeight - <> ", blockHash=" - <> sshow rowBlockHash - Just blockHeader -> return blockHeader - - let bph = view blockPayloadHash blockHeader - enc = runPutS $ encodeBlockPayloadHash bph - - throwOnDbError $ exec' sdb "INSERT INTO BlockHistory2 (blockheight, endingtxid, hash, payloadhash) VALUES (?, ?, ?, ?)" - $ rr ++ [SBlob enc] - _ -> error "unexpected row shape" + r <- withTransaction sdb $ chunk + & S.mapMaybeM (\case + rr@[SInt bh, SInt _, SBlob h] 
-> do + let rowBlockHeight = fromIntegral bh + rowBlockHash <- runGetS decodeBlockHash h + lookupRanked bhdb rowBlockHeight rowBlockHash >>= \case + Nothing -> do + -- TODO: stop when there's a missing header? + -- error if there's a gap? + logFunctionText logger Warn + $ "BlockHeader Entry missing for " + <> "blockHeight=" + <> sshow rowBlockHeight + <> ", blockHash=" + <> sshow rowBlockHash + return Nothing + Just blockHeader -> do + let bph = view blockPayloadHash blockHeader + enc = runPutS $ encodeBlockPayloadHash bph + + throwOnDbError $ exec' sdb + "INSERT INTO BlockHistory2 (blockheight, endingtxid, hash, payloadhash) VALUES (?, ?, ?, ?)" + $ rr ++ [SBlob enc] + return (Just blockHeader) + _ -> error "unexpected row shape" + ) + & S.last modifyIORef' remainingRowsRef (\old -> max 0 (old - 10_000)) pure r) + & S.catMaybes + & S.last case e of - Left e' -> error $ "Table migration failure: " <> sshow e' - Right () -> do + _ S.:> Left e' -> + error $ "Table migration failure: " <> sshow e' + Nothing S.:> Right () -> do + error "No blocks were actually found!" + Just finalBlock S.:> Right () -> do end <- getCurrentTime logf Info $ "Elapsed Time: " <> sshow (diffUTCTime end start) - when cleanup $ do + when cleanup $ withTransaction sdb $ do logf Info "Data migration completed, cleaning up" + _ <- rewindDbTo cid sdb (Parent $ view rankedBlockHash finalBlock) + let ss = syncStateOfBlockHeader finalBlock + throwOnDbError $ setConsensusState sdb (ConsensusState ss ss ss) throwOnDbError $ exec_ sdb "DROP TABLE BlockHistory" logf Info "Table migration successful" diff --git a/src/Chainweb/VerifierPlugin/Hyperlane/Message.hs b/src/Chainweb/VerifierPlugin/Hyperlane/Message.hs index d642d1f56b..7b1b5e1777 100644 --- a/src/Chainweb/VerifierPlugin/Hyperlane/Message.hs +++ b/src/Chainweb/VerifierPlugin/Hyperlane/Message.hs @@ -1,5 +1,8 @@ {-# LANGUAGE DuplicateRecordFields #-} -{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE ViewPatterns #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE LambdaCase #-} -- | -- Chainweb.VerifierPlugin.Hyperlane.Message @@ -11,9 +14,13 @@ -- module Chainweb.VerifierPlugin.Hyperlane.Message (plugin) where +import Chainweb.Version.Guards import Chainweb.VerifierPlugin -import Chainweb.VerifierPlugin.Hyperlane.Message.After225 qualified as After225 +import qualified Chainweb.VerifierPlugin.Hyperlane.Message.After225 as After225 +import qualified Chainweb.VerifierPlugin.Hyperlane.Message.Before225 as Before225 plugin :: VerifierPlugin -plugin = VerifierPlugin $ \(_cid, _bh) proof caps gasRef -> - After225.runPlugin proof caps gasRef +plugin = VerifierPlugin $ \(cid, bh) proof caps gasRef -> + if chainweb225Pact cid bh + then After225.runPlugin proof caps gasRef + else Before225.runPlugin proof caps gasRef diff --git a/src/Chainweb/VerifierPlugin/Hyperlane/Message/Before225.hs b/src/Chainweb/VerifierPlugin/Hyperlane/Message/Before225.hs new file mode 100644 index 0000000000..f5418940fb --- /dev/null +++ b/src/Chainweb/VerifierPlugin/Hyperlane/Message/Before225.hs @@ -0,0 +1,126 @@ +{-# LANGUAGE DuplicateRecordFields #-} +{-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE ViewPatterns #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE RankNTypes #-} + +-- | +-- Chainweb.VerifierPlugin.Hyperlane.Message +-- Copyright: Copyright © 2024 Kadena LLC. +-- License: MIT +-- +-- Deprecated verifier plugin behaviour for Hyperlane Message. +-- Verifies the message using the provided Message Id metadata. 
+-- +module Chainweb.VerifierPlugin.Hyperlane.Message.Before225 (runPlugin) where + +-- import Control.Error +import Control.Monad (unless) +import Control.Monad.Except +import Control.Monad.ST + +import qualified Data.ByteString as BS +import Data.Maybe +import qualified Data.Text.Encoding as Text +import qualified Data.Vector as V +import qualified Data.Set as Set +import Data.STRef + +import Ethereum.Misc hiding (Word256) + +import Pact.Core.Capabilities +import Pact.Core.Errors (VerifierError(..)) +import Pact.Core.Gas +import Pact.Core.Literal +import Pact.Core.Names +import Pact.Core.PactValue +import Pact.Core.Signer + +import Chainweb.Utils.Serialization (putRawByteString, runPutS, runGetS, putWord32be) + +import Chainweb.VerifierPlugin +import Chainweb.VerifierPlugin.Hyperlane.Binary +import Chainweb.VerifierPlugin.Hyperlane.Utils +import Chainweb.Utils (encodeB64UrlNoPaddingText, decodeB64UrlNoPaddingText, sshow) +import Pact.Core.Errors (VerifierError(..)) + +base64DecodeGasCost :: Gas +base64DecodeGasCost = Gas 5 + +runPlugin :: forall s + . PactValue + -> Set.Set SigCapability + -> STRef s Gas + -> ExceptT VerifierError (ST s) () +runPlugin proof caps gasRef = do + -- extract capability values + SigCapability (CapToken {..}) <- case Set.toList caps of + [cap] -> return cap + _ -> throwError $ VerifierError "Expected one capability." + + (capMessageBody, capRecipient, capSigners) <- case _ctArgs of + [mb, r, sigs] -> return (mb, r, sigs) + _ -> throwError $ VerifierError $ "Incorrect number of capability arguments. Expected: messageBody, recipient, signers." + + -- extract proof object values + (hyperlaneMessageBase64, metadataBase64) <- case proof of + PList values + | [PLiteral (LString msg), PLiteral (LString mtdt)] <- V.toList values -> + pure (msg, mtdt) + _ -> throwError $ VerifierError "Expected a proof data as a list" + + (HyperlaneMessage{..}, hyperlaneMessageBinary) <- do + chargeGas gasRef base64DecodeGasCost + msg <- decodeB64UrlNoPaddingText hyperlaneMessageBase64 + decoded <- runGetS getHyperlaneMessage msg + return (decoded, msg) + + MessageIdMultisigIsmMetadata{..} <- do + chargeGas gasRef base64DecodeGasCost + metadata <- decodeB64UrlNoPaddingText metadataBase64 + runGetS getMessageIdMultisigIsmMetadata metadata + + -- validate recipient + let hmRecipientPactValue = PLiteral $ LString $ Text.decodeUtf8 $ BS.dropWhile (== 0) hmRecipient + unless (hmRecipientPactValue == capRecipient) $ + throwError $ VerifierError $ + "Recipients don't match. Expected: " <> sshow hmRecipientPactValue <> " but got " <> sshow capRecipient + + let + hmMessageBodyPactValue = PLiteral $ LString $ encodeB64UrlNoPaddingText hmMessageBody + + unless (hmMessageBodyPactValue == capMessageBody) $ + throwError $ VerifierError $ + "Invalid TokenMessage. 
Expected: " <> sshow hmMessageBodyPactValue <> " but got " <> sshow capMessageBody + + -- validate signers + let + domainHash = keccak256ByteString $ runPutS $ do + -- Corresponds to abi.encodePacked behaviour + putWord32be hmOriginDomain + putRawByteString mmimOriginMerkleTreeAddress + putRawByteString "HYPERLANE" + + let messageId = keccak256ByteString hyperlaneMessageBinary + + let + digest = keccak256 $ runPutS $ do + -- Corresponds to abi.encodePacked behaviour + putRawByteString ethereumHeader + putRawByteString $ + keccak256ByteString $ runPutS $ do + putRawByteString domainHash + putRawByteString mmimSignedCheckpointRoot + putWord32be mmimSignedCheckpointIndex + putRawByteString messageId + + -- 16250 is a gas cost of the address recovery + addresses <- catMaybes <$> mapM (\sig -> chargeGas gasRef (Gas 16250) >> recoverAddress digest sig) mmimSignatures + let addressesVals = PList $ V.fromList $ map (PLiteral . LString . encodeHex) addresses + + -- Note, that we check the signers for the full equality including their order and amount. + -- Hyperlane's ISM uses a threshold and inclusion check. + unless (addressesVals == capSigners) $ + throwError $ VerifierError $ + "Signers don't match. Expected: " <> sshow addressesVals <> " but got " <> sshow capSigners diff --git a/src/Chainweb/Version/Mainnet.hs b/src/Chainweb/Version/Mainnet.hs index a787ceed0d..0946b75dc3 100644 --- a/src/Chainweb/Version/Mainnet.hs +++ b/src/Chainweb/Version/Mainnet.hs @@ -24,6 +24,21 @@ import P2P.BootstrapNodes import Pact.Core.Gas(Gas(..)) import Pact.Core.Names +import qualified Chainweb.Pact.Transactions.CoinV3Transactions as CoinV3 +import qualified Chainweb.Pact.Transactions.CoinV4Transactions as CoinV4 +import qualified Chainweb.Pact.Transactions.CoinV5Transactions as CoinV5 +import qualified Chainweb.Pact.Transactions.CoinV6Transactions as CoinV6 +import qualified Chainweb.Pact.Transactions.Mainnet0Transactions as MN0 +import qualified Chainweb.Pact.Transactions.Mainnet1Transactions as MN1 +import qualified Chainweb.Pact.Transactions.Mainnet2Transactions as MN2 +import qualified Chainweb.Pact.Transactions.Mainnet3Transactions as MN3 +import qualified Chainweb.Pact.Transactions.Mainnet4Transactions as MN4 +import qualified Chainweb.Pact.Transactions.Mainnet5Transactions as MN5 +import qualified Chainweb.Pact.Transactions.Mainnet6Transactions as MN6 +import qualified Chainweb.Pact.Transactions.Mainnet7Transactions as MN7 +import qualified Chainweb.Pact.Transactions.Mainnet8Transactions as MN8 +import qualified Chainweb.Pact.Transactions.Mainnet9Transactions as MN9 +import qualified Chainweb.Pact.Transactions.MainnetKADTransactions as MNKAD -- | Initial hash target for mainnet 20-chain transition. Difficulty on the new -- chains is 1/4 of the current difficulty. 
It is based on the following header @@ -169,7 +184,26 @@ mainnet = withVersion mainnet $ ChainwebVersion , ( unsafeChainId 19, unsafeFromText "i-MN4AoxsaPds4M_MzwNSUygAkGnPZoCDvahfckowt4") ] } - , _versionUpgrades = onChains [] + , _versionUpgrades = chainZip HM.union + (indexByForkHeights + [ (CoinV2, onChains + [ (unsafeChainId 0, pact4Upgrade MN0.transactions) + , (unsafeChainId 1, pact4Upgrade MN1.transactions) + , (unsafeChainId 2, pact4Upgrade MN2.transactions) + , (unsafeChainId 3, pact4Upgrade MN3.transactions) + , (unsafeChainId 4, pact4Upgrade MN4.transactions) + , (unsafeChainId 5, pact4Upgrade MN5.transactions) + , (unsafeChainId 6, pact4Upgrade MN6.transactions) + , (unsafeChainId 7, pact4Upgrade MN7.transactions) + , (unsafeChainId 8, pact4Upgrade MN8.transactions) + , (unsafeChainId 9, pact4Upgrade MN9.transactions) + ]) + , (Pact4Coin3, onAllChains $ Pact4Upgrade CoinV3.transactions True) + , (Chainweb214Pact, onAllChains $ Pact4Upgrade CoinV4.transactions True) + , (Chainweb215Pact, onAllChains $ Pact4Upgrade CoinV5.transactions True) + , (Chainweb223Pact, onAllChains $ pact4Upgrade CoinV6.transactions) + ]) + (onChains [(unsafeChainId 0, HM.singleton to20ChainsMainnet (pact4Upgrade MNKAD.transactions))]) , _versionCheats = VersionCheats { _disablePow = False , _fakeFirstEpochStart = False diff --git a/src/Chainweb/Version/Testnet04.hs b/src/Chainweb/Version/Testnet04.hs index 55b4665a81..bc9e5d4ee2 100644 --- a/src/Chainweb/Version/Testnet04.hs +++ b/src/Chainweb/Version/Testnet04.hs @@ -4,6 +4,7 @@ {-# language PatternSynonyms #-} {-# language QuasiQuotes #-} {-# language ViewPatterns #-} +{-# language ImportQualifiedPost #-} module Chainweb.Version.Testnet04(testnet04, pattern Testnet04) where @@ -16,6 +17,23 @@ import Chainweb.BlockHeight import Chainweb.ChainId import Chainweb.Difficulty import Chainweb.Graph +import Chainweb.Pact.Transactions.CoinV3Transactions qualified as CoinV3 +import Chainweb.Pact.Transactions.CoinV4Transactions qualified as CoinV4 +import Chainweb.Pact.Transactions.CoinV5Transactions qualified as CoinV5 +import Chainweb.Pact.Transactions.CoinV6Transactions qualified as CoinV6 +import Chainweb.Pact.Transactions.Mainnet0Transactions qualified as MN0 +import Chainweb.Pact.Transactions.Mainnet1Transactions qualified as MN1 +import Chainweb.Pact.Transactions.Mainnet2Transactions qualified as MN2 +import Chainweb.Pact.Transactions.Mainnet3Transactions qualified as MN3 +import Chainweb.Pact.Transactions.Mainnet4Transactions qualified as MN4 +import Chainweb.Pact.Transactions.Mainnet5Transactions qualified as MN5 +import Chainweb.Pact.Transactions.Mainnet6Transactions qualified as MN6 +import Chainweb.Pact.Transactions.Mainnet7Transactions qualified as MN7 +import Chainweb.Pact.Transactions.Mainnet8Transactions qualified as MN8 +import Chainweb.Pact.Transactions.Mainnet9Transactions qualified as MN9 +import Chainweb.Pact.Transactions.MainnetKADTransactions qualified as MNKAD +import Chainweb.BlockHeader.Genesis.Testnet040Payload qualified as PN0 +import Chainweb.BlockHeader.Genesis.Testnet041to19Payload qualified as PNN import Chainweb.Time import Chainweb.Utils import Chainweb.Utils.Rule @@ -159,8 +177,26 @@ testnet04 = withVersion testnet04 $ ChainwebVersion , (unsafeChainId 19, unsafeFromText "HU-ZhdfsQCiTrfxjtbkr5MHmjoukOt6INqB2vuYiF3g") ] } - -- all upgrades have been removed due to the removal of Pact 4 - , _versionUpgrades = onChains [] + , _versionUpgrades = chainZip HM.union + (indexByForkHeights + [ (CoinV2, onChains $ + [ (unsafeChainId 0, 
pact4Upgrade MN0.transactions) + , (unsafeChainId 1, pact4Upgrade MN1.transactions) + , (unsafeChainId 2, pact4Upgrade MN2.transactions) + , (unsafeChainId 3, pact4Upgrade MN3.transactions) + , (unsafeChainId 4, pact4Upgrade MN4.transactions) + , (unsafeChainId 5, pact4Upgrade MN5.transactions) + , (unsafeChainId 6, pact4Upgrade MN6.transactions) + , (unsafeChainId 7, pact4Upgrade MN7.transactions) + , (unsafeChainId 8, pact4Upgrade MN8.transactions) + , (unsafeChainId 9, pact4Upgrade MN9.transactions) + ]) + , (Pact4Coin3, onAllChains (Pact4Upgrade CoinV3.transactions True)) + , (Chainweb214Pact, onAllChains (Pact4Upgrade CoinV4.transactions True)) + , (Chainweb215Pact, onAllChains (Pact4Upgrade CoinV5.transactions True)) + , (Chainweb223Pact, onAllChains (pact4Upgrade CoinV6.transactions)) + ]) + (onChains [(unsafeChainId 0, HM.singleton to20ChainsTestnet (pact4Upgrade MNKAD.transactions))]) , _versionCheats = VersionCheats { _disablePow = False , _fakeFirstEpochStart = False diff --git a/test/blockhistory-migration/BlockHistoryMigrationTests.hs b/test/blockhistory-migration/BlockHistoryMigrationTests.hs index 8c14dd4c77..9ed22fd66e 100644 --- a/test/blockhistory-migration/BlockHistoryMigrationTests.hs +++ b/test/blockhistory-migration/BlockHistoryMigrationTests.hs @@ -75,7 +75,7 @@ main = withSystemTempDirectory "sql-db" $ \dbDir -> do withVersion Mainnet01 $ runResourceT $ do bhdb <- withBlockHeaderDb rocksdb cid liftIO $ do - migrateBlockHistoryTable logger sql bhdb False + migrateBlockHistoryTable logger cid sql bhdb False let qstmt = "SELECT A.blockheight, A.endingtxid, \ \ B.hash AS b_hash, B.payloadhash AS b_payload_hash, \ diff --git a/test/unit/Chainweb/Test/Pact/BlockHistoryMigrationTest.hs b/test/unit/Chainweb/Test/Pact/BlockHistoryMigrationTest.hs index ce69d8858f..6054b5b080 100644 --- a/test/unit/Chainweb/Test/Pact/BlockHistoryMigrationTest.hs +++ b/test/unit/Chainweb/Test/Pact/BlockHistoryMigrationTest.hs @@ -62,7 +62,7 @@ initSchema sql = do withSetup :: TestName -> (SQLiteEnv -> IO ()) - -> (HasVersion => GenericLogger -> SQLiteEnv -> BlockHeaderDb -> Bool -> IO ()) + -> (HasVersion => GenericLogger -> ChainId -> SQLiteEnv -> BlockHeaderDb -> Bool -> IO ()) -> TestTree withSetup n setup action = withResourceT (withTempChainSqlite cid) $ \sqlIO -> do testCase n $ do @@ -74,7 +74,7 @@ withSetup n setup action = withResourceT (withTempChainSqlite cid) $ \sqlIO -> d withTempRocksDb "chainweb-tests" $ \rdb -> do withVersion Mainnet01 $ runResourceT $ do bhdb <- withBlockHeaderDb rdb cid - liftIO $ action logger sql bhdb True + liftIO $ action logger cid sql bhdb True tests :: HasCallStack => TestTree tests = testGroup "BlockHistory Table Migration" [ @@ -83,36 +83,36 @@ tests = testGroup "BlockHistory Table Migration" [ migrateBlockHistoryTable , withSetup "test successful migration cleanup" initSchema - $ \lf sdb bhdb cleanup -> do + $ \lf cid sdb bhdb cleanup -> do let qryIO = throwOnDbError $ qry sdb "SELECT 1 FROM sqlite_master WHERE type = 'table' AND name = 'BlockHistory'" [] [RInt] [[SInt p]] <- qryIO assertExpectation "Table should be present" (Expected 1) (Actual p) - migrateBlockHistoryTable lf sdb bhdb cleanup + migrateBlockHistoryTable lf cid sdb bhdb cleanup post <- qryIO assertExpectation "Table should not be present" (Expected []) (Actual post) , withSetup "test migration" initSchema - $ \lf sdb bhdb _cleanup -> do + $ \lf cid sdb bhdb _cleanup -> do traverse_ (unsafeInsertBlockHeaderDb bhdb) blockHeaders traverse_ (unsafeInsertEntry sdb) sqliteData -- Disable 
original table cleanup for migration verification. - migrateBlockHistoryTable lf sdb bhdb False + migrateBlockHistoryTable lf cid sdb bhdb False verifyMigration sdb bhdb -- Re-run verification - migrateBlockHistoryTable lf sdb bhdb False + migrateBlockHistoryTable lf cid sdb bhdb False verifyMigration sdb bhdb , withSetup "test migration with one missing row" initSchema - $ \lf sdb bhdb _cleanup -> do + $ \lf cid sdb bhdb _cleanup -> do traverse_ (unsafeInsertBlockHeaderDb bhdb) blockHeaders traverse_ (unsafeInsertEntry sdb) sqliteData -- Disable original table cleanup for migration verification. - migrateBlockHistoryTable lf sdb bhdb False + migrateBlockHistoryTable lf cid sdb bhdb False verifyMigration sdb bhdb @@ -124,7 +124,7 @@ tests = testGroup "BlockHistory Table Migration" [ assert (n == 9) $ "BlockHistory2 should contain 9 entries, actual: " <> sshow n -- Re-run verification - migrateBlockHistoryTable lf sdb bhdb False + migrateBlockHistoryTable lf cid sdb bhdb False verifyMigration sdb bhdb ]
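
The flipped comparison in headerOracleForBlock is easy to misread, so here is a minimal model of the intended check, written against plain containers rather than the chainweb types; OracleView and isKnownAncestor are hypothetical names, not part of the real API. The idea is that a hash the SPV machinery asks about only counts if it is already part of the chain strictly below the block being evaluated.

import qualified Data.Map.Strict as M

newtype ModelHeight = ModelHeight Int
  deriving (Eq, Ord, Show)

-- Hypothetical, simplified view of what the header oracle can see.
data OracleView = OracleView
  { knownBlocks :: M.Map String ModelHeight  -- block hash -> recorded height
  , evalHeight  :: ModelHeight               -- height of the block being evaluated
  }

-- Mirrors the `rootHeight < _blockHandlerBlockHeight` check above: unknown
-- hashes are rejected, and so is anything at or above the evaluation height.
isKnownAncestor :: OracleView -> String -> Bool
isKnownAncestor ov h = case M.lookup h (knownBlocks ov) of
  Nothing         -> False
  Just rootHeight -> rootHeight < evalHeight ov

main :: IO ()
main = do
  let ov = OracleView (M.fromList [("aaa", ModelHeight 5), ("bbb", ModelHeight 9)]) (ModelHeight 9)
  print (isKnownAncestor ov "aaa")  -- True: strictly below the evaluated block
  print (isKnownAncestor ov "bbb")  -- False: same height as the evaluated block
  print (isKnownAncestor ov "ccc")  -- False: unknown hash

With the previous `>` comparison the predicate only accepted blocks recorded above the evaluation height, which is the opposite of what this kind of lookup needs.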
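
The reworked loop in migrateBlockHistoryTable threads several streaming combinators together (S.chunksOf, S.mapsM, S.mapMaybeM, S.catMaybes, S.last) to batch rows into per-transaction chunks and to remember the last header that was actually migrated. Below is a stripped-down sketch of that shape, using only the streaming package; processRow and inTransaction are hypothetical stand-ins for the real per-row insert and for withTransaction.

import Data.Function ((&))
import qualified Streaming as S
import qualified Streaming.Prelude as S

-- Hypothetical per-row action: Just the migrated value, or Nothing when the
-- row has to be skipped (in the real code: no matching block header found).
processRow :: Int -> IO (Maybe Int)
processRow n
  | even n = pure (Just (n * 10))
  | otherwise = pure Nothing

-- Hypothetical stand-in for withTransaction: here it simply runs the action.
inTransaction :: IO a -> IO a
inTransaction = id

-- Process the input in fixed-size chunks, one transaction per chunk, keep
-- only the rows that were actually migrated, and return the last of them
-- (the analogue of finalBlock above).
lastMigrated :: [Int] -> IO (Maybe Int)
lastMigrated input = do
  lastVal S.:> () <-
    S.each input
      & S.chunksOf 3
      & S.mapsM (\chunk -> inTransaction (S.last (S.mapMaybeM processRow chunk)))
      & S.catMaybes
      & S.last
  pure lastVal

main :: IO ()
main = lastMigrated [1 .. 10] >>= print  -- expected: Just 100

The final S.last over the catMaybes stream is what produces the Maybe finalBlock that the cleanup step then rewinds to and records as consensus state.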
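
The restored Before225 plugin checks the recovered signer addresses for exact equality with the capability's signer list, including order and count, whereas (as its code comment notes) Hyperlane's multisig ISM performs a threshold-plus-inclusion check. A small illustration of the difference follows, with hypothetical exactMatch / thresholdMatch helpers and no treatment of duplicate signatures.

import qualified Data.Set as Set

-- What the deprecated plugin requires: the recovered addresses must equal the
-- declared signers exactly, in the same order and with the same count.
exactMatch :: [String] -> [String] -> Bool
exactMatch recovered declared = recovered == declared

-- Roughly the on-chain policy described in the comment: at least `threshold`
-- recovered addresses must belong to the validator set (duplicate signatures
-- are not handled here, which a real implementation would have to do).
thresholdMatch :: Int -> Set.Set String -> [String] -> Bool
thresholdMatch threshold validators recovered =
  length (filter (`Set.member` validators) recovered) >= threshold

main :: IO ()
main = do
  let validators = Set.fromList ["0xaa", "0xbb", "0xcc"]
  print (exactMatch ["0xaa", "0xbb"] ["0xbb", "0xaa"])  -- False: order matters
  print (thresholdMatch 2 validators ["0xbb", "0xaa"])  -- True: 2 of 3 validators signed
  print (thresholdMatch 2 validators ["0xaa", "0xdd"])  -- False: only one known validator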