diff --git a/Builds/levelization/results/loops.txt b/Builds/levelization/results/loops.txt
index df1d273f93b..7da789ce780 100644
--- a/Builds/levelization/results/loops.txt
+++ b/Builds/levelization/results/loops.txt
@@ -29,7 +29,7 @@ Loop: xrpld.core xrpld.net
   xrpld.net > xrpld.core
 
 Loop: xrpld.core xrpld.perflog
-  xrpld.perflog == xrpld.core
+  xrpld.perflog ~= xrpld.core
 
 Loop: xrpld.net xrpld.rpc
   xrpld.rpc ~= xrpld.net
diff --git a/Builds/levelization/results/ordering.txt b/Builds/levelization/results/ordering.txt
index eca7fc6dc29..fde661840fb 100644
--- a/Builds/levelization/results/ordering.txt
+++ b/Builds/levelization/results/ordering.txt
@@ -162,6 +162,7 @@ xrpld.ledger > xrpl.basics
 xrpld.ledger > xrpl.json
 xrpld.ledger > xrpl.protocol
 xrpld.net > xrpl.basics
+xrpld.net > xrpld.perflog
 xrpld.net > xrpl.json
 xrpld.net > xrpl.protocol
 xrpld.net > xrpl.resource
diff --git a/src/xrpld/app/consensus/RCLConsensus.cpp b/src/xrpld/app/consensus/RCLConsensus.cpp
index 292ba7d483d..2809fd3ad0f 100644
--- a/src/xrpld/app/consensus/RCLConsensus.cpp
+++ b/src/xrpld/app/consensus/RCLConsensus.cpp
@@ -37,6 +37,7 @@
 #include
 #include
 #include
+#include <xrpld/perflog/PerfLog.h>
 #include
 #include
 
@@ -49,6 +50,8 @@
 #include
 #include
 
+using namespace std::chrono_literals;
+
 namespace ripple {
 
 RCLConsensus::RCLConsensus(
@@ -140,11 +143,17 @@ RCLConsensus::Adaptor::acquireLedger(LedgerHash const& hash)
         app_.getJobQueue().addJob(
             jtADVANCE,
             "getConsensusLedger1",
-            [id = hash, &app = app_, this]() {
-                JLOG(j_.debug())
-                    << "JOB advanceLedger getConsensusLedger1 started";
-                app.getInboundLedgers().acquireAsync(
-                    id, 0, InboundLedger::Reason::CONSENSUS);
+            [id = hash, &app = app_, this, journal = j_]() {
+                perf::measureDurationAndLog(
+                    [&]() {
+                        JLOG(j_.debug()) << "JOB advanceLedger "
+                                            "getConsensusLedger1 started";
+                        app.getInboundLedgers().acquireAsync(
+                            id, 0, InboundLedger::Reason::CONSENSUS);
+                    },
+                    "getConsensusLedger1",
+                    1s,
+                    journal);
             });
     }
     return std::nullopt;
@@ -442,21 +451,27 @@ RCLConsensus::Adaptor::onAccept(
     app_.getJobQueue().addJob(
         jtACCEPT,
         "acceptLedger",
-        [=, this, cj = std::move(consensusJson)]() mutable {
-            // Note that no lock is held or acquired during this job.
-            // This is because generic Consensus guarantees that once a ledger
-            // is accepted, the consensus results and capture by reference state
-            // will not change until startRound is called (which happens via
-            // endConsensus).
-            RclConsensusLogger clog("onAccept", validating, j_);
-            this->doAccept(
-                result,
-                prevLedger,
-                closeResolution,
-                rawCloseTimes,
-                mode,
-                std::move(cj));
-            this->app_.getOPs().endConsensus(clog.ss());
+        [=, this, cj = std::move(consensusJson), journal = j_]() mutable {
+            perf::measureDurationAndLog(
+                [&]() {
+                    // Note that no lock is held or acquired during this job.
+                    // This is because generic Consensus guarantees that once a
+                    // ledger is accepted, the consensus results and capture by
+                    // reference state will not change until startRound is
+                    // called (which happens via
+                    // endConsensus).
+                    RclConsensusLogger clog("onAccept", validating, j_);
+                    this->doAccept(
+                        result,
+                        prevLedger,
+                        closeResolution,
+                        rawCloseTimes,
+                        mode,
+                        std::move(cj));
+                    this->app_.getOPs().endConsensus(clog.ss());
+                },
+                "acceptLedger",
+                1s,
+                journal);
         });
 }
diff --git a/src/xrpld/app/consensus/RCLValidations.cpp b/src/xrpld/app/consensus/RCLValidations.cpp
index a04047c78a1..e30fbd088fa 100644
--- a/src/xrpld/app/consensus/RCLValidations.cpp
+++ b/src/xrpld/app/consensus/RCLValidations.cpp
@@ -32,6 +32,8 @@
 #include
 
+using namespace std::chrono_literals;
+
 namespace ripple {
 
 RCLValidatedLedger::RCLValidatedLedger(MakeGenesis)
@@ -142,11 +144,19 @@ RCLValidationsAdaptor::acquire(LedgerHash const& hash)
         Application* pApp = &app_;
 
         app_.getJobQueue().addJob(
-            jtADVANCE, "getConsensusLedger2", [pApp, hash, this]() {
-                JLOG(j_.debug())
-                    << "JOB advanceLedger getConsensusLedger2 started";
-                pApp->getInboundLedgers().acquireAsync(
-                    hash, 0, InboundLedger::Reason::CONSENSUS);
+            jtADVANCE,
+            "getConsensusLedger2",
+            [pApp, hash, this, journal = j_]() {
+                perf::measureDurationAndLog(
+                    [&]() {
+                        JLOG(j_.debug())
+                            << "JOB advanceLedger getConsensusLedger2 started";
+                        pApp->getInboundLedgers().acquireAsync(
+                            hash, 0, InboundLedger::Reason::CONSENSUS);
+                    },
+                    "getConsensusLedger2",
+                    1s,
+                    journal);
             });
         return std::nullopt;
     }
diff --git a/src/xrpld/app/ledger/ConsensusTransSetSF.cpp b/src/xrpld/app/ledger/ConsensusTransSetSF.cpp
index e62426b7201..1edf49868b9 100644
--- a/src/xrpld/app/ledger/ConsensusTransSetSF.cpp
+++ b/src/xrpld/app/ledger/ConsensusTransSetSF.cpp
@@ -23,11 +23,14 @@
 #include
 #include
 #include
+#include <xrpld/perflog/PerfLog.h>
 #include
 
 #include
 #include
 
+using namespace std::chrono_literals;
+
 namespace ripple {
 
 ConsensusTransSetSF::ConsensusTransSetSF(Application& app, NodeCache& nodeCache)
@@ -65,9 +68,14 @@ ConsensusTransSetSF::gotNode(
             "ripple::ConsensusTransSetSF::gotNode : transaction hash "
             "match");
         auto const pap = &app_;
-        app_.getJobQueue().addJob(jtTRANSACTION, "TXS->TXN", [pap, stx]() {
-            pap->getOPs().submitTransaction(stx);
-        });
+        app_.getJobQueue().addJob(
+            jtTRANSACTION, "TXS->TXN", [pap, stx, journal = j_]() {
+                perf::measureDurationAndLog(
+                    [&]() { pap->getOPs().submitTransaction(stx); },
+                    "TXS->TXN",
+                    1s,
+                    journal);
+            });
     }
     catch (std::exception const& ex)
     {
diff --git a/src/xrpld/app/ledger/Ledger.cpp b/src/xrpld/app/ledger/Ledger.cpp
index 3cdf0ab1a7f..deb0655dce4 100644
--- a/src/xrpld/app/ledger/Ledger.cpp
+++ b/src/xrpld/app/ledger/Ledger.cpp
@@ -30,6 +30,7 @@
 #include
 #include
 #include
+#include <xrpld/perflog/PerfLog.h>
 #include
 #include
 
@@ -46,6 +47,8 @@
 #include
 #include
 
+using namespace std::chrono_literals;
+
 namespace ripple {
 
 create_genesis_t const create_genesis{};
@@ -1028,7 +1031,12 @@ pendSaveValidated(
             isCurrent ? jtPUBLEDGER : jtPUBOLDLEDGER,
             std::to_string(ledger->seq()),
             [&app, ledger, isCurrent]() {
-                saveValidatedLedger(app, ledger, isCurrent);
+                beast::Journal journal = app.journal("Ledger");
+                perf::measureDurationAndLog(
+                    [&]() { saveValidatedLedger(app, ledger, isCurrent); },
+                    "saveValidatedLedger",
+                    1s,
+                    journal);
             }))
     {
         return true;
diff --git a/src/xrpld/app/ledger/OrderBookDB.cpp b/src/xrpld/app/ledger/OrderBookDB.cpp
index b8a7b54008a..3beca6a670d 100644
--- a/src/xrpld/app/ledger/OrderBookDB.cpp
+++ b/src/xrpld/app/ledger/OrderBookDB.cpp
@@ -24,10 +24,13 @@
 #include
 #include
 #include
+#include <xrpld/perflog/PerfLog.h>
 #include
 
 #include
 
+using namespace std::chrono_literals;
+
 namespace ripple {
 
 OrderBookDB::OrderBookDB(Application& app)
@@ -69,7 +72,13 @@ OrderBookDB::setup(std::shared_ptr const& ledger)
         app_.getJobQueue().addJob(
             jtUPDATE_PF,
             "OrderBookDB::update: " + std::to_string(ledger->seq()),
-            [this, ledger]() { update(ledger); });
+            [this, ledger, journal = j_]() {
+                perf::measureDurationAndLog(
+                    [&]() { update(ledger); },
+                    "OrderBookDB::update:",
+                    1s,
+                    journal);
+            });
     }
 }
diff --git a/src/xrpld/app/ledger/detail/InboundLedger.cpp b/src/xrpld/app/ledger/detail/InboundLedger.cpp
index c1eed3a9f32..00541fc8860 100644
--- a/src/xrpld/app/ledger/detail/InboundLedger.cpp
+++ b/src/xrpld/app/ledger/detail/InboundLedger.cpp
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include <xrpld/perflog/PerfLog.h>
 #include
 #include
 
@@ -37,6 +38,8 @@
 #include
 #include
 
+using namespace std::chrono_literals;
+
 namespace ripple {
 
 using namespace std::chrono_literals;
@@ -473,15 +476,24 @@ InboundLedger::done()
 
     // We hold the PeerSet lock, so must dispatch
     app_.getJobQueue().addJob(
-        jtLEDGER_DATA, "AcquisitionDone", [self = shared_from_this()]() {
-            if (self->complete_ && !self->failed_)
-            {
-                self->app_.getLedgerMaster().checkAccept(self->getLedger());
-                self->app_.getLedgerMaster().tryAdvance();
-            }
-            else
-                self->app_.getInboundLedgers().logFailure(
-                    self->hash_, self->mSeq);
+        jtLEDGER_DATA,
+        "AcquisitionDone",
+        [self = shared_from_this(), journal = journal_]() {
+            perf::measureDurationAndLog(
+                [&]() {
+                    if (self->complete_ && !self->failed_)
+                    {
+                        self->app_.getLedgerMaster().checkAccept(
+                            self->getLedger());
+                        self->app_.getLedgerMaster().tryAdvance();
+                    }
+                    else
+                        self->app_.getInboundLedgers().logFailure(
+                            self->hash_, self->mSeq);
+                },
+                "AcquisitionDone",
+                1s,
+                journal);
         });
 }
diff --git a/src/xrpld/app/ledger/detail/InboundLedgers.cpp b/src/xrpld/app/ledger/detail/InboundLedgers.cpp
index b05268f65b8..a1cb864a53c 100644
--- a/src/xrpld/app/ledger/detail/InboundLedgers.cpp
+++ b/src/xrpld/app/ledger/detail/InboundLedgers.cpp
@@ -35,6 +35,8 @@
 #include
 #include
 
+using namespace std::chrono_literals;
+
 namespace ripple {
 
 class InboundLedgersImp : public InboundLedgers
@@ -212,8 +214,14 @@ class InboundLedgersImp : public InboundLedgers
         // dispatch
         if (ledger->gotData(std::weak_ptr(peer), packet))
             app_.getJobQueue().addJob(
-                jtLEDGER_DATA, "processLedgerData", [ledger]() {
-                    ledger->runData();
+                jtLEDGER_DATA,
+                "processLedgerData",
+                [ledger, journal = j_]() {
+                    perf::measureDurationAndLog(
+                        [&]() { ledger->runData(); },
+                        "processLedgerData",
+                        1s,
+                        journal);
                 });
 
         return true;
@@ -227,8 +235,12 @@ class InboundLedgersImp : public InboundLedgers
         if (packet->type() == protocol::liAS_NODE)
         {
             app_.getJobQueue().addJob(
-                jtLEDGER_DATA, "gotStaleData", [this, packet]() {
-                    gotStaleData(packet);
+                jtLEDGER_DATA, "gotStaleData", [this, packet, journal = j_]() {
+                    perf::measureDurationAndLog(
+                        [&]() { gotStaleData(packet); },
+ "gotStaleData", + 1s, + journal); }); } diff --git a/src/xrpld/app/ledger/detail/LedgerDeltaAcquire.cpp b/src/xrpld/app/ledger/detail/LedgerDeltaAcquire.cpp index d312a09008a..aadb3bcd760 100644 --- a/src/xrpld/app/ledger/detail/LedgerDeltaAcquire.cpp +++ b/src/xrpld/app/ledger/detail/LedgerDeltaAcquire.cpp @@ -25,6 +25,9 @@ #include #include #include +#include + +using namespace std::chrono_literals; namespace ripple { @@ -244,22 +247,31 @@ LedgerDeltaAcquire::onLedgerBuilt( app_.getJobQueue().addJob( jtREPLAY_TASK, "onLedgerBuilt", - [=, ledger = this->fullLedger_, &app = this->app_]() { - for (auto reason : reasons) - { - switch (reason) - { - case InboundLedger::Reason::GENERIC: - app.getLedgerMaster().storeLedger(ledger); - break; - default: - // TODO for other use cases - break; - } - } + [=, + ledger = this->fullLedger_, + &app = this->app_, + journal = journal_]() { + perf::measureDurationAndLog( + [&]() { + for (auto reason : reasons) + { + switch (reason) + { + case InboundLedger::Reason::GENERIC: + app.getLedgerMaster().storeLedger(ledger); + break; + default: + // TODO for other use cases + break; + } + } - if (firstTime) - app.getLedgerMaster().tryAdvance(); + if (firstTime) + app.getLedgerMaster().tryAdvance(); + }, + "onLedgerBuilt", + 1s, + journal); }); } diff --git a/src/xrpld/app/ledger/detail/LedgerMaster.cpp b/src/xrpld/app/ledger/detail/LedgerMaster.cpp index 78f0375b166..3bef6cb4533 100644 --- a/src/xrpld/app/ledger/detail/LedgerMaster.cpp +++ b/src/xrpld/app/ledger/detail/LedgerMaster.cpp @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -56,6 +57,8 @@ #include #include +using namespace std::chrono_literals; + namespace ripple { // Don't catch up more than 100 ledgers (cannot exceed 256) @@ -1361,27 +1364,36 @@ LedgerMaster::tryAdvance() if (!mAdvanceThread && !mValidLedger.empty()) { mAdvanceThread = true; - app_.getJobQueue().addJob(jtADVANCE, "advanceLedger", [this]() { - std::unique_lock sl(m_mutex); + app_.getJobQueue().addJob( + jtADVANCE, "advanceLedger", [this, journal = m_journal]() { + perf::measureDurationAndLog( + [&]() { + std::unique_lock sl(m_mutex); - XRPL_ASSERT( - !mValidLedger.empty() && mAdvanceThread, - "ripple::LedgerMaster::tryAdvance : has valid ledger"); + XRPL_ASSERT( + !mValidLedger.empty() && mAdvanceThread, + "ripple::LedgerMaster::tryAdvance : has valid " + "ledger"); - JLOG(m_journal.trace()) << "advanceThread<"; + JLOG(m_journal.trace()) << "advanceThread<"; - try - { - doAdvance(sl); - } - catch (std::exception const& ex) - { - JLOG(m_journal.fatal()) << "doAdvance throws: " << ex.what(); - } + try + { + doAdvance(sl); + } + catch (std::exception const& ex) + { + JLOG(m_journal.fatal()) + << "doAdvance throws: " << ex.what(); + } - mAdvanceThread = false; - JLOG(m_journal.trace()) << "advanceThread>"; - }); + mAdvanceThread = false; + JLOG(m_journal.trace()) << "advanceThread>"; + }, + "advanceLedger", + 1s, + journal); + }); } } @@ -1536,7 +1548,13 @@ LedgerMaster::newPFWork( << "newPFWork: Creating job. path find threads: " << mPathFindThread; if (app_.getJobQueue().addJob( - jtUPDATE_PF, name, [this]() { updatePaths(); })) + jtUPDATE_PF, name, [this, journal = m_journal]() { + perf::measureDurationAndLog( + [&]() { updatePaths(); }, + "newPFWork: Creating job. 
path find threads:", + 1s, + journal); + })) { ++mPathFindThread; } @@ -1857,8 +1875,11 @@ LedgerMaster::fetchForHistory( mFillInProgress = seq; } app_.getJobQueue().addJob( - jtADVANCE, "tryFill", [this, ledger]() { - tryFill(ledger); + jtADVANCE, + "tryFill", + [this, ledger, journal = m_journal]() { + perf::measureDurationAndLog( + [&]() { tryFill(ledger); }, "tryFill", 1s, journal); }); } progress = true; @@ -2027,10 +2048,17 @@ LedgerMaster::gotFetchPack(bool progress, std::uint32_t seq) { if (!mGotFetchPackThread.test_and_set(std::memory_order_acquire)) { - app_.getJobQueue().addJob(jtLEDGER_DATA, "gotFetchPack", [&]() { - app_.getInboundLedgers().gotFetchPack(); - mGotFetchPackThread.clear(std::memory_order_release); - }); + app_.getJobQueue().addJob( + jtLEDGER_DATA, "gotFetchPack", [this, journal = m_journal]() { + perf::measureDurationAndLog( + [&]() { + app_.getInboundLedgers().gotFetchPack(); + mGotFetchPackThread.clear(std::memory_order_release); + }, + "gotFetchPack", + 1s, + journal); + }); } } diff --git a/src/xrpld/app/ledger/detail/TimeoutCounter.cpp b/src/xrpld/app/ledger/detail/TimeoutCounter.cpp index e81ec6574d2..3e96d525231 100644 --- a/src/xrpld/app/ledger/detail/TimeoutCounter.cpp +++ b/src/xrpld/app/ledger/detail/TimeoutCounter.cpp @@ -19,6 +19,9 @@ #include #include +#include + +using namespace std::chrono_literals; namespace ripple { @@ -83,9 +86,15 @@ TimeoutCounter::queueJob(ScopedLockType& sl) app_.getJobQueue().addJob( queueJobParameter_.jobType, queueJobParameter_.jobName, - [wptr = pmDowncast()]() { - if (auto sptr = wptr.lock(); sptr) - sptr->invokeOnTimer(); + [wptr = pmDowncast(), journal = journal_]() { + perf::measureDurationAndLog( + [&]() { + if (auto sptr = wptr.lock(); sptr) + sptr->invokeOnTimer(); + }, + "TimeoutCounter::queueJob", + 1s, + journal); }); } diff --git a/src/xrpld/app/ledger/detail/TransactionAcquire.cpp b/src/xrpld/app/ledger/detail/TransactionAcquire.cpp index 23694b3cb6e..b365f1a19e2 100644 --- a/src/xrpld/app/ledger/detail/TransactionAcquire.cpp +++ b/src/xrpld/app/ledger/detail/TransactionAcquire.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #include @@ -79,8 +80,16 @@ TransactionAcquire::done() // just updates the consensus and related structures when we acquire // a transaction set. No need to update them if we're shutting down. 
     app_.getJobQueue().addJob(
-        jtTXN_DATA, "completeAcquire", [pap, hash, map]() {
-            pap->getInboundTransactions().giveSet(hash, map, true);
+        jtTXN_DATA,
+        "completeAcquire",
+        [pap, hash, map, journal = journal_]() {
+            perf::measureDurationAndLog(
+                [&]() {
+                    pap->getInboundTransactions().giveSet(hash, map, true);
+                },
+                "completeAcquire",
+                1s,
+                journal);
         });
     }
 }
diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp
index d87dea3c524..f1143770b2c 100644
--- a/src/xrpld/app/misc/NetworkOPs.cpp
+++ b/src/xrpld/app/misc/NetworkOPs.cpp
@@ -81,6 +81,8 @@
 #include
 #include
 
+using namespace std::chrono_literals;
+
 namespace ripple {
 
 class NetworkOPsImp final : public NetworkOPs
@@ -994,9 +996,16 @@ NetworkOPsImp::setHeartbeatTimer()
         heartbeatTimer_,
         mConsensus.parms().ledgerGRANULARITY,
         [this]() {
-            m_job_queue.addJob(jtNETOP_TIMER, "NetOPs.heartbeat", [this]() {
-                processHeartbeatTimer();
-            });
+            m_job_queue.addJob(
+                jtNETOP_TIMER,
+                "NetOPs.heartbeat",
+                [this, journal = m_journal]() {
+                    perf::measureDurationAndLog(
+                        [&]() { processHeartbeatTimer(); },
+                        "NetOPs.heartbeat",
+                        1s,
+                        journal);
+                });
         },
         [this]() { setHeartbeatTimer(); });
 }
@@ -1010,9 +1019,16 @@ NetworkOPsImp::setClusterTimer()
         clusterTimer_,
         10s,
         [this]() {
-            m_job_queue.addJob(jtNETOP_CLUSTER, "NetOPs.cluster", [this]() {
-                processClusterTimer();
-            });
+            m_job_queue.addJob(
+                jtNETOP_CLUSTER,
+                "NetOPs.cluster",
+                [this, journal = m_journal]() {
+                    perf::measureDurationAndLog(
+                        [&]() { processClusterTimer(); },
+                        "NetOPs.cluster",
+                        1s,
+                        journal);
+                });
         },
         [this]() { setClusterTimer(); });
 }
@@ -1229,10 +1245,17 @@ NetworkOPsImp::submitTransaction(std::shared_ptr const& iTrans)
 
     auto tx = std::make_shared(trans, reason, app_);
 
-    m_job_queue.addJob(jtTRANSACTION, "submitTxn", [this, tx]() {
-        auto t = tx;
-        processTransaction(t, false, false, FailHard::no);
-    });
+    m_job_queue.addJob(
+        jtTRANSACTION, "submitTxn", [this, tx, journal = m_journal]() {
+            perf::measureDurationAndLog(
+                [&]() {
+                    auto t = tx;
+                    processTransaction(t, false, false, FailHard::no);
+                },
+                "submitTxn",
+                1s,
+                journal);
+        });
 }
 
 bool
@@ -1315,7 +1338,13 @@ NetworkOPsImp::doTransactionAsync(
     if (mDispatchState == DispatchState::none)
     {
         if (m_job_queue.addJob(
-                jtBATCH, "transactionBatch", [this]() { transactionBatch(); }))
+                jtBATCH, "transactionBatch", [this, journal = m_journal]() {
+                    perf::measureDurationAndLog(
+                        [&]() { transactionBatch(); },
+                        "transactionBatch",
+                        1s,
+                        journal);
+                }))
         {
             mDispatchState = DispatchState::scheduled;
         }
@@ -1362,9 +1391,16 @@ NetworkOPsImp::doTransactionSyncBatch(
     if (mTransactions.size())
    {
         // More transactions need to be applied, but by another job.
- if (m_job_queue.addJob(jtBATCH, "transactionBatch", [this]() { - transactionBatch(); - })) + if (m_job_queue.addJob( + jtBATCH, + "transactionBatch", + [this, journal = m_journal]() { + perf::measureDurationAndLog( + [&]() { transactionBatch(); }, + "transactionBatch", + 1s, + journal); + })) { mDispatchState = DispatchState::scheduled; } @@ -3175,8 +3211,17 @@ NetworkOPsImp::reportFeeChange() if (f != mLastFeeSummary) { m_job_queue.addJob( - jtCLIENT_FEE_CHANGE, "reportFeeChange->pubServer", [this]() { - pubServer(); + jtCLIENT_FEE_CHANGE, + "reportFeeChange->pubServer", + [this, journal = m_journal]() { + perf::measureDurationAndLog( + [&]() { + pubServer(); + return true; + }, + "reportFeeChange->pubServer", + 1s, + journal); }); } } @@ -3187,7 +3232,16 @@ NetworkOPsImp::reportConsensusStateChange(ConsensusPhase phase) m_job_queue.addJob( jtCLIENT_CONSENSUS, "reportConsensusStateChange->pubConsensus", - [this, phase]() { pubConsensus(phase); }); + [this, phase, journal = m_journal]() { + perf::measureDurationAndLog( + [&]() { + pubConsensus(phase); + return true; + }, + "reportConsensusStateChange->pubConsensus", + 1s, + journal); + }); } inline void @@ -3693,262 +3747,301 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo) jtCLIENT_ACCT_HIST, "AccountHistoryTxStream", [this, dbType = databaseType, subInfo]() { - auto const& accountId = subInfo.index_->accountId_; - auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_; - auto& txHistoryIndex = subInfo.index_->historyTxIndex_; - - JLOG(m_journal.trace()) - << "AccountHistory job for account " << toBase58(accountId) - << " started. lastLedgerSeq=" << lastLedgerSeq; - - auto isFirstTx = [&](std::shared_ptr const& tx, - std::shared_ptr const& meta) -> bool { - /* - * genesis account: first tx is the one with seq 1 - * other account: first tx is the one created the account - */ - if (accountId == genesisAccountId) - { - auto stx = tx->getSTransaction(); - if (stx->getAccountID(sfAccount) == accountId && - stx->getSeqValue() == 1) - return true; - } + perf::measureDurationAndLog( + [&]() { + auto const& accountId = subInfo.index_->accountId_; + auto& lastLedgerSeq = subInfo.index_->historyLastLedgerSeq_; + auto& txHistoryIndex = subInfo.index_->historyTxIndex_; - for (auto& node : meta->getNodes()) - { - if (node.getFieldU16(sfLedgerEntryType) != ltACCOUNT_ROOT) - continue; - - if (node.isFieldPresent(sfNewFields)) - { - if (auto inner = dynamic_cast( - node.peekAtPField(sfNewFields)); - inner) + JLOG(m_journal.trace()) + << "AccountHistory job for account " + << toBase58(accountId) + << " started. 
lastLedgerSeq=" << lastLedgerSeq; + + auto isFirstTx = + [&](std::shared_ptr const& tx, + std::shared_ptr const& meta) -> bool { + /* + * genesis account: first tx is the one with seq 1 + * other account: first tx is the one created the + * account + */ + if (accountId == genesisAccountId) { - if (inner->isFieldPresent(sfAccount) && - inner->getAccountID(sfAccount) == accountId) - { + auto stx = tx->getSTransaction(); + if (stx->getAccountID(sfAccount) == accountId && + stx->getSeqValue() == 1) return true; - } } - } - } - - return false; - }; - auto send = [&](Json::Value const& jvObj, - bool unsubscribe) -> bool { - if (auto sptr = subInfo.sinkWptr_.lock()) - { - sptr->send(jvObj, true); - if (unsubscribe) - unsubAccountHistory(sptr, accountId, false); - return true; - } - - return false; - }; + for (auto& node : meta->getNodes()) + { + if (node.getFieldU16(sfLedgerEntryType) != + ltACCOUNT_ROOT) + continue; - auto sendMultiApiJson = [&](MultiApiJson const& jvObj, - bool unsubscribe) -> bool { - if (auto sptr = subInfo.sinkWptr_.lock()) - { - jvObj.visit( - sptr->getApiVersion(), // - [&](Json::Value const& jv) { sptr->send(jv, true); }); + if (node.isFieldPresent(sfNewFields)) + { + if (auto inner = dynamic_cast( + node.peekAtPField(sfNewFields)); + inner) + { + if (inner->isFieldPresent(sfAccount) && + inner->getAccountID(sfAccount) == + accountId) + { + return true; + } + } + } + } - if (unsubscribe) - unsubAccountHistory(sptr, accountId, false); - return true; - } + return false; + }; - return false; - }; - - auto getMoreTxns = - [&](std::uint32_t minLedger, - std::uint32_t maxLedger, - std::optional marker) - -> std::optional>> { - switch (dbType) - { - case Sqlite: { - auto db = static_cast( - &app_.getRelationalDatabase()); - RelationalDatabase::AccountTxPageOptions options{ - accountId, minLedger, maxLedger, marker, 0, true}; - return db->newestAccountTxPage(options); - } - default: { - UNREACHABLE( - "ripple::NetworkOPsImp::addAccountHistoryJob::" - "getMoreTxns : invalid database type"); - return {}; - } - } - }; + auto send = [&](Json::Value const& jvObj, + bool unsubscribe) -> bool { + if (auto sptr = subInfo.sinkWptr_.lock()) + { + sptr->send(jvObj, true); + if (unsubscribe) + unsubAccountHistory(sptr, accountId, false); + return true; + } - /* - * search backward until the genesis ledger or asked to stop - */ - while (lastLedgerSeq >= 2 && !subInfo.index_->stopHistorical_) - { - int feeChargeCount = 0; - if (auto sptr = subInfo.sinkWptr_.lock(); sptr) - { - sptr->getConsumer().charge(Resource::feeMediumBurdenRPC); - ++feeChargeCount; - } - else - { - JLOG(m_journal.trace()) - << "AccountHistory job for account " - << toBase58(accountId) << " no InfoSub. Fee charged " - << feeChargeCount << " times."; - return; - } + return false; + }; - // try to search in 1024 ledgers till reaching genesis ledgers - auto startLedgerSeq = - (lastLedgerSeq > 1024 + 2 ? 
lastLedgerSeq - 1024 : 2); - JLOG(m_journal.trace()) - << "AccountHistory job for account " << toBase58(accountId) - << ", working on ledger range [" << startLedgerSeq << "," - << lastLedgerSeq << "]"; - - auto haveRange = [&]() -> bool { - std::uint32_t validatedMin = UINT_MAX; - std::uint32_t validatedMax = 0; - auto haveSomeValidatedLedgers = - app_.getLedgerMaster().getValidatedRange( - validatedMin, validatedMax); - - return haveSomeValidatedLedgers && - validatedMin <= startLedgerSeq && - lastLedgerSeq <= validatedMax; - }(); - - if (!haveRange) - { - JLOG(m_journal.debug()) - << "AccountHistory reschedule job for account " - << toBase58(accountId) << ", incomplete ledger range [" - << startLedgerSeq << "," << lastLedgerSeq << "]"; - setAccountHistoryJobTimer(subInfo); - return; - } + auto sendMultiApiJson = [&](MultiApiJson const& jvObj, + bool unsubscribe) -> bool { + if (auto sptr = subInfo.sinkWptr_.lock()) + { + jvObj.visit( + sptr->getApiVersion(), // + [&](Json::Value const& jv) { + sptr->send(jv, true); + }); + + if (unsubscribe) + unsubAccountHistory(sptr, accountId, false); + return true; + } - std::optional marker{}; - while (!subInfo.index_->stopHistorical_) - { - auto dbResult = - getMoreTxns(startLedgerSeq, lastLedgerSeq, marker); - if (!dbResult) - { - JLOG(m_journal.debug()) - << "AccountHistory job for account " - << toBase58(accountId) << " getMoreTxns failed."; - send(rpcError(rpcINTERNAL), true); - return; - } + return false; + }; + + auto getMoreTxns = + [&](std::uint32_t minLedger, + std::uint32_t maxLedger, + std::optional + marker) + -> std::optional>> { + switch (dbType) + { + case Sqlite: { + auto db = static_cast( + &app_.getRelationalDatabase()); + RelationalDatabase::AccountTxPageOptions + options{ + accountId, + minLedger, + maxLedger, + marker, + 0, + true}; + return db->newestAccountTxPage(options); + } + default: { + UNREACHABLE( + "ripple::NetworkOPsImp::" + "addAccountHistoryJob::" + "getMoreTxns : invalid database type"); + return {}; + } + } + }; - auto const& txns = dbResult->first; - marker = dbResult->second; - size_t num_txns = txns.size(); - for (size_t i = 0; i < num_txns; ++i) + /* + * search backward until the genesis ledger or asked to stop + */ + while (lastLedgerSeq >= 2 && + !subInfo.index_->stopHistorical_) { - auto const& [tx, meta] = txns[i]; - - if (!tx || !meta) + int feeChargeCount = 0; + if (auto sptr = subInfo.sinkWptr_.lock(); sptr) { - JLOG(m_journal.debug()) - << "AccountHistory job for account " - << toBase58(accountId) << " empty tx or meta."; - send(rpcError(rpcINTERNAL), true); - return; + sptr->getConsumer().charge( + Resource::feeMediumBurdenRPC); + ++feeChargeCount; } - auto curTxLedger = - app_.getLedgerMaster().getLedgerBySeq( - tx->getLedger()); - if (!curTxLedger) + else { - JLOG(m_journal.debug()) + JLOG(m_journal.trace()) << "AccountHistory job for account " - << toBase58(accountId) << " no ledger."; - send(rpcError(rpcINTERNAL), true); + << toBase58(accountId) + << " no InfoSub. Fee charged " << feeChargeCount + << " times."; return; } - std::shared_ptr stTxn = - tx->getSTransaction(); - if (!stTxn) + + // try to search in 1024 ledgers till reaching genesis + // ledgers + auto startLedgerSeq = + (lastLedgerSeq > 1024 + 2 ? 
lastLedgerSeq - 1024 + : 2); + JLOG(m_journal.trace()) + << "AccountHistory job for account " + << toBase58(accountId) + << ", working on ledger range [" << startLedgerSeq + << "," << lastLedgerSeq << "]"; + + auto haveRange = [&]() -> bool { + std::uint32_t validatedMin = UINT_MAX; + std::uint32_t validatedMax = 0; + auto haveSomeValidatedLedgers = + app_.getLedgerMaster().getValidatedRange( + validatedMin, validatedMax); + + return haveSomeValidatedLedgers && + validatedMin <= startLedgerSeq && + lastLedgerSeq <= validatedMax; + }(); + + if (!haveRange) { JLOG(m_journal.debug()) - << "AccountHistory job for account " + << "AccountHistory reschedule job for account " << toBase58(accountId) - << " getSTransaction failed."; - send(rpcError(rpcINTERNAL), true); + << ", incomplete ledger range [" + << startLedgerSeq << "," << lastLedgerSeq + << "]"; + setAccountHistoryJobTimer(subInfo); return; } - auto const mRef = std::ref(*meta); - auto const trR = meta->getResultTER(); - MultiApiJson jvTx = - transJson(stTxn, trR, true, curTxLedger, mRef); - - jvTx.set( - jss::account_history_tx_index, txHistoryIndex--); - if (i + 1 == num_txns || - txns[i + 1].first->getLedger() != tx->getLedger()) - jvTx.set(jss::account_history_boundary, true); - - if (isFirstTx(tx, meta)) + std::optional + marker{}; + while (!subInfo.index_->stopHistorical_) { - jvTx.set(jss::account_history_tx_first, true); - sendMultiApiJson(jvTx, false); + auto dbResult = getMoreTxns( + startLedgerSeq, lastLedgerSeq, marker); + if (!dbResult) + { + JLOG(m_journal.debug()) + << "AccountHistory job for account " + << toBase58(accountId) + << " getMoreTxns failed."; + send(rpcError(rpcINTERNAL), true); + return; + } - JLOG(m_journal.trace()) - << "AccountHistory job for account " - << toBase58(accountId) - << " done, found last tx."; - return; + auto const& txns = dbResult->first; + marker = dbResult->second; + size_t num_txns = txns.size(); + for (size_t i = 0; i < num_txns; ++i) + { + auto const& [tx, meta] = txns[i]; + + if (!tx || !meta) + { + JLOG(m_journal.debug()) + << "AccountHistory job for account " + << toBase58(accountId) + << " empty tx or meta."; + send(rpcError(rpcINTERNAL), true); + return; + } + auto curTxLedger = + app_.getLedgerMaster().getLedgerBySeq( + tx->getLedger()); + if (!curTxLedger) + { + JLOG(m_journal.debug()) + << "AccountHistory job for account " + << toBase58(accountId) << " no ledger."; + send(rpcError(rpcINTERNAL), true); + return; + } + std::shared_ptr stTxn = + tx->getSTransaction(); + if (!stTxn) + { + JLOG(m_journal.debug()) + << "AccountHistory job for account " + << toBase58(accountId) + << " getSTransaction failed."; + send(rpcError(rpcINTERNAL), true); + return; + } + + auto const mRef = std::ref(*meta); + auto const trR = meta->getResultTER(); + MultiApiJson jvTx = transJson( + stTxn, trR, true, curTxLedger, mRef); + + jvTx.set( + jss::account_history_tx_index, + txHistoryIndex--); + if (i + 1 == num_txns || + txns[i + 1].first->getLedger() != + tx->getLedger()) + jvTx.set( + jss::account_history_boundary, true); + + if (isFirstTx(tx, meta)) + { + jvTx.set( + jss::account_history_tx_first, true); + sendMultiApiJson(jvTx, false); + + JLOG(m_journal.trace()) + << "AccountHistory job for account " + << toBase58(accountId) + << " done, found last tx."; + return; + } + else + { + sendMultiApiJson(jvTx, false); + } + } + + if (marker) + { + JLOG(m_journal.trace()) + << "AccountHistory job for account " + << toBase58(accountId) + << " paging, marker=" << marker->ledgerSeq + << ":" << 
marker->txnSeq;
+                            }
+                            else
+                            {
+                                break;
+                            }
+                        }
+
+                        if (!subInfo.index_->stopHistorical_)
+                        {
+                            lastLedgerSeq = startLedgerSeq - 1;
+                            if (lastLedgerSeq <= 1)
+                            {
+                                JLOG(m_journal.trace())
+                                    << "AccountHistory job for account "
+                                    << toBase58(accountId)
+                                    << " done, reached genesis ledger.";
+                                return;
+                            }
+                        }
+                    }
-            if (marker)
-            {
-                JLOG(m_journal.trace())
-                    << "AccountHistory job for account "
-                    << toBase58(accountId)
-                    << " paging, marker=" << marker->ledgerSeq << ":"
-                    << marker->txnSeq;
-            }
-            else
-            {
-                break;
-            }
-        }
-
-        if (!subInfo.index_->stopHistorical_)
-        {
-            lastLedgerSeq = startLedgerSeq - 1;
-            if (lastLedgerSeq <= 1)
-            {
-                JLOG(m_journal.trace())
-                    << "AccountHistory job for account "
-                    << toBase58(accountId)
-                    << " done, reached genesis ledger.";
-                return;
-            }
-        }
-    }
+                    return;
+                },
+                "AccountHistoryTxStream",
+                1s,
+                this->m_journal);
         });
 }
diff --git a/src/xrpld/core/detail/SociDB.cpp b/src/xrpld/core/detail/SociDB.cpp
index 5b298dac43a..4b3c3a7b849 100644
--- a/src/xrpld/core/detail/SociDB.cpp
+++ b/src/xrpld/core/detail/SociDB.cpp
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include <xrpld/perflog/PerfLog.h>
 #include
 #include
 
@@ -35,6 +36,8 @@
 
 #include
 
+using namespace std::chrono_literals;
+
 namespace ripple {
 
 static auto checkpointPageCount = 1000;
@@ -257,9 +260,17 @@ class WALCheckpointer : public Checkpointer
             // There is a separate check in `checkpoint` for a valid
             // connection in the rare case when the DatabaseCon is destroyed
             // after locking this weak_ptr
-            [wp = std::weak_ptr{shared_from_this()}]() {
-                if (auto self = wp.lock())
-                    self->checkpoint();
+            [wp = std::weak_ptr{shared_from_this()},
+             journal = j_]() {
+                perf::measureDurationAndLog(
+                    [&]() {
+                        if (auto self = wp.lock())
+                            self->checkpoint();
+                        return true;
+                    },
+                    "WAL",
+                    1s,
+                    journal);
             }))
         {
             std::lock_guard lock(mutex_);
diff --git a/src/xrpld/net/detail/RPCSub.cpp b/src/xrpld/net/detail/RPCSub.cpp
index 3f0c923e135..a0e1b9974fc 100644
--- a/src/xrpld/net/detail/RPCSub.cpp
+++ b/src/xrpld/net/detail/RPCSub.cpp
@@ -19,6 +19,7 @@
 
 #include
 #include
+#include <xrpld/perflog/PerfLog.h>
 #include
 #include
 
@@ -27,6 +28,8 @@
 
 #include
 
+using namespace std::chrono_literals;
+
 namespace ripple {
 
 // Subscription object for JSON-RPC
@@ -91,8 +94,17 @@ class RPCSubImp : public RPCSub
             JLOG(j_.info()) << "RPCCall::fromNetwork start";
 
             mSending = m_jobQueue.addJob(
-                jtCLIENT_SUBSCRIBE, "RPCSub::sendThread", [this]() {
-                    sendThread();
+                jtCLIENT_SUBSCRIBE,
+                "RPCSub::sendThread",
+                [this, journal = j_]() {
+                    perf::measureDurationAndLog(
+                        [&]() {
+                            sendThread();
+                            return true;
+                        },
+                        "RPCSub::sendThread",
+                        1s,
+                        journal);
                 });
         }
     }
diff --git a/src/xrpld/overlay/detail/PeerImp.cpp b/src/xrpld/overlay/detail/PeerImp.cpp
index bca2cfd8c70..70b00ecfde7 100644
--- a/src/xrpld/overlay/detail/PeerImp.cpp
+++ b/src/xrpld/overlay/detail/PeerImp.cpp
@@ -1077,8 +1077,17 @@ PeerImp::onMessage(std::shared_ptr const& m)
         fee_.update(Resource::feeModerateBurdenPeer, "oversize");
 
     app_.getJobQueue().addJob(
-        jtMANIFEST, "receiveManifests", [this, that = shared_from_this(), m]() {
-            overlay_.onManifests(m, that);
+        jtMANIFEST,
+        "receiveManifests",
+        [this, that = shared_from_this(), m, journal = p_journal_]() {
+            perf::measureDurationAndLog(
+                [&]() {
+                    overlay_.onManifests(m, that);
+                    return true;
+                },
+                "receiveManifests",
+                1s,
+                journal);
         });
 }
 
@@ -1349,10 +1358,18 @@ PeerImp::handleTransaction(
                 flags,
                 checkSignature,
                 batch,
-                stx]() {
-                if (auto peer = weak.lock())
-                    peer->checkTransaction(
-                        flags, checkSignature, stx, batch);
+                stx,
+                journal =
p_journal_]() { + perf::measureDurationAndLog( + [&]() { + if (auto peer = weak.lock()) + peer->checkTransaction( + flags, checkSignature, stx, batch); + return true; + }, + "recvTransaction->checkTransaction", + 1s, + journal); }); } } @@ -1447,10 +1464,18 @@ PeerImp::onMessage(std::shared_ptr const& m) // Queue a job to process the request std::weak_ptr weak = shared_from_this(); - app_.getJobQueue().addJob(jtLEDGER_REQ, "recvGetLedger", [weak, m]() { - if (auto peer = weak.lock()) - peer->processLedgerRequest(m); - }); + app_.getJobQueue().addJob( + jtLEDGER_REQ, "recvGetLedger", [weak, m, journal = p_journal_]() { + perf::measureDurationAndLog( + [&]() { + if (auto peer = weak.lock()) + peer->processLedgerRequest(m); + return true; + }, + "recvGetLedger", + 1s, + journal); + }); } void @@ -1468,27 +1493,38 @@ PeerImp::onMessage(std::shared_ptr const& m) Resource::feeModerateBurdenPeer, "received a proof path request"); std::weak_ptr weak = shared_from_this(); app_.getJobQueue().addJob( - jtREPLAY_REQ, "recvProofPathRequest", [weak, m]() { - if (auto peer = weak.lock()) - { - auto reply = - peer->ledgerReplayMsgHandler_.processProofPathRequest(m); - if (reply.has_error()) - { - if (reply.error() == protocol::TMReplyError::reBAD_REQUEST) - peer->charge( - Resource::feeMalformedRequest, - "proof_path_request"); - else - peer->charge( - Resource::feeRequestNoReply, "proof_path_request"); - } - else - { - peer->send(std::make_shared( - reply, protocol::mtPROOF_PATH_RESPONSE)); - } - } + jtREPLAY_REQ, + "recvProofPathRequest", + [weak, m, journal = p_journal_]() { + perf::measureDurationAndLog( + [&]() { + if (auto peer = weak.lock()) + { + auto reply = peer->ledgerReplayMsgHandler_ + .processProofPathRequest(m); + if (reply.has_error()) + { + if (reply.error() == + protocol::TMReplyError::reBAD_REQUEST) + peer->charge( + Resource::feeMalformedRequest, + "proof_path_request"); + else + peer->charge( + Resource::feeRequestNoReply, + "proof_path_request"); + } + else + { + peer->send(std::make_shared( + reply, protocol::mtPROOF_PATH_RESPONSE)); + } + } + return true; + }, + "recvProofPathRequest", + 1s, + journal); }); } @@ -1522,28 +1558,38 @@ PeerImp::onMessage(std::shared_ptr const& m) fee_.fee = Resource::feeModerateBurdenPeer; std::weak_ptr weak = shared_from_this(); app_.getJobQueue().addJob( - jtREPLAY_REQ, "recvReplayDeltaRequest", [weak, m]() { - if (auto peer = weak.lock()) - { - auto reply = - peer->ledgerReplayMsgHandler_.processReplayDeltaRequest(m); - if (reply.has_error()) - { - if (reply.error() == protocol::TMReplyError::reBAD_REQUEST) - peer->charge( - Resource::feeMalformedRequest, - "replay_delta_request"); - else - peer->charge( - Resource::feeRequestNoReply, - "replay_delta_request"); - } - else - { - peer->send(std::make_shared( - reply, protocol::mtREPLAY_DELTA_RESPONSE)); - } - } + jtREPLAY_REQ, + "recvReplayDeltaRequest", + [weak, m, journal = p_journal_]() { + perf::measureDurationAndLog( + [&]() { + if (auto peer = weak.lock()) + { + auto reply = peer->ledgerReplayMsgHandler_ + .processReplayDeltaRequest(m); + if (reply.has_error()) + { + if (reply.error() == + protocol::TMReplyError::reBAD_REQUEST) + peer->charge( + Resource::feeMalformedRequest, + "replay_delta_request"); + else + peer->charge( + Resource::feeRequestNoReply, + "replay_delta_request"); + } + else + { + peer->send(std::make_shared( + reply, protocol::mtREPLAY_DELTA_RESPONSE)); + } + } + return true; + }, + "recvReplayDeltaRequest", + 1s, + journal); }); } @@ -1640,12 +1686,21 @@ 
PeerImp::onMessage(std::shared_ptr const& m) { std::weak_ptr weak{shared_from_this()}; app_.getJobQueue().addJob( - jtTXN_DATA, "recvPeerData", [weak, ledgerHash, m]() { - if (auto peer = weak.lock()) - { - peer->app_.getInboundTransactions().gotData( - ledgerHash, peer, m); - } + jtTXN_DATA, + "recvPeerData", + [weak, ledgerHash, m, journal = p_journal_]() { + perf::measureDurationAndLog( + [&]() { + if (auto peer = weak.lock()) + { + peer->app_.getInboundTransactions().gotData( + ledgerHash, peer, m); + } + return true; + }, + "recvPeerData", + 1s, + journal); }); return; } @@ -1770,9 +1825,16 @@ PeerImp::onMessage(std::shared_ptr const& m) app_.getJobQueue().addJob( isTrusted ? jtPROPOSAL_t : jtPROPOSAL_ut, "recvPropose->checkPropose", - [weak, isTrusted, m, proposal]() { - if (auto peer = weak.lock()) - peer->checkPropose(isTrusted, m, proposal); + [weak, isTrusted, m, proposal, journal = p_journal_]() { + perf::measureDurationAndLog( + [&]() { + if (auto peer = weak.lock()) + peer->checkPropose(isTrusted, m, proposal); + return true; + }, + "recvPropose->checkPropose", + 1s, + journal); }); } @@ -2408,9 +2470,16 @@ PeerImp::onMessage(std::shared_ptr const& m) app_.getJobQueue().addJob( isTrusted ? jtVALIDATION_t : jtVALIDATION_ut, name, - [weak, val, m, key]() { - if (auto peer = weak.lock()) - peer->checkValidation(val, key, m); + [weak, val, m, key, name, journal = p_journal_]() { + perf::measureDurationAndLog( + [&]() { + if (auto peer = weak.lock()) + peer->checkValidation(val, key, m); + return true; + }, + name, + 1s, + journal); }); } else @@ -2463,9 +2532,18 @@ PeerImp::onMessage(std::shared_ptr const& m) std::weak_ptr weak = shared_from_this(); app_.getJobQueue().addJob( - jtREQUESTED_TXN, "doTransactions", [weak, m]() { - if (auto peer = weak.lock()) - peer->doTransactions(m); + jtREQUESTED_TXN, + "doTransactions", + [weak, m, journal = p_journal_]() { + perf::measureDurationAndLog( + [&]() { + if (auto peer = weak.lock()) + peer->doTransactions(m); + return true; + }, + "doTransactions", + 1s, + journal); }); return; } @@ -2597,9 +2675,18 @@ PeerImp::onMessage(std::shared_ptr const& m) std::weak_ptr weak = shared_from_this(); app_.getJobQueue().addJob( - jtMISSING_TXN, "handleHaveTransactions", [weak, m]() { - if (auto peer = weak.lock()) - peer->handleHaveTransactions(m); + jtMISSING_TXN, + "handleHaveTransactions", + [weak, m, journal = p_journal_]() { + perf::measureDurationAndLog( + [&]() { + if (auto peer = weak.lock()) + peer->handleHaveTransactions(m); + return true; + }, + "handleHaveTransactions", + 1s, + journal); }); } @@ -2768,8 +2855,18 @@ PeerImp::doFetchPack(std::shared_ptr const& packet) auto elapsed = UptimeClock::now(); auto const pap = &app_; app_.getJobQueue().addJob( - jtPACK, "MakeFetchPack", [pap, weak, packet, hash, elapsed]() { - pap->getLedgerMaster().makeFetchPack(weak, packet, hash, elapsed); + jtPACK, + "MakeFetchPack", + [pap, weak, packet, hash, elapsed, journal = p_journal_]() { + perf::measureDurationAndLog( + [&]() { + pap->getLedgerMaster().makeFetchPack( + weak, packet, hash, elapsed); + return true; + }, + "MakeFetchPack", + 1s, + journal); }); } diff --git a/src/xrpld/perflog/PerfLog.h b/src/xrpld/perflog/PerfLog.h index 5212752ec71..98ff2dbc9ee 100644 --- a/src/xrpld/perflog/PerfLog.h +++ b/src/xrpld/perflog/PerfLog.h @@ -190,20 +190,35 @@ measureDurationAndLog( std::chrono::duration maxDelay, beast::Journal const& journal) { - auto start_time = std::chrono::high_resolution_clock::now(); + using Result = std::invoke_result_t; - auto 
result = func();
-    auto end_time = std::chrono::high_resolution_clock::now();
-    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(
-        end_time - start_time);
-    if (duration > maxDelay)
+    if constexpr (std::is_void_v<Result>)
     {
-        JLOG(journal.warn())
-            << actionDescription << " took " << duration.count() << " ms";
+        std::forward<Func>(func)();
+        auto end_time = std::chrono::high_resolution_clock::now();
+        auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(
+            end_time - start_time);
+        if (duration > maxDelay)
+        {
+            JLOG(journal.warn())
+                << actionDescription << " took " << duration.count() << " ms";
+        }
+    }
+    else
+    {
+        Result result = std::forward<Func>(func)();
+        auto end_time = std::chrono::high_resolution_clock::now();
+        auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(
+            end_time - start_time);
+        if (duration > maxDelay)
+        {
+            JLOG(journal.warn())
+                << actionDescription << " took " << duration.count() << " ms";
+        }
+        return result;
     }
-
-    return result;
 }
 
 } // namespace perf
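For reference, the pattern this patch applies at every call site can be exercised in isolation. The sketch below is a minimal stand-in, assuming only the standard library: logSlow is a hypothetical name and std::cerr replaces beast::Journal, but the control flow mirrors the helper in src/xrpld/perflog/PerfLog.h, including the if constexpr split that lets one wrapper serve both void jobs and jobs whose return value must be forwarded. Note also that each job lambda in the patch captures the journal by value, so the job does not reach back into an object that may be gone by the time the job queue runs it.

#include <chrono>
#include <iostream>
#include <string>
#include <type_traits>
#include <utility>

// Stand-alone illustration of the perf::measureDurationAndLog pattern.
template <typename Func>
auto
logSlow(
    Func&& func,
    std::string const& actionDescription,
    std::chrono::milliseconds maxDelay)
{
    using Result = std::invoke_result_t<Func>;
    auto start_time = std::chrono::high_resolution_clock::now();

    // Warn only when the wrapped work exceeded the allowed delay.
    auto warnIfSlow = [&]() {
        auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::high_resolution_clock::now() - start_time);
        if (duration > maxDelay)
            std::cerr << actionDescription << " took " << duration.count()
                      << " ms\n";
    };

    // void jobs run and log; non-void jobs also forward their result.
    if constexpr (std::is_void_v<Result>)
    {
        std::forward<Func>(func)();
        warnIfSlow();
    }
    else
    {
        Result result = std::forward<Func>(func)();
        warnIfSlow();
        return result;
    }
}

int main()
{
    using namespace std::chrono_literals;
    // A fast job: the wrapper forwards its value and prints no warning.
    int n = logSlow([] { return 6 * 7; }, "exampleJob", 1000ms);
    std::cerr << "result: " << n << '\n';
}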